title
stringlengths
1
185
diff
stringlengths
0
32.2M
body
stringlengths
0
123k
url
stringlengths
57
58
created_at
stringlengths
20
20
closed_at
stringlengths
20
20
merged_at
stringlengths
20
20
updated_at
stringlengths
20
20
ENH: Expose symlog scaling in plotting API
diff --git a/.gitignore b/.gitignore index 4598714db6c6a..816aff376fc83 100644 --- a/.gitignore +++ b/.gitignore @@ -101,7 +101,8 @@ asv_bench/pandas/ # Documentation generated files # ################################# doc/source/generated -doc/source/api/generated +doc/source/user_guide/styled.xlsx +doc/source/reference/api doc/source/_static doc/source/vbench doc/source/vbench.rst @@ -109,6 +110,5 @@ doc/source/index.rst doc/build/html/index.html # Windows specific leftover: doc/tmp.sv -doc/source/styled.xlsx env/ doc/source/savefig/ diff --git a/asv_bench/benchmarks/categoricals.py b/asv_bench/benchmarks/categoricals.py index e5dab0cb066aa..4b5b2848f7e0f 100644 --- a/asv_bench/benchmarks/categoricals.py +++ b/asv_bench/benchmarks/categoricals.py @@ -223,12 +223,19 @@ class CategoricalSlicing(object): def setup(self, index): N = 10**6 - values = list('a' * N + 'b' * N + 'c' * N) - indices = { - 'monotonic_incr': pd.Categorical(values), - 'monotonic_decr': pd.Categorical(reversed(values)), - 'non_monotonic': pd.Categorical(list('abc' * N))} - self.data = indices[index] + categories = ['a', 'b', 'c'] + values = [0] * N + [1] * N + [2] * N + if index == 'monotonic_incr': + self.data = pd.Categorical.from_codes(values, + categories=categories) + elif index == 'monotonic_decr': + self.data = pd.Categorical.from_codes(list(reversed(values)), + categories=categories) + elif index == 'non_monotonic': + self.data = pd.Categorical.from_codes([0, 1, 2] * N, + categories=categories) + else: + raise ValueError('Invalid index param: {}'.format(index)) self.scalar = 10000 self.list = list(range(10000)) diff --git a/asv_bench/benchmarks/ctors.py b/asv_bench/benchmarks/ctors.py index 9082b4186bfa4..5715c4fb2d0d4 100644 --- a/asv_bench/benchmarks/ctors.py +++ b/asv_bench/benchmarks/ctors.py @@ -72,7 +72,7 @@ class SeriesDtypesConstructors(object): def setup(self): N = 10**4 - self.arr = np.random.randn(N, N) + self.arr = np.random.randn(N) self.arr_str = np.array(['foo', 'bar', 
'baz'], dtype=object) self.s = Series([Timestamp('20110101'), Timestamp('20120101'), Timestamp('20130101')] * N * 10) diff --git a/asv_bench/benchmarks/index_object.py b/asv_bench/benchmarks/index_object.py index f76040921393f..bbe164d4858ab 100644 --- a/asv_bench/benchmarks/index_object.py +++ b/asv_bench/benchmarks/index_object.py @@ -138,7 +138,8 @@ def setup(self, dtype): self.sorted = self.idx.sort_values() half = N // 2 self.non_unique = self.idx[:half].append(self.idx[:half]) - self.non_unique_sorted = self.sorted[:half].append(self.sorted[:half]) + self.non_unique_sorted = (self.sorted[:half].append(self.sorted[:half]) + .sort_values()) self.key = self.sorted[N // 4] def time_boolean_array(self, dtype): diff --git a/doc/make.py b/doc/make.py index 0b14a9dcd4c34..438c4a04a3f08 100755 --- a/doc/make.py +++ b/doc/make.py @@ -15,15 +15,18 @@ import sys import os import shutil +import csv import subprocess import argparse import webbrowser +import docutils +import docutils.parsers.rst DOC_PATH = os.path.dirname(os.path.abspath(__file__)) SOURCE_PATH = os.path.join(DOC_PATH, 'source') BUILD_PATH = os.path.join(DOC_PATH, 'build') -BUILD_DIRS = ['doctrees', 'html', 'latex', 'plots', '_static', '_templates'] +REDIRECTS_FILE = os.path.join(DOC_PATH, 'redirects.csv') class DocBuilder: @@ -50,7 +53,7 @@ def __init__(self, num_jobs=0, include_api=True, single_doc=None, if single_doc and single_doc.endswith('.rst'): self.single_doc_html = os.path.splitext(single_doc)[0] + '.html' elif single_doc: - self.single_doc_html = 'api/generated/pandas.{}.html'.format( + self.single_doc_html = 'reference/api/pandas.{}.html'.format( single_doc) def _process_single_doc(self, single_doc): @@ -60,7 +63,7 @@ def _process_single_doc(self, single_doc): For example, categorial.rst or pandas.DataFrame.head. For the latter, return the corresponding file path - (e.g. generated/pandas.DataFrame.head.rst). + (e.g. reference/api/pandas.DataFrame.head.rst). 
""" base_name, extension = os.path.splitext(single_doc) if extension in ('.rst', '.ipynb'): @@ -118,8 +121,6 @@ def _sphinx_build(self, kind): raise ValueError('kind must be html or latex, ' 'not {}'.format(kind)) - self.clean() - cmd = ['sphinx-build', '-b', kind] if self.num_jobs: cmd += ['-j', str(self.num_jobs)] @@ -139,6 +140,77 @@ def _open_browser(self, single_doc_html): single_doc_html) webbrowser.open(url, new=2) + def _get_page_title(self, page): + """ + Open the rst file `page` and extract its title. + """ + fname = os.path.join(SOURCE_PATH, '{}.rst'.format(page)) + option_parser = docutils.frontend.OptionParser( + components=(docutils.parsers.rst.Parser,)) + doc = docutils.utils.new_document( + '<doc>', + option_parser.get_default_values()) + with open(fname) as f: + data = f.read() + + parser = docutils.parsers.rst.Parser() + # do not generate any warning when parsing the rst + with open(os.devnull, 'a') as f: + doc.reporter.stream = f + parser.parse(data, doc) + + section = next(node for node in doc.children + if isinstance(node, docutils.nodes.section)) + title = next(node for node in section.children + if isinstance(node, docutils.nodes.title)) + + return title.astext() + + def _add_redirects(self): + """ + Create in the build directory an html file with a redirect, + for every row in REDIRECTS_FILE. 
+ """ + html = ''' + <html> + <head> + <meta http-equiv="refresh" content="0;URL={url}"/> + </head> + <body> + <p> + The page has been moved to <a href="{url}">{title}</a> + </p> + </body> + <html> + ''' + with open(REDIRECTS_FILE) as mapping_fd: + reader = csv.reader(mapping_fd) + for row in reader: + if not row or row[0].strip().startswith('#'): + continue + + path = os.path.join(BUILD_PATH, + 'html', + *row[0].split('/')) + '.html' + + try: + title = self._get_page_title(row[1]) + except Exception: + # the file can be an ipynb and not an rst, or docutils + # may not be able to read the rst because it has some + # sphinx specific stuff + title = 'this page' + + if os.path.exists(path): + raise RuntimeError(( + 'Redirection would overwrite an existing file: ' + '{}').format(path)) + + with open(path, 'w') as moved_page_fd: + moved_page_fd.write( + html.format(url='{}.html'.format(row[1]), + title=title)) + def html(self): """ Build HTML documentation. @@ -150,6 +222,8 @@ def html(self): if self.single_doc_html is not None: self._open_browser(self.single_doc_html) + else: + self._add_redirects() return ret_code def latex(self, force=False): @@ -184,7 +258,7 @@ def clean(): Clean documentation generated files. 
""" shutil.rmtree(BUILD_PATH, ignore_errors=True) - shutil.rmtree(os.path.join(SOURCE_PATH, 'api', 'generated'), + shutil.rmtree(os.path.join(SOURCE_PATH, 'reference', 'api'), ignore_errors=True) def zip_html(self): diff --git a/doc/redirects.csv b/doc/redirects.csv new file mode 100644 index 0000000000000..a7886779c97d5 --- /dev/null +++ b/doc/redirects.csv @@ -0,0 +1,1581 @@ +# This file should contain all the redirects in the documentation +# in the format `<old_path>,<new_path>` + +# whatsnew +whatsnew,whatsnew/index +release,whatsnew/index + +# getting started +10min,getting_started/10min +basics,getting_started/basics +comparison_with_r,getting_started/comparison/comparison_with_r +comparison_with_sql,getting_started/comparison/comparison_with_sql +comparison_with_sas,getting_started/comparison/comparison_with_sas +comparison_with_stata,getting_started/comparison/comparison_with_stata +dsintro,getting_started/dsintro +overview,getting_started/overview +tutorials,getting_started/tutorials + +# user guide +advanced,user_guide/advanced +categorical,user_guide/categorical +computation,user_guide/computation +cookbook,user_guide/cookbook +enhancingperf,user_guide/enhancingperf +gotchas,user_guide/gotchas +groupby,user_guide/groupby +indexing,user_guide/indexing +integer_na,user_guide/integer_na +io,user_guide/io +merging,user_guide/merging +missing_data,user_guide/missing_data +options,user_guide/options +reshaping,user_guide/reshaping +sparse,user_guide/sparse +style,user_guide/style +text,user_guide/text +timedeltas,user_guide/timedeltas +timeseries,user_guide/timeseries +visualization,user_guide/visualization + +# development +contributing,development/contributing +contributing_docstring,development/contributing_docstring +developer,development/developer +extending,development/extending +internals,development/internals + +# api +api,reference/index 
+generated/pandas.api.extensions.ExtensionArray.argsort,../reference/api/pandas.api.extensions.ExtensionArray.argsort +generated/pandas.api.extensions.ExtensionArray.astype,../reference/api/pandas.api.extensions.ExtensionArray.astype +generated/pandas.api.extensions.ExtensionArray.copy,../reference/api/pandas.api.extensions.ExtensionArray.copy +generated/pandas.api.extensions.ExtensionArray.dropna,../reference/api/pandas.api.extensions.ExtensionArray.dropna +generated/pandas.api.extensions.ExtensionArray.dtype,../reference/api/pandas.api.extensions.ExtensionArray.dtype +generated/pandas.api.extensions.ExtensionArray.factorize,../reference/api/pandas.api.extensions.ExtensionArray.factorize +generated/pandas.api.extensions.ExtensionArray.fillna,../reference/api/pandas.api.extensions.ExtensionArray.fillna +generated/pandas.api.extensions.ExtensionArray,../reference/api/pandas.api.extensions.ExtensionArray +generated/pandas.api.extensions.ExtensionArray.isna,../reference/api/pandas.api.extensions.ExtensionArray.isna +generated/pandas.api.extensions.ExtensionArray.nbytes,../reference/api/pandas.api.extensions.ExtensionArray.nbytes +generated/pandas.api.extensions.ExtensionArray.ndim,../reference/api/pandas.api.extensions.ExtensionArray.ndim +generated/pandas.api.extensions.ExtensionArray.shape,../reference/api/pandas.api.extensions.ExtensionArray.shape +generated/pandas.api.extensions.ExtensionArray.take,../reference/api/pandas.api.extensions.ExtensionArray.take +generated/pandas.api.extensions.ExtensionArray.unique,../reference/api/pandas.api.extensions.ExtensionArray.unique +generated/pandas.api.extensions.ExtensionDtype.construct_array_type,../reference/api/pandas.api.extensions.ExtensionDtype.construct_array_type +generated/pandas.api.extensions.ExtensionDtype.construct_from_string,../reference/api/pandas.api.extensions.ExtensionDtype.construct_from_string +generated/pandas.api.extensions.ExtensionDtype,../reference/api/pandas.api.extensions.ExtensionDtype 
+generated/pandas.api.extensions.ExtensionDtype.is_dtype,../reference/api/pandas.api.extensions.ExtensionDtype.is_dtype +generated/pandas.api.extensions.ExtensionDtype.kind,../reference/api/pandas.api.extensions.ExtensionDtype.kind +generated/pandas.api.extensions.ExtensionDtype.name,../reference/api/pandas.api.extensions.ExtensionDtype.name +generated/pandas.api.extensions.ExtensionDtype.names,../reference/api/pandas.api.extensions.ExtensionDtype.names +generated/pandas.api.extensions.ExtensionDtype.na_value,../reference/api/pandas.api.extensions.ExtensionDtype.na_value +generated/pandas.api.extensions.ExtensionDtype.type,../reference/api/pandas.api.extensions.ExtensionDtype.type +generated/pandas.api.extensions.register_dataframe_accessor,../reference/api/pandas.api.extensions.register_dataframe_accessor +generated/pandas.api.extensions.register_extension_dtype,../reference/api/pandas.api.extensions.register_extension_dtype +generated/pandas.api.extensions.register_index_accessor,../reference/api/pandas.api.extensions.register_index_accessor +generated/pandas.api.extensions.register_series_accessor,../reference/api/pandas.api.extensions.register_series_accessor +generated/pandas.api.types.infer_dtype,../reference/api/pandas.api.types.infer_dtype +generated/pandas.api.types.is_bool_dtype,../reference/api/pandas.api.types.is_bool_dtype +generated/pandas.api.types.is_bool,../reference/api/pandas.api.types.is_bool +generated/pandas.api.types.is_categorical_dtype,../reference/api/pandas.api.types.is_categorical_dtype +generated/pandas.api.types.is_categorical,../reference/api/pandas.api.types.is_categorical +generated/pandas.api.types.is_complex_dtype,../reference/api/pandas.api.types.is_complex_dtype +generated/pandas.api.types.is_complex,../reference/api/pandas.api.types.is_complex +generated/pandas.api.types.is_datetime64_any_dtype,../reference/api/pandas.api.types.is_datetime64_any_dtype 
+generated/pandas.api.types.is_datetime64_dtype,../reference/api/pandas.api.types.is_datetime64_dtype +generated/pandas.api.types.is_datetime64_ns_dtype,../reference/api/pandas.api.types.is_datetime64_ns_dtype +generated/pandas.api.types.is_datetime64tz_dtype,../reference/api/pandas.api.types.is_datetime64tz_dtype +generated/pandas.api.types.is_datetimetz,../reference/api/pandas.api.types.is_datetimetz +generated/pandas.api.types.is_dict_like,../reference/api/pandas.api.types.is_dict_like +generated/pandas.api.types.is_extension_array_dtype,../reference/api/pandas.api.types.is_extension_array_dtype +generated/pandas.api.types.is_extension_type,../reference/api/pandas.api.types.is_extension_type +generated/pandas.api.types.is_file_like,../reference/api/pandas.api.types.is_file_like +generated/pandas.api.types.is_float_dtype,../reference/api/pandas.api.types.is_float_dtype +generated/pandas.api.types.is_float,../reference/api/pandas.api.types.is_float +generated/pandas.api.types.is_hashable,../reference/api/pandas.api.types.is_hashable +generated/pandas.api.types.is_int64_dtype,../reference/api/pandas.api.types.is_int64_dtype +generated/pandas.api.types.is_integer_dtype,../reference/api/pandas.api.types.is_integer_dtype +generated/pandas.api.types.is_integer,../reference/api/pandas.api.types.is_integer +generated/pandas.api.types.is_interval_dtype,../reference/api/pandas.api.types.is_interval_dtype +generated/pandas.api.types.is_interval,../reference/api/pandas.api.types.is_interval +generated/pandas.api.types.is_iterator,../reference/api/pandas.api.types.is_iterator +generated/pandas.api.types.is_list_like,../reference/api/pandas.api.types.is_list_like +generated/pandas.api.types.is_named_tuple,../reference/api/pandas.api.types.is_named_tuple +generated/pandas.api.types.is_number,../reference/api/pandas.api.types.is_number +generated/pandas.api.types.is_numeric_dtype,../reference/api/pandas.api.types.is_numeric_dtype 
+generated/pandas.api.types.is_object_dtype,../reference/api/pandas.api.types.is_object_dtype +generated/pandas.api.types.is_period_dtype,../reference/api/pandas.api.types.is_period_dtype +generated/pandas.api.types.is_period,../reference/api/pandas.api.types.is_period +generated/pandas.api.types.is_re_compilable,../reference/api/pandas.api.types.is_re_compilable +generated/pandas.api.types.is_re,../reference/api/pandas.api.types.is_re +generated/pandas.api.types.is_scalar,../reference/api/pandas.api.types.is_scalar +generated/pandas.api.types.is_signed_integer_dtype,../reference/api/pandas.api.types.is_signed_integer_dtype +generated/pandas.api.types.is_sparse,../reference/api/pandas.api.types.is_sparse +generated/pandas.api.types.is_string_dtype,../reference/api/pandas.api.types.is_string_dtype +generated/pandas.api.types.is_timedelta64_dtype,../reference/api/pandas.api.types.is_timedelta64_dtype +generated/pandas.api.types.is_timedelta64_ns_dtype,../reference/api/pandas.api.types.is_timedelta64_ns_dtype +generated/pandas.api.types.is_unsigned_integer_dtype,../reference/api/pandas.api.types.is_unsigned_integer_dtype +generated/pandas.api.types.pandas_dtype,../reference/api/pandas.api.types.pandas_dtype +generated/pandas.api.types.union_categoricals,../reference/api/pandas.api.types.union_categoricals +generated/pandas.bdate_range,../reference/api/pandas.bdate_range +generated/pandas.Categorical.__array__,../reference/api/pandas.Categorical.__array__ +generated/pandas.Categorical.categories,../reference/api/pandas.Categorical.categories +generated/pandas.Categorical.codes,../reference/api/pandas.Categorical.codes +generated/pandas.CategoricalDtype.categories,../reference/api/pandas.CategoricalDtype.categories +generated/pandas.Categorical.dtype,../reference/api/pandas.Categorical.dtype +generated/pandas.CategoricalDtype,../reference/api/pandas.CategoricalDtype +generated/pandas.CategoricalDtype.ordered,../reference/api/pandas.CategoricalDtype.ordered 
+generated/pandas.Categorical.from_codes,../reference/api/pandas.Categorical.from_codes +generated/pandas.Categorical,../reference/api/pandas.Categorical +generated/pandas.CategoricalIndex.add_categories,../reference/api/pandas.CategoricalIndex.add_categories +generated/pandas.CategoricalIndex.as_ordered,../reference/api/pandas.CategoricalIndex.as_ordered +generated/pandas.CategoricalIndex.as_unordered,../reference/api/pandas.CategoricalIndex.as_unordered +generated/pandas.CategoricalIndex.categories,../reference/api/pandas.CategoricalIndex.categories +generated/pandas.CategoricalIndex.codes,../reference/api/pandas.CategoricalIndex.codes +generated/pandas.CategoricalIndex.equals,../reference/api/pandas.CategoricalIndex.equals +generated/pandas.CategoricalIndex,../reference/api/pandas.CategoricalIndex +generated/pandas.CategoricalIndex.map,../reference/api/pandas.CategoricalIndex.map +generated/pandas.CategoricalIndex.ordered,../reference/api/pandas.CategoricalIndex.ordered +generated/pandas.CategoricalIndex.remove_categories,../reference/api/pandas.CategoricalIndex.remove_categories +generated/pandas.CategoricalIndex.remove_unused_categories,../reference/api/pandas.CategoricalIndex.remove_unused_categories +generated/pandas.CategoricalIndex.rename_categories,../reference/api/pandas.CategoricalIndex.rename_categories +generated/pandas.CategoricalIndex.reorder_categories,../reference/api/pandas.CategoricalIndex.reorder_categories +generated/pandas.CategoricalIndex.set_categories,../reference/api/pandas.CategoricalIndex.set_categories +generated/pandas.Categorical.ordered,../reference/api/pandas.Categorical.ordered +generated/pandas.concat,../reference/api/pandas.concat +generated/pandas.core.groupby.DataFrameGroupBy.all,../reference/api/pandas.core.groupby.DataFrameGroupBy.all +generated/pandas.core.groupby.DataFrameGroupBy.any,../reference/api/pandas.core.groupby.DataFrameGroupBy.any 
+generated/pandas.core.groupby.DataFrameGroupBy.bfill,../reference/api/pandas.core.groupby.DataFrameGroupBy.bfill +generated/pandas.core.groupby.DataFrameGroupBy.boxplot,../reference/api/pandas.core.groupby.DataFrameGroupBy.boxplot +generated/pandas.core.groupby.DataFrameGroupBy.corr,../reference/api/pandas.core.groupby.DataFrameGroupBy.corr +generated/pandas.core.groupby.DataFrameGroupBy.corrwith,../reference/api/pandas.core.groupby.DataFrameGroupBy.corrwith +generated/pandas.core.groupby.DataFrameGroupBy.count,../reference/api/pandas.core.groupby.DataFrameGroupBy.count +generated/pandas.core.groupby.DataFrameGroupBy.cov,../reference/api/pandas.core.groupby.DataFrameGroupBy.cov +generated/pandas.core.groupby.DataFrameGroupBy.cummax,../reference/api/pandas.core.groupby.DataFrameGroupBy.cummax +generated/pandas.core.groupby.DataFrameGroupBy.cummin,../reference/api/pandas.core.groupby.DataFrameGroupBy.cummin +generated/pandas.core.groupby.DataFrameGroupBy.cumprod,../reference/api/pandas.core.groupby.DataFrameGroupBy.cumprod +generated/pandas.core.groupby.DataFrameGroupBy.cumsum,../reference/api/pandas.core.groupby.DataFrameGroupBy.cumsum +generated/pandas.core.groupby.DataFrameGroupBy.describe,../reference/api/pandas.core.groupby.DataFrameGroupBy.describe +generated/pandas.core.groupby.DataFrameGroupBy.diff,../reference/api/pandas.core.groupby.DataFrameGroupBy.diff +generated/pandas.core.groupby.DataFrameGroupBy.ffill,../reference/api/pandas.core.groupby.DataFrameGroupBy.ffill +generated/pandas.core.groupby.DataFrameGroupBy.fillna,../reference/api/pandas.core.groupby.DataFrameGroupBy.fillna +generated/pandas.core.groupby.DataFrameGroupBy.filter,../reference/api/pandas.core.groupby.DataFrameGroupBy.filter +generated/pandas.core.groupby.DataFrameGroupBy.hist,../reference/api/pandas.core.groupby.DataFrameGroupBy.hist +generated/pandas.core.groupby.DataFrameGroupBy.idxmax,../reference/api/pandas.core.groupby.DataFrameGroupBy.idxmax 
+generated/pandas.core.groupby.DataFrameGroupBy.idxmin,../reference/api/pandas.core.groupby.DataFrameGroupBy.idxmin +generated/pandas.core.groupby.DataFrameGroupBy.mad,../reference/api/pandas.core.groupby.DataFrameGroupBy.mad +generated/pandas.core.groupby.DataFrameGroupBy.pct_change,../reference/api/pandas.core.groupby.DataFrameGroupBy.pct_change +generated/pandas.core.groupby.DataFrameGroupBy.plot,../reference/api/pandas.core.groupby.DataFrameGroupBy.plot +generated/pandas.core.groupby.DataFrameGroupBy.quantile,../reference/api/pandas.core.groupby.DataFrameGroupBy.quantile +generated/pandas.core.groupby.DataFrameGroupBy.rank,../reference/api/pandas.core.groupby.DataFrameGroupBy.rank +generated/pandas.core.groupby.DataFrameGroupBy.resample,../reference/api/pandas.core.groupby.DataFrameGroupBy.resample +generated/pandas.core.groupby.DataFrameGroupBy.shift,../reference/api/pandas.core.groupby.DataFrameGroupBy.shift +generated/pandas.core.groupby.DataFrameGroupBy.size,../reference/api/pandas.core.groupby.DataFrameGroupBy.size +generated/pandas.core.groupby.DataFrameGroupBy.skew,../reference/api/pandas.core.groupby.DataFrameGroupBy.skew +generated/pandas.core.groupby.DataFrameGroupBy.take,../reference/api/pandas.core.groupby.DataFrameGroupBy.take +generated/pandas.core.groupby.DataFrameGroupBy.tshift,../reference/api/pandas.core.groupby.DataFrameGroupBy.tshift +generated/pandas.core.groupby.GroupBy.agg,../reference/api/pandas.core.groupby.GroupBy.agg +generated/pandas.core.groupby.GroupBy.aggregate,../reference/api/pandas.core.groupby.GroupBy.aggregate +generated/pandas.core.groupby.GroupBy.all,../reference/api/pandas.core.groupby.GroupBy.all +generated/pandas.core.groupby.GroupBy.any,../reference/api/pandas.core.groupby.GroupBy.any +generated/pandas.core.groupby.GroupBy.apply,../reference/api/pandas.core.groupby.GroupBy.apply +generated/pandas.core.groupby.GroupBy.bfill,../reference/api/pandas.core.groupby.GroupBy.bfill 
+generated/pandas.core.groupby.GroupBy.count,../reference/api/pandas.core.groupby.GroupBy.count +generated/pandas.core.groupby.GroupBy.cumcount,../reference/api/pandas.core.groupby.GroupBy.cumcount +generated/pandas.core.groupby.GroupBy.ffill,../reference/api/pandas.core.groupby.GroupBy.ffill +generated/pandas.core.groupby.GroupBy.first,../reference/api/pandas.core.groupby.GroupBy.first +generated/pandas.core.groupby.GroupBy.get_group,../reference/api/pandas.core.groupby.GroupBy.get_group +generated/pandas.core.groupby.GroupBy.groups,../reference/api/pandas.core.groupby.GroupBy.groups +generated/pandas.core.groupby.GroupBy.head,../reference/api/pandas.core.groupby.GroupBy.head +generated/pandas.core.groupby.GroupBy.indices,../reference/api/pandas.core.groupby.GroupBy.indices +generated/pandas.core.groupby.GroupBy.__iter__,../reference/api/pandas.core.groupby.GroupBy.__iter__ +generated/pandas.core.groupby.GroupBy.last,../reference/api/pandas.core.groupby.GroupBy.last +generated/pandas.core.groupby.GroupBy.max,../reference/api/pandas.core.groupby.GroupBy.max +generated/pandas.core.groupby.GroupBy.mean,../reference/api/pandas.core.groupby.GroupBy.mean +generated/pandas.core.groupby.GroupBy.median,../reference/api/pandas.core.groupby.GroupBy.median +generated/pandas.core.groupby.GroupBy.min,../reference/api/pandas.core.groupby.GroupBy.min +generated/pandas.core.groupby.GroupBy.ngroup,../reference/api/pandas.core.groupby.GroupBy.ngroup +generated/pandas.core.groupby.GroupBy.nth,../reference/api/pandas.core.groupby.GroupBy.nth +generated/pandas.core.groupby.GroupBy.ohlc,../reference/api/pandas.core.groupby.GroupBy.ohlc +generated/pandas.core.groupby.GroupBy.pct_change,../reference/api/pandas.core.groupby.GroupBy.pct_change +generated/pandas.core.groupby.GroupBy.pipe,../reference/api/pandas.core.groupby.GroupBy.pipe +generated/pandas.core.groupby.GroupBy.prod,../reference/api/pandas.core.groupby.GroupBy.prod 
+generated/pandas.core.groupby.GroupBy.rank,../reference/api/pandas.core.groupby.GroupBy.rank +generated/pandas.core.groupby.GroupBy.sem,../reference/api/pandas.core.groupby.GroupBy.sem +generated/pandas.core.groupby.GroupBy.size,../reference/api/pandas.core.groupby.GroupBy.size +generated/pandas.core.groupby.GroupBy.std,../reference/api/pandas.core.groupby.GroupBy.std +generated/pandas.core.groupby.GroupBy.sum,../reference/api/pandas.core.groupby.GroupBy.sum +generated/pandas.core.groupby.GroupBy.tail,../reference/api/pandas.core.groupby.GroupBy.tail +generated/pandas.core.groupby.GroupBy.transform,../reference/api/pandas.core.groupby.GroupBy.transform +generated/pandas.core.groupby.GroupBy.var,../reference/api/pandas.core.groupby.GroupBy.var +generated/pandas.core.groupby.SeriesGroupBy.is_monotonic_decreasing,../reference/api/pandas.core.groupby.SeriesGroupBy.is_monotonic_decreasing +generated/pandas.core.groupby.SeriesGroupBy.is_monotonic_increasing,../reference/api/pandas.core.groupby.SeriesGroupBy.is_monotonic_increasing +generated/pandas.core.groupby.SeriesGroupBy.nlargest,../reference/api/pandas.core.groupby.SeriesGroupBy.nlargest +generated/pandas.core.groupby.SeriesGroupBy.nsmallest,../reference/api/pandas.core.groupby.SeriesGroupBy.nsmallest +generated/pandas.core.groupby.SeriesGroupBy.nunique,../reference/api/pandas.core.groupby.SeriesGroupBy.nunique +generated/pandas.core.groupby.SeriesGroupBy.unique,../reference/api/pandas.core.groupby.SeriesGroupBy.unique +generated/pandas.core.groupby.SeriesGroupBy.value_counts,../reference/api/pandas.core.groupby.SeriesGroupBy.value_counts +generated/pandas.core.resample.Resampler.aggregate,../reference/api/pandas.core.resample.Resampler.aggregate +generated/pandas.core.resample.Resampler.apply,../reference/api/pandas.core.resample.Resampler.apply +generated/pandas.core.resample.Resampler.asfreq,../reference/api/pandas.core.resample.Resampler.asfreq 
+generated/pandas.core.resample.Resampler.backfill,../reference/api/pandas.core.resample.Resampler.backfill +generated/pandas.core.resample.Resampler.bfill,../reference/api/pandas.core.resample.Resampler.bfill +generated/pandas.core.resample.Resampler.count,../reference/api/pandas.core.resample.Resampler.count +generated/pandas.core.resample.Resampler.ffill,../reference/api/pandas.core.resample.Resampler.ffill +generated/pandas.core.resample.Resampler.fillna,../reference/api/pandas.core.resample.Resampler.fillna +generated/pandas.core.resample.Resampler.first,../reference/api/pandas.core.resample.Resampler.first +generated/pandas.core.resample.Resampler.get_group,../reference/api/pandas.core.resample.Resampler.get_group +generated/pandas.core.resample.Resampler.groups,../reference/api/pandas.core.resample.Resampler.groups +generated/pandas.core.resample.Resampler.indices,../reference/api/pandas.core.resample.Resampler.indices +generated/pandas.core.resample.Resampler.interpolate,../reference/api/pandas.core.resample.Resampler.interpolate +generated/pandas.core.resample.Resampler.__iter__,../reference/api/pandas.core.resample.Resampler.__iter__ +generated/pandas.core.resample.Resampler.last,../reference/api/pandas.core.resample.Resampler.last +generated/pandas.core.resample.Resampler.max,../reference/api/pandas.core.resample.Resampler.max +generated/pandas.core.resample.Resampler.mean,../reference/api/pandas.core.resample.Resampler.mean +generated/pandas.core.resample.Resampler.median,../reference/api/pandas.core.resample.Resampler.median +generated/pandas.core.resample.Resampler.min,../reference/api/pandas.core.resample.Resampler.min +generated/pandas.core.resample.Resampler.nearest,../reference/api/pandas.core.resample.Resampler.nearest +generated/pandas.core.resample.Resampler.nunique,../reference/api/pandas.core.resample.Resampler.nunique +generated/pandas.core.resample.Resampler.ohlc,../reference/api/pandas.core.resample.Resampler.ohlc 
+generated/pandas.core.resample.Resampler.pad,../reference/api/pandas.core.resample.Resampler.pad +generated/pandas.core.resample.Resampler.pipe,../reference/api/pandas.core.resample.Resampler.pipe +generated/pandas.core.resample.Resampler.prod,../reference/api/pandas.core.resample.Resampler.prod +generated/pandas.core.resample.Resampler.quantile,../reference/api/pandas.core.resample.Resampler.quantile +generated/pandas.core.resample.Resampler.sem,../reference/api/pandas.core.resample.Resampler.sem +generated/pandas.core.resample.Resampler.size,../reference/api/pandas.core.resample.Resampler.size +generated/pandas.core.resample.Resampler.std,../reference/api/pandas.core.resample.Resampler.std +generated/pandas.core.resample.Resampler.sum,../reference/api/pandas.core.resample.Resampler.sum +generated/pandas.core.resample.Resampler.transform,../reference/api/pandas.core.resample.Resampler.transform +generated/pandas.core.resample.Resampler.var,../reference/api/pandas.core.resample.Resampler.var +generated/pandas.core.window.EWM.corr,../reference/api/pandas.core.window.EWM.corr +generated/pandas.core.window.EWM.cov,../reference/api/pandas.core.window.EWM.cov +generated/pandas.core.window.EWM.mean,../reference/api/pandas.core.window.EWM.mean +generated/pandas.core.window.EWM.std,../reference/api/pandas.core.window.EWM.std +generated/pandas.core.window.EWM.var,../reference/api/pandas.core.window.EWM.var +generated/pandas.core.window.Expanding.aggregate,../reference/api/pandas.core.window.Expanding.aggregate +generated/pandas.core.window.Expanding.apply,../reference/api/pandas.core.window.Expanding.apply +generated/pandas.core.window.Expanding.corr,../reference/api/pandas.core.window.Expanding.corr +generated/pandas.core.window.Expanding.count,../reference/api/pandas.core.window.Expanding.count +generated/pandas.core.window.Expanding.cov,../reference/api/pandas.core.window.Expanding.cov 
+generated/pandas.core.window.Expanding.kurt,../reference/api/pandas.core.window.Expanding.kurt +generated/pandas.core.window.Expanding.max,../reference/api/pandas.core.window.Expanding.max +generated/pandas.core.window.Expanding.mean,../reference/api/pandas.core.window.Expanding.mean +generated/pandas.core.window.Expanding.median,../reference/api/pandas.core.window.Expanding.median +generated/pandas.core.window.Expanding.min,../reference/api/pandas.core.window.Expanding.min +generated/pandas.core.window.Expanding.quantile,../reference/api/pandas.core.window.Expanding.quantile +generated/pandas.core.window.Expanding.skew,../reference/api/pandas.core.window.Expanding.skew +generated/pandas.core.window.Expanding.std,../reference/api/pandas.core.window.Expanding.std +generated/pandas.core.window.Expanding.sum,../reference/api/pandas.core.window.Expanding.sum +generated/pandas.core.window.Expanding.var,../reference/api/pandas.core.window.Expanding.var +generated/pandas.core.window.Rolling.aggregate,../reference/api/pandas.core.window.Rolling.aggregate +generated/pandas.core.window.Rolling.apply,../reference/api/pandas.core.window.Rolling.apply +generated/pandas.core.window.Rolling.corr,../reference/api/pandas.core.window.Rolling.corr +generated/pandas.core.window.Rolling.count,../reference/api/pandas.core.window.Rolling.count +generated/pandas.core.window.Rolling.cov,../reference/api/pandas.core.window.Rolling.cov +generated/pandas.core.window.Rolling.kurt,../reference/api/pandas.core.window.Rolling.kurt +generated/pandas.core.window.Rolling.max,../reference/api/pandas.core.window.Rolling.max +generated/pandas.core.window.Rolling.mean,../reference/api/pandas.core.window.Rolling.mean +generated/pandas.core.window.Rolling.median,../reference/api/pandas.core.window.Rolling.median +generated/pandas.core.window.Rolling.min,../reference/api/pandas.core.window.Rolling.min +generated/pandas.core.window.Rolling.quantile,../reference/api/pandas.core.window.Rolling.quantile 
+generated/pandas.core.window.Rolling.skew,../reference/api/pandas.core.window.Rolling.skew +generated/pandas.core.window.Rolling.std,../reference/api/pandas.core.window.Rolling.std +generated/pandas.core.window.Rolling.sum,../reference/api/pandas.core.window.Rolling.sum +generated/pandas.core.window.Rolling.var,../reference/api/pandas.core.window.Rolling.var +generated/pandas.core.window.Window.mean,../reference/api/pandas.core.window.Window.mean +generated/pandas.core.window.Window.sum,../reference/api/pandas.core.window.Window.sum +generated/pandas.crosstab,../reference/api/pandas.crosstab +generated/pandas.cut,../reference/api/pandas.cut +generated/pandas.DataFrame.abs,../reference/api/pandas.DataFrame.abs +generated/pandas.DataFrame.add,../reference/api/pandas.DataFrame.add +generated/pandas.DataFrame.add_prefix,../reference/api/pandas.DataFrame.add_prefix +generated/pandas.DataFrame.add_suffix,../reference/api/pandas.DataFrame.add_suffix +generated/pandas.DataFrame.agg,../reference/api/pandas.DataFrame.agg +generated/pandas.DataFrame.aggregate,../reference/api/pandas.DataFrame.aggregate +generated/pandas.DataFrame.align,../reference/api/pandas.DataFrame.align +generated/pandas.DataFrame.all,../reference/api/pandas.DataFrame.all +generated/pandas.DataFrame.any,../reference/api/pandas.DataFrame.any +generated/pandas.DataFrame.append,../reference/api/pandas.DataFrame.append +generated/pandas.DataFrame.apply,../reference/api/pandas.DataFrame.apply +generated/pandas.DataFrame.applymap,../reference/api/pandas.DataFrame.applymap +generated/pandas.DataFrame.as_blocks,../reference/api/pandas.DataFrame.as_blocks +generated/pandas.DataFrame.asfreq,../reference/api/pandas.DataFrame.asfreq +generated/pandas.DataFrame.as_matrix,../reference/api/pandas.DataFrame.as_matrix +generated/pandas.DataFrame.asof,../reference/api/pandas.DataFrame.asof +generated/pandas.DataFrame.assign,../reference/api/pandas.DataFrame.assign 
+generated/pandas.DataFrame.astype,../reference/api/pandas.DataFrame.astype +generated/pandas.DataFrame.at,../reference/api/pandas.DataFrame.at +generated/pandas.DataFrame.at_time,../reference/api/pandas.DataFrame.at_time +generated/pandas.DataFrame.axes,../reference/api/pandas.DataFrame.axes +generated/pandas.DataFrame.between_time,../reference/api/pandas.DataFrame.between_time +generated/pandas.DataFrame.bfill,../reference/api/pandas.DataFrame.bfill +generated/pandas.DataFrame.blocks,../reference/api/pandas.DataFrame.blocks +generated/pandas.DataFrame.bool,../reference/api/pandas.DataFrame.bool +generated/pandas.DataFrame.boxplot,../reference/api/pandas.DataFrame.boxplot +generated/pandas.DataFrame.clip,../reference/api/pandas.DataFrame.clip +generated/pandas.DataFrame.clip_lower,../reference/api/pandas.DataFrame.clip_lower +generated/pandas.DataFrame.clip_upper,../reference/api/pandas.DataFrame.clip_upper +generated/pandas.DataFrame.columns,../reference/api/pandas.DataFrame.columns +generated/pandas.DataFrame.combine_first,../reference/api/pandas.DataFrame.combine_first +generated/pandas.DataFrame.combine,../reference/api/pandas.DataFrame.combine +generated/pandas.DataFrame.compound,../reference/api/pandas.DataFrame.compound +generated/pandas.DataFrame.convert_objects,../reference/api/pandas.DataFrame.convert_objects +generated/pandas.DataFrame.copy,../reference/api/pandas.DataFrame.copy +generated/pandas.DataFrame.corr,../reference/api/pandas.DataFrame.corr +generated/pandas.DataFrame.corrwith,../reference/api/pandas.DataFrame.corrwith +generated/pandas.DataFrame.count,../reference/api/pandas.DataFrame.count +generated/pandas.DataFrame.cov,../reference/api/pandas.DataFrame.cov +generated/pandas.DataFrame.cummax,../reference/api/pandas.DataFrame.cummax +generated/pandas.DataFrame.cummin,../reference/api/pandas.DataFrame.cummin +generated/pandas.DataFrame.cumprod,../reference/api/pandas.DataFrame.cumprod 
+generated/pandas.DataFrame.cumsum,../reference/api/pandas.DataFrame.cumsum +generated/pandas.DataFrame.describe,../reference/api/pandas.DataFrame.describe +generated/pandas.DataFrame.diff,../reference/api/pandas.DataFrame.diff +generated/pandas.DataFrame.div,../reference/api/pandas.DataFrame.div +generated/pandas.DataFrame.divide,../reference/api/pandas.DataFrame.divide +generated/pandas.DataFrame.dot,../reference/api/pandas.DataFrame.dot +generated/pandas.DataFrame.drop_duplicates,../reference/api/pandas.DataFrame.drop_duplicates +generated/pandas.DataFrame.drop,../reference/api/pandas.DataFrame.drop +generated/pandas.DataFrame.droplevel,../reference/api/pandas.DataFrame.droplevel +generated/pandas.DataFrame.dropna,../reference/api/pandas.DataFrame.dropna +generated/pandas.DataFrame.dtypes,../reference/api/pandas.DataFrame.dtypes +generated/pandas.DataFrame.duplicated,../reference/api/pandas.DataFrame.duplicated +generated/pandas.DataFrame.empty,../reference/api/pandas.DataFrame.empty +generated/pandas.DataFrame.eq,../reference/api/pandas.DataFrame.eq +generated/pandas.DataFrame.equals,../reference/api/pandas.DataFrame.equals +generated/pandas.DataFrame.eval,../reference/api/pandas.DataFrame.eval +generated/pandas.DataFrame.ewm,../reference/api/pandas.DataFrame.ewm +generated/pandas.DataFrame.expanding,../reference/api/pandas.DataFrame.expanding +generated/pandas.DataFrame.ffill,../reference/api/pandas.DataFrame.ffill +generated/pandas.DataFrame.fillna,../reference/api/pandas.DataFrame.fillna +generated/pandas.DataFrame.filter,../reference/api/pandas.DataFrame.filter +generated/pandas.DataFrame.first,../reference/api/pandas.DataFrame.first +generated/pandas.DataFrame.first_valid_index,../reference/api/pandas.DataFrame.first_valid_index +generated/pandas.DataFrame.floordiv,../reference/api/pandas.DataFrame.floordiv +generated/pandas.DataFrame.from_csv,../reference/api/pandas.DataFrame.from_csv 
+generated/pandas.DataFrame.from_dict,../reference/api/pandas.DataFrame.from_dict +generated/pandas.DataFrame.from_items,../reference/api/pandas.DataFrame.from_items +generated/pandas.DataFrame.from_records,../reference/api/pandas.DataFrame.from_records +generated/pandas.DataFrame.ftypes,../reference/api/pandas.DataFrame.ftypes +generated/pandas.DataFrame.ge,../reference/api/pandas.DataFrame.ge +generated/pandas.DataFrame.get_dtype_counts,../reference/api/pandas.DataFrame.get_dtype_counts +generated/pandas.DataFrame.get_ftype_counts,../reference/api/pandas.DataFrame.get_ftype_counts +generated/pandas.DataFrame.get,../reference/api/pandas.DataFrame.get +generated/pandas.DataFrame.get_value,../reference/api/pandas.DataFrame.get_value +generated/pandas.DataFrame.get_values,../reference/api/pandas.DataFrame.get_values +generated/pandas.DataFrame.groupby,../reference/api/pandas.DataFrame.groupby +generated/pandas.DataFrame.gt,../reference/api/pandas.DataFrame.gt +generated/pandas.DataFrame.head,../reference/api/pandas.DataFrame.head +generated/pandas.DataFrame.hist,../reference/api/pandas.DataFrame.hist +generated/pandas.DataFrame,../reference/api/pandas.DataFrame +generated/pandas.DataFrame.iat,../reference/api/pandas.DataFrame.iat +generated/pandas.DataFrame.idxmax,../reference/api/pandas.DataFrame.idxmax +generated/pandas.DataFrame.idxmin,../reference/api/pandas.DataFrame.idxmin +generated/pandas.DataFrame.iloc,../reference/api/pandas.DataFrame.iloc +generated/pandas.DataFrame.index,../reference/api/pandas.DataFrame.index +generated/pandas.DataFrame.infer_objects,../reference/api/pandas.DataFrame.infer_objects +generated/pandas.DataFrame.info,../reference/api/pandas.DataFrame.info +generated/pandas.DataFrame.insert,../reference/api/pandas.DataFrame.insert +generated/pandas.DataFrame.interpolate,../reference/api/pandas.DataFrame.interpolate +generated/pandas.DataFrame.is_copy,../reference/api/pandas.DataFrame.is_copy 
+generated/pandas.DataFrame.isin,../reference/api/pandas.DataFrame.isin +generated/pandas.DataFrame.isna,../reference/api/pandas.DataFrame.isna +generated/pandas.DataFrame.isnull,../reference/api/pandas.DataFrame.isnull +generated/pandas.DataFrame.items,../reference/api/pandas.DataFrame.items +generated/pandas.DataFrame.__iter__,../reference/api/pandas.DataFrame.__iter__ +generated/pandas.DataFrame.iteritems,../reference/api/pandas.DataFrame.iteritems +generated/pandas.DataFrame.iterrows,../reference/api/pandas.DataFrame.iterrows +generated/pandas.DataFrame.itertuples,../reference/api/pandas.DataFrame.itertuples +generated/pandas.DataFrame.ix,../reference/api/pandas.DataFrame.ix +generated/pandas.DataFrame.join,../reference/api/pandas.DataFrame.join +generated/pandas.DataFrame.keys,../reference/api/pandas.DataFrame.keys +generated/pandas.DataFrame.kurt,../reference/api/pandas.DataFrame.kurt +generated/pandas.DataFrame.kurtosis,../reference/api/pandas.DataFrame.kurtosis +generated/pandas.DataFrame.last,../reference/api/pandas.DataFrame.last +generated/pandas.DataFrame.last_valid_index,../reference/api/pandas.DataFrame.last_valid_index +generated/pandas.DataFrame.le,../reference/api/pandas.DataFrame.le +generated/pandas.DataFrame.loc,../reference/api/pandas.DataFrame.loc +generated/pandas.DataFrame.lookup,../reference/api/pandas.DataFrame.lookup +generated/pandas.DataFrame.lt,../reference/api/pandas.DataFrame.lt +generated/pandas.DataFrame.mad,../reference/api/pandas.DataFrame.mad +generated/pandas.DataFrame.mask,../reference/api/pandas.DataFrame.mask +generated/pandas.DataFrame.max,../reference/api/pandas.DataFrame.max +generated/pandas.DataFrame.mean,../reference/api/pandas.DataFrame.mean +generated/pandas.DataFrame.median,../reference/api/pandas.DataFrame.median +generated/pandas.DataFrame.melt,../reference/api/pandas.DataFrame.melt +generated/pandas.DataFrame.memory_usage,../reference/api/pandas.DataFrame.memory_usage 
+generated/pandas.DataFrame.merge,../reference/api/pandas.DataFrame.merge +generated/pandas.DataFrame.min,../reference/api/pandas.DataFrame.min +generated/pandas.DataFrame.mode,../reference/api/pandas.DataFrame.mode +generated/pandas.DataFrame.mod,../reference/api/pandas.DataFrame.mod +generated/pandas.DataFrame.mul,../reference/api/pandas.DataFrame.mul +generated/pandas.DataFrame.multiply,../reference/api/pandas.DataFrame.multiply +generated/pandas.DataFrame.ndim,../reference/api/pandas.DataFrame.ndim +generated/pandas.DataFrame.ne,../reference/api/pandas.DataFrame.ne +generated/pandas.DataFrame.nlargest,../reference/api/pandas.DataFrame.nlargest +generated/pandas.DataFrame.notna,../reference/api/pandas.DataFrame.notna +generated/pandas.DataFrame.notnull,../reference/api/pandas.DataFrame.notnull +generated/pandas.DataFrame.nsmallest,../reference/api/pandas.DataFrame.nsmallest +generated/pandas.DataFrame.nunique,../reference/api/pandas.DataFrame.nunique +generated/pandas.DataFrame.pct_change,../reference/api/pandas.DataFrame.pct_change +generated/pandas.DataFrame.pipe,../reference/api/pandas.DataFrame.pipe +generated/pandas.DataFrame.pivot,../reference/api/pandas.DataFrame.pivot +generated/pandas.DataFrame.pivot_table,../reference/api/pandas.DataFrame.pivot_table +generated/pandas.DataFrame.plot.barh,../reference/api/pandas.DataFrame.plot.barh +generated/pandas.DataFrame.plot.bar,../reference/api/pandas.DataFrame.plot.bar +generated/pandas.DataFrame.plot.box,../reference/api/pandas.DataFrame.plot.box +generated/pandas.DataFrame.plot.density,../reference/api/pandas.DataFrame.plot.density +generated/pandas.DataFrame.plot.hexbin,../reference/api/pandas.DataFrame.plot.hexbin +generated/pandas.DataFrame.plot.hist,../reference/api/pandas.DataFrame.plot.hist +generated/pandas.DataFrame.plot,../reference/api/pandas.DataFrame.plot +generated/pandas.DataFrame.plot.kde,../reference/api/pandas.DataFrame.plot.kde 
+generated/pandas.DataFrame.plot.line,../reference/api/pandas.DataFrame.plot.line +generated/pandas.DataFrame.plot.pie,../reference/api/pandas.DataFrame.plot.pie +generated/pandas.DataFrame.plot.scatter,../reference/api/pandas.DataFrame.plot.scatter +generated/pandas.DataFrame.pop,../reference/api/pandas.DataFrame.pop +generated/pandas.DataFrame.pow,../reference/api/pandas.DataFrame.pow +generated/pandas.DataFrame.prod,../reference/api/pandas.DataFrame.prod +generated/pandas.DataFrame.product,../reference/api/pandas.DataFrame.product +generated/pandas.DataFrame.quantile,../reference/api/pandas.DataFrame.quantile +generated/pandas.DataFrame.query,../reference/api/pandas.DataFrame.query +generated/pandas.DataFrame.radd,../reference/api/pandas.DataFrame.radd +generated/pandas.DataFrame.rank,../reference/api/pandas.DataFrame.rank +generated/pandas.DataFrame.rdiv,../reference/api/pandas.DataFrame.rdiv +generated/pandas.DataFrame.reindex_axis,../reference/api/pandas.DataFrame.reindex_axis +generated/pandas.DataFrame.reindex,../reference/api/pandas.DataFrame.reindex +generated/pandas.DataFrame.reindex_like,../reference/api/pandas.DataFrame.reindex_like +generated/pandas.DataFrame.rename_axis,../reference/api/pandas.DataFrame.rename_axis +generated/pandas.DataFrame.rename,../reference/api/pandas.DataFrame.rename +generated/pandas.DataFrame.reorder_levels,../reference/api/pandas.DataFrame.reorder_levels +generated/pandas.DataFrame.replace,../reference/api/pandas.DataFrame.replace +generated/pandas.DataFrame.resample,../reference/api/pandas.DataFrame.resample +generated/pandas.DataFrame.reset_index,../reference/api/pandas.DataFrame.reset_index +generated/pandas.DataFrame.rfloordiv,../reference/api/pandas.DataFrame.rfloordiv +generated/pandas.DataFrame.rmod,../reference/api/pandas.DataFrame.rmod +generated/pandas.DataFrame.rmul,../reference/api/pandas.DataFrame.rmul +generated/pandas.DataFrame.rolling,../reference/api/pandas.DataFrame.rolling 
+generated/pandas.DataFrame.round,../reference/api/pandas.DataFrame.round +generated/pandas.DataFrame.rpow,../reference/api/pandas.DataFrame.rpow +generated/pandas.DataFrame.rsub,../reference/api/pandas.DataFrame.rsub +generated/pandas.DataFrame.rtruediv,../reference/api/pandas.DataFrame.rtruediv +generated/pandas.DataFrame.sample,../reference/api/pandas.DataFrame.sample +generated/pandas.DataFrame.select_dtypes,../reference/api/pandas.DataFrame.select_dtypes +generated/pandas.DataFrame.select,../reference/api/pandas.DataFrame.select +generated/pandas.DataFrame.sem,../reference/api/pandas.DataFrame.sem +generated/pandas.DataFrame.set_axis,../reference/api/pandas.DataFrame.set_axis +generated/pandas.DataFrame.set_index,../reference/api/pandas.DataFrame.set_index +generated/pandas.DataFrame.set_value,../reference/api/pandas.DataFrame.set_value +generated/pandas.DataFrame.shape,../reference/api/pandas.DataFrame.shape +generated/pandas.DataFrame.shift,../reference/api/pandas.DataFrame.shift +generated/pandas.DataFrame.size,../reference/api/pandas.DataFrame.size +generated/pandas.DataFrame.skew,../reference/api/pandas.DataFrame.skew +generated/pandas.DataFrame.slice_shift,../reference/api/pandas.DataFrame.slice_shift +generated/pandas.DataFrame.sort_index,../reference/api/pandas.DataFrame.sort_index +generated/pandas.DataFrame.sort_values,../reference/api/pandas.DataFrame.sort_values +generated/pandas.DataFrame.squeeze,../reference/api/pandas.DataFrame.squeeze +generated/pandas.DataFrame.stack,../reference/api/pandas.DataFrame.stack +generated/pandas.DataFrame.std,../reference/api/pandas.DataFrame.std +generated/pandas.DataFrame.style,../reference/api/pandas.DataFrame.style +generated/pandas.DataFrame.sub,../reference/api/pandas.DataFrame.sub +generated/pandas.DataFrame.subtract,../reference/api/pandas.DataFrame.subtract +generated/pandas.DataFrame.sum,../reference/api/pandas.DataFrame.sum +generated/pandas.DataFrame.swapaxes,../reference/api/pandas.DataFrame.swapaxes 
+generated/pandas.DataFrame.swaplevel,../reference/api/pandas.DataFrame.swaplevel +generated/pandas.DataFrame.tail,../reference/api/pandas.DataFrame.tail +generated/pandas.DataFrame.take,../reference/api/pandas.DataFrame.take +generated/pandas.DataFrame.T,../reference/api/pandas.DataFrame.T +generated/pandas.DataFrame.timetuple,../reference/api/pandas.DataFrame.timetuple +generated/pandas.DataFrame.to_clipboard,../reference/api/pandas.DataFrame.to_clipboard +generated/pandas.DataFrame.to_csv,../reference/api/pandas.DataFrame.to_csv +generated/pandas.DataFrame.to_dense,../reference/api/pandas.DataFrame.to_dense +generated/pandas.DataFrame.to_dict,../reference/api/pandas.DataFrame.to_dict +generated/pandas.DataFrame.to_excel,../reference/api/pandas.DataFrame.to_excel +generated/pandas.DataFrame.to_feather,../reference/api/pandas.DataFrame.to_feather +generated/pandas.DataFrame.to_gbq,../reference/api/pandas.DataFrame.to_gbq +generated/pandas.DataFrame.to_hdf,../reference/api/pandas.DataFrame.to_hdf +generated/pandas.DataFrame.to,../reference/api/pandas.DataFrame.to +generated/pandas.DataFrame.to_json,../reference/api/pandas.DataFrame.to_json +generated/pandas.DataFrame.to_latex,../reference/api/pandas.DataFrame.to_latex +generated/pandas.DataFrame.to_msgpack,../reference/api/pandas.DataFrame.to_msgpack +generated/pandas.DataFrame.to_numpy,../reference/api/pandas.DataFrame.to_numpy +generated/pandas.DataFrame.to_panel,../reference/api/pandas.DataFrame.to_panel +generated/pandas.DataFrame.to_parquet,../reference/api/pandas.DataFrame.to_parquet +generated/pandas.DataFrame.to_period,../reference/api/pandas.DataFrame.to_period +generated/pandas.DataFrame.to_pickle,../reference/api/pandas.DataFrame.to_pickle +generated/pandas.DataFrame.to_records,../reference/api/pandas.DataFrame.to_records +generated/pandas.DataFrame.to_sparse,../reference/api/pandas.DataFrame.to_sparse +generated/pandas.DataFrame.to_sql,../reference/api/pandas.DataFrame.to_sql 
+generated/pandas.DataFrame.to_stata,../reference/api/pandas.DataFrame.to_stata +generated/pandas.DataFrame.to_string,../reference/api/pandas.DataFrame.to_string +generated/pandas.DataFrame.to_timestamp,../reference/api/pandas.DataFrame.to_timestamp +generated/pandas.DataFrame.to_xarray,../reference/api/pandas.DataFrame.to_xarray +generated/pandas.DataFrame.transform,../reference/api/pandas.DataFrame.transform +generated/pandas.DataFrame.transpose,../reference/api/pandas.DataFrame.transpose +generated/pandas.DataFrame.truediv,../reference/api/pandas.DataFrame.truediv +generated/pandas.DataFrame.truncate,../reference/api/pandas.DataFrame.truncate +generated/pandas.DataFrame.tshift,../reference/api/pandas.DataFrame.tshift +generated/pandas.DataFrame.tz_convert,../reference/api/pandas.DataFrame.tz_convert +generated/pandas.DataFrame.tz_localize,../reference/api/pandas.DataFrame.tz_localize +generated/pandas.DataFrame.unstack,../reference/api/pandas.DataFrame.unstack +generated/pandas.DataFrame.update,../reference/api/pandas.DataFrame.update +generated/pandas.DataFrame.values,../reference/api/pandas.DataFrame.values +generated/pandas.DataFrame.var,../reference/api/pandas.DataFrame.var +generated/pandas.DataFrame.where,../reference/api/pandas.DataFrame.where +generated/pandas.DataFrame.xs,../reference/api/pandas.DataFrame.xs +generated/pandas.date_range,../reference/api/pandas.date_range +generated/pandas.DatetimeIndex.ceil,../reference/api/pandas.DatetimeIndex.ceil +generated/pandas.DatetimeIndex.date,../reference/api/pandas.DatetimeIndex.date +generated/pandas.DatetimeIndex.day,../reference/api/pandas.DatetimeIndex.day +generated/pandas.DatetimeIndex.day_name,../reference/api/pandas.DatetimeIndex.day_name +generated/pandas.DatetimeIndex.dayofweek,../reference/api/pandas.DatetimeIndex.dayofweek +generated/pandas.DatetimeIndex.dayofyear,../reference/api/pandas.DatetimeIndex.dayofyear +generated/pandas.DatetimeIndex.floor,../reference/api/pandas.DatetimeIndex.floor 
+generated/pandas.DatetimeIndex.freq,../reference/api/pandas.DatetimeIndex.freq +generated/pandas.DatetimeIndex.freqstr,../reference/api/pandas.DatetimeIndex.freqstr +generated/pandas.DatetimeIndex.hour,../reference/api/pandas.DatetimeIndex.hour +generated/pandas.DatetimeIndex,../reference/api/pandas.DatetimeIndex +generated/pandas.DatetimeIndex.indexer_at_time,../reference/api/pandas.DatetimeIndex.indexer_at_time +generated/pandas.DatetimeIndex.indexer_between_time,../reference/api/pandas.DatetimeIndex.indexer_between_time +generated/pandas.DatetimeIndex.inferred_freq,../reference/api/pandas.DatetimeIndex.inferred_freq +generated/pandas.DatetimeIndex.is_leap_year,../reference/api/pandas.DatetimeIndex.is_leap_year +generated/pandas.DatetimeIndex.is_month_end,../reference/api/pandas.DatetimeIndex.is_month_end +generated/pandas.DatetimeIndex.is_month_start,../reference/api/pandas.DatetimeIndex.is_month_start +generated/pandas.DatetimeIndex.is_quarter_end,../reference/api/pandas.DatetimeIndex.is_quarter_end +generated/pandas.DatetimeIndex.is_quarter_start,../reference/api/pandas.DatetimeIndex.is_quarter_start +generated/pandas.DatetimeIndex.is_year_end,../reference/api/pandas.DatetimeIndex.is_year_end +generated/pandas.DatetimeIndex.is_year_start,../reference/api/pandas.DatetimeIndex.is_year_start +generated/pandas.DatetimeIndex.microsecond,../reference/api/pandas.DatetimeIndex.microsecond +generated/pandas.DatetimeIndex.minute,../reference/api/pandas.DatetimeIndex.minute +generated/pandas.DatetimeIndex.month,../reference/api/pandas.DatetimeIndex.month +generated/pandas.DatetimeIndex.month_name,../reference/api/pandas.DatetimeIndex.month_name +generated/pandas.DatetimeIndex.nanosecond,../reference/api/pandas.DatetimeIndex.nanosecond +generated/pandas.DatetimeIndex.normalize,../reference/api/pandas.DatetimeIndex.normalize +generated/pandas.DatetimeIndex.quarter,../reference/api/pandas.DatetimeIndex.quarter 
+generated/pandas.DatetimeIndex.round,../reference/api/pandas.DatetimeIndex.round +generated/pandas.DatetimeIndex.second,../reference/api/pandas.DatetimeIndex.second +generated/pandas.DatetimeIndex.snap,../reference/api/pandas.DatetimeIndex.snap +generated/pandas.DatetimeIndex.strftime,../reference/api/pandas.DatetimeIndex.strftime +generated/pandas.DatetimeIndex.time,../reference/api/pandas.DatetimeIndex.time +generated/pandas.DatetimeIndex.timetz,../reference/api/pandas.DatetimeIndex.timetz +generated/pandas.DatetimeIndex.to_frame,../reference/api/pandas.DatetimeIndex.to_frame +generated/pandas.DatetimeIndex.to_perioddelta,../reference/api/pandas.DatetimeIndex.to_perioddelta +generated/pandas.DatetimeIndex.to_period,../reference/api/pandas.DatetimeIndex.to_period +generated/pandas.DatetimeIndex.to_pydatetime,../reference/api/pandas.DatetimeIndex.to_pydatetime +generated/pandas.DatetimeIndex.to_series,../reference/api/pandas.DatetimeIndex.to_series +generated/pandas.DatetimeIndex.tz_convert,../reference/api/pandas.DatetimeIndex.tz_convert +generated/pandas.DatetimeIndex.tz,../reference/api/pandas.DatetimeIndex.tz +generated/pandas.DatetimeIndex.tz_localize,../reference/api/pandas.DatetimeIndex.tz_localize +generated/pandas.DatetimeIndex.weekday,../reference/api/pandas.DatetimeIndex.weekday +generated/pandas.DatetimeIndex.week,../reference/api/pandas.DatetimeIndex.week +generated/pandas.DatetimeIndex.weekofyear,../reference/api/pandas.DatetimeIndex.weekofyear +generated/pandas.DatetimeIndex.year,../reference/api/pandas.DatetimeIndex.year +generated/pandas.DatetimeTZDtype.base,../reference/api/pandas.DatetimeTZDtype.base +generated/pandas.DatetimeTZDtype.construct_array_type,../reference/api/pandas.DatetimeTZDtype.construct_array_type +generated/pandas.DatetimeTZDtype.construct_from_string,../reference/api/pandas.DatetimeTZDtype.construct_from_string +generated/pandas.DatetimeTZDtype,../reference/api/pandas.DatetimeTZDtype 
+generated/pandas.DatetimeTZDtype.isbuiltin,../reference/api/pandas.DatetimeTZDtype.isbuiltin +generated/pandas.DatetimeTZDtype.is_dtype,../reference/api/pandas.DatetimeTZDtype.is_dtype +generated/pandas.DatetimeTZDtype.isnative,../reference/api/pandas.DatetimeTZDtype.isnative +generated/pandas.DatetimeTZDtype.itemsize,../reference/api/pandas.DatetimeTZDtype.itemsize +generated/pandas.DatetimeTZDtype.kind,../reference/api/pandas.DatetimeTZDtype.kind +generated/pandas.DatetimeTZDtype.name,../reference/api/pandas.DatetimeTZDtype.name +generated/pandas.DatetimeTZDtype.names,../reference/api/pandas.DatetimeTZDtype.names +generated/pandas.DatetimeTZDtype.na_value,../reference/api/pandas.DatetimeTZDtype.na_value +generated/pandas.DatetimeTZDtype.num,../reference/api/pandas.DatetimeTZDtype.num +generated/pandas.DatetimeTZDtype.reset_cache,../reference/api/pandas.DatetimeTZDtype.reset_cache +generated/pandas.DatetimeTZDtype.shape,../reference/api/pandas.DatetimeTZDtype.shape +generated/pandas.DatetimeTZDtype.str,../reference/api/pandas.DatetimeTZDtype.str +generated/pandas.DatetimeTZDtype.subdtype,../reference/api/pandas.DatetimeTZDtype.subdtype +generated/pandas.DatetimeTZDtype.tz,../reference/api/pandas.DatetimeTZDtype.tz +generated/pandas.DatetimeTZDtype.unit,../reference/api/pandas.DatetimeTZDtype.unit +generated/pandas.describe_option,../reference/api/pandas.describe_option +generated/pandas.errors.DtypeWarning,../reference/api/pandas.errors.DtypeWarning +generated/pandas.errors.EmptyDataError,../reference/api/pandas.errors.EmptyDataError +generated/pandas.errors.OutOfBoundsDatetime,../reference/api/pandas.errors.OutOfBoundsDatetime +generated/pandas.errors.ParserError,../reference/api/pandas.errors.ParserError +generated/pandas.errors.ParserWarning,../reference/api/pandas.errors.ParserWarning +generated/pandas.errors.PerformanceWarning,../reference/api/pandas.errors.PerformanceWarning 
+generated/pandas.errors.UnsortedIndexError,../reference/api/pandas.errors.UnsortedIndexError +generated/pandas.errors.UnsupportedFunctionCall,../reference/api/pandas.errors.UnsupportedFunctionCall +generated/pandas.eval,../reference/api/pandas.eval +generated/pandas.ExcelFile.parse,../reference/api/pandas.ExcelFile.parse +generated/pandas.ExcelWriter,../reference/api/pandas.ExcelWriter +generated/pandas.factorize,../reference/api/pandas.factorize +generated/pandas.Float64Index,../reference/api/pandas.Float64Index +generated/pandas.get_dummies,../reference/api/pandas.get_dummies +generated/pandas.get_option,../reference/api/pandas.get_option +generated/pandas.Grouper,../reference/api/pandas.Grouper +generated/pandas.HDFStore.append,../reference/api/pandas.HDFStore.append +generated/pandas.HDFStore.get,../reference/api/pandas.HDFStore.get +generated/pandas.HDFStore.groups,../reference/api/pandas.HDFStore.groups +generated/pandas.HDFStore.info,../reference/api/pandas.HDFStore.info +generated/pandas.HDFStore.keys,../reference/api/pandas.HDFStore.keys +generated/pandas.HDFStore.put,../reference/api/pandas.HDFStore.put +generated/pandas.HDFStore.select,../reference/api/pandas.HDFStore.select +generated/pandas.HDFStore.walk,../reference/api/pandas.HDFStore.walk +generated/pandas.Index.all,../reference/api/pandas.Index.all +generated/pandas.Index.any,../reference/api/pandas.Index.any +generated/pandas.Index.append,../reference/api/pandas.Index.append +generated/pandas.Index.argmax,../reference/api/pandas.Index.argmax +generated/pandas.Index.argmin,../reference/api/pandas.Index.argmin +generated/pandas.Index.argsort,../reference/api/pandas.Index.argsort +generated/pandas.Index.array,../reference/api/pandas.Index.array +generated/pandas.Index.asi8,../reference/api/pandas.Index.asi8 +generated/pandas.Index.asof,../reference/api/pandas.Index.asof +generated/pandas.Index.asof_locs,../reference/api/pandas.Index.asof_locs 
+generated/pandas.Index.astype,../reference/api/pandas.Index.astype +generated/pandas.Index.base,../reference/api/pandas.Index.base +generated/pandas.Index.contains,../reference/api/pandas.Index.contains +generated/pandas.Index.copy,../reference/api/pandas.Index.copy +generated/pandas.Index.data,../reference/api/pandas.Index.data +generated/pandas.Index.delete,../reference/api/pandas.Index.delete +generated/pandas.Index.difference,../reference/api/pandas.Index.difference +generated/pandas.Index.drop_duplicates,../reference/api/pandas.Index.drop_duplicates +generated/pandas.Index.drop,../reference/api/pandas.Index.drop +generated/pandas.Index.droplevel,../reference/api/pandas.Index.droplevel +generated/pandas.Index.dropna,../reference/api/pandas.Index.dropna +generated/pandas.Index.dtype,../reference/api/pandas.Index.dtype +generated/pandas.Index.dtype_str,../reference/api/pandas.Index.dtype_str +generated/pandas.Index.duplicated,../reference/api/pandas.Index.duplicated +generated/pandas.Index.empty,../reference/api/pandas.Index.empty +generated/pandas.Index.equals,../reference/api/pandas.Index.equals +generated/pandas.Index.factorize,../reference/api/pandas.Index.factorize +generated/pandas.Index.fillna,../reference/api/pandas.Index.fillna +generated/pandas.Index.flags,../reference/api/pandas.Index.flags +generated/pandas.Index.format,../reference/api/pandas.Index.format +generated/pandas.Index.get_duplicates,../reference/api/pandas.Index.get_duplicates +generated/pandas.Index.get_indexer_for,../reference/api/pandas.Index.get_indexer_for +generated/pandas.Index.get_indexer,../reference/api/pandas.Index.get_indexer +generated/pandas.Index.get_indexer_non_unique,../reference/api/pandas.Index.get_indexer_non_unique +generated/pandas.Index.get_level_values,../reference/api/pandas.Index.get_level_values +generated/pandas.Index.get_loc,../reference/api/pandas.Index.get_loc +generated/pandas.Index.get_slice_bound,../reference/api/pandas.Index.get_slice_bound 
+generated/pandas.Index.get_value,../reference/api/pandas.Index.get_value +generated/pandas.Index.get_values,../reference/api/pandas.Index.get_values +generated/pandas.Index.groupby,../reference/api/pandas.Index.groupby +generated/pandas.Index.has_duplicates,../reference/api/pandas.Index.has_duplicates +generated/pandas.Index.hasnans,../reference/api/pandas.Index.hasnans +generated/pandas.Index.holds_integer,../reference/api/pandas.Index.holds_integer +generated/pandas.Index,../reference/api/pandas.Index +generated/pandas.Index.identical,../reference/api/pandas.Index.identical +generated/pandas.Index.inferred_type,../reference/api/pandas.Index.inferred_type +generated/pandas.Index.insert,../reference/api/pandas.Index.insert +generated/pandas.Index.intersection,../reference/api/pandas.Index.intersection +generated/pandas.Index.is_all_dates,../reference/api/pandas.Index.is_all_dates +generated/pandas.Index.is_boolean,../reference/api/pandas.Index.is_boolean +generated/pandas.Index.is_categorical,../reference/api/pandas.Index.is_categorical +generated/pandas.Index.is_floating,../reference/api/pandas.Index.is_floating +generated/pandas.Index.is_,../reference/api/pandas.Index.is_ +generated/pandas.Index.isin,../reference/api/pandas.Index.isin +generated/pandas.Index.is_integer,../reference/api/pandas.Index.is_integer +generated/pandas.Index.is_interval,../reference/api/pandas.Index.is_interval +generated/pandas.Index.is_lexsorted_for_tuple,../reference/api/pandas.Index.is_lexsorted_for_tuple +generated/pandas.Index.is_mixed,../reference/api/pandas.Index.is_mixed +generated/pandas.Index.is_monotonic_decreasing,../reference/api/pandas.Index.is_monotonic_decreasing +generated/pandas.Index.is_monotonic,../reference/api/pandas.Index.is_monotonic +generated/pandas.Index.is_monotonic_increasing,../reference/api/pandas.Index.is_monotonic_increasing +generated/pandas.Index.isna,../reference/api/pandas.Index.isna +generated/pandas.Index.isnull,../reference/api/pandas.Index.isnull 
+generated/pandas.Index.is_numeric,../reference/api/pandas.Index.is_numeric +generated/pandas.Index.is_object,../reference/api/pandas.Index.is_object +generated/pandas.Index.is_type_compatible,../reference/api/pandas.Index.is_type_compatible +generated/pandas.Index.is_unique,../reference/api/pandas.Index.is_unique +generated/pandas.Index.item,../reference/api/pandas.Index.item +generated/pandas.Index.itemsize,../reference/api/pandas.Index.itemsize +generated/pandas.Index.join,../reference/api/pandas.Index.join +generated/pandas.Index.map,../reference/api/pandas.Index.map +generated/pandas.Index.max,../reference/api/pandas.Index.max +generated/pandas.Index.memory_usage,../reference/api/pandas.Index.memory_usage +generated/pandas.Index.min,../reference/api/pandas.Index.min +generated/pandas.Index.name,../reference/api/pandas.Index.name +generated/pandas.Index.names,../reference/api/pandas.Index.names +generated/pandas.Index.nbytes,../reference/api/pandas.Index.nbytes +generated/pandas.Index.ndim,../reference/api/pandas.Index.ndim +generated/pandas.Index.nlevels,../reference/api/pandas.Index.nlevels +generated/pandas.Index.notna,../reference/api/pandas.Index.notna +generated/pandas.Index.notnull,../reference/api/pandas.Index.notnull +generated/pandas.Index.nunique,../reference/api/pandas.Index.nunique +generated/pandas.Index.putmask,../reference/api/pandas.Index.putmask +generated/pandas.Index.ravel,../reference/api/pandas.Index.ravel +generated/pandas.Index.reindex,../reference/api/pandas.Index.reindex +generated/pandas.Index.rename,../reference/api/pandas.Index.rename +generated/pandas.Index.repeat,../reference/api/pandas.Index.repeat +generated/pandas.Index.searchsorted,../reference/api/pandas.Index.searchsorted +generated/pandas.Index.set_names,../reference/api/pandas.Index.set_names +generated/pandas.Index.set_value,../reference/api/pandas.Index.set_value +generated/pandas.Index.shape,../reference/api/pandas.Index.shape 
+generated/pandas.Index.shift,../reference/api/pandas.Index.shift +generated/pandas.Index.size,../reference/api/pandas.Index.size +generated/pandas.IndexSlice,../reference/api/pandas.IndexSlice +generated/pandas.Index.slice_indexer,../reference/api/pandas.Index.slice_indexer +generated/pandas.Index.slice_locs,../reference/api/pandas.Index.slice_locs +generated/pandas.Index.sort,../reference/api/pandas.Index.sort +generated/pandas.Index.sortlevel,../reference/api/pandas.Index.sortlevel +generated/pandas.Index.sort_values,../reference/api/pandas.Index.sort_values +generated/pandas.Index.str,../reference/api/pandas.Index.str +generated/pandas.Index.strides,../reference/api/pandas.Index.strides +generated/pandas.Index.summary,../reference/api/pandas.Index.summary +generated/pandas.Index.symmetric_difference,../reference/api/pandas.Index.symmetric_difference +generated/pandas.Index.take,../reference/api/pandas.Index.take +generated/pandas.Index.T,../reference/api/pandas.Index.T +generated/pandas.Index.to_flat_index,../reference/api/pandas.Index.to_flat_index +generated/pandas.Index.to_frame,../reference/api/pandas.Index.to_frame +generated/pandas.Index.to_list,../reference/api/pandas.Index.to_list +generated/pandas.Index.tolist,../reference/api/pandas.Index.tolist +generated/pandas.Index.to_native_types,../reference/api/pandas.Index.to_native_types +generated/pandas.Index.to_numpy,../reference/api/pandas.Index.to_numpy +generated/pandas.Index.to_series,../reference/api/pandas.Index.to_series +generated/pandas.Index.transpose,../reference/api/pandas.Index.transpose +generated/pandas.Index.union,../reference/api/pandas.Index.union +generated/pandas.Index.unique,../reference/api/pandas.Index.unique +generated/pandas.Index.value_counts,../reference/api/pandas.Index.value_counts +generated/pandas.Index.values,../reference/api/pandas.Index.values +generated/pandas.Index.view,../reference/api/pandas.Index.view +generated/pandas.Index.where,../reference/api/pandas.Index.where 
+generated/pandas.infer_freq,../reference/api/pandas.infer_freq +generated/pandas.Interval.closed,../reference/api/pandas.Interval.closed +generated/pandas.Interval.closed_left,../reference/api/pandas.Interval.closed_left +generated/pandas.Interval.closed_right,../reference/api/pandas.Interval.closed_right +generated/pandas.Interval,../reference/api/pandas.Interval +generated/pandas.IntervalIndex.closed,../reference/api/pandas.IntervalIndex.closed +generated/pandas.IntervalIndex.contains,../reference/api/pandas.IntervalIndex.contains +generated/pandas.IntervalIndex.from_arrays,../reference/api/pandas.IntervalIndex.from_arrays +generated/pandas.IntervalIndex.from_breaks,../reference/api/pandas.IntervalIndex.from_breaks +generated/pandas.IntervalIndex.from_tuples,../reference/api/pandas.IntervalIndex.from_tuples +generated/pandas.IntervalIndex.get_indexer,../reference/api/pandas.IntervalIndex.get_indexer +generated/pandas.IntervalIndex.get_loc,../reference/api/pandas.IntervalIndex.get_loc +generated/pandas.IntervalIndex,../reference/api/pandas.IntervalIndex +generated/pandas.IntervalIndex.is_non_overlapping_monotonic,../reference/api/pandas.IntervalIndex.is_non_overlapping_monotonic +generated/pandas.IntervalIndex.is_overlapping,../reference/api/pandas.IntervalIndex.is_overlapping +generated/pandas.IntervalIndex.left,../reference/api/pandas.IntervalIndex.left +generated/pandas.IntervalIndex.length,../reference/api/pandas.IntervalIndex.length +generated/pandas.IntervalIndex.mid,../reference/api/pandas.IntervalIndex.mid +generated/pandas.IntervalIndex.overlaps,../reference/api/pandas.IntervalIndex.overlaps +generated/pandas.IntervalIndex.right,../reference/api/pandas.IntervalIndex.right +generated/pandas.IntervalIndex.set_closed,../reference/api/pandas.IntervalIndex.set_closed +generated/pandas.IntervalIndex.to_tuples,../reference/api/pandas.IntervalIndex.to_tuples +generated/pandas.IntervalIndex.values,../reference/api/pandas.IntervalIndex.values 
+generated/pandas.Interval.left,../reference/api/pandas.Interval.left +generated/pandas.Interval.length,../reference/api/pandas.Interval.length +generated/pandas.Interval.mid,../reference/api/pandas.Interval.mid +generated/pandas.Interval.open_left,../reference/api/pandas.Interval.open_left +generated/pandas.Interval.open_right,../reference/api/pandas.Interval.open_right +generated/pandas.Interval.overlaps,../reference/api/pandas.Interval.overlaps +generated/pandas.interval_range,../reference/api/pandas.interval_range +generated/pandas.Interval.right,../reference/api/pandas.Interval.right +generated/pandas.io.formats.style.Styler.apply,../reference/api/pandas.io.formats.style.Styler.apply +generated/pandas.io.formats.style.Styler.applymap,../reference/api/pandas.io.formats.style.Styler.applymap +generated/pandas.io.formats.style.Styler.background_gradient,../reference/api/pandas.io.formats.style.Styler.background_gradient +generated/pandas.io.formats.style.Styler.bar,../reference/api/pandas.io.formats.style.Styler.bar +generated/pandas.io.formats.style.Styler.clear,../reference/api/pandas.io.formats.style.Styler.clear +generated/pandas.io.formats.style.Styler.env,../reference/api/pandas.io.formats.style.Styler.env +generated/pandas.io.formats.style.Styler.export,../reference/api/pandas.io.formats.style.Styler.export +generated/pandas.io.formats.style.Styler.format,../reference/api/pandas.io.formats.style.Styler.format +generated/pandas.io.formats.style.Styler.from_custom_template,../reference/api/pandas.io.formats.style.Styler.from_custom_template +generated/pandas.io.formats.style.Styler.hide_columns,../reference/api/pandas.io.formats.style.Styler.hide_columns +generated/pandas.io.formats.style.Styler.hide_index,../reference/api/pandas.io.formats.style.Styler.hide_index +generated/pandas.io.formats.style.Styler.highlight_max,../reference/api/pandas.io.formats.style.Styler.highlight_max 
+generated/pandas.io.formats.style.Styler.highlight_min,../reference/api/pandas.io.formats.style.Styler.highlight_min +generated/pandas.io.formats.style.Styler.highlight_null,../reference/api/pandas.io.formats.style.Styler.highlight_null +generated/pandas.io.formats.style.Styler,../reference/api/pandas.io.formats.style.Styler +generated/pandas.io.formats.style.Styler.loader,../reference/api/pandas.io.formats.style.Styler.loader +generated/pandas.io.formats.style.Styler.pipe,../reference/api/pandas.io.formats.style.Styler.pipe +generated/pandas.io.formats.style.Styler.render,../reference/api/pandas.io.formats.style.Styler.render +generated/pandas.io.formats.style.Styler.set_caption,../reference/api/pandas.io.formats.style.Styler.set_caption +generated/pandas.io.formats.style.Styler.set_precision,../reference/api/pandas.io.formats.style.Styler.set_precision +generated/pandas.io.formats.style.Styler.set_properties,../reference/api/pandas.io.formats.style.Styler.set_properties +generated/pandas.io.formats.style.Styler.set_table_attributes,../reference/api/pandas.io.formats.style.Styler.set_table_attributes +generated/pandas.io.formats.style.Styler.set_table_styles,../reference/api/pandas.io.formats.style.Styler.set_table_styles +generated/pandas.io.formats.style.Styler.set_uuid,../reference/api/pandas.io.formats.style.Styler.set_uuid +generated/pandas.io.formats.style.Styler.template,../reference/api/pandas.io.formats.style.Styler.template +generated/pandas.io.formats.style.Styler.to_excel,../reference/api/pandas.io.formats.style.Styler.to_excel +generated/pandas.io.formats.style.Styler.use,../reference/api/pandas.io.formats.style.Styler.use +generated/pandas.io.formats.style.Styler.where,../reference/api/pandas.io.formats.style.Styler.where +generated/pandas.io.json.build_table_schema,../reference/api/pandas.io.json.build_table_schema +generated/pandas.io.json.json_normalize,../reference/api/pandas.io.json.json_normalize 
+generated/pandas.io.stata.StataReader.data,../reference/api/pandas.io.stata.StataReader.data +generated/pandas.io.stata.StataReader.data_label,../reference/api/pandas.io.stata.StataReader.data_label +generated/pandas.io.stata.StataReader.value_labels,../reference/api/pandas.io.stata.StataReader.value_labels +generated/pandas.io.stata.StataReader.variable_labels,../reference/api/pandas.io.stata.StataReader.variable_labels +generated/pandas.io.stata.StataWriter.write_file,../reference/api/pandas.io.stata.StataWriter.write_file +generated/pandas.isna,../reference/api/pandas.isna +generated/pandas.isnull,../reference/api/pandas.isnull +generated/pandas.melt,../reference/api/pandas.melt +generated/pandas.merge_asof,../reference/api/pandas.merge_asof +generated/pandas.merge,../reference/api/pandas.merge +generated/pandas.merge_ordered,../reference/api/pandas.merge_ordered +generated/pandas.MultiIndex.codes,../reference/api/pandas.MultiIndex.codes +generated/pandas.MultiIndex.droplevel,../reference/api/pandas.MultiIndex.droplevel +generated/pandas.MultiIndex.from_arrays,../reference/api/pandas.MultiIndex.from_arrays +generated/pandas.MultiIndex.from_frame,../reference/api/pandas.MultiIndex.from_frame +generated/pandas.MultiIndex.from_product,../reference/api/pandas.MultiIndex.from_product +generated/pandas.MultiIndex.from_tuples,../reference/api/pandas.MultiIndex.from_tuples +generated/pandas.MultiIndex.get_indexer,../reference/api/pandas.MultiIndex.get_indexer +generated/pandas.MultiIndex.get_level_values,../reference/api/pandas.MultiIndex.get_level_values +generated/pandas.MultiIndex.get_loc,../reference/api/pandas.MultiIndex.get_loc +generated/pandas.MultiIndex.get_loc_level,../reference/api/pandas.MultiIndex.get_loc_level +generated/pandas.MultiIndex,../reference/api/pandas.MultiIndex +generated/pandas.MultiIndex.is_lexsorted,../reference/api/pandas.MultiIndex.is_lexsorted +generated/pandas.MultiIndex.levels,../reference/api/pandas.MultiIndex.levels 
+generated/pandas.MultiIndex.levshape,../reference/api/pandas.MultiIndex.levshape +generated/pandas.MultiIndex.names,../reference/api/pandas.MultiIndex.names +generated/pandas.MultiIndex.nlevels,../reference/api/pandas.MultiIndex.nlevels +generated/pandas.MultiIndex.remove_unused_levels,../reference/api/pandas.MultiIndex.remove_unused_levels +generated/pandas.MultiIndex.reorder_levels,../reference/api/pandas.MultiIndex.reorder_levels +generated/pandas.MultiIndex.set_codes,../reference/api/pandas.MultiIndex.set_codes +generated/pandas.MultiIndex.set_levels,../reference/api/pandas.MultiIndex.set_levels +generated/pandas.MultiIndex.sortlevel,../reference/api/pandas.MultiIndex.sortlevel +generated/pandas.MultiIndex.swaplevel,../reference/api/pandas.MultiIndex.swaplevel +generated/pandas.MultiIndex.to_flat_index,../reference/api/pandas.MultiIndex.to_flat_index +generated/pandas.MultiIndex.to_frame,../reference/api/pandas.MultiIndex.to_frame +generated/pandas.MultiIndex.to_hierarchical,../reference/api/pandas.MultiIndex.to_hierarchical +generated/pandas.notna,../reference/api/pandas.notna +generated/pandas.notnull,../reference/api/pandas.notnull +generated/pandas.option_context,../reference/api/pandas.option_context +generated/pandas.Panel.abs,../reference/api/pandas.Panel.abs +generated/pandas.Panel.add,../reference/api/pandas.Panel.add +generated/pandas.Panel.add_prefix,../reference/api/pandas.Panel.add_prefix +generated/pandas.Panel.add_suffix,../reference/api/pandas.Panel.add_suffix +generated/pandas.Panel.agg,../reference/api/pandas.Panel.agg +generated/pandas.Panel.aggregate,../reference/api/pandas.Panel.aggregate +generated/pandas.Panel.align,../reference/api/pandas.Panel.align +generated/pandas.Panel.all,../reference/api/pandas.Panel.all +generated/pandas.Panel.any,../reference/api/pandas.Panel.any +generated/pandas.Panel.apply,../reference/api/pandas.Panel.apply +generated/pandas.Panel.as_blocks,../reference/api/pandas.Panel.as_blocks 
+generated/pandas.Panel.asfreq,../reference/api/pandas.Panel.asfreq +generated/pandas.Panel.as_matrix,../reference/api/pandas.Panel.as_matrix +generated/pandas.Panel.asof,../reference/api/pandas.Panel.asof +generated/pandas.Panel.astype,../reference/api/pandas.Panel.astype +generated/pandas.Panel.at,../reference/api/pandas.Panel.at +generated/pandas.Panel.at_time,../reference/api/pandas.Panel.at_time +generated/pandas.Panel.axes,../reference/api/pandas.Panel.axes +generated/pandas.Panel.between_time,../reference/api/pandas.Panel.between_time +generated/pandas.Panel.bfill,../reference/api/pandas.Panel.bfill +generated/pandas.Panel.blocks,../reference/api/pandas.Panel.blocks +generated/pandas.Panel.bool,../reference/api/pandas.Panel.bool +generated/pandas.Panel.clip,../reference/api/pandas.Panel.clip +generated/pandas.Panel.clip_lower,../reference/api/pandas.Panel.clip_lower +generated/pandas.Panel.clip_upper,../reference/api/pandas.Panel.clip_upper +generated/pandas.Panel.compound,../reference/api/pandas.Panel.compound +generated/pandas.Panel.conform,../reference/api/pandas.Panel.conform +generated/pandas.Panel.convert_objects,../reference/api/pandas.Panel.convert_objects +generated/pandas.Panel.copy,../reference/api/pandas.Panel.copy +generated/pandas.Panel.count,../reference/api/pandas.Panel.count +generated/pandas.Panel.cummax,../reference/api/pandas.Panel.cummax +generated/pandas.Panel.cummin,../reference/api/pandas.Panel.cummin +generated/pandas.Panel.cumprod,../reference/api/pandas.Panel.cumprod +generated/pandas.Panel.cumsum,../reference/api/pandas.Panel.cumsum +generated/pandas.Panel.describe,../reference/api/pandas.Panel.describe +generated/pandas.Panel.div,../reference/api/pandas.Panel.div +generated/pandas.Panel.divide,../reference/api/pandas.Panel.divide +generated/pandas.Panel.drop,../reference/api/pandas.Panel.drop +generated/pandas.Panel.droplevel,../reference/api/pandas.Panel.droplevel 
+generated/pandas.Panel.dropna,../reference/api/pandas.Panel.dropna +generated/pandas.Panel.dtypes,../reference/api/pandas.Panel.dtypes +generated/pandas.Panel.empty,../reference/api/pandas.Panel.empty +generated/pandas.Panel.eq,../reference/api/pandas.Panel.eq +generated/pandas.Panel.equals,../reference/api/pandas.Panel.equals +generated/pandas.Panel.ffill,../reference/api/pandas.Panel.ffill +generated/pandas.Panel.fillna,../reference/api/pandas.Panel.fillna +generated/pandas.Panel.filter,../reference/api/pandas.Panel.filter +generated/pandas.Panel.first,../reference/api/pandas.Panel.first +generated/pandas.Panel.first_valid_index,../reference/api/pandas.Panel.first_valid_index +generated/pandas.Panel.floordiv,../reference/api/pandas.Panel.floordiv +generated/pandas.Panel.from_dict,../reference/api/pandas.Panel.from_dict +generated/pandas.Panel.fromDict,../reference/api/pandas.Panel.fromDict +generated/pandas.Panel.ftypes,../reference/api/pandas.Panel.ftypes +generated/pandas.Panel.ge,../reference/api/pandas.Panel.ge +generated/pandas.Panel.get_dtype_counts,../reference/api/pandas.Panel.get_dtype_counts +generated/pandas.Panel.get_ftype_counts,../reference/api/pandas.Panel.get_ftype_counts +generated/pandas.Panel.get,../reference/api/pandas.Panel.get +generated/pandas.Panel.get_value,../reference/api/pandas.Panel.get_value +generated/pandas.Panel.get_values,../reference/api/pandas.Panel.get_values +generated/pandas.Panel.groupby,../reference/api/pandas.Panel.groupby +generated/pandas.Panel.gt,../reference/api/pandas.Panel.gt +generated/pandas.Panel.head,../reference/api/pandas.Panel.head +generated/pandas.Panel,../reference/api/pandas.Panel +generated/pandas.Panel.iat,../reference/api/pandas.Panel.iat +generated/pandas.Panel.iloc,../reference/api/pandas.Panel.iloc +generated/pandas.Panel.infer_objects,../reference/api/pandas.Panel.infer_objects +generated/pandas.Panel.interpolate,../reference/api/pandas.Panel.interpolate 
+generated/pandas.Panel.is_copy,../reference/api/pandas.Panel.is_copy +generated/pandas.Panel.isna,../reference/api/pandas.Panel.isna +generated/pandas.Panel.isnull,../reference/api/pandas.Panel.isnull +generated/pandas.Panel.items,../reference/api/pandas.Panel.items +generated/pandas.Panel.__iter__,../reference/api/pandas.Panel.__iter__ +generated/pandas.Panel.iteritems,../reference/api/pandas.Panel.iteritems +generated/pandas.Panel.ix,../reference/api/pandas.Panel.ix +generated/pandas.Panel.join,../reference/api/pandas.Panel.join +generated/pandas.Panel.keys,../reference/api/pandas.Panel.keys +generated/pandas.Panel.kurt,../reference/api/pandas.Panel.kurt +generated/pandas.Panel.kurtosis,../reference/api/pandas.Panel.kurtosis +generated/pandas.Panel.last,../reference/api/pandas.Panel.last +generated/pandas.Panel.last_valid_index,../reference/api/pandas.Panel.last_valid_index +generated/pandas.Panel.le,../reference/api/pandas.Panel.le +generated/pandas.Panel.loc,../reference/api/pandas.Panel.loc +generated/pandas.Panel.lt,../reference/api/pandas.Panel.lt +generated/pandas.Panel.mad,../reference/api/pandas.Panel.mad +generated/pandas.Panel.major_axis,../reference/api/pandas.Panel.major_axis +generated/pandas.Panel.major_xs,../reference/api/pandas.Panel.major_xs +generated/pandas.Panel.mask,../reference/api/pandas.Panel.mask +generated/pandas.Panel.max,../reference/api/pandas.Panel.max +generated/pandas.Panel.mean,../reference/api/pandas.Panel.mean +generated/pandas.Panel.median,../reference/api/pandas.Panel.median +generated/pandas.Panel.min,../reference/api/pandas.Panel.min +generated/pandas.Panel.minor_axis,../reference/api/pandas.Panel.minor_axis +generated/pandas.Panel.minor_xs,../reference/api/pandas.Panel.minor_xs +generated/pandas.Panel.mod,../reference/api/pandas.Panel.mod +generated/pandas.Panel.mul,../reference/api/pandas.Panel.mul +generated/pandas.Panel.multiply,../reference/api/pandas.Panel.multiply 
+generated/pandas.Panel.ndim,../reference/api/pandas.Panel.ndim +generated/pandas.Panel.ne,../reference/api/pandas.Panel.ne +generated/pandas.Panel.notna,../reference/api/pandas.Panel.notna +generated/pandas.Panel.notnull,../reference/api/pandas.Panel.notnull +generated/pandas.Panel.pct_change,../reference/api/pandas.Panel.pct_change +generated/pandas.Panel.pipe,../reference/api/pandas.Panel.pipe +generated/pandas.Panel.pop,../reference/api/pandas.Panel.pop +generated/pandas.Panel.pow,../reference/api/pandas.Panel.pow +generated/pandas.Panel.prod,../reference/api/pandas.Panel.prod +generated/pandas.Panel.product,../reference/api/pandas.Panel.product +generated/pandas.Panel.radd,../reference/api/pandas.Panel.radd +generated/pandas.Panel.rank,../reference/api/pandas.Panel.rank +generated/pandas.Panel.rdiv,../reference/api/pandas.Panel.rdiv +generated/pandas.Panel.reindex_axis,../reference/api/pandas.Panel.reindex_axis +generated/pandas.Panel.reindex,../reference/api/pandas.Panel.reindex +generated/pandas.Panel.reindex_like,../reference/api/pandas.Panel.reindex_like +generated/pandas.Panel.rename_axis,../reference/api/pandas.Panel.rename_axis +generated/pandas.Panel.rename,../reference/api/pandas.Panel.rename +generated/pandas.Panel.replace,../reference/api/pandas.Panel.replace +generated/pandas.Panel.resample,../reference/api/pandas.Panel.resample +generated/pandas.Panel.rfloordiv,../reference/api/pandas.Panel.rfloordiv +generated/pandas.Panel.rmod,../reference/api/pandas.Panel.rmod +generated/pandas.Panel.rmul,../reference/api/pandas.Panel.rmul +generated/pandas.Panel.round,../reference/api/pandas.Panel.round +generated/pandas.Panel.rpow,../reference/api/pandas.Panel.rpow +generated/pandas.Panel.rsub,../reference/api/pandas.Panel.rsub +generated/pandas.Panel.rtruediv,../reference/api/pandas.Panel.rtruediv +generated/pandas.Panel.sample,../reference/api/pandas.Panel.sample +generated/pandas.Panel.select,../reference/api/pandas.Panel.select 
+generated/pandas.Panel.sem,../reference/api/pandas.Panel.sem +generated/pandas.Panel.set_axis,../reference/api/pandas.Panel.set_axis +generated/pandas.Panel.set_value,../reference/api/pandas.Panel.set_value +generated/pandas.Panel.shape,../reference/api/pandas.Panel.shape +generated/pandas.Panel.shift,../reference/api/pandas.Panel.shift +generated/pandas.Panel.size,../reference/api/pandas.Panel.size +generated/pandas.Panel.skew,../reference/api/pandas.Panel.skew +generated/pandas.Panel.slice_shift,../reference/api/pandas.Panel.slice_shift +generated/pandas.Panel.sort_index,../reference/api/pandas.Panel.sort_index +generated/pandas.Panel.sort_values,../reference/api/pandas.Panel.sort_values +generated/pandas.Panel.squeeze,../reference/api/pandas.Panel.squeeze +generated/pandas.Panel.std,../reference/api/pandas.Panel.std +generated/pandas.Panel.sub,../reference/api/pandas.Panel.sub +generated/pandas.Panel.subtract,../reference/api/pandas.Panel.subtract +generated/pandas.Panel.sum,../reference/api/pandas.Panel.sum +generated/pandas.Panel.swapaxes,../reference/api/pandas.Panel.swapaxes +generated/pandas.Panel.swaplevel,../reference/api/pandas.Panel.swaplevel +generated/pandas.Panel.tail,../reference/api/pandas.Panel.tail +generated/pandas.Panel.take,../reference/api/pandas.Panel.take +generated/pandas.Panel.timetuple,../reference/api/pandas.Panel.timetuple +generated/pandas.Panel.to_clipboard,../reference/api/pandas.Panel.to_clipboard +generated/pandas.Panel.to_csv,../reference/api/pandas.Panel.to_csv +generated/pandas.Panel.to_dense,../reference/api/pandas.Panel.to_dense +generated/pandas.Panel.to_excel,../reference/api/pandas.Panel.to_excel +generated/pandas.Panel.to_frame,../reference/api/pandas.Panel.to_frame +generated/pandas.Panel.to_hdf,../reference/api/pandas.Panel.to_hdf +generated/pandas.Panel.to_json,../reference/api/pandas.Panel.to_json +generated/pandas.Panel.to_latex,../reference/api/pandas.Panel.to_latex 
+generated/pandas.Panel.to_msgpack,../reference/api/pandas.Panel.to_msgpack +generated/pandas.Panel.to_pickle,../reference/api/pandas.Panel.to_pickle +generated/pandas.Panel.to_sparse,../reference/api/pandas.Panel.to_sparse +generated/pandas.Panel.to_sql,../reference/api/pandas.Panel.to_sql +generated/pandas.Panel.to_xarray,../reference/api/pandas.Panel.to_xarray +generated/pandas.Panel.transform,../reference/api/pandas.Panel.transform +generated/pandas.Panel.transpose,../reference/api/pandas.Panel.transpose +generated/pandas.Panel.truediv,../reference/api/pandas.Panel.truediv +generated/pandas.Panel.truncate,../reference/api/pandas.Panel.truncate +generated/pandas.Panel.tshift,../reference/api/pandas.Panel.tshift +generated/pandas.Panel.tz_convert,../reference/api/pandas.Panel.tz_convert +generated/pandas.Panel.tz_localize,../reference/api/pandas.Panel.tz_localize +generated/pandas.Panel.update,../reference/api/pandas.Panel.update +generated/pandas.Panel.values,../reference/api/pandas.Panel.values +generated/pandas.Panel.var,../reference/api/pandas.Panel.var +generated/pandas.Panel.where,../reference/api/pandas.Panel.where +generated/pandas.Panel.xs,../reference/api/pandas.Panel.xs +generated/pandas.Period.asfreq,../reference/api/pandas.Period.asfreq +generated/pandas.Period.day,../reference/api/pandas.Period.day +generated/pandas.Period.dayofweek,../reference/api/pandas.Period.dayofweek +generated/pandas.Period.dayofyear,../reference/api/pandas.Period.dayofyear +generated/pandas.Period.days_in_month,../reference/api/pandas.Period.days_in_month +generated/pandas.Period.daysinmonth,../reference/api/pandas.Period.daysinmonth +generated/pandas.Period.end_time,../reference/api/pandas.Period.end_time +generated/pandas.Period.freq,../reference/api/pandas.Period.freq +generated/pandas.Period.freqstr,../reference/api/pandas.Period.freqstr +generated/pandas.Period.hour,../reference/api/pandas.Period.hour +generated/pandas.Period,../reference/api/pandas.Period 
+generated/pandas.PeriodIndex.asfreq,../reference/api/pandas.PeriodIndex.asfreq +generated/pandas.PeriodIndex.day,../reference/api/pandas.PeriodIndex.day +generated/pandas.PeriodIndex.dayofweek,../reference/api/pandas.PeriodIndex.dayofweek +generated/pandas.PeriodIndex.dayofyear,../reference/api/pandas.PeriodIndex.dayofyear +generated/pandas.PeriodIndex.days_in_month,../reference/api/pandas.PeriodIndex.days_in_month +generated/pandas.PeriodIndex.daysinmonth,../reference/api/pandas.PeriodIndex.daysinmonth +generated/pandas.PeriodIndex.end_time,../reference/api/pandas.PeriodIndex.end_time +generated/pandas.PeriodIndex.freq,../reference/api/pandas.PeriodIndex.freq +generated/pandas.PeriodIndex.freqstr,../reference/api/pandas.PeriodIndex.freqstr +generated/pandas.PeriodIndex.hour,../reference/api/pandas.PeriodIndex.hour +generated/pandas.PeriodIndex,../reference/api/pandas.PeriodIndex +generated/pandas.PeriodIndex.is_leap_year,../reference/api/pandas.PeriodIndex.is_leap_year +generated/pandas.PeriodIndex.minute,../reference/api/pandas.PeriodIndex.minute +generated/pandas.PeriodIndex.month,../reference/api/pandas.PeriodIndex.month +generated/pandas.PeriodIndex.quarter,../reference/api/pandas.PeriodIndex.quarter +generated/pandas.PeriodIndex.qyear,../reference/api/pandas.PeriodIndex.qyear +generated/pandas.PeriodIndex.second,../reference/api/pandas.PeriodIndex.second +generated/pandas.PeriodIndex.start_time,../reference/api/pandas.PeriodIndex.start_time +generated/pandas.PeriodIndex.strftime,../reference/api/pandas.PeriodIndex.strftime +generated/pandas.PeriodIndex.to_timestamp,../reference/api/pandas.PeriodIndex.to_timestamp +generated/pandas.PeriodIndex.weekday,../reference/api/pandas.PeriodIndex.weekday +generated/pandas.PeriodIndex.week,../reference/api/pandas.PeriodIndex.week +generated/pandas.PeriodIndex.weekofyear,../reference/api/pandas.PeriodIndex.weekofyear +generated/pandas.PeriodIndex.year,../reference/api/pandas.PeriodIndex.year 
+generated/pandas.Period.is_leap_year,../reference/api/pandas.Period.is_leap_year +generated/pandas.Period.minute,../reference/api/pandas.Period.minute +generated/pandas.Period.month,../reference/api/pandas.Period.month +generated/pandas.Period.now,../reference/api/pandas.Period.now +generated/pandas.Period.ordinal,../reference/api/pandas.Period.ordinal +generated/pandas.Period.quarter,../reference/api/pandas.Period.quarter +generated/pandas.Period.qyear,../reference/api/pandas.Period.qyear +generated/pandas.period_range,../reference/api/pandas.period_range +generated/pandas.Period.second,../reference/api/pandas.Period.second +generated/pandas.Period.start_time,../reference/api/pandas.Period.start_time +generated/pandas.Period.strftime,../reference/api/pandas.Period.strftime +generated/pandas.Period.to_timestamp,../reference/api/pandas.Period.to_timestamp +generated/pandas.Period.weekday,../reference/api/pandas.Period.weekday +generated/pandas.Period.week,../reference/api/pandas.Period.week +generated/pandas.Period.weekofyear,../reference/api/pandas.Period.weekofyear +generated/pandas.Period.year,../reference/api/pandas.Period.year +generated/pandas.pivot,../reference/api/pandas.pivot +generated/pandas.pivot_table,../reference/api/pandas.pivot_table +generated/pandas.plotting.andrews_curves,../reference/api/pandas.plotting.andrews_curves +generated/pandas.plotting.bootstrap_plot,../reference/api/pandas.plotting.bootstrap_plot +generated/pandas.plotting.deregister_matplotlib_converters,../reference/api/pandas.plotting.deregister_matplotlib_converters +generated/pandas.plotting.lag_plot,../reference/api/pandas.plotting.lag_plot +generated/pandas.plotting.parallel_coordinates,../reference/api/pandas.plotting.parallel_coordinates +generated/pandas.plotting.radviz,../reference/api/pandas.plotting.radviz +generated/pandas.plotting.register_matplotlib_converters,../reference/api/pandas.plotting.register_matplotlib_converters 
+generated/pandas.plotting.scatter_matrix,../reference/api/pandas.plotting.scatter_matrix +generated/pandas.qcut,../reference/api/pandas.qcut +generated/pandas.RangeIndex.from_range,../reference/api/pandas.RangeIndex.from_range +generated/pandas.RangeIndex,../reference/api/pandas.RangeIndex +generated/pandas.read_clipboard,../reference/api/pandas.read_clipboard +generated/pandas.read_csv,../reference/api/pandas.read_csv +generated/pandas.read_excel,../reference/api/pandas.read_excel +generated/pandas.read_feather,../reference/api/pandas.read_feather +generated/pandas.read_fwf,../reference/api/pandas.read_fwf +generated/pandas.read_gbq,../reference/api/pandas.read_gbq +generated/pandas.read_hdf,../reference/api/pandas.read_hdf +generated/pandas.read,../reference/api/pandas.read +generated/pandas.read_json,../reference/api/pandas.read_json +generated/pandas.read_msgpack,../reference/api/pandas.read_msgpack +generated/pandas.read_parquet,../reference/api/pandas.read_parquet +generated/pandas.read_pickle,../reference/api/pandas.read_pickle +generated/pandas.read_sas,../reference/api/pandas.read_sas +generated/pandas.read_sql,../reference/api/pandas.read_sql +generated/pandas.read_sql_query,../reference/api/pandas.read_sql_query +generated/pandas.read_sql_table,../reference/api/pandas.read_sql_table +generated/pandas.read_stata,../reference/api/pandas.read_stata +generated/pandas.read_table,../reference/api/pandas.read_table +generated/pandas.reset_option,../reference/api/pandas.reset_option +generated/pandas.Series.abs,../reference/api/pandas.Series.abs +generated/pandas.Series.add,../reference/api/pandas.Series.add +generated/pandas.Series.add_prefix,../reference/api/pandas.Series.add_prefix +generated/pandas.Series.add_suffix,../reference/api/pandas.Series.add_suffix +generated/pandas.Series.agg,../reference/api/pandas.Series.agg +generated/pandas.Series.aggregate,../reference/api/pandas.Series.aggregate 
+generated/pandas.Series.align,../reference/api/pandas.Series.align +generated/pandas.Series.all,../reference/api/pandas.Series.all +generated/pandas.Series.any,../reference/api/pandas.Series.any +generated/pandas.Series.append,../reference/api/pandas.Series.append +generated/pandas.Series.apply,../reference/api/pandas.Series.apply +generated/pandas.Series.argmax,../reference/api/pandas.Series.argmax +generated/pandas.Series.argmin,../reference/api/pandas.Series.argmin +generated/pandas.Series.argsort,../reference/api/pandas.Series.argsort +generated/pandas.Series.__array__,../reference/api/pandas.Series.__array__ +generated/pandas.Series.array,../reference/api/pandas.Series.array +generated/pandas.Series.as_blocks,../reference/api/pandas.Series.as_blocks +generated/pandas.Series.asfreq,../reference/api/pandas.Series.asfreq +generated/pandas.Series.as_matrix,../reference/api/pandas.Series.as_matrix +generated/pandas.Series.asobject,../reference/api/pandas.Series.asobject +generated/pandas.Series.asof,../reference/api/pandas.Series.asof +generated/pandas.Series.astype,../reference/api/pandas.Series.astype +generated/pandas.Series.at,../reference/api/pandas.Series.at +generated/pandas.Series.at_time,../reference/api/pandas.Series.at_time +generated/pandas.Series.autocorr,../reference/api/pandas.Series.autocorr +generated/pandas.Series.axes,../reference/api/pandas.Series.axes +generated/pandas.Series.base,../reference/api/pandas.Series.base +generated/pandas.Series.between,../reference/api/pandas.Series.between +generated/pandas.Series.between_time,../reference/api/pandas.Series.between_time +generated/pandas.Series.bfill,../reference/api/pandas.Series.bfill +generated/pandas.Series.blocks,../reference/api/pandas.Series.blocks +generated/pandas.Series.bool,../reference/api/pandas.Series.bool +generated/pandas.Series.cat.add_categories,../reference/api/pandas.Series.cat.add_categories +generated/pandas.Series.cat.as_ordered,../reference/api/pandas.Series.cat.as_ordered 
+generated/pandas.Series.cat.as_unordered,../reference/api/pandas.Series.cat.as_unordered +generated/pandas.Series.cat.categories,../reference/api/pandas.Series.cat.categories +generated/pandas.Series.cat.codes,../reference/api/pandas.Series.cat.codes +generated/pandas.Series.cat,../reference/api/pandas.Series.cat +generated/pandas.Series.cat.ordered,../reference/api/pandas.Series.cat.ordered +generated/pandas.Series.cat.remove_categories,../reference/api/pandas.Series.cat.remove_categories +generated/pandas.Series.cat.remove_unused_categories,../reference/api/pandas.Series.cat.remove_unused_categories +generated/pandas.Series.cat.rename_categories,../reference/api/pandas.Series.cat.rename_categories +generated/pandas.Series.cat.reorder_categories,../reference/api/pandas.Series.cat.reorder_categories +generated/pandas.Series.cat.set_categories,../reference/api/pandas.Series.cat.set_categories +generated/pandas.Series.clip,../reference/api/pandas.Series.clip +generated/pandas.Series.clip_lower,../reference/api/pandas.Series.clip_lower +generated/pandas.Series.clip_upper,../reference/api/pandas.Series.clip_upper +generated/pandas.Series.combine_first,../reference/api/pandas.Series.combine_first +generated/pandas.Series.combine,../reference/api/pandas.Series.combine +generated/pandas.Series.compound,../reference/api/pandas.Series.compound +generated/pandas.Series.compress,../reference/api/pandas.Series.compress +generated/pandas.Series.convert_objects,../reference/api/pandas.Series.convert_objects +generated/pandas.Series.copy,../reference/api/pandas.Series.copy +generated/pandas.Series.corr,../reference/api/pandas.Series.corr +generated/pandas.Series.count,../reference/api/pandas.Series.count +generated/pandas.Series.cov,../reference/api/pandas.Series.cov +generated/pandas.Series.cummax,../reference/api/pandas.Series.cummax +generated/pandas.Series.cummin,../reference/api/pandas.Series.cummin +generated/pandas.Series.cumprod,../reference/api/pandas.Series.cumprod 
+generated/pandas.Series.cumsum,../reference/api/pandas.Series.cumsum +generated/pandas.Series.data,../reference/api/pandas.Series.data +generated/pandas.Series.describe,../reference/api/pandas.Series.describe +generated/pandas.Series.diff,../reference/api/pandas.Series.diff +generated/pandas.Series.div,../reference/api/pandas.Series.div +generated/pandas.Series.divide,../reference/api/pandas.Series.divide +generated/pandas.Series.divmod,../reference/api/pandas.Series.divmod +generated/pandas.Series.dot,../reference/api/pandas.Series.dot +generated/pandas.Series.drop_duplicates,../reference/api/pandas.Series.drop_duplicates +generated/pandas.Series.drop,../reference/api/pandas.Series.drop +generated/pandas.Series.droplevel,../reference/api/pandas.Series.droplevel +generated/pandas.Series.dropna,../reference/api/pandas.Series.dropna +generated/pandas.Series.dt.ceil,../reference/api/pandas.Series.dt.ceil +generated/pandas.Series.dt.components,../reference/api/pandas.Series.dt.components +generated/pandas.Series.dt.date,../reference/api/pandas.Series.dt.date +generated/pandas.Series.dt.day,../reference/api/pandas.Series.dt.day +generated/pandas.Series.dt.day_name,../reference/api/pandas.Series.dt.day_name +generated/pandas.Series.dt.dayofweek,../reference/api/pandas.Series.dt.dayofweek +generated/pandas.Series.dt.dayofyear,../reference/api/pandas.Series.dt.dayofyear +generated/pandas.Series.dt.days,../reference/api/pandas.Series.dt.days +generated/pandas.Series.dt.days_in_month,../reference/api/pandas.Series.dt.days_in_month +generated/pandas.Series.dt.daysinmonth,../reference/api/pandas.Series.dt.daysinmonth +generated/pandas.Series.dt.end_time,../reference/api/pandas.Series.dt.end_time +generated/pandas.Series.dt.floor,../reference/api/pandas.Series.dt.floor +generated/pandas.Series.dt.freq,../reference/api/pandas.Series.dt.freq +generated/pandas.Series.dt.hour,../reference/api/pandas.Series.dt.hour +generated/pandas.Series.dt,../reference/api/pandas.Series.dt 
+generated/pandas.Series.dt.is_leap_year,../reference/api/pandas.Series.dt.is_leap_year +generated/pandas.Series.dt.is_month_end,../reference/api/pandas.Series.dt.is_month_end +generated/pandas.Series.dt.is_month_start,../reference/api/pandas.Series.dt.is_month_start +generated/pandas.Series.dt.is_quarter_end,../reference/api/pandas.Series.dt.is_quarter_end +generated/pandas.Series.dt.is_quarter_start,../reference/api/pandas.Series.dt.is_quarter_start +generated/pandas.Series.dt.is_year_end,../reference/api/pandas.Series.dt.is_year_end +generated/pandas.Series.dt.is_year_start,../reference/api/pandas.Series.dt.is_year_start +generated/pandas.Series.dt.microsecond,../reference/api/pandas.Series.dt.microsecond +generated/pandas.Series.dt.microseconds,../reference/api/pandas.Series.dt.microseconds +generated/pandas.Series.dt.minute,../reference/api/pandas.Series.dt.minute +generated/pandas.Series.dt.month,../reference/api/pandas.Series.dt.month +generated/pandas.Series.dt.month_name,../reference/api/pandas.Series.dt.month_name +generated/pandas.Series.dt.nanosecond,../reference/api/pandas.Series.dt.nanosecond +generated/pandas.Series.dt.nanoseconds,../reference/api/pandas.Series.dt.nanoseconds +generated/pandas.Series.dt.normalize,../reference/api/pandas.Series.dt.normalize +generated/pandas.Series.dt.quarter,../reference/api/pandas.Series.dt.quarter +generated/pandas.Series.dt.qyear,../reference/api/pandas.Series.dt.qyear +generated/pandas.Series.dt.round,../reference/api/pandas.Series.dt.round +generated/pandas.Series.dt.second,../reference/api/pandas.Series.dt.second +generated/pandas.Series.dt.seconds,../reference/api/pandas.Series.dt.seconds +generated/pandas.Series.dt.start_time,../reference/api/pandas.Series.dt.start_time +generated/pandas.Series.dt.strftime,../reference/api/pandas.Series.dt.strftime +generated/pandas.Series.dt.time,../reference/api/pandas.Series.dt.time +generated/pandas.Series.dt.timetz,../reference/api/pandas.Series.dt.timetz 
+generated/pandas.Series.dt.to_period,../reference/api/pandas.Series.dt.to_period +generated/pandas.Series.dt.to_pydatetime,../reference/api/pandas.Series.dt.to_pydatetime +generated/pandas.Series.dt.to_pytimedelta,../reference/api/pandas.Series.dt.to_pytimedelta +generated/pandas.Series.dt.total_seconds,../reference/api/pandas.Series.dt.total_seconds +generated/pandas.Series.dt.tz_convert,../reference/api/pandas.Series.dt.tz_convert +generated/pandas.Series.dt.tz,../reference/api/pandas.Series.dt.tz +generated/pandas.Series.dt.tz_localize,../reference/api/pandas.Series.dt.tz_localize +generated/pandas.Series.dt.weekday,../reference/api/pandas.Series.dt.weekday +generated/pandas.Series.dt.week,../reference/api/pandas.Series.dt.week +generated/pandas.Series.dt.weekofyear,../reference/api/pandas.Series.dt.weekofyear +generated/pandas.Series.dt.year,../reference/api/pandas.Series.dt.year +generated/pandas.Series.dtype,../reference/api/pandas.Series.dtype +generated/pandas.Series.dtypes,../reference/api/pandas.Series.dtypes +generated/pandas.Series.duplicated,../reference/api/pandas.Series.duplicated +generated/pandas.Series.empty,../reference/api/pandas.Series.empty +generated/pandas.Series.eq,../reference/api/pandas.Series.eq +generated/pandas.Series.equals,../reference/api/pandas.Series.equals +generated/pandas.Series.ewm,../reference/api/pandas.Series.ewm +generated/pandas.Series.expanding,../reference/api/pandas.Series.expanding +generated/pandas.Series.factorize,../reference/api/pandas.Series.factorize +generated/pandas.Series.ffill,../reference/api/pandas.Series.ffill +generated/pandas.Series.fillna,../reference/api/pandas.Series.fillna +generated/pandas.Series.filter,../reference/api/pandas.Series.filter +generated/pandas.Series.first,../reference/api/pandas.Series.first +generated/pandas.Series.first_valid_index,../reference/api/pandas.Series.first_valid_index +generated/pandas.Series.flags,../reference/api/pandas.Series.flags 
+generated/pandas.Series.floordiv,../reference/api/pandas.Series.floordiv +generated/pandas.Series.from_array,../reference/api/pandas.Series.from_array +generated/pandas.Series.from_csv,../reference/api/pandas.Series.from_csv +generated/pandas.Series.ftype,../reference/api/pandas.Series.ftype +generated/pandas.Series.ftypes,../reference/api/pandas.Series.ftypes +generated/pandas.Series.ge,../reference/api/pandas.Series.ge +generated/pandas.Series.get_dtype_counts,../reference/api/pandas.Series.get_dtype_counts +generated/pandas.Series.get_ftype_counts,../reference/api/pandas.Series.get_ftype_counts +generated/pandas.Series.get,../reference/api/pandas.Series.get +generated/pandas.Series.get_value,../reference/api/pandas.Series.get_value +generated/pandas.Series.get_values,../reference/api/pandas.Series.get_values +generated/pandas.Series.groupby,../reference/api/pandas.Series.groupby +generated/pandas.Series.gt,../reference/api/pandas.Series.gt +generated/pandas.Series.hasnans,../reference/api/pandas.Series.hasnans +generated/pandas.Series.head,../reference/api/pandas.Series.head +generated/pandas.Series.hist,../reference/api/pandas.Series.hist +generated/pandas.Series,../reference/api/pandas.Series +generated/pandas.Series.iat,../reference/api/pandas.Series.iat +generated/pandas.Series.idxmax,../reference/api/pandas.Series.idxmax +generated/pandas.Series.idxmin,../reference/api/pandas.Series.idxmin +generated/pandas.Series.iloc,../reference/api/pandas.Series.iloc +generated/pandas.Series.imag,../reference/api/pandas.Series.imag +generated/pandas.Series.index,../reference/api/pandas.Series.index +generated/pandas.Series.infer_objects,../reference/api/pandas.Series.infer_objects +generated/pandas.Series.interpolate,../reference/api/pandas.Series.interpolate +generated/pandas.Series.is_copy,../reference/api/pandas.Series.is_copy +generated/pandas.Series.isin,../reference/api/pandas.Series.isin 
+generated/pandas.Series.is_monotonic_decreasing,../reference/api/pandas.Series.is_monotonic_decreasing +generated/pandas.Series.is_monotonic,../reference/api/pandas.Series.is_monotonic +generated/pandas.Series.is_monotonic_increasing,../reference/api/pandas.Series.is_monotonic_increasing +generated/pandas.Series.isna,../reference/api/pandas.Series.isna +generated/pandas.Series.isnull,../reference/api/pandas.Series.isnull +generated/pandas.Series.is_unique,../reference/api/pandas.Series.is_unique +generated/pandas.Series.item,../reference/api/pandas.Series.item +generated/pandas.Series.items,../reference/api/pandas.Series.items +generated/pandas.Series.itemsize,../reference/api/pandas.Series.itemsize +generated/pandas.Series.__iter__,../reference/api/pandas.Series.__iter__ +generated/pandas.Series.iteritems,../reference/api/pandas.Series.iteritems +generated/pandas.Series.ix,../reference/api/pandas.Series.ix +generated/pandas.Series.keys,../reference/api/pandas.Series.keys +generated/pandas.Series.kurt,../reference/api/pandas.Series.kurt +generated/pandas.Series.kurtosis,../reference/api/pandas.Series.kurtosis +generated/pandas.Series.last,../reference/api/pandas.Series.last +generated/pandas.Series.last_valid_index,../reference/api/pandas.Series.last_valid_index +generated/pandas.Series.le,../reference/api/pandas.Series.le +generated/pandas.Series.loc,../reference/api/pandas.Series.loc +generated/pandas.Series.lt,../reference/api/pandas.Series.lt +generated/pandas.Series.mad,../reference/api/pandas.Series.mad +generated/pandas.Series.map,../reference/api/pandas.Series.map +generated/pandas.Series.mask,../reference/api/pandas.Series.mask +generated/pandas.Series.max,../reference/api/pandas.Series.max +generated/pandas.Series.mean,../reference/api/pandas.Series.mean +generated/pandas.Series.median,../reference/api/pandas.Series.median +generated/pandas.Series.memory_usage,../reference/api/pandas.Series.memory_usage 
+generated/pandas.Series.min,../reference/api/pandas.Series.min +generated/pandas.Series.mode,../reference/api/pandas.Series.mode +generated/pandas.Series.mod,../reference/api/pandas.Series.mod +generated/pandas.Series.mul,../reference/api/pandas.Series.mul +generated/pandas.Series.multiply,../reference/api/pandas.Series.multiply +generated/pandas.Series.name,../reference/api/pandas.Series.name +generated/pandas.Series.nbytes,../reference/api/pandas.Series.nbytes +generated/pandas.Series.ndim,../reference/api/pandas.Series.ndim +generated/pandas.Series.ne,../reference/api/pandas.Series.ne +generated/pandas.Series.nlargest,../reference/api/pandas.Series.nlargest +generated/pandas.Series.nonzero,../reference/api/pandas.Series.nonzero +generated/pandas.Series.notna,../reference/api/pandas.Series.notna +generated/pandas.Series.notnull,../reference/api/pandas.Series.notnull +generated/pandas.Series.nsmallest,../reference/api/pandas.Series.nsmallest +generated/pandas.Series.nunique,../reference/api/pandas.Series.nunique +generated/pandas.Series.pct_change,../reference/api/pandas.Series.pct_change +generated/pandas.Series.pipe,../reference/api/pandas.Series.pipe +generated/pandas.Series.plot.area,../reference/api/pandas.Series.plot.area +generated/pandas.Series.plot.barh,../reference/api/pandas.Series.plot.barh +generated/pandas.Series.plot.bar,../reference/api/pandas.Series.plot.bar +generated/pandas.Series.plot.box,../reference/api/pandas.Series.plot.box +generated/pandas.Series.plot.density,../reference/api/pandas.Series.plot.density +generated/pandas.Series.plot.hist,../reference/api/pandas.Series.plot.hist +generated/pandas.Series.plot,../reference/api/pandas.Series.plot +generated/pandas.Series.plot.kde,../reference/api/pandas.Series.plot.kde +generated/pandas.Series.plot.line,../reference/api/pandas.Series.plot.line +generated/pandas.Series.plot.pie,../reference/api/pandas.Series.plot.pie +generated/pandas.Series.pop,../reference/api/pandas.Series.pop 
+generated/pandas.Series.pow,../reference/api/pandas.Series.pow +generated/pandas.Series.prod,../reference/api/pandas.Series.prod +generated/pandas.Series.product,../reference/api/pandas.Series.product +generated/pandas.Series.ptp,../reference/api/pandas.Series.ptp +generated/pandas.Series.put,../reference/api/pandas.Series.put +generated/pandas.Series.quantile,../reference/api/pandas.Series.quantile +generated/pandas.Series.radd,../reference/api/pandas.Series.radd +generated/pandas.Series.rank,../reference/api/pandas.Series.rank +generated/pandas.Series.ravel,../reference/api/pandas.Series.ravel +generated/pandas.Series.rdiv,../reference/api/pandas.Series.rdiv +generated/pandas.Series.rdivmod,../reference/api/pandas.Series.rdivmod +generated/pandas.Series.real,../reference/api/pandas.Series.real +generated/pandas.Series.reindex_axis,../reference/api/pandas.Series.reindex_axis +generated/pandas.Series.reindex,../reference/api/pandas.Series.reindex +generated/pandas.Series.reindex_like,../reference/api/pandas.Series.reindex_like +generated/pandas.Series.rename_axis,../reference/api/pandas.Series.rename_axis +generated/pandas.Series.rename,../reference/api/pandas.Series.rename +generated/pandas.Series.reorder_levels,../reference/api/pandas.Series.reorder_levels +generated/pandas.Series.repeat,../reference/api/pandas.Series.repeat +generated/pandas.Series.replace,../reference/api/pandas.Series.replace +generated/pandas.Series.resample,../reference/api/pandas.Series.resample +generated/pandas.Series.reset_index,../reference/api/pandas.Series.reset_index +generated/pandas.Series.rfloordiv,../reference/api/pandas.Series.rfloordiv +generated/pandas.Series.rmod,../reference/api/pandas.Series.rmod +generated/pandas.Series.rmul,../reference/api/pandas.Series.rmul +generated/pandas.Series.rolling,../reference/api/pandas.Series.rolling +generated/pandas.Series.round,../reference/api/pandas.Series.round +generated/pandas.Series.rpow,../reference/api/pandas.Series.rpow 
+generated/pandas.Series.rsub,../reference/api/pandas.Series.rsub +generated/pandas.Series.rtruediv,../reference/api/pandas.Series.rtruediv +generated/pandas.Series.sample,../reference/api/pandas.Series.sample +generated/pandas.Series.searchsorted,../reference/api/pandas.Series.searchsorted +generated/pandas.Series.select,../reference/api/pandas.Series.select +generated/pandas.Series.sem,../reference/api/pandas.Series.sem +generated/pandas.Series.set_axis,../reference/api/pandas.Series.set_axis +generated/pandas.Series.set_value,../reference/api/pandas.Series.set_value +generated/pandas.Series.shape,../reference/api/pandas.Series.shape +generated/pandas.Series.shift,../reference/api/pandas.Series.shift +generated/pandas.Series.size,../reference/api/pandas.Series.size +generated/pandas.Series.skew,../reference/api/pandas.Series.skew +generated/pandas.Series.slice_shift,../reference/api/pandas.Series.slice_shift +generated/pandas.Series.sort_index,../reference/api/pandas.Series.sort_index +generated/pandas.Series.sort_values,../reference/api/pandas.Series.sort_values +generated/pandas.Series.sparse.density,../reference/api/pandas.Series.sparse.density +generated/pandas.Series.sparse.fill_value,../reference/api/pandas.Series.sparse.fill_value +generated/pandas.Series.sparse.from_coo,../reference/api/pandas.Series.sparse.from_coo +generated/pandas.Series.sparse.npoints,../reference/api/pandas.Series.sparse.npoints +generated/pandas.Series.sparse.sp_values,../reference/api/pandas.Series.sparse.sp_values +generated/pandas.Series.sparse.to_coo,../reference/api/pandas.Series.sparse.to_coo +generated/pandas.Series.squeeze,../reference/api/pandas.Series.squeeze +generated/pandas.Series.std,../reference/api/pandas.Series.std +generated/pandas.Series.str.capitalize,../reference/api/pandas.Series.str.capitalize +generated/pandas.Series.str.cat,../reference/api/pandas.Series.str.cat +generated/pandas.Series.str.center,../reference/api/pandas.Series.str.center 
+generated/pandas.Series.str.contains,../reference/api/pandas.Series.str.contains +generated/pandas.Series.str.count,../reference/api/pandas.Series.str.count +generated/pandas.Series.str.decode,../reference/api/pandas.Series.str.decode +generated/pandas.Series.str.encode,../reference/api/pandas.Series.str.encode +generated/pandas.Series.str.endswith,../reference/api/pandas.Series.str.endswith +generated/pandas.Series.str.extractall,../reference/api/pandas.Series.str.extractall +generated/pandas.Series.str.extract,../reference/api/pandas.Series.str.extract +generated/pandas.Series.str.findall,../reference/api/pandas.Series.str.findall +generated/pandas.Series.str.find,../reference/api/pandas.Series.str.find +generated/pandas.Series.str.get_dummies,../reference/api/pandas.Series.str.get_dummies +generated/pandas.Series.str.get,../reference/api/pandas.Series.str.get +generated/pandas.Series.str,../reference/api/pandas.Series.str +generated/pandas.Series.strides,../reference/api/pandas.Series.strides +generated/pandas.Series.str.index,../reference/api/pandas.Series.str.index +generated/pandas.Series.str.isalnum,../reference/api/pandas.Series.str.isalnum +generated/pandas.Series.str.isalpha,../reference/api/pandas.Series.str.isalpha +generated/pandas.Series.str.isdecimal,../reference/api/pandas.Series.str.isdecimal +generated/pandas.Series.str.isdigit,../reference/api/pandas.Series.str.isdigit +generated/pandas.Series.str.islower,../reference/api/pandas.Series.str.islower +generated/pandas.Series.str.isnumeric,../reference/api/pandas.Series.str.isnumeric +generated/pandas.Series.str.isspace,../reference/api/pandas.Series.str.isspace +generated/pandas.Series.str.istitle,../reference/api/pandas.Series.str.istitle +generated/pandas.Series.str.isupper,../reference/api/pandas.Series.str.isupper +generated/pandas.Series.str.join,../reference/api/pandas.Series.str.join +generated/pandas.Series.str.len,../reference/api/pandas.Series.str.len 
+generated/pandas.Series.str.ljust,../reference/api/pandas.Series.str.ljust +generated/pandas.Series.str.lower,../reference/api/pandas.Series.str.lower +generated/pandas.Series.str.lstrip,../reference/api/pandas.Series.str.lstrip +generated/pandas.Series.str.match,../reference/api/pandas.Series.str.match +generated/pandas.Series.str.normalize,../reference/api/pandas.Series.str.normalize +generated/pandas.Series.str.pad,../reference/api/pandas.Series.str.pad +generated/pandas.Series.str.partition,../reference/api/pandas.Series.str.partition +generated/pandas.Series.str.repeat,../reference/api/pandas.Series.str.repeat +generated/pandas.Series.str.replace,../reference/api/pandas.Series.str.replace +generated/pandas.Series.str.rfind,../reference/api/pandas.Series.str.rfind +generated/pandas.Series.str.rindex,../reference/api/pandas.Series.str.rindex +generated/pandas.Series.str.rjust,../reference/api/pandas.Series.str.rjust +generated/pandas.Series.str.rpartition,../reference/api/pandas.Series.str.rpartition +generated/pandas.Series.str.rsplit,../reference/api/pandas.Series.str.rsplit +generated/pandas.Series.str.rstrip,../reference/api/pandas.Series.str.rstrip +generated/pandas.Series.str.slice,../reference/api/pandas.Series.str.slice +generated/pandas.Series.str.slice_replace,../reference/api/pandas.Series.str.slice_replace +generated/pandas.Series.str.split,../reference/api/pandas.Series.str.split +generated/pandas.Series.str.startswith,../reference/api/pandas.Series.str.startswith +generated/pandas.Series.str.strip,../reference/api/pandas.Series.str.strip +generated/pandas.Series.str.swapcase,../reference/api/pandas.Series.str.swapcase +generated/pandas.Series.str.title,../reference/api/pandas.Series.str.title +generated/pandas.Series.str.translate,../reference/api/pandas.Series.str.translate +generated/pandas.Series.str.upper,../reference/api/pandas.Series.str.upper +generated/pandas.Series.str.wrap,../reference/api/pandas.Series.str.wrap 
+generated/pandas.Series.str.zfill,../reference/api/pandas.Series.str.zfill +generated/pandas.Series.sub,../reference/api/pandas.Series.sub +generated/pandas.Series.subtract,../reference/api/pandas.Series.subtract +generated/pandas.Series.sum,../reference/api/pandas.Series.sum +generated/pandas.Series.swapaxes,../reference/api/pandas.Series.swapaxes +generated/pandas.Series.swaplevel,../reference/api/pandas.Series.swaplevel +generated/pandas.Series.tail,../reference/api/pandas.Series.tail +generated/pandas.Series.take,../reference/api/pandas.Series.take +generated/pandas.Series.T,../reference/api/pandas.Series.T +generated/pandas.Series.timetuple,../reference/api/pandas.Series.timetuple +generated/pandas.Series.to_clipboard,../reference/api/pandas.Series.to_clipboard +generated/pandas.Series.to_csv,../reference/api/pandas.Series.to_csv +generated/pandas.Series.to_dense,../reference/api/pandas.Series.to_dense +generated/pandas.Series.to_dict,../reference/api/pandas.Series.to_dict +generated/pandas.Series.to_excel,../reference/api/pandas.Series.to_excel +generated/pandas.Series.to_frame,../reference/api/pandas.Series.to_frame +generated/pandas.Series.to_hdf,../reference/api/pandas.Series.to_hdf +generated/pandas.Series.to_json,../reference/api/pandas.Series.to_json +generated/pandas.Series.to_latex,../reference/api/pandas.Series.to_latex +generated/pandas.Series.to_list,../reference/api/pandas.Series.to_list +generated/pandas.Series.tolist,../reference/api/pandas.Series.tolist +generated/pandas.Series.to_msgpack,../reference/api/pandas.Series.to_msgpack +generated/pandas.Series.to_numpy,../reference/api/pandas.Series.to_numpy +generated/pandas.Series.to_period,../reference/api/pandas.Series.to_period +generated/pandas.Series.to_pickle,../reference/api/pandas.Series.to_pickle +generated/pandas.Series.to_sparse,../reference/api/pandas.Series.to_sparse +generated/pandas.Series.to_sql,../reference/api/pandas.Series.to_sql 
+generated/pandas.Series.to_string,../reference/api/pandas.Series.to_string +generated/pandas.Series.to_timestamp,../reference/api/pandas.Series.to_timestamp +generated/pandas.Series.to_xarray,../reference/api/pandas.Series.to_xarray +generated/pandas.Series.transform,../reference/api/pandas.Series.transform +generated/pandas.Series.transpose,../reference/api/pandas.Series.transpose +generated/pandas.Series.truediv,../reference/api/pandas.Series.truediv +generated/pandas.Series.truncate,../reference/api/pandas.Series.truncate +generated/pandas.Series.tshift,../reference/api/pandas.Series.tshift +generated/pandas.Series.tz_convert,../reference/api/pandas.Series.tz_convert +generated/pandas.Series.tz_localize,../reference/api/pandas.Series.tz_localize +generated/pandas.Series.unique,../reference/api/pandas.Series.unique +generated/pandas.Series.unstack,../reference/api/pandas.Series.unstack +generated/pandas.Series.update,../reference/api/pandas.Series.update +generated/pandas.Series.valid,../reference/api/pandas.Series.valid +generated/pandas.Series.value_counts,../reference/api/pandas.Series.value_counts +generated/pandas.Series.values,../reference/api/pandas.Series.values +generated/pandas.Series.var,../reference/api/pandas.Series.var +generated/pandas.Series.view,../reference/api/pandas.Series.view +generated/pandas.Series.where,../reference/api/pandas.Series.where +generated/pandas.Series.xs,../reference/api/pandas.Series.xs +generated/pandas.set_option,../reference/api/pandas.set_option +generated/pandas.SparseDataFrame.to_coo,../reference/api/pandas.SparseDataFrame.to_coo +generated/pandas.SparseSeries.from_coo,../reference/api/pandas.SparseSeries.from_coo +generated/pandas.SparseSeries.to_coo,../reference/api/pandas.SparseSeries.to_coo +generated/pandas.test,../reference/api/pandas.test +generated/pandas.testing.assert_frame_equal,../reference/api/pandas.testing.assert_frame_equal 
+generated/pandas.testing.assert_index_equal,../reference/api/pandas.testing.assert_index_equal +generated/pandas.testing.assert_series_equal,../reference/api/pandas.testing.assert_series_equal +generated/pandas.Timedelta.asm8,../reference/api/pandas.Timedelta.asm8 +generated/pandas.Timedelta.ceil,../reference/api/pandas.Timedelta.ceil +generated/pandas.Timedelta.components,../reference/api/pandas.Timedelta.components +generated/pandas.Timedelta.days,../reference/api/pandas.Timedelta.days +generated/pandas.Timedelta.delta,../reference/api/pandas.Timedelta.delta +generated/pandas.Timedelta.floor,../reference/api/pandas.Timedelta.floor +generated/pandas.Timedelta.freq,../reference/api/pandas.Timedelta.freq +generated/pandas.Timedelta,../reference/api/pandas.Timedelta +generated/pandas.TimedeltaIndex.ceil,../reference/api/pandas.TimedeltaIndex.ceil +generated/pandas.TimedeltaIndex.components,../reference/api/pandas.TimedeltaIndex.components +generated/pandas.TimedeltaIndex.days,../reference/api/pandas.TimedeltaIndex.days +generated/pandas.TimedeltaIndex.floor,../reference/api/pandas.TimedeltaIndex.floor +generated/pandas.TimedeltaIndex,../reference/api/pandas.TimedeltaIndex +generated/pandas.TimedeltaIndex.inferred_freq,../reference/api/pandas.TimedeltaIndex.inferred_freq +generated/pandas.TimedeltaIndex.microseconds,../reference/api/pandas.TimedeltaIndex.microseconds +generated/pandas.TimedeltaIndex.nanoseconds,../reference/api/pandas.TimedeltaIndex.nanoseconds +generated/pandas.TimedeltaIndex.round,../reference/api/pandas.TimedeltaIndex.round +generated/pandas.TimedeltaIndex.seconds,../reference/api/pandas.TimedeltaIndex.seconds +generated/pandas.TimedeltaIndex.to_frame,../reference/api/pandas.TimedeltaIndex.to_frame +generated/pandas.TimedeltaIndex.to_pytimedelta,../reference/api/pandas.TimedeltaIndex.to_pytimedelta +generated/pandas.TimedeltaIndex.to_series,../reference/api/pandas.TimedeltaIndex.to_series 
+generated/pandas.Timedelta.isoformat,../reference/api/pandas.Timedelta.isoformat +generated/pandas.Timedelta.is_populated,../reference/api/pandas.Timedelta.is_populated +generated/pandas.Timedelta.max,../reference/api/pandas.Timedelta.max +generated/pandas.Timedelta.microseconds,../reference/api/pandas.Timedelta.microseconds +generated/pandas.Timedelta.min,../reference/api/pandas.Timedelta.min +generated/pandas.Timedelta.nanoseconds,../reference/api/pandas.Timedelta.nanoseconds +generated/pandas.timedelta_range,../reference/api/pandas.timedelta_range +generated/pandas.Timedelta.resolution,../reference/api/pandas.Timedelta.resolution +generated/pandas.Timedelta.round,../reference/api/pandas.Timedelta.round +generated/pandas.Timedelta.seconds,../reference/api/pandas.Timedelta.seconds +generated/pandas.Timedelta.to_pytimedelta,../reference/api/pandas.Timedelta.to_pytimedelta +generated/pandas.Timedelta.total_seconds,../reference/api/pandas.Timedelta.total_seconds +generated/pandas.Timedelta.to_timedelta64,../reference/api/pandas.Timedelta.to_timedelta64 +generated/pandas.Timedelta.value,../reference/api/pandas.Timedelta.value +generated/pandas.Timedelta.view,../reference/api/pandas.Timedelta.view +generated/pandas.Timestamp.asm8,../reference/api/pandas.Timestamp.asm8 +generated/pandas.Timestamp.astimezone,../reference/api/pandas.Timestamp.astimezone +generated/pandas.Timestamp.ceil,../reference/api/pandas.Timestamp.ceil +generated/pandas.Timestamp.combine,../reference/api/pandas.Timestamp.combine +generated/pandas.Timestamp.ctime,../reference/api/pandas.Timestamp.ctime +generated/pandas.Timestamp.date,../reference/api/pandas.Timestamp.date +generated/pandas.Timestamp.day,../reference/api/pandas.Timestamp.day +generated/pandas.Timestamp.day_name,../reference/api/pandas.Timestamp.day_name +generated/pandas.Timestamp.dayofweek,../reference/api/pandas.Timestamp.dayofweek +generated/pandas.Timestamp.dayofyear,../reference/api/pandas.Timestamp.dayofyear 
+generated/pandas.Timestamp.days_in_month,../reference/api/pandas.Timestamp.days_in_month +generated/pandas.Timestamp.daysinmonth,../reference/api/pandas.Timestamp.daysinmonth +generated/pandas.Timestamp.dst,../reference/api/pandas.Timestamp.dst +generated/pandas.Timestamp.floor,../reference/api/pandas.Timestamp.floor +generated/pandas.Timestamp.fold,../reference/api/pandas.Timestamp.fold +generated/pandas.Timestamp.freq,../reference/api/pandas.Timestamp.freq +generated/pandas.Timestamp.freqstr,../reference/api/pandas.Timestamp.freqstr +generated/pandas.Timestamp.fromisoformat,../reference/api/pandas.Timestamp.fromisoformat +generated/pandas.Timestamp.fromordinal,../reference/api/pandas.Timestamp.fromordinal +generated/pandas.Timestamp.fromtimestamp,../reference/api/pandas.Timestamp.fromtimestamp +generated/pandas.Timestamp.hour,../reference/api/pandas.Timestamp.hour +generated/pandas.Timestamp,../reference/api/pandas.Timestamp +generated/pandas.Timestamp.is_leap_year,../reference/api/pandas.Timestamp.is_leap_year +generated/pandas.Timestamp.is_month_end,../reference/api/pandas.Timestamp.is_month_end +generated/pandas.Timestamp.is_month_start,../reference/api/pandas.Timestamp.is_month_start +generated/pandas.Timestamp.isocalendar,../reference/api/pandas.Timestamp.isocalendar +generated/pandas.Timestamp.isoformat,../reference/api/pandas.Timestamp.isoformat +generated/pandas.Timestamp.isoweekday,../reference/api/pandas.Timestamp.isoweekday +generated/pandas.Timestamp.is_quarter_end,../reference/api/pandas.Timestamp.is_quarter_end +generated/pandas.Timestamp.is_quarter_start,../reference/api/pandas.Timestamp.is_quarter_start +generated/pandas.Timestamp.is_year_end,../reference/api/pandas.Timestamp.is_year_end +generated/pandas.Timestamp.is_year_start,../reference/api/pandas.Timestamp.is_year_start +generated/pandas.Timestamp.max,../reference/api/pandas.Timestamp.max +generated/pandas.Timestamp.microsecond,../reference/api/pandas.Timestamp.microsecond 
+generated/pandas.Timestamp.min,../reference/api/pandas.Timestamp.min +generated/pandas.Timestamp.minute,../reference/api/pandas.Timestamp.minute +generated/pandas.Timestamp.month,../reference/api/pandas.Timestamp.month +generated/pandas.Timestamp.month_name,../reference/api/pandas.Timestamp.month_name +generated/pandas.Timestamp.nanosecond,../reference/api/pandas.Timestamp.nanosecond +generated/pandas.Timestamp.normalize,../reference/api/pandas.Timestamp.normalize +generated/pandas.Timestamp.now,../reference/api/pandas.Timestamp.now +generated/pandas.Timestamp.quarter,../reference/api/pandas.Timestamp.quarter +generated/pandas.Timestamp.replace,../reference/api/pandas.Timestamp.replace +generated/pandas.Timestamp.resolution,../reference/api/pandas.Timestamp.resolution +generated/pandas.Timestamp.round,../reference/api/pandas.Timestamp.round +generated/pandas.Timestamp.second,../reference/api/pandas.Timestamp.second +generated/pandas.Timestamp.strftime,../reference/api/pandas.Timestamp.strftime +generated/pandas.Timestamp.strptime,../reference/api/pandas.Timestamp.strptime +generated/pandas.Timestamp.time,../reference/api/pandas.Timestamp.time +generated/pandas.Timestamp.timestamp,../reference/api/pandas.Timestamp.timestamp +generated/pandas.Timestamp.timetuple,../reference/api/pandas.Timestamp.timetuple +generated/pandas.Timestamp.timetz,../reference/api/pandas.Timestamp.timetz +generated/pandas.Timestamp.to_datetime64,../reference/api/pandas.Timestamp.to_datetime64 +generated/pandas.Timestamp.today,../reference/api/pandas.Timestamp.today +generated/pandas.Timestamp.to_julian_date,../reference/api/pandas.Timestamp.to_julian_date +generated/pandas.Timestamp.toordinal,../reference/api/pandas.Timestamp.toordinal +generated/pandas.Timestamp.to_period,../reference/api/pandas.Timestamp.to_period +generated/pandas.Timestamp.to_pydatetime,../reference/api/pandas.Timestamp.to_pydatetime +generated/pandas.Timestamp.tz_convert,../reference/api/pandas.Timestamp.tz_convert 
+generated/pandas.Timestamp.tz,../reference/api/pandas.Timestamp.tz +generated/pandas.Timestamp.tzinfo,../reference/api/pandas.Timestamp.tzinfo +generated/pandas.Timestamp.tz_localize,../reference/api/pandas.Timestamp.tz_localize +generated/pandas.Timestamp.tzname,../reference/api/pandas.Timestamp.tzname +generated/pandas.Timestamp.utcfromtimestamp,../reference/api/pandas.Timestamp.utcfromtimestamp +generated/pandas.Timestamp.utcnow,../reference/api/pandas.Timestamp.utcnow +generated/pandas.Timestamp.utcoffset,../reference/api/pandas.Timestamp.utcoffset +generated/pandas.Timestamp.utctimetuple,../reference/api/pandas.Timestamp.utctimetuple +generated/pandas.Timestamp.value,../reference/api/pandas.Timestamp.value +generated/pandas.Timestamp.weekday,../reference/api/pandas.Timestamp.weekday +generated/pandas.Timestamp.weekday_name,../reference/api/pandas.Timestamp.weekday_name +generated/pandas.Timestamp.week,../reference/api/pandas.Timestamp.week +generated/pandas.Timestamp.weekofyear,../reference/api/pandas.Timestamp.weekofyear +generated/pandas.Timestamp.year,../reference/api/pandas.Timestamp.year +generated/pandas.to_datetime,../reference/api/pandas.to_datetime +generated/pandas.to_numeric,../reference/api/pandas.to_numeric +generated/pandas.to_timedelta,../reference/api/pandas.to_timedelta +generated/pandas.tseries.frequencies.to_offset,../reference/api/pandas.tseries.frequencies.to_offset +generated/pandas.unique,../reference/api/pandas.unique +generated/pandas.util.hash_array,../reference/api/pandas.util.hash_array +generated/pandas.util.hash_pandas_object,../reference/api/pandas.util.hash_pandas_object +generated/pandas.wide_to_long,../reference/api/pandas.wide_to_long diff --git a/doc/source/api/scalars.rst b/doc/source/api/scalars.rst deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/doc/source/contributing.rst b/doc/source/development/contributing.rst similarity index 99% rename from doc/source/contributing.rst rename to 
doc/source/development/contributing.rst index a68e5c70087e9..c9d6845107dfc 100644 --- a/doc/source/contributing.rst +++ b/doc/source/development/contributing.rst @@ -698,7 +698,7 @@ A pull-request will be considered for merging when you have an all 'green' build then you will get a red 'X', where you can click through to see the individual failed tests. This is an example of a green build. -.. image:: _static/ci.png +.. image:: ../_static/ci.png .. note:: diff --git a/doc/source/contributing_docstring.rst b/doc/source/development/contributing_docstring.rst similarity index 100% rename from doc/source/contributing_docstring.rst rename to doc/source/development/contributing_docstring.rst diff --git a/doc/source/developer.rst b/doc/source/development/developer.rst similarity index 100% rename from doc/source/developer.rst rename to doc/source/development/developer.rst diff --git a/doc/source/extending.rst b/doc/source/development/extending.rst similarity index 100% rename from doc/source/extending.rst rename to doc/source/development/extending.rst diff --git a/doc/source/development/index.rst b/doc/source/development/index.rst new file mode 100644 index 0000000000000..d67a6c3a2ca04 --- /dev/null +++ b/doc/source/development/index.rst @@ -0,0 +1,15 @@ +{{ header }} + +.. _development: + +=========== +Development +=========== + +.. 
toctree:: + :maxdepth: 2 + + contributing + internals + extending + developer diff --git a/doc/source/internals.rst b/doc/source/development/internals.rst similarity index 100% rename from doc/source/internals.rst rename to doc/source/development/internals.rst diff --git a/doc/source/10min.rst b/doc/source/getting_started/10min.rst similarity index 100% rename from doc/source/10min.rst rename to doc/source/getting_started/10min.rst diff --git a/doc/source/basics.rst b/doc/source/getting_started/basics.rst similarity index 100% rename from doc/source/basics.rst rename to doc/source/getting_started/basics.rst diff --git a/doc/source/comparison_with_r.rst b/doc/source/getting_started/comparison/comparison_with_r.rst similarity index 100% rename from doc/source/comparison_with_r.rst rename to doc/source/getting_started/comparison/comparison_with_r.rst diff --git a/doc/source/comparison_with_sas.rst b/doc/source/getting_started/comparison/comparison_with_sas.rst similarity index 100% rename from doc/source/comparison_with_sas.rst rename to doc/source/getting_started/comparison/comparison_with_sas.rst diff --git a/doc/source/comparison_with_sql.rst b/doc/source/getting_started/comparison/comparison_with_sql.rst similarity index 100% rename from doc/source/comparison_with_sql.rst rename to doc/source/getting_started/comparison/comparison_with_sql.rst diff --git a/doc/source/comparison_with_stata.rst b/doc/source/getting_started/comparison/comparison_with_stata.rst similarity index 100% rename from doc/source/comparison_with_stata.rst rename to doc/source/getting_started/comparison/comparison_with_stata.rst diff --git a/doc/source/getting_started/comparison/index.rst b/doc/source/getting_started/comparison/index.rst new file mode 100644 index 0000000000000..998706ce0c639 --- /dev/null +++ b/doc/source/getting_started/comparison/index.rst @@ -0,0 +1,15 @@ +{{ header }} + +.. 
_comparison: + +=========================== +Comparison with other tools +=========================== + +.. toctree:: + :maxdepth: 2 + + comparison_with_r + comparison_with_sql + comparison_with_sas + comparison_with_stata diff --git a/doc/source/dsintro.rst b/doc/source/getting_started/dsintro.rst similarity index 100% rename from doc/source/dsintro.rst rename to doc/source/getting_started/dsintro.rst diff --git a/doc/source/getting_started/index.rst b/doc/source/getting_started/index.rst new file mode 100644 index 0000000000000..4c5d26461a667 --- /dev/null +++ b/doc/source/getting_started/index.rst @@ -0,0 +1,17 @@ +{{ header }} + +.. _getting_started: + +=============== +Getting started +=============== + +.. toctree:: + :maxdepth: 2 + + overview + 10min + basics + dsintro + comparison/index + tutorials diff --git a/doc/source/overview.rst b/doc/source/getting_started/overview.rst similarity index 50% rename from doc/source/overview.rst rename to doc/source/getting_started/overview.rst index b98e2d4b9963c..b531f686951fc 100644 --- a/doc/source/overview.rst +++ b/doc/source/getting_started/overview.rst @@ -6,25 +6,80 @@ Package overview **************** -:mod:`pandas` is an open source, BSD-licensed library providing high-performance, -easy-to-use data structures and data analysis tools for the `Python <https://www.python.org/>`__ -programming language. - -:mod:`pandas` consists of the following elements: - -* A set of labeled array data structures, the primary of which are - Series and DataFrame. -* Index objects enabling both simple axis indexing and multi-level / - hierarchical axis indexing. -* An integrated group by engine for aggregating and transforming data sets. -* Date range generation (date_range) and custom date offsets enabling the - implementation of customized frequencies. 
-* Input/Output tools: loading tabular data from flat files (CSV, delimited, - Excel 2003), and saving and loading pandas objects from the fast and - efficient PyTables/HDF5 format. -* Memory-efficient "sparse" versions of the standard data structures for storing - data that is mostly missing or mostly constant (some fixed value). -* Moving window statistics (rolling mean, rolling standard deviation, etc.). +**pandas** is a `Python <https://www.python.org>`__ package providing fast, +flexible, and expressive data structures designed to make working with +"relational" or "labeled" data both easy and intuitive. It aims to be the +fundamental high-level building block for doing practical, **real world** data +analysis in Python. Additionally, it has the broader goal of becoming **the +most powerful and flexible open source data analysis / manipulation tool +available in any language**. It is already well on its way toward this goal. + +pandas is well suited for many different kinds of data: + + - Tabular data with heterogeneously-typed columns, as in an SQL table or + Excel spreadsheet + - Ordered and unordered (not necessarily fixed-frequency) time series data. + - Arbitrary matrix data (homogeneously typed or heterogeneous) with row and + column labels + - Any other form of observational / statistical data sets. The data actually + need not be labeled at all to be placed into a pandas data structure + +The two primary data structures of pandas, :class:`Series` (1-dimensional) +and :class:`DataFrame` (2-dimensional), handle the vast majority of typical use +cases in finance, statistics, social science, and many areas of +engineering. For R users, :class:`DataFrame` provides everything that R's +``data.frame`` provides and much more. pandas is built on top of `NumPy +<https://www.numpy.org>`__ and is intended to integrate well within a scientific +computing environment with many other 3rd party libraries. 
+ +Here are just a few of the things that pandas does well: + + - Easy handling of **missing data** (represented as NaN) in floating point as + well as non-floating point data + - Size mutability: columns can be **inserted and deleted** from DataFrame and + higher dimensional objects + - Automatic and explicit **data alignment**: objects can be explicitly + aligned to a set of labels, or the user can simply ignore the labels and + let `Series`, `DataFrame`, etc. automatically align the data for you in + computations + - Powerful, flexible **group by** functionality to perform + split-apply-combine operations on data sets, for both aggregating and + transforming data + - Make it **easy to convert** ragged, differently-indexed data in other + Python and NumPy data structures into DataFrame objects + - Intelligent label-based **slicing**, **fancy indexing**, and **subsetting** + of large data sets + - Intuitive **merging** and **joining** data sets + - Flexible **reshaping** and pivoting of data sets + - **Hierarchical** labeling of axes (possible to have multiple labels per + tick) + - Robust IO tools for loading data from **flat files** (CSV and delimited), + Excel files, databases, and saving / loading data from the ultrafast **HDF5 + format** + - **Time series**-specific functionality: date range generation and frequency + conversion, moving window statistics, moving window linear regressions, + date shifting and lagging, etc. + +Many of these principles are here to address the shortcomings frequently +experienced using other languages / scientific research environments. For data +scientists, working with data is typically divided into multiple stages: +munging and cleaning data, analyzing / modeling it, then organizing the results +of the analysis into a form suitable for plotting or tabular display. pandas +is the ideal tool for all of these tasks. + +Some other notes + + - pandas is **fast**. 
Many of the low-level algorithmic bits have been + extensively tweaked in `Cython <https://cython.org>`__ code. However, as with + anything else generalization usually sacrifices performance. So if you focus + on one feature for your application you may be able to create a faster + specialized tool. + + - pandas is a dependency of `statsmodels + <https://www.statsmodels.org/stable/index.html>`__, making it an important part of the + statistical computing ecosystem in Python. + + - pandas has been used extensively in production in financial applications. Data Structures --------------- @@ -119,5 +174,5 @@ The information about current institutional partners can be found on `pandas web License ------- -.. literalinclude:: ../../LICENSE +.. literalinclude:: ../../../LICENSE diff --git a/doc/source/tutorials.rst b/doc/source/getting_started/tutorials.rst similarity index 100% rename from doc/source/tutorials.rst rename to doc/source/getting_started/tutorials.rst diff --git a/doc/source/index.rst.template b/doc/source/index.rst.template index b85150c3444b7..d04e9194e71dc 100644 --- a/doc/source/index.rst.template +++ b/doc/source/index.rst.template @@ -1,168 +1,54 @@ .. pandas documentation master file, created by +.. module:: pandas + ********************************************* pandas: powerful Python data analysis toolkit ********************************************* -`PDF Version <pandas.pdf>`__ - -`Zipped HTML <pandas.zip>`__ - -.. 
module:: pandas - **Date**: |today| **Version**: |version| -**Binary Installers:** https://pypi.org/project/pandas - -**Source Repository:** https://github.com/pandas-dev/pandas - -**Issues & Ideas:** https://github.com/pandas-dev/pandas/issues - -**Q&A Support:** https://stackoverflow.com/questions/tagged/pandas - -**Developer Mailing List:** https://groups.google.com/forum/#!forum/pydata - -**pandas** is a `Python <https://www.python.org>`__ package providing fast, -flexible, and expressive data structures designed to make working with -"relational" or "labeled" data both easy and intuitive. It aims to be the -fundamental high-level building block for doing practical, **real world** data -analysis in Python. Additionally, it has the broader goal of becoming **the -most powerful and flexible open source data analysis / manipulation tool -available in any language**. It is already well on its way toward this goal. - -pandas is well suited for many different kinds of data: - - - Tabular data with heterogeneously-typed columns, as in an SQL table or - Excel spreadsheet - - Ordered and unordered (not necessarily fixed-frequency) time series data. - - Arbitrary matrix data (homogeneously typed or heterogeneous) with row and - column labels - - Any other form of observational / statistical data sets. The data actually - need not be labeled at all to be placed into a pandas data structure - -The two primary data structures of pandas, :class:`Series` (1-dimensional) -and :class:`DataFrame` (2-dimensional), handle the vast majority of typical use -cases in finance, statistics, social science, and many areas of -engineering. For R users, :class:`DataFrame` provides everything that R's -``data.frame`` provides and much more. pandas is built on top of `NumPy -<https://www.numpy.org>`__ and is intended to integrate well within a scientific -computing environment with many other 3rd party libraries. 
- -Here are just a few of the things that pandas does well: - - - Easy handling of **missing data** (represented as NaN) in floating point as - well as non-floating point data - - Size mutability: columns can be **inserted and deleted** from DataFrame and - higher dimensional objects - - Automatic and explicit **data alignment**: objects can be explicitly - aligned to a set of labels, or the user can simply ignore the labels and - let `Series`, `DataFrame`, etc. automatically align the data for you in - computations - - Powerful, flexible **group by** functionality to perform - split-apply-combine operations on data sets, for both aggregating and - transforming data - - Make it **easy to convert** ragged, differently-indexed data in other - Python and NumPy data structures into DataFrame objects - - Intelligent label-based **slicing**, **fancy indexing**, and **subsetting** - of large data sets - - Intuitive **merging** and **joining** data sets - - Flexible **reshaping** and pivoting of data sets - - **Hierarchical** labeling of axes (possible to have multiple labels per - tick) - - Robust IO tools for loading data from **flat files** (CSV and delimited), - Excel files, databases, and saving / loading data from the ultrafast **HDF5 - format** - - **Time series**-specific functionality: date range generation and frequency - conversion, moving window statistics, moving window linear regressions, - date shifting and lagging, etc. - -Many of these principles are here to address the shortcomings frequently -experienced using other languages / scientific research environments. For data -scientists, working with data is typically divided into multiple stages: -munging and cleaning data, analyzing / modeling it, then organizing the results -of the analysis into a form suitable for plotting or tabular display. pandas -is the ideal tool for all of these tasks. - -Some other notes - - - pandas is **fast**. 
Many of the low-level algorithmic bits have been - extensively tweaked in `Cython <https://cython.org>`__ code. However, as with - anything else generalization usually sacrifices performance. So if you focus - on one feature for your application you may be able to create a faster - specialized tool. - - - pandas is a dependency of `statsmodels - <https://www.statsmodels.org/stable/index.html>`__, making it an important part of the - statistical computing ecosystem in Python. - - - pandas has been used extensively in production in financial applications. - -.. note:: +**Download documentation**: `PDF Version <pandas.pdf>`__ | `Zipped HTML <pandas.zip>`__ - This documentation assumes general familiarity with NumPy. If you haven't - used NumPy much or at all, do invest some time in `learning about NumPy - <https://docs.scipy.org>`__ first. +**Useful links**: +`Binary Installers <https://pypi.org/project/pandas>`__ | +`Source Repository <https://github.com/pandas-dev/pandas>`__ | +`Issues & Ideas <https://github.com/pandas-dev/pandas/issues>`__ | +`Q&A Support <https://stackoverflow.com/questions/tagged/pandas>`__ | +`Mailing List <https://groups.google.com/forum/#!forum/pydata>`__ -See the package overview for more detail about what's in the library. +:mod:`pandas` is an open source, BSD-licensed library providing high-performance, +easy-to-use data structures and data analysis tools for the `Python <https://www.python.org/>`__ +programming language. +See the :ref:`overview` for more detail about what's in the library. {% if single_doc and single_doc.endswith('.rst') -%} .. toctree:: - :maxdepth: 4 + :maxdepth: 2 {{ single_doc[:-4] }} {% elif single_doc %} .. autosummary:: - :toctree: api/generated/ + :toctree: reference/api/ {{ single_doc }} {% else -%} .. 
toctree:: - :maxdepth: 4 + :maxdepth: 2 {% endif %} {% if not single_doc -%} - What's New <whatsnew/v0.24.0> + What's New in 0.25.0 <whatsnew/v0.25.0> install - contributing - overview - 10min - tutorials - cookbook - dsintro - basics - text - options - indexing - advanced - computation - missing_data - groupby - merging - reshaping - timeseries - timedeltas - categorical - integer_na - visualization - style - io - enhancingperf - sparse - gotchas - r_interface + getting_started/index + user_guide/index ecosystem - comparison_with_r - comparison_with_sql - comparison_with_sas - comparison_with_stata {% endif -%} {% if include_api -%} - api/index + reference/index {% endif -%} {% if not single_doc -%} - developer - internals - extending + development/index whatsnew/index {% endif -%} diff --git a/doc/source/r_interface.rst b/doc/source/r_interface.rst deleted file mode 100644 index 9839bba4884d4..0000000000000 --- a/doc/source/r_interface.rst +++ /dev/null @@ -1,94 +0,0 @@ -.. _rpy: - -{{ header }} - -****************** -rpy2 / R interface -****************** - -.. warning:: - - Up to pandas 0.19, a ``pandas.rpy`` module existed with functionality to - convert between pandas and ``rpy2`` objects. This functionality now lives in - the `rpy2 <https://rpy2.readthedocs.io/>`__ project itself. - See the `updating section <http://pandas.pydata.org/pandas-docs/version/0.19.0/r_interface.html#updating-your-code-to-use-rpy2-functions>`__ - of the previous documentation for a guide to port your code from the - removed ``pandas.rpy`` to ``rpy2`` functions. - - -`rpy2 <http://rpy2.bitbucket.org/>`__ is an interface to R running embedded in a Python process, and also includes functionality to deal with pandas DataFrames. -Converting data frames back and forth between rpy2 and pandas should be largely -automated (no need to convert explicitly, it will be done on the fly in most -rpy2 functions). 
-To convert explicitly, the functions are ``pandas2ri.py2ri()`` and -``pandas2ri.ri2py()``. - - -See also the documentation of the `rpy2 <http://rpy2.bitbucket.org/>`__ project: https://rpy2.readthedocs.io. - -In the remainder of this page, a few examples of explicit conversion is given. The pandas conversion of rpy2 needs first to be activated: - -.. ipython:: - :verbatim: - - In [1]: from rpy2.robjects import pandas2ri - ...: pandas2ri.activate() - -Transferring R data sets into Python ------------------------------------- - -Once the pandas conversion is activated (``pandas2ri.activate()``), many conversions -of R to pandas objects will be done automatically. For example, to obtain the 'iris' dataset as a pandas DataFrame: - -.. ipython:: - :verbatim: - - In [2]: from rpy2.robjects import r - - In [3]: r.data('iris') - - In [4]: r['iris'].head() - Out[4]: - Sepal.Length Sepal.Width Petal.Length Petal.Width Species - 0 5.1 3.5 1.4 0.2 setosa - 1 4.9 3.0 1.4 0.2 setosa - 2 4.7 3.2 1.3 0.2 setosa - 3 4.6 3.1 1.5 0.2 setosa - 4 5.0 3.6 1.4 0.2 setosa - -If the pandas conversion was not activated, the above could also be accomplished -by explicitly converting it with the ``pandas2ri.ri2py`` function -(``pandas2ri.ri2py(r['iris'])``). - -Converting DataFrames into R objects ------------------------------------- - -The ``pandas2ri.py2ri`` function support the reverse operation to convert -DataFrames into the equivalent R object (that is, **data.frame**): - -.. ipython:: - :verbatim: - - In [5]: df = pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6], 'C': [7, 8, 9]}, - ...: index=["one", "two", "three"]) - - In [6]: r_dataframe = pandas2ri.py2ri(df) - - In [7]: print(type(r_dataframe)) - Out[7]: <class 'rpy2.robjects.vectors.DataFrame'> - - In [8]: print(r_dataframe) - Out[8]: - A B C - one 1 4 7 - two 2 5 8 - three 3 6 9 - - -The DataFrame's index is stored as the ``rownames`` attribute of the -data.frame instance. - - -.. 
- Calling R functions with pandas objects - High-level interface to R estimators diff --git a/doc/source/api/arrays.rst b/doc/source/reference/arrays.rst similarity index 92% rename from doc/source/api/arrays.rst rename to doc/source/reference/arrays.rst index 5ecc5181af22c..1dc74ad83b7e6 100644 --- a/doc/source/api/arrays.rst +++ b/doc/source/reference/arrays.rst @@ -31,7 +31,7 @@ The top-level :meth:`array` method can be used to create a new array, which may stored in a :class:`Series`, :class:`Index`, or as a column in a :class:`DataFrame`. .. autosummary:: - :toctree: generated/ + :toctree: api/ array @@ -48,14 +48,14 @@ or timezone-aware values. scalar type for timezone-naive or timezone-aware datetime data. .. autosummary:: - :toctree: generated/ + :toctree: api/ Timestamp Properties ~~~~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ Timestamp.asm8 Timestamp.day @@ -91,7 +91,7 @@ Properties Methods ~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ Timestamp.astimezone Timestamp.ceil @@ -142,7 +142,7 @@ is used. If the data are tz-aware, then every value in the array must have the same timezone. .. autosummary:: - :toctree: generated/ + :toctree: api/ arrays.DatetimeArray DatetimeTZDtype @@ -156,14 +156,14 @@ NumPy can natively represent timedeltas. Pandas provides :class:`Timedelta` for symmetry with :class:`Timestamp`. .. autosummary:: - :toctree: generated/ + :toctree: api/ Timedelta Properties ~~~~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ Timedelta.asm8 Timedelta.components @@ -183,7 +183,7 @@ Properties Methods ~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ Timedelta.ceil Timedelta.floor @@ -196,7 +196,7 @@ Methods A collection of timedeltas may be stored in a :class:`TimedeltaArray`. .. autosummary:: - :toctree: generated/ + :toctree: api/ arrays.TimedeltaArray @@ -210,14 +210,14 @@ Pandas represents spans of times as :class:`Period` objects. Period ------ .. 
autosummary:: - :toctree: generated/ + :toctree: api/ Period Properties ~~~~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ Period.day Period.dayofweek @@ -244,7 +244,7 @@ Properties Methods ~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ Period.asfreq Period.now @@ -255,7 +255,7 @@ A collection of timedeltas may be stored in a :class:`arrays.PeriodArray`. Every period in a ``PeriodArray`` must have the same ``freq``. .. autosummary:: - :toctree: generated/ + :toctree: api/ arrays.DatetimeArray PeriodDtype @@ -268,14 +268,14 @@ Interval Data Arbitrary intervals can be represented as :class:`Interval` objects. .. autosummary:: - :toctree: generated/ + :toctree: api/ Interval Properties ~~~~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ Interval.closed Interval.closed_left @@ -288,12 +288,12 @@ Properties Interval.overlaps Interval.right -A collection of intervals may be stored in an :class:`IntervalArray`. +A collection of intervals may be stored in an :class:`arrays.IntervalArray`. .. autosummary:: - :toctree: generated/ + :toctree: api/ - IntervalArray + arrays.IntervalArray IntervalDtype .. _api.arrays.integer_na: @@ -305,7 +305,7 @@ Nullable Integer Pandas provides this through :class:`arrays.IntegerArray`. .. autosummary:: - :toctree: generated/ + :toctree: api/ arrays.IntegerArray Int8Dtype @@ -327,13 +327,13 @@ limited, fixed set of values. The dtype of a ``Categorical`` can be described by a :class:`pandas.api.types.CategoricalDtype`. .. autosummary:: - :toctree: generated/ + :toctree: api/ :template: autosummary/class_without_autosummary.rst CategoricalDtype .. autosummary:: - :toctree: generated/ + :toctree: api/ CategoricalDtype.categories CategoricalDtype.ordered @@ -341,7 +341,7 @@ a :class:`pandas.api.types.CategoricalDtype`. Categorical data can be stored in a :class:`pandas.Categorical` .. 
autosummary:: - :toctree: generated/ + :toctree: api/ :template: autosummary/class_without_autosummary.rst Categorical @@ -350,14 +350,14 @@ The alternative :meth:`Categorical.from_codes` constructor can be used when you have the categories and integer codes already: .. autosummary:: - :toctree: generated/ + :toctree: api/ Categorical.from_codes The dtype information is available on the ``Categorical`` .. autosummary:: - :toctree: generated/ + :toctree: api/ Categorical.dtype Categorical.categories @@ -368,7 +368,7 @@ The dtype information is available on the ``Categorical`` the Categorical back to a NumPy array, so categories and order information is not preserved! .. autosummary:: - :toctree: generated/ + :toctree: api/ Categorical.__array__ @@ -391,7 +391,7 @@ Data where a single value is repeated many times (e.g. ``0`` or ``NaN``) may be stored efficiently as a :class:`SparseArray`. .. autosummary:: - :toctree: generated/ + :toctree: api/ SparseArray SparseDtype diff --git a/doc/source/api/extensions.rst b/doc/source/reference/extensions.rst similarity index 95% rename from doc/source/api/extensions.rst rename to doc/source/reference/extensions.rst index 3972354ff9651..6146e34fab274 100644 --- a/doc/source/api/extensions.rst +++ b/doc/source/reference/extensions.rst @@ -11,7 +11,7 @@ These are primarily intended for library authors looking to extend pandas objects. .. autosummary:: - :toctree: generated/ + :toctree: api/ api.extensions.register_extension_dtype api.extensions.register_dataframe_accessor diff --git a/doc/source/api/frame.rst b/doc/source/reference/frame.rst similarity index 93% rename from doc/source/api/frame.rst rename to doc/source/reference/frame.rst index de16d59fe7c40..568acd5207bd1 100644 --- a/doc/source/api/frame.rst +++ b/doc/source/reference/frame.rst @@ -10,7 +10,7 @@ DataFrame Constructor ~~~~~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ DataFrame @@ -19,13 +19,13 @@ Attributes and underlying data **Axes** .. 
autosummary:: - :toctree: generated/ + :toctree: api/ DataFrame.index DataFrame.columns .. autosummary:: - :toctree: generated/ + :toctree: api/ DataFrame.dtypes DataFrame.ftypes @@ -45,7 +45,7 @@ Attributes and underlying data Conversion ~~~~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ DataFrame.astype DataFrame.convert_objects @@ -58,7 +58,7 @@ Conversion Indexing, iteration ~~~~~~~~~~~~~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ DataFrame.head DataFrame.at @@ -88,7 +88,7 @@ For more information on ``.at``, ``.iat``, ``.loc``, and Binary operator functions ~~~~~~~~~~~~~~~~~~~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ DataFrame.add DataFrame.sub @@ -119,7 +119,7 @@ Binary operator functions Function application, GroupBy & Window ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ DataFrame.apply DataFrame.applymap @@ -137,7 +137,7 @@ Function application, GroupBy & Window Computations / Descriptive Stats ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ DataFrame.abs DataFrame.all @@ -181,7 +181,7 @@ Computations / Descriptive Stats Reindexing / Selection / Label manipulation ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ DataFrame.add_prefix DataFrame.add_suffix @@ -217,7 +217,7 @@ Reindexing / Selection / Label manipulation Missing data handling ~~~~~~~~~~~~~~~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ DataFrame.dropna DataFrame.fillna @@ -227,7 +227,7 @@ Missing data handling Reshaping, sorting, transposing ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ DataFrame.droplevel DataFrame.pivot @@ -251,7 +251,7 @@ Reshaping, sorting, transposing Combining / joining / merging ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. 
autosummary:: - :toctree: generated/ + :toctree: api/ DataFrame.append DataFrame.assign @@ -262,7 +262,7 @@ Combining / joining / merging Time series-related ~~~~~~~~~~~~~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ DataFrame.asfreq DataFrame.asof @@ -285,13 +285,13 @@ Plotting specific plotting methods of the form ``DataFrame.plot.<kind>``. .. autosummary:: - :toctree: generated/ + :toctree: api/ :template: autosummary/accessor_callable.rst DataFrame.plot .. autosummary:: - :toctree: generated/ + :toctree: api/ :template: autosummary/accessor_method.rst DataFrame.plot.area @@ -307,7 +307,7 @@ specific plotting methods of the form ``DataFrame.plot.<kind>``. DataFrame.plot.scatter .. autosummary:: - :toctree: generated/ + :toctree: api/ DataFrame.boxplot DataFrame.hist @@ -315,7 +315,7 @@ specific plotting methods of the form ``DataFrame.plot.<kind>``. Serialization / IO / Conversion ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ DataFrame.from_csv DataFrame.from_dict @@ -346,6 +346,6 @@ Serialization / IO / Conversion Sparse ~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ SparseDataFrame.to_coo diff --git a/doc/source/api/general_functions.rst b/doc/source/reference/general_functions.rst similarity index 84% rename from doc/source/api/general_functions.rst rename to doc/source/reference/general_functions.rst index cef5d8cac6abc..b5832cb8aa591 100644 --- a/doc/source/api/general_functions.rst +++ b/doc/source/reference/general_functions.rst @@ -10,7 +10,7 @@ General functions Data manipulations ~~~~~~~~~~~~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ melt pivot @@ -30,7 +30,7 @@ Data manipulations Top-level missing data ~~~~~~~~~~~~~~~~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ isna isnull @@ -40,14 +40,14 @@ Top-level missing data Top-level conversions ~~~~~~~~~~~~~~~~~~~~~ .. 
autosummary:: - :toctree: generated/ + :toctree: api/ to_numeric Top-level dealing with datetimelike ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ to_datetime to_timedelta @@ -60,21 +60,21 @@ Top-level dealing with datetimelike Top-level dealing with intervals ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ interval_range Top-level evaluation ~~~~~~~~~~~~~~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ eval Hashing ~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ util.hash_array util.hash_pandas_object @@ -82,6 +82,6 @@ Hashing Testing ~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ test diff --git a/doc/source/api/general_utility_functions.rst b/doc/source/reference/general_utility_functions.rst similarity index 93% rename from doc/source/api/general_utility_functions.rst rename to doc/source/reference/general_utility_functions.rst index e151f8f57ed5e..9c69770c0f1b7 100644 --- a/doc/source/api/general_utility_functions.rst +++ b/doc/source/reference/general_utility_functions.rst @@ -10,7 +10,7 @@ General utility functions Working with options -------------------- .. autosummary:: - :toctree: generated/ + :toctree: api/ describe_option reset_option @@ -21,7 +21,7 @@ Working with options Testing functions ----------------- .. autosummary:: - :toctree: generated/ + :toctree: api/ testing.assert_frame_equal testing.assert_series_equal @@ -30,7 +30,7 @@ Testing functions Exceptions and warnings ----------------------- .. autosummary:: - :toctree: generated/ + :toctree: api/ errors.DtypeWarning errors.EmptyDataError @@ -44,7 +44,7 @@ Exceptions and warnings Data types related functionality -------------------------------- .. autosummary:: - :toctree: generated/ + :toctree: api/ api.types.union_categoricals api.types.infer_dtype @@ -53,7 +53,7 @@ Data types related functionality Dtype introspection ~~~~~~~~~~~~~~~~~~~ .. 
autosummary:: - :toctree: generated/ + :toctree: api/ api.types.is_bool_dtype api.types.is_categorical_dtype @@ -81,7 +81,7 @@ Dtype introspection Iterable introspection ~~~~~~~~~~~~~~~~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ api.types.is_dict_like api.types.is_file_like @@ -92,7 +92,7 @@ Iterable introspection Scalar introspection ~~~~~~~~~~~~~~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ api.types.is_bool api.types.is_categorical diff --git a/doc/source/api/groupby.rst b/doc/source/reference/groupby.rst similarity index 94% rename from doc/source/api/groupby.rst rename to doc/source/reference/groupby.rst index d67c7e0889522..6ed85ff2fac43 100644 --- a/doc/source/api/groupby.rst +++ b/doc/source/reference/groupby.rst @@ -12,7 +12,7 @@ GroupBy objects are returned by groupby calls: :func:`pandas.DataFrame.groupby`, Indexing, iteration ------------------- .. autosummary:: - :toctree: generated/ + :toctree: api/ GroupBy.__iter__ GroupBy.groups @@ -22,7 +22,7 @@ Indexing, iteration .. currentmodule:: pandas .. autosummary:: - :toctree: generated/ + :toctree: api/ :template: autosummary/class_without_autosummary.rst Grouper @@ -32,7 +32,7 @@ Indexing, iteration Function application -------------------- .. autosummary:: - :toctree: generated/ + :toctree: api/ GroupBy.apply GroupBy.agg @@ -43,7 +43,7 @@ Function application Computations / Descriptive Stats -------------------------------- .. autosummary:: - :toctree: generated/ + :toctree: api/ GroupBy.all GroupBy.any @@ -78,7 +78,7 @@ axis argument, and often an argument indicating whether to restrict application to columns of a specific data type. .. autosummary:: - :toctree: generated/ + :toctree: api/ DataFrameGroupBy.all DataFrameGroupBy.any @@ -113,7 +113,7 @@ application to columns of a specific data type. The following methods are available only for ``SeriesGroupBy`` objects. .. 
autosummary:: - :toctree: generated/ + :toctree: api/ SeriesGroupBy.nlargest SeriesGroupBy.nsmallest @@ -126,7 +126,7 @@ The following methods are available only for ``SeriesGroupBy`` objects. The following methods are available only for ``DataFrameGroupBy`` objects. .. autosummary:: - :toctree: generated/ + :toctree: api/ DataFrameGroupBy.corrwith DataFrameGroupBy.boxplot diff --git a/doc/source/api/index.rst b/doc/source/reference/index.rst similarity index 56% rename from doc/source/api/index.rst rename to doc/source/reference/index.rst index e4d118e278128..ef4676054473a 100644 --- a/doc/source/api/index.rst +++ b/doc/source/reference/index.rst @@ -44,31 +44,31 @@ public functions related to data types in pandas. .. toctree:: :hidden: - generated/pandas.DataFrame.blocks - generated/pandas.DataFrame.as_matrix - generated/pandas.DataFrame.ix - generated/pandas.Index.asi8 - generated/pandas.Index.data - generated/pandas.Index.flags - generated/pandas.Index.holds_integer - generated/pandas.Index.is_type_compatible - generated/pandas.Index.nlevels - generated/pandas.Index.sort - generated/pandas.Panel.agg - generated/pandas.Panel.aggregate - generated/pandas.Panel.blocks - generated/pandas.Panel.empty - generated/pandas.Panel.is_copy - generated/pandas.Panel.items - generated/pandas.Panel.ix - generated/pandas.Panel.major_axis - generated/pandas.Panel.minor_axis - generated/pandas.Series.asobject - generated/pandas.Series.blocks - generated/pandas.Series.from_array - generated/pandas.Series.ix - generated/pandas.Series.imag - generated/pandas.Series.real + api/pandas.DataFrame.blocks + api/pandas.DataFrame.as_matrix + api/pandas.DataFrame.ix + api/pandas.Index.asi8 + api/pandas.Index.data + api/pandas.Index.flags + api/pandas.Index.holds_integer + api/pandas.Index.is_type_compatible + api/pandas.Index.nlevels + api/pandas.Index.sort + api/pandas.Panel.agg + api/pandas.Panel.aggregate + api/pandas.Panel.blocks + api/pandas.Panel.empty + api/pandas.Panel.is_copy + 
api/pandas.Panel.items + api/pandas.Panel.ix + api/pandas.Panel.major_axis + api/pandas.Panel.minor_axis + api/pandas.Series.asobject + api/pandas.Series.blocks + api/pandas.Series.from_array + api/pandas.Series.ix + api/pandas.Series.imag + api/pandas.Series.real .. Can't convince sphinx to generate toctree for this class attribute. @@ -77,4 +77,4 @@ public functions related to data types in pandas. .. toctree:: :hidden: - generated/pandas.api.extensions.ExtensionDtype.na_value + api/pandas.api.extensions.ExtensionDtype.na_value diff --git a/doc/source/api/indexing.rst b/doc/source/reference/indexing.rst similarity index 91% rename from doc/source/api/indexing.rst rename to doc/source/reference/indexing.rst index d27b05322c1f2..680cb7e3dac91 100644 --- a/doc/source/api/indexing.rst +++ b/doc/source/reference/indexing.rst @@ -15,14 +15,14 @@ that contain an index (Series/DataFrame) and those should most likely be used before calling these methods directly.** .. autosummary:: - :toctree: generated/ + :toctree: api/ Index Properties ~~~~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ Index.values Index.is_monotonic @@ -51,7 +51,7 @@ Properties Modifying and Computations ~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ Index.all Index.any @@ -90,7 +90,7 @@ Modifying and Computations Compatibility with MultiIndex ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ Index.set_names Index.is_lexsorted_for_tuple @@ -99,7 +99,7 @@ Compatibility with MultiIndex Missing Values ~~~~~~~~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ Index.fillna Index.dropna @@ -109,7 +109,7 @@ Missing Values Conversion ~~~~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ Index.astype Index.item @@ -124,7 +124,7 @@ Conversion Sorting ~~~~~~~ .. 
autosummary:: - :toctree: generated/ + :toctree: api/ Index.argsort Index.searchsorted @@ -133,14 +133,14 @@ Sorting Time-specific operations ~~~~~~~~~~~~~~~~~~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ Index.shift Combining / joining / set operations ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ Index.append Index.join @@ -152,7 +152,7 @@ Combining / joining / set operations Selecting ~~~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ Index.asof Index.asof_locs @@ -176,7 +176,7 @@ Selecting Numeric Index ------------- .. autosummary:: - :toctree: generated/ + :toctree: api/ :template: autosummary/class_without_autosummary.rst RangeIndex @@ -188,7 +188,7 @@ Numeric Index .. Separate block, since they aren't classes. .. autosummary:: - :toctree: generated/ + :toctree: api/ RangeIndex.from_range @@ -197,7 +197,7 @@ Numeric Index CategoricalIndex ---------------- .. autosummary:: - :toctree: generated/ + :toctree: api/ :template: autosummary/class_without_autosummary.rst CategoricalIndex @@ -205,7 +205,7 @@ CategoricalIndex Categorical Components ~~~~~~~~~~~~~~~~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ CategoricalIndex.codes CategoricalIndex.categories @@ -222,7 +222,7 @@ Categorical Components Modifying and Computations ~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ CategoricalIndex.map CategoricalIndex.equals @@ -232,7 +232,7 @@ Modifying and Computations IntervalIndex ------------- .. autosummary:: - :toctree: generated/ + :toctree: api/ :template: autosummary/class_without_autosummary.rst IntervalIndex @@ -240,7 +240,7 @@ IntervalIndex IntervalIndex Components ~~~~~~~~~~~~~~~~~~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ IntervalIndex.from_arrays IntervalIndex.from_tuples @@ -265,20 +265,20 @@ IntervalIndex Components MultiIndex ---------- .. 
autosummary:: - :toctree: generated/ + :toctree: api/ :template: autosummary/class_without_autosummary.rst MultiIndex .. autosummary:: - :toctree: generated/ + :toctree: api/ IndexSlice MultiIndex Constructors ~~~~~~~~~~~~~~~~~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ MultiIndex.from_arrays MultiIndex.from_tuples @@ -288,7 +288,7 @@ MultiIndex Constructors MultiIndex Properties ~~~~~~~~~~~~~~~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ MultiIndex.names MultiIndex.levels @@ -299,7 +299,7 @@ MultiIndex Properties MultiIndex Components ~~~~~~~~~~~~~~~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ MultiIndex.set_levels MultiIndex.set_codes @@ -316,7 +316,7 @@ MultiIndex Components MultiIndex Selecting ~~~~~~~~~~~~~~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ MultiIndex.get_loc MultiIndex.get_loc_level @@ -328,7 +328,7 @@ MultiIndex Selecting DatetimeIndex ------------- .. autosummary:: - :toctree: generated/ + :toctree: api/ :template: autosummary/class_without_autosummary.rst DatetimeIndex @@ -336,7 +336,7 @@ DatetimeIndex Time/Date Components ~~~~~~~~~~~~~~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ DatetimeIndex.year DatetimeIndex.month @@ -370,7 +370,7 @@ Time/Date Components Selecting ~~~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ DatetimeIndex.indexer_at_time DatetimeIndex.indexer_between_time @@ -379,7 +379,7 @@ Selecting Time-specific operations ~~~~~~~~~~~~~~~~~~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ DatetimeIndex.normalize DatetimeIndex.strftime @@ -395,7 +395,7 @@ Time-specific operations Conversion ~~~~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ DatetimeIndex.to_period DatetimeIndex.to_perioddelta @@ -406,7 +406,7 @@ Conversion TimedeltaIndex -------------- .. 
autosummary:: - :toctree: generated/ + :toctree: api/ :template: autosummary/class_without_autosummary.rst TimedeltaIndex @@ -414,7 +414,7 @@ TimedeltaIndex Components ~~~~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ TimedeltaIndex.days TimedeltaIndex.seconds @@ -426,7 +426,7 @@ Components Conversion ~~~~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ TimedeltaIndex.to_pytimedelta TimedeltaIndex.to_series @@ -440,7 +440,7 @@ Conversion PeriodIndex ----------- .. autosummary:: - :toctree: generated/ + :toctree: api/ :template: autosummary/class_without_autosummary.rst PeriodIndex @@ -448,7 +448,7 @@ PeriodIndex Properties ~~~~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ PeriodIndex.day PeriodIndex.dayofweek @@ -474,7 +474,7 @@ Properties Methods ~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ PeriodIndex.asfreq PeriodIndex.strftime diff --git a/doc/source/api/io.rst b/doc/source/reference/io.rst similarity index 78% rename from doc/source/api/io.rst rename to doc/source/reference/io.rst index f2060b7c05413..9c776e3ff8a82 100644 --- a/doc/source/api/io.rst +++ b/doc/source/reference/io.rst @@ -10,14 +10,14 @@ Input/Output Pickling ~~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ read_pickle Flat File ~~~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ read_table read_csv @@ -27,20 +27,20 @@ Flat File Clipboard ~~~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ read_clipboard Excel ~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ read_excel ExcelFile.parse .. autosummary:: - :toctree: generated/ + :toctree: api/ :template: autosummary/class_without_autosummary.rst ExcelWriter @@ -48,14 +48,14 @@ Excel JSON ~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ read_json .. currentmodule:: pandas.io.json .. autosummary:: - :toctree: generated/ + :toctree: api/ json_normalize build_table_schema @@ -65,14 +65,14 @@ JSON HTML ~~~~ .. 
autosummary:: - :toctree: generated/ + :toctree: api/ read_html HDFStore: PyTables (HDF5) ~~~~~~~~~~~~~~~~~~~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ read_hdf HDFStore.put @@ -87,28 +87,28 @@ HDFStore: PyTables (HDF5) Feather ~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ read_feather Parquet ~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ read_parquet SAS ~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ read_sas SQL ~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ read_sql_table read_sql_query @@ -117,21 +117,21 @@ SQL Google BigQuery ~~~~~~~~~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ read_gbq STATA ~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ read_stata .. currentmodule:: pandas.io.stata .. autosummary:: - :toctree: generated/ + :toctree: api/ StataReader.data StataReader.data_label diff --git a/doc/source/api/offset_frequency.rst b/doc/source/reference/offset_frequency.rst similarity index 84% rename from doc/source/api/offset_frequency.rst rename to doc/source/reference/offset_frequency.rst index 42894fe8d7f2f..ccc1c7e171d22 100644 --- a/doc/source/api/offset_frequency.rst +++ b/doc/source/reference/offset_frequency.rst @@ -10,14 +10,14 @@ Date Offsets DateOffset ---------- .. autosummary:: - :toctree: generated/ + :toctree: api/ DateOffset Properties ~~~~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ DateOffset.freqstr DateOffset.kwds @@ -29,7 +29,7 @@ Properties Methods ~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ DateOffset.apply DateOffset.copy @@ -39,14 +39,14 @@ Methods BusinessDay ----------- .. autosummary:: - :toctree: generated/ + :toctree: api/ BusinessDay Properties ~~~~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ BusinessDay.freqstr BusinessDay.kwds @@ -58,7 +58,7 @@ Properties Methods ~~~~~~~ .. 
autosummary:: - :toctree: generated/ + :toctree: api/ BusinessDay.apply BusinessDay.apply_index @@ -69,14 +69,14 @@ Methods BusinessHour ------------ .. autosummary:: - :toctree: generated/ + :toctree: api/ BusinessHour Properties ~~~~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ BusinessHour.freqstr BusinessHour.kwds @@ -88,7 +88,7 @@ Properties Methods ~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ BusinessHour.apply BusinessHour.copy @@ -98,14 +98,14 @@ Methods CustomBusinessDay ----------------- .. autosummary:: - :toctree: generated/ + :toctree: api/ CustomBusinessDay Properties ~~~~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ CustomBusinessDay.freqstr CustomBusinessDay.kwds @@ -117,7 +117,7 @@ Properties Methods ~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ CustomBusinessDay.apply CustomBusinessDay.copy @@ -127,14 +127,14 @@ Methods CustomBusinessHour ------------------ .. autosummary:: - :toctree: generated/ + :toctree: api/ CustomBusinessHour Properties ~~~~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ CustomBusinessHour.freqstr CustomBusinessHour.kwds @@ -146,7 +146,7 @@ Properties Methods ~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ CustomBusinessHour.apply CustomBusinessHour.copy @@ -156,14 +156,14 @@ Methods MonthOffset ----------- .. autosummary:: - :toctree: generated/ + :toctree: api/ MonthOffset Properties ~~~~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ MonthOffset.freqstr MonthOffset.kwds @@ -175,7 +175,7 @@ Properties Methods ~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ MonthOffset.apply MonthOffset.apply_index @@ -186,14 +186,14 @@ Methods MonthEnd -------- .. autosummary:: - :toctree: generated/ + :toctree: api/ MonthEnd Properties ~~~~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ MonthEnd.freqstr MonthEnd.kwds @@ -205,7 +205,7 @@ Properties Methods ~~~~~~~ .. 
autosummary:: - :toctree: generated/ + :toctree: api/ MonthEnd.apply MonthEnd.apply_index @@ -216,14 +216,14 @@ Methods MonthBegin ---------- .. autosummary:: - :toctree: generated/ + :toctree: api/ MonthBegin Properties ~~~~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ MonthBegin.freqstr MonthBegin.kwds @@ -235,7 +235,7 @@ Properties Methods ~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ MonthBegin.apply MonthBegin.apply_index @@ -246,14 +246,14 @@ Methods BusinessMonthEnd ---------------- .. autosummary:: - :toctree: generated/ + :toctree: api/ BusinessMonthEnd Properties ~~~~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ BusinessMonthEnd.freqstr BusinessMonthEnd.kwds @@ -265,7 +265,7 @@ Properties Methods ~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ BusinessMonthEnd.apply BusinessMonthEnd.apply_index @@ -276,14 +276,14 @@ Methods BusinessMonthBegin ------------------ .. autosummary:: - :toctree: generated/ + :toctree: api/ BusinessMonthBegin Properties ~~~~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ BusinessMonthBegin.freqstr BusinessMonthBegin.kwds @@ -295,7 +295,7 @@ Properties Methods ~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ BusinessMonthBegin.apply BusinessMonthBegin.apply_index @@ -306,14 +306,14 @@ Methods CustomBusinessMonthEnd ---------------------- .. autosummary:: - :toctree: generated/ + :toctree: api/ CustomBusinessMonthEnd Properties ~~~~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ CustomBusinessMonthEnd.freqstr CustomBusinessMonthEnd.kwds @@ -326,7 +326,7 @@ Properties Methods ~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ CustomBusinessMonthEnd.apply CustomBusinessMonthEnd.copy @@ -336,14 +336,14 @@ Methods CustomBusinessMonthBegin ------------------------ .. autosummary:: - :toctree: generated/ + :toctree: api/ CustomBusinessMonthBegin Properties ~~~~~~~~~~ .. 
autosummary:: - :toctree: generated/ + :toctree: api/ CustomBusinessMonthBegin.freqstr CustomBusinessMonthBegin.kwds @@ -356,7 +356,7 @@ Properties Methods ~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ CustomBusinessMonthBegin.apply CustomBusinessMonthBegin.copy @@ -366,14 +366,14 @@ Methods SemiMonthOffset --------------- .. autosummary:: - :toctree: generated/ + :toctree: api/ SemiMonthOffset Properties ~~~~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ SemiMonthOffset.freqstr SemiMonthOffset.kwds @@ -385,7 +385,7 @@ Properties Methods ~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ SemiMonthOffset.apply SemiMonthOffset.apply_index @@ -396,14 +396,14 @@ Methods SemiMonthEnd ------------ .. autosummary:: - :toctree: generated/ + :toctree: api/ SemiMonthEnd Properties ~~~~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ SemiMonthEnd.freqstr SemiMonthEnd.kwds @@ -415,7 +415,7 @@ Properties Methods ~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ SemiMonthEnd.apply SemiMonthEnd.apply_index @@ -426,14 +426,14 @@ Methods SemiMonthBegin -------------- .. autosummary:: - :toctree: generated/ + :toctree: api/ SemiMonthBegin Properties ~~~~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ SemiMonthBegin.freqstr SemiMonthBegin.kwds @@ -445,7 +445,7 @@ Properties Methods ~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ SemiMonthBegin.apply SemiMonthBegin.apply_index @@ -456,14 +456,14 @@ Methods Week ---- .. autosummary:: - :toctree: generated/ + :toctree: api/ Week Properties ~~~~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ Week.freqstr Week.kwds @@ -475,7 +475,7 @@ Properties Methods ~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ Week.apply Week.apply_index @@ -486,14 +486,14 @@ Methods WeekOfMonth ----------- .. autosummary:: - :toctree: generated/ + :toctree: api/ WeekOfMonth Properties ~~~~~~~~~~ .. 
autosummary:: - :toctree: generated/ + :toctree: api/ WeekOfMonth.freqstr WeekOfMonth.kwds @@ -505,7 +505,7 @@ Properties Methods ~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ WeekOfMonth.apply WeekOfMonth.copy @@ -515,14 +515,14 @@ Methods LastWeekOfMonth --------------- .. autosummary:: - :toctree: generated/ + :toctree: api/ LastWeekOfMonth Properties ~~~~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ LastWeekOfMonth.freqstr LastWeekOfMonth.kwds @@ -534,7 +534,7 @@ Properties Methods ~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ LastWeekOfMonth.apply LastWeekOfMonth.copy @@ -544,14 +544,14 @@ Methods QuarterOffset ------------- .. autosummary:: - :toctree: generated/ + :toctree: api/ QuarterOffset Properties ~~~~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ QuarterOffset.freqstr QuarterOffset.kwds @@ -563,7 +563,7 @@ Properties Methods ~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ QuarterOffset.apply QuarterOffset.apply_index @@ -574,14 +574,14 @@ Methods BQuarterEnd ----------- .. autosummary:: - :toctree: generated/ + :toctree: api/ BQuarterEnd Properties ~~~~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ BQuarterEnd.freqstr BQuarterEnd.kwds @@ -593,7 +593,7 @@ Properties Methods ~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ BQuarterEnd.apply BQuarterEnd.apply_index @@ -604,14 +604,14 @@ Methods BQuarterBegin ------------- .. autosummary:: - :toctree: generated/ + :toctree: api/ BQuarterBegin Properties ~~~~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ BQuarterBegin.freqstr BQuarterBegin.kwds @@ -623,7 +623,7 @@ Properties Methods ~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ BQuarterBegin.apply BQuarterBegin.apply_index @@ -634,14 +634,14 @@ Methods QuarterEnd ---------- .. autosummary:: - :toctree: generated/ + :toctree: api/ QuarterEnd Properties ~~~~~~~~~~ .. 
autosummary:: - :toctree: generated/ + :toctree: api/ QuarterEnd.freqstr QuarterEnd.kwds @@ -653,7 +653,7 @@ Properties Methods ~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ QuarterEnd.apply QuarterEnd.apply_index @@ -664,14 +664,14 @@ Methods QuarterBegin ------------ .. autosummary:: - :toctree: generated/ + :toctree: api/ QuarterBegin Properties ~~~~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ QuarterBegin.freqstr QuarterBegin.kwds @@ -683,7 +683,7 @@ Properties Methods ~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ QuarterBegin.apply QuarterBegin.apply_index @@ -694,14 +694,14 @@ Methods YearOffset ---------- .. autosummary:: - :toctree: generated/ + :toctree: api/ YearOffset Properties ~~~~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ YearOffset.freqstr YearOffset.kwds @@ -713,7 +713,7 @@ Properties Methods ~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ YearOffset.apply YearOffset.apply_index @@ -724,14 +724,14 @@ Methods BYearEnd -------- .. autosummary:: - :toctree: generated/ + :toctree: api/ BYearEnd Properties ~~~~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ BYearEnd.freqstr BYearEnd.kwds @@ -743,7 +743,7 @@ Properties Methods ~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ BYearEnd.apply BYearEnd.apply_index @@ -754,14 +754,14 @@ Methods BYearBegin ---------- .. autosummary:: - :toctree: generated/ + :toctree: api/ BYearBegin Properties ~~~~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ BYearBegin.freqstr BYearBegin.kwds @@ -773,7 +773,7 @@ Properties Methods ~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ BYearBegin.apply BYearBegin.apply_index @@ -784,14 +784,14 @@ Methods YearEnd ------- .. autosummary:: - :toctree: generated/ + :toctree: api/ YearEnd Properties ~~~~~~~~~~ .. 
autosummary:: - :toctree: generated/ + :toctree: api/ YearEnd.freqstr YearEnd.kwds @@ -803,7 +803,7 @@ Properties Methods ~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ YearEnd.apply YearEnd.apply_index @@ -814,14 +814,14 @@ Methods YearBegin --------- .. autosummary:: - :toctree: generated/ + :toctree: api/ YearBegin Properties ~~~~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ YearBegin.freqstr YearBegin.kwds @@ -833,7 +833,7 @@ Properties Methods ~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ YearBegin.apply YearBegin.apply_index @@ -844,14 +844,14 @@ Methods FY5253 ------ .. autosummary:: - :toctree: generated/ + :toctree: api/ FY5253 Properties ~~~~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ FY5253.freqstr FY5253.kwds @@ -863,7 +863,7 @@ Properties Methods ~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ FY5253.apply FY5253.copy @@ -875,14 +875,14 @@ Methods FY5253Quarter ------------- .. autosummary:: - :toctree: generated/ + :toctree: api/ FY5253Quarter Properties ~~~~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ FY5253Quarter.freqstr FY5253Quarter.kwds @@ -894,7 +894,7 @@ Properties Methods ~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ FY5253Quarter.apply FY5253Quarter.copy @@ -906,14 +906,14 @@ Methods Easter ------ .. autosummary:: - :toctree: generated/ + :toctree: api/ Easter Properties ~~~~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ Easter.freqstr Easter.kwds @@ -925,7 +925,7 @@ Properties Methods ~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ Easter.apply Easter.copy @@ -935,14 +935,14 @@ Methods Tick ---- .. autosummary:: - :toctree: generated/ + :toctree: api/ Tick Properties ~~~~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ Tick.delta Tick.freqstr @@ -955,7 +955,7 @@ Properties Methods ~~~~~~~ .. 
autosummary:: - :toctree: generated/ + :toctree: api/ Tick.copy Tick.isAnchored @@ -964,14 +964,14 @@ Methods Day --- .. autosummary:: - :toctree: generated/ + :toctree: api/ Day Properties ~~~~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ Day.delta Day.freqstr @@ -984,7 +984,7 @@ Properties Methods ~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ Day.copy Day.isAnchored @@ -993,14 +993,14 @@ Methods Hour ---- .. autosummary:: - :toctree: generated/ + :toctree: api/ Hour Properties ~~~~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ Hour.delta Hour.freqstr @@ -1013,7 +1013,7 @@ Properties Methods ~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ Hour.copy Hour.isAnchored @@ -1022,14 +1022,14 @@ Methods Minute ------ .. autosummary:: - :toctree: generated/ + :toctree: api/ Minute Properties ~~~~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ Minute.delta Minute.freqstr @@ -1042,7 +1042,7 @@ Properties Methods ~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ Minute.copy Minute.isAnchored @@ -1051,14 +1051,14 @@ Methods Second ------ .. autosummary:: - :toctree: generated/ + :toctree: api/ Second Properties ~~~~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ Second.delta Second.freqstr @@ -1071,7 +1071,7 @@ Properties Methods ~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ Second.copy Second.isAnchored @@ -1080,14 +1080,14 @@ Methods Milli ----- .. autosummary:: - :toctree: generated/ + :toctree: api/ Milli Properties ~~~~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ Milli.delta Milli.freqstr @@ -1100,7 +1100,7 @@ Properties Methods ~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ Milli.copy Milli.isAnchored @@ -1109,14 +1109,14 @@ Methods Micro ----- .. autosummary:: - :toctree: generated/ + :toctree: api/ Micro Properties ~~~~~~~~~~ .. 
autosummary:: - :toctree: generated/ + :toctree: api/ Micro.delta Micro.freqstr @@ -1129,7 +1129,7 @@ Properties Methods ~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ Micro.copy Micro.isAnchored @@ -1138,14 +1138,14 @@ Methods Nano ---- .. autosummary:: - :toctree: generated/ + :toctree: api/ Nano Properties ~~~~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ Nano.delta Nano.freqstr @@ -1158,7 +1158,7 @@ Properties Methods ~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ Nano.copy Nano.isAnchored @@ -1167,14 +1167,14 @@ Methods BDay ---- .. autosummary:: - :toctree: generated/ + :toctree: api/ BDay Properties ~~~~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ BDay.base BDay.freqstr @@ -1188,7 +1188,7 @@ Properties Methods ~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ BDay.apply BDay.apply_index @@ -1201,14 +1201,14 @@ Methods BMonthEnd --------- .. autosummary:: - :toctree: generated/ + :toctree: api/ BMonthEnd Properties ~~~~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ BMonthEnd.base BMonthEnd.freqstr @@ -1221,7 +1221,7 @@ Properties Methods ~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ BMonthEnd.apply BMonthEnd.apply_index @@ -1234,14 +1234,14 @@ Methods BMonthBegin ----------- .. autosummary:: - :toctree: generated/ + :toctree: api/ BMonthBegin Properties ~~~~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ BMonthBegin.base BMonthBegin.freqstr @@ -1254,7 +1254,7 @@ Properties Methods ~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ BMonthBegin.apply BMonthBegin.apply_index @@ -1267,14 +1267,14 @@ Methods CBMonthEnd ---------- .. autosummary:: - :toctree: generated/ + :toctree: api/ CBMonthEnd Properties ~~~~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ CBMonthEnd.base CBMonthEnd.cbday_roll @@ -1291,7 +1291,7 @@ Properties Methods ~~~~~~~ .. 
autosummary:: - :toctree: generated/ + :toctree: api/ CBMonthEnd.apply CBMonthEnd.apply_index @@ -1304,14 +1304,14 @@ Methods CBMonthBegin ------------ .. autosummary:: - :toctree: generated/ + :toctree: api/ CBMonthBegin Properties ~~~~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ CBMonthBegin.base CBMonthBegin.cbday_roll @@ -1328,7 +1328,7 @@ Properties Methods ~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ CBMonthBegin.apply CBMonthBegin.apply_index @@ -1341,14 +1341,14 @@ Methods CDay ---- .. autosummary:: - :toctree: generated/ + :toctree: api/ CDay Properties ~~~~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ CDay.base CDay.freqstr @@ -1362,7 +1362,7 @@ Properties Methods ~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ CDay.apply CDay.apply_index @@ -1382,6 +1382,6 @@ Frequencies .. _api.offsets: .. autosummary:: - :toctree: generated/ + :toctree: api/ to_offset diff --git a/doc/source/api/panel.rst b/doc/source/reference/panel.rst similarity index 90% rename from doc/source/api/panel.rst rename to doc/source/reference/panel.rst index 4edcd22d2685d..39c8ba0828859 100644 --- a/doc/source/api/panel.rst +++ b/doc/source/reference/panel.rst @@ -10,7 +10,7 @@ Panel Constructor ~~~~~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ Panel @@ -23,7 +23,7 @@ Properties and underlying data * **minor_axis**: axis 2; the columns of each of the DataFrames .. autosummary:: - :toctree: generated/ + :toctree: api/ Panel.values Panel.axes @@ -38,7 +38,7 @@ Properties and underlying data Conversion ~~~~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ Panel.astype Panel.copy @@ -48,7 +48,7 @@ Conversion Getting and setting ~~~~~~~~~~~~~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ Panel.get_value Panel.set_value @@ -56,7 +56,7 @@ Getting and setting Indexing, iteration, slicing ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. 
autosummary:: - :toctree: generated/ + :toctree: api/ Panel.at Panel.iat @@ -75,7 +75,7 @@ For more information on ``.at``, ``.iat``, ``.loc``, and Binary operator functions ~~~~~~~~~~~~~~~~~~~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ Panel.add Panel.sub @@ -103,7 +103,7 @@ Binary operator functions Function application, GroupBy ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ Panel.apply Panel.groupby @@ -113,7 +113,7 @@ Function application, GroupBy Computations / Descriptive Stats ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ Panel.abs Panel.clip @@ -139,7 +139,7 @@ Computations / Descriptive Stats Reindexing / Selection / Label manipulation ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ Panel.add_prefix Panel.add_suffix @@ -160,14 +160,14 @@ Reindexing / Selection / Label manipulation Missing data handling ~~~~~~~~~~~~~~~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ Panel.dropna Reshaping, sorting, transposing ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ Panel.sort_index Panel.swaplevel @@ -178,7 +178,7 @@ Reshaping, sorting, transposing Combining / joining / merging ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ Panel.join Panel.update @@ -186,7 +186,7 @@ Combining / joining / merging Time series-related ~~~~~~~~~~~~~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ Panel.asfreq Panel.shift @@ -197,7 +197,7 @@ Time series-related Serialization / IO / Conversion ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. 
autosummary:: - :toctree: generated/ + :toctree: api/ Panel.from_dict Panel.to_pickle diff --git a/doc/source/api/plotting.rst b/doc/source/reference/plotting.rst similarity index 93% rename from doc/source/api/plotting.rst rename to doc/source/reference/plotting.rst index c4e6333ebda37..7615e1d20f5e2 100644 --- a/doc/source/api/plotting.rst +++ b/doc/source/reference/plotting.rst @@ -10,7 +10,7 @@ Plotting The following functions are contained in the `pandas.plotting` module. .. autosummary:: - :toctree: generated/ + :toctree: api/ andrews_curves bootstrap_plot diff --git a/doc/source/api/resampling.rst b/doc/source/reference/resampling.rst similarity index 91% rename from doc/source/api/resampling.rst rename to doc/source/reference/resampling.rst index f5c6ccce3cdd7..2a52defa3c68f 100644 --- a/doc/source/api/resampling.rst +++ b/doc/source/reference/resampling.rst @@ -12,7 +12,7 @@ Resampler objects are returned by resample calls: :func:`pandas.DataFrame.resamp Indexing, iteration ~~~~~~~~~~~~~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ Resampler.__iter__ Resampler.groups @@ -22,7 +22,7 @@ Indexing, iteration Function application ~~~~~~~~~~~~~~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ Resampler.apply Resampler.aggregate @@ -32,7 +32,7 @@ Function application Upsampling ~~~~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ Resampler.ffill Resampler.backfill @@ -46,7 +46,7 @@ Upsampling Computations / Descriptive Stats ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. autosummary:: - :toctree: generated/ + :toctree: api/ Resampler.count Resampler.nunique diff --git a/doc/source/api/series.rst b/doc/source/reference/series.rst similarity index 93% rename from doc/source/api/series.rst rename to doc/source/reference/series.rst index aa43c8b643d44..a6ac40b5203bf 100644 --- a/doc/source/api/series.rst +++ b/doc/source/reference/series.rst @@ -10,7 +10,7 @@ Series Constructor ----------- .. 
autosummary:: - :toctree: generated/ + :toctree: api/ Series @@ -19,12 +19,12 @@ Attributes **Axes** .. autosummary:: - :toctree: generated/ + :toctree: api/ Series.index .. autosummary:: - :toctree: generated/ + :toctree: api/ Series.array Series.values @@ -52,7 +52,7 @@ Attributes Conversion ---------- .. autosummary:: - :toctree: generated/ + :toctree: api/ Series.astype Series.infer_objects @@ -69,7 +69,7 @@ Conversion Indexing, iteration ------------------- .. autosummary:: - :toctree: generated/ + :toctree: api/ Series.get Series.at @@ -90,7 +90,7 @@ For more information on ``.at``, ``.iat``, ``.loc``, and Binary operator functions ------------------------- .. autosummary:: - :toctree: generated/ + :toctree: api/ Series.add Series.sub @@ -123,7 +123,7 @@ Binary operator functions Function application, GroupBy & Window -------------------------------------- .. autosummary:: - :toctree: generated/ + :toctree: api/ Series.apply Series.agg @@ -141,7 +141,7 @@ Function application, GroupBy & Window Computations / Descriptive Stats -------------------------------- .. autosummary:: - :toctree: generated/ + :toctree: api/ Series.abs Series.all @@ -192,7 +192,7 @@ Computations / Descriptive Stats Reindexing / Selection / Label manipulation ------------------------------------------- .. autosummary:: - :toctree: generated/ + :toctree: api/ Series.align Series.drop @@ -226,7 +226,7 @@ Reindexing / Selection / Label manipulation Missing data handling --------------------- .. autosummary:: - :toctree: generated/ + :toctree: api/ Series.isna Series.notna @@ -237,7 +237,7 @@ Missing data handling Reshaping, sorting ------------------ .. autosummary:: - :toctree: generated/ + :toctree: api/ Series.argsort Series.argmin @@ -256,7 +256,7 @@ Reshaping, sorting Combining / joining / merging ----------------------------- .. 
autosummary:: - :toctree: generated/ + :toctree: api/ Series.append Series.replace @@ -265,7 +265,7 @@ Combining / joining / merging Time series-related ------------------- .. autosummary:: - :toctree: generated/ + :toctree: api/ Series.asfreq Series.asof @@ -309,7 +309,7 @@ Datetime Properties ^^^^^^^^^^^^^^^^^^^ .. autosummary:: - :toctree: generated/ + :toctree: api/ :template: autosummary/accessor_attribute.rst Series.dt.date @@ -345,7 +345,7 @@ Datetime Methods ^^^^^^^^^^^^^^^^ .. autosummary:: - :toctree: generated/ + :toctree: api/ :template: autosummary/accessor_method.rst Series.dt.to_period @@ -364,7 +364,7 @@ Period Properties ^^^^^^^^^^^^^^^^^ .. autosummary:: - :toctree: generated/ + :toctree: api/ :template: autosummary/accessor_attribute.rst Series.dt.qyear @@ -375,7 +375,7 @@ Timedelta Properties ^^^^^^^^^^^^^^^^^^^^ .. autosummary:: - :toctree: generated/ + :toctree: api/ :template: autosummary/accessor_attribute.rst Series.dt.days @@ -388,7 +388,7 @@ Timedelta Methods ^^^^^^^^^^^^^^^^^ .. autosummary:: - :toctree: generated/ + :toctree: api/ :template: autosummary/accessor_method.rst Series.dt.to_pytimedelta @@ -405,7 +405,7 @@ strings and apply several methods to it. These can be accessed like ``Series.str.<function/property>``. .. autosummary:: - :toctree: generated/ + :toctree: api/ :template: autosummary/accessor_method.rst Series.str.capitalize @@ -467,7 +467,7 @@ strings and apply several methods to it. These can be accessed like .. .. autosummary:: - :toctree: generated/ + :toctree: api/ :template: autosummary/accessor.rst Series.str @@ -484,7 +484,7 @@ Categorical-dtype specific methods and attributes are available under the ``Series.cat`` accessor. .. autosummary:: - :toctree: generated/ + :toctree: api/ :template: autosummary/accessor_attribute.rst Series.cat.categories @@ -492,7 +492,7 @@ the ``Series.cat`` accessor. Series.cat.codes .. 
autosummary:: - :toctree: generated/ + :toctree: api/ :template: autosummary/accessor_method.rst Series.cat.rename_categories @@ -514,7 +514,7 @@ Sparse-dtype specific methods and attributes are provided under the ``Series.sparse`` accessor. .. autosummary:: - :toctree: generated/ + :toctree: api/ :template: autosummary/accessor_attribute.rst Series.sparse.npoints @@ -523,7 +523,7 @@ Sparse-dtype specific methods and attributes are provided under the Series.sparse.sp_values .. autosummary:: - :toctree: generated/ + :toctree: api/ Series.sparse.from_coo Series.sparse.to_coo @@ -535,13 +535,13 @@ Plotting specific plotting methods of the form ``Series.plot.<kind>``. .. autosummary:: - :toctree: generated/ + :toctree: api/ :template: autosummary/accessor_callable.rst Series.plot .. autosummary:: - :toctree: generated/ + :toctree: api/ :template: autosummary/accessor_method.rst Series.plot.area @@ -555,14 +555,14 @@ specific plotting methods of the form ``Series.plot.<kind>``. Series.plot.pie .. autosummary:: - :toctree: generated/ + :toctree: api/ Series.hist Serialization / IO / Conversion ------------------------------- .. autosummary:: - :toctree: generated/ + :toctree: api/ Series.to_pickle Series.to_csv @@ -585,7 +585,7 @@ Sparse ------ .. autosummary:: - :toctree: generated/ + :toctree: api/ SparseSeries.to_coo SparseSeries.from_coo diff --git a/doc/source/api/style.rst b/doc/source/reference/style.rst similarity index 88% rename from doc/source/api/style.rst rename to doc/source/reference/style.rst index 70913bbec410d..bd9635b41e343 100644 --- a/doc/source/api/style.rst +++ b/doc/source/reference/style.rst @@ -12,7 +12,7 @@ Style Styler Constructor ------------------ .. autosummary:: - :toctree: generated/ + :toctree: api/ Styler Styler.from_custom_template @@ -20,7 +20,7 @@ Styler Constructor Styler Properties ----------------- .. 
autosummary:: - :toctree: generated/ + :toctree: api/ Styler.env Styler.template @@ -29,7 +29,7 @@ Styler Properties Style Application ----------------- .. autosummary:: - :toctree: generated/ + :toctree: api/ Styler.apply Styler.applymap @@ -47,7 +47,7 @@ Style Application Builtin Styles -------------- .. autosummary:: - :toctree: generated/ + :toctree: api/ Styler.highlight_max Styler.highlight_min @@ -58,7 +58,7 @@ Builtin Styles Style Export and Import ----------------------- .. autosummary:: - :toctree: generated/ + :toctree: api/ Styler.render Styler.export diff --git a/doc/source/api/window.rst b/doc/source/reference/window.rst similarity index 95% rename from doc/source/api/window.rst rename to doc/source/reference/window.rst index 3245f5f831688..9e1374a3bd8e4 100644 --- a/doc/source/api/window.rst +++ b/doc/source/reference/window.rst @@ -14,7 +14,7 @@ EWM objects are returned by ``.ewm`` calls: :func:`pandas.DataFrame.ewm`, :func: Standard moving window functions -------------------------------- .. autosummary:: - :toctree: generated/ + :toctree: api/ Rolling.count Rolling.sum @@ -39,7 +39,7 @@ Standard moving window functions Standard expanding window functions ----------------------------------- .. autosummary:: - :toctree: generated/ + :toctree: api/ Expanding.count Expanding.sum @@ -60,7 +60,7 @@ Standard expanding window functions Exponentially-weighted moving window functions ---------------------------------------------- .. 
autosummary:: - :toctree: generated/ + :toctree: api/ EWM.mean EWM.std diff --git a/doc/source/advanced.rst b/doc/source/user_guide/advanced.rst similarity index 100% rename from doc/source/advanced.rst rename to doc/source/user_guide/advanced.rst diff --git a/doc/source/categorical.rst b/doc/source/user_guide/categorical.rst similarity index 100% rename from doc/source/categorical.rst rename to doc/source/user_guide/categorical.rst diff --git a/doc/source/computation.rst b/doc/source/user_guide/computation.rst similarity index 100% rename from doc/source/computation.rst rename to doc/source/user_guide/computation.rst diff --git a/doc/source/cookbook.rst b/doc/source/user_guide/cookbook.rst similarity index 100% rename from doc/source/cookbook.rst rename to doc/source/user_guide/cookbook.rst diff --git a/doc/source/enhancingperf.rst b/doc/source/user_guide/enhancingperf.rst similarity index 99% rename from doc/source/enhancingperf.rst rename to doc/source/user_guide/enhancingperf.rst index 0e3d389aa4f6e..9941ffcc9de4d 100644 --- a/doc/source/enhancingperf.rst +++ b/doc/source/user_guide/enhancingperf.rst @@ -783,7 +783,7 @@ significant performance benefit. Here is a plot showing the running time of computation. The two lines are two different engines. -.. image:: _static/eval-perf.png +.. image:: ../_static/eval-perf.png .. note:: @@ -791,7 +791,7 @@ computation. The two lines are two different engines. Operations with smallish objects (around 15k-20k rows) are faster using plain Python: - .. image:: _static/eval-perf-small.png + .. 
image:: ../_static/eval-perf-small.png This plot was created using a ``DataFrame`` with 3 columns each containing diff --git a/doc/source/gotchas.rst b/doc/source/user_guide/gotchas.rst similarity index 100% rename from doc/source/gotchas.rst rename to doc/source/user_guide/gotchas.rst diff --git a/doc/source/groupby.rst b/doc/source/user_guide/groupby.rst similarity index 100% rename from doc/source/groupby.rst rename to doc/source/user_guide/groupby.rst diff --git a/doc/source/user_guide/index.rst b/doc/source/user_guide/index.rst new file mode 100644 index 0000000000000..d39cf7103ab63 --- /dev/null +++ b/doc/source/user_guide/index.rst @@ -0,0 +1,40 @@ +{{ header }} + +.. _user_guide: + +========== +User Guide +========== + +The User Guide covers all of pandas by topic area. Each of the subsections +introduces a topic (such as "working with missing data"), and discusses how +pandas approaches the problem, with many examples throughout. + +Users brand-new to pandas should start with :ref:`10min`. + +Further information on any specific method can be obtained in the +:ref:`api`. + +.. toctree:: + :maxdepth: 2 + + io + indexing + advanced + merging + reshaping + text + missing_data + categorical + integer_na + visualization + computation + groupby + timeseries + timedeltas + style + options + enhancingperf + sparse + gotchas + cookbook diff --git a/doc/source/indexing.rst b/doc/source/user_guide/indexing.rst similarity index 99% rename from doc/source/indexing.rst rename to doc/source/user_guide/indexing.rst index 3fe416c48f670..be1745e2664a1 100644 --- a/doc/source/indexing.rst +++ b/doc/source/user_guide/indexing.rst @@ -1392,7 +1392,7 @@ Performance of :meth:`~pandas.DataFrame.query` ``DataFrame.query()`` using ``numexpr`` is slightly faster than Python for large frames. -.. image:: _static/query-perf.png +.. image:: ../_static/query-perf.png .. note:: @@ -1400,7 +1400,7 @@ large frames. 
with ``DataFrame.query()`` if your frame has more than approximately 200,000 rows. - .. image:: _static/query-perf-small.png + .. image:: ../_static/query-perf-small.png This plot was created using a ``DataFrame`` with 3 columns each containing floating point values generated using ``numpy.random.randn()``. diff --git a/doc/source/integer_na.rst b/doc/source/user_guide/integer_na.rst similarity index 95% rename from doc/source/integer_na.rst rename to doc/source/user_guide/integer_na.rst index eb0c5e3d05863..c5667e9319ca6 100644 --- a/doc/source/integer_na.rst +++ b/doc/source/user_guide/integer_na.rst @@ -10,6 +10,12 @@ Nullable Integer Data Type .. versionadded:: 0.24.0 +.. note:: + + IntegerArray is currently experimental. Its API or implementation may + change without warning. + + In :ref:`missing_data`, we saw that pandas primarily uses ``NaN`` to represent missing data. Because ``NaN`` is a float, this forces an array of integers with any missing values to become floating point. In some cases, this may not matter diff --git a/doc/source/io.rst b/doc/source/user_guide/io.rst similarity index 99% rename from doc/source/io.rst rename to doc/source/user_guide/io.rst index dd1cde0bdff73..58e1b2370c7c8 100644 --- a/doc/source/io.rst +++ b/doc/source/user_guide/io.rst @@ -2549,7 +2549,7 @@ in the method ``to_string`` described above. HTML: .. raw:: html - :file: _static/basic.html + :file: ../_static/basic.html The ``columns`` argument will limit the columns shown: @@ -2565,7 +2565,7 @@ The ``columns`` argument will limit the columns shown: HTML: .. raw:: html - :file: _static/columns.html + :file: ../_static/columns.html ``float_format`` takes a Python callable to control the precision of floating point values: @@ -2582,7 +2582,7 @@ point values: HTML: .. 
raw:: html - :file: _static/float_format.html + :file: ../_static/float_format.html ``bold_rows`` will make the row labels bold by default, but you can turn that off: @@ -2597,7 +2597,7 @@ off: write_html(df, 'nobold', bold_rows=False) .. raw:: html - :file: _static/nobold.html + :file: ../_static/nobold.html The ``classes`` argument provides the ability to give the resulting HTML table CSS classes. Note that these classes are *appended* to the existing @@ -2627,7 +2627,7 @@ that contain URLs. HTML: .. raw:: html - :file: _static/render_links.html + :file: ../_static/render_links.html Finally, the ``escape`` argument allows you to control whether the "<", ">" and "&" characters escaped in the resulting HTML (by default it is @@ -2651,7 +2651,7 @@ Escaped: print(df.to_html()) .. raw:: html - :file: _static/escape.html + :file: ../_static/escape.html Not escaped: @@ -2660,7 +2660,7 @@ Not escaped: print(df.to_html(escape=False)) .. raw:: html - :file: _static/noescape.html + :file: ../_static/noescape.html .. note:: @@ -4850,7 +4850,7 @@ See also some :ref:`cookbook examples <cookbook.sql>` for some advanced strategi The key functions are: .. 
autosummary:: - :toctree: generated/ + :toctree: ../reference/api/ read_sql_table read_sql_query diff --git a/doc/source/merging.rst b/doc/source/user_guide/merging.rst similarity index 100% rename from doc/source/merging.rst rename to doc/source/user_guide/merging.rst diff --git a/doc/source/missing_data.rst b/doc/source/user_guide/missing_data.rst similarity index 100% rename from doc/source/missing_data.rst rename to doc/source/user_guide/missing_data.rst diff --git a/doc/source/options.rst b/doc/source/user_guide/options.rst similarity index 99% rename from doc/source/options.rst rename to doc/source/user_guide/options.rst index e91be3e6ae730..d640d8b1153c5 100644 --- a/doc/source/options.rst +++ b/doc/source/user_guide/options.rst @@ -487,7 +487,7 @@ If a DataFrame or Series contains these characters, the default output mode may df = pd.DataFrame({u'国籍': ['UK', u'日本'], u'名前': ['Alice', u'しのぶ']}) df -.. image:: _static/option_unicode01.png +.. image:: ../_static/option_unicode01.png Enabling ``display.unicode.east_asian_width`` allows pandas to check each character's "East Asian Width" property. These characters can be aligned properly by setting this option to ``True``. However, this will result in longer render @@ -498,7 +498,7 @@ times than the standard ``len`` function. pd.set_option('display.unicode.east_asian_width', True) df -.. image:: _static/option_unicode02.png +.. image:: ../_static/option_unicode02.png In addition, Unicode characters whose width is "Ambiguous" can either be 1 or 2 characters wide depending on the terminal setting or encoding. The option ``display.unicode.ambiguous_as_wide`` can be used to handle the ambiguity. @@ -510,7 +510,7 @@ By default, an "Ambiguous" character's width, such as "¡" (inverted exclamation df = pd.DataFrame({'a': ['xxx', u'¡¡'], 'b': ['yyy', u'¡¡']}) df -.. image:: _static/option_unicode03.png +.. 
image:: ../_static/option_unicode03.png Enabling ``display.unicode.ambiguous_as_wide`` makes pandas interpret these characters' widths to be 2. (Note that this option will only be effective when ``display.unicode.east_asian_width`` is enabled.) @@ -522,7 +522,7 @@ However, setting this option incorrectly for your terminal will cause these char pd.set_option('display.unicode.ambiguous_as_wide', True) df -.. image:: _static/option_unicode04.png +.. image:: ../_static/option_unicode04.png .. ipython:: python :suppress: diff --git a/doc/source/reshaping.rst b/doc/source/user_guide/reshaping.rst similarity index 98% rename from doc/source/reshaping.rst rename to doc/source/user_guide/reshaping.rst index 9891e22e9d552..5c11be34e6ed4 100644 --- a/doc/source/reshaping.rst +++ b/doc/source/user_guide/reshaping.rst @@ -9,7 +9,7 @@ Reshaping and Pivot Tables Reshaping by pivoting DataFrame objects --------------------------------------- -.. image:: _static/reshaping_pivot.png +.. image:: ../_static/reshaping_pivot.png .. ipython:: python :suppress: @@ -101,7 +101,7 @@ are homogeneously-typed. Reshaping by stacking and unstacking ------------------------------------ -.. image:: _static/reshaping_stack.png +.. image:: ../_static/reshaping_stack.png Closely related to the :meth:`~DataFrame.pivot` method are the related :meth:`~DataFrame.stack` and :meth:`~DataFrame.unstack` methods available on @@ -116,7 +116,7 @@ Closely related to the :meth:`~DataFrame.pivot` method are the related (possibly hierarchical) row index to the column axis, producing a reshaped ``DataFrame`` with a new inner-most level of column labels. -.. image:: _static/reshaping_unstack.png +.. image:: ../_static/reshaping_unstack.png The clearest way to explain is by example. Let's take a prior example data set from the hierarchical indexing section: @@ -158,7 +158,7 @@ unstacks the **last level**: .. _reshaping.unstack_by_name: -.. image:: _static/reshaping_unstack_1.png +.. 
image:: ../_static/reshaping_unstack_1.png If the indexes have names, you can use the level names instead of specifying the level numbers: @@ -168,7 +168,7 @@ the level numbers: stacked.unstack('second') -.. image:: _static/reshaping_unstack_0.png +.. image:: ../_static/reshaping_unstack_0.png Notice that the ``stack`` and ``unstack`` methods implicitly sort the index levels involved. Hence a call to ``stack`` and then ``unstack``, or vice versa, @@ -279,7 +279,7 @@ the right thing: Reshaping by Melt ----------------- -.. image:: _static/reshaping_melt.png +.. image:: ../_static/reshaping_melt.png The top-level :func:`~pandas.melt` function and the corresponding :meth:`DataFrame.melt` are useful to massage a ``DataFrame`` into a format where one or more columns diff --git a/doc/source/sparse.rst b/doc/source/user_guide/sparse.rst similarity index 100% rename from doc/source/sparse.rst rename to doc/source/user_guide/sparse.rst diff --git a/doc/source/style.ipynb b/doc/source/user_guide/style.ipynb similarity index 99% rename from doc/source/style.ipynb rename to doc/source/user_guide/style.ipynb index 792fe5120f6e8..79a9848704eec 100644 --- a/doc/source/style.ipynb +++ b/doc/source/user_guide/style.ipynb @@ -992,7 +992,7 @@ "source": [ "A screenshot of the output:\n", "\n", - "![Excel spreadsheet with styled DataFrame](_static/style-excel.png)\n" + "![Excel spreadsheet with styled DataFrame](../_static/style-excel.png)\n" ] }, { @@ -1133,7 +1133,7 @@ "metadata": {}, "outputs": [], "source": [ - "with open(\"template_structure.html\") as f:\n", + "with open(\"templates/template_structure.html\") as f:\n", " structure = f.read()\n", " \n", "HTML(structure)" diff --git a/doc/source/templates/myhtml.tpl b/doc/source/user_guide/templates/myhtml.tpl similarity index 100% rename from doc/source/templates/myhtml.tpl rename to doc/source/user_guide/templates/myhtml.tpl diff --git a/doc/source/template_structure.html b/doc/source/user_guide/templates/template_structure.html 
similarity index 100% rename from doc/source/template_structure.html rename to doc/source/user_guide/templates/template_structure.html diff --git a/doc/source/text.rst b/doc/source/user_guide/text.rst similarity index 100% rename from doc/source/text.rst rename to doc/source/user_guide/text.rst diff --git a/doc/source/timedeltas.rst b/doc/source/user_guide/timedeltas.rst similarity index 100% rename from doc/source/timedeltas.rst rename to doc/source/user_guide/timedeltas.rst diff --git a/doc/source/timeseries.rst b/doc/source/user_guide/timeseries.rst similarity index 100% rename from doc/source/timeseries.rst rename to doc/source/user_guide/timeseries.rst diff --git a/doc/source/visualization.rst b/doc/source/user_guide/visualization.rst similarity index 100% rename from doc/source/visualization.rst rename to doc/source/user_guide/visualization.rst diff --git a/doc/source/whatsnew/v0.24.0.rst b/doc/source/whatsnew/v0.24.0.rst index 69b59793f7c0d..16319a3b83ca4 100644 --- a/doc/source/whatsnew/v0.24.0.rst +++ b/doc/source/whatsnew/v0.24.0.rst @@ -1,33 +1,49 @@ .. _whatsnew_0240: -What's New in 0.24.0 (January XX, 2019) +What's New in 0.24.0 (January 25, 2019) --------------------------------------- .. warning:: The 0.24.x series of releases will be the last to support Python 2. Future feature - releases will support Python 3 only. See :ref:`install.dropping-27` for more. + releases will support Python 3 only. See :ref:`install.dropping-27` for more + details. {{ header }} -These are the changes in pandas 0.24.0. See :ref:`release` for a full changelog -including other versions of pandas. +This is a major release from 0.23.4 and includes a number of API changes, new +features, enhancements, and performance improvements along with a large number +of bug fixes. 
-Highlights include +Highlights include: -* :ref:`Optional Nullable Integer Support <whatsnew_0240.enhancements.intna>` +* :ref:`Optional Integer NA Support <whatsnew_0240.enhancements.intna>` * :ref:`New APIs for accessing the array backing a Series or Index <whatsnew_0240.values_api>` * :ref:`A new top-level method for creating arrays <whatsnew_0240.enhancements.array>` * :ref:`Store Interval and Period data in a Series or DataFrame <whatsnew_0240.enhancements.interval>` * :ref:`Support for joining on two MultiIndexes <whatsnew_0240.enhancements.join_with_two_multiindexes>` + +Check the :ref:`API Changes <whatsnew_0240.api_breaking>` and :ref:`deprecations <whatsnew_0240.deprecations>` before updating. + +These are the changes in pandas 0.24.0. See :ref:`release` for a full changelog +including other versions of pandas. + + +Enhancements +~~~~~~~~~~~~ + .. _whatsnew_0240.enhancements.intna: Optional Integer NA Support ^^^^^^^^^^^^^^^^^^^^^^^^^^^ Pandas has gained the ability to hold integer dtypes with missing values. This long requested feature is enabled through the use of :ref:`extension types <extending.extension-types>`. -Here is an example of the usage. + +.. note:: + + IntegerArray is currently experimental. Its API or implementation may + change without warning. We can construct a ``Series`` with the specified dtype. The dtype string ``Int64`` is a pandas ``ExtensionDtype``. Specifying a list or array using the traditional missing value marker of ``np.nan`` will infer to integer dtype. The display of the ``Series`` will also use the ``NaN`` to indicate missing values in string outputs. (:issue:`20700`, :issue:`20747`, :issue:`22441`, :issue:`21789`, :issue:`22346`) @@ -57,7 +73,7 @@ Operations on these dtypes will propagate ``NaN`` as other pandas operations. # coerce when needed s + 0.01 -These dtypes can operate as part of of ``DataFrame``. +These dtypes can operate as part of a ``DataFrame``. .. 
ipython:: python @@ -66,7 +82,7 @@ These dtypes can operate as part of of ``DataFrame``. df.dtypes -These dtypes can be merged & reshaped & casted. +These dtypes can be merged, reshaped, and casted. .. ipython:: python @@ -109,6 +125,7 @@ a new ndarray of period objects each time. .. ipython:: python + idx.values id(idx.values) id(idx.values) @@ -121,7 +138,7 @@ If you need an actual NumPy array, use :meth:`Series.to_numpy` or :meth:`Index.t For Series and Indexes backed by normal NumPy arrays, :attr:`Series.array` will return a new :class:`arrays.PandasArray`, which is a thin (no-copy) wrapper around a -:class:`numpy.ndarray`. :class:`arrays.PandasArray` isn't especially useful on its own, +:class:`numpy.ndarray`. :class:`~arrays.PandasArray` isn't especially useful on its own, but it does provide the same interface as any extension array defined in pandas or by a third-party library. @@ -139,14 +156,13 @@ See :ref:`Dtypes <basics.dtypes>` and :ref:`Attributes and Underlying Data <basi .. _whatsnew_0240.enhancements.array: -Array -^^^^^ +``pandas.array``: a new top-level method for creating arrays +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ A new top-level method :func:`array` has been added for creating 1-dimensional arrays (:issue:`22860`). This can be used to create any :ref:`extension array <extending.extension-types>`, including -extension arrays registered by :ref:`3rd party libraries <ecosystem.extensions>`. See - -See :ref:`Dtypes <basics.dtypes>` for more on extension arrays. +extension arrays registered by :ref:`3rd party libraries <ecosystem.extensions>`. +See the :ref:`dtypes docs <basics.dtypes>` for more on extension arrays. .. ipython:: python @@ -155,15 +171,15 @@ See :ref:`Dtypes <basics.dtypes>` for more on extension arrays. Passing data for which there isn't dedicated extension type (e.g. float, integer, etc.) 
will return a new :class:`arrays.PandasArray`, which is just a thin (no-copy) -wrapper around a :class:`numpy.ndarray` that satisfies the extension array interface. +wrapper around a :class:`numpy.ndarray` that satisfies the pandas extension array interface. .. ipython:: python pd.array([1, 2, 3]) -On their own, a :class:`arrays.PandasArray` isn't a very useful object. +On their own, a :class:`~arrays.PandasArray` isn't a very useful object. But if you need write low-level code that works generically for any -:class:`~pandas.api.extensions.ExtensionArray`, :class:`arrays.PandasArray` +:class:`~pandas.api.extensions.ExtensionArray`, :class:`~arrays.PandasArray` satisfies that need. Notice that by default, if no ``dtype`` is specified, the dtype of the returned @@ -194,7 +210,7 @@ For periods: .. ipython:: python - pser = pd.Series(pd.date_range("2000", freq="D", periods=5)) + pser = pd.Series(pd.period_range("2000", freq="D", periods=5)) pser pser.dtype @@ -210,6 +226,9 @@ from the ``Series``: ser.array pser.array +These return an instance of :class:`arrays.IntervalArray` or :class:`arrays.PeriodArray`, +the new extension arrays that back interval and period data. + .. warning:: For backwards compatibility, :attr:`Series.values` continues to return @@ -226,7 +245,7 @@ from the ``Series``: Joining with two multi-indexes ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -:func:`DataFrame.merge` and :func:`DataFrame.join` can now be used to join multi-indexed ``Dataframe`` instances on the overlaping index levels (:issue:`6360`) +:func:`DataFrame.merge` and :func:`DataFrame.join` can now be used to join multi-indexed ``Dataframe`` instances on the overlapping index levels (:issue:`6360`) See the :ref:`Merge, join, and concatenate <merging.Join_with_two_multi_indexes>` documentation section. @@ -256,23 +275,6 @@ For earlier versions this can be done using the following. pd.merge(left.reset_index(), right.reset_index(), on=['key'], how='inner').set_index(['key', 'X', 'Y']) - -.. 
_whatsnew_0240.enhancements.extension_array_operators: - -``ExtensionArray`` operator support -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -A ``Series`` based on an ``ExtensionArray`` now supports arithmetic and comparison -operators (:issue:`19577`). There are two approaches for providing operator support for an ``ExtensionArray``: - -1. Define each of the operators on your ``ExtensionArray`` subclass. -2. Use an operator implementation from pandas that depends on operators that are already defined - on the underlying elements (scalars) of the ``ExtensionArray``. - -See the :ref:`ExtensionArray Operator Support -<extending.extension.operator>` documentation section for details on both -ways of adding operator support. - .. _whatsnew_0240.enhancements.read_html: ``read_html`` Enhancements @@ -332,7 +334,7 @@ convenient way to apply users' predefined styling functions, and can help reduce df.style.pipe(format_and_align).set_caption('Summary of results.') Similar methods already exist for other classes in pandas, including :meth:`DataFrame.pipe`, -:meth:`pandas.core.groupby.GroupBy.pipe`, and :meth:`pandas.core.resample.Resampler.pipe`. +:meth:`GroupBy.pipe() <pandas.core.groupby.GroupBy.pipe>`, and :meth:`Resampler.pipe() <pandas.core.resample.Resampler.pipe>`. .. _whatsnew_0240.enhancements.rename_axis: @@ -340,7 +342,7 @@ Renaming names in a MultiIndex ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ :func:`DataFrame.rename_axis` now supports ``index`` and ``columns`` arguments -and :func:`Series.rename_axis` supports ``index`` argument (:issue:`19978`) +and :func:`Series.rename_axis` supports ``index`` argument (:issue:`19978`). This change allows a dictionary to be passed so that some of the names of a ``MultiIndex`` can be changed. @@ -368,13 +370,13 @@ Other Enhancements - :func:`DataFrame.to_parquet` now accepts ``index`` as an argument, allowing the user to override the engine's default behavior to include or omit the dataframe's indexes from the resulting Parquet file. 
(:issue:`20768`) +- :func:`read_feather` now accepts ``columns`` as an argument, allowing the user to specify which columns should be read. (:issue:`24025`) - :meth:`DataFrame.corr` and :meth:`Series.corr` now accept a callable for generic calculation methods of correlation, e.g. histogram intersection (:issue:`22684`) - :func:`DataFrame.to_string` now accepts ``decimal`` as an argument, allowing the user to specify which decimal separator should be used in the output. (:issue:`23614`) -- :func:`read_feather` now accepts ``columns`` as an argument, allowing the user to specify which columns should be read. (:issue:`24025`) - :func:`DataFrame.to_html` now accepts ``render_links`` as an argument, allowing the user to generate HTML with links to any URLs that appear in the DataFrame. See the :ref:`section on writing HTML <io.html>` in the IO docs for example usage. (:issue:`2679`) - :func:`pandas.read_csv` now supports pandas extension types as an argument to ``dtype``, allowing the user to use pandas extension types when reading CSVs. (:issue:`23228`) -- :meth:`DataFrame.shift` :meth:`Series.shift`, :meth:`ExtensionArray.shift`, :meth:`SparseArray.shift`, :meth:`Period.shift`, :meth:`GroupBy.shift`, :meth:`Categorical.shift`, :meth:`NDFrame.shift` and :meth:`Block.shift` now accept `fill_value` as an argument, allowing the user to specify a value which will be used instead of NA/NaT in the empty periods. (:issue:`15486`) +- The :meth:`~DataFrame.shift` method now accepts `fill_value` as an argument, allowing the user to specify a value which will be used instead of NA/NaT in the empty periods. 
(:issue:`15486`) - :func:`to_datetime` now supports the ``%Z`` and ``%z`` directive when passed into ``format`` (:issue:`13486`) - :func:`Series.mode` and :func:`DataFrame.mode` now support the ``dropna`` parameter which can be used to specify whether ``NaN``/``NaT`` values should be considered (:issue:`17534`) - :func:`DataFrame.to_csv` and :func:`Series.to_csv` now support the ``compression`` keyword when a file handle is passed. (:issue:`21227`) @@ -396,20 +398,21 @@ Other Enhancements The default compression for ``to_csv``, ``to_json``, and ``to_pickle`` methods has been updated to ``'infer'`` (:issue:`22004`). - :meth:`DataFrame.to_sql` now supports writing ``TIMESTAMP WITH TIME ZONE`` types for supported databases. For databases that don't support timezones, datetime data will be stored as timezone unaware local timestamps. See the :ref:`io.sql_datetime_data` for implications (:issue:`9086`). - :func:`to_timedelta` now supports iso-formated timedelta strings (:issue:`21877`) -- :class:`Series` and :class:`DataFrame` now support :class:`Iterable` in constructor (:issue:`2193`) +- :class:`Series` and :class:`DataFrame` now support :class:`Iterable` objects in the constructor (:issue:`2193`) - :class:`DatetimeIndex` has gained the :attr:`DatetimeIndex.timetz` attribute. This returns the local time with timezone information. (:issue:`21358`) -- :meth:`Timestamp.round`, :meth:`Timestamp.ceil`, and :meth:`Timestamp.floor` for :class:`DatetimeIndex` and :class:`Timestamp` now support an ``ambiguous`` argument for handling datetimes that are rounded to ambiguous times (:issue:`18946`) -- :meth:`Timestamp.round`, :meth:`Timestamp.ceil`, and :meth:`Timestamp.floor` for :class:`DatetimeIndex` and :class:`Timestamp` now support a ``nonexistent`` argument for handling datetimes that are rounded to nonexistent times. 
See :ref:`timeseries.timezone_nonexistent` (:issue:`22647`) -- :class:`pandas.core.resample.Resampler` now is iterable like :class:`pandas.core.groupby.GroupBy` (:issue:`15314`). +- :meth:`~Timestamp.round`, :meth:`~Timestamp.ceil`, and :meth:`~Timestamp.floor` for :class:`DatetimeIndex` and :class:`Timestamp` + now support an ``ambiguous`` argument for handling datetimes that are rounded to ambiguous times (:issue:`18946`) + and a ``nonexistent`` argument for handling datetimes that are rounded to nonexistent times. See :ref:`timeseries.timezone_nonexistent` (:issue:`22647`) +- The result of :meth:`~DataFrame.resample` is now iterable similar to ``groupby()`` (:issue:`15314`). - :meth:`Series.resample` and :meth:`DataFrame.resample` have gained the :meth:`pandas.core.resample.Resampler.quantile` (:issue:`15023`). - :meth:`DataFrame.resample` and :meth:`Series.resample` with a :class:`PeriodIndex` will now respect the ``base`` argument in the same fashion as with a :class:`DatetimeIndex`. (:issue:`23882`) - :meth:`pandas.api.types.is_list_like` has gained a keyword ``allow_sets`` which is ``True`` by default; if ``False``, all instances of ``set`` will not be considered "list-like" anymore (:issue:`23061`) - :meth:`Index.to_frame` now supports overriding column name(s) (:issue:`22580`). - :meth:`Categorical.from_codes` now can take a ``dtype`` parameter as an alternative to passing ``categories`` and ``ordered`` (:issue:`24398`). -- New attribute :attr:`__git_version__` will return git commit sha of current build (:issue:`21295`). +- New attribute ``__git_version__`` will return git commit sha of current build (:issue:`21295`). - Compatibility with Matplotlib 3.0 (:issue:`22790`). 
-- Added :meth:`Interval.overlaps`, :meth:`IntervalArray.overlaps`, and :meth:`IntervalIndex.overlaps` for determining overlaps between interval-like objects (:issue:`21998`) +- Added :meth:`Interval.overlaps`, :meth:`arrays.IntervalArray.overlaps`, and :meth:`IntervalIndex.overlaps` for determining overlaps between interval-like objects (:issue:`21998`) - :func:`read_fwf` now accepts keyword ``infer_nrows`` (:issue:`15138`). - :func:`~DataFrame.to_parquet` now supports writing a ``DataFrame`` as a directory of parquet files partitioned by a subset of the columns when ``engine = 'pyarrow'`` (:issue:`23283`) - :meth:`Timestamp.tz_localize`, :meth:`DatetimeIndex.tz_localize`, and :meth:`Series.tz_localize` have gained the ``nonexistent`` argument for alternative handling of nonexistent times. See :ref:`timeseries.timezone_nonexistent` (:issue:`8917`, :issue:`24466`) @@ -418,12 +421,11 @@ Other Enhancements - :meth:`MultiIndex.to_flat_index` has been added to flatten multiple levels into a single-level :class:`Index` object. 
- :meth:`DataFrame.to_stata` and :class:`pandas.io.stata.StataWriter117` can write mixed sting columns to Stata strl format (:issue:`23633`) - :meth:`DataFrame.between_time` and :meth:`DataFrame.at_time` have gained the ``axis`` parameter (:issue:`8839`) -- The ``scatter_matrix``, ``andrews_curves``, ``parallel_coordinates``, ``lag_plot``, ``autocorrelation_plot``, ``bootstrap_plot``, and ``radviz`` plots from the ``pandas.plotting`` module are now accessible from calling :meth:`DataFrame.plot` (:issue:`11978`) - :meth:`DataFrame.to_records` now accepts ``index_dtypes`` and ``column_dtypes`` parameters to allow different data types in stored column and index records (:issue:`18146`) - :class:`IntervalIndex` has gained the :attr:`~IntervalIndex.is_overlapping` attribute to indicate if the ``IntervalIndex`` contains any overlapping intervals (:issue:`23309`) - :func:`pandas.DataFrame.to_sql` has gained the ``method`` argument to control SQL insertion clause. See the :ref:`insertion method <io.sql.method>` section in the documentation. (:issue:`8953`) - :meth:`DataFrame.corrwith` now supports Spearman's rank correlation, Kendall's tau as well as callable correlation methods. (:issue:`21925`) -- :meth:`DataFrame.to_json`, :meth:`DataFrame.to_csv`, :meth:`DataFrame.to_pickle`, and :meth:`DataFrame.to_XXX` etc. now support tilde(~) in path argument. (:issue:`23473`) +- :meth:`DataFrame.to_json`, :meth:`DataFrame.to_csv`, :meth:`DataFrame.to_pickle`, and other export methods now support tilde(~) in path argument. (:issue:`23473`) .. _whatsnew_0240.api_breaking: @@ -435,8 +437,8 @@ Pandas 0.24.0 includes a number of API breaking changes. .. 
_whatsnew_0240.api_breaking.deps: -Dependencies have increased minimum versions -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +Increased minimum versions for dependencies +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ We have updated our minimum supported versions of dependencies (:issue:`21242`, :issue:`18742`, :issue:`23774`, :issue:`24767`). If installed, we now require: @@ -1164,17 +1166,19 @@ Other API Changes .. _whatsnew_0240.api.extension: -ExtensionType Changes -^^^^^^^^^^^^^^^^^^^^^ +Extension Type Changes +~~~~~~~~~~~~~~~~~~~~~~ **Equality and Hashability** -Pandas now requires that extension dtypes be hashable. The base class implements +Pandas now requires that extension dtypes be hashable (i.e. the respective +``ExtensionDtype`` objects; hashability is not a requirement for the values +of the corresponding ``ExtensionArray``). The base class implements a default ``__eq__`` and ``__hash__``. If you have a parametrized dtype, you should update the ``ExtensionDtype._metadata`` tuple to match the signature of your ``__init__`` method. See :class:`pandas.api.extensions.ExtensionDtype` for more (:issue:`22476`). -**Reshaping changes** +**New and changed methods** - :meth:`~pandas.api.types.ExtensionArray.dropna` has been added (:issue:`21185`) - :meth:`~pandas.api.types.ExtensionArray.repeat` has been added (:issue:`24349`) @@ -1192,9 +1196,25 @@ update the ``ExtensionDtype._metadata`` tuple to match the signature of your - Added :meth:`pandas.api.types.register_extension_dtype` to register an extension type with pandas (:issue:`22664`) - Updated the ``.type`` attribute for ``PeriodDtype``, ``DatetimeTZDtype``, and ``IntervalDtype`` to be instances of the dtype (``Period``, ``Timestamp``, and ``Interval`` respectively) (:issue:`22938`) +.. _whatsnew_0240.enhancements.extension_array_operators: + +**Operator support** + +A ``Series`` based on an ``ExtensionArray`` now supports arithmetic and comparison +operators (:issue:`19577`). 
There are two approaches for providing operator support for an ``ExtensionArray``: + +1. Define each of the operators on your ``ExtensionArray`` subclass. +2. Use an operator implementation from pandas that depends on operators that are already defined + on the underlying elements (scalars) of the ``ExtensionArray``. + +See the :ref:`ExtensionArray Operator Support +<extending.extension.operator>` documentation section for details on both +ways of adding operator support. + **Other changes** - A default repr for :class:`pandas.api.extensions.ExtensionArray` is now provided (:issue:`23601`). +- :meth:`ExtensionArray._formatting_values` is deprecated. Use :attr:`ExtensionArray._formatter` instead. (:issue:`23601`) - An ``ExtensionArray`` with a boolean dtype now works correctly as a boolean indexer. :meth:`pandas.api.types.is_bool_dtype` now properly considers them boolean (:issue:`22326`) **Bug Fixes** @@ -1243,7 +1263,6 @@ Deprecations - The methods :meth:`DataFrame.update` and :meth:`Panel.update` have deprecated the ``raise_conflict=False|True`` keyword in favor of ``errors='ignore'|'raise'`` (:issue:`23585`) - The methods :meth:`Series.str.partition` and :meth:`Series.str.rpartition` have deprecated the ``pat`` keyword in favor of ``sep`` (:issue:`22676`) - Deprecated the ``nthreads`` keyword of :func:`pandas.read_feather` in favor of ``use_threads`` to reflect the changes in ``pyarrow>=0.11.0``. (:issue:`23053`) -- :meth:`ExtensionArray._formatting_values` is deprecated. Use :attr:`ExtensionArray._formatter` instead. (:issue:`23601`) - :func:`pandas.read_excel` has deprecated accepting ``usecols`` as an integer. 
Please pass in a list of ints from 0 to ``usecols`` inclusive instead (:issue:`23527`) - Constructing a :class:`TimedeltaIndex` from data with ``datetime64``-dtyped data is deprecated, will raise ``TypeError`` in a future version (:issue:`23539`) - Constructing a :class:`DatetimeIndex` from data with ``timedelta64``-dtyped data is deprecated, will raise ``TypeError`` in a future version (:issue:`23675`) @@ -1692,8 +1711,8 @@ Missing - Bug in :func:`Series.hasnans` that could be incorrectly cached and return incorrect answers if null elements are introduced after an initial call (:issue:`19700`) - :func:`Series.isin` now treats all NaN-floats as equal also for ``np.object``-dtype. This behavior is consistent with the behavior for float64 (:issue:`22119`) - :func:`unique` no longer mangles NaN-floats and the ``NaT``-object for ``np.object``-dtype, i.e. ``NaT`` is no longer coerced to a NaN-value and is treated as a different entity. (:issue:`22295`) -- :func:`DataFrame` and :func:`Series` now properly handle numpy masked arrays with hardened masks. Previously, constructing a DataFrame or Series from a masked array with a hard mask would create a pandas object containing the underlying value, rather than the expected NaN. (:issue:`24574`) - +- :class:`DataFrame` and :class:`Series` now properly handle numpy masked arrays with hardened masks. Previously, constructing a DataFrame or Series from a masked array with a hard mask would create a pandas object containing the underlying value, rather than the expected NaN. (:issue:`24574`) +- Bug in :class:`DataFrame` constructor where ``dtype`` argument was not honored when handling numpy masked record arrays. 
(:issue:`24874`) MultiIndex ^^^^^^^^^^ @@ -1751,6 +1770,8 @@ I/O - Bug in :meth:`DataFrame.to_stata`, :class:`pandas.io.stata.StataWriter` and :class:`pandas.io.stata.StataWriter117` where a exception would leave a partially written and invalid dta file (:issue:`23573`) - Bug in :meth:`DataFrame.to_stata` and :class:`pandas.io.stata.StataWriter117` that produced invalid files when using strLs with non-ASCII characters (:issue:`23573`) - Bug in :class:`HDFStore` that caused it to raise ``ValueError`` when reading a Dataframe in Python 3 from fixed format written in Python 2 (:issue:`24510`) +- Bug in :func:`DataFrame.to_string()` and more generally in the floating ``repr`` formatter. Zeros were not trimmed if ``inf`` was present in a column while it was the case with NA values. Zeros are now trimmed as in the presence of NA (:issue:`24861`). +- Bug in the ``repr`` when truncating the number of columns and having a wide last column (:issue:`24849`). Plotting ^^^^^^^^ @@ -1786,6 +1807,7 @@ Groupby/Resample/Rolling - Bug in :meth:`DataFrame.groupby` did not respect the ``observed`` argument when selecting a column and instead always used ``observed=False`` (:issue:`23970`) - Bug in :func:`pandas.core.groupby.SeriesGroupBy.pct_change` or :func:`pandas.core.groupby.DataFrameGroupBy.pct_change` would previously work across groups when calculating the percent change, where it now correctly works per group (:issue:`21200`, :issue:`21235`). - Bug preventing hash table creation with very large number (2^32) of rows (:issue:`22805`) +- Bug in groupby when grouping on categorical causes ``ValueError`` and incorrect grouping if ``observed=True`` and ``nan`` is present in categorical column (:issue:`24740`, :issue:`21151`).
Reshaping ^^^^^^^^^ @@ -1821,7 +1843,6 @@ Reshaping - Bug in :func:`DataFrame.unstack` where a ``ValueError`` was raised when unstacking timezone aware values (:issue:`18338`) - Bug in :func:`DataFrame.stack` where timezone aware values were converted to timezone naive values (:issue:`19420`) - Bug in :func:`merge_asof` where a ``TypeError`` was raised when ``by_col`` were timezone aware values (:issue:`21184`) -- Bug in :func:`merge` when merging by index name would sometimes result in an incorrectly numbered index (:issue:`24212`) - Bug showing an incorrect shape when throwing error during ``DataFrame`` construction. (:issue:`20742`) .. _whatsnew_0240.bug_fixes.sparse: diff --git a/doc/source/whatsnew/v0.24.1.rst b/doc/source/whatsnew/v0.24.1.rst index ee4b7ab62b31a..3ac2ed73ea53f 100644 --- a/doc/source/whatsnew/v0.24.1.rst +++ b/doc/source/whatsnew/v0.24.1.rst @@ -63,6 +63,9 @@ Bug Fixes - - +**Reshaping** + +- Bug in :func:`merge` when merging by index name would sometimes result in an incorrectly numbered index (:issue:`24212`) **Other** diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst index fac42dbd9c7c8..1bc91fc51c0ba 100644 --- a/doc/source/whatsnew/v0.25.0.rst +++ b/doc/source/whatsnew/v0.25.0.rst @@ -1,10 +1,13 @@ -:orphan: - .. _whatsnew_0250: What's New in 0.25.0 (April XX, 2019) ------------------------------------- +.. warning:: + + Starting with the 0.25.x series of releases, pandas only supports Python 3.5 and higher. + See :ref:`install.dropping-27` for more details. + {{ header }} These are the changes in pandas 0.25.0. See :ref:`release` for a full changelog @@ -15,10 +18,7 @@ including other versions of pandas. Other Enhancements ^^^^^^^^^^^^^^^^^^ - - - -- -- -- +- :func:`DataFrame.plot` can now take ``'sym'`` to expose symlog scaling. ..
_whatsnew_0250.api_breaking: diff --git a/pandas/_libs/algos.pyx b/pandas/_libs/algos.pyx index b3c519ab99b6e..663411ad984c2 100644 --- a/pandas/_libs/algos.pyx +++ b/pandas/_libs/algos.pyx @@ -76,7 +76,7 @@ class NegInfinity(object): @cython.wraparound(False) @cython.boundscheck(False) -cpdef ndarray[int64_t, ndim=1] unique_deltas(ndarray[int64_t] arr): +cpdef ndarray[int64_t, ndim=1] unique_deltas(const int64_t[:] arr): """ Efficiently find the unique first-differences of the given array. @@ -150,7 +150,7 @@ def is_lexsorted(list_of_arrays: list) -> bint: @cython.boundscheck(False) @cython.wraparound(False) -def groupsort_indexer(ndarray[int64_t] index, Py_ssize_t ngroups): +def groupsort_indexer(const int64_t[:] index, Py_ssize_t ngroups): """ compute a 1-d indexer that is an ordering of the passed index, ordered by the groups. This is a reverse of the label @@ -230,7 +230,7 @@ def kth_smallest(numeric[:] a, Py_ssize_t k) -> numeric: @cython.boundscheck(False) @cython.wraparound(False) -def nancorr(ndarray[float64_t, ndim=2] mat, bint cov=0, minp=None): +def nancorr(const float64_t[:, :] mat, bint cov=0, minp=None): cdef: Py_ssize_t i, j, xi, yi, N, K bint minpv @@ -294,7 +294,7 @@ def nancorr(ndarray[float64_t, ndim=2] mat, bint cov=0, minp=None): @cython.boundscheck(False) @cython.wraparound(False) -def nancorr_spearman(ndarray[float64_t, ndim=2] mat, Py_ssize_t minp=1): +def nancorr_spearman(const float64_t[:, :] mat, Py_ssize_t minp=1): cdef: Py_ssize_t i, j, xi, yi, N, K ndarray[float64_t, ndim=2] result @@ -435,8 +435,8 @@ def pad(ndarray[algos_t] old, ndarray[algos_t] new, limit=None): @cython.boundscheck(False) @cython.wraparound(False) -def pad_inplace(ndarray[algos_t] values, - ndarray[uint8_t, cast=True] mask, +def pad_inplace(algos_t[:] values, + const uint8_t[:] mask, limit=None): cdef: Py_ssize_t i, N @@ -472,8 +472,8 @@ def pad_inplace(ndarray[algos_t] values, @cython.boundscheck(False) @cython.wraparound(False) -def 
pad_2d_inplace(ndarray[algos_t, ndim=2] values, - ndarray[uint8_t, ndim=2] mask, +def pad_2d_inplace(algos_t[:, :] values, + const uint8_t[:, :] mask, limit=None): cdef: Py_ssize_t i, j, N, K @@ -602,8 +602,8 @@ def backfill(ndarray[algos_t] old, ndarray[algos_t] new, limit=None): @cython.boundscheck(False) @cython.wraparound(False) -def backfill_inplace(ndarray[algos_t] values, - ndarray[uint8_t, cast=True] mask, +def backfill_inplace(algos_t[:] values, + const uint8_t[:] mask, limit=None): cdef: Py_ssize_t i, N @@ -639,8 +639,8 @@ def backfill_inplace(ndarray[algos_t] values, @cython.boundscheck(False) @cython.wraparound(False) -def backfill_2d_inplace(ndarray[algos_t, ndim=2] values, - ndarray[uint8_t, ndim=2] mask, +def backfill_2d_inplace(algos_t[:, :] values, + const uint8_t[:, :] mask, limit=None): cdef: Py_ssize_t i, j, N, K @@ -678,7 +678,7 @@ def backfill_2d_inplace(ndarray[algos_t, ndim=2] values, @cython.wraparound(False) @cython.boundscheck(False) -def arrmap(ndarray[algos_t] index, object func): +def arrmap(algos_t[:] index, object func): cdef: Py_ssize_t length = index.shape[0] Py_ssize_t i = 0 diff --git a/pandas/_libs/groupby_helper.pxi.in b/pandas/_libs/groupby_helper.pxi.in index abac9f147848e..858039f038d02 100644 --- a/pandas/_libs/groupby_helper.pxi.in +++ b/pandas/_libs/groupby_helper.pxi.in @@ -29,10 +29,10 @@ def get_dispatch(dtypes): @cython.wraparound(False) @cython.boundscheck(False) -def group_add_{{name}}(ndarray[{{c_type}}, ndim=2] out, - ndarray[int64_t] counts, - ndarray[{{c_type}}, ndim=2] values, - ndarray[int64_t] labels, +def group_add_{{name}}({{c_type}}[:, :] out, + int64_t[:] counts, + {{c_type}}[:, :] values, + const int64_t[:] labels, Py_ssize_t min_count=0): """ Only aggregates on axis=0 @@ -76,10 +76,10 @@ def group_add_{{name}}(ndarray[{{c_type}}, ndim=2] out, @cython.wraparound(False) @cython.boundscheck(False) -def group_prod_{{name}}(ndarray[{{c_type}}, ndim=2] out, - ndarray[int64_t] counts, - ndarray[{{c_type}}, 
ndim=2] values, - ndarray[int64_t] labels, +def group_prod_{{name}}({{c_type}}[:, :] out, + int64_t[:] counts, + {{c_type}}[:, :] values, + const int64_t[:] labels, Py_ssize_t min_count=0): """ Only aggregates on axis=0 @@ -123,10 +123,10 @@ def group_prod_{{name}}(ndarray[{{c_type}}, ndim=2] out, @cython.wraparound(False) @cython.boundscheck(False) @cython.cdivision(True) -def group_var_{{name}}(ndarray[{{c_type}}, ndim=2] out, - ndarray[int64_t] counts, - ndarray[{{c_type}}, ndim=2] values, - ndarray[int64_t] labels, +def group_var_{{name}}({{c_type}}[:, :] out, + int64_t[:] counts, + {{c_type}}[:, :] values, + const int64_t[:] labels, Py_ssize_t min_count=-1): cdef: Py_ssize_t i, j, N, K, lab, ncounts = len(counts) @@ -175,10 +175,10 @@ def group_var_{{name}}(ndarray[{{c_type}}, ndim=2] out, @cython.wraparound(False) @cython.boundscheck(False) -def group_mean_{{name}}(ndarray[{{c_type}}, ndim=2] out, - ndarray[int64_t] counts, - ndarray[{{c_type}}, ndim=2] values, - ndarray[int64_t] labels, +def group_mean_{{name}}({{c_type}}[:, :] out, + int64_t[:] counts, + {{c_type}}[:, :] values, + const int64_t[:] labels, Py_ssize_t min_count=-1): cdef: Py_ssize_t i, j, N, K, lab, ncounts = len(counts) @@ -220,11 +220,11 @@ def group_mean_{{name}}(ndarray[{{c_type}}, ndim=2] out, @cython.wraparound(False) @cython.boundscheck(False) -def group_ohlc_{{name}}(ndarray[{{c_type}}, ndim=2] out, - ndarray[int64_t] counts, - ndarray[{{c_type}}, ndim=2] values, - ndarray[int64_t] labels, - Py_ssize_t min_count=-1): +def group_ohlc_{{name}}({{c_type}}[:, :] out, + int64_t[:] counts, + {{c_type}}[:, :] values, + const int64_t[:] labels, + Py_ssize_t min_count=-1): """ Only aggregates on axis=0 """ @@ -293,10 +293,10 @@ def get_dispatch(dtypes): @cython.wraparound(False) @cython.boundscheck(False) -def group_last_{{name}}(ndarray[{{c_type}}, ndim=2] out, - ndarray[int64_t] counts, - ndarray[{{c_type}}, ndim=2] values, - ndarray[int64_t] labels, +def group_last_{{name}}({{c_type}}[:, :] 
out, + int64_t[:] counts, + {{c_type}}[:, :] values, + const int64_t[:] labels, Py_ssize_t min_count=-1): """ Only aggregates on axis=0 @@ -350,10 +350,10 @@ def group_last_{{name}}(ndarray[{{c_type}}, ndim=2] out, @cython.wraparound(False) @cython.boundscheck(False) -def group_nth_{{name}}(ndarray[{{c_type}}, ndim=2] out, - ndarray[int64_t] counts, - ndarray[{{c_type}}, ndim=2] values, - ndarray[int64_t] labels, int64_t rank, +def group_nth_{{name}}({{c_type}}[:, :] out, + int64_t[:] counts, + {{c_type}}[:, :] values, + const int64_t[:] labels, int64_t rank, Py_ssize_t min_count=-1): """ Only aggregates on axis=0 @@ -411,9 +411,9 @@ def group_nth_{{name}}(ndarray[{{c_type}}, ndim=2] out, @cython.boundscheck(False) @cython.wraparound(False) -def group_rank_{{name}}(ndarray[float64_t, ndim=2] out, - ndarray[{{c_type}}, ndim=2] values, - ndarray[int64_t] labels, +def group_rank_{{name}}(float64_t[:, :] out, + {{c_type}}[:, :] values, + const int64_t[:] labels, bint is_datetimelike, object ties_method, bint ascending, bint pct, object na_option): """ @@ -606,10 +606,10 @@ ctypedef fused groupby_t: @cython.wraparound(False) @cython.boundscheck(False) -def group_max(ndarray[groupby_t, ndim=2] out, - ndarray[int64_t] counts, - ndarray[groupby_t, ndim=2] values, - ndarray[int64_t] labels, +def group_max(groupby_t[:, :] out, + int64_t[:] counts, + groupby_t[:, :] values, + const int64_t[:] labels, Py_ssize_t min_count=-1): """ Only aggregates on axis=0 @@ -669,10 +669,10 @@ def group_max(ndarray[groupby_t, ndim=2] out, @cython.wraparound(False) @cython.boundscheck(False) -def group_min(ndarray[groupby_t, ndim=2] out, - ndarray[int64_t] counts, - ndarray[groupby_t, ndim=2] values, - ndarray[int64_t] labels, +def group_min(groupby_t[:, :] out, + int64_t[:] counts, + groupby_t[:, :] values, + const int64_t[:] labels, Py_ssize_t min_count=-1): """ Only aggregates on axis=0 @@ -731,9 +731,9 @@ def group_min(ndarray[groupby_t, ndim=2] out, @cython.boundscheck(False) 
@cython.wraparound(False) -def group_cummin(ndarray[groupby_t, ndim=2] out, - ndarray[groupby_t, ndim=2] values, - ndarray[int64_t] labels, +def group_cummin(groupby_t[:, :] out, + groupby_t[:, :] values, + const int64_t[:] labels, bint is_datetimelike): """ Only transforms on axis=0 @@ -779,9 +779,9 @@ def group_cummin(ndarray[groupby_t, ndim=2] out, @cython.boundscheck(False) @cython.wraparound(False) -def group_cummax(ndarray[groupby_t, ndim=2] out, - ndarray[groupby_t, ndim=2] values, - ndarray[int64_t] labels, +def group_cummax(groupby_t[:, :] out, + groupby_t[:, :] values, + const int64_t[:] labels, bint is_datetimelike): """ Only transforms on axis=0 diff --git a/pandas/_libs/hashtable.pyx b/pandas/_libs/hashtable.pyx index 47fa5932290af..8d0c451ad0ab8 100644 --- a/pandas/_libs/hashtable.pyx +++ b/pandas/_libs/hashtable.pyx @@ -52,9 +52,10 @@ include "hashtable_class_helper.pxi" include "hashtable_func_helper.pxi" cdef class Factorizer: - cdef public PyObjectHashTable table - cdef public ObjectVector uniques - cdef public Py_ssize_t count + cdef public: + PyObjectHashTable table + ObjectVector uniques + Py_ssize_t count def __init__(self, size_hint): self.table = PyObjectHashTable(size_hint) @@ -96,9 +97,10 @@ cdef class Factorizer: cdef class Int64Factorizer: - cdef public Int64HashTable table - cdef public Int64Vector uniques - cdef public Py_ssize_t count + cdef public: + Int64HashTable table + Int64Vector uniques + Py_ssize_t count def __init__(self, size_hint): self.table = Int64HashTable(size_hint) @@ -140,7 +142,7 @@ cdef class Int64Factorizer: @cython.wraparound(False) @cython.boundscheck(False) -def unique_label_indices(ndarray[int64_t, ndim=1] labels): +def unique_label_indices(const int64_t[:] labels): """ indices of the first occurrences of the unique labels *excluding* -1. 
equivalent to: @@ -168,6 +170,6 @@ def unique_label_indices(ndarray[int64_t, ndim=1] labels): kh_destroy_int64(table) arr = idx.to_array() - arr = arr[labels[arr].argsort()] + arr = arr[np.asarray(labels)[arr].argsort()] return arr[1:] if arr.size != 0 and labels[arr[0]] == -1 else arr diff --git a/pandas/_libs/hashtable_class_helper.pxi.in b/pandas/_libs/hashtable_class_helper.pxi.in index eac35588b6fc3..3644928d8dedc 100644 --- a/pandas/_libs/hashtable_class_helper.pxi.in +++ b/pandas/_libs/hashtable_class_helper.pxi.in @@ -322,7 +322,7 @@ cdef class {{name}}HashTable(HashTable): self.table.vals[k] = <Py_ssize_t>values[i] @cython.boundscheck(False) - def map_locations(self, ndarray[{{dtype}}_t, ndim=1] values): + def map_locations(self, const {{dtype}}_t[:] values): cdef: Py_ssize_t i, n = len(values) int ret = 0 diff --git a/pandas/_libs/internals.pyx b/pandas/_libs/internals.pyx index 72a1cf16f96b6..f23d2666b4bf4 100644 --- a/pandas/_libs/internals.pyx +++ b/pandas/_libs/internals.pyx @@ -23,10 +23,11 @@ from pandas._libs.algos import ensure_int64 cdef class BlockPlacement: # __slots__ = '_as_slice', '_as_array', '_len' - cdef slice _as_slice - cdef object _as_array + cdef: + slice _as_slice + object _as_array - cdef bint _has_slice, _has_array, _is_known_slice_like + bint _has_slice, _has_array, _is_known_slice_like def __init__(self, val): cdef: diff --git a/pandas/_libs/join.pyx b/pandas/_libs/join.pyx index e4440ac3d9fd8..503867058b3c8 100644 --- a/pandas/_libs/join.pyx +++ b/pandas/_libs/join.pyx @@ -14,7 +14,7 @@ from pandas._libs.algos import groupsort_indexer, ensure_platform_int from pandas.core.algorithms import take_nd -def inner_join(ndarray[int64_t] left, ndarray[int64_t] right, +def inner_join(const int64_t[:] left, const int64_t[:] right, Py_ssize_t max_groups): cdef: Py_ssize_t i, j, k, count = 0 @@ -65,7 +65,7 @@ def inner_join(ndarray[int64_t] left, ndarray[int64_t] right, _get_result_indexer(right_sorter, right_indexer)) -def 
left_outer_join(ndarray[int64_t] left, ndarray[int64_t] right, +def left_outer_join(const int64_t[:] left, const int64_t[:] right, Py_ssize_t max_groups, sort=True): cdef: Py_ssize_t i, j, k, count = 0 @@ -139,7 +139,7 @@ def left_outer_join(ndarray[int64_t] left, ndarray[int64_t] right, return left_indexer, right_indexer -def full_outer_join(ndarray[int64_t] left, ndarray[int64_t] right, +def full_outer_join(const int64_t[:] left, const int64_t[:] right, Py_ssize_t max_groups): cdef: Py_ssize_t i, j, k, count = 0 @@ -213,7 +213,7 @@ def _get_result_indexer(sorter, indexer): return res -def ffill_indexer(ndarray[int64_t] indexer): +def ffill_indexer(const int64_t[:] indexer): cdef: Py_ssize_t i, n = len(indexer) ndarray[int64_t] result @@ -252,7 +252,7 @@ ctypedef fused join_t: @cython.wraparound(False) @cython.boundscheck(False) -def left_join_indexer_unique(ndarray[join_t] left, ndarray[join_t] right): +def left_join_indexer_unique(join_t[:] left, join_t[:] right): cdef: Py_ssize_t i, j, nleft, nright ndarray[int64_t] indexer @@ -677,10 +677,10 @@ ctypedef fused by_t: uint64_t -def asof_join_backward_on_X_by_Y(ndarray[asof_t] left_values, - ndarray[asof_t] right_values, - ndarray[by_t] left_by_values, - ndarray[by_t] right_by_values, +def asof_join_backward_on_X_by_Y(asof_t[:] left_values, + asof_t[:] right_values, + by_t[:] left_by_values, + by_t[:] right_by_values, bint allow_exact_matches=1, tolerance=None): @@ -746,10 +746,10 @@ def asof_join_backward_on_X_by_Y(ndarray[asof_t] left_values, return left_indexer, right_indexer -def asof_join_forward_on_X_by_Y(ndarray[asof_t] left_values, - ndarray[asof_t] right_values, - ndarray[by_t] left_by_values, - ndarray[by_t] right_by_values, +def asof_join_forward_on_X_by_Y(asof_t[:] left_values, + asof_t[:] right_values, + by_t[:] left_by_values, + by_t[:] right_by_values, bint allow_exact_matches=1, tolerance=None): @@ -815,10 +815,10 @@ def asof_join_forward_on_X_by_Y(ndarray[asof_t] left_values, return left_indexer, 
right_indexer -def asof_join_nearest_on_X_by_Y(ndarray[asof_t] left_values, - ndarray[asof_t] right_values, - ndarray[by_t] left_by_values, - ndarray[by_t] right_by_values, +def asof_join_nearest_on_X_by_Y(asof_t[:] left_values, + asof_t[:] right_values, + by_t[:] left_by_values, + by_t[:] right_by_values, bint allow_exact_matches=1, tolerance=None): @@ -864,8 +864,8 @@ def asof_join_nearest_on_X_by_Y(ndarray[asof_t] left_values, # asof_join # ---------------------------------------------------------------------- -def asof_join_backward(ndarray[asof_t] left_values, - ndarray[asof_t] right_values, +def asof_join_backward(asof_t[:] left_values, + asof_t[:] right_values, bint allow_exact_matches=1, tolerance=None): @@ -917,8 +917,8 @@ def asof_join_backward(ndarray[asof_t] left_values, return left_indexer, right_indexer -def asof_join_forward(ndarray[asof_t] left_values, - ndarray[asof_t] right_values, +def asof_join_forward(asof_t[:] left_values, + asof_t[:] right_values, bint allow_exact_matches=1, tolerance=None): @@ -971,8 +971,8 @@ def asof_join_forward(ndarray[asof_t] left_values, return left_indexer, right_indexer -def asof_join_nearest(ndarray[asof_t] left_values, - ndarray[asof_t] right_values, +def asof_join_nearest(asof_t[:] left_values, + asof_t[:] right_values, bint allow_exact_matches=1, tolerance=None): diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx index f845a5437ded4..4745916eb0ce2 100644 --- a/pandas/_libs/lib.pyx +++ b/pandas/_libs/lib.pyx @@ -40,11 +40,12 @@ cdef extern from "numpy/arrayobject.h": # Use PyDataType_* macros when possible, however there are no macros # for accessing some of the fields, so some are defined. Please # ask on cython-dev if you need more. 
- cdef int type_num - cdef int itemsize "elsize" - cdef char byteorder - cdef object fields - cdef tuple names + cdef: + int type_num + int itemsize "elsize" + char byteorder + object fields + tuple names cdef extern from "src/parse_helper.h": @@ -67,12 +68,13 @@ from pandas._libs.missing cimport ( # constants that will be compared to potentially arbitrarily large # python int -cdef object oINT64_MAX = <int64_t>INT64_MAX -cdef object oINT64_MIN = <int64_t>INT64_MIN -cdef object oUINT64_MAX = <uint64_t>UINT64_MAX +cdef: + object oINT64_MAX = <int64_t>INT64_MAX + object oINT64_MIN = <int64_t>INT64_MIN + object oUINT64_MAX = <uint64_t>UINT64_MAX -cdef bint PY2 = sys.version_info[0] == 2 -cdef float64_t NaN = <float64_t>np.NaN + bint PY2 = sys.version_info[0] == 2 + float64_t NaN = <float64_t>np.NaN def values_from_object(obj: object): @@ -376,7 +378,7 @@ def fast_zip(list ndarrays): return result -def get_reverse_indexer(ndarray[int64_t] indexer, Py_ssize_t length): +def get_reverse_indexer(const int64_t[:] indexer, Py_ssize_t length): """ Reverse indexing operation. @@ -405,7 +407,7 @@ def get_reverse_indexer(ndarray[int64_t] indexer, Py_ssize_t length): @cython.wraparound(False) @cython.boundscheck(False) -def has_infs_f4(ndarray[float32_t] arr) -> bool: +def has_infs_f4(const float32_t[:] arr) -> bool: cdef: Py_ssize_t i, n = len(arr) float32_t inf, neginf, val @@ -422,7 +424,7 @@ def has_infs_f4(ndarray[float32_t] arr) -> bool: @cython.wraparound(False) @cython.boundscheck(False) -def has_infs_f8(ndarray[float64_t] arr) -> bool: +def has_infs_f8(const float64_t[:] arr) -> bool: cdef: Py_ssize_t i, n = len(arr) float64_t inf, neginf, val @@ -660,7 +662,7 @@ def clean_index_list(obj: list): # is a general, O(max(len(values), len(binner))) method. 
@cython.boundscheck(False) @cython.wraparound(False) -def generate_bins_dt64(ndarray[int64_t] values, ndarray[int64_t] binner, +def generate_bins_dt64(ndarray[int64_t] values, const int64_t[:] binner, object closed='left', bint hasnans=0): """ Int64 (datetime64) version of generic python version in groupby.py @@ -723,7 +725,7 @@ def generate_bins_dt64(ndarray[int64_t] values, ndarray[int64_t] binner, @cython.boundscheck(False) @cython.wraparound(False) -def row_bool_subset(ndarray[float64_t, ndim=2] values, +def row_bool_subset(const float64_t[:, :] values, ndarray[uint8_t, cast=True] mask): cdef: Py_ssize_t i, j, n, k, pos = 0 @@ -767,8 +769,8 @@ def row_bool_subset_object(ndarray[object, ndim=2] values, @cython.boundscheck(False) @cython.wraparound(False) -def get_level_sorter(ndarray[int64_t, ndim=1] label, - ndarray[int64_t, ndim=1] starts): +def get_level_sorter(const int64_t[:] label, + const int64_t[:] starts): """ argsort for a single level of a multi-index, keeping the order of higher levels unchanged. 
`starts` points to starts of same-key indices w.r.t @@ -780,10 +782,11 @@ def get_level_sorter(ndarray[int64_t, ndim=1] label, int64_t l, r Py_ssize_t i ndarray[int64_t, ndim=1] out = np.empty(len(label), dtype=np.int64) + ndarray[int64_t, ndim=1] label_arr = np.asarray(label) for i in range(len(starts) - 1): l, r = starts[i], starts[i + 1] - out[l:r] = l + label[l:r].argsort(kind='mergesort') + out[l:r] = l + label_arr[l:r].argsort(kind='mergesort') return out @@ -791,7 +794,7 @@ def get_level_sorter(ndarray[int64_t, ndim=1] label, @cython.boundscheck(False) @cython.wraparound(False) def count_level_2d(ndarray[uint8_t, ndim=2, cast=True] mask, - ndarray[int64_t, ndim=1] labels, + const int64_t[:] labels, Py_ssize_t max_bin, int axis): cdef: @@ -818,7 +821,7 @@ def count_level_2d(ndarray[uint8_t, ndim=2, cast=True] mask, return counts -def generate_slices(ndarray[int64_t] labels, Py_ssize_t ngroups): +def generate_slices(const int64_t[:] labels, Py_ssize_t ngroups): cdef: Py_ssize_t i, group_size, n, start int64_t lab @@ -847,7 +850,7 @@ def generate_slices(ndarray[int64_t] labels, Py_ssize_t ngroups): return starts, ends -def indices_fast(object index, ndarray[int64_t] labels, list keys, +def indices_fast(object index, const int64_t[:] labels, list keys, list sorted_labels): cdef: Py_ssize_t i, j, k, lab, cur, start, n = len(labels) @@ -2146,7 +2149,7 @@ def maybe_convert_objects(ndarray[object] objects, bint try_float=0, @cython.boundscheck(False) @cython.wraparound(False) -def map_infer_mask(ndarray arr, object f, ndarray[uint8_t] mask, +def map_infer_mask(ndarray arr, object f, const uint8_t[:] mask, bint convert=1): """ Substitute for np.vectorize with pandas-friendly dtype inference diff --git a/pandas/_libs/missing.pyx b/pandas/_libs/missing.pyx index 229edbac4992d..ab0e4cd6cc765 100644 --- a/pandas/_libs/missing.pyx +++ b/pandas/_libs/missing.pyx @@ -16,10 +16,11 @@ from pandas._libs.tslibs.nattype cimport ( checknull_with_nat, c_NaT as NaT, 
is_null_datetimelike) -cdef float64_t INF = <float64_t>np.inf -cdef float64_t NEGINF = -INF +cdef: + float64_t INF = <float64_t>np.inf + float64_t NEGINF = -INF -cdef int64_t NPY_NAT = util.get_nat() + int64_t NPY_NAT = util.get_nat() cpdef bint checknull(object val): diff --git a/pandas/_libs/parsers.pyx b/pandas/_libs/parsers.pyx index 6cb6ed749f87b..f679746643643 100644 --- a/pandas/_libs/parsers.pyx +++ b/pandas/_libs/parsers.pyx @@ -64,10 +64,11 @@ from pandas.errors import (ParserError, DtypeWarning, CParserError = ParserError -cdef bint PY3 = (sys.version_info[0] >= 3) +cdef: + bint PY3 = (sys.version_info[0] >= 3) -cdef float64_t INF = <float64_t>np.inf -cdef float64_t NEGINF = -INF + float64_t INF = <float64_t>np.inf + float64_t NEGINF = -INF cdef extern from "errno.h": @@ -735,7 +736,7 @@ cdef class TextReader: int status int64_t hr, data_line char *errors = "strict" - cdef StringPath path = _string_path(self.c_encoding) + StringPath path = _string_path(self.c_encoding) header = [] unnamed_cols = set() @@ -1389,8 +1390,9 @@ cdef class TextReader: return None -cdef object _true_values = [b'True', b'TRUE', b'true'] -cdef object _false_values = [b'False', b'FALSE', b'false'] +cdef: + object _true_values = [b'True', b'TRUE', b'true'] + object _false_values = [b'False', b'FALSE', b'false'] def _ensure_encoded(list lst): @@ -1637,7 +1639,7 @@ cdef _categorical_convert(parser_t *parser, int64_t col, int64_t current_category = 0 char *errors = "strict" - cdef StringPath path = _string_path(encoding) + StringPath path = _string_path(encoding) int ret = 0 kh_str_t *table @@ -1727,9 +1729,10 @@ cdef inline void _to_fw_string_nogil(parser_t *parser, int64_t col, data += width -cdef char* cinf = b'inf' -cdef char* cposinf = b'+inf' -cdef char* cneginf = b'-inf' +cdef: + char* cinf = b'inf' + char* cposinf = b'+inf' + char* cneginf = b'-inf' cdef _try_double(parser_t *parser, int64_t col, diff --git a/pandas/_libs/reduction.pyx b/pandas/_libs/reduction.pyx index 
ca39c4de4d309..507567cf480d7 100644 --- a/pandas/_libs/reduction.pyx +++ b/pandas/_libs/reduction.pyx @@ -494,7 +494,7 @@ class InvalidApply(Exception): def apply_frame_axis0(object frame, object f, object names, - ndarray[int64_t] starts, ndarray[int64_t] ends): + const int64_t[:] starts, const int64_t[:] ends): cdef: BlockSlider slider Py_ssize_t i, n = len(starts) diff --git a/pandas/_libs/skiplist.pyx b/pandas/_libs/skiplist.pyx index 6698fcb767d7c..2fdee72f9d588 100644 --- a/pandas/_libs/skiplist.pyx +++ b/pandas/_libs/skiplist.pyx @@ -57,8 +57,9 @@ cdef class IndexableSkiplist: return self.get(i) cpdef get(self, Py_ssize_t i): - cdef Py_ssize_t level - cdef Node node + cdef: + Py_ssize_t level + Node node node = self.head i += 1 @@ -71,9 +72,10 @@ cdef class IndexableSkiplist: return node.value cpdef insert(self, double value): - cdef Py_ssize_t level, steps, d - cdef Node node, prevnode, newnode, next_at_level, tmp - cdef list chain, steps_at_level + cdef: + Py_ssize_t level, steps, d + Node node, prevnode, newnode, next_at_level, tmp + list chain, steps_at_level # find first node on each level where node.next[levels].value > value chain = [None] * self.maxlevels @@ -110,9 +112,10 @@ cdef class IndexableSkiplist: self.size += 1 cpdef remove(self, double value): - cdef Py_ssize_t level, d - cdef Node node, prevnode, tmpnode, next_at_level - cdef list chain + cdef: + Py_ssize_t level, d + Node node, prevnode, tmpnode, next_at_level + list chain # find first node on each level where node.next[levels].value >= value chain = [None] * self.maxlevels diff --git a/pandas/_libs/sparse_op_helper.pxi.in b/pandas/_libs/sparse_op_helper.pxi.in index c6621ab5977ca..5949a3fd0ed81 100644 --- a/pandas/_libs/sparse_op_helper.pxi.in +++ b/pandas/_libs/sparse_op_helper.pxi.in @@ -125,10 +125,10 @@ def get_dispatch(dtypes): @cython.wraparound(False) @cython.boundscheck(False) -cdef inline tuple block_op_{{opname}}_{{dtype}}(ndarray x_, +cdef inline tuple 
block_op_{{opname}}_{{dtype}}({{dtype}}_t[:] x_, BlockIndex xindex, {{dtype}}_t xfill, - ndarray y_, + {{dtype}}_t[:] y_, BlockIndex yindex, {{dtype}}_t yfill): ''' @@ -142,7 +142,7 @@ cdef inline tuple block_op_{{opname}}_{{dtype}}(ndarray x_, int32_t xloc, yloc Py_ssize_t xblock = 0, yblock = 0 # block numbers - ndarray[{{dtype}}_t, ndim=1] x, y + {{dtype}}_t[:] x, y ndarray[{{rdtype}}_t, ndim=1] out # to suppress Cython warning @@ -226,16 +226,18 @@ cdef inline tuple block_op_{{opname}}_{{dtype}}(ndarray x_, @cython.wraparound(False) @cython.boundscheck(False) -cdef inline tuple int_op_{{opname}}_{{dtype}}(ndarray x_, IntIndex xindex, +cdef inline tuple int_op_{{opname}}_{{dtype}}({{dtype}}_t[:] x_, + IntIndex xindex, {{dtype}}_t xfill, - ndarray y_, IntIndex yindex, + {{dtype}}_t[:] y_, + IntIndex yindex, {{dtype}}_t yfill): cdef: IntIndex out_index Py_ssize_t xi = 0, yi = 0, out_i = 0 # fp buf indices int32_t xloc, yloc - ndarray[int32_t, ndim=1] xindices, yindices, out_indices - ndarray[{{dtype}}_t, ndim=1] x, y + int32_t[:] xindices, yindices, out_indices + {{dtype}}_t[:] x, y ndarray[{{rdtype}}_t, ndim=1] out # suppress Cython compiler warnings due to inlining @@ -284,9 +286,9 @@ cdef inline tuple int_op_{{opname}}_{{dtype}}(ndarray x_, IntIndex xindex, return out, out_index, {{(opname, 'xfill', 'yfill', dtype) | get_op}} -cpdef sparse_{{opname}}_{{dtype}}(ndarray[{{dtype}}_t, ndim=1] x, +cpdef sparse_{{opname}}_{{dtype}}({{dtype}}_t[:] x, SparseIndex xindex, {{dtype}}_t xfill, - ndarray[{{dtype}}_t, ndim=1] y, + {{dtype}}_t[:] y, SparseIndex yindex, {{dtype}}_t yfill): if isinstance(xindex, BlockIndex): diff --git a/pandas/_libs/tslibs/conversion.pyx b/pandas/_libs/tslibs/conversion.pyx index 6c8b732928bc3..1c0adaaa288a9 100644 --- a/pandas/_libs/tslibs/conversion.pyx +++ b/pandas/_libs/tslibs/conversion.pyx @@ -147,7 +147,7 @@ def ensure_timedelta64ns(arr: ndarray, copy: bool=True): @cython.boundscheck(False) @cython.wraparound(False) -def 
datetime_to_datetime64(values: object[:]): +def datetime_to_datetime64(object[:] values): """ Convert ndarray of datetime-like objects to int64 array representing nanosecond timestamps. diff --git a/pandas/_libs/tslibs/fields.pyx b/pandas/_libs/tslibs/fields.pyx index 5cda7992369fc..240f008394099 100644 --- a/pandas/_libs/tslibs/fields.pyx +++ b/pandas/_libs/tslibs/fields.pyx @@ -381,7 +381,7 @@ def get_start_end_field(int64_t[:] dtindex, object field, @cython.wraparound(False) @cython.boundscheck(False) -def get_date_field(ndarray[int64_t] dtindex, object field): +def get_date_field(int64_t[:] dtindex, object field): """ Given a int64-based datetime index, extract the year, month, etc., field and return an array of these values. diff --git a/pandas/_libs/tslibs/parsing.pyx b/pandas/_libs/tslibs/parsing.pyx index 82719de2dbdbd..7759e165b7193 100644 --- a/pandas/_libs/tslibs/parsing.pyx +++ b/pandas/_libs/tslibs/parsing.pyx @@ -44,9 +44,10 @@ class DateParseError(ValueError): _DEFAULT_DATETIME = datetime(1, 1, 1).replace(hour=0, minute=0, second=0, microsecond=0) -cdef object _TIMEPAT = re.compile(r'^([01]?[0-9]|2[0-3]):([0-5][0-9])') +cdef: + object _TIMEPAT = re.compile(r'^([01]?[0-9]|2[0-3]):([0-5][0-9])') -cdef set _not_datelike_strings = {'a', 'A', 'm', 'M', 'p', 'P', 't', 'T'} + set _not_datelike_strings = {'a', 'A', 'm', 'M', 'p', 'P', 't', 'T'} # ---------------------------------------------------------------------- diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx index 2f4edb7de8f95..e38e9a1ca5df6 100644 --- a/pandas/_libs/tslibs/period.pyx +++ b/pandas/_libs/tslibs/period.pyx @@ -52,9 +52,10 @@ from pandas._libs.tslibs.nattype cimport ( from pandas._libs.tslibs.offsets cimport to_offset from pandas._libs.tslibs.offsets import _Tick -cdef bint PY2 = str == bytes -cdef enum: - INT32_MIN = -2147483648 +cdef: + bint PY2 = str == bytes + enum: + INT32_MIN = -2147483648 ctypedef struct asfreq_info: diff --git 
a/pandas/_libs/tslibs/resolution.pyx b/pandas/_libs/tslibs/resolution.pyx index f80c1e9841abe..13a4f5ba48557 100644 --- a/pandas/_libs/tslibs/resolution.pyx +++ b/pandas/_libs/tslibs/resolution.pyx @@ -16,15 +16,16 @@ from pandas._libs.tslibs.ccalendar cimport get_days_in_month # ---------------------------------------------------------------------- # Constants -cdef int64_t NPY_NAT = get_nat() - -cdef int RESO_NS = 0 -cdef int RESO_US = 1 -cdef int RESO_MS = 2 -cdef int RESO_SEC = 3 -cdef int RESO_MIN = 4 -cdef int RESO_HR = 5 -cdef int RESO_DAY = 6 +cdef: + int64_t NPY_NAT = get_nat() + + int RESO_NS = 0 + int RESO_US = 1 + int RESO_MS = 2 + int RESO_SEC = 3 + int RESO_MIN = 4 + int RESO_HR = 5 + int RESO_DAY = 6 # ---------------------------------------------------------------------- diff --git a/pandas/_libs/window.pyx b/pandas/_libs/window.pyx index e8f3de64c3823..cc5b3b63f5b04 100644 --- a/pandas/_libs/window.pyx +++ b/pandas/_libs/window.pyx @@ -26,13 +26,14 @@ from pandas._libs.skiplist cimport ( skiplist_t, skiplist_init, skiplist_destroy, skiplist_get, skiplist_insert, skiplist_remove) -cdef float32_t MINfloat32 = np.NINF -cdef float64_t MINfloat64 = np.NINF +cdef: + float32_t MINfloat32 = np.NINF + float64_t MINfloat64 = np.NINF -cdef float32_t MAXfloat32 = np.inf -cdef float64_t MAXfloat64 = np.inf + float32_t MAXfloat32 = np.inf + float64_t MAXfloat64 = np.inf -cdef float64_t NaN = <float64_t>np.NaN + float64_t NaN = <float64_t>np.NaN cdef inline int int_max(int a, int b): return a if a >= b else b cdef inline int int_min(int a, int b): return a if a <= b else b @@ -242,7 +243,7 @@ cdef class VariableWindowIndexer(WindowIndexer): # max window size self.win = (self.end - self.start).max() - def build(self, ndarray[int64_t] index, int64_t win, bint left_closed, + def build(self, const int64_t[:] index, int64_t win, bint left_closed, bint right_closed): cdef: diff --git a/pandas/core/api.py b/pandas/core/api.py index afc929c39086c..8c92287e212a6 100644 
--- a/pandas/core/api.py +++ b/pandas/core/api.py @@ -4,7 +4,6 @@ import numpy as np -from pandas.core.arrays import IntervalArray from pandas.core.arrays.integer import ( Int8Dtype, Int16Dtype, diff --git a/pandas/core/arrays/array_.py b/pandas/core/arrays/array_.py index c7be8e3f745c4..41d623c7efd9c 100644 --- a/pandas/core/arrays/array_.py +++ b/pandas/core/arrays/array_.py @@ -50,7 +50,7 @@ def array(data, # type: Sequence[object] ============================== ===================================== Scalar Type Array Type ============================== ===================================== - :class:`pandas.Interval` :class:`pandas.IntervalArray` + :class:`pandas.Interval` :class:`pandas.arrays.IntervalArray` :class:`pandas.Period` :class:`pandas.arrays.PeriodArray` :class:`datetime.datetime` :class:`pandas.arrays.DatetimeArray` :class:`datetime.timedelta` :class:`pandas.arrays.TimedeltaArray` diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index f2aeb1c1309de..d7a8417a71be2 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -218,6 +218,13 @@ class DatetimeArray(dtl.DatetimeLikeArrayMixin, .. versionadded:: 0.24.0 + .. warning:: + + DatetimeArray is currently experimental, and its API may change + without warning. In particular, :attr:`DatetimeArray.dtype` is + expected to change to always be an instance of an ``ExtensionDtype`` + subclass. + Parameters ---------- values : Series, Index, DatetimeArray, ndarray @@ -511,6 +518,12 @@ def dtype(self): """ The dtype for the DatetimeArray. + .. warning:: + + A future version of pandas will change dtype to never be a + ``numpy.dtype``. Instead, :attr:`DatetimeArray.dtype` will + always be an instance of an ``ExtensionDtype`` subclass. 
+ Returns ------- numpy.dtype or DatetimeTZDtype diff --git a/pandas/core/arrays/integer.py b/pandas/core/arrays/integer.py index b3dde6bf2bd93..a6a4a49d3a939 100644 --- a/pandas/core/arrays/integer.py +++ b/pandas/core/arrays/integer.py @@ -225,24 +225,57 @@ class IntegerArray(ExtensionArray, ExtensionOpsMixin): """ Array of integer (optional missing) values. + .. versionadded:: 0.24.0 + + .. warning:: + + IntegerArray is currently experimental, and its API or internal + implementation may change without warning. + We represent an IntegerArray with 2 numpy arrays: - data: contains a numpy integer array of the appropriate dtype - mask: a boolean array holding a mask on the data, True is missing To construct an IntegerArray from generic array-like input, use - ``integer_array`` function instead. + :func:`pandas.array` with one of the integer dtypes (see examples). + + See :ref:`integer_na` for more. Parameters ---------- - values : integer 1D numpy array - mask : boolean 1D numpy array + values : numpy.ndarray + A 1-d integer-dtype array. + mask : numpy.ndarray + A 1-d boolean-dtype array indicating missing values. copy : bool, default False + Whether to copy the `values` and `mask`. Returns ------- IntegerArray + Examples + -------- + Create an IntegerArray with :func:`pandas.array`. + + >>> int_array = pd.array([1, None, 3], dtype=pd.Int32Dtype()) + >>> int_array + <IntegerArray> + [1, NaN, 3] + Length: 3, dtype: Int32 + + String aliases for the dtypes are also available. They are capitalized. 
+ + >>> pd.array([1, None, 3], dtype='Int32') + <IntegerArray> + [1, NaN, 3] + Length: 3, dtype: Int32 + + >>> pd.array([1, None, 3], dtype='UInt16') + <IntegerArray> + [1, NaN, 3] + Length: 3, dtype: UInt16 """ @cache_readonly diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py index 45470e03c041a..1e671c7bd956a 100644 --- a/pandas/core/arrays/interval.py +++ b/pandas/core/arrays/interval.py @@ -32,6 +32,7 @@ _shared_docs_kwargs = dict( klass='IntervalArray', + qualname='arrays.IntervalArray', name='' ) @@ -115,7 +116,7 @@ A new ``IntervalArray`` can be constructed directly from an array-like of ``Interval`` objects: - >>> pd.IntervalArray([pd.Interval(0, 1), pd.Interval(1, 5)]) + >>> pd.arrays.IntervalArray([pd.Interval(0, 1), pd.Interval(1, 5)]) IntervalArray([(0, 1], (1, 5]], closed='right', dtype='interval[int64]') @@ -248,8 +249,8 @@ def _from_factorized(cls, values, original): Examples -------- - >>> pd.%(klass)s.from_breaks([0, 1, 2, 3]) - %(klass)s([(0, 1], (1, 2], (2, 3]] + >>> pd.%(qualname)s.from_breaks([0, 1, 2, 3]) + %(klass)s([(0, 1], (1, 2], (2, 3]], closed='right', dtype='interval[int64]') """ @@ -311,7 +312,7 @@ def from_breaks(cls, breaks, closed='right', copy=False, dtype=None): Examples -------- >>> %(klass)s.from_arrays([0, 1, 2], [1, 2, 3]) - %(klass)s([(0, 1], (1, 2], (2, 3]] + %(klass)s([(0, 1], (1, 2], (2, 3]], closed='right', dtype='interval[int64]') """ @@ -354,16 +355,16 @@ def from_arrays(cls, left, right, closed='right', copy=False, dtype=None): Examples -------- - >>> pd.%(klass)s.from_intervals([pd.Interval(0, 1), + >>> pd.%(qualname)s.from_intervals([pd.Interval(0, 1), ... 
pd.Interval(1, 2)]) - %(klass)s([(0, 1], (1, 2]] + %(klass)s([(0, 1], (1, 2]], closed='right', dtype='interval[int64]') The generic Index constructor work identically when it infers an array of all intervals: >>> pd.Index([pd.Interval(0, 1), pd.Interval(1, 2)]) - %(klass)s([(0, 1], (1, 2]] + %(klass)s([(0, 1], (1, 2]], closed='right', dtype='interval[int64]') """ @@ -394,7 +395,7 @@ def from_arrays(cls, left, right, closed='right', copy=False, dtype=None): Examples -------- - >>> pd.%(klass)s.from_tuples([(0, 1), (1, 2)]) + >>> pd.%(qualname)s.from_tuples([(0, 1), (1, 2)]) %(klass)s([(0, 1], (1, 2]], closed='right', dtype='interval[int64]') """ @@ -891,13 +892,13 @@ def closed(self): Examples -------- - >>> index = pd.interval_range(0, 3) - >>> index - %(klass)s([(0, 1], (1, 2], (2, 3]] + >>> index = pd.interval_range(0, 3) + >>> index + IntervalIndex([(0, 1], (1, 2], (2, 3]], closed='right', dtype='interval[int64]') - >>> index.set_closed('both') - %(klass)s([[0, 1], [1, 2], [2, 3]] + >>> index.set_closed('both') + IntervalIndex([[0, 1], [1, 2], [2, 3]], closed='both', dtype='interval[int64]') """ @@ -1039,7 +1040,7 @@ def repeat(self, repeats, axis=None): Examples -------- - >>> intervals = pd.%(klass)s.from_tuples([(0, 1), (1, 3), (2, 4)]) + >>> intervals = pd.%(qualname)s.from_tuples([(0, 1), (1, 3), (2, 4)]) >>> intervals %(klass)s([(0, 1], (1, 3], (2, 4]], closed='right', diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py index 910cb96a86216..4f0c96f7927da 100644 --- a/pandas/core/arrays/timedeltas.py +++ b/pandas/core/arrays/timedeltas.py @@ -107,6 +107,29 @@ def wrapper(self, other): class TimedeltaArray(dtl.DatetimeLikeArrayMixin, dtl.TimelikeOps): + """ + Pandas ExtensionArray for timedelta data. + + .. versionadded:: 0.24.0 + + .. warning:: + + TimedeltaArray is currently experimental, and its API may change + without warning. 
In particular, :attr:`TimedeltaArray.dtype` is + expected to change to be an instance of an ``ExtensionDtype`` + subclass. + + Parameters + ---------- + values : array-like + The timedelta data. + + dtype : numpy.dtype + Currently, only ``numpy.dtype("timedelta64[ns]")`` is accepted. + freq : Offset, optional + copy : bool, default False + Whether to copy the underlying array of data. + """ _typ = "timedeltaarray" _scalar_type = Timedelta __array_priority__ = 1000 @@ -128,6 +151,19 @@ def _box_func(self): @property def dtype(self): + """ + The dtype for the TimedeltaArray. + + .. warning:: + + A future version of pandas will change dtype to be an instance + of a :class:`pandas.api.extensions.ExtensionDtype` subclass, + not a ``numpy.dtype``. + + Returns + ------- + numpy.dtype + """ return _TD_DTYPE # ---------------------------------------------------------------- diff --git a/pandas/core/computation/expr.py b/pandas/core/computation/expr.py index 9a44198ba3b86..d840bf6ae71a2 100644 --- a/pandas/core/computation/expr.py +++ b/pandas/core/computation/expr.py @@ -18,7 +18,6 @@ UndefinedVariableError, _arith_ops_syms, _bool_ops_syms, _cmp_ops_syms, _mathops, _reductions, _unary_ops_syms, is_term) from pandas.core.computation.scope import Scope -from pandas.core.reshape.util import compose import pandas.io.formats.printing as printing @@ -103,8 +102,19 @@ def _replace_locals(tok): return toknum, tokval -def _preparse(source, f=compose(_replace_locals, _replace_booleans, - _rewrite_assign)): +def _compose2(f, g): + """Compose 2 callables""" + return lambda *args, **kwargs: f(g(*args, **kwargs)) + + +def _compose(*funcs): + """Compose 2 or more callables""" + assert len(funcs) > 1, 'At least 2 callables must be passed to compose' + return reduce(_compose2, funcs) + + +def _preparse(source, f=_compose(_replace_locals, _replace_booleans, + _rewrite_assign)): """Compose a collection of tokenization functions Parameters @@ -701,8 +711,8 @@ def visitor(x, y): class 
PandasExprVisitor(BaseExprVisitor): def __init__(self, env, engine, parser, - preparser=partial(_preparse, f=compose(_replace_locals, - _replace_booleans))): + preparser=partial(_preparse, f=_compose(_replace_locals, + _replace_booleans))): super(PandasExprVisitor, self).__init__(env, engine, parser, preparser) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 2b97661fe9ec3..a351233a77465 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -530,7 +530,7 @@ def set_axis(self, labels, axis=0, inplace=None): The axis to update. The value 0 identifies the rows, and 1 identifies the columns. - inplace : boolean, default None + inplace : bool, default None Whether to return a new %(klass)s instance. .. warning:: @@ -3966,35 +3966,37 @@ def add_suffix(self, suffix): def sort_values(self, by=None, axis=0, ascending=True, inplace=False, kind='quicksort', na_position='last'): """ - Sort by the values along either axis + Sort by the values along either axis. Parameters ----------%(optional_by)s axis : %(axes_single_arg)s, default 0 - Axis to be sorted + Axis to be sorted. ascending : bool or list of bool, default True Sort ascending vs. descending. Specify list for multiple sort orders. If this is a list of bools, must match the length of the by. inplace : bool, default False - if True, perform operation in-place + If True, perform operation in-place. kind : {'quicksort', 'mergesort', 'heapsort'}, default 'quicksort' Choice of sorting algorithm. See also ndarray.np.sort for more information. `mergesort` is the only stable algorithm. For DataFrames, this option is only applied when sorting on a single column or label. na_position : {'first', 'last'}, default 'last' - `first` puts NaNs at the beginning, `last` puts NaNs at the end + Puts NaNs at the beginning if `first`; `last` puts NaNs at the + end. Returns ------- - sorted_obj : %(klass)s + sorted_obj : DataFrame or None + DataFrame with sorted values if inplace=False, None otherwise. 
Examples -------- >>> df = pd.DataFrame({ - ... 'col1' : ['A', 'A', 'B', np.nan, 'D', 'C'], - ... 'col2' : [2, 1, 9, 8, 7, 4], + ... 'col1': ['A', 'A', 'B', np.nan, 'D', 'C'], + ... 'col2': [2, 1, 9, 8, 7, 4], ... 'col3': [0, 1, 9, 4, 2, 3], ... }) >>> df @@ -4056,32 +4058,35 @@ def sort_values(self, by=None, axis=0, ascending=True, inplace=False, def sort_index(self, axis=0, level=None, ascending=True, inplace=False, kind='quicksort', na_position='last', sort_remaining=True): """ - Sort object by labels (along an axis) + Sort object by labels (along an axis). Parameters ---------- - axis : %(axes)s to direct sorting + axis : {0 or 'index', 1 or 'columns'}, default 0 + The axis along which to sort. The value 0 identifies the rows, + and 1 identifies the columns. level : int or level name or list of ints or list of level names - if not None, sort on values in specified index level(s) - ascending : boolean, default True - Sort ascending vs. descending + If not None, sort on values in specified index level(s). + ascending : bool, default True + Sort ascending vs. descending. inplace : bool, default False - if True, perform operation in-place + If True, perform operation in-place. kind : {'quicksort', 'mergesort', 'heapsort'}, default 'quicksort' - Choice of sorting algorithm. See also ndarray.np.sort for more - information. `mergesort` is the only stable algorithm. For - DataFrames, this option is only applied when sorting on a single - column or label. + Choice of sorting algorithm. See also ndarray.np.sort for more + information. `mergesort` is the only stable algorithm. For + DataFrames, this option is only applied when sorting on a single + column or label. na_position : {'first', 'last'}, default 'last' - `first` puts NaNs at the beginning, `last` puts NaNs at the end. - Not implemented for MultiIndex. + Puts NaNs at the beginning if `first`; `last` puts NaNs at the end. + Not implemented for MultiIndex. 
sort_remaining : bool, default True - if true and sorting by level and index is multilevel, sort by other - levels too (in order) after sorting by specified level + If True and sorting by level and index is multilevel, sort by other + levels too (in order) after sorting by specified level. Returns ------- - sorted_obj : %(klass)s + sorted_obj : DataFrame or None + DataFrame with sorted index if inplace=False, None otherwise. """ inplace = validate_bool_kwarg(inplace, 'inplace') axis = self._get_axis_number(axis) diff --git a/pandas/core/groupby/grouper.py b/pandas/core/groupby/grouper.py index 633a1643f6cdd..260417bc0d598 100644 --- a/pandas/core/groupby/grouper.py +++ b/pandas/core/groupby/grouper.py @@ -299,6 +299,7 @@ def __init__(self, index, grouper=None, obj=None, name=None, level=None, self._labels = self.grouper.codes if observed: codes = algorithms.unique1d(self.grouper.codes) + codes = codes[codes != -1] else: codes = np.arange(len(categories)) diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index 2a6044fb0a08b..0210560aaa21f 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -38,6 +38,7 @@ _index_doc_kwargs.update( dict(klass='IntervalIndex', + qualname="IntervalIndex", target_klass='IntervalIndex or list of Intervals', name=textwrap.dedent("""\ name : object, optional @@ -282,10 +283,10 @@ def contains(self, key): examples=""" Examples -------- - >>> idx = pd.IntervalIndex.from_arrays([0, np.nan, 2], [1, np.nan, 3]) - >>> idx.to_tuples() + >>> idx = pd.IntervalIndex.from_arrays([0, np.nan, 2], [1, np.nan, 3]) + >>> idx.to_tuples() Index([(0.0, 1.0), (nan, nan), (2.0, 3.0)], dtype='object') - >>> idx.to_tuples(na_tuple=False) + >>> idx.to_tuples(na_tuple=False) Index([(0.0, 1.0), nan, (2.0, 3.0)], dtype='object')""", )) def to_tuples(self, na_tuple=True): @@ -1201,15 +1202,15 @@ def interval_range(start=None, end=None, periods=None, freq=None, Numeric ``start`` and ``end`` is supported. 
>>> pd.interval_range(start=0, end=5) - IntervalIndex([(0, 1], (1, 2], (2, 3], (3, 4], (4, 5]] + IntervalIndex([(0, 1], (1, 2], (2, 3], (3, 4], (4, 5]], closed='right', dtype='interval[int64]') Additionally, datetime-like input is also supported. >>> pd.interval_range(start=pd.Timestamp('2017-01-01'), - end=pd.Timestamp('2017-01-04')) + ... end=pd.Timestamp('2017-01-04')) IntervalIndex([(2017-01-01, 2017-01-02], (2017-01-02, 2017-01-03], - (2017-01-03, 2017-01-04]] + (2017-01-03, 2017-01-04]], closed='right', dtype='interval[datetime64[ns]]') The ``freq`` parameter specifies the frequency between the left and right. @@ -1217,23 +1218,23 @@ def interval_range(start=None, end=None, periods=None, freq=None, numeric ``start`` and ``end``, the frequency must also be numeric. >>> pd.interval_range(start=0, periods=4, freq=1.5) - IntervalIndex([(0.0, 1.5], (1.5, 3.0], (3.0, 4.5], (4.5, 6.0]] + IntervalIndex([(0.0, 1.5], (1.5, 3.0], (3.0, 4.5], (4.5, 6.0]], closed='right', dtype='interval[float64]') Similarly, for datetime-like ``start`` and ``end``, the frequency must be convertible to a DateOffset. >>> pd.interval_range(start=pd.Timestamp('2017-01-01'), - periods=3, freq='MS') + ... periods=3, freq='MS') IntervalIndex([(2017-01-01, 2017-02-01], (2017-02-01, 2017-03-01], - (2017-03-01, 2017-04-01]] + (2017-03-01, 2017-04-01]], closed='right', dtype='interval[datetime64[ns]]') Specify ``start``, ``end``, and ``periods``; the frequency is generated automatically (linearly spaced). >>> pd.interval_range(start=0, end=6, periods=4) - IntervalIndex([(0.0, 1.5], (1.5, 3.0], (3.0, 4.5], (4.5, 6.0]] + IntervalIndex([(0.0, 1.5], (1.5, 3.0], (3.0, 4.5], (4.5, 6.0]], closed='right', dtype='interval[float64]') @@ -1241,7 +1242,7 @@ def interval_range(start=None, end=None, periods=None, freq=None, intervals within the ``IntervalIndex`` are closed. 
>>> pd.interval_range(end=5, periods=4, closed='both') - IntervalIndex([[1, 2], [2, 3], [3, 4], [4, 5]] + IntervalIndex([[1, 2], [2, 3], [3, 4], [4, 5]], closed='both', dtype='interval[int64]') """ start = com.maybe_box_datetimelike(start) diff --git a/pandas/core/internals/construction.py b/pandas/core/internals/construction.py index 7af347a141781..c05a9a0f8f3c7 100644 --- a/pandas/core/internals/construction.py +++ b/pandas/core/internals/construction.py @@ -93,7 +93,7 @@ def masked_rec_array_to_mgr(data, index, columns, dtype, copy): if columns is None: columns = arr_columns - mgr = arrays_to_mgr(arrays, arr_columns, index, columns) + mgr = arrays_to_mgr(arrays, arr_columns, index, columns, dtype) if copy: mgr = mgr.copy() diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py index cafd3a9915fa0..86c3c380636c9 100644 --- a/pandas/core/nanops.py +++ b/pandas/core/nanops.py @@ -14,7 +14,8 @@ _get_dtype, is_any_int_dtype, is_bool_dtype, is_complex, is_complex_dtype, is_datetime64_dtype, is_datetime64tz_dtype, is_datetime_or_timedelta_dtype, is_float, is_float_dtype, is_integer, is_integer_dtype, is_numeric_dtype, - is_object_dtype, is_scalar, is_timedelta64_dtype) + is_object_dtype, is_scalar, is_timedelta64_dtype, pandas_dtype) +from pandas.core.dtypes.dtypes import DatetimeTZDtype from pandas.core.dtypes.missing import isna, na_value_for_dtype, notna import pandas.core.common as com @@ -57,7 +58,7 @@ class disallow(object): def __init__(self, *dtypes): super(disallow, self).__init__() - self.dtypes = tuple(np.dtype(dtype).type for dtype in dtypes) + self.dtypes = tuple(pandas_dtype(dtype).type for dtype in dtypes) def check(self, obj): return hasattr(obj, 'dtype') and issubclass(obj.dtype.type, @@ -437,6 +438,7 @@ def nansum(values, axis=None, skipna=True, min_count=0, mask=None): return _wrap_results(the_sum, dtype) +@disallow('M8', DatetimeTZDtype) @bottleneck_switch() def nanmean(values, axis=None, skipna=True, mask=None): """ diff --git 
a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index 0a51f2ee0dce7..1dd19a7c1514e 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -760,6 +760,7 @@ def _get_join_info(self): join_index = self._create_join_index(self.left.index, self.right.index, left_indexer, + right_indexer, how='right') else: join_index = self.right.index.take(right_indexer) @@ -769,6 +770,7 @@ def _get_join_info(self): join_index = self._create_join_index(self.right.index, self.left.index, right_indexer, + left_indexer, how='left') else: join_index = self.left.index.take(left_indexer) @@ -780,7 +782,8 @@ def _get_join_info(self): join_index = join_index.astype(object) return join_index, left_indexer, right_indexer - def _create_join_index(self, index, other_index, indexer, how='left'): + def _create_join_index(self, index, other_index, indexer, + other_indexer, how='left'): """ Create a join index by rearranging one index to match another @@ -806,7 +809,8 @@ def _create_join_index(self, index, other_index, indexer, how='left'): # if values missing (-1) from target index, # take from other_index instead join_list = join_index.to_numpy() - join_list[mask] = other_index.to_numpy()[mask] + other_list = other_index.take(other_indexer).to_numpy() + join_list[mask] = other_list[mask] join_index = Index(join_list, dtype=join_index.dtype, name=join_index.name) return join_index diff --git a/pandas/core/reshape/util.py b/pandas/core/reshape/util.py index 7f43a0e9719b8..9d4135a7f310e 100644 --- a/pandas/core/reshape/util.py +++ b/pandas/core/reshape/util.py @@ -1,7 +1,5 @@ import numpy as np -from pandas.compat import reduce - from pandas.core.dtypes.common import is_list_like from pandas.core import common as com @@ -57,14 +55,3 @@ def cartesian_product(X): return [np.tile(np.repeat(np.asarray(com.values_from_object(x)), b[i]), np.product(a[i])) for i, x in enumerate(X)] - - -def _compose2(f, g): - """Compose 2 callables""" - return lambda *args, **kwargs: 
f(g(*args, **kwargs)) - - -def compose(*funcs): - """Compose 2 or more callables""" - assert len(funcs) > 1, 'At least 2 callables must be passed to compose' - return reduce(_compose2, funcs) diff --git a/pandas/core/series.py b/pandas/core/series.py index 0c8e697c572e8..a25aa86a47927 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -2857,13 +2857,13 @@ def sort_index(self, axis=0, level=None, ascending=True, inplace=False, If 'first' puts NaNs at the beginning, 'last' puts NaNs at the end. Not implemented for MultiIndex. sort_remaining : bool, default True - If true and sorting by level and index is multilevel, sort by other + If True and sorting by level and index is multilevel, sort by other levels too (in order) after sorting by specified level. Returns ------- pandas.Series - The original Series sorted by the labels + The original Series sorted by the labels. See Also -------- diff --git a/pandas/core/tools/numeric.py b/pandas/core/tools/numeric.py index 803723dab46ff..79d8ee38637f9 100644 --- a/pandas/core/tools/numeric.py +++ b/pandas/core/tools/numeric.py @@ -21,7 +21,7 @@ def to_numeric(arg, errors='raise', downcast=None): Parameters ---------- - arg : list, tuple, 1-d array, or Series + arg : scalar, list, tuple, 1-d array, or Series errors : {'ignore', 'raise', 'coerce'}, default 'raise' - If 'raise', then invalid parsing will raise an exception - If 'coerce', then invalid parsing will be set as NaN diff --git a/pandas/io/excel.py b/pandas/io/excel.py index 3a7c39ec65309..3d85ae7fd1f46 100644 --- a/pandas/io/excel.py +++ b/pandas/io/excel.py @@ -375,60 +375,25 @@ def read_excel(io, **kwds) -class _XlrdReader(object): - - def __init__(self, filepath_or_buffer): - """Reader using xlrd engine. - - Parameters - ---------- - filepath_or_buffer : string, path object or Workbook - Object to be parsed. 
- """ - err_msg = "Install xlrd >= 1.0.0 for Excel support" - - try: - import xlrd - except ImportError: - raise ImportError(err_msg) - else: - if xlrd.__VERSION__ < LooseVersion("1.0.0"): - raise ImportError(err_msg + - ". Current version " + xlrd.__VERSION__) +@add_metaclass(abc.ABCMeta) +class _BaseExcelReader(object): - # If filepath_or_buffer is a url, want to keep the data as bytes so - # can't pass to get_filepath_or_buffer() - if _is_url(filepath_or_buffer): - filepath_or_buffer = _urlopen(filepath_or_buffer) - elif not isinstance(filepath_or_buffer, (ExcelFile, xlrd.Book)): - filepath_or_buffer, _, _, _ = get_filepath_or_buffer( - filepath_or_buffer) + @property + @abc.abstractmethod + def sheet_names(self): + pass - if isinstance(filepath_or_buffer, xlrd.Book): - self.book = filepath_or_buffer - elif not isinstance(filepath_or_buffer, xlrd.Book) and hasattr( - filepath_or_buffer, "read"): - # N.B. xlrd.Book has a read attribute too - if hasattr(filepath_or_buffer, 'seek'): - try: - # GH 19779 - filepath_or_buffer.seek(0) - except UnsupportedOperation: - # HTTPResponse does not support seek() - # GH 20434 - pass + @abc.abstractmethod + def get_sheet_by_name(self, name): + pass - data = filepath_or_buffer.read() - self.book = xlrd.open_workbook(file_contents=data) - elif isinstance(filepath_or_buffer, compat.string_types): - self.book = xlrd.open_workbook(filepath_or_buffer) - else: - raise ValueError('Must explicitly set engine if not passing in' - ' buffer or path for io.') + @abc.abstractmethod + def get_sheet_by_index(self, index): + pass - @property - def sheet_names(self): - return self.book.sheet_names() + @abc.abstractmethod + def get_sheet_data(self, sheet, convert_float): + pass def parse(self, sheet_name=0, @@ -455,48 +420,6 @@ def parse(self, _validate_header_arg(header) - from xlrd import (xldate, XL_CELL_DATE, - XL_CELL_ERROR, XL_CELL_BOOLEAN, - XL_CELL_NUMBER) - - epoch1904 = self.book.datemode - - def _parse_cell(cell_contents, cell_typ): - 
"""converts the contents of the cell into a pandas - appropriate object""" - - if cell_typ == XL_CELL_DATE: - - # Use the newer xlrd datetime handling. - try: - cell_contents = xldate.xldate_as_datetime( - cell_contents, epoch1904) - except OverflowError: - return cell_contents - - # Excel doesn't distinguish between dates and time, - # so we treat dates on the epoch as times only. - # Also, Excel supports 1900 and 1904 epochs. - year = (cell_contents.timetuple())[0:3] - if ((not epoch1904 and year == (1899, 12, 31)) or - (epoch1904 and year == (1904, 1, 1))): - cell_contents = time(cell_contents.hour, - cell_contents.minute, - cell_contents.second, - cell_contents.microsecond) - - elif cell_typ == XL_CELL_ERROR: - cell_contents = np.nan - elif cell_typ == XL_CELL_BOOLEAN: - cell_contents = bool(cell_contents) - elif convert_float and cell_typ == XL_CELL_NUMBER: - # GH5394 - Excel 'numbers' are always floats - # it's a minimal perf hit and less surprising - val = int(cell_contents) - if val == cell_contents: - cell_contents = val - return cell_contents - ret_dict = False # Keep sheetname to maintain backwards compatibility. 
@@ -504,7 +427,7 @@ def _parse_cell(cell_contents, cell_typ): sheets = sheet_name ret_dict = True elif sheet_name is None: - sheets = self.book.sheet_names() + sheets = self.sheet_names ret_dict = True else: sheets = [sheet_name] @@ -519,19 +442,13 @@ def _parse_cell(cell_contents, cell_typ): print("Reading sheet {sheet}".format(sheet=asheetname)) if isinstance(asheetname, compat.string_types): - sheet = self.book.sheet_by_name(asheetname) + sheet = self.get_sheet_by_name(asheetname) else: # assume an integer if not a string - sheet = self.book.sheet_by_index(asheetname) + sheet = self.get_sheet_by_index(asheetname) - data = [] + data = self.get_sheet_data(sheet, convert_float) usecols = _maybe_convert_usecols(usecols) - for i in range(sheet.nrows): - row = [_parse_cell(value, typ) - for value, typ in zip(sheet.row_values(i), - sheet.row_types(i))] - data.append(row) - if sheet.nrows == 0: output[asheetname] = DataFrame() continue @@ -620,6 +537,120 @@ def _parse_cell(cell_contents, cell_typ): return output[asheetname] +class _XlrdReader(_BaseExcelReader): + + def __init__(self, filepath_or_buffer): + """Reader using xlrd engine. + + Parameters + ---------- + filepath_or_buffer : string, path object or Workbook + Object to be parsed. + """ + err_msg = "Install xlrd >= 1.0.0 for Excel support" + + try: + import xlrd + except ImportError: + raise ImportError(err_msg) + else: + if xlrd.__VERSION__ < LooseVersion("1.0.0"): + raise ImportError(err_msg + + ". Current version " + xlrd.__VERSION__) + + # If filepath_or_buffer is a url, want to keep the data as bytes so + # can't pass to get_filepath_or_buffer() + if _is_url(filepath_or_buffer): + filepath_or_buffer = _urlopen(filepath_or_buffer) + elif not isinstance(filepath_or_buffer, (ExcelFile, xlrd.Book)): + filepath_or_buffer, _, _, _ = get_filepath_or_buffer( + filepath_or_buffer) + + if isinstance(filepath_or_buffer, xlrd.Book): + self.book = filepath_or_buffer + elif hasattr(filepath_or_buffer, "read"): + # N.B. 
xlrd.Book has a read attribute too + if hasattr(filepath_or_buffer, 'seek'): + try: + # GH 19779 + filepath_or_buffer.seek(0) + except UnsupportedOperation: + # HTTPResponse does not support seek() + # GH 20434 + pass + + data = filepath_or_buffer.read() + self.book = xlrd.open_workbook(file_contents=data) + elif isinstance(filepath_or_buffer, compat.string_types): + self.book = xlrd.open_workbook(filepath_or_buffer) + else: + raise ValueError('Must explicitly set engine if not passing in' + ' buffer or path for io.') + + @property + def sheet_names(self): + return self.book.sheet_names() + + def get_sheet_by_name(self, name): + return self.book.sheet_by_name(name) + + def get_sheet_by_index(self, index): + return self.book.sheet_by_index(index) + + def get_sheet_data(self, sheet, convert_float): + from xlrd import (xldate, XL_CELL_DATE, + XL_CELL_ERROR, XL_CELL_BOOLEAN, + XL_CELL_NUMBER) + + epoch1904 = self.book.datemode + + def _parse_cell(cell_contents, cell_typ): + """converts the contents of the cell into a pandas + appropriate object""" + + if cell_typ == XL_CELL_DATE: + + # Use the newer xlrd datetime handling. + try: + cell_contents = xldate.xldate_as_datetime( + cell_contents, epoch1904) + except OverflowError: + return cell_contents + + # Excel doesn't distinguish between dates and time, + # so we treat dates on the epoch as times only. + # Also, Excel supports 1900 and 1904 epochs. 
+ year = (cell_contents.timetuple())[0:3] + if ((not epoch1904 and year == (1899, 12, 31)) or + (epoch1904 and year == (1904, 1, 1))): + cell_contents = time(cell_contents.hour, + cell_contents.minute, + cell_contents.second, + cell_contents.microsecond) + + elif cell_typ == XL_CELL_ERROR: + cell_contents = np.nan + elif cell_typ == XL_CELL_BOOLEAN: + cell_contents = bool(cell_contents) + elif convert_float and cell_typ == XL_CELL_NUMBER: + # GH5394 - Excel 'numbers' are always floats + # it's a minimal perf hit and less surprising + val = int(cell_contents) + if val == cell_contents: + cell_contents = val + return cell_contents + + data = [] + + for i in range(sheet.nrows): + row = [_parse_cell(value, typ) + for value, typ in zip(sheet.row_values(i), + sheet.row_types(i))] + data.append(row) + + return data + + class ExcelFile(object): """ Class for parsing tabular excel sheets into DataFrame objects. diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py index bdeed58d856cc..62fa04e784072 100644 --- a/pandas/io/formats/format.py +++ b/pandas/io/formats/format.py @@ -435,9 +435,6 @@ def _chk_truncate(self): """ from pandas.core.reshape.concat import concat - # Column of which first element is used to determine width of a dot col - self.tr_size_col = -1 - # Cut the data to the information actually printed max_cols = self.max_cols max_rows = self.max_rows @@ -556,10 +553,7 @@ def _to_str_columns(self): if truncate_h: col_num = self.tr_col_num - # infer from column header - col_width = self.adj.len(strcols[self.tr_size_col][0]) - strcols.insert(self.tr_col_num + 1, ['...'.center(col_width)] * - (len(str_index))) + strcols.insert(self.tr_col_num + 1, [' ...'] * (len(str_index))) if truncate_v: n_header_rows = len(str_index) - len(frame) row_num = self.tr_row_num @@ -577,8 +571,8 @@ def _to_str_columns(self): if ix == 0: dot_mode = 'left' elif is_dot_col: - cwidth = self.adj.len(strcols[self.tr_size_col][0]) - dot_mode = 'center' + cwidth = 4 + dot_mode 
= 'right' else: dot_mode = 'right' dot_str = self.adj.justify([my_str], cwidth, mode=dot_mode)[0] @@ -1414,16 +1408,20 @@ def _trim_zeros(str_floats, na_rep='NaN'): """ trimmed = str_floats + def _is_number(x): + return (x != na_rep and not x.endswith('inf')) + def _cond(values): - non_na = [x for x in values if x != na_rep] - return (len(non_na) > 0 and all(x.endswith('0') for x in non_na) and - not (any(('e' in x) or ('E' in x) for x in non_na))) + finite = [x for x in values if _is_number(x)] + return (len(finite) > 0 and all(x.endswith('0') for x in finite) and + not (any(('e' in x) or ('E' in x) for x in finite))) while _cond(trimmed): - trimmed = [x[:-1] if x != na_rep else x for x in trimmed] + trimmed = [x[:-1] if _is_number(x) else x for x in trimmed] # leave one 0 after the decimal points if need be. - return [x + "0" if x.endswith('.') and x != na_rep else x for x in trimmed] + return [x + "0" if x.endswith('.') and _is_number(x) else x + for x in trimmed] def _has_names(index): diff --git a/pandas/io/msgpack/_packer.pyx b/pandas/io/msgpack/_packer.pyx index d67c632188e62..8e2d943d8ddb1 100644 --- a/pandas/io/msgpack/_packer.pyx +++ b/pandas/io/msgpack/_packer.pyx @@ -74,14 +74,15 @@ cdef class Packer(object): Use bin type introduced in msgpack spec 2.0 for bytes. It also enable str8 type for unicode. 
""" - cdef msgpack_packer pk - cdef object _default - cdef object _bencoding - cdef object _berrors - cdef char *encoding - cdef char *unicode_errors - cdef bint use_float - cdef bint autoreset + cdef: + msgpack_packer pk + object _default + object _bencoding + object _berrors + char *encoding + char *unicode_errors + bint use_float + bint autoreset def __cinit__(self): cdef int buf_size = 1024 * 1024 @@ -123,16 +124,17 @@ cdef class Packer(object): cdef int _pack(self, object o, int nest_limit=DEFAULT_RECURSE_LIMIT) except -1: - cdef long long llval - cdef unsigned long long ullval - cdef long longval - cdef float fval - cdef double dval - cdef char* rawval - cdef int ret - cdef dict d - cdef size_t L - cdef int default_used = 0 + cdef: + long long llval + unsigned long long ullval + long longval + float fval + double dval + char* rawval + int ret + dict d + size_t L + int default_used = 0 if nest_limit < 0: raise PackValueError("recursion limit exceeded.") diff --git a/pandas/io/msgpack/_unpacker.pyx b/pandas/io/msgpack/_unpacker.pyx index 0c50aa5e68103..9bbfe749ef9ba 100644 --- a/pandas/io/msgpack/_unpacker.pyx +++ b/pandas/io/msgpack/_unpacker.pyx @@ -120,14 +120,15 @@ def unpackb(object packed, object object_hook=None, object list_hook=None, See :class:`Unpacker` for options. """ - cdef unpack_context ctx - cdef size_t off = 0 - cdef int ret + cdef: + unpack_context ctx + size_t off = 0 + int ret - cdef char* buf - cdef Py_ssize_t buf_len - cdef char* cenc = NULL - cdef char* cerr = NULL + char* buf + Py_ssize_t buf_len + char* cenc = NULL + char* cerr = NULL PyObject_AsReadBuffer(packed, <const void**>&buf, &buf_len) @@ -243,16 +244,17 @@ cdef class Unpacker(object): for o in unpacker: process(o) """ - cdef unpack_context ctx - cdef char* buf - cdef size_t buf_size, buf_head, buf_tail - cdef object file_like - cdef object file_like_read - cdef Py_ssize_t read_size - # To maintain refcnt. 
- cdef object object_hook, object_pairs_hook, list_hook, ext_hook - cdef object encoding, unicode_errors - cdef size_t max_buffer_size + cdef: + unpack_context ctx + char* buf + size_t buf_size, buf_head, buf_tail + object file_like + object file_like_read + Py_ssize_t read_size + # To maintain refcnt. + object object_hook, object_pairs_hook, list_hook, ext_hook + object encoding, unicode_errors + size_t max_buffer_size def __cinit__(self): self.buf = NULL @@ -270,8 +272,9 @@ cdef class Unpacker(object): Py_ssize_t max_array_len=2147483647, Py_ssize_t max_map_len=2147483647, Py_ssize_t max_ext_len=2147483647): - cdef char *cenc=NULL, - cdef char *cerr=NULL + cdef: + char *cenc=NULL, + char *cerr=NULL self.object_hook = object_hook self.object_pairs_hook = object_pairs_hook @@ -388,9 +391,10 @@ cdef class Unpacker(object): cdef object _unpack(self, execute_fn execute, object write_bytes, bint iter=0): - cdef int ret - cdef object obj - cdef size_t prev_head + cdef: + int ret + object obj + size_t prev_head if self.buf_head >= self.buf_tail and self.file_like is not None: self.read_from_file() diff --git a/pandas/io/sas/sas.pyx b/pandas/io/sas/sas.pyx index a5bfd5866a261..9b8fba16741f6 100644 --- a/pandas/io/sas/sas.pyx +++ b/pandas/io/sas/sas.pyx @@ -203,11 +203,12 @@ cdef enum ColumnTypes: # type the page_data types -cdef int page_meta_type = const.page_meta_type -cdef int page_mix_types_0 = const.page_mix_types[0] -cdef int page_mix_types_1 = const.page_mix_types[1] -cdef int page_data_type = const.page_data_type -cdef int subheader_pointers_offset = const.subheader_pointers_offset +cdef: + int page_meta_type = const.page_meta_type + int page_mix_types_0 = const.page_mix_types[0] + int page_mix_types_1 = const.page_mix_types[1] + int page_data_type = const.page_data_type + int subheader_pointers_offset = const.subheader_pointers_offset cdef class Parser(object): diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py index 3ba06c0638317..12810c73f11f0 
100644 --- a/pandas/plotting/_core.py +++ b/pandas/plotting/_core.py @@ -26,7 +26,6 @@ from pandas.core.generic import _shared_doc_kwargs, _shared_docs from pandas.io.formats.printing import pprint_thing -from pandas.plotting import _misc as misc from pandas.plotting._compat import _mpl_ge_3_0_0 from pandas.plotting._style import _get_standard_colors, plot_params from pandas.plotting._tools import ( @@ -309,10 +308,20 @@ def _setup_subplots(self): axes = _flatten(axes) - if self.logx or self.loglog: + valid_log = [False, True, 'sym', None] + for i in (self.logx, self.logy, self.loglog): + if i not in valid_log: + raise ValueError("Valid inputs are boolean, None and 'sym'.") + + if self.logx is True or self.loglog is True: [a.set_xscale('log') for a in axes] - if self.logy or self.loglog: + elif self.logx == 'sym' or self.loglog == 'sym': + [a.set_xscale('symlog') for a in axes] + + if self.logy is True or self.loglog is True: [a.set_yscale('log') for a in axes] + elif self.logy == 'sym' or self.loglog == 'sym': + [a.set_yscale('symlog') for a in axes] self.fig = fig self.axes = axes @@ -2906,15 +2915,6 @@ def pie(self, **kwds): """ return self(kind='pie', **kwds) - def lag(self, *args, **kwds): - return misc.lag_plot(self._parent, *args, **kwds) - - def autocorrelation(self, *args, **kwds): - return misc.autocorrelation_plot(self._parent, *args, **kwds) - - def bootstrap(self, *args, **kwds): - return misc.bootstrap_plot(self._parent, *args, **kwds) - class FramePlotMethods(BasePlotMethods): """DataFrame plotting accessor and method @@ -3610,16 +3610,3 @@ def hexbin(self, x, y, C=None, reduce_C_function=None, gridsize=None, if gridsize is not None: kwds['gridsize'] = gridsize return self(kind='hexbin', x=x, y=y, C=C, **kwds) - - def scatter_matrix(self, *args, **kwds): - return misc.scatter_matrix(self._parent, *args, **kwds) - - def andrews_curves(self, class_column, *args, **kwds): - return misc.andrews_curves(self._parent, class_column, *args, **kwds) - - def 
parallel_coordinates(self, class_column, *args, **kwds): - return misc.parallel_coordinates(self._parent, class_column, - *args, **kwds) - - def radviz(self, class_column, *args, **kwds): - return misc.radviz(self._parent, class_column, *args, **kwds) diff --git a/pandas/tests/api/test_api.py b/pandas/tests/api/test_api.py index 07cf358c765b3..599ab9a3c5f7c 100644 --- a/pandas/tests/api/test_api.py +++ b/pandas/tests/api/test_api.py @@ -46,7 +46,6 @@ class TestPDApi(Base): 'Series', 'SparseArray', 'SparseDataFrame', 'SparseDtype', 'SparseSeries', 'Timedelta', 'TimedeltaIndex', 'Timestamp', 'Interval', 'IntervalIndex', - 'IntervalArray', 'CategoricalDtype', 'PeriodDtype', 'IntervalDtype', 'DatetimeTZDtype', 'Int8Dtype', 'Int16Dtype', 'Int32Dtype', 'Int64Dtype', diff --git a/pandas/tests/arithmetic/test_datetime64.py b/pandas/tests/arithmetic/test_datetime64.py index f97a1651163e8..405dc0805a285 100644 --- a/pandas/tests/arithmetic/test_datetime64.py +++ b/pandas/tests/arithmetic/test_datetime64.py @@ -124,14 +124,14 @@ def test_comparison_invalid(self, box_with_array): result = x != y expected = tm.box_expected([True] * 5, xbox) tm.assert_equal(result, expected) - - with pytest.raises(TypeError): + msg = 'Invalid comparison between' + with pytest.raises(TypeError, match=msg): x >= y - with pytest.raises(TypeError): + with pytest.raises(TypeError, match=msg): x > y - with pytest.raises(TypeError): + with pytest.raises(TypeError, match=msg): x < y - with pytest.raises(TypeError): + with pytest.raises(TypeError, match=msg): x <= y @pytest.mark.parametrize('data', [ @@ -327,9 +327,10 @@ def test_comparison_tzawareness_compat(self, op): # raise naive_series = Series(dr) aware_series = Series(dz) - with pytest.raises(TypeError): + msg = 'Cannot compare tz-naive and tz-aware' + with pytest.raises(TypeError, match=msg): op(dz, naive_series) - with pytest.raises(TypeError): + with pytest.raises(TypeError, match=msg): op(dr, aware_series) # TODO: implement 
_assert_tzawareness_compat for the reverse @@ -428,14 +429,14 @@ def test_dti_cmp_null_scalar_inequality(self, tz_naive_fixture, other, dti = pd.date_range('2016-01-01', periods=2, tz=tz) # FIXME: ValueError with transpose dtarr = tm.box_expected(dti, box_with_array, transpose=False) - - with pytest.raises(TypeError): + msg = 'Invalid comparison between' + with pytest.raises(TypeError, match=msg): dtarr < other - with pytest.raises(TypeError): + with pytest.raises(TypeError, match=msg): dtarr <= other - with pytest.raises(TypeError): + with pytest.raises(TypeError, match=msg): dtarr > other - with pytest.raises(TypeError): + with pytest.raises(TypeError, match=msg): dtarr >= other @pytest.mark.parametrize('dtype', [None, object]) @@ -584,22 +585,23 @@ def test_comparison_tzawareness_compat(self, op, box_with_array): dr = tm.box_expected(dr, box_with_array, transpose=False) dz = tm.box_expected(dz, box_with_array, transpose=False) - with pytest.raises(TypeError): + msg = 'Cannot compare tz-naive and tz-aware' + with pytest.raises(TypeError, match=msg): op(dr, dz) if box_with_array is not pd.DataFrame: # DataFrame op is invalid until transpose bug is fixed - with pytest.raises(TypeError): + with pytest.raises(TypeError, match=msg): op(dr, list(dz)) - with pytest.raises(TypeError): + with pytest.raises(TypeError, match=msg): op(dr, np.array(list(dz), dtype=object)) - with pytest.raises(TypeError): + with pytest.raises(TypeError, match=msg): op(dz, dr) if box_with_array is not pd.DataFrame: # DataFrame op is invalid until transpose bug is fixed - with pytest.raises(TypeError): + with pytest.raises(TypeError, match=msg): op(dz, list(dr)) - with pytest.raises(TypeError): + with pytest.raises(TypeError, match=msg): op(dz, np.array(list(dr), dtype=object)) # Check that there isn't a problem aware-aware and naive-naive do not @@ -617,15 +619,15 @@ def test_comparison_tzawareness_compat(self, op, box_with_array): ts_tz = pd.Timestamp('2000-03-14 01:59', 
tz='Europe/Amsterdam') assert_all(dr > ts) - with pytest.raises(TypeError): + with pytest.raises(TypeError, match=msg): op(dr, ts_tz) assert_all(dz > ts_tz) - with pytest.raises(TypeError): + with pytest.raises(TypeError, match=msg): op(dz, ts) # GH#12601: Check comparison against Timestamps and DatetimeIndex - with pytest.raises(TypeError): + with pytest.raises(TypeError, match=msg): op(ts, dz) @pytest.mark.parametrize('op', [operator.eq, operator.ne, @@ -641,10 +643,10 @@ def test_scalar_comparison_tzawareness(self, op, other, tz_aware_fixture, # FIXME: ValueError with transpose dtarr = tm.box_expected(dti, box_with_array, transpose=False) - - with pytest.raises(TypeError): + msg = 'Cannot compare tz-naive and tz-aware' + with pytest.raises(TypeError, match=msg): op(dtarr, other) - with pytest.raises(TypeError): + with pytest.raises(TypeError, match=msg): op(other, dtarr) @pytest.mark.parametrize('op', [operator.eq, operator.ne, @@ -714,14 +716,14 @@ def test_dt64arr_cmp_scalar_invalid(self, other, tz_naive_fixture, expected = np.array([True] * 10) expected = tm.box_expected(expected, xbox, transpose=False) tm.assert_equal(result, expected) - - with pytest.raises(TypeError): + msg = 'Invalid comparison between' + with pytest.raises(TypeError, match=msg): rng < other - with pytest.raises(TypeError): + with pytest.raises(TypeError, match=msg): rng <= other - with pytest.raises(TypeError): + with pytest.raises(TypeError, match=msg): rng > other - with pytest.raises(TypeError): + with pytest.raises(TypeError, match=msg): rng >= other def test_dti_cmp_list(self): @@ -749,14 +751,14 @@ def test_dti_cmp_tdi_tzawareness(self, other): result = dti != other expected = np.array([True] * 10) tm.assert_numpy_array_equal(result, expected) - - with pytest.raises(TypeError): + msg = 'Invalid comparison between' + with pytest.raises(TypeError, match=msg): dti < other - with pytest.raises(TypeError): + with pytest.raises(TypeError, match=msg): dti <= other - with 
pytest.raises(TypeError): + with pytest.raises(TypeError, match=msg): dti > other - with pytest.raises(TypeError): + with pytest.raises(TypeError, match=msg): dti >= other def test_dti_cmp_object_dtype(self): @@ -770,7 +772,8 @@ def test_dti_cmp_object_dtype(self): tm.assert_numpy_array_equal(result, expected) other = dti.tz_localize(None) - with pytest.raises(TypeError): + msg = 'Cannot compare tz-naive and tz-aware' + with pytest.raises(TypeError, match=msg): # tzawareness failure dti != other @@ -778,8 +781,8 @@ def test_dti_cmp_object_dtype(self): result = dti == other expected = np.array([True] * 5 + [False] * 5) tm.assert_numpy_array_equal(result, expected) - - with pytest.raises(TypeError): + msg = "Cannot compare type" + with pytest.raises(TypeError, match=msg): dti >= other @@ -898,7 +901,8 @@ def test_dt64arr_add_sub_td64_nat(self, box_with_array, tz_naive_fixture): tm.assert_equal(result, expected) result = obj - other tm.assert_equal(result, expected) - with pytest.raises(TypeError): + msg = 'cannot subtract' + with pytest.raises(TypeError, match=msg): other - obj def test_dt64arr_add_sub_td64ndarray(self, tz_naive_fixture, @@ -927,8 +931,8 @@ def test_dt64arr_add_sub_td64ndarray(self, tz_naive_fixture, result = dtarr - tdarr tm.assert_equal(result, expected) - - with pytest.raises(TypeError): + msg = 'cannot subtract' + with pytest.raises(TypeError, match=msg): tdarr - dtarr # ----------------------------------------------------------------- @@ -1028,10 +1032,10 @@ def test_dt64arr_aware_sub_dt64ndarray_raises(self, tz_aware_fixture, dt64vals = dti.values dtarr = tm.box_expected(dti, box_with_array) - - with pytest.raises(TypeError): + msg = 'DatetimeArray subtraction must have the same timezones or' + with pytest.raises(TypeError, match=msg): dtarr - dt64vals - with pytest.raises(TypeError): + with pytest.raises(TypeError, match=msg): dt64vals - dtarr # ------------------------------------------------------------- @@ -1048,17 +1052,17 @@ def 
test_dt64arr_add_dt64ndarray_raises(self, tz_naive_fixture, dt64vals = dti.values dtarr = tm.box_expected(dti, box_with_array) - - with pytest.raises(TypeError): + msg = 'cannot add' + with pytest.raises(TypeError, match=msg): dtarr + dt64vals - with pytest.raises(TypeError): + with pytest.raises(TypeError, match=msg): dt64vals + dtarr def test_dt64arr_add_timestamp_raises(self, box_with_array): # GH#22163 ensure DataFrame doesn't cast Timestamp to i8 idx = DatetimeIndex(['2011-01-01', '2011-01-02']) idx = tm.box_expected(idx, box_with_array) - msg = "cannot add" + msg = 'cannot add' with pytest.raises(TypeError, match=msg): idx + Timestamp('2011-01-01') with pytest.raises(TypeError, match=msg): @@ -1071,13 +1075,14 @@ def test_dt64arr_add_timestamp_raises(self, box_with_array): def test_dt64arr_add_sub_float(self, other, box_with_array): dti = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D') dtarr = tm.box_expected(dti, box_with_array) - with pytest.raises(TypeError): + msg = '|'.join(['unsupported operand type', 'cannot (add|subtract)']) + with pytest.raises(TypeError, match=msg): dtarr + other - with pytest.raises(TypeError): + with pytest.raises(TypeError, match=msg): other + dtarr - with pytest.raises(TypeError): + with pytest.raises(TypeError, match=msg): dtarr - other - with pytest.raises(TypeError): + with pytest.raises(TypeError, match=msg): other - dtarr @pytest.mark.parametrize('pi_freq', ['D', 'W', 'Q', 'H']) @@ -1090,14 +1095,15 @@ def test_dt64arr_add_sub_parr(self, dti_freq, pi_freq, dtarr = tm.box_expected(dti, box_with_array) parr = tm.box_expected(pi, box_with_array2) - - with pytest.raises(TypeError): + msg = '|'.join(['cannot (add|subtract)', 'unsupported operand', + 'descriptor.*requires', 'ufunc.*cannot use operands']) + with pytest.raises(TypeError, match=msg): dtarr + parr - with pytest.raises(TypeError): + with pytest.raises(TypeError, match=msg): parr + dtarr - with pytest.raises(TypeError): + with pytest.raises(TypeError, match=msg): 
dtarr - parr - with pytest.raises(TypeError): + with pytest.raises(TypeError, match=msg): parr - dtarr @pytest.mark.parametrize('dti_freq', [None, 'D']) @@ -1108,14 +1114,14 @@ def test_dt64arr_add_sub_period_scalar(self, dti_freq, box_with_array): idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], freq=dti_freq) dtarr = tm.box_expected(idx, box_with_array) - - with pytest.raises(TypeError): + msg = '|'.join(['unsupported operand type', 'cannot (add|subtract)']) + with pytest.raises(TypeError, match=msg): dtarr + per - with pytest.raises(TypeError): + with pytest.raises(TypeError, match=msg): per + dtarr - with pytest.raises(TypeError): + with pytest.raises(TypeError, match=msg): dtarr - per - with pytest.raises(TypeError): + with pytest.raises(TypeError, match=msg): per - dtarr @@ -1156,8 +1162,8 @@ def test_dt64arr_series_sub_tick_DateOffset(self, box_with_array): result2 = -pd.offsets.Second(5) + ser tm.assert_equal(result2, expected) - - with pytest.raises(TypeError): + msg = "bad operand type for unary" + with pytest.raises(TypeError, match=msg): pd.offsets.Second(5) - ser @pytest.mark.parametrize('cls_name', ['Day', 'Hour', 'Minute', 'Second', @@ -1239,8 +1245,8 @@ def test_dt64arr_add_sub_relativedelta_offsets(self, box_with_array): expected = DatetimeIndex([x - off for x in vec_items]) expected = tm.box_expected(expected, box_with_array) tm.assert_equal(expected, vec - off) - - with pytest.raises(TypeError): + msg = "bad operand type for unary" + with pytest.raises(TypeError, match=msg): off - vec # ------------------------------------------------------------- @@ -1320,8 +1326,8 @@ def test_dt64arr_add_sub_DateOffsets(self, box_with_array, expected = DatetimeIndex([offset + x for x in vec_items]) expected = tm.box_expected(expected, box_with_array) tm.assert_equal(expected, offset + vec) - - with pytest.raises(TypeError): + msg = "bad operand type for unary" + with pytest.raises(TypeError, match=msg): offset - vec def test_dt64arr_add_sub_DateOffset(self, 
box_with_array): @@ -1440,13 +1446,14 @@ def test_dt64_series_arith_overflow(self): td = pd.Timedelta('20000 Days') dti = pd.date_range('1949-09-30', freq='100Y', periods=4) ser = pd.Series(dti) - with pytest.raises(OverflowError): + msg = 'Overflow in int64 addition' + with pytest.raises(OverflowError, match=msg): ser - dt - with pytest.raises(OverflowError): + with pytest.raises(OverflowError, match=msg): dt - ser - with pytest.raises(OverflowError): + with pytest.raises(OverflowError, match=msg): ser + td - with pytest.raises(OverflowError): + with pytest.raises(OverflowError, match=msg): td + ser ser.iloc[-1] = pd.NaT @@ -1480,9 +1487,9 @@ def test_datetimeindex_sub_timestamp_overflow(self): tspos.to_pydatetime(), tspos.to_datetime64().astype('datetime64[ns]'), tspos.to_datetime64().astype('datetime64[D]')] - + msg = 'Overflow in int64 addition' for variant in ts_neg_variants: - with pytest.raises(OverflowError): + with pytest.raises(OverflowError, match=msg): dtimax - variant expected = pd.Timestamp.max.value - tspos.value @@ -1496,7 +1503,7 @@ def test_datetimeindex_sub_timestamp_overflow(self): assert res[1].value == expected for variant in ts_pos_variants: - with pytest.raises(OverflowError): + with pytest.raises(OverflowError, match=msg): dtimin - variant def test_datetimeindex_sub_datetimeindex_overflow(self): @@ -1515,22 +1522,22 @@ def test_datetimeindex_sub_datetimeindex_overflow(self): expected = pd.Timestamp.min.value - ts_neg[1].value result = dtimin - ts_neg assert result[1].value == expected - - with pytest.raises(OverflowError): + msg = 'Overflow in int64 addition' + with pytest.raises(OverflowError, match=msg): dtimax - ts_neg - with pytest.raises(OverflowError): + with pytest.raises(OverflowError, match=msg): dtimin - ts_pos # Edge cases tmin = pd.to_datetime([pd.Timestamp.min]) t1 = tmin + pd.Timedelta.max + pd.Timedelta('1us') - with pytest.raises(OverflowError): + with pytest.raises(OverflowError, match=msg): t1 - tmin tmax = 
pd.to_datetime([pd.Timestamp.max]) t2 = tmax + pd.Timedelta.min - pd.Timedelta('1us') - with pytest.raises(OverflowError): + with pytest.raises(OverflowError, match=msg): tmax - t2 @@ -1543,7 +1550,8 @@ def test_empty_series_add_sub(self): tm.assert_series_equal(a, a + b) tm.assert_series_equal(a, a - b) tm.assert_series_equal(a, b + a) - with pytest.raises(TypeError): + msg = 'cannot subtract' + with pytest.raises(TypeError, match=msg): b - a def test_operators_datetimelike(self): @@ -1688,12 +1696,13 @@ def test_datetime64_ops_nat(self): # subtraction tm.assert_series_equal(-NaT + datetime_series, nat_series_dtype_timestamp) - with pytest.raises(TypeError): + msg = 'Unary negative expects' + with pytest.raises(TypeError, match=msg): -single_nat_dtype_datetime + datetime_series tm.assert_series_equal(-NaT + nat_series_dtype_timestamp, nat_series_dtype_timestamp) - with pytest.raises(TypeError): + with pytest.raises(TypeError, match=msg): -single_nat_dtype_datetime + nat_series_dtype_timestamp # addition @@ -1718,15 +1727,16 @@ def test_datetime64_ops_nat(self): @pytest.mark.parametrize('one', [1, 1.0, np.array(1)]) def test_dt64_mul_div_numeric_invalid(self, one, dt64_series): # multiplication - with pytest.raises(TypeError): + msg = 'cannot perform .* with this index type' + with pytest.raises(TypeError, match=msg): dt64_series * one - with pytest.raises(TypeError): + with pytest.raises(TypeError, match=msg): one * dt64_series # division - with pytest.raises(TypeError): + with pytest.raises(TypeError, match=msg): dt64_series / one - with pytest.raises(TypeError): + with pytest.raises(TypeError, match=msg): one / dt64_series @pytest.mark.parametrize('op', ['__add__', '__radd__', @@ -1740,13 +1750,17 @@ def test_dt64_series_add_intlike(self, tz, op): other = Series([20, 30, 40], dtype='uint8') method = getattr(ser, op) - with pytest.raises(TypeError): + msg = '|'.join(['incompatible type for a .* operation', + 'cannot evaluate a numeric op', + 'ufunc .* cannot use 
operands', + 'cannot (add|subtract)']) + with pytest.raises(TypeError, match=msg): method(1) - with pytest.raises(TypeError): + with pytest.raises(TypeError, match=msg): method(other) - with pytest.raises(TypeError): + with pytest.raises(TypeError, match=msg): method(other.values) - with pytest.raises(TypeError): + with pytest.raises(TypeError, match=msg): method(pd.Index(other)) # ------------------------------------------------------------- @@ -1783,13 +1797,14 @@ def test_operators_datetimelike_with_timezones(self): result = dt1 - td1[0] exp = (dt1.dt.tz_localize(None) - td1[0]).dt.tz_localize(tz) tm.assert_series_equal(result, exp) - with pytest.raises(TypeError): + msg = "bad operand type for unary" + with pytest.raises(TypeError, match=msg): td1[0] - dt1 result = dt2 - td2[0] exp = (dt2.dt.tz_localize(None) - td2[0]).dt.tz_localize(tz) tm.assert_series_equal(result, exp) - with pytest.raises(TypeError): + with pytest.raises(TypeError, match=msg): td2[0] - dt2 result = dt1 + td1 @@ -1807,10 +1822,10 @@ def test_operators_datetimelike_with_timezones(self): result = dt2 - td2 exp = (dt2.dt.tz_localize(None) - td2).dt.tz_localize(tz) tm.assert_series_equal(result, exp) - - with pytest.raises(TypeError): + msg = 'cannot (add|subtract)' + with pytest.raises(TypeError, match=msg): td1 - dt1 - with pytest.raises(TypeError): + with pytest.raises(TypeError, match=msg): td2 - dt2 @@ -1909,13 +1924,15 @@ def test_dti_add_intarray_no_freq(self, int_holder): # GH#19959 dti = pd.DatetimeIndex(['2016-01-01', 'NaT', '2017-04-05 06:07:08']) other = int_holder([9, 4, -1]) - with pytest.raises(NullFrequencyError): + nfmsg = 'Cannot shift with no freq' + tmsg = 'cannot subtract DatetimeArray from' + with pytest.raises(NullFrequencyError, match=nfmsg): dti + other - with pytest.raises(NullFrequencyError): + with pytest.raises(NullFrequencyError, match=nfmsg): other + dti - with pytest.raises(NullFrequencyError): + with pytest.raises(NullFrequencyError, match=nfmsg): dti - other - 
with pytest.raises(TypeError): + with pytest.raises(TypeError, match=tmsg): other - dti # ------------------------------------------------------------- @@ -2057,14 +2074,14 @@ def test_sub_dti_dti(self): result = dti_tz - dti_tz tm.assert_index_equal(result, expected) - - with pytest.raises(TypeError): + msg = 'DatetimeArray subtraction must have the same timezones or' + with pytest.raises(TypeError, match=msg): dti_tz - dti - with pytest.raises(TypeError): + with pytest.raises(TypeError, match=msg): dti - dti_tz - with pytest.raises(TypeError): + with pytest.raises(TypeError, match=msg): dti_tz - dti_tz2 # isub @@ -2074,7 +2091,8 @@ def test_sub_dti_dti(self): # different length raises ValueError dti1 = date_range('20130101', periods=3) dti2 = date_range('20130101', periods=4) - with pytest.raises(ValueError): + msg = 'cannot add indices of unequal length' + with pytest.raises(ValueError, match=msg): dti1 - dti2 # NaN propagation @@ -2148,8 +2166,8 @@ def test_ops_nat_mixed_datetime64_timedelta64(self): tm.assert_series_equal(-single_nat_dtype_timedelta + nat_series_dtype_timestamp, nat_series_dtype_timestamp) - - with pytest.raises(TypeError): + msg = 'cannot subtract a datelike' + with pytest.raises(TypeError, match=msg): timedelta_series - single_nat_dtype_datetime # addition diff --git a/pandas/tests/arrays/test_array.py b/pandas/tests/arrays/test_array.py index 4a51fd63d963b..9fea1989e46df 100644 --- a/pandas/tests/arrays/test_array.py +++ b/pandas/tests/arrays/test_array.py @@ -74,7 +74,7 @@ # Interval ([pd.Interval(1, 2), pd.Interval(3, 4)], 'interval', - pd.IntervalArray.from_tuples([(1, 2), (3, 4)])), + pd.arrays.IntervalArray.from_tuples([(1, 2), (3, 4)])), # Sparse ([0, 1], 'Sparse[int64]', pd.SparseArray([0, 1], dtype='int64')), @@ -129,7 +129,7 @@ def test_array_copy(): # interval ([pd.Interval(0, 1), pd.Interval(1, 2)], - pd.IntervalArray.from_breaks([0, 1, 2])), + pd.arrays.IntervalArray.from_breaks([0, 1, 2])), # datetime ([pd.Timestamp('2000',), 
pd.Timestamp('2001')], diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_analytics.py index f2c3f50c291c3..386e5f57617cf 100644 --- a/pandas/tests/frame/test_analytics.py +++ b/pandas/tests/frame/test_analytics.py @@ -794,6 +794,25 @@ def test_mean(self, float_frame_with_na, float_frame, float_string_frame): check_dates=True) assert_stat_op_api('mean', float_frame, float_string_frame) + @pytest.mark.parametrize('tz', [None, 'UTC']) + def test_mean_mixed_datetime_numeric(self, tz): + # https://github.com/pandas-dev/pandas/issues/24752 + df = pd.DataFrame({"A": [1, 1], + "B": [pd.Timestamp('2000', tz=tz)] * 2}) + result = df.mean() + expected = pd.Series([1.0], index=['A']) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize('tz', [None, 'UTC']) + def test_mean_excludeds_datetimes(self, tz): + # https://github.com/pandas-dev/pandas/issues/24752 + # Our long-term desired behavior is unclear, but the behavior in + # 0.24.0rc1 was buggy. + df = pd.DataFrame({"A": [pd.Timestamp('2000', tz=tz)] * 2}) + result = df.mean() + expected = pd.Series() + tm.assert_series_equal(result, expected) + def test_product(self, float_frame_with_na, float_frame, float_string_frame): assert_stat_op_calc('product', np.prod, float_frame_with_na) diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py index 4f6a2e2bfbebf..90ad48cac3a5f 100644 --- a/pandas/tests/frame/test_constructors.py +++ b/pandas/tests/frame/test_constructors.py @@ -787,6 +787,17 @@ def test_constructor_maskedarray_hardened(self): dtype=float) tm.assert_frame_equal(result, expected) + def test_constructor_maskedrecarray_dtype(self): + # Ensure constructor honors dtype + data = np.ma.array( + np.ma.zeros(5, dtype=[('date', '<f8'), ('price', '<f8')]), + mask=[False] * 5) + data = data.view(ma.mrecords.mrecarray) + result = pd.DataFrame(data, dtype=int) + expected = pd.DataFrame(np.zeros((5, 2), dtype=int), + columns=['date', 'price']) + 
tm.assert_frame_equal(result, expected) + def test_constructor_mrecarray(self): # Ensure mrecarray produces frame identical to dict of masked arrays # from GH3479 diff --git a/pandas/tests/groupby/test_categorical.py b/pandas/tests/groupby/test_categorical.py index 144b64025e1c0..e118135ccc75d 100644 --- a/pandas/tests/groupby/test_categorical.py +++ b/pandas/tests/groupby/test_categorical.py @@ -420,6 +420,39 @@ def test_observed_groups(observed): tm.assert_dict_equal(result, expected) +def test_observed_groups_with_nan(observed): + # GH 24740 + df = pd.DataFrame({'cat': pd.Categorical(['a', np.nan, 'a'], + categories=['a', 'b', 'd']), + 'vals': [1, 2, 3]}) + g = df.groupby('cat', observed=observed) + result = g.groups + if observed: + expected = {'a': Index([0, 2], dtype='int64')} + else: + expected = {'a': Index([0, 2], dtype='int64'), + 'b': Index([], dtype='int64'), + 'd': Index([], dtype='int64')} + tm.assert_dict_equal(result, expected) + + +def test_dataframe_categorical_with_nan(observed): + # GH 21151 + s1 = pd.Categorical([np.nan, 'a', np.nan, 'a'], + categories=['a', 'b', 'c']) + s2 = pd.Series([1, 2, 3, 4]) + df = pd.DataFrame({'s1': s1, 's2': s2}) + result = df.groupby('s1', observed=observed).first().reset_index() + if observed: + expected = DataFrame({'s1': pd.Categorical(['a'], + categories=['a', 'b', 'c']), 's2': [2]}) + else: + expected = DataFrame({'s1': pd.Categorical(['a', 'b', 'c'], + categories=['a', 'b', 'c']), + 's2': [2, np.nan, np.nan]}) + tm.assert_frame_equal(result, expected) + + def test_datetime(): # GH9049: ensure backward compatibility levels = pd.date_range('2014-01-01', periods=4) diff --git a/pandas/tests/io/formats/test_format.py b/pandas/tests/io/formats/test_format.py index 52dce572c6d4f..5d922ccaf1fd5 100644 --- a/pandas/tests/io/formats/test_format.py +++ b/pandas/tests/io/formats/test_format.py @@ -345,6 +345,15 @@ def test_repr_truncates_terminal_size_full(self, monkeypatch): lambda: terminal_size) assert "..." 
not in str(df) + def test_repr_truncation_column_size(self): + # dataframe with last column very wide -> check it is not used to + # determine size of truncation (...) column + df = pd.DataFrame({'a': [108480, 30830], 'b': [12345, 12345], + 'c': [12345, 12345], 'd': [12345, 12345], + 'e': ['a' * 50] * 2}) + assert "..." in str(df) + assert " ... " not in str(df) + def test_repr_max_columns_max_rows(self): term_width, term_height = get_terminal_size() if term_width < 10 or term_height < 10: @@ -543,7 +552,7 @@ def test_to_string_with_formatters_unicode(self): formatters={u('c/\u03c3'): lambda x: '{x}'.format(x=x)}) assert result == u(' c/\u03c3\n') + '0 1\n1 2\n2 3' - def test_east_asian_unicode_frame(self): + def test_east_asian_unicode_false(self): if PY3: _rep = repr else: @@ -643,17 +652,23 @@ def test_east_asian_unicode_frame(self): u'ああああ': [u'さ', u'し', u'す', u'せ']}, columns=['a', 'b', 'c', u'ああああ']) - expected = (u" a ... ああああ\n0 あああああ ... さ\n" - u".. ... ... ...\n3 えええ ... せ\n" + expected = (u" a ... ああああ\n0 あああああ ... さ\n" + u".. ... ... ...\n3 えええ ... せ\n" u"\n[4 rows x 4 columns]") assert _rep(df) == expected df.index = [u'あああ', u'いいいい', u'う', 'aaa'] - expected = (u" a ... ああああ\nあああ あああああ ... さ\n" - u".. ... ... ...\naaa えええ ... せ\n" + expected = (u" a ... ああああ\nあああ あああああ ... さ\n" + u".. ... ... ...\naaa えええ ... せ\n" u"\n[4 rows x 4 columns]") assert _rep(df) == expected + def test_east_asian_unicode_true(self): + if PY3: + _rep = repr + else: + _rep = unicode # noqa + # Emable Unicode option ----------------------------------------- with option_context('display.unicode.east_asian_width', True): @@ -757,18 +772,18 @@ def test_east_asian_unicode_frame(self): u'ああああ': [u'さ', u'し', u'す', u'せ']}, columns=['a', 'b', 'c', u'ああああ']) - expected = (u" a ... ああああ\n" - u"0 あああああ ... さ\n" - u".. ... ... ...\n" - u"3 えええ ... せ\n" + expected = (u" a ... ああああ\n" + u"0 あああああ ... さ\n" + u".. ... ... ...\n" + u"3 えええ ... 
せ\n" u"\n[4 rows x 4 columns]") assert _rep(df) == expected df.index = [u'あああ', u'いいいい', u'う', 'aaa'] - expected = (u" a ... ああああ\n" - u"あああ あああああ ... さ\n" - u"... ... ... ...\n" - u"aaa えええ ... せ\n" + expected = (u" a ... ああああ\n" + u"あああ あああああ ... さ\n" + u"... ... ... ...\n" + u"aaa えええ ... せ\n" u"\n[4 rows x 4 columns]") assert _rep(df) == expected @@ -1465,6 +1480,39 @@ def test_to_string_format_na(self): '4 4.0 bar') assert result == expected + def test_to_string_format_inf(self): + # Issue #24861 + tm.reset_display_options() + df = DataFrame({ + 'A': [-np.inf, np.inf, -1, -2.1234, 3, 4], + 'B': [-np.inf, np.inf, 'foo', 'foooo', 'fooooo', 'bar'] + }) + result = df.to_string() + + expected = (' A B\n' + '0 -inf -inf\n' + '1 inf inf\n' + '2 -1.0000 foo\n' + '3 -2.1234 foooo\n' + '4 3.0000 fooooo\n' + '5 4.0000 bar') + assert result == expected + + df = DataFrame({ + 'A': [-np.inf, np.inf, -1., -2., 3., 4.], + 'B': [-np.inf, np.inf, 'foo', 'foooo', 'fooooo', 'bar'] + }) + result = df.to_string() + + expected = (' A B\n' + '0 -inf -inf\n' + '1 inf inf\n' + '2 -1.0 foo\n' + '3 -2.0 foooo\n' + '4 3.0 fooooo\n' + '5 4.0 bar') + assert result == expected + def test_to_string_decimal(self): # Issue #23614 df = DataFrame({'A': [6.0, 3.1, 2.2]}) diff --git a/pandas/tests/plotting/test_frame.py b/pandas/tests/plotting/test_frame.py index 0e7672f4e2f9d..645abc11ab732 100644 --- a/pandas/tests/plotting/test_frame.py +++ b/pandas/tests/plotting/test_frame.py @@ -231,14 +231,40 @@ def test_plot_xy(self): @pytest.mark.slow def test_logscales(self): df = DataFrame({'a': np.arange(100)}, index=np.arange(100)) + ax = df.plot(logy=True) self._check_ax_scales(ax, yaxis='log') + assert ax.get_yscale() == 'log' + + ax = df.plot(logy='sym') + self._check_ax_scales(ax, yaxis='symlog') + assert ax.get_yscale() == 'symlog' ax = df.plot(logx=True) self._check_ax_scales(ax, xaxis='log') + assert ax.get_xscale() == 'log' + + ax = df.plot(logx='sym') + self._check_ax_scales(ax, 
xaxis='symlog') + assert ax.get_xscale() == 'symlog' ax = df.plot(loglog=True) self._check_ax_scales(ax, xaxis='log', yaxis='log') + assert ax.get_xscale() == 'log' + assert ax.get_yscale() == 'log' + + ax = df.plot(loglog='sym') + self._check_ax_scales(ax, xaxis='symlog', yaxis='symlog') + assert ax.get_xscale() == 'symlog' + assert ax.get_yscale() == 'symlog' + + @pytest.mark.parametrize("wrong_input", ["sm", "symlog"]) + def test_invalid_logscale(self, wrong_input): + df = DataFrame({'a': np.arange(100)}, index=np.arange(100)) + + msg = "Wrong input for log option." + with pytest.raises(ValueError, match=msg): + df.plot(logy=wrong_input) @pytest.mark.slow def test_xcompat(self): @@ -2988,22 +3014,6 @@ def test_secondary_axis_font_size(self, method): self._check_ticks_props(axes=ax.right_ax, ylabelsize=fontsize) - def test_misc_bindings(self, monkeypatch): - df = pd.DataFrame(randn(10, 10), columns=list('abcdefghij')) - monkeypatch.setattr('pandas.plotting._misc.scatter_matrix', - lambda x: 2) - monkeypatch.setattr('pandas.plotting._misc.andrews_curves', - lambda x, y: 2) - monkeypatch.setattr('pandas.plotting._misc.parallel_coordinates', - lambda x, y: 2) - monkeypatch.setattr('pandas.plotting._misc.radviz', - lambda x, y: 2) - - assert df.plot.scatter_matrix() == 2 - assert df.plot.andrews_curves('a') == 2 - assert df.plot.parallel_coordinates('a') == 2 - assert df.plot.radviz('a') == 2 - def _generate_4_axes_via_gridspec(): import matplotlib.pyplot as plt diff --git a/pandas/tests/plotting/test_series.py b/pandas/tests/plotting/test_series.py index 1e223c20f55b7..07a4b168a66f1 100644 --- a/pandas/tests/plotting/test_series.py +++ b/pandas/tests/plotting/test_series.py @@ -878,19 +878,6 @@ def test_custom_business_day_freq(self): _check_plot_works(s.plot) - def test_misc_bindings(self, monkeypatch): - s = Series(randn(10)) - monkeypatch.setattr('pandas.plotting._misc.lag_plot', - lambda x: 2) - monkeypatch.setattr('pandas.plotting._misc.autocorrelation_plot', - 
lambda x: 2) - monkeypatch.setattr('pandas.plotting._misc.bootstrap_plot', - lambda x: 2) - - assert s.plot.lag() == 2 - assert s.plot.autocorrelation() == 2 - assert s.plot.bootstrap() == 2 - @pytest.mark.xfail def test_plot_accessor_updates_on_inplace(self): s = Series([1, 2, 3, 4]) diff --git a/pandas/tests/reshape/merge/test_merge.py b/pandas/tests/reshape/merge/test_merge.py index e123a5171769d..c17c301968269 100644 --- a/pandas/tests/reshape/merge/test_merge.py +++ b/pandas/tests/reshape/merge/test_merge.py @@ -939,26 +939,40 @@ def test_merge_two_empty_df_no_division_error(self): with np.errstate(divide='raise'): merge(a, a, on=('a', 'b')) - @pytest.mark.parametrize('how', ['left', 'outer']) + @pytest.mark.parametrize('how', ['right', 'outer']) def test_merge_on_index_with_more_values(self, how): # GH 24212 - # pd.merge gets [-1, -1, 0, 1] as right_indexer, ensure that -1 is - # interpreted as a missing value instead of the last element - df1 = pd.DataFrame([[1, 2], [2, 4], [3, 6], [4, 8]], - columns=['a', 'b']) - df2 = pd.DataFrame([[3, 30], [4, 40]], - columns=['a', 'c']) - df1.set_index('a', drop=False, inplace=True) - df2.set_index('a', inplace=True) - result = pd.merge(df1, df2, left_index=True, right_on='a', how=how) - expected = pd.DataFrame([[1, 2, np.nan], - [2, 4, np.nan], - [3, 6, 30.0], - [4, 8, 40.0]], - columns=['a', 'b', 'c']) - expected.set_index('a', drop=False, inplace=True) + # pd.merge gets [0, 1, 2, -1, -1, -1] as left_indexer, ensure that + # -1 is interpreted as a missing value instead of the last element + df1 = pd.DataFrame({'a': [1, 2, 3], 'key': [0, 2, 2]}) + df2 = pd.DataFrame({'b': [1, 2, 3, 4, 5]}) + result = df1.merge(df2, left_on='key', right_index=True, how=how) + expected = pd.DataFrame([[1.0, 0, 1], + [2.0, 2, 3], + [3.0, 2, 3], + [np.nan, 1, 2], + [np.nan, 3, 4], + [np.nan, 4, 5]], + columns=['a', 'key', 'b']) + expected.set_index(Int64Index([0, 1, 2, 1, 3, 4]), inplace=True) assert_frame_equal(result, expected) + def 
test_merge_right_index_right(self): + # Note: the expected output here is probably incorrect. + # See https://github.com/pandas-dev/pandas/issues/17257 for more. + # We include this as a regression test for GH-24897. + left = pd.DataFrame({'a': [1, 2, 3], 'key': [0, 1, 1]}) + right = pd.DataFrame({'b': [1, 2, 3]}) + + expected = pd.DataFrame({'a': [1, 2, 3, None], + 'key': [0, 1, 1, 2], + 'b': [1, 2, 2, 3]}, + columns=['a', 'key', 'b'], + index=[0, 1, 2, 2]) + result = left.merge(right, left_on='key', right_index=True, + how='right') + tm.assert_frame_equal(result, expected) + def _check_merge(x, y): for how in ['inner', 'left', 'outer']: diff --git a/pandas/tests/series/test_period.py b/pandas/tests/series/test_period.py index 0a86bb0b67797..7e0feb418e8df 100644 --- a/pandas/tests/series/test_period.py +++ b/pandas/tests/series/test_period.py @@ -164,3 +164,12 @@ def test_end_time_timevalues(self, input_vals): result = s.dt.end_time expected = s.apply(lambda x: x.end_time) tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize('input_vals', [ + ('2001'), ('NaT') + ]) + def test_to_period(self, input_vals): + # GH 21205 + expected = Series([input_vals], dtype='Period[D]') + result = Series([input_vals], dtype='datetime64[ns]').dt.to_period('D') + tm.assert_series_equal(result, expected) diff --git a/pandas/tests/test_nanops.py b/pandas/tests/test_nanops.py index 4bcd16a86e865..cf5ef6cf15eca 100644 --- a/pandas/tests/test_nanops.py +++ b/pandas/tests/test_nanops.py @@ -971,6 +971,9 @@ def prng(self): class TestDatetime64NaNOps(object): @pytest.mark.parametrize('tz', [None, 'UTC']) + @pytest.mark.xfail(reason="disabled") + # Enabling mean changes the behavior of DataFrame.mean + # See https://github.com/pandas-dev/pandas/issues/24752 def test_nanmean(self, tz): dti = pd.date_range('2016-01-01', periods=3, tz=tz) expected = dti[1] diff --git a/pandas/tests/tools/test_numeric.py b/pandas/tests/tools/test_numeric.py index 537881f3a5e85..3822170d884aa 
100644 --- a/pandas/tests/tools/test_numeric.py +++ b/pandas/tests/tools/test_numeric.py @@ -5,436 +5,461 @@ import pytest import pandas as pd -from pandas import to_numeric +from pandas import DataFrame, Index, Series, to_numeric from pandas.util import testing as tm -class TestToNumeric(object): - - def test_empty(self): - # see gh-16302 - s = pd.Series([], dtype=object) - - res = to_numeric(s) - expected = pd.Series([], dtype=np.int64) - - tm.assert_series_equal(res, expected) - - # Original issue example - res = to_numeric(s, errors='coerce', downcast='integer') - expected = pd.Series([], dtype=np.int8) - - tm.assert_series_equal(res, expected) - - def test_series(self): - s = pd.Series(['1', '-3.14', '7']) - res = to_numeric(s) - expected = pd.Series([1, -3.14, 7]) - tm.assert_series_equal(res, expected) - - s = pd.Series(['1', '-3.14', 7]) - res = to_numeric(s) - tm.assert_series_equal(res, expected) - - def test_series_numeric(self): - s = pd.Series([1, 3, 4, 5], index=list('ABCD'), name='XXX') - res = to_numeric(s) - tm.assert_series_equal(res, s) - - s = pd.Series([1., 3., 4., 5.], index=list('ABCD'), name='XXX') - res = to_numeric(s) - tm.assert_series_equal(res, s) - - # bool is regarded as numeric - s = pd.Series([True, False, True, True], - index=list('ABCD'), name='XXX') - res = to_numeric(s) - tm.assert_series_equal(res, s) - - def test_error(self): - s = pd.Series([1, -3.14, 'apple']) - msg = 'Unable to parse string "apple" at position 2' - with pytest.raises(ValueError, match=msg): - to_numeric(s, errors='raise') - - res = to_numeric(s, errors='ignore') - expected = pd.Series([1, -3.14, 'apple']) - tm.assert_series_equal(res, expected) - - res = to_numeric(s, errors='coerce') - expected = pd.Series([1, -3.14, np.nan]) - tm.assert_series_equal(res, expected) - - s = pd.Series(['orange', 1, -3.14, 'apple']) - msg = 'Unable to parse string "orange" at position 0' - with pytest.raises(ValueError, match=msg): - to_numeric(s, errors='raise') - - def 
test_error_seen_bool(self): - s = pd.Series([True, False, 'apple']) - msg = 'Unable to parse string "apple" at position 2' - with pytest.raises(ValueError, match=msg): - to_numeric(s, errors='raise') - - res = to_numeric(s, errors='ignore') - expected = pd.Series([True, False, 'apple']) - tm.assert_series_equal(res, expected) - - # coerces to float - res = to_numeric(s, errors='coerce') - expected = pd.Series([1., 0., np.nan]) - tm.assert_series_equal(res, expected) - - def test_list(self): - s = ['1', '-3.14', '7'] - res = to_numeric(s) - expected = np.array([1, -3.14, 7]) - tm.assert_numpy_array_equal(res, expected) - - def test_list_numeric(self): - s = [1, 3, 4, 5] - res = to_numeric(s) - tm.assert_numpy_array_equal(res, np.array(s, dtype=np.int64)) - - s = [1., 3., 4., 5.] - res = to_numeric(s) - tm.assert_numpy_array_equal(res, np.array(s)) - - # bool is regarded as numeric - s = [True, False, True, True] - res = to_numeric(s) - tm.assert_numpy_array_equal(res, np.array(s)) - - def test_numeric(self): - s = pd.Series([1, -3.14, 7], dtype='O') - res = to_numeric(s) - expected = pd.Series([1, -3.14, 7]) - tm.assert_series_equal(res, expected) - - s = pd.Series([1, -3.14, 7]) - res = to_numeric(s) - tm.assert_series_equal(res, expected) - - # GH 14827 - df = pd.DataFrame(dict( - a=[1.2, decimal.Decimal(3.14), decimal.Decimal("infinity"), '0.1'], - b=[1.0, 2.0, 3.0, 4.0], - )) - expected = pd.DataFrame(dict( - a=[1.2, 3.14, np.inf, 0.1], - b=[1.0, 2.0, 3.0, 4.0], - )) - - # Test to_numeric over one column - df_copy = df.copy() - df_copy['a'] = df_copy['a'].apply(to_numeric) - tm.assert_frame_equal(df_copy, expected) - - # Test to_numeric over multiple columns - df_copy = df.copy() - df_copy[['a', 'b']] = df_copy[['a', 'b']].apply(to_numeric) - tm.assert_frame_equal(df_copy, expected) - - def test_numeric_lists_and_arrays(self): - # Test to_numeric with embedded lists and arrays - df = pd.DataFrame(dict( - a=[[decimal.Decimal(3.14), 1.0], decimal.Decimal(1.6), 
0.1] - )) - df['a'] = df['a'].apply(to_numeric) - expected = pd.DataFrame(dict( - a=[[3.14, 1.0], 1.6, 0.1], - )) - tm.assert_frame_equal(df, expected) - - df = pd.DataFrame(dict( - a=[np.array([decimal.Decimal(3.14), 1.0]), 0.1] - )) - df['a'] = df['a'].apply(to_numeric) - expected = pd.DataFrame(dict( - a=[[3.14, 1.0], 0.1], - )) - tm.assert_frame_equal(df, expected) - - def test_all_nan(self): - s = pd.Series(['a', 'b', 'c']) - res = to_numeric(s, errors='coerce') - expected = pd.Series([np.nan, np.nan, np.nan]) - tm.assert_series_equal(res, expected) - - @pytest.mark.parametrize("errors", [None, "ignore", "raise", "coerce"]) - def test_type_check(self, errors): - # see gh-11776 - df = pd.DataFrame({"a": [1, -3.14, 7], "b": ["4", "5", "6"]}) - kwargs = dict(errors=errors) if errors is not None else dict() - error_ctx = pytest.raises(TypeError, match="1-d array") - - with error_ctx: - to_numeric(df, **kwargs) - - def test_scalar(self): - assert pd.to_numeric(1) == 1 - assert pd.to_numeric(1.1) == 1.1 - - assert pd.to_numeric('1') == 1 - assert pd.to_numeric('1.1') == 1.1 - - with pytest.raises(ValueError): - to_numeric('XX', errors='raise') - - assert to_numeric('XX', errors='ignore') == 'XX' - assert np.isnan(to_numeric('XX', errors='coerce')) - - def test_numeric_dtypes(self): - idx = pd.Index([1, 2, 3], name='xxx') - res = pd.to_numeric(idx) - tm.assert_index_equal(res, idx) - - res = pd.to_numeric(pd.Series(idx, name='xxx')) - tm.assert_series_equal(res, pd.Series(idx, name='xxx')) - - res = pd.to_numeric(idx.values) - tm.assert_numpy_array_equal(res, idx.values) - - idx = pd.Index([1., np.nan, 3., np.nan], name='xxx') - res = pd.to_numeric(idx) - tm.assert_index_equal(res, idx) - - res = pd.to_numeric(pd.Series(idx, name='xxx')) - tm.assert_series_equal(res, pd.Series(idx, name='xxx')) - - res = pd.to_numeric(idx.values) - tm.assert_numpy_array_equal(res, idx.values) - - def test_str(self): - idx = pd.Index(['1', '2', '3'], name='xxx') - exp = np.array([1, 
2, 3], dtype='int64') - res = pd.to_numeric(idx) - tm.assert_index_equal(res, pd.Index(exp, name='xxx')) - - res = pd.to_numeric(pd.Series(idx, name='xxx')) - tm.assert_series_equal(res, pd.Series(exp, name='xxx')) - - res = pd.to_numeric(idx.values) - tm.assert_numpy_array_equal(res, exp) - - idx = pd.Index(['1.5', '2.7', '3.4'], name='xxx') - exp = np.array([1.5, 2.7, 3.4]) - res = pd.to_numeric(idx) - tm.assert_index_equal(res, pd.Index(exp, name='xxx')) - - res = pd.to_numeric(pd.Series(idx, name='xxx')) - tm.assert_series_equal(res, pd.Series(exp, name='xxx')) - - res = pd.to_numeric(idx.values) - tm.assert_numpy_array_equal(res, exp) - - def test_datetime_like(self, tz_naive_fixture): - idx = pd.date_range("20130101", periods=3, - tz=tz_naive_fixture, name="xxx") - res = pd.to_numeric(idx) - tm.assert_index_equal(res, pd.Index(idx.asi8, name="xxx")) - - res = pd.to_numeric(pd.Series(idx, name="xxx")) - tm.assert_series_equal(res, pd.Series(idx.asi8, name="xxx")) - - res = pd.to_numeric(idx.values) - tm.assert_numpy_array_equal(res, idx.asi8) - - def test_timedelta(self): - idx = pd.timedelta_range('1 days', periods=3, freq='D', name='xxx') - res = pd.to_numeric(idx) - tm.assert_index_equal(res, pd.Index(idx.asi8, name='xxx')) - - res = pd.to_numeric(pd.Series(idx, name='xxx')) - tm.assert_series_equal(res, pd.Series(idx.asi8, name='xxx')) - - res = pd.to_numeric(idx.values) - tm.assert_numpy_array_equal(res, idx.asi8) - - def test_period(self): - idx = pd.period_range('2011-01', periods=3, freq='M', name='xxx') - res = pd.to_numeric(idx) - tm.assert_index_equal(res, pd.Index(idx.asi8, name='xxx')) - - # TODO: enable when we can support native PeriodDtype - # res = pd.to_numeric(pd.Series(idx, name='xxx')) - # tm.assert_series_equal(res, pd.Series(idx.asi8, name='xxx')) - - def test_non_hashable(self): - # Test for Bug #13324 - s = pd.Series([[10.0, 2], 1.0, 'apple']) - res = pd.to_numeric(s, errors='coerce') - tm.assert_series_equal(res, pd.Series([np.nan, 
1.0, np.nan])) - - res = pd.to_numeric(s, errors='ignore') - tm.assert_series_equal(res, pd.Series([[10.0, 2], 1.0, 'apple'])) - - with pytest.raises(TypeError, match="Invalid object type"): - pd.to_numeric(s) - - @pytest.mark.parametrize("data", [ - ["1", 2, 3], - [1, 2, 3], - np.array(["1970-01-02", "1970-01-03", - "1970-01-04"], dtype="datetime64[D]") - ]) - def test_downcast_basic(self, data): - # see gh-13352 - invalid_downcast = "unsigned-integer" - msg = "invalid downcasting method provided" - - with pytest.raises(ValueError, match=msg): - pd.to_numeric(data, downcast=invalid_downcast) - - expected = np.array([1, 2, 3], dtype=np.int64) - - # Basic function tests. - res = pd.to_numeric(data) - tm.assert_numpy_array_equal(res, expected) - - res = pd.to_numeric(data, downcast=None) - tm.assert_numpy_array_equal(res, expected) - - # Basic dtype support. - smallest_uint_dtype = np.dtype(np.typecodes["UnsignedInteger"][0]) - - # Support below np.float32 is rare and far between. - float_32_char = np.dtype(np.float32).char - smallest_float_dtype = float_32_char - - expected = np.array([1, 2, 3], dtype=smallest_uint_dtype) - res = pd.to_numeric(data, downcast="unsigned") - tm.assert_numpy_array_equal(res, expected) - - expected = np.array([1, 2, 3], dtype=smallest_float_dtype) - res = pd.to_numeric(data, downcast="float") - tm.assert_numpy_array_equal(res, expected) - - @pytest.mark.parametrize("signed_downcast", ["integer", "signed"]) - @pytest.mark.parametrize("data", [ - ["1", 2, 3], - [1, 2, 3], - np.array(["1970-01-02", "1970-01-03", - "1970-01-04"], dtype="datetime64[D]") - ]) - def test_signed_downcast(self, data, signed_downcast): - # see gh-13352 - smallest_int_dtype = np.dtype(np.typecodes["Integer"][0]) - expected = np.array([1, 2, 3], dtype=smallest_int_dtype) - - res = pd.to_numeric(data, downcast=signed_downcast) - tm.assert_numpy_array_equal(res, expected) - - def test_ignore_downcast_invalid_data(self): - # If we can't successfully cast the given - # 
data to a numeric dtype, do not bother - # with the downcast parameter. - data = ["foo", 2, 3] - expected = np.array(data, dtype=object) - - res = pd.to_numeric(data, errors="ignore", - downcast="unsigned") - tm.assert_numpy_array_equal(res, expected) - - def test_ignore_downcast_neg_to_unsigned(self): - # Cannot cast to an unsigned integer - # because we have a negative number. - data = ["-1", 2, 3] - expected = np.array([-1, 2, 3], dtype=np.int64) - - res = pd.to_numeric(data, downcast="unsigned") - tm.assert_numpy_array_equal(res, expected) - - @pytest.mark.parametrize("downcast", ["integer", "signed", "unsigned"]) - @pytest.mark.parametrize("data,expected", [ - (["1.1", 2, 3], - np.array([1.1, 2, 3], dtype=np.float64)), - ([10000.0, 20000, 3000, 40000.36, 50000, 50000.00], - np.array([10000.0, 20000, 3000, - 40000.36, 50000, 50000.00], dtype=np.float64)) - ]) - def test_ignore_downcast_cannot_convert_float( - self, data, expected, downcast): - # Cannot cast to an integer (signed or unsigned) - # because we have a float number. 
- res = pd.to_numeric(data, downcast=downcast) - tm.assert_numpy_array_equal(res, expected) - - @pytest.mark.parametrize("downcast,expected_dtype", [ - ("integer", np.int16), - ("signed", np.int16), - ("unsigned", np.uint16) - ]) - def test_downcast_not8bit(self, downcast, expected_dtype): - # the smallest integer dtype need not be np.(u)int8 - data = ["256", 257, 258] - - expected = np.array([256, 257, 258], dtype=expected_dtype) - res = pd.to_numeric(data, downcast=downcast) - tm.assert_numpy_array_equal(res, expected) - - @pytest.mark.parametrize("dtype,downcast,min_max", [ - ("int8", "integer", [iinfo(np.int8).min, - iinfo(np.int8).max]), - ("int16", "integer", [iinfo(np.int16).min, - iinfo(np.int16).max]), - ('int32', "integer", [iinfo(np.int32).min, - iinfo(np.int32).max]), - ('int64', "integer", [iinfo(np.int64).min, - iinfo(np.int64).max]), - ('uint8', "unsigned", [iinfo(np.uint8).min, - iinfo(np.uint8).max]), - ('uint16', "unsigned", [iinfo(np.uint16).min, - iinfo(np.uint16).max]), - ('uint32', "unsigned", [iinfo(np.uint32).min, - iinfo(np.uint32).max]), - ('uint64', "unsigned", [iinfo(np.uint64).min, - iinfo(np.uint64).max]), - ('int16', "integer", [iinfo(np.int8).min, - iinfo(np.int8).max + 1]), - ('int32', "integer", [iinfo(np.int16).min, - iinfo(np.int16).max + 1]), - ('int64', "integer", [iinfo(np.int32).min, - iinfo(np.int32).max + 1]), - ('int16', "integer", [iinfo(np.int8).min - 1, - iinfo(np.int16).max]), - ('int32', "integer", [iinfo(np.int16).min - 1, - iinfo(np.int32).max]), - ('int64', "integer", [iinfo(np.int32).min - 1, - iinfo(np.int64).max]), - ('uint16', "unsigned", [iinfo(np.uint8).min, - iinfo(np.uint8).max + 1]), - ('uint32', "unsigned", [iinfo(np.uint16).min, - iinfo(np.uint16).max + 1]), - ('uint64', "unsigned", [iinfo(np.uint32).min, - iinfo(np.uint32).max + 1]) - ]) - def test_downcast_limits(self, dtype, downcast, min_max): - # see gh-14404: test the limits of each downcast. 
- series = pd.to_numeric(pd.Series(min_max), downcast=downcast) - assert series.dtype == dtype - - def test_coerce_uint64_conflict(self): - # see gh-17007 and gh-17125 - # - # Still returns float despite the uint64-nan conflict, - # which would normally force the casting to object. - df = pd.DataFrame({"a": [200, 300, "", "NaN", 30000000000000000000]}) - expected = pd.Series([200, 300, np.nan, np.nan, - 30000000000000000000], dtype=float, name="a") - result = to_numeric(df["a"], errors="coerce") +@pytest.mark.parametrize("input_kwargs,result_kwargs", [ + (dict(), dict(dtype=np.int64)), + (dict(errors="coerce", downcast="integer"), dict(dtype=np.int8)) +]) +def test_empty(input_kwargs, result_kwargs): + # see gh-16302 + ser = Series([], dtype=object) + result = to_numeric(ser, **input_kwargs) + + expected = Series([], **result_kwargs) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("last_val", ["7", 7]) +def test_series(last_val): + ser = Series(["1", "-3.14", last_val]) + result = to_numeric(ser) + + expected = Series([1, -3.14, 7]) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("data", [ + [1, 3, 4, 5], + [1., 3., 4., 5.], + + # Bool is regarded as numeric. 
+ [True, False, True, True] +]) +def test_series_numeric(data): + ser = Series(data, index=list("ABCD"), name="EFG") + + result = to_numeric(ser) + tm.assert_series_equal(result, ser) + + +@pytest.mark.parametrize("data,msg", [ + ([1, -3.14, "apple"], + 'Unable to parse string "apple" at position 2'), + (["orange", 1, -3.14, "apple"], + 'Unable to parse string "orange" at position 0') +]) +def test_error(data, msg): + ser = Series(data) + + with pytest.raises(ValueError, match=msg): + to_numeric(ser, errors="raise") + + +@pytest.mark.parametrize("errors,exp_data", [ + ("ignore", [1, -3.14, "apple"]), + ("coerce", [1, -3.14, np.nan]) +]) +def test_ignore_error(errors, exp_data): + ser = Series([1, -3.14, "apple"]) + result = to_numeric(ser, errors=errors) + + expected = Series(exp_data) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("errors,exp", [ + ("raise", 'Unable to parse string "apple" at position 2'), + ("ignore", [True, False, "apple"]), + + # Coerces to float. + ("coerce", [1., 0., np.nan]) +]) +def test_bool_handling(errors, exp): + ser = Series([True, False, "apple"]) + + if isinstance(exp, str): + with pytest.raises(ValueError, match=exp): + to_numeric(ser, errors=errors) + else: + result = to_numeric(ser, errors=errors) + expected = Series(exp) + tm.assert_series_equal(result, expected) - s = pd.Series(["12345678901234567890", "1234567890", "ITEM"]) - expected = pd.Series([12345678901234567890, - 1234567890, np.nan], dtype=float) - result = to_numeric(s, errors="coerce") + +def test_list(): + ser = ["1", "-3.14", "7"] + res = to_numeric(ser) + + expected = np.array([1, -3.14, 7]) + tm.assert_numpy_array_equal(res, expected) + + +@pytest.mark.parametrize("data,arr_kwargs", [ + ([1, 3, 4, 5], dict(dtype=np.int64)), + ([1., 3., 4., 5.], dict()), + + # Boolean is regarded as numeric. 
+ ([True, False, True, True], dict()) +]) +def test_list_numeric(data, arr_kwargs): + result = to_numeric(data) + expected = np.array(data, **arr_kwargs) + tm.assert_numpy_array_equal(result, expected) + + +@pytest.mark.parametrize("kwargs", [ + dict(dtype="O"), dict() +]) +def test_numeric(kwargs): + data = [1, -3.14, 7] + + ser = Series(data, **kwargs) + result = to_numeric(ser) + + expected = Series(data) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("columns", [ + # One column. + "a", + + # Multiple columns. + ["a", "b"] +]) +def test_numeric_df_columns(columns): + # see gh-14827 + df = DataFrame(dict( + a=[1.2, decimal.Decimal(3.14), decimal.Decimal("infinity"), "0.1"], + b=[1.0, 2.0, 3.0, 4.0], + )) + + expected = DataFrame(dict( + a=[1.2, 3.14, np.inf, 0.1], + b=[1.0, 2.0, 3.0, 4.0], + )) + + df_copy = df.copy() + df_copy[columns] = df_copy[columns].apply(to_numeric) + + tm.assert_frame_equal(df_copy, expected) + + +@pytest.mark.parametrize("data,exp_data", [ + ([[decimal.Decimal(3.14), 1.0], decimal.Decimal(1.6), 0.1], + [[3.14, 1.0], 1.6, 0.1]), + ([np.array([decimal.Decimal(3.14), 1.0]), 0.1], + [[3.14, 1.0], 0.1]) +]) +def test_numeric_embedded_arr_likes(data, exp_data): + # Test to_numeric with embedded lists and arrays + df = DataFrame(dict(a=data)) + df["a"] = df["a"].apply(to_numeric) + + expected = DataFrame(dict(a=exp_data)) + tm.assert_frame_equal(df, expected) + + +def test_all_nan(): + ser = Series(["a", "b", "c"]) + result = to_numeric(ser, errors="coerce") + + expected = Series([np.nan, np.nan, np.nan]) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("errors", [None, "ignore", "raise", "coerce"]) +def test_type_check(errors): + # see gh-11776 + df = DataFrame({"a": [1, -3.14, 7], "b": ["4", "5", "6"]}) + kwargs = dict(errors=errors) if errors is not None else dict() + error_ctx = pytest.raises(TypeError, match="1-d array") + + with error_ctx: + to_numeric(df, **kwargs) + + 
+@pytest.mark.parametrize("val", [ + 1, 1.1, "1", "1.1", -1.5, "-1.5" +]) +def test_scalar(val): + assert to_numeric(val) == float(val) + + +@pytest.mark.parametrize("errors,checker", [ + ("raise", 'Unable to parse string "fail" at position 0'), + ("ignore", lambda x: x == "fail"), + ("coerce", lambda x: np.isnan(x)) +]) +def test_scalar_fail(errors, checker): + scalar = "fail" + + if isinstance(checker, str): + with pytest.raises(ValueError, match=checker): + to_numeric(scalar, errors=errors) + else: + assert checker(to_numeric(scalar, errors=errors)) + + +@pytest.fixture(params=[ + (lambda x: Index(x, name="idx"), tm.assert_index_equal), + (lambda x: Series(x, name="ser"), tm.assert_series_equal), + (lambda x: np.array(Index(x).values), tm.assert_numpy_array_equal) +]) +def transform_assert_equal(request): + return request.param + + +@pytest.mark.parametrize("data", [ + [1, 2, 3], + [1., np.nan, 3, np.nan] +]) +def test_numeric_dtypes(data, transform_assert_equal): + transform, assert_equal = transform_assert_equal + data = transform(data) + + result = to_numeric(data) + assert_equal(result, data) + + +@pytest.mark.parametrize("data,exp", [ + (["1", "2", "3"], np.array([1, 2, 3], dtype="int64")), + (["1.5", "2.7", "3.4"], np.array([1.5, 2.7, 3.4])) +]) +def test_str(data, exp, transform_assert_equal): + transform, assert_equal = transform_assert_equal + result = to_numeric(transform(data)) + + expected = transform(exp) + assert_equal(result, expected) + + +def test_datetime_like(tz_naive_fixture, transform_assert_equal): + transform, assert_equal = transform_assert_equal + idx = pd.date_range("20130101", periods=3, tz=tz_naive_fixture) + + result = to_numeric(transform(idx)) + expected = transform(idx.asi8) + assert_equal(result, expected) + + +def test_timedelta(transform_assert_equal): + transform, assert_equal = transform_assert_equal + idx = pd.timedelta_range("1 days", periods=3, freq="D") + + result = to_numeric(transform(idx)) + expected = 
transform(idx.asi8) + assert_equal(result, expected) + + +def test_period(transform_assert_equal): + transform, assert_equal = transform_assert_equal + + idx = pd.period_range("2011-01", periods=3, freq="M", name="") + inp = transform(idx) + + if isinstance(inp, Index): + result = to_numeric(inp) + expected = transform(idx.asi8) + assert_equal(result, expected) + else: + # TODO: PeriodDtype, so support it in to_numeric. + pytest.skip("Missing PeriodDtype support in to_numeric") + + +@pytest.mark.parametrize("errors,expected", [ + ("raise", "Invalid object type at position 0"), + ("ignore", Series([[10.0, 2], 1.0, "apple"])), + ("coerce", Series([np.nan, 1.0, np.nan])) +]) +def test_non_hashable(errors, expected): + # see gh-13324 + ser = Series([[10.0, 2], 1.0, "apple"]) + + if isinstance(expected, str): + with pytest.raises(TypeError, match=expected): + to_numeric(ser, errors=errors) + else: + result = to_numeric(ser, errors=errors) tm.assert_series_equal(result, expected) - # For completeness, check against "ignore" and "raise" - result = to_numeric(s, errors="ignore") - tm.assert_series_equal(result, s) - msg = "Unable to parse string" - with pytest.raises(ValueError, match=msg): - to_numeric(s, errors="raise") +def test_downcast_invalid_cast(): + # see gh-13352 + data = ["1", 2, 3] + invalid_downcast = "unsigned-integer" + msg = "invalid downcasting method provided" + + with pytest.raises(ValueError, match=msg): + to_numeric(data, downcast=invalid_downcast) + + +@pytest.mark.parametrize("data", [ + ["1", 2, 3], + [1, 2, 3], + np.array(["1970-01-02", "1970-01-03", + "1970-01-04"], dtype="datetime64[D]") +]) +@pytest.mark.parametrize("kwargs,exp_dtype", [ + # Basic function tests. + (dict(), np.int64), + (dict(downcast=None), np.int64), + + # Support below np.float32 is rare and far between. + (dict(downcast="float"), np.dtype(np.float32).char), + + # Basic dtype support. 
+ (dict(downcast="unsigned"), np.dtype(np.typecodes["UnsignedInteger"][0])) +]) +def test_downcast_basic(data, kwargs, exp_dtype): + # see gh-13352 + result = to_numeric(data, **kwargs) + expected = np.array([1, 2, 3], dtype=exp_dtype) + tm.assert_numpy_array_equal(result, expected) + + +@pytest.mark.parametrize("signed_downcast", ["integer", "signed"]) +@pytest.mark.parametrize("data", [ + ["1", 2, 3], + [1, 2, 3], + np.array(["1970-01-02", "1970-01-03", + "1970-01-04"], dtype="datetime64[D]") +]) +def test_signed_downcast(data, signed_downcast): + # see gh-13352 + smallest_int_dtype = np.dtype(np.typecodes["Integer"][0]) + expected = np.array([1, 2, 3], dtype=smallest_int_dtype) + + res = to_numeric(data, downcast=signed_downcast) + tm.assert_numpy_array_equal(res, expected) + + +def test_ignore_downcast_invalid_data(): + # If we can't successfully cast the given + # data to a numeric dtype, do not bother + # with the downcast parameter. + data = ["foo", 2, 3] + expected = np.array(data, dtype=object) + + res = to_numeric(data, errors="ignore", + downcast="unsigned") + tm.assert_numpy_array_equal(res, expected) + + +def test_ignore_downcast_neg_to_unsigned(): + # Cannot cast to an unsigned integer + # because we have a negative number. + data = ["-1", 2, 3] + expected = np.array([-1, 2, 3], dtype=np.int64) + + res = to_numeric(data, downcast="unsigned") + tm.assert_numpy_array_equal(res, expected) + + +@pytest.mark.parametrize("downcast", ["integer", "signed", "unsigned"]) +@pytest.mark.parametrize("data,expected", [ + (["1.1", 2, 3], + np.array([1.1, 2, 3], dtype=np.float64)), + ([10000.0, 20000, 3000, 40000.36, 50000, 50000.00], + np.array([10000.0, 20000, 3000, + 40000.36, 50000, 50000.00], dtype=np.float64)) +]) +def test_ignore_downcast_cannot_convert_float(data, expected, downcast): + # Cannot cast to an integer (signed or unsigned) + # because we have a float number. 
+ res = to_numeric(data, downcast=downcast) + tm.assert_numpy_array_equal(res, expected) + + +@pytest.mark.parametrize("downcast,expected_dtype", [ + ("integer", np.int16), + ("signed", np.int16), + ("unsigned", np.uint16) +]) +def test_downcast_not8bit(downcast, expected_dtype): + # the smallest integer dtype need not be np.(u)int8 + data = ["256", 257, 258] + + expected = np.array([256, 257, 258], dtype=expected_dtype) + res = to_numeric(data, downcast=downcast) + tm.assert_numpy_array_equal(res, expected) + + +@pytest.mark.parametrize("dtype,downcast,min_max", [ + ("int8", "integer", [iinfo(np.int8).min, + iinfo(np.int8).max]), + ("int16", "integer", [iinfo(np.int16).min, + iinfo(np.int16).max]), + ("int32", "integer", [iinfo(np.int32).min, + iinfo(np.int32).max]), + ("int64", "integer", [iinfo(np.int64).min, + iinfo(np.int64).max]), + ("uint8", "unsigned", [iinfo(np.uint8).min, + iinfo(np.uint8).max]), + ("uint16", "unsigned", [iinfo(np.uint16).min, + iinfo(np.uint16).max]), + ("uint32", "unsigned", [iinfo(np.uint32).min, + iinfo(np.uint32).max]), + ("uint64", "unsigned", [iinfo(np.uint64).min, + iinfo(np.uint64).max]), + ("int16", "integer", [iinfo(np.int8).min, + iinfo(np.int8).max + 1]), + ("int32", "integer", [iinfo(np.int16).min, + iinfo(np.int16).max + 1]), + ("int64", "integer", [iinfo(np.int32).min, + iinfo(np.int32).max + 1]), + ("int16", "integer", [iinfo(np.int8).min - 1, + iinfo(np.int16).max]), + ("int32", "integer", [iinfo(np.int16).min - 1, + iinfo(np.int32).max]), + ("int64", "integer", [iinfo(np.int32).min - 1, + iinfo(np.int64).max]), + ("uint16", "unsigned", [iinfo(np.uint8).min, + iinfo(np.uint8).max + 1]), + ("uint32", "unsigned", [iinfo(np.uint16).min, + iinfo(np.uint16).max + 1]), + ("uint64", "unsigned", [iinfo(np.uint32).min, + iinfo(np.uint32).max + 1]) +]) +def test_downcast_limits(dtype, downcast, min_max): + # see gh-14404: test the limits of each downcast. 
+ series = to_numeric(Series(min_max), downcast=downcast) + assert series.dtype == dtype + + +@pytest.mark.parametrize("data,exp_data", [ + ([200, 300, "", "NaN", 30000000000000000000], + [200, 300, np.nan, np.nan, 30000000000000000000]), + (["12345678901234567890", "1234567890", "ITEM"], + [12345678901234567890, 1234567890, np.nan]) +]) +def test_coerce_uint64_conflict(data, exp_data): + # see gh-17007 and gh-17125 + # + # Still returns float despite the uint64-nan conflict, + # which would normally force the casting to object. + result = to_numeric(Series(data), errors="coerce") + expected = Series(exp_data, dtype=float) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("errors,exp", [ + ("ignore", Series(["12345678901234567890", "1234567890", "ITEM"])), + ("raise", "Unable to parse string") +]) +def test_non_coerce_uint64_conflict(errors, exp): + # see gh-17007 and gh-17125 + # + # For completeness. + ser = Series(["12345678901234567890", "1234567890", "ITEM"]) + + if isinstance(exp, str): + with pytest.raises(ValueError, match=exp): + to_numeric(ser, errors=errors) + else: + result = to_numeric(ser, errors=errors) + tm.assert_series_equal(result, ser) diff --git a/scripts/validate_docstrings.py b/scripts/validate_docstrings.py index 4e389aed2b0d2..bce33f7e78daa 100755 --- a/scripts/validate_docstrings.py +++ b/scripts/validate_docstrings.py @@ -796,7 +796,8 @@ def validate_all(prefix, ignore_deprecated=False): seen = {} # functions from the API docs - api_doc_fnames = os.path.join(BASE_PATH, 'doc', 'source', 'api', '*.rst') + api_doc_fnames = os.path.join( + BASE_PATH, 'doc', 'source', 'reference', '*.rst') api_items = [] for api_doc_fname in glob.glob(api_doc_fnames): with open(api_doc_fname) as f: diff --git a/setup.cfg b/setup.cfg index 95c71826a80d4..7155cc1013544 100644 --- a/setup.cfg +++ b/setup.cfg @@ -46,8 +46,8 @@ ignore = E402, # module level import not at top of file E711, # comparison to none should be 'if cond is none:' 
exclude = - doc/source/basics.rst - doc/source/contributing_docstring.rst + doc/source/getting_started/basics.rst + doc/source/development/contributing_docstring.rst [yapf] diff --git a/setup.py b/setup.py index ed2d905f4358b..4bf040b8c8e20 100755 --- a/setup.py +++ b/setup.py @@ -457,6 +457,11 @@ def run(self): directives['linetrace'] = True macros = [('CYTHON_TRACE', '1'), ('CYTHON_TRACE_NOGIL', '1')] +# in numpy>=1.16.0, silence build warnings about deprecated API usage +# we can't do anything about these warnings because they stem from +# cython+numpy version mismatches. +macros.append(('NPY_NO_DEPRECATED_API', '0')) + # ---------------------------------------------------------------------- # Specification of Dependencies
- [ ] closes #24867 - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/24871
2019-01-21T23:24:09Z
2019-01-27T22:04:22Z
null
2019-01-27T22:04:23Z
REF: move methods into cdef classes
diff --git a/pandas/_libs/interval.pyx b/pandas/_libs/interval.pyx index 3147f36dcc835..eb511b1adb28a 100644 --- a/pandas/_libs/interval.pyx +++ b/pandas/_libs/interval.pyx @@ -18,7 +18,6 @@ cnp.import_array() cimport pandas._libs.util as util -util.import_array() from pandas._libs.hashtable cimport Int64Vector, Int64VectorData diff --git a/pandas/_libs/tslibs/nattype.pyx b/pandas/_libs/tslibs/nattype.pyx index a55d15a7c4e85..67b415f5e9f66 100644 --- a/pandas/_libs/tslibs/nattype.pyx +++ b/pandas/_libs/tslibs/nattype.pyx @@ -382,7 +382,7 @@ class NaTType(_NaT): ) combine = _make_error_func('combine', # noqa:E128 """ - Timsetamp.combine(date, time) + Timestamp.combine(date, time) date, time -> datetime with same date and time fields """ @@ -401,24 +401,7 @@ class NaTType(_NaT): # GH9513 NaT methods (except to_datetime64) to raise, return np.nan, or # return NaT create functions that raise, for binding to NaTType astimezone = _make_error_func('astimezone', # noqa:E128 - """ - Convert tz-aware Timestamp to another time zone. - - Parameters - ---------- - tz : str, pytz.timezone, dateutil.tz.tzfile or None - Time zone for time which Timestamp will be converted to. - None will remove timezone holding UTC time. - - Returns - ------- - converted : Timestamp - - Raises - ------ - TypeError - If Timestamp is tz-naive. - """) + """Alias for tz_convert. 
See tz_convert.__doc__""") fromordinal = _make_error_func('fromordinal', # noqa:E128 """ Timestamp.fromordinal(ordinal, freq=None, tz=None) diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx index 0a19d8749fc7c..c913a127d7c45 100644 --- a/pandas/_libs/tslibs/timedeltas.pyx +++ b/pandas/_libs/tslibs/timedeltas.pyx @@ -1105,6 +1105,36 @@ cdef class _Timedelta(timedelta): .format(td=components, seconds=seconds)) return tpl + # ---------------------------------------------------------------- + + def _round(self, freq, rounder): + cdef: + int64_t result, unit + + unit = to_offset(freq).nanos + result = unit * rounder(self.value / float(unit)) + return Timedelta(result, unit='ns') + + def floor(self, freq): + """ + return a new Timedelta floored to this resolution + + Parameters + ---------- + freq : a freq string indicating the flooring resolution + """ + return self._round(freq, np.floor) + + def ceil(self, freq): + """ + return a new Timedelta ceiled to this resolution + + Parameters + ---------- + freq : a freq string indicating the ceiling resolution + """ + return self._round(freq, np.ceil) + # Python front end to C extension type _Timedelta # This serves as the box for timedelta64 @@ -1206,14 +1236,6 @@ class Timedelta(_Timedelta): object_state = self.value, return (Timedelta, object_state) - def _round(self, freq, rounder): - cdef: - int64_t result, unit - - unit = to_offset(freq).nanos - result = unit * rounder(self.value / float(unit)) - return Timedelta(result, unit='ns') - def round(self, freq): """ Round the Timedelta to the specified resolution @@ -1232,26 +1254,6 @@ class Timedelta(_Timedelta): """ return self._round(freq, np.round) - def floor(self, freq): - """ - return a new Timedelta floored to this resolution - - Parameters - ---------- - freq : a freq string indicating the flooring resolution - """ - return self._round(freq, np.floor) - - def ceil(self, freq): - """ - return a new Timedelta ceiled to this resolution 
- - Parameters - ---------- - freq : a freq string indicating the ceiling resolution - """ - return self._round(freq, np.ceil) - # ---------------------------------------------------------------- # Arithmetic Methods # TODO: Can some of these be defined in the cython class? diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx index fe0564cb62c30..c2ccde1e2a822 100644 --- a/pandas/_libs/tslibs/timestamps.pyx +++ b/pandas/_libs/tslibs/timestamps.pyx @@ -197,7 +197,7 @@ def round_nsint64(values, mode, freq): # This is PITA. Because we inherit from datetime, which has very specific # construction requirements, we need to do object instantiation in python -# (see Timestamp class above). This will serve as a C extension type that +# (see Timestamp class below). This will serve as a C extension type that # shadows the python class, where we do any heavy lifting. cdef class _Timestamp(datetime): @@ -206,6 +206,119 @@ cdef class _Timestamp(datetime): object freq # frequency reference list _date_attributes + # -------------------------------------------------------------------- + # Constructors + + @classmethod + def fromordinal(cls, ordinal, freq=None, tz=None): + """ + Timestamp.fromordinal(ordinal, freq=None, tz=None) + + passed an ordinal, translate and convert to a ts + note: by definition there cannot be any tz info on the ordinal itself + + Parameters + ---------- + ordinal : int + date corresponding to a proleptic Gregorian ordinal + freq : str, DateOffset + Offset which Timestamp will have + tz : str, pytz.timezone, dateutil.tz.tzfile or None + Time zone for time which Timestamp will have. + """ + return cls(datetime.fromordinal(ordinal), + freq=freq, tz=tz) + + @classmethod + def now(cls, tz=None): + """ + Timestamp.now(tz=None) + + Returns new Timestamp object representing current time local to + tz. 
+ + Parameters + ---------- + tz : str or timezone object, default None + Timezone to localize to + """ + if is_string_object(tz): + tz = maybe_get_tz(tz) + return cls(datetime.now(tz)) + + @classmethod + def today(cls, tz=None): + """ + Timestamp.today(cls, tz=None) + + Return the current time in the local timezone. This differs + from datetime.today() in that it can be localized to a + passed timezone. + + Parameters + ---------- + tz : str or timezone object, default None + Timezone to localize to + """ + return cls.now(tz) + + @classmethod + def utcnow(cls): + """ + Timestamp.utcnow() + + Return a new Timestamp representing UTC day and time. + """ + return cls.now(UTC) + + @classmethod + def utcfromtimestamp(cls, ts): + """ + Timestamp.utcfromtimestamp(ts) + + Construct a naive UTC datetime from a POSIX timestamp. + """ + return cls(datetime.utcfromtimestamp(ts)) + + @classmethod + def fromtimestamp(cls, ts): + """ + Timestamp.fromtimestamp(ts) + + timestamp[, tz] -> tz's local time from POSIX timestamp. + """ + return cls(datetime.fromtimestamp(ts)) + + @classmethod + def combine(cls, date, time): + """ + Timestamp.combine(date, time) + + date, time -> datetime with same date and time fields + """ + return cls(datetime.combine(date, time)) + + # -------------------------------------------------------------------- + + @property + def tz(self): + """ + Alias for tzinfo + """ + return self.tzinfo + + @tz.setter + def tz(self, value): + # GH 3746: Prevent localizing or converting the index by setting tz + raise AttributeError("Cannot directly set timezone. 
Use tz_localize() " + "or tz_convert() as appropriate") + + @property + def freqstr(self): + return getattr(self.freq, 'freqstr', self.freq) + + # -------------------------------------------------------------------- + def __hash__(_Timestamp self): if self.nanosecond: return hash(self.value) @@ -267,30 +380,14 @@ cdef class _Timestamp(datetime): # now __reduce_ex__ is defined and higher priority than __reduce__ return self.__reduce__() - def __repr__(self): - stamp = self._repr_base - zone = None - - try: - stamp += self.strftime('%z') - if self.tzinfo: - zone = get_timezone(self.tzinfo) - except ValueError: - year2000 = self.replace(year=2000) - stamp += year2000.strftime('%z') - if self.tzinfo: - zone = get_timezone(self.tzinfo) - - try: - stamp += zone.strftime(' %%Z') - except: - pass - - tz = ", tz='{0}'".format(zone) if zone is not None else "" - freq = "" if self.freq is None else ", freq='{0}'".format(self.freqstr) + def __setstate__(self, state): + self.value = state[0] + self.freq = state[1] + self.tzinfo = state[2] - return "Timestamp('{stamp}'{tz}{freq})".format(stamp=stamp, - tz=tz, freq=freq) + def __reduce__(self): + object_state = self.value, self.freq, self.tzinfo + return (Timestamp, object_state) cdef bint _compare_outside_nanorange(_Timestamp self, datetime other, int op) except -1: @@ -323,24 +420,6 @@ cdef class _Timestamp(datetime): elif other.tzinfo is None: raise TypeError('Cannot compare tz-naive and tz-aware timestamps') - cpdef datetime to_pydatetime(_Timestamp self, bint warn=True): - """ - Convert a Timestamp object to a native Python datetime object. - - If warn=True, issue a warning if nanoseconds is nonzero. 
- """ - if self.nanosecond != 0 and warn: - warnings.warn("Discarding nonzero nanoseconds in conversion", - UserWarning, stacklevel=2) - - return datetime(self.year, self.month, self.day, - self.hour, self.minute, self.second, - self.microsecond, self.tzinfo) - - cpdef to_datetime64(self): - """ Returns a numpy.datetime64 object with 'ns' precision """ - return np.datetime64(self.value, 'ns') - def __add__(self, other): cdef: int64_t other_int, nanos @@ -375,6 +454,11 @@ cdef class _Timestamp(datetime): elif hasattr(other, '_typ'): return NotImplemented + elif not PyDateTime_Check(self): + # cython has called this method with `self, other` swapped, + # since __radd__ is not called by cython classes + return other + self + result = datetime.__add__(self, other) if PyDateTime_Check(result): result = Timestamp(result) @@ -433,7 +517,7 @@ cdef class _Timestamp(datetime): val = tz_convert_single(self.value, UTC, self.tz) return val - cpdef bint _get_start_end_field(self, str field): + cdef bint _get_start_end_field(self, str field): cdef: int64_t val dict kwds @@ -454,7 +538,7 @@ cdef class _Timestamp(datetime): field, freqstr, month_kw) return out[0] - cpdef _get_date_name_field(self, object field, object locale): + cdef _get_date_name_field(self, object field, object locale): cdef: int64_t val object[:] out @@ -464,6 +548,52 @@ cdef class _Timestamp(datetime): field, locale=locale) return out[0] + def _has_time_component(self): + """ + Returns if the Timestamp has a time component + in addition to the date part + """ + return (self.time() != _zero_time + or self.tzinfo is not None + or self.nanosecond != 0) + + @property + def resolution(self): + """ + Return resolution describing the smallest difference between two + times that can be represented by Timestamp object_state + """ + # GH#21336, GH#21365 + return Timedelta(nanoseconds=1) + + # -------------------------------------------------------------------- + # Rendering Methods + + def __repr__(self): + stamp = 
self._repr_base + zone = None + + try: + stamp += self.strftime('%z') + if self.tzinfo: + zone = get_timezone(self.tzinfo) + except ValueError: + year2000 = self.replace(year=2000) + stamp += year2000.strftime('%z') + if self.tzinfo: + zone = get_timezone(self.tzinfo) + + try: + stamp += zone.strftime(' %%Z') + except: + pass + + tz = ", tz='{0}'".format(zone) if zone is not None else "" + freq = "" if self.freq is None else ", freq='{0}'".format(self.freqstr) + + return "Timestamp('{stamp}'{tz}{freq})".format(stamp=stamp, + tz=tz, freq=freq) + @property def _repr_base(self): return '{date} {time}'.format(date=self._date_repr, @@ -498,265 +628,331 @@ cdef class _Timestamp(datetime): return self._date_repr return self._repr_base - @property - def asm8(self): - return np.datetime64(self.value, 'ns') + def isoformat(self, sep='T'): + base = super(_Timestamp, self).isoformat(sep=sep) + if self.nanosecond == 0: + return base - @property - def resolution(self): + if self.tzinfo is not None: + base1, base2 = base[:-6], base[-6:] + else: + base1, base2 = base, "" + + if self.microsecond != 0: + base1 += "%.3d" % self.nanosecond + else: + base1 += ".%.9d" % self.nanosecond + + return base1 + base2 + + # -------------------------------------------------------------------- + # Conversion + + cpdef datetime to_pydatetime(_Timestamp self, bint warn=True): """ - Return resolution describing the smallest difference between two - times that can be represented by Timestamp object_state + Convert a Timestamp object to a native Python datetime object. + + If warn=True, issue a warning if nanoseconds is nonzero. 
""" - # GH#21336, GH#21365 - return Timedelta(nanoseconds=1) + if self.nanosecond != 0 and warn: + warnings.warn("Discarding nonzero nanoseconds in conversion", + UserWarning, stacklevel=2) + + return datetime(self.year, self.month, self.day, + self.hour, self.minute, self.second, + self.microsecond, self.tzinfo) def timestamp(self): """Return POSIX timestamp as float.""" # py27 compat, see GH#17329 return round(self.value / 1e9, 6) + def to_datetime64(self): + """ Returns a numpy.datetime64 object with 'ns' precision """ + return np.datetime64(self.value, 'ns') -# ---------------------------------------------------------------------- + @property + def asm8(self): + return np.datetime64(self.value, 'ns') -# Python front end to C extension type _Timestamp -# This serves as the box for datetime64 + def to_period(self, freq=None): + """ + Return an period of which this timestamp is an observation. + """ + from .period import Period + if self.tz is not None: + # GH#21333 + warnings.warn("Converting to Period representation will " + "drop timezone information.", + UserWarning) -class Timestamp(_Timestamp): - """Pandas replacement for datetime.datetime - - Timestamp is the pandas equivalent of python's Datetime - and is interchangeable with it in most cases. It's the type used - for the entries that make up a DatetimeIndex, and other timeseries - oriented data structures in pandas. - - Parameters - ---------- - ts_input : datetime-like, str, int, float - Value to be converted to Timestamp - freq : str, DateOffset - Offset which Timestamp will have - tz : str, pytz.timezone, dateutil.tz.tzfile or None - Time zone for time which Timestamp will have. - unit : str - Unit used for conversion if ts_input is of type int or float. The - valid values are 'D', 'h', 'm', 's', 'ms', 'us', and 'ns'. For - example, 's' means seconds and 'ms' means milliseconds. - year, month, day : int - .. versionadded:: 0.19.0 - hour, minute, second, microsecond : int, optional, default 0 - .. 
versionadded:: 0.19.0 - nanosecond : int, optional, default 0 - .. versionadded:: 0.23.0 - tzinfo : datetime.tzinfo, optional, default None - .. versionadded:: 0.19.0 + if freq is None: + freq = self.freq - Notes - ----- - There are essentially three calling conventions for the constructor. The - primary form accepts four parameters. They can be passed by position or - keyword. + return Period(self, freq=freq) - The other two forms mimic the parameters from ``datetime.datetime``. They - can be passed by either position or keyword, but not both mixed together. + def tz_localize(self, tz, ambiguous='raise', nonexistent='raise', + errors=None): + """ + Convert naive Timestamp to local time zone, or remove + timezone from tz-aware Timestamp. - Examples - -------- - Using the primary calling convention: + Parameters + ---------- + tz : str, pytz.timezone, dateutil.tz.tzfile or None + Time zone for time which Timestamp will be converted to. + None will remove timezone holding local time. - This converts a datetime-like string - >>> pd.Timestamp('2017-01-01T12') - Timestamp('2017-01-01 12:00:00') + ambiguous : bool, 'NaT', default 'raise' + When clocks moved backward due to DST, ambiguous times may arise. + For example in Central European Time (UTC+01), when going from + 03:00 DST to 02:00 non-DST, 02:30:00 local time occurs both at + 00:30:00 UTC and at 01:30:00 UTC. In such a situation, the + `ambiguous` parameter dictates how ambiguous times should be + handled. 
- This converts a float representing a Unix epoch in units of seconds - >>> pd.Timestamp(1513393355.5, unit='s') - Timestamp('2017-12-16 03:02:35.500000') + - bool contains flags to determine if time is dst or not (note + that this flag is only applicable for ambiguous fall dst dates) + - 'NaT' will return NaT for an ambiguous time + - 'raise' will raise an AmbiguousTimeError for an ambiguous time - This converts an int representing a Unix-epoch in units of seconds - and for a particular timezone - >>> pd.Timestamp(1513393355, unit='s', tz='US/Pacific') - Timestamp('2017-12-15 19:02:35-0800', tz='US/Pacific') + nonexistent : 'shift_forward', 'shift_backward, 'NaT', timedelta, + default 'raise' + A nonexistent time does not exist in a particular timezone + where clocks moved forward due to DST. - Using the other two forms that mimic the API for ``datetime.datetime``: + - 'shift_forward' will shift the nonexistent time forward to the + closest existing time + - 'shift_backward' will shift the nonexistent time backward to the + closest existing time + - 'NaT' will return NaT where there are nonexistent times + - timedelta objects will shift nonexistent times by the timedelta + - 'raise' will raise an NonExistentTimeError if there are + nonexistent times - >>> pd.Timestamp(2017, 1, 1, 12) - Timestamp('2017-01-01 12:00:00') + .. versionadded:: 0.24.0 - >>> pd.Timestamp(year=2017, month=1, day=1, hour=12) - Timestamp('2017-01-01 12:00:00') - """ + errors : 'raise', 'coerce', default None + - 'raise' will raise a NonExistentTimeError if a timestamp is not + valid in the specified timezone (e.g. due to a transition from + or to DST time). Use ``nonexistent='raise'`` instead. + - 'coerce' will return NaT if the timestamp can not be converted + into the specified timezone. Use ``nonexistent='NaT'`` instead. - @classmethod - def fromordinal(cls, ordinal, freq=None, tz=None): - """ - Timestamp.fromordinal(ordinal, freq=None, tz=None) + .. 
deprecated:: 0.24.0 - passed an ordinal, translate and convert to a ts - note: by definition there cannot be any tz info on the ordinal itself + Returns + ------- + localized : Timestamp - Parameters - ---------- - ordinal : int - date corresponding to a proleptic Gregorian ordinal - freq : str, DateOffset - Offset which Timestamp will have - tz : str, pytz.timezone, dateutil.tz.tzfile or None - Time zone for time which Timestamp will have. + Raises + ------ + TypeError + If the Timestamp is tz-aware and tz is not None. """ - return cls(datetime.fromordinal(ordinal), - freq=freq, tz=tz) + if ambiguous == 'infer': + raise ValueError('Cannot infer offset with only one time.') - @classmethod - def now(cls, tz=None): - """ - Timestamp.now(tz=None) + if errors is not None: + warnings.warn("The errors argument is deprecated and will be " + "removed in a future release. Use " + "nonexistent='NaT' or nonexistent='raise' " + "instead.", FutureWarning) + if errors == 'coerce': + nonexistent = 'NaT' + elif errors == 'raise': + nonexistent = 'raise' + else: + raise ValueError("The errors argument must be either 'coerce' " + "or 'raise'.") - Returns new Timestamp object representing current time local to - tz. 
+ nonexistent_options = ('raise', 'NaT', 'shift_forward', + 'shift_backward') + if nonexistent not in nonexistent_options and not isinstance( + nonexistent, timedelta): + raise ValueError("The nonexistent argument must be one of 'raise'," + " 'NaT', 'shift_forward', 'shift_backward' or" + " a timedelta object") - Parameters - ---------- - tz : str or timezone object, default None - Timezone to localize to - """ - if is_string_object(tz): + if self.tzinfo is None: + # tz naive, localize tz = maybe_get_tz(tz) - return cls(datetime.now(tz)) + if not is_string_object(ambiguous): + ambiguous = [ambiguous] + value = tz_localize_to_utc(np.array([self.value], dtype='i8'), tz, + ambiguous=ambiguous, + nonexistent=nonexistent)[0] + return Timestamp(value, tz=tz) + else: + if tz is None: + # reset tz + value = tz_convert_single(self.value, UTC, self.tz) + return Timestamp(value, tz=None) + else: + raise TypeError('Cannot localize tz-aware Timestamp, use ' + 'tz_convert for conversions') - @classmethod - def today(cls, tz=None): + def tz_convert(self, tz): """ - Timestamp.today(cls, tz=None) - - Return the current time in the local timezone. This differs - from datetime.today() in that it can be localized to a - passed timezone. + Convert tz-aware Timestamp to another time zone. Parameters ---------- - tz : str or timezone object, default None - Timezone to localize to - """ - return cls.now(tz) + tz : str, pytz.timezone, dateutil.tz.tzfile or None + Time zone for time which Timestamp will be converted to. + None will remove timezone holding UTC time. - @classmethod - def utcnow(cls): - """ - Timestamp.utcnow() + Returns + ------- + converted : Timestamp - Return a new Timestamp representing UTC day and time. + Raises + ------ + TypeError + If Timestamp is tz-naive. 
""" - return cls.now(UTC) + if self.tzinfo is None: + # tz naive, use tz_localize + raise TypeError('Cannot convert tz-naive Timestamp, use ' + 'tz_localize to localize') + else: + # Same UTC timestamp, different time zone + return Timestamp(self.value, tz=tz) - @classmethod - def utcfromtimestamp(cls, ts): - """ - Timestamp.utcfromtimestamp(ts) + def astimezone(self, tz): + """Alias for tz_convert. See tz_convert.__doc__""" + return self.tz_convert(tz) - Construct a naive UTC datetime from a POSIX timestamp. + def to_julian_date(self): """ - return cls(datetime.utcfromtimestamp(ts)) - - @classmethod - def fromtimestamp(cls, ts): + Convert TimeStamp to a Julian Date. + 0 Julian date is noon January 1, 4713 BC. """ - Timestamp.fromtimestamp(ts) + year = self.year + month = self.month + day = self.day + if month <= 2: + year -= 1 + month += 12 + return (day + + np.fix((153 * month - 457) / 5) + + 365 * year + + np.floor(year / 4) - + np.floor(year / 100) + + np.floor(year / 400) + + 1721118.5 + + (self.hour + + self.minute / 60.0 + + self.second / 3600.0 + + self.microsecond / 3600.0 / 1e+6 + + self.nanosecond / 3600.0 / 1e+9 + ) / 24.0) - timestamp[, tz] -> tz's local time from POSIX timestamp. + def normalize(self): """ - return cls(datetime.fromtimestamp(ts)) - - @classmethod - def combine(cls, date, time): + Normalize Timestamp to midnight, preserving + tz information. 
""" - Timsetamp.combine(date, time) + if self.tz is None or is_utc(self.tz): + DAY_NS = DAY_SECONDS * 1000000000 + normalized_value = self.value - (self.value % DAY_NS) + return Timestamp(normalized_value).tz_localize(self.tz) + normalized_value = normalize_i8_timestamps( + np.array([self.value], dtype='i8'), tz=self.tz)[0] + return Timestamp(normalized_value).tz_localize(self.tz) - date, time -> datetime with same date and time fields + def replace(self, year=None, month=None, day=None, + hour=None, minute=None, second=None, microsecond=None, + nanosecond=None, tzinfo=object, fold=0): """ - return cls(datetime.combine(date, time)) - - def __new__(cls, object ts_input=_no_input, - object freq=None, tz=None, unit=None, - year=None, month=None, day=None, - hour=None, minute=None, second=None, microsecond=None, - nanosecond=None, tzinfo=None): - # The parameter list folds together legacy parameter names (the first - # four) and positional and keyword parameter names from pydatetime. - # - # There are three calling forms: - # - # - In the legacy form, the first parameter, ts_input, is required - # and may be datetime-like, str, int, or float. The second - # parameter, offset, is optional and may be str or DateOffset. - # - # - ints in the first, second, and third arguments indicate - # pydatetime positional arguments. Only the first 8 arguments - # (standing in for year, month, day, hour, minute, second, - # microsecond, tzinfo) may be non-None. As a shortcut, we just - # check that the second argument is an int. - # - # - Nones for the first four (legacy) arguments indicate pydatetime - # keyword arguments. year, month, and day are required. As a - # shortcut, we just check that the first argument was not passed. - # - # Mixing pydatetime positional and keyword arguments is forbidden! 
+ implements datetime.replace, handles nanoseconds - cdef _TSObject ts + Parameters + ---------- + year : int, optional + month : int, optional + day : int, optional + hour : int, optional + minute : int, optional + second : int, optional + microsecond : int, optional + nanosecond : int, optional + tzinfo : tz-convertible, optional + fold : int, optional, default is 0 + added in 3.6, NotImplemented - _date_attributes = [year, month, day, hour, minute, second, - microsecond, nanosecond] + Returns + ------- + Timestamp with fields replaced + """ - if tzinfo is not None: - if not PyTZInfo_Check(tzinfo): - # tzinfo must be a datetime.tzinfo object, GH#17690 - raise TypeError('tzinfo must be a datetime.tzinfo object, ' - 'not %s' % type(tzinfo)) - elif tz is not None: - raise ValueError('Can provide at most one of tz, tzinfo') + cdef: + npy_datetimestruct dts + int64_t value, value_tz, offset + object _tzinfo, result, k, v + datetime ts_input - # User passed tzinfo instead of tz; avoid silently ignoring - tz, tzinfo = tzinfo, None + # set to naive if needed + _tzinfo = self.tzinfo + value = self.value + if _tzinfo is not None: + value_tz = tz_convert_single(value, _tzinfo, UTC) + value += value - value_tz - if is_string_object(ts_input): - # User passed a date string to parse. - # Check that the user didn't also pass a date attribute kwarg. - if any(arg is not None for arg in _date_attributes): - raise ValueError('Cannot pass a date attribute keyword ' - 'argument when passing a date string') + # setup components + dt64_to_dtstruct(value, &dts) + dts.ps = self.nanosecond * 1000 - elif ts_input is _no_input: - # User passed keyword arguments. 
- ts_input = datetime(year, month, day, hour or 0, - minute or 0, second or 0, - microsecond or 0) - elif is_integer_object(freq): - # User passed positional arguments: - # Timestamp(year, month, day[, hour[, minute[, second[, - # microsecond[, nanosecond[, tzinfo]]]]]]) - ts_input = datetime(ts_input, freq, tz, unit or 0, - year or 0, month or 0, day or 0) - nanosecond = hour - tz = minute - freq = None + # replace + def validate(k, v): + """ validate integers """ + if not is_integer_object(v): + raise ValueError("value must be an integer, received " + "{v} for {k}".format(v=type(v), k=k)) + return v - if getattr(ts_input, 'tzinfo', None) is not None and tz is not None: - warnings.warn("Passing a datetime or Timestamp with tzinfo and the" - " tz parameter will raise in the future. Use" - " tz_convert instead.", FutureWarning) + if year is not None: + dts.year = validate('year', year) + if month is not None: + dts.month = validate('month', month) + if day is not None: + dts.day = validate('day', day) + if hour is not None: + dts.hour = validate('hour', hour) + if minute is not None: + dts.min = validate('minute', minute) + if second is not None: + dts.sec = validate('second', second) + if microsecond is not None: + dts.us = validate('microsecond', microsecond) + if nanosecond is not None: + dts.ps = validate('nanosecond', nanosecond) * 1000 + if tzinfo is not object: + _tzinfo = tzinfo - ts = convert_to_tsobject(ts_input, tz, unit, 0, 0, nanosecond or 0) + # reconstruct & check bounds + if _tzinfo is not None and treat_tz_as_pytz(_tzinfo): + # replacing across a DST boundary may induce a new tzinfo object + # see GH#18319 + ts_input = _tzinfo.localize(datetime(dts.year, dts.month, dts.day, + dts.hour, dts.min, dts.sec, + dts.us)) + _tzinfo = ts_input.tzinfo + else: + ts_input = datetime(dts.year, dts.month, dts.day, + dts.hour, dts.min, dts.sec, dts.us, + tzinfo=_tzinfo) - if ts.value == NPY_NAT: - return NaT + ts = convert_datetime_to_tsobject(ts_input, _tzinfo) + 
value = ts.value + (dts.ps // 1000) + if value != NPY_NAT: + check_dts_bounds(&dts) - if freq is None: - # GH 22311: Try to extract the frequency of a given Timestamp input - freq = getattr(ts_input, 'freq', None) - elif not is_offset_object(freq): - freq = to_offset(freq) + return create_timestamp_from_ts(value, dts, _tzinfo, self.freq) - return create_timestamp_from_ts(ts.value, ts.dts, ts.tzinfo, freq) + # -------------------------------------------------------------------- + # Rounding Methods def _round(self, freq, mode, ambiguous='raise', nonexistent='raise'): if self.tz is not None: @@ -889,44 +1085,8 @@ class Timestamp(_Timestamp): """ return self._round(freq, RoundTo.PLUS_INFTY, ambiguous, nonexistent) - @property - def tz(self): - """ - Alias for tzinfo - """ - return self.tzinfo - - @tz.setter - def tz(self, value): - # GH 3746: Prevent localizing or converting the index by setting tz - raise AttributeError("Cannot directly set timezone. Use tz_localize() " - "or tz_convert() as appropriate") - - def __setstate__(self, state): - self.value = state[0] - self.freq = state[1] - self.tzinfo = state[2] - - def __reduce__(self): - object_state = self.value, self.freq, self.tzinfo - return (Timestamp, object_state) - - def to_period(self, freq=None): - """ - Return an period of which this timestamp is an observation. 
- """ - from pandas import Period - - if self.tz is not None: - # GH#21333 - warnings.warn("Converting to Period representation will " - "drop timezone information.", - UserWarning) - - if freq is None: - freq = self.freq - - return Period(self, freq=freq) + # -------------------------------------------------------------------- + # Fields @property def dayofweek(self): @@ -985,7 +1145,9 @@ class Timestamp(_Timestamp): def week(self): return ccalendar.get_week_of_year(self.year, self.month, self.day) - weekofyear = week + @property + def weekofyear(self): + return self.week @property def quarter(self): @@ -995,11 +1157,9 @@ class Timestamp(_Timestamp): def days_in_month(self): return ccalendar.get_days_in_month(self.year, self.month) - daysinmonth = days_in_month - @property - def freqstr(self): - return getattr(self.freq, 'freqstr', self.freq) + def daysinmonth(self): + return self.days_in_month @property def is_month_start(self): @@ -1047,293 +1207,158 @@ class Timestamp(_Timestamp): def is_leap_year(self): return bool(ccalendar.is_leapyear(self.year)) - def tz_localize(self, tz, ambiguous='raise', nonexistent='raise', - errors=None): - """ - Convert naive Timestamp to local time zone, or remove - timezone from tz-aware Timestamp. - Parameters - ---------- - tz : str, pytz.timezone, dateutil.tz.tzfile or None - Time zone for time which Timestamp will be converted to. - None will remove timezone holding local time. +# ---------------------------------------------------------------------- - ambiguous : bool, 'NaT', default 'raise' - When clocks moved backward due to DST, ambiguous times may arise. - For example in Central European Time (UTC+01), when going from - 03:00 DST to 02:00 non-DST, 02:30:00 local time occurs both at - 00:30:00 UTC and at 01:30:00 UTC. In such a situation, the - `ambiguous` parameter dictates how ambiguous times should be - handled. 
+# Python front end to C extension type _Timestamp +# This serves as the box for datetime64 - - bool contains flags to determine if time is dst or not (note - that this flag is only applicable for ambiguous fall dst dates) - - 'NaT' will return NaT for an ambiguous time - - 'raise' will raise an AmbiguousTimeError for an ambiguous time - nonexistent : 'shift_forward', 'shift_backward, 'NaT', timedelta, - default 'raise' - A nonexistent time does not exist in a particular timezone - where clocks moved forward due to DST. +class Timestamp(_Timestamp): + """Pandas replacement for datetime.datetime - - 'shift_forward' will shift the nonexistent time forward to the - closest existing time - - 'shift_backward' will shift the nonexistent time backward to the - closest existing time - - 'NaT' will return NaT where there are nonexistent times - - timedelta objects will shift nonexistent times by the timedelta - - 'raise' will raise an NonExistentTimeError if there are - nonexistent times + Timestamp is the pandas equivalent of python's Datetime + and is interchangeable with it in most cases. It's the type used + for the entries that make up a DatetimeIndex, and other timeseries + oriented data structures in pandas. - .. versionadded:: 0.24.0 + Parameters + ---------- + ts_input : datetime-like, str, int, float + Value to be converted to Timestamp + freq : str, DateOffset + Offset which Timestamp will have + tz : str, pytz.timezone, dateutil.tz.tzfile or None + Time zone for time which Timestamp will have. + unit : str + Unit used for conversion if ts_input is of type int or float. The + valid values are 'D', 'h', 'm', 's', 'ms', 'us', and 'ns'. For + example, 's' means seconds and 'ms' means milliseconds. + year, month, day : int + .. versionadded:: 0.19.0 + hour, minute, second, microsecond : int, optional, default 0 + .. versionadded:: 0.19.0 + nanosecond : int, optional, default 0 + .. versionadded:: 0.23.0 + tzinfo : datetime.tzinfo, optional, default None + .. 
versionadded:: 0.19.0 - errors : 'raise', 'coerce', default None - - 'raise' will raise a NonExistentTimeError if a timestamp is not - valid in the specified timezone (e.g. due to a transition from - or to DST time). Use ``nonexistent='raise'`` instead. - - 'coerce' will return NaT if the timestamp can not be converted - into the specified timezone. Use ``nonexistent='NaT'`` instead. - - .. deprecated:: 0.24.0 - - Returns - ------- - localized : Timestamp - - Raises - ------ - TypeError - If the Timestamp is tz-aware and tz is not None. - """ - if ambiguous == 'infer': - raise ValueError('Cannot infer offset with only one time.') + Notes + ----- + There are essentially three calling conventions for the constructor. The + primary form accepts four parameters. They can be passed by position or + keyword. - if errors is not None: - warnings.warn("The errors argument is deprecated and will be " - "removed in a future release. Use " - "nonexistent='NaT' or nonexistent='raise' " - "instead.", FutureWarning) - if errors == 'coerce': - nonexistent = 'NaT' - elif errors == 'raise': - nonexistent = 'raise' - else: - raise ValueError("The errors argument must be either 'coerce' " - "or 'raise'.") + The other two forms mimic the parameters from ``datetime.datetime``. They + can be passed by either position or keyword, but not both mixed together. 
- nonexistent_options = ('raise', 'NaT', 'shift_forward', - 'shift_backward') - if nonexistent not in nonexistent_options and not isinstance( - nonexistent, timedelta): - raise ValueError("The nonexistent argument must be one of 'raise'," - " 'NaT', 'shift_forward', 'shift_backward' or" - " a timedelta object") + Examples + -------- + Using the primary calling convention: - if self.tzinfo is None: - # tz naive, localize - tz = maybe_get_tz(tz) - if not is_string_object(ambiguous): - ambiguous = [ambiguous] - value = tz_localize_to_utc(np.array([self.value], dtype='i8'), tz, - ambiguous=ambiguous, - nonexistent=nonexistent)[0] - return Timestamp(value, tz=tz) - else: - if tz is None: - # reset tz - value = tz_convert_single(self.value, UTC, self.tz) - return Timestamp(value, tz=None) - else: - raise TypeError('Cannot localize tz-aware Timestamp, use ' - 'tz_convert for conversions') + This converts a datetime-like string + >>> pd.Timestamp('2017-01-01T12') + Timestamp('2017-01-01 12:00:00') - def tz_convert(self, tz): - """ - Convert tz-aware Timestamp to another time zone. + This converts a float representing a Unix epoch in units of seconds + >>> pd.Timestamp(1513393355.5, unit='s') + Timestamp('2017-12-16 03:02:35.500000') - Parameters - ---------- - tz : str, pytz.timezone, dateutil.tz.tzfile or None - Time zone for time which Timestamp will be converted to. - None will remove timezone holding UTC time. + This converts an int representing a Unix-epoch in units of seconds + and for a particular timezone + >>> pd.Timestamp(1513393355, unit='s', tz='US/Pacific') + Timestamp('2017-12-15 19:02:35-0800', tz='US/Pacific') - Returns - ------- - converted : Timestamp + Using the other two forms that mimic the API for ``datetime.datetime``: - Raises - ------ - TypeError - If Timestamp is tz-naive. 
- """ - if self.tzinfo is None: - # tz naive, use tz_localize - raise TypeError('Cannot convert tz-naive Timestamp, use ' - 'tz_localize to localize') - else: - # Same UTC timestamp, different time zone - return Timestamp(self.value, tz=tz) + >>> pd.Timestamp(2017, 1, 1, 12) + Timestamp('2017-01-01 12:00:00') - astimezone = tz_convert + >>> pd.Timestamp(year=2017, month=1, day=1, hour=12) + Timestamp('2017-01-01 12:00:00') + """ - def replace(self, year=None, month=None, day=None, + def __new__(cls, object ts_input=_no_input, + object freq=None, tz=None, unit=None, + year=None, month=None, day=None, hour=None, minute=None, second=None, microsecond=None, - nanosecond=None, tzinfo=object, fold=0): - """ - implements datetime.replace, handles nanoseconds - - Parameters - ---------- - year : int, optional - month : int, optional - day : int, optional - hour : int, optional - minute : int, optional - second : int, optional - microsecond : int, optional - nanosecond : int, optional - tzinfo : tz-convertible, optional - fold : int, optional, default is 0 - added in 3.6, NotImplemented - - Returns - ------- - Timestamp with fields replaced - """ - - cdef: - npy_datetimestruct dts - int64_t value, value_tz, offset - object _tzinfo, result, k, v - datetime ts_input - - # set to naive if needed - _tzinfo = self.tzinfo - value = self.value - if _tzinfo is not None: - value_tz = tz_convert_single(value, _tzinfo, UTC) - value += value - value_tz - - # setup components - dt64_to_dtstruct(value, &dts) - dts.ps = self.nanosecond * 1000 - - # replace - def validate(k, v): - """ validate integers """ - if not is_integer_object(v): - raise ValueError("value must be an integer, received " - "{v} for {k}".format(v=type(v), k=k)) - return v - - if year is not None: - dts.year = validate('year', year) - if month is not None: - dts.month = validate('month', month) - if day is not None: - dts.day = validate('day', day) - if hour is not None: - dts.hour = validate('hour', hour) - if minute 
is not None: - dts.min = validate('minute', minute) - if second is not None: - dts.sec = validate('second', second) - if microsecond is not None: - dts.us = validate('microsecond', microsecond) - if nanosecond is not None: - dts.ps = validate('nanosecond', nanosecond) * 1000 - if tzinfo is not object: - _tzinfo = tzinfo + nanosecond=None, tzinfo=None): + # The parameter list folds together legacy parameter names (the first + # four) and positional and keyword parameter names from pydatetime. + # + # There are three calling forms: + # + # - In the legacy form, the first parameter, ts_input, is required + # and may be datetime-like, str, int, or float. The second + # parameter, offset, is optional and may be str or DateOffset. + # + # - ints in the first, second, and third arguments indicate + # pydatetime positional arguments. Only the first 8 arguments + # (standing in for year, month, day, hour, minute, second, + # microsecond, tzinfo) may be non-None. As a shortcut, we just + # check that the second argument is an int. + # + # - Nones for the first four (legacy) arguments indicate pydatetime + # keyword arguments. year, month, and day are required. As a + # shortcut, we just check that the first argument was not passed. + # + # Mixing pydatetime positional and keyword arguments is forbidden! 
- # reconstruct & check bounds - if _tzinfo is not None and treat_tz_as_pytz(_tzinfo): - # replacing across a DST boundary may induce a new tzinfo object - # see GH#18319 - ts_input = _tzinfo.localize(datetime(dts.year, dts.month, dts.day, - dts.hour, dts.min, dts.sec, - dts.us)) - _tzinfo = ts_input.tzinfo - else: - ts_input = datetime(dts.year, dts.month, dts.day, - dts.hour, dts.min, dts.sec, dts.us, - tzinfo=_tzinfo) + cdef _TSObject ts - ts = convert_datetime_to_tsobject(ts_input, _tzinfo) - value = ts.value + (dts.ps // 1000) - if value != NPY_NAT: - check_dts_bounds(&dts) + _date_attributes = [year, month, day, hour, minute, second, + microsecond, nanosecond] - return create_timestamp_from_ts(value, dts, _tzinfo, self.freq) + if tzinfo is not None: + if not PyTZInfo_Check(tzinfo): + # tzinfo must be a datetime.tzinfo object, GH#17690 + raise TypeError('tzinfo must be a datetime.tzinfo object, ' + 'not %s' % type(tzinfo)) + elif tz is not None: + raise ValueError('Can provide at most one of tz, tzinfo') - def isoformat(self, sep='T'): - base = super(_Timestamp, self).isoformat(sep=sep) - if self.nanosecond == 0: - return base + # User passed tzinfo instead of tz; avoid silently ignoring + tz, tzinfo = tzinfo, None - if self.tzinfo is not None: - base1, base2 = base[:-6], base[-6:] - else: - base1, base2 = base, "" + if is_string_object(ts_input): + # User passed a date string to parse. + # Check that the user didn't also pass a date attribute kwarg. + if any(arg is not None for arg in _date_attributes): + raise ValueError('Cannot pass a date attribute keyword ' + 'argument when passing a date string') - if self.microsecond != 0: - base1 += "%.3d" % self.nanosecond - else: - base1 += ".%.9d" % self.nanosecond + elif ts_input is _no_input: + # User passed keyword arguments. 
+ ts_input = datetime(year, month, day, hour or 0, + minute or 0, second or 0, + microsecond or 0) + elif is_integer_object(freq): + # User passed positional arguments: + # Timestamp(year, month, day[, hour[, minute[, second[, + # microsecond[, nanosecond[, tzinfo]]]]]]) + ts_input = datetime(ts_input, freq, tz, unit or 0, + year or 0, month or 0, day or 0) + nanosecond = hour + tz = minute + freq = None - return base1 + base2 + if getattr(ts_input, 'tzinfo', None) is not None and tz is not None: + warnings.warn("Passing a datetime or Timestamp with tzinfo and the" + " tz parameter will raise in the future. Use" + " tz_convert instead.", FutureWarning) - def _has_time_component(self): - """ - Returns if the Timestamp has a time component - in addition to the date part - """ - return (self.time() != _zero_time - or self.tzinfo is not None - or self.nanosecond != 0) + ts = convert_to_tsobject(ts_input, tz, unit, 0, 0, nanosecond or 0) - def to_julian_date(self): - """ - Convert TimeStamp to a Julian Date. - 0 Julian date is noon January 1, 4713 BC. - """ - year = self.year - month = self.month - day = self.day - if month <= 2: - year -= 1 - month += 12 - return (day + - np.fix((153 * month - 457) / 5) + - 365 * year + - np.floor(year / 4) - - np.floor(year / 100) + - np.floor(year / 400) + - 1721118.5 + - (self.hour + - self.minute / 60.0 + - self.second / 3600.0 + - self.microsecond / 3600.0 / 1e+6 + - self.nanosecond / 3600.0 / 1e+9 - ) / 24.0) + if ts.value == NPY_NAT: + return NaT - def normalize(self): - """ - Normalize Timestamp to midnight, preserving - tz information. 
- """ - if self.tz is None or is_utc(self.tz): - DAY_NS = DAY_SECONDS * 1000000000 - normalized_value = self.value - (self.value % DAY_NS) - return Timestamp(normalized_value).tz_localize(self.tz) - normalized_value = normalize_i8_timestamps( - np.array([self.value], dtype='i8'), tz=self.tz)[0] - return Timestamp(normalized_value).tz_localize(self.tz) + if freq is None: + # GH 22311: Try to extract the frequency of a given Timestamp input + freq = getattr(ts_input, 'freq', None) + elif not is_offset_object(freq): + freq = to_offset(freq) - def __radd__(self, other): - # __radd__ on cython extension types like _Timestamp is not used, so - # define it here instead - return self + other + return create_timestamp_from_ts(ts.value, ts.dts, ts.tzinfo, freq) # Add the min and max fields at the class level
A bunch of methods are defined in Timestamp/Timedelta that can be defined in _Timestamp/_Timedelta. Moving these into the cdef classes is supposedly slightly more efficient. If we can work around the `__cinit__/__new__` rules, we might be able to get rid of _Timestamp entirely. Fix a couple of typos while we're at it.
https://api.github.com/repos/pandas-dev/pandas/pulls/24869
2019-01-21T21:17:44Z
2019-01-28T18:05:46Z
null
2020-04-05T17:37:19Z
TST: resolve issues with test_constructor_dtype_datetime64
diff --git a/pandas/tests/frame/test_dtypes.py b/pandas/tests/frame/test_dtypes.py index b37bf02a6b8e7..ca54993712439 100644 --- a/pandas/tests/frame/test_dtypes.py +++ b/pandas/tests/frame/test_dtypes.py @@ -808,11 +808,15 @@ def test_astype_to_incorrect_datetimelike(self, unit): other = "m8[{}]".format(unit) df = DataFrame(np.array([[1, 2, 3]], dtype=dtype)) - with pytest.raises(TypeError): + msg = (r"cannot astype a datetimelike from \[datetime64\[ns\]\] to" + r" \[timedelta64\[{}\]\]").format(unit) + with pytest.raises(TypeError, match=msg): df.astype(other) + msg = (r"cannot astype a timedelta from \[timedelta64\[ns\]\] to" + r" \[datetime64\[{}\]\]").format(unit) df = DataFrame(np.array([[1, 2, 3]], dtype=other)) - with pytest.raises(TypeError): + with pytest.raises(TypeError, match=msg): df.astype(dtype) def test_timedeltas(self): diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py index 8525b877618c9..96e18c6a60cac 100644 --- a/pandas/tests/series/test_constructors.py +++ b/pandas/tests/series/test_constructors.py @@ -683,17 +683,44 @@ def test_constructor_dtype_datetime64(self): assert s.dtype == 'M8[ns]' # GH3414 related - # msg = (r"cannot astype a datetimelike from \[datetime64\[ns\]\] to" - # r" \[int32\]") - # with pytest.raises(TypeError, match=msg): - # Series(Series(dates).astype('int') / 1000000, dtype='M8[ms]') - pytest.raises(TypeError, lambda x: Series( - Series(dates).astype('int') / 1000000, dtype='M8[ms]')) - - msg = (r"The 'datetime64' dtype has no unit\. 
Please pass in" - r" 'datetime64\[ns\]' instead\.") - with pytest.raises(ValueError, match=msg): - Series(dates, dtype='datetime64') + expected = Series([ + datetime(2013, 1, 1), + datetime(2013, 1, 2), + datetime(2013, 1, 3), + ], dtype='datetime64[ns]') + + result = Series( + Series(dates).astype(np.int64) / 1000000, dtype='M8[ms]') + tm.assert_series_equal(result, expected) + + result = Series(dates, dtype='datetime64[ns]') + tm.assert_series_equal(result, expected) + + expected = Series([ + pd.NaT, + datetime(2013, 1, 2), + datetime(2013, 1, 3), + ], dtype='datetime64[ns]') + result = Series([np.nan] + dates[1:], dtype='datetime64[ns]') + tm.assert_series_equal(result, expected) + + dts = Series(dates, dtype='datetime64[ns]') + + # valid astype + dts.astype('int64') + + # invalid casting + msg = (r"cannot astype a datetimelike from \[datetime64\[ns\]\] to" + r" \[int32\]") + with pytest.raises(TypeError, match=msg): + dts.astype('int32') + + # ints are ok + # we test with np.int64 to get similar results on + # windows / 32-bit platforms + result = Series(dts, dtype=np.int64) + expected = Series(dts.astype(np.int64)) + tm.assert_series_equal(result, expected) # invalid dates can be help as object result = Series([datetime(2, 1, 1)]) diff --git a/pandas/tests/series/test_dtypes.py b/pandas/tests/series/test_dtypes.py index d8046c4944afc..735b8553b14d3 100644 --- a/pandas/tests/series/test_dtypes.py +++ b/pandas/tests/series/test_dtypes.py @@ -415,7 +415,9 @@ def test_astype_generic_timestamp_no_frequency(self, dtype): data = [1] s = Series(data) - msg = "dtype has no unit. Please pass in" + msg = ((r"The '{dtype}' dtype has no unit\. " + r"Please pass in '{dtype}\[ns\]' instead.") + .format(dtype=dtype.__name__)) with pytest.raises(ValueError, match=msg): s.astype(dtype)
closes #24827 xref https://github.com/pandas-dev/pandas/pull/24812#discussion_r248666576 while changing pytest.raises to use the context manager and check the error message it appeared that a test was not raising exceptions as originally intended. the test was related to a legacy numpy related issue #3414
https://api.github.com/repos/pandas-dev/pandas/pulls/24868
2019-01-21T21:12:00Z
2019-03-13T15:44:41Z
2019-03-13T15:44:41Z
2019-03-13T17:17:59Z
BUG: Ensure .astype doesn't use PandasArray
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 360f3ea23ec97..e631a6b4937c4 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -27,13 +27,13 @@ CategoricalDtype, ExtensionDtype, PandasExtensionDtype) from pandas.core.dtypes.generic import ( ABCDataFrame, ABCDatetimeIndex, ABCExtensionArray, ABCIndexClass, - ABCSeries) + ABCPandasArray, ABCSeries) from pandas.core.dtypes.missing import ( _isna_compat, array_equivalent, isna, notna) import pandas.core.algorithms as algos from pandas.core.arrays import ( - Categorical, DatetimeArray, ExtensionArray, TimedeltaArray) + Categorical, DatetimeArray, ExtensionArray, PandasDtype, TimedeltaArray) from pandas.core.base import PandasObject import pandas.core.common as com from pandas.core.indexes.datetimes import DatetimeIndex @@ -576,23 +576,14 @@ def _astype(self, dtype, copy=False, errors='raise', values=None, return self.make_block(Categorical(self.values, dtype=dtype)) - # convert dtypes if needed dtype = pandas_dtype(dtype) + # astype processing if is_dtype_equal(self.dtype, dtype): if copy: return self.copy() return self - klass = None - if is_sparse(self.values): - # special case sparse, Series[Sparse].astype(object) is sparse - klass = ExtensionBlock - elif is_object_dtype(dtype): - klass = ObjectBlock - elif is_extension_array_dtype(dtype): - klass = ExtensionBlock - try: # force the copy here if values is None: @@ -624,7 +615,7 @@ def _astype(self, dtype, copy=False, errors='raise', values=None, pass newb = make_block(values, placement=self.mgr_locs, - klass=klass, ndim=self.ndim) + ndim=self.ndim) except Exception: # noqa: E722 if errors == 'raise': raise @@ -3041,6 +3032,13 @@ def get_block_type(values, dtype=None): def make_block(values, placement, klass=None, ndim=None, dtype=None, fastpath=None): + # Ensure that we don't allow PandasArray / PandasDtype in internals. + # For now, blocks should be backed by ndarrays when possible. 
+ if isinstance(values, ABCPandasArray): + values = values.to_numpy() + if isinstance(dtype, PandasDtype): + dtype = dtype.numpy_dtype + if fastpath is not None: # GH#19265 pyarrow is passing this warnings.warn("fastpath argument is deprecated, will be removed " diff --git a/pandas/tests/internals/test_internals.py b/pandas/tests/internals/test_internals.py index fc520436f02f7..f73b7842ef901 100644 --- a/pandas/tests/internals/test_internals.py +++ b/pandas/tests/internals/test_internals.py @@ -1291,3 +1291,23 @@ def test_block_shape(): assert (a._data.blocks[0].mgr_locs.indexer == b._data.blocks[0].mgr_locs.indexer) + + +def test_make_block_no_pandas_array(): + # https://github.com/pandas-dev/pandas/pull/24866 + arr = pd.array([1, 2]) + + # PandasArray, no dtype + result = make_block(arr, slice(len(arr))) + assert result.is_integer is True + assert result.is_extension is False + + # PandasArray, PandasDtype + result = make_block(arr, slice(len(arr)), dtype=arr.dtype) + assert result.is_integer is True + assert result.is_extension is False + + # ndarray, PandasDtype + result = make_block(arr.to_numpy(), slice(len(arr)), dtype=arr.dtype) + assert result.is_integer is True + assert result.is_extension is False diff --git a/pandas/tests/series/test_internals.py b/pandas/tests/series/test_internals.py index e8f5344689e6f..f6f4a2db359f7 100644 --- a/pandas/tests/series/test_internals.py +++ b/pandas/tests/series/test_internals.py @@ -312,6 +312,14 @@ def test_constructor_no_pandas_array(self): tm.assert_series_equal(ser, result) assert isinstance(result._data.blocks[0], IntBlock) + def test_astype_no_pandas_dtype(self): + # https://github.com/pandas-dev/pandas/pull/24866 + ser = pd.Series([1, 2], dtype="int64") + # Don't have PandasDtype in the public API, so we use `.array.dtype`, + # which is a PandasDtype. 
+ result = ser.astype(ser.array.dtype) + tm.assert_series_equal(result, ser) + def test_from_array(self): result = pd.Series(pd.array(['1H', '2H'], dtype='timedelta64[ns]')) assert result._data.blocks[0].is_extension is False
On 0.24.0rc1, it's possible to end up with a PandasArray in internals. ```python In [8]: ser = pd.Series([1, 2]) In [9]: ser.astype(ser.array.dtype)._data.blocks[0] Out[9]: ExtensionBlock: 2 dtype: int64 ```
https://api.github.com/repos/pandas-dev/pandas/pulls/24866
2019-01-21T19:22:21Z
2019-04-28T21:14:54Z
2019-04-28T21:14:54Z
2019-04-28T21:14:58Z
CLN: fix typo in asv eval.Query suite
diff --git a/asv_bench/benchmarks/eval.py b/asv_bench/benchmarks/eval.py index 837478efbad64..68df38cd50742 100644 --- a/asv_bench/benchmarks/eval.py +++ b/asv_bench/benchmarks/eval.py @@ -45,7 +45,7 @@ def setup(self): index = pd.date_range('20010101', periods=N, freq='T') s = pd.Series(index) self.ts = s.iloc[halfway] - self.df = pd.DataFrame({'a': np.random.randn(N), 'dates': s}, + self.df = pd.DataFrame({'a': np.random.randn(N), 'dates': index}, index=index) data = np.random.randn(N) self.min_val = data.min()
The `eval.Query` asv suite has a large amount of setup overhead due to a typo: ``` $ time asv dev -b eval.Query · Discovering benchmarks · Running 3 total benchmarks (1 commits * 1 environments * 3 benchmarks) [ 0.00%] ·· Benchmarking existing-py_home_chris_anaconda3_bin_python [ 16.67%] ··· eval.Query.time_query_datetime_column 18.3±0ms [ 33.33%] ··· eval.Query.time_query_datetime_index 24.8±0ms [ 50.00%] ··· eval.Query.time_query_with_boolean_selection 44.2±0ms real ***0m25.883s*** user 0m23.047s sys 0m2.531s ``` This typo also breaks the intent of the benchmark, as a test done on a datetime column is being done on one entirely of `NaT`s: ``` a dates 2001-01-01 00:00:00 -1.228806 NaT 2001-01-01 00:01:00 -2.561029 NaT 2001-01-01 00:02:00 0.879283 NaT 2001-01-01 00:03:00 -0.814045 NaT 2001-01-01 00:04:00 -0.905315 NaT ``` This occurs due to `s` having a `RangeIndex` and being re-indexed to `DatetimeIndex`. This triggers a casting of both to `object` type to find any intersection; this is both slow and unsuccessful here. 
Instead, we pass the index directly as the input for the dates column: ``` a dates 2001-01-01 00:00:00 -1.068134 2001-01-01 00:00:00 2001-01-01 00:01:00 0.440343 2001-01-01 00:01:00 2001-01-01 00:02:00 -0.767818 2001-01-01 00:02:00 2001-01-01 00:03:00 -0.849333 2001-01-01 00:03:00 2001-01-01 00:04:00 0.737811 2001-01-01 00:04:00 ``` And get a nice speedup in `asv` setup time as well: ``` $ time asv dev -b eval.Query · Discovering benchmarks · Running 3 total benchmarks (1 commits * 1 environments * 3 benchmarks) [ 0.00%] ·· Benchmarking existing-py_home_chris_anaconda3_bin_python [ 16.67%] ··· eval.Query.time_query_datetime_column 26.9±0ms [ 33.33%] ··· eval.Query.time_query_datetime_index 28.1±0ms [ 50.00%] ··· eval.Query.time_query_with_boolean_selection 47.1±0ms real 0m6.336s user 0m4.281s sys 0m1.828s ``` And here's the timings on the fixed benchmark: ``` $ asv compare v0.23.4 v0.24.0rc1 All benchmarks: before after ratio [04095216] [fdc4db25] <v0.23.4^0> <v0.24.0rc1^0> 14.8±2ms 12.3±1ms ~0.83 eval.Query.time_query_datetime_column 15.7±1ms 14.0±1ms ~0.89 eval.Query.time_query_datetime_index 21.0±4ms 20.9±7ms 0.99 eval.Query.time_query_with_boolean_selection ``` - [ ] closes #xxxx - [ ] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/24865
2019-01-21T18:58:36Z
2019-01-21T19:38:39Z
2019-01-21T19:38:39Z
2019-01-21T19:38:43Z
BLD: silence npy_no_deprecated warnings with numpy>=1.16.0
diff --git a/setup.py b/setup.py index ed2d905f4358b..4bf040b8c8e20 100755 --- a/setup.py +++ b/setup.py @@ -457,6 +457,11 @@ def run(self): directives['linetrace'] = True macros = [('CYTHON_TRACE', '1'), ('CYTHON_TRACE_NOGIL', '1')] +# in numpy>=1.16.0, silence build warnings about deprecated API usage +# we can't do anything about these warnings because they stem from +# cython+numpy version mismatches. +macros.append(('NPY_NO_DEPRECATED_API', '0')) + # ---------------------------------------------------------------------- # Specification of Dependencies
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/24864
2019-01-21T16:36:36Z
2019-01-26T17:47:33Z
2019-01-26T17:47:32Z
2020-04-05T17:37:14Z
BUG: fix floating precision formatting in presence of inf
diff --git a/doc/source/whatsnew/v0.24.0.rst b/doc/source/whatsnew/v0.24.0.rst index 69b59793f7c0d..6787022ba295c 100644 --- a/doc/source/whatsnew/v0.24.0.rst +++ b/doc/source/whatsnew/v0.24.0.rst @@ -1751,6 +1751,7 @@ I/O - Bug in :meth:`DataFrame.to_stata`, :class:`pandas.io.stata.StataWriter` and :class:`pandas.io.stata.StataWriter117` where a exception would leave a partially written and invalid dta file (:issue:`23573`) - Bug in :meth:`DataFrame.to_stata` and :class:`pandas.io.stata.StataWriter117` that produced invalid files when using strLs with non-ASCII characters (:issue:`23573`) - Bug in :class:`HDFStore` that caused it to raise ``ValueError`` when reading a Dataframe in Python 3 from fixed format written in Python 2 (:issue:`24510`) +- Bug in :func:`DataFrame.to_string()` and more generally in the floating ``repr`` formatter. Zeros were not trimmed if ``inf`` was present in a columns while it was the case with NA values. Zeros are now trimmed as in the presence of NA (:issue:`24861`). Plotting ^^^^^^^^ diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py index bdeed58d856cc..2c1fcab1ebde9 100644 --- a/pandas/io/formats/format.py +++ b/pandas/io/formats/format.py @@ -1414,16 +1414,20 @@ def _trim_zeros(str_floats, na_rep='NaN'): """ trimmed = str_floats + def _is_number(x): + return (x != na_rep and not x.endswith('inf')) + def _cond(values): - non_na = [x for x in values if x != na_rep] - return (len(non_na) > 0 and all(x.endswith('0') for x in non_na) and - not (any(('e' in x) or ('E' in x) for x in non_na))) + finite = [x for x in values if _is_number(x)] + return (len(finite) > 0 and all(x.endswith('0') for x in finite) and + not (any(('e' in x) or ('E' in x) for x in finite))) while _cond(trimmed): - trimmed = [x[:-1] if x != na_rep else x for x in trimmed] + trimmed = [x[:-1] if _is_number(x) else x for x in trimmed] # leave one 0 after the decimal points if need be. 
- return [x + "0" if x.endswith('.') and x != na_rep else x for x in trimmed] + return [x + "0" if x.endswith('.') and _is_number(x) else x + for x in trimmed] def _has_names(index): diff --git a/pandas/tests/io/formats/test_format.py b/pandas/tests/io/formats/test_format.py index 52dce572c6d4f..31ab1e050d95c 100644 --- a/pandas/tests/io/formats/test_format.py +++ b/pandas/tests/io/formats/test_format.py @@ -1465,6 +1465,39 @@ def test_to_string_format_na(self): '4 4.0 bar') assert result == expected + def test_to_string_format_inf(self): + # Issue #24861 + tm.reset_display_options() + df = DataFrame({ + 'A': [-np.inf, np.inf, -1, -2.1234, 3, 4], + 'B': [-np.inf, np.inf, 'foo', 'foooo', 'fooooo', 'bar'] + }) + result = df.to_string() + + expected = (' A B\n' + '0 -inf -inf\n' + '1 inf inf\n' + '2 -1.0000 foo\n' + '3 -2.1234 foooo\n' + '4 3.0000 fooooo\n' + '5 4.0000 bar') + assert result == expected + + df = DataFrame({ + 'A': [-np.inf, np.inf, -1., -2., 3., 4.], + 'B': [-np.inf, np.inf, 'foo', 'foooo', 'fooooo', 'bar'] + }) + result = df.to_string() + + expected = (' A B\n' + '0 -inf -inf\n' + '1 inf inf\n' + '2 -1.0 foo\n' + '3 -2.0 foooo\n' + '4 3.0 fooooo\n' + '5 4.0 bar') + assert result == expected + def test_to_string_decimal(self): # Issue #23614 df = DataFrame({'A': [6.0, 3.1, 2.2]})
- [x] closes #24861 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/24863
2019-01-21T16:34:06Z
2019-01-23T12:23:51Z
2019-01-23T12:23:50Z
2019-01-23T12:23:56Z
BUG: Properly parse unicode usecols names in CSV
diff --git a/doc/source/whatsnew/v0.24.0.rst b/doc/source/whatsnew/v0.24.0.rst index a4598b315cbb7..ff017c743a00f 100644 --- a/doc/source/whatsnew/v0.24.0.rst +++ b/doc/source/whatsnew/v0.24.0.rst @@ -1705,6 +1705,7 @@ I/O ^^^ - Bug in :func:`read_csv` in which a column specified with ``CategoricalDtype`` of boolean categories was not being correctly coerced from string values to booleans (:issue:`20498`) +- Bug in :func:`read_csv` in which unicode column names were not being properly recognized with Python 2.x (:issue:`13253`) - Bug in :meth:`DataFrame.to_sql` when writing timezone aware data (``datetime64[ns, tz]`` dtype) would raise a ``TypeError`` (:issue:`9086`) - Bug in :meth:`DataFrame.to_sql` where a naive :class:`DatetimeIndex` would be written as ``TIMESTAMP WITH TIMEZONE`` type in supported databases, e.g. PostgreSQL (:issue:`23510`) - Bug in :meth:`read_excel()` when ``parse_cols`` is specified with an empty dataset (:issue:`9208`) diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index 5590e8f445c67..b31d3f665f47f 100755 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -1296,15 +1296,28 @@ def _validate_usecols_arg(usecols): if usecols is not None: if callable(usecols): return usecols, None - # GH20529, ensure is iterable container but not string. - elif not is_list_like(usecols): + + if not is_list_like(usecols): + # see gh-20529 + # + # Ensure it is iterable container but not string. 
raise ValueError(msg) - else: - usecols_dtype = lib.infer_dtype(usecols, skipna=False) - if usecols_dtype not in ('empty', 'integer', - 'string', 'unicode'): - raise ValueError(msg) - return set(usecols), usecols_dtype + + usecols_dtype = lib.infer_dtype(usecols, skipna=False) + + if usecols_dtype not in ("empty", "integer", + "string", "unicode"): + raise ValueError(msg) + + usecols = set(usecols) + + if usecols_dtype == "unicode": + # see gh-13253 + # + # Python 2.x compatibility + usecols = {col.encode("utf-8") for col in usecols} + + return usecols, usecols_dtype return usecols, None diff --git a/pandas/tests/io/parser/test_usecols.py b/pandas/tests/io/parser/test_usecols.py index 068227908a285..652f78d198ee8 100644 --- a/pandas/tests/io/parser/test_usecols.py +++ b/pandas/tests/io/parser/test_usecols.py @@ -9,7 +9,7 @@ import pytest from pandas._libs.tslib import Timestamp -from pandas.compat import PY2, StringIO +from pandas.compat import StringIO from pandas import DataFrame, Index import pandas.util.testing as tm @@ -387,8 +387,7 @@ def test_usecols_with_mixed_encoding_strings(all_parsers, usecols): @pytest.mark.parametrize("usecols", [ ["あああ", "いい"], - pytest.param([u"あああ", u"いい"], marks=pytest.mark.skipif( - PY2, reason="Buggy behavior: see gh-13253")) + [u"あああ", u"いい"] ]) def test_usecols_with_multi_byte_characters(all_parsers, usecols): data = """あああ,いい,ううう,ええええ
Closes #13253.
https://api.github.com/repos/pandas-dev/pandas/pulls/24856
2019-01-21T01:53:36Z
2019-01-21T14:21:32Z
2019-01-21T14:21:31Z
2019-01-27T00:26:55Z
DOC: Document AttributeError for accessor
diff --git a/doc/source/extending.rst b/doc/source/extending.rst index 3cb7e1ae019e1..e6928d9efde06 100644 --- a/doc/source/extending.rst +++ b/doc/source/extending.rst @@ -28,8 +28,14 @@ decorate a class, providing the name of attribute to add. The class's @pd.api.extensions.register_dataframe_accessor("geo") class GeoAccessor(object): def __init__(self, pandas_obj): + self._validate(pandas_obj) self._obj = pandas_obj + @staticmethod + def _validate(obj): + if 'lat' not in obj.columns or 'lon' not in obj.columns: + raise AttributeError("Must have 'lat' and 'lon'.") + @property def center(self): # return the geographic center point of this DataFrame @@ -54,6 +60,13 @@ This can be a convenient way to extend pandas objects without subclassing them. If you write a custom accessor, make a pull request adding it to our :ref:`ecosystem` page. +We highly recommend validating the data in your accessor's `__init__`. +In our ``GeoAccessor``, we validate that the data contains the expected columns, +raising an ``AttributeError`` when the validation fails. +For a ``Series`` accessor, you should validate the ``dtype`` if the accessor +applies only to certain dtypes. + + .. _extending.extension-types: Extension Types
Closes https://github.com/pandas-dev/pandas/issues/20579
https://api.github.com/repos/pandas-dev/pandas/pulls/24855
2019-01-21T01:41:53Z
2019-01-21T11:58:08Z
2019-01-21T11:58:07Z
2019-01-21T11:58:12Z
DEPS: Bump pyarrow min version to 0.9.0
diff --git a/ci/deps/travis-27.yaml b/ci/deps/travis-27.yaml index 0f2194e71de31..2624797b24fa1 100644 --- a/ci/deps/travis-27.yaml +++ b/ci/deps/travis-27.yaml @@ -22,7 +22,7 @@ dependencies: - patsy - psycopg2 - py - - pyarrow=0.7.0 + - pyarrow=0.9.0 - PyCrypto - pymysql=0.6.3 - pytables diff --git a/doc/source/install.rst b/doc/source/install.rst index fa3ff2f20b150..92364fcc9ebd2 100644 --- a/doc/source/install.rst +++ b/doc/source/install.rst @@ -257,7 +257,7 @@ Optional Dependencies * `SciPy <http://www.scipy.org>`__: miscellaneous statistical functions, Version 0.18.1 or higher * `xarray <http://xarray.pydata.org>`__: pandas like handling for > 2 dims, needed for converting Panels to xarray objects. Version 0.7.0 or higher is recommended. * `PyTables <http://www.pytables.org>`__: necessary for HDF5-based storage, Version 3.4.2 or higher -* `pyarrow <http://arrow.apache.org/docs/python/>`__ (>= 0.7.0): necessary for feather-based storage. +* `pyarrow <http://arrow.apache.org/docs/python/>`__ (>= 0.9.0): necessary for feather-based storage. * `Apache Parquet <https://parquet.apache.org/>`__, either `pyarrow <http://arrow.apache.org/docs/python/>`__ (>= 0.7.0) or `fastparquet <https://fastparquet.readthedocs.io/en/latest>`__ (>= 0.2.1) for parquet-based storage. The `snappy <https://pypi.org/project/python-snappy>`__ and `brotli <https://pypi.org/project/brotlipy>`__ are available for compression support. * `SQLAlchemy <http://www.sqlalchemy.org>`__: for SQL database support. Version 0.8.1 or higher recommended. Besides SQLAlchemy, you also need a database specific driver. You can find an overview of supported drivers for each SQL dialect in the `SQLAlchemy docs <http://docs.sqlalchemy.org/en/latest/dialects/index.html>`__. 
Some common drivers are: diff --git a/doc/source/whatsnew/v0.24.0.rst b/doc/source/whatsnew/v0.24.0.rst index a4598b315cbb7..fbc3c4fe4ce92 100644 --- a/doc/source/whatsnew/v0.24.0.rst +++ b/doc/source/whatsnew/v0.24.0.rst @@ -438,7 +438,7 @@ Pandas 0.24.0 includes a number of API breaking changes. Dependencies have increased minimum versions ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -We have updated our minimum supported versions of dependencies (:issue:`21242`, :issue:`18742`, :issue:`23774`). +We have updated our minimum supported versions of dependencies (:issue:`21242`, :issue:`18742`, :issue:`23774`, :issue:`24767`). If installed, we now require: +-----------------+-----------------+----------+ @@ -456,7 +456,7 @@ If installed, we now require: +-----------------+-----------------+----------+ | pandas-gbq | 0.8.0 | | +-----------------+-----------------+----------+ -| pyarrow | 0.7.0 | | +| pyarrow | 0.9.0 | | +-----------------+-----------------+----------+ | pytables | 3.4.2 | | +-----------------+-----------------+----------+ diff --git a/environment.yml b/environment.yml index 7a177cfee3d39..47fe8e4c2a640 100644 --- a/environment.yml +++ b/environment.yml @@ -39,7 +39,7 @@ dependencies: - nbsphinx - numexpr>=2.6.8 - openpyxl - - pyarrow>=0.7.0 + - pyarrow>=0.9.0 - pytables>=3.4.2 - pytest-cov - pytest-xdist diff --git a/pandas/io/feather_format.py b/pandas/io/feather_format.py index 5c8ab37c7c917..d76e6b75d3762 100644 --- a/pandas/io/feather_format.py +++ b/pandas/io/feather_format.py @@ -24,8 +24,8 @@ def _try_import(): "or via pip\n" "pip install -U pyarrow\n") - if LooseVersion(pyarrow.__version__) < LooseVersion('0.4.1'): - raise ImportError("pyarrow >= 0.4.1 required for feather support\n\n" + if LooseVersion(pyarrow.__version__) < LooseVersion('0.9.0'): + raise ImportError("pyarrow >= 0.9.0 required for feather support\n\n" "you can install via conda\n" "conda install pyarrow -c conda-forge" "or via pip\n" diff --git a/pandas/io/parquet.py 
b/pandas/io/parquet.py index a40fe0c9aa74f..dada9000d901a 100644 --- a/pandas/io/parquet.py +++ b/pandas/io/parquet.py @@ -89,9 +89,9 @@ def __init__(self): "\nor via pip\n" "pip install -U pyarrow\n" ) - if LooseVersion(pyarrow.__version__) < '0.7.0': + if LooseVersion(pyarrow.__version__) < '0.9.0': raise ImportError( - "pyarrow >= 0.7.0 is required for parquet support\n\n" + "pyarrow >= 0.9.0 is required for parquet support\n\n" "you can install via conda\n" "conda install pyarrow -c conda-forge\n" "\nor via pip\n" diff --git a/pandas/tests/io/test_parquet.py b/pandas/tests/io/test_parquet.py index 8833c6f7813c6..01a47a67ad1b6 100644 --- a/pandas/tests/io/test_parquet.py +++ b/pandas/tests/io/test_parquet.py @@ -47,8 +47,6 @@ def engine(request): def pa(): if not _HAVE_PYARROW: pytest.skip("pyarrow is not installed") - if LooseVersion(pyarrow.__version__) < LooseVersion('0.7.0'): - pytest.skip("pyarrow is < 0.7.0") return 'pyarrow' @@ -289,11 +287,6 @@ def test_read_columns(self, engine): def test_write_index(self, engine): check_names = engine != 'fastparquet' - if engine == 'pyarrow': - import pyarrow - if LooseVersion(pyarrow.__version__) < LooseVersion('0.7.0'): - pytest.skip("pyarrow is < 0.7.0") - df = pd.DataFrame({'A': [1, 2, 3]}) check_round_trip(df, engine) @@ -386,10 +379,8 @@ def test_basic(self, pa, df_full): df = df_full # additional supported types for pyarrow - import pyarrow - if LooseVersion(pyarrow.__version__) >= LooseVersion('0.7.0'): - df['datetime_tz'] = pd.date_range('20130101', periods=3, - tz='Europe/Brussels') + df['datetime_tz'] = pd.date_range('20130101', periods=3, + tz='Europe/Brussels') df['bool_with_none'] = [True, None, True] check_round_trip(df, pa) diff --git a/requirements-dev.txt b/requirements-dev.txt index ba78430a4b19e..76aaeefa648f4 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -28,7 +28,7 @@ matplotlib>=2.0.0 nbsphinx numexpr>=2.6.8 openpyxl -pyarrow>=0.7.0 +pyarrow>=0.9.0 tables>=3.4.2 pytest-cov 
pytest-xdist
xref: * https://github.com/pandas-dev/pandas/issues/24617#issuecomment-451702104 * https://github.com/pandas-dev/pandas/issues/24767#issuecomment-455468954 * https://github.com/pandas-dev/pandas/issues/24617#issuecomment-451774147 Closes #24767.
https://api.github.com/repos/pandas-dev/pandas/pulls/24854
2019-01-21T01:27:57Z
2019-01-21T02:46:33Z
2019-01-21T02:46:32Z
2019-01-21T03:04:24Z
Feature/groupby repr ellipses 1135
diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst index f0a359a75f8fc..47b61ac39ee7f 100644 --- a/doc/source/whatsnew/v0.25.0.rst +++ b/doc/source/whatsnew/v0.25.0.rst @@ -212,8 +212,9 @@ Other API Changes - :class:`Timestamp` and :class:`Timedelta` scalars now implement the :meth:`to_numpy` method as aliases to :meth:`Timestamp.to_datetime64` and :meth:`Timedelta.to_timedelta64`, respectively. (:issue:`24653`) - :meth:`Timestamp.strptime` will now rise a ``NotImplementedError`` (:issue:`25016`) - Comparing :class:`Timestamp` with unsupported objects now returns :py:obj:`NotImplemented` instead of raising ``TypeError``. This implies that unsupported rich comparisons are delegated to the other object, and are now consistent with Python 3 behavior for ``datetime`` objects (:issue:`24011`) -- Bug in :meth:`DatetimeIndex.snap` which didn't preserving the ``name`` of the input :class:`Index` (:issue:`25575`) +- Bug in :meth:`DatetimeIndex.snap` which didn't preserve the ``name`` of the input :class:`Index` (:issue:`25575`) - The ``arg`` argument in :meth:`pandas.core.groupby.DataFrameGroupBy.agg` has been renamed to ``func`` (:issue:`26089`) +- :meth:`Index.groupby` and dependent methods (notably :attr:`GroupBy.groups`) now return object with abbreviated repr (:issue:`1135`) .. _whatsnew_0250.deprecations: diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 3248b708cd7aa..5ba24f44b6b53 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -554,7 +554,7 @@ def _repr_fits_horizontal_(self, ignore_width=False): Check if full repr fits in horizontal boundaries imposed by the display options width and max_columns. - In case off non-interactive session, no boundaries apply. + In case of non-interactive session, no boundaries apply. `ignore_width` is here so ipnb+HTML output can behave the way users expect. display.max_columns remains in effect. 
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index bd8a8852964e3..163c96f93bc36 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -56,7 +56,7 @@ class providing the base-class of operations. _apply_docs = dict( template=""" - Apply function `func` group-wise and combine the results together. + Apply function `func` group-wise and combine the results together. The function passed to `apply` must take a {input} as its first argument and return a DataFrame, Series or scalar. `apply` will diff --git a/pandas/core/groupby/grouper.py b/pandas/core/groupby/grouper.py index 8145e5000c056..343530bc475c5 100644 --- a/pandas/core/groupby/grouper.py +++ b/pandas/core/groupby/grouper.py @@ -234,7 +234,6 @@ class Grouping: def __init__(self, index, grouper=None, obj=None, name=None, level=None, sort=True, observed=False, in_axis=False): - self.name = name self.level = level self.grouper = _convert_grouper(index, grouper) diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py index e6b7577d97bad..76cb72379604f 100644 --- a/pandas/core/groupby/ops.py +++ b/pandas/core/groupby/ops.py @@ -256,6 +256,7 @@ def size(self): @cache_readonly def groups(self): """ dict {group name -> group labels} """ + if len(self.groupings) == 1: return self.groupings[0].groups else: @@ -382,7 +383,7 @@ def get_group_levels(self): def _is_builtin_func(self, arg): """ - if we define an builtin function for this argument, return it, + if we define a builtin function for this argument, return it, otherwise return the arg """ return SelectionMixin._builtin_table.get(arg, arg) diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 6bb8f299e811f..e873808c2f8f9 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -6,6 +6,8 @@ import numpy as np +from pandas._config.config import get_option + from pandas._libs import ( algos as libalgos, index as libindex, join as libjoin, lib) from 
pandas._libs.lib import is_datetime_array @@ -4484,7 +4486,7 @@ def groupby(self, values): # map to the label result = {k: self.take(v) for k, v in result.items()} - return result + return IndexGroupbyGroups(result) def map(self, mapper, na_action=None): """ @@ -5274,6 +5276,14 @@ def _add_logical_methods_disabled(cls): Index._add_comparison_methods() +class IndexGroupbyGroups(dict): + """Dict extension to support abbreviated __repr__""" + from pandas.io.formats.printing import pprint_thing + + def __repr__(self): + return pprint_thing(self, max_seq_items=get_option('display.max_rows')) + + def ensure_index_from_sequences(sequences, names=None): """ Construct an index from sequences of data. diff --git a/pandas/io/formats/printing.py b/pandas/io/formats/printing.py index bee66fcbfaa82..7acc6a9adf7cf 100644 --- a/pandas/io/formats/printing.py +++ b/pandas/io/formats/printing.py @@ -92,7 +92,7 @@ def _join_unicode(lines, sep=''): def _pprint_seq(seq, _nest_lvl=0, max_seq_items=None, **kwds): """ internal. pprinter for iterables. you should probably use pprint_thing() - rather then calling this directly. + rather than calling this directly. bounds length of printed sequence, depending on options """ @@ -124,8 +124,9 @@ def _pprint_seq(seq, _nest_lvl=0, max_seq_items=None, **kwds): def _pprint_dict(seq, _nest_lvl=0, max_seq_items=None, **kwds): """ internal. pprinter for iterables. you should probably use pprint_thing() - rather then calling this directly. + rather than calling this directly. 
""" + fmt = "{{{things}}}" pairs = [] diff --git a/pandas/tests/io/formats/test_format.py b/pandas/tests/io/formats/test_format.py index 9739fe580f4cf..b56e0864fdf70 100644 --- a/pandas/tests/io/formats/test_format.py +++ b/pandas/tests/io/formats/test_format.py @@ -1761,6 +1761,22 @@ def test_period(self): assert str(df) == exp +class TestDataFrameGroupByFormatting(object): + def test_groups_repr_truncates(self): + df = pd.DataFrame({ + 'a': [1, 1, 1, 2, 2, 3], + 'b': [1, 2, 3, 4, 5, 6] + }) + + with option_context('display.max_rows', 2): + x = df.groupby('a').groups + assert x.__repr__().endswith('...}') + + with option_context('display.max_rows', 5): + x = df.groupby('a').groups + assert not x.__repr__().endswith('...}') + + def gen_series_formatting(): s1 = pd.Series(['a'] * 100) s2 = pd.Series(['ab'] * 100)
- [ ] closes #1135 - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry Currently one test is failing: ``` def test_groups(self, df): grouped = df.groupby(['A']) groups = grouped.groups assert groups is grouped.groups # caching works ``` I'm not sure what exactly this test is checking for. Is this a behavior that needs to be kept?
https://api.github.com/repos/pandas-dev/pandas/pulls/24853
2019-01-21T00:17:27Z
2019-07-11T16:09:19Z
null
2019-07-11T16:09:19Z
DOC: CategoricalIndex doc string
diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index c6d31339f950d..b494c41c3b58c 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -42,20 +42,35 @@ typ='method', overwrite=True) class CategoricalIndex(Index, accessor.PandasDelegate): """ - Immutable Index implementing an ordered, sliceable set. CategoricalIndex - represents a sparsely populated Index with an underlying Categorical. + Index based on an underlying :class:`Categorical`. + + CategoricalIndex, like Categorical, can only take on a limited, + and usually fixed, number of possible values (`categories`). Also, + like Categorical, it might have an order, but numerical operations + (additions, divisions, ...) are not possible. Parameters ---------- - data : array-like or Categorical, (1-dimensional) - categories : optional, array-like - categories for the CategoricalIndex - ordered : boolean, - designating if the categories are ordered - copy : bool - Make a copy of input ndarray - name : object - Name to be stored in the index + data : array-like (1-dimensional) + The values of the categorical. If `categories` are given, values not in + `categories` will be replaced with NaN. + categories : index-like, optional + The categories for the categorical. Items need to be unique. + If the categories are not given here (and also not in `dtype`), they + will be inferred from the `data`. + ordered : bool, optional + Whether or not this categorical is treated as an ordered + categorical. If not given here or in `dtype`, the resulting + categorical will be unordered. + dtype : CategoricalDtype or the string "category", optional + If :class:`CategoricalDtype`, cannot be used together with + `categories` or `ordered`. + + .. versionadded:: 0.21.0 + copy : bool, default False + Make a copy of input ndarray. + name : object, optional + Name to be stored in the index. 
Attributes ---------- @@ -75,9 +90,45 @@ class CategoricalIndex(Index, accessor.PandasDelegate): as_unordered map + Raises + ------ + ValueError + If the categories do not validate. + TypeError + If an explicit ``ordered=True`` is given but no `categories` and the + `values` are not sortable. + See Also -------- - Categorical, Index + Index : The base pandas Index type. + Categorical : A categorical array. + CategoricalDtype : Type for categorical data. + + Notes + ----- + See the `user guide + <http://pandas.pydata.org/pandas-docs/stable/user_guide/advanced.html#categoricalindex>`_ + for more. + + Examples + -------- + >>> pd.CategoricalIndex(['a', 'b', 'c', 'a', 'b', 'c']) + CategoricalIndex(['a', 'b', 'c', 'a', 'b', 'c'], categories=['a', 'b', 'c'], ordered=False, dtype='category') # noqa + + ``CategoricalIndex`` can also be instantiated from a ``Categorical``: + + >>> c = pd.Categorical(['a', 'b', 'c', 'a', 'b', 'c']) + >>> pd.CategoricalIndex(c) + CategoricalIndex(['a', 'b', 'c', 'a', 'b', 'c'], categories=['a', 'b', 'c'], ordered=False, dtype='category') # noqa + + Ordered ``CategoricalIndex`` can have a min and max value. + + >>> ci = pd.CategoricalIndex(['a','b','c','a','b','c'], ordered=True, + ... categories=['c', 'b', 'a']) + >>> ci + CategoricalIndex(['a', 'b', 'c', 'a', 'b', 'c'], categories=['c', 'b', 'a'], ordered=True, dtype='category') # noqa + >>> ci.min() + 'c' """ _typ = 'categoricalindex'
Improvements to the doc string of ``CategoricalIndex`` .
https://api.github.com/repos/pandas-dev/pandas/pulls/24852
2019-01-20T23:23:53Z
2019-02-28T00:30:34Z
2019-02-28T00:30:34Z
2019-02-28T11:25:03Z
fix MacPython pandas-wheels failure
diff --git a/pandas/tests/reshape/test_concat.py b/pandas/tests/reshape/test_concat.py index 899daf488638a..ec6123bae327e 100644 --- a/pandas/tests/reshape/test_concat.py +++ b/pandas/tests/reshape/test_concat.py @@ -975,7 +975,8 @@ def test_append_different_columns_types_raises( msg = (r"unorderable types: (Interval|int)\(\) > " r"(int|long|float|str)\(\)|" r"Expected tuple, got (int|long|float|str)|" - r"Cannot compare type 'Timestamp' with type '(int|long)'") + r"Cannot compare type 'Timestamp' with type '(int|long)'|" + r"'>' not supported between instances of 'int' and 'str'") with pytest.raises(TypeError, match=msg): df.append(ser)
xref https://github.com/pandas-dev/pandas/pull/24838#issuecomment-455878312
https://api.github.com/repos/pandas-dev/pandas/pulls/24851
2019-01-20T21:30:39Z
2019-01-20T22:14:55Z
2019-01-20T22:14:55Z
2019-01-20T22:29:16Z
BUG : ValueError in case on NaN value in groupby columns
diff --git a/doc/source/whatsnew/v0.24.0.rst b/doc/source/whatsnew/v0.24.0.rst index 3268575c7064d..c2e5f3b3c9aa2 100644 --- a/doc/source/whatsnew/v0.24.0.rst +++ b/doc/source/whatsnew/v0.24.0.rst @@ -1782,6 +1782,7 @@ Groupby/Resample/Rolling - Bug in :meth:`DataFrame.groupby` did not respect the ``observed`` argument when selecting a column and instead always used ``observed=False`` (:issue:`23970`) - Bug in :func:`pandas.core.groupby.SeriesGroupBy.pct_change` or :func:`pandas.core.groupby.DataFrameGroupBy.pct_change` would previously work across groups when calculating the percent change, where it now correctly works per group (:issue:`21200`, :issue:`21235`). - Bug preventing hash table creation with very large number (2^32) of rows (:issue:`22805`) +- Bug in groupby when grouping on categorical causes ``ValueError`` and incorrect grouping if ``observed=True`` and ``nan`` is present in categorical column (:issue:`24740`, :issue:`21151`). Reshaping ^^^^^^^^^ diff --git a/pandas/core/groupby/grouper.py b/pandas/core/groupby/grouper.py index d8df227d4911a..16c7ea687237c 100644 --- a/pandas/core/groupby/grouper.py +++ b/pandas/core/groupby/grouper.py @@ -299,6 +299,7 @@ def __init__(self, index, grouper=None, obj=None, name=None, level=None, self._labels = self.grouper.codes if observed: codes = algorithms.unique1d(self.grouper.codes) + codes = codes[codes != -1] else: codes = np.arange(len(categories)) diff --git a/pandas/tests/groupby/test_categorical.py b/pandas/tests/groupby/test_categorical.py index 144b64025e1c0..e118135ccc75d 100644 --- a/pandas/tests/groupby/test_categorical.py +++ b/pandas/tests/groupby/test_categorical.py @@ -420,6 +420,39 @@ def test_observed_groups(observed): tm.assert_dict_equal(result, expected) +def test_observed_groups_with_nan(observed): + # GH 24740 + df = pd.DataFrame({'cat': pd.Categorical(['a', np.nan, 'a'], + categories=['a', 'b', 'd']), + 'vals': [1, 2, 3]}) + g = df.groupby('cat', observed=observed) + result = g.groups + if 
observed: + expected = {'a': Index([0, 2], dtype='int64')} + else: + expected = {'a': Index([0, 2], dtype='int64'), + 'b': Index([], dtype='int64'), + 'd': Index([], dtype='int64')} + tm.assert_dict_equal(result, expected) + + +def test_dataframe_categorical_with_nan(observed): + # GH 21151 + s1 = pd.Categorical([np.nan, 'a', np.nan, 'a'], + categories=['a', 'b', 'c']) + s2 = pd.Series([1, 2, 3, 4]) + df = pd.DataFrame({'s1': s1, 's2': s2}) + result = df.groupby('s1', observed=observed).first().reset_index() + if observed: + expected = DataFrame({'s1': pd.Categorical(['a'], + categories=['a', 'b', 'c']), 's2': [2]}) + else: + expected = DataFrame({'s1': pd.Categorical(['a', 'b', 'c'], + categories=['a', 'b', 'c']), + 's2': [2, np.nan, np.nan]}) + tm.assert_frame_equal(result, expected) + + def test_datetime(): # GH9049: ensure backward compatibility levels = pd.date_range('2014-01-01', periods=4)
Fixes GH24740 - [x] closes #24740 - [x] closes #21151 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry **Before** ```python In [5]: ds.groupby("b").groups Out[5]: {'b': Int64Index([1], dtype='int64'), 'c': Int64Index([2], dtype='int64')} In [6]: ds.groupby("b", observed=True).groups --------------------------------------------------------------------------- ValueError Traceback (most recent call last) <ipython-input-6-d76af2ea5948> in <module> ----> 1 ds.groupby("b", observed=True).groups ~/Softwares/pandas/pandas/core/groupby/groupby.py in groups(self) 388 """ 389 self._assure_grouper() --> 390 return self.grouper.groups 391 ``` **After** ```python In [5]: ds.groupby('b',observed=True).groups Out[5]: {'b': Int64Index([1], dtype='int64'), 'c': Int64Index([2], dtype='int64')} ``` **Better Example** ```python In [6]: df = pd.DataFrame({'cat': pd.Categorical(['a', 'c', 'a'], ...: categories=['a', 'b', 'd', 'e', 'f']), ...: 'vals': [1, 2, 3]}) In [7]: df Out[7]: cat vals 0 a 1 1 NaN 2 2 a 3 In [8]: df.groupby('cat').groups Out[8]: {'a': Int64Index([0, 2], dtype='int64'), 'b': Int64Index([], dtype='int64'), 'd': Int64Index([], dtype='int64'), 'e': Int64Index([], dtype='int64'), 'f': Int64Index([], dtype='int64')} In [9]: df.groupby('cat',observed=True).groups Out[9]: {'a': Int64Index([0, 2], dtype='int64')} ```
https://api.github.com/repos/pandas-dev/pandas/pulls/24850
2019-01-20T21:14:03Z
2019-01-22T23:29:34Z
2019-01-22T23:29:34Z
2019-01-22T23:29:38Z
Start whatsnew for 0.24.1 and 0.25.0
diff --git a/doc/source/whatsnew/v0.24.1.rst b/doc/source/whatsnew/v0.24.1.rst new file mode 100644 index 0000000000000..ee4b7ab62b31a --- /dev/null +++ b/doc/source/whatsnew/v0.24.1.rst @@ -0,0 +1,77 @@ +:orphan: + +.. _whatsnew_0241: + +Whats New in 0.24.1 (February XX, 2019) +--------------------------------------- + +.. warning:: + + The 0.24.x series of releases will be the last to support Python 2. Future feature + releases will support Python 3 only. See :ref:`install.dropping-27` for more. + +{{ header }} + +These are the changes in pandas 0.24.1. See :ref:`release` for a full changelog +including other versions of pandas. + + +.. _whatsnew_0241.enhancements: + +Enhancements +^^^^^^^^^^^^ + + +.. _whatsnew_0241.bug_fixes: + +Bug Fixes +~~~~~~~~~ + +**Conversion** + +- +- +- + +**Indexing** + +- +- +- + +**I/O** + +- +- +- + +**Categorical** + +- +- +- + +**Timezones** + +- +- +- + +**Timedelta** + +- +- +- + + +**Other** + +- +- + +.. _whatsnew_0.241.contributors: + +Contributors +~~~~~~~~~~~~ + +.. contributors:: v0.24.0..v0.24.1 diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst new file mode 100644 index 0000000000000..fac42dbd9c7c8 --- /dev/null +++ b/doc/source/whatsnew/v0.25.0.rst @@ -0,0 +1,206 @@ +:orphan: + +.. _whatsnew_0250: + +What's New in 0.25.0 (April XX, 2019) +------------------------------------- + +{{ header }} + +These are the changes in pandas 0.25.0. See :ref:`release` for a full changelog +including other versions of pandas. + + +.. _whatsnew_0250.enhancements.other: + +Other Enhancements +^^^^^^^^^^^^^^^^^^ + +- +- +- + + +.. _whatsnew_0250.api_breaking: + +Backwards incompatible API changes +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. _whatsnew_0250.api.other: + +Other API Changes +^^^^^^^^^^^^^^^^^ + +- +- +- + +.. _whatsnew_0250.deprecations: + +Deprecations +~~~~~~~~~~~~ + +- +- +- + + +.. 
_whatsnew_0250.prior_deprecations: + +Removal of prior version deprecations/changes +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +- +- +- + +.. _whatsnew_0250.performance: + +Performance Improvements +~~~~~~~~~~~~~~~~~~~~~~~~ + +- +- +- + + +.. _whatsnew_0250.bug_fixes: + +Bug Fixes +~~~~~~~~~ + +Categorical +^^^^^^^^^^^ + +- +- +- + +Datetimelike +^^^^^^^^^^^^ + +- +- +- + +Timedelta +^^^^^^^^^ + +- +- +- + +Timezones +^^^^^^^^^ + +- +- +- + +Numeric +^^^^^^^ + +- +- +- + + +Conversion +^^^^^^^^^^ + +- +- +- + +Strings +^^^^^^^ + +- +- +- + + +Interval +^^^^^^^^ + +- +- +- + +Indexing +^^^^^^^^ + +- +- +- + + +Missing +^^^^^^^ + +- +- +- + +MultiIndex +^^^^^^^^^^ + +- +- +- + + +I/O +^^^ + +- +- +- + + +Plotting +^^^^^^^^ + +- +- +- + +Groupby/Resample/Rolling +^^^^^^^^^^^^^^^^^^^^^^^^ + +- +- +- + + +Reshaping +^^^^^^^^^ + +- +- +- + + +Sparse +^^^^^^ + +- +- +- + + +Other +^^^^^ + +- +- +- + + +.. _whatsnew_0.250.contributors: + +Contributors +~~~~~~~~~~~~ + +.. contributors:: v0.24.x..HEAD +
https://api.github.com/repos/pandas-dev/pandas/pulls/24848
2019-01-20T19:59:47Z
2019-01-21T13:21:49Z
2019-01-21T13:21:49Z
2019-01-21T13:21:54Z
ERR/TST: Add pytest idiom to dtypes/test_cast.py
diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index 4049b0321f221..ad62146dda268 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -354,7 +354,7 @@ def infer_dtype_from_scalar(val, pandas_dtype=False): # a 1-element ndarray if isinstance(val, np.ndarray): - msg = "invalid ndarray passed to _infer_dtype_from_scalar" + msg = "invalid ndarray passed to infer_dtype_from_scalar" if val.ndim != 0: raise ValueError(msg) diff --git a/pandas/tests/dtypes/cast/__init__.py b/pandas/tests/dtypes/cast/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/pandas/tests/dtypes/cast/test_construct_from_scalar.py b/pandas/tests/dtypes/cast/test_construct_from_scalar.py new file mode 100644 index 0000000000000..d0f58c811e34c --- /dev/null +++ b/pandas/tests/dtypes/cast/test_construct_from_scalar.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- + +from pandas.core.dtypes.cast import construct_1d_arraylike_from_scalar +from pandas.core.dtypes.dtypes import CategoricalDtype + +from pandas import Categorical +from pandas.util import testing as tm + + +def test_cast_1d_array_like_from_scalar_categorical(): + # see gh-19565 + # + # Categorical result from scalar did not maintain + # categories and ordering of the passed dtype. 
+ cats = ["a", "b", "c"] + cat_type = CategoricalDtype(categories=cats, ordered=False) + expected = Categorical(["a", "a"], categories=cats) + + result = construct_1d_arraylike_from_scalar("a", len(expected), cat_type) + tm.assert_categorical_equal(result, expected, + check_category_order=True, + check_dtype=True) diff --git a/pandas/tests/dtypes/cast/test_construct_ndarray.py b/pandas/tests/dtypes/cast/test_construct_ndarray.py new file mode 100644 index 0000000000000..aa2cb25e62d52 --- /dev/null +++ b/pandas/tests/dtypes/cast/test_construct_ndarray.py @@ -0,0 +1,20 @@ +# -*- coding: utf-8 -*- + +import numpy as np +import pytest + +from pandas.core.dtypes.cast import construct_1d_ndarray_preserving_na + +from pandas.util import testing as tm + + +@pytest.mark.parametrize('values, dtype, expected', [ + ([1, 2, 3], None, np.array([1, 2, 3])), + (np.array([1, 2, 3]), None, np.array([1, 2, 3])), + (['1', '2', None], None, np.array(['1', '2', None])), + (['1', '2', None], np.dtype('str'), np.array(['1', '2', None])), + ([1, 2, None], np.dtype('str'), np.array(['1', '2', None])), +]) +def test_construct_1d_ndarray_preserving_na(values, dtype, expected): + result = construct_1d_ndarray_preserving_na(values, dtype=dtype) + tm.assert_numpy_array_equal(result, expected) diff --git a/pandas/tests/dtypes/cast/test_construct_object_arr.py b/pandas/tests/dtypes/cast/test_construct_object_arr.py new file mode 100644 index 0000000000000..61fc17880ed65 --- /dev/null +++ b/pandas/tests/dtypes/cast/test_construct_object_arr.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- + +import pytest + +from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike + + +@pytest.mark.parametrize("datum1", [1, 2., "3", (4, 5), [6, 7], None]) +@pytest.mark.parametrize("datum2", [8, 9., "10", (11, 12), [13, 14], None]) +def test_cast_1d_array(datum1, datum2): + data = [datum1, datum2] + result = construct_1d_object_array_from_listlike(data) + + # Direct comparison fails: 
https://github.com/numpy/numpy/issues/10218 + assert result.dtype == "object" + assert list(result) == data + + +@pytest.mark.parametrize("val", [1, 2., None]) +def test_cast_1d_array_invalid_scalar(val): + with pytest.raises(TypeError, match="has no len()"): + construct_1d_object_array_from_listlike(val) diff --git a/pandas/tests/dtypes/cast/test_convert_objects.py b/pandas/tests/dtypes/cast/test_convert_objects.py new file mode 100644 index 0000000000000..58ba4161e96a9 --- /dev/null +++ b/pandas/tests/dtypes/cast/test_convert_objects.py @@ -0,0 +1,15 @@ +# -*- coding: utf-8 -*- + +import numpy as np +import pytest + +from pandas.core.dtypes.cast import maybe_convert_objects + + +@pytest.mark.parametrize("data", [[1, 2], ["apply", "banana"]]) +@pytest.mark.parametrize("copy", [True, False]) +def test_maybe_convert_objects_copy(data, copy): + arr = np.array(data) + out = maybe_convert_objects(arr, copy=copy) + + assert (arr is out) is (not copy) diff --git a/pandas/tests/dtypes/cast/test_downcast.py b/pandas/tests/dtypes/cast/test_downcast.py new file mode 100644 index 0000000000000..41607c948b909 --- /dev/null +++ b/pandas/tests/dtypes/cast/test_downcast.py @@ -0,0 +1,82 @@ +# -*- coding: utf-8 -*- + +import numpy as np +import pytest + +from pandas.core.dtypes.cast import maybe_downcast_to_dtype + +from pandas import DatetimeIndex, Series, Timestamp +from pandas.util import testing as tm + + +@pytest.mark.parametrize("arr,dtype,expected", [ + (np.array([8.5, 8.6, 8.7, 8.8, 8.9999999999995]), "infer", + np.array([8.5, 8.6, 8.7, 8.8, 8.9999999999995])), + + (np.array([8., 8., 8., 8., 8.9999999999995]), "infer", + np.array([8, 8, 8, 8, 9], dtype=np.int64)), + + (np.array([8., 8., 8., 8., 9.0000000000005]), "infer", + np.array([8, 8, 8, 8, 9], dtype=np.int64)), +]) +def test_downcast(arr, expected, dtype): + result = maybe_downcast_to_dtype(arr, dtype) + tm.assert_numpy_array_equal(result, expected) + + +def test_downcast_booleans(): + # see gh-16875: coercing of 
booleans. + ser = Series([True, True, False]) + result = maybe_downcast_to_dtype(ser, np.dtype(np.float64)) + + expected = ser + tm.assert_series_equal(result, expected) + + +def test_downcast_conversion_no_nan(any_real_dtype): + dtype = any_real_dtype + expected = np.array([1, 2]) + arr = np.array([1.0, 2.0], dtype=dtype) + + result = maybe_downcast_to_dtype(arr, "infer") + tm.assert_almost_equal(result, expected, check_dtype=False) + + +def test_downcast_conversion_nan(float_dtype): + dtype = float_dtype + data = [1.0, 2.0, np.nan] + + expected = np.array(data, dtype=dtype) + arr = np.array(data, dtype=dtype) + + result = maybe_downcast_to_dtype(arr, "infer") + tm.assert_almost_equal(result, expected) + + +def test_downcast_conversion_empty(any_real_dtype): + dtype = any_real_dtype + arr = np.array([], dtype=dtype) + result = maybe_downcast_to_dtype(arr, "int64") + tm.assert_numpy_array_equal(result, np.array([], dtype=np.int64)) + + +@pytest.mark.parametrize("klass", [np.datetime64, np.timedelta64]) +def test_datetime_likes_nan(klass): + dtype = klass.__name__ + "[ns]" + arr = np.array([1, 2, np.nan]) + + exp = np.array([1, 2, klass("NaT")], dtype) + res = maybe_downcast_to_dtype(arr, dtype) + tm.assert_numpy_array_equal(res, exp) + + +@pytest.mark.parametrize("as_asi", [True, False]) +def test_datetime_with_timezone(as_asi): + # see gh-15426 + ts = Timestamp("2016-01-01 12:00:00", tz="US/Pacific") + exp = DatetimeIndex([ts, ts]) + + obj = exp.asi8 if as_asi else exp + res = maybe_downcast_to_dtype(obj, exp.dtype) + + tm.assert_index_equal(res, exp) diff --git a/pandas/tests/dtypes/cast/test_find_common_type.py b/pandas/tests/dtypes/cast/test_find_common_type.py new file mode 100644 index 0000000000000..d83c8d03e9e42 --- /dev/null +++ b/pandas/tests/dtypes/cast/test_find_common_type.py @@ -0,0 +1,108 @@ +# -*- coding: utf-8 -*- + +import numpy as np +import pytest + +from pandas.core.dtypes.cast import find_common_type +from pandas.core.dtypes.dtypes import ( + 
CategoricalDtype, DatetimeTZDtype, PeriodDtype) + + +@pytest.mark.parametrize("source_dtypes,expected_common_dtype", [ + ((np.int64,), np.int64), + ((np.uint64,), np.uint64), + ((np.float32,), np.float32), + ((np.object,), np.object), + + # Into ints. + ((np.int16, np.int64), np.int64), + ((np.int32, np.uint32), np.int64), + ((np.uint16, np.uint64), np.uint64), + + # Into floats. + ((np.float16, np.float32), np.float32), + ((np.float16, np.int16), np.float32), + ((np.float32, np.int16), np.float32), + ((np.uint64, np.int64), np.float64), + ((np.int16, np.float64), np.float64), + ((np.float16, np.int64), np.float64), + + # Into others. + ((np.complex128, np.int32), np.complex128), + ((np.object, np.float32), np.object), + ((np.object, np.int16), np.object), + + # Bool with int. + ((np.dtype("bool"), np.int64), np.object), + ((np.dtype("bool"), np.int32), np.object), + ((np.dtype("bool"), np.int16), np.object), + ((np.dtype("bool"), np.int8), np.object), + ((np.dtype("bool"), np.uint64), np.object), + ((np.dtype("bool"), np.uint32), np.object), + ((np.dtype("bool"), np.uint16), np.object), + ((np.dtype("bool"), np.uint8), np.object), + + # Bool with float. 
+ ((np.dtype("bool"), np.float64), np.object), + ((np.dtype("bool"), np.float32), np.object), + + ((np.dtype("datetime64[ns]"), np.dtype("datetime64[ns]")), + np.dtype("datetime64[ns]")), + ((np.dtype("timedelta64[ns]"), np.dtype("timedelta64[ns]")), + np.dtype("timedelta64[ns]")), + + ((np.dtype("datetime64[ns]"), np.dtype("datetime64[ms]")), + np.dtype("datetime64[ns]")), + ((np.dtype("timedelta64[ms]"), np.dtype("timedelta64[ns]")), + np.dtype("timedelta64[ns]")), + + ((np.dtype("datetime64[ns]"), np.dtype("timedelta64[ns]")), np.object), + ((np.dtype("datetime64[ns]"), np.int64), np.object) +]) +def test_numpy_dtypes(source_dtypes, expected_common_dtype): + assert find_common_type(source_dtypes) == expected_common_dtype + + +def test_raises_empty_input(): + with pytest.raises(ValueError, match="no types given"): + find_common_type([]) + + +@pytest.mark.parametrize("dtypes,exp_type", [ + ([CategoricalDtype()], "category"), + ([np.object, CategoricalDtype()], np.object), + ([CategoricalDtype(), CategoricalDtype()], "category"), +]) +def test_categorical_dtype(dtypes, exp_type): + assert find_common_type(dtypes) == exp_type + + +def test_datetimetz_dtype_match(): + dtype = DatetimeTZDtype(unit="ns", tz="US/Eastern") + assert find_common_type([dtype, dtype]) == "datetime64[ns, US/Eastern]" + + +@pytest.mark.parametrize("dtype2", [ + DatetimeTZDtype(unit="ns", tz="Asia/Tokyo"), + np.dtype("datetime64[ns]"), np.object, np.int64 +]) +def test_datetimetz_dtype_mismatch(dtype2): + dtype = DatetimeTZDtype(unit="ns", tz="US/Eastern") + assert find_common_type([dtype, dtype2]) == np.object + assert find_common_type([dtype2, dtype]) == np.object + + +def test_period_dtype_match(): + dtype = PeriodDtype(freq="D") + assert find_common_type([dtype, dtype]) == "period[D]" + + +@pytest.mark.parametrize("dtype2", [ + DatetimeTZDtype(unit="ns", tz="Asia/Tokyo"), + PeriodDtype(freq="2D"), PeriodDtype(freq="H"), + np.dtype("datetime64[ns]"), np.object, np.int64 +]) +def 
test_period_dtype_mismatch(dtype2): + dtype = PeriodDtype(freq="D") + assert find_common_type([dtype, dtype2]) == np.object + assert find_common_type([dtype2, dtype]) == np.object diff --git a/pandas/tests/dtypes/cast/test_infer_datetimelike.py b/pandas/tests/dtypes/cast/test_infer_datetimelike.py new file mode 100644 index 0000000000000..b2d63a6bfbd1c --- /dev/null +++ b/pandas/tests/dtypes/cast/test_infer_datetimelike.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- + +import numpy as np +import pytest + +from pandas import DataFrame, NaT, Series, Timestamp + + +@pytest.mark.parametrize("data,exp_size", [ + # see gh-16362. + ([[NaT, "a", "b", 0], [NaT, "b", "c", 1]], 8), + ([[NaT, "a", 0], [NaT, "b", 1]], 6) +]) +def test_maybe_infer_to_datetimelike_df_construct(data, exp_size): + result = DataFrame(np.array(data)) + assert result.size == exp_size + + +def test_maybe_infer_to_datetimelike_ser_construct(): + # see gh-19671. + result = Series(["M1701", Timestamp("20130101")]) + assert result.dtype.kind == "O" diff --git a/pandas/tests/dtypes/cast/test_infer_dtype.py b/pandas/tests/dtypes/cast/test_infer_dtype.py new file mode 100644 index 0000000000000..c7842ac591ed9 --- /dev/null +++ b/pandas/tests/dtypes/cast/test_infer_dtype.py @@ -0,0 +1,160 @@ +# -*- coding: utf-8 -*- + +from datetime import date, datetime, timedelta + +import numpy as np +import pytest + +from pandas.core.dtypes.cast import ( + cast_scalar_to_array, infer_dtype_from_array, infer_dtype_from_scalar) +from pandas.core.dtypes.common import is_dtype_equal + +from pandas import ( + Categorical, Period, Series, Timedelta, Timestamp, date_range) +from pandas.util import testing as tm + + +@pytest.fixture(params=[True, False]) +def pandas_dtype(request): + return request.param + + +def test_infer_dtype_from_int_scalar(any_int_dtype): + # Test that infer_dtype_from_scalar is + # returning correct dtype for int and float. 
+ data = np.dtype(any_int_dtype).type(12) + dtype, val = infer_dtype_from_scalar(data) + assert dtype == type(data) + + +def test_infer_dtype_from_float_scalar(float_dtype): + float_dtype = np.dtype(float_dtype).type + data = float_dtype(12) + + dtype, val = infer_dtype_from_scalar(data) + assert dtype == float_dtype + + +@pytest.mark.parametrize("data,exp_dtype", [ + (12, np.int64), (np.float(12), np.float64) +]) +def test_infer_dtype_from_python_scalar(data, exp_dtype): + dtype, val = infer_dtype_from_scalar(data) + assert dtype == exp_dtype + + +@pytest.mark.parametrize("bool_val", [True, False]) +def test_infer_dtype_from_boolean(bool_val): + dtype, val = infer_dtype_from_scalar(bool_val) + assert dtype == np.bool_ + + +def test_infer_dtype_from_complex(complex_dtype): + data = np.dtype(complex_dtype).type(1) + dtype, val = infer_dtype_from_scalar(data) + assert dtype == np.complex_ + + +@pytest.mark.parametrize("data", [np.datetime64(1, "ns"), Timestamp(1), + datetime(2000, 1, 1, 0, 0)]) +def test_infer_dtype_from_datetime(data): + dtype, val = infer_dtype_from_scalar(data) + assert dtype == "M8[ns]" + + +@pytest.mark.parametrize("data", [np.timedelta64(1, "ns"), Timedelta(1), + timedelta(1)]) +def test_infer_dtype_from_timedelta(data): + dtype, val = infer_dtype_from_scalar(data) + assert dtype == "m8[ns]" + + +@pytest.mark.parametrize("freq", ["M", "D"]) +def test_infer_dtype_from_period(freq, pandas_dtype): + p = Period("2011-01-01", freq=freq) + dtype, val = infer_dtype_from_scalar(p, pandas_dtype=pandas_dtype) + + if pandas_dtype: + exp_dtype = "period[{0}]".format(freq) + exp_val = p.ordinal + else: + exp_dtype = np.object_ + exp_val = p + + assert dtype == exp_dtype + assert val == exp_val + + +@pytest.mark.parametrize("data", [date(2000, 1, 1), "foo", + Timestamp(1, tz="US/Eastern")]) +def test_infer_dtype_misc(data): + dtype, val = infer_dtype_from_scalar(data) + assert dtype == np.object_ + + +@pytest.mark.parametrize("tz", ["UTC", "US/Eastern", 
"Asia/Tokyo"]) +def test_infer_from_scalar_tz(tz, pandas_dtype): + dt = Timestamp(1, tz=tz) + dtype, val = infer_dtype_from_scalar(dt, pandas_dtype=pandas_dtype) + + if pandas_dtype: + exp_dtype = "datetime64[ns, {0}]".format(tz) + exp_val = dt.value + else: + exp_dtype = np.object_ + exp_val = dt + + assert dtype == exp_dtype + assert val == exp_val + + +def test_infer_dtype_from_scalar_errors(): + msg = "invalid ndarray passed to infer_dtype_from_scalar" + + with pytest.raises(ValueError, match=msg): + infer_dtype_from_scalar(np.array([1])) + + +@pytest.mark.parametrize( + "arr, expected, pandas_dtype", + [("foo", np.object_, False), + (b"foo", np.object_, False), + (1, np.int_, False), + (1.5, np.float_, False), + ([1], np.int_, False), + (np.array([1], dtype=np.int64), np.int64, False), + ([np.nan, 1, ""], np.object_, False), + (np.array([[1.0, 2.0]]), np.float_, False), + (Categorical(list("aabc")), np.object_, False), + (Categorical([1, 2, 3]), np.int64, False), + (Categorical(list("aabc")), "category", True), + (Categorical([1, 2, 3]), "category", True), + (Timestamp("20160101"), np.object_, False), + (np.datetime64("2016-01-01"), np.dtype("=M8[D]"), False), + (date_range("20160101", periods=3), + np.dtype("=M8[ns]"), False), + (date_range("20160101", periods=3, tz="US/Eastern"), + "datetime64[ns, US/Eastern]", True), + (Series([1., 2, 3]), np.float64, False), + (Series(list("abc")), np.object_, False), + (Series(date_range("20160101", periods=3, tz="US/Eastern")), + "datetime64[ns, US/Eastern]", True)]) +def test_infer_dtype_from_array(arr, expected, pandas_dtype): + dtype, _ = infer_dtype_from_array(arr, pandas_dtype=pandas_dtype) + assert is_dtype_equal(dtype, expected) + + +@pytest.mark.parametrize("obj,dtype", [ + (1, np.int64), (1.1, np.float64), + (Timestamp("2011-01-01"), "datetime64[ns]"), + (Timestamp("2011-01-01", tz="US/Eastern"), np.object), + (Period("2011-01-01", freq="D"), np.object) +]) +def test_cast_scalar_to_array(obj, dtype): + shape = 
(3, 2) + + exp = np.empty(shape, dtype=dtype) + exp.fill(obj) + + arr = cast_scalar_to_array(shape, obj, dtype=dtype) + tm.assert_numpy_array_equal(arr, exp) diff --git a/pandas/tests/dtypes/test_cast.py b/pandas/tests/dtypes/test_cast.py deleted file mode 100644 index 871e71ea2e4b0..0000000000000 --- a/pandas/tests/dtypes/test_cast.py +++ /dev/null @@ -1,395 +0,0 @@ -# -*- coding: utf-8 -*- - -""" -These test the private routines in types/cast.py - -""" - -from datetime import date, datetime, timedelta - -import numpy as np -import pytest - -from pandas.core.dtypes.cast import ( - cast_scalar_to_array, construct_1d_arraylike_from_scalar, - construct_1d_ndarray_preserving_na, - construct_1d_object_array_from_listlike, find_common_type, - infer_dtype_from_array, infer_dtype_from_scalar, maybe_convert_objects, - maybe_downcast_to_dtype) -from pandas.core.dtypes.common import is_dtype_equal -from pandas.core.dtypes.dtypes import ( - CategoricalDtype, DatetimeTZDtype, PeriodDtype) - -import pandas as pd -from pandas import ( - DataFrame, DatetimeIndex, NaT, Period, Series, Timedelta, Timestamp) -from pandas.util import testing as tm - - -class TestMaybeDowncast(object): - - def test_downcast(self): - # test downcasting - - arr = np.array([8.5, 8.6, 8.7, 8.8, 8.9999999999995]) - result = maybe_downcast_to_dtype(arr, 'infer') - tm.assert_numpy_array_equal(result, arr) - - arr = np.array([8., 8., 8., 8., 8.9999999999995]) - result = maybe_downcast_to_dtype(arr, 'infer') - expected = np.array([8, 8, 8, 8, 9], dtype=np.int64) - tm.assert_numpy_array_equal(result, expected) - - arr = np.array([8., 8., 8., 8., 9.0000000000005]) - result = maybe_downcast_to_dtype(arr, 'infer') - expected = np.array([8, 8, 8, 8, 9], dtype=np.int64) - tm.assert_numpy_array_equal(result, expected) - - # see gh-16875: coercing of booleans. 
- ser = Series([True, True, False]) - result = maybe_downcast_to_dtype(ser, np.dtype(np.float64)) - expected = ser - tm.assert_series_equal(result, expected) - - @pytest.mark.parametrize("dtype", [np.float64, object, np.int64]) - def test_downcast_conversion_no_nan(self, dtype): - expected = np.array([1, 2]) - arr = np.array([1.0, 2.0], dtype=dtype) - - result = maybe_downcast_to_dtype(arr, "infer") - tm.assert_almost_equal(result, expected, check_dtype=False) - - @pytest.mark.parametrize("dtype", [np.float64, object]) - def test_downcast_conversion_nan(self, dtype): - expected = np.array([1.0, 2.0, np.nan], dtype=dtype) - arr = np.array([1.0, 2.0, np.nan], dtype=dtype) - - result = maybe_downcast_to_dtype(arr, "infer") - tm.assert_almost_equal(result, expected) - - @pytest.mark.parametrize("dtype", [np.int32, np.float64, np.float32, - np.bool_, np.int64, object]) - def test_downcast_conversion_empty(self, dtype): - arr = np.array([], dtype=dtype) - result = maybe_downcast_to_dtype(arr, "int64") - tm.assert_numpy_array_equal(result, np.array([], dtype=np.int64)) - - def test_datetimelikes_nan(self): - arr = np.array([1, 2, np.nan]) - exp = np.array([1, 2, np.datetime64('NaT')], dtype='datetime64[ns]') - res = maybe_downcast_to_dtype(arr, 'datetime64[ns]') - tm.assert_numpy_array_equal(res, exp) - - exp = np.array([1, 2, np.timedelta64('NaT')], dtype='timedelta64[ns]') - res = maybe_downcast_to_dtype(arr, 'timedelta64[ns]') - tm.assert_numpy_array_equal(res, exp) - - def test_datetime_with_timezone(self): - # GH 15426 - ts = Timestamp("2016-01-01 12:00:00", tz='US/Pacific') - exp = DatetimeIndex([ts, ts]) - res = maybe_downcast_to_dtype(exp, exp.dtype) - tm.assert_index_equal(res, exp) - - res = maybe_downcast_to_dtype(exp.asi8, exp.dtype) - tm.assert_index_equal(res, exp) - - -class TestInferDtype(object): - - def test_infer_dtype_from_int_scalar(self, any_int_dtype): - # Test that infer_dtype_from_scalar is - # returning correct dtype for int and float. 
- data = np.dtype(any_int_dtype).type(12) - dtype, val = infer_dtype_from_scalar(data) - assert dtype == type(data) - - def test_infer_dtype_from_float_scalar(self, float_dtype): - float_dtype = np.dtype(float_dtype).type - data = float_dtype(12) - - dtype, val = infer_dtype_from_scalar(data) - assert dtype == float_dtype - - def test_infer_dtype_from_python_scalar(self): - data = 12 - dtype, val = infer_dtype_from_scalar(data) - assert dtype == np.int64 - - data = np.float(12) - dtype, val = infer_dtype_from_scalar(data) - assert dtype == np.float64 - - @pytest.mark.parametrize("bool_val", [True, False]) - def test_infer_dtype_from_boolean(self, bool_val): - dtype, val = infer_dtype_from_scalar(bool_val) - assert dtype == np.bool_ - - def test_infer_dtype_from_complex(self, complex_dtype): - data = np.dtype(complex_dtype).type(1) - dtype, val = infer_dtype_from_scalar(data) - assert dtype == np.complex_ - - @pytest.mark.parametrize("data", [np.datetime64(1, "ns"), Timestamp(1), - datetime(2000, 1, 1, 0, 0)]) - def test_infer_dtype_from_datetime(self, data): - dtype, val = infer_dtype_from_scalar(data) - assert dtype == "M8[ns]" - - @pytest.mark.parametrize("data", [np.timedelta64(1, "ns"), Timedelta(1), - timedelta(1)]) - def test_infer_dtype_from_timedelta(self, data): - dtype, val = infer_dtype_from_scalar(data) - assert dtype == "m8[ns]" - - @pytest.mark.parametrize("freq", ["M", "D"]) - def test_infer_dtype_from_period(self, freq): - p = Period("2011-01-01", freq=freq) - dtype, val = infer_dtype_from_scalar(p, pandas_dtype=True) - - assert dtype == "period[{0}]".format(freq) - assert val == p.ordinal - - dtype, val = infer_dtype_from_scalar(p) - assert dtype == np.object_ - assert val == p - - @pytest.mark.parametrize("data", [date(2000, 1, 1), "foo", - Timestamp(1, tz="US/Eastern")]) - def test_infer_dtype_misc(self, data): - dtype, val = infer_dtype_from_scalar(data) - assert dtype == np.object_ - - @pytest.mark.parametrize('tz', ['UTC', 'US/Eastern', 
'Asia/Tokyo']) - def test_infer_from_scalar_tz(self, tz): - dt = Timestamp(1, tz=tz) - dtype, val = infer_dtype_from_scalar(dt, pandas_dtype=True) - assert dtype == 'datetime64[ns, {0}]'.format(tz) - assert val == dt.value - - dtype, val = infer_dtype_from_scalar(dt) - assert dtype == np.object_ - assert val == dt - - def test_infer_dtype_from_scalar_errors(self): - with pytest.raises(ValueError): - infer_dtype_from_scalar(np.array([1])) - - @pytest.mark.parametrize( - "arr, expected, pandas_dtype", - [('foo', np.object_, False), - (b'foo', np.object_, False), - (1, np.int_, False), - (1.5, np.float_, False), - ([1], np.int_, False), - (np.array([1], dtype=np.int64), np.int64, False), - ([np.nan, 1, ''], np.object_, False), - (np.array([[1.0, 2.0]]), np.float_, False), - (pd.Categorical(list('aabc')), np.object_, False), - (pd.Categorical([1, 2, 3]), np.int64, False), - (pd.Categorical(list('aabc')), 'category', True), - (pd.Categorical([1, 2, 3]), 'category', True), - (Timestamp('20160101'), np.object_, False), - (np.datetime64('2016-01-01'), np.dtype('=M8[D]'), False), - (pd.date_range('20160101', periods=3), - np.dtype('=M8[ns]'), False), - (pd.date_range('20160101', periods=3, tz='US/Eastern'), - 'datetime64[ns, US/Eastern]', True), - (pd.Series([1., 2, 3]), np.float64, False), - (pd.Series(list('abc')), np.object_, False), - (pd.Series(pd.date_range('20160101', periods=3, tz='US/Eastern')), - 'datetime64[ns, US/Eastern]', True)]) - def test_infer_dtype_from_array(self, arr, expected, pandas_dtype): - - dtype, _ = infer_dtype_from_array(arr, pandas_dtype=pandas_dtype) - assert is_dtype_equal(dtype, expected) - - def test_cast_scalar_to_array(self): - arr = cast_scalar_to_array((3, 2), 1, dtype=np.int64) - exp = np.ones((3, 2), dtype=np.int64) - tm.assert_numpy_array_equal(arr, exp) - - arr = cast_scalar_to_array((3, 2), 1.1) - exp = np.empty((3, 2), dtype=np.float64) - exp.fill(1.1) - tm.assert_numpy_array_equal(arr, exp) - - arr = cast_scalar_to_array((2, 3), 
Timestamp('2011-01-01')) - exp = np.empty((2, 3), dtype='datetime64[ns]') - exp.fill(np.datetime64('2011-01-01')) - tm.assert_numpy_array_equal(arr, exp) - - # pandas dtype is stored as object dtype - obj = Timestamp('2011-01-01', tz='US/Eastern') - arr = cast_scalar_to_array((2, 3), obj) - exp = np.empty((2, 3), dtype=np.object) - exp.fill(obj) - tm.assert_numpy_array_equal(arr, exp) - - obj = Period('2011-01-01', freq='D') - arr = cast_scalar_to_array((2, 3), obj) - exp = np.empty((2, 3), dtype=np.object) - exp.fill(obj) - tm.assert_numpy_array_equal(arr, exp) - - -class TestMaybe(object): - - def test_maybe_infer_to_datetimelike(self): - # GH16362 - # pandas=0.20.1 raises IndexError: tuple index out of range - result = DataFrame(np.array([[NaT, 'a', 'b', 0], - [NaT, 'b', 'c', 1]])) - assert result.size == 8 - # this construction was fine - result = DataFrame(np.array([[NaT, 'a', 0], - [NaT, 'b', 1]])) - assert result.size == 6 - - # GH19671 - result = Series(['M1701', Timestamp('20130101')]) - assert result.dtype.kind == 'O' - - -class TestConvert(object): - - def test_maybe_convert_objects_copy(self): - values = np.array([1, 2]) - - out = maybe_convert_objects(values, copy=False) - assert values is out - - out = maybe_convert_objects(values, copy=True) - assert values is not out - - values = np.array(['apply', 'banana']) - out = maybe_convert_objects(values, copy=False) - assert values is out - - out = maybe_convert_objects(values, copy=True) - assert values is not out - - -class TestCommonTypes(object): - - @pytest.mark.parametrize("source_dtypes,expected_common_dtype", [ - ((np.int64,), np.int64), - ((np.uint64,), np.uint64), - ((np.float32,), np.float32), - ((np.object,), np.object), - - # into ints - ((np.int16, np.int64), np.int64), - ((np.int32, np.uint32), np.int64), - ((np.uint16, np.uint64), np.uint64), - - # into floats - ((np.float16, np.float32), np.float32), - ((np.float16, np.int16), np.float32), - ((np.float32, np.int16), np.float32), - 
((np.uint64, np.int64), np.float64), - ((np.int16, np.float64), np.float64), - ((np.float16, np.int64), np.float64), - - # into others - ((np.complex128, np.int32), np.complex128), - ((np.object, np.float32), np.object), - ((np.object, np.int16), np.object), - - # bool with int - ((np.dtype('bool'), np.int64), np.object), - ((np.dtype('bool'), np.int32), np.object), - ((np.dtype('bool'), np.int16), np.object), - ((np.dtype('bool'), np.int8), np.object), - ((np.dtype('bool'), np.uint64), np.object), - ((np.dtype('bool'), np.uint32), np.object), - ((np.dtype('bool'), np.uint16), np.object), - ((np.dtype('bool'), np.uint8), np.object), - - # bool with float - ((np.dtype('bool'), np.float64), np.object), - ((np.dtype('bool'), np.float32), np.object), - - ((np.dtype('datetime64[ns]'), np.dtype('datetime64[ns]')), - np.dtype('datetime64[ns]')), - ((np.dtype('timedelta64[ns]'), np.dtype('timedelta64[ns]')), - np.dtype('timedelta64[ns]')), - - ((np.dtype('datetime64[ns]'), np.dtype('datetime64[ms]')), - np.dtype('datetime64[ns]')), - ((np.dtype('timedelta64[ms]'), np.dtype('timedelta64[ns]')), - np.dtype('timedelta64[ns]')), - - ((np.dtype('datetime64[ns]'), np.dtype('timedelta64[ns]')), - np.object), - ((np.dtype('datetime64[ns]'), np.int64), np.object) - ]) - def test_numpy_dtypes(self, source_dtypes, expected_common_dtype): - assert find_common_type(source_dtypes) == expected_common_dtype - - def test_raises_empty_input(self): - with pytest.raises(ValueError): - find_common_type([]) - - def test_categorical_dtype(self): - dtype = CategoricalDtype() - assert find_common_type([dtype]) == 'category' - assert find_common_type([dtype, dtype]) == 'category' - assert find_common_type([np.object, dtype]) == np.object - - def test_datetimetz_dtype(self): - dtype = DatetimeTZDtype(unit='ns', tz='US/Eastern') - assert find_common_type([dtype, dtype]) == 'datetime64[ns, US/Eastern]' - - for dtype2 in [DatetimeTZDtype(unit='ns', tz='Asia/Tokyo'), - np.dtype('datetime64[ns]'), 
np.object, np.int64]: - assert find_common_type([dtype, dtype2]) == np.object - assert find_common_type([dtype2, dtype]) == np.object - - def test_period_dtype(self): - dtype = PeriodDtype(freq='D') - assert find_common_type([dtype, dtype]) == 'period[D]' - - for dtype2 in [DatetimeTZDtype(unit='ns', tz='Asia/Tokyo'), - PeriodDtype(freq='2D'), PeriodDtype(freq='H'), - np.dtype('datetime64[ns]'), np.object, np.int64]: - assert find_common_type([dtype, dtype2]) == np.object - assert find_common_type([dtype2, dtype]) == np.object - - @pytest.mark.parametrize('datum1', [1, 2., "3", (4, 5), [6, 7], None]) - @pytest.mark.parametrize('datum2', [8, 9., "10", (11, 12), [13, 14], None]) - def test_cast_1d_array(self, datum1, datum2): - data = [datum1, datum2] - result = construct_1d_object_array_from_listlike(data) - - # Direct comparison fails: https://github.com/numpy/numpy/issues/10218 - assert result.dtype == 'object' - assert list(result) == data - - @pytest.mark.parametrize('val', [1, 2., None]) - def test_cast_1d_array_invalid_scalar(self, val): - pytest.raises(TypeError, construct_1d_object_array_from_listlike, val) - - def test_cast_1d_arraylike_from_scalar_categorical(self): - # GH 19565 - Categorical result from scalar did not maintain categories - # and ordering of the passed dtype - cats = ['a', 'b', 'c'] - cat_type = CategoricalDtype(categories=cats, ordered=False) - expected = pd.Categorical(['a', 'a'], categories=cats) - result = construct_1d_arraylike_from_scalar('a', len(expected), - cat_type) - tm.assert_categorical_equal(result, expected, - check_category_order=True, - check_dtype=True) - - -@pytest.mark.parametrize('values, dtype, expected', [ - ([1, 2, 3], None, np.array([1, 2, 3])), - (np.array([1, 2, 3]), None, np.array([1, 2, 3])), - (['1', '2', None], None, np.array(['1', '2', None])), - (['1', '2', None], np.dtype('str'), np.array(['1', '2', None])), - ([1, 2, None], np.dtype('str'), np.array(['1', '2', None])), -]) -def 
test_construct_1d_ndarray_preserving_na(values, dtype, expected): - result = construct_1d_ndarray_preserving_na(values, dtype=dtype) - tm.assert_numpy_array_equal(result, expected)
Also corrects error message in `infer_dtype_from_scalar`.
https://api.github.com/repos/pandas-dev/pandas/pulls/24847
2019-01-20T19:56:28Z
2019-01-20T22:07:32Z
2019-01-20T22:07:32Z
2019-01-20T23:29:23Z
REF/CLN: ops boilerplate #23853
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index 35b662eaae9a5..e2d0571405d80 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -5,7 +5,7 @@ import numpy as np -from pandas._libs import algos as libalgos, lib +from pandas._libs import algos as libalgos import pandas.compat as compat from pandas.compat import lzip, u from pandas.compat.numpy import function as nv @@ -23,7 +23,7 @@ is_timedelta64_dtype) from pandas.core.dtypes.dtypes import CategoricalDtype from pandas.core.dtypes.generic import ( - ABCCategoricalIndex, ABCDataFrame, ABCIndexClass, ABCSeries) + ABCCategoricalIndex, ABCIndexClass, ABCSeries) from pandas.core.dtypes.inference import is_hashable from pandas.core.dtypes.missing import isna, notna @@ -34,6 +34,7 @@ import pandas.core.common as com from pandas.core.config import get_option from pandas.core.missing import interpolate_2d +from pandas.core.ops import CompWrapper from pandas.core.sorting import nargsort from pandas.io.formats import console @@ -53,17 +54,13 @@ def _cat_compare_op(op): + @CompWrapper(inst_from_senior_cls=True, zerodim=True) def f(self, other): # On python2, you can usually compare any type to any type, and # Categoricals can be seen as a custom type, but having different # results depending whether categories are the same or not is kind of # insane, so be a bit stricter here and use the python3 idea of # comparing only things of equal type. 
- if isinstance(other, (ABCDataFrame, ABCSeries, ABCIndexClass)): - return NotImplemented - - other = lib.item_from_zerodim(other) - if not self.ordered: if op in ['__lt__', '__gt__', '__le__', '__ge__']: raise TypeError("Unordered Categoricals can only compare " diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index 73e799f9e0a36..991ae5aaeded5 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -32,6 +32,7 @@ from pandas.core.algorithms import ( checked_add_with_arr, take, unique1d, value_counts) import pandas.core.common as com +from pandas.core.ops import CompWrapper from pandas.tseries import frequencies from pandas.tseries.offsets import DateOffset, Tick @@ -982,14 +983,12 @@ def _add_timedeltalike_scalar(self, other): new_values = self._maybe_mask_results(new_values) return new_values.view('i8') + @CompWrapper(validate_len=True) def _add_delta_tdi(self, other): """ Add a delta of a TimedeltaIndex return the i8 result view """ - if len(self) != len(other): - raise ValueError("cannot add indices of unequal length") - if isinstance(other, np.ndarray): # ndarray[timedelta64]; wrap in TimedeltaIndex for op from pandas import TimedeltaIndex @@ -1034,6 +1033,7 @@ def _sub_nat(self): result.fill(iNaT) return result.view('timedelta64[ns]') + @CompWrapper(validate_len=True) def _sub_period_array(self, other): """ Subtract a Period Array/Index from self. This is only valid if self @@ -1054,9 +1054,6 @@ def _sub_period_array(self, other): .format(dtype=other.dtype, cls=type(self).__name__)) - if len(self) != len(other): - raise ValueError("cannot subtract arrays/indices of " - "unequal length") if self.freq != other.freq: msg = DIFFERENT_FREQ.format(cls=type(self).__name__, own_freq=self.freqstr, @@ -1143,7 +1140,7 @@ def _time_shift(self, periods, freq=None): Note this is different from ExtensionArray.shift, which shifts the *position* of each element, padding the end with - missing values. 
+ missing values.x Parameters ---------- @@ -1175,8 +1172,8 @@ def _time_shift(self, periods, freq=None): return self._generate_range(start=start, end=end, periods=None, freq=self.freq) + @CompWrapper(zerodim=True) def __add__(self, other): - other = lib.item_from_zerodim(other) if isinstance(other, (ABCSeries, ABCDataFrame)): return NotImplemented @@ -1238,8 +1235,8 @@ def __radd__(self, other): # alias for __add__ return self.__add__(other) + @CompWrapper(zerodim=True) def __sub__(self, other): - other = lib.item_from_zerodim(other) if isinstance(other, (ABCSeries, ABCDataFrame)): return NotImplemented diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index d7a8417a71be2..0d3be76b93620 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -20,8 +20,7 @@ is_extension_type, is_float_dtype, is_object_dtype, is_period_dtype, is_string_dtype, is_timedelta64_dtype, pandas_dtype) from pandas.core.dtypes.dtypes import DatetimeTZDtype -from pandas.core.dtypes.generic import ( - ABCDataFrame, ABCIndexClass, ABCPandasArray, ABCSeries) +from pandas.core.dtypes.generic import ABCIndexClass, ABCPandasArray, ABCSeries from pandas.core.dtypes.missing import isna from pandas.core import ops @@ -29,6 +28,7 @@ from pandas.core.arrays import datetimelike as dtl from pandas.core.arrays._ranges import generate_regular_range import pandas.core.common as com +from pandas.core.ops import CompWrapper from pandas.tseries.frequencies import get_period_alias, to_offset from pandas.tseries.offsets import Day, Tick @@ -130,12 +130,8 @@ def _dt_array_cmp(cls, op): opname = '__{name}__'.format(name=op.__name__) nat_result = True if opname == '__ne__' else False + @CompWrapper(inst_from_senior_cls=True, validate_len=True, zerodim=True) def wrapper(self, other): - if isinstance(other, (ABCDataFrame, ABCSeries, ABCIndexClass)): - return NotImplemented - - other = lib.item_from_zerodim(other) - if isinstance(other, (datetime, 
np.datetime64, compat.string_types)): if isinstance(other, (datetime, np.datetime64)): # GH#18435 strings get a pass from tzawareness compat @@ -152,8 +148,6 @@ def wrapper(self, other): result.fill(nat_result) elif lib.is_scalar(other) or np.ndim(other) == 0: return ops.invalid_comparison(self, other, op) - elif len(other) != len(self): - raise ValueError("Lengths must match") else: if isinstance(other, list): try: @@ -703,11 +697,9 @@ def _assert_tzawareness_compat(self, other): # ----------------------------------------------------------------- # Arithmetic Methods + @CompWrapper(validate_len=True) def _sub_datetime_arraylike(self, other): """subtract DatetimeArray/Index or ndarray[datetime64]""" - if len(self) != len(other): - raise ValueError("cannot add indices of unequal length") - if isinstance(other, np.ndarray): assert is_datetime64_dtype(other) other = type(self)(other) diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py index e0c71b5609096..331d0a190c4e8 100644 --- a/pandas/core/arrays/period.py +++ b/pandas/core/arrays/period.py @@ -16,15 +16,15 @@ from pandas.core.dtypes.common import ( _TD_DTYPE, ensure_object, is_datetime64_dtype, is_float_dtype, - is_list_like, is_period_dtype, pandas_dtype) + is_period_dtype, pandas_dtype) from pandas.core.dtypes.dtypes import PeriodDtype -from pandas.core.dtypes.generic import ( - ABCDataFrame, ABCIndexClass, ABCPeriodIndex, ABCSeries) +from pandas.core.dtypes.generic import ABCIndexClass, ABCPeriodIndex, ABCSeries from pandas.core.dtypes.missing import isna, notna import pandas.core.algorithms as algos from pandas.core.arrays import datetimelike as dtl import pandas.core.common as com +from pandas.core.ops import CompWrapper from pandas.tseries import frequencies from pandas.tseries.offsets import DateOffset, Tick, _delta_to_tick @@ -48,15 +48,10 @@ def _period_array_cmp(cls, op): opname = '__{name}__'.format(name=op.__name__) nat_result = True if opname == '__ne__' else False + 
@CompWrapper(validate_len=True, inst_from_senior_cls=True) def wrapper(self, other): op = getattr(self.asi8, opname) - if isinstance(other, (ABCDataFrame, ABCSeries, ABCIndexClass)): - return NotImplemented - - if is_list_like(other) and len(other) != len(self): - raise ValueError("Lengths must match") - if isinstance(other, Period): self._check_compatible_with(other) diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py index 4f0c96f7927da..86f7e9a26a9bb 100644 --- a/pandas/core/arrays/timedeltas.py +++ b/pandas/core/arrays/timedeltas.py @@ -28,6 +28,7 @@ from pandas.core import ops from pandas.core.algorithms import checked_add_with_arr import pandas.core.common as com +from pandas.core.ops import CompWrapper from pandas.tseries.frequencies import to_offset from pandas.tseries.offsets import Tick @@ -64,10 +65,8 @@ def _td_array_cmp(cls, op): opname = '__{name}__'.format(name=op.__name__) nat_result = True if opname == '__ne__' else False + @CompWrapper(validate_len=True, inst_from_senior_cls=True) def wrapper(self, other): - if isinstance(other, (ABCDataFrame, ABCSeries, ABCIndexClass)): - return NotImplemented - if _is_convertible_to_td(other) or other is NaT: try: other = Timedelta(other) @@ -82,9 +81,6 @@ def wrapper(self, other): elif not is_list_like(other): return ops.invalid_comparison(self, other, op) - elif len(other) != len(self): - raise ValueError("Lengths must match") - else: try: other = type(self)._from_sequence(other)._data diff --git a/pandas/core/ops.py b/pandas/core/ops.py index 10cebc6f94b92..ac1b5711a5627 100644 --- a/pandas/core/ops.py +++ b/pandas/core/ops.py @@ -7,6 +7,7 @@ from __future__ import division import datetime +from functools import wraps import operator import textwrap import warnings @@ -28,8 +29,8 @@ is_integer_dtype, is_list_like, is_object_dtype, is_period_dtype, is_scalar, is_timedelta64_dtype, needs_i8_conversion) from pandas.core.dtypes.generic import ( - ABCDataFrame, ABCIndex, ABCIndexClass, 
ABCPanel, ABCSeries, ABCSparseArray, - ABCSparseSeries) + ABCDataFrame, ABCExtensionArray, ABCIndex, ABCIndexClass, ABCPanel, + ABCSeries, ABCSparseArray, ABCSparseSeries) from pandas.core.dtypes.missing import isna, notna import pandas as pd @@ -136,6 +137,62 @@ def maybe_upcast_for_op(obj): return obj +class CompWrapper(object): + __key__ = ['list_to_array', 'validate_len', + 'zerodim', 'inst_from_senior_cls'] + + def __init__(self, + list_to_array=None, + validate_len=None, + zerodim=None, + inst_from_senior_cls=None): + self.list_to_array = list_to_array + self.validate_len = validate_len + self.zerodim = zerodim + self.inst_from_senior_cls = inst_from_senior_cls + + def _list_to_array(self, comp): + @wraps(comp) + def wrapper(comp_self, comp_other): + if is_list_like(comp_other): + comp_other = np.asarray(comp_other) + return comp(comp_self, comp_other) + return wrapper + + def _validate_len(self, comp): + @wraps(comp) + def wrapper(comp_self, comp_other): + if is_list_like(comp_other) and len(comp_other) != len(comp_self): + raise ValueError("Lengths must match to compare") + return comp(comp_self, comp_other) + return wrapper + + def _zerodim(self, comp): + @wraps(comp) + def wrapper(comp_self, comp_other): + from pandas._libs import lib + comp_other = lib.item_from_zerodim(comp_other) + return comp(comp_self, comp_other) + return wrapper + + def _inst_from_senior_cls(self, comp): + @wraps(comp) + def wrapper(comp_self, comp_other): + if isinstance(comp_self, ABCExtensionArray): + if isinstance(comp_other, (ABCDataFrame, ABCSeries, + ABCIndexClass)): + # Rely on pandas to unbox and dispatch to us. 
+ return NotImplemented + return comp(comp_self, comp_other) + return wrapper + + def __call__(self, comp): + for key in CompWrapper.__key__: + if getattr(self, key) is True: + comp = getattr(self, '_' + key)(comp) + return comp + + # ----------------------------------------------------------------------------- # Reversed Operations not available in the stdlib operator module. # Defining these instead of using lambdas allows us to reference them by name. diff --git a/pandas/tests/arithmetic/test_datetime64.py b/pandas/tests/arithmetic/test_datetime64.py index 405dc0805a285..99f687dbb485b 100644 --- a/pandas/tests/arithmetic/test_datetime64.py +++ b/pandas/tests/arithmetic/test_datetime64.py @@ -2091,7 +2091,7 @@ def test_sub_dti_dti(self): # different length raises ValueError dti1 = date_range('20130101', periods=3) dti2 = date_range('20130101', periods=4) - msg = 'cannot add indices of unequal length' + msg = 'Lengths must match to compare' with pytest.raises(ValueError, match=msg): dti1 - dti2
- closes #23853 - passes `git diff upstream/master -u -- "*.py" | flake8 --diff` Implement a wrapper class other than a single decorator. It is tidier to add more conditions for ops later.
https://api.github.com/repos/pandas-dev/pandas/pulls/24846
2019-01-20T14:28:31Z
2019-04-05T00:52:20Z
null
2019-04-05T00:52:20Z
ENH: to_datetime support iso week year (16607) Updated
diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md index 21df1a3aacd59..faff68b636109 100644 --- a/.github/CONTRIBUTING.md +++ b/.github/CONTRIBUTING.md @@ -8,16 +8,16 @@ Our main contributing guide can be found [in this repo](https://github.com/panda If you are looking to contribute to the *pandas* codebase, the best place to start is the [GitHub "issues" tab](https://github.com/pandas-dev/pandas/issues). This is also a great place for filing bug reports and making suggestions for ways in which we can improve the code and documentation. -If you have additional questions, feel free to ask them on the [mailing list](https://groups.google.com/forum/?fromgroups#!forum/pydata) or on [Gitter](https://gitter.im/pydata/pandas). Further information can also be found in the "[Where to start?](https://github.com/pandas-dev/pandas/blob/master/doc/source/contributing.rst#where-to-start)" section. +If you have additional questions, feel free to ask them on the [mailing list](https://groups.google.com/forum/?fromgroups#!forum/pydata) or on [Gitter](https://gitter.im/pydata/pandas). Further information can also be found in the "[Where to start?](https://github.com/pandas-dev/pandas/blob/master/doc/source/development/contributing.rst#where-to-start)" section. ## Filing Issues -If you notice a bug in the code or documentation, or have suggestions for how we can improve either, feel free to create an issue on the [GitHub "issues" tab](https://github.com/pandas-dev/pandas/issues) using [GitHub's "issue" form](https://github.com/pandas-dev/pandas/issues/new). The form contains some questions that will help us best address your issue. For more information regarding how to file issues against *pandas*, please refer to the "[Bug reports and enhancement requests](https://github.com/pandas-dev/pandas/blob/master/doc/source/contributing.rst#bug-reports-and-enhancement-requests)" section. 
+If you notice a bug in the code or documentation, or have suggestions for how we can improve either, feel free to create an issue on the [GitHub "issues" tab](https://github.com/pandas-dev/pandas/issues) using [GitHub's "issue" form](https://github.com/pandas-dev/pandas/issues/new). The form contains some questions that will help us best address your issue. For more information regarding how to file issues against *pandas*, please refer to the "[Bug reports and enhancement requests](https://github.com/pandas-dev/pandas/blob/master/doc/source/development/contributing.rst#bug-reports-and-enhancement-requests)" section. ## Contributing to the Codebase -The code is hosted on [GitHub](https://www.github.com/pandas-dev/pandas), so you will need to use [Git](http://git-scm.com/) to clone the project and make changes to the codebase. Once you have obtained a copy of the code, you should create a development environment that is separate from your existing Python environment so that you can make and test changes without compromising your own work environment. For more information, please refer to the "[Working with the code](https://github.com/pandas-dev/pandas/blob/master/doc/source/contributing.rst#working-with-the-code)" section. +The code is hosted on [GitHub](https://www.github.com/pandas-dev/pandas), so you will need to use [Git](http://git-scm.com/) to clone the project and make changes to the codebase. Once you have obtained a copy of the code, you should create a development environment that is separate from your existing Python environment so that you can make and test changes without compromising your own work environment. For more information, please refer to the "[Working with the code](https://github.com/pandas-dev/pandas/blob/master/doc/source/development/contributing.rst#working-with-the-code)" section. -Before submitting your changes for review, make sure to check that your changes do not break any tests. 
You can find more information about our test suites in the "[Test-driven development/code writing](https://github.com/pandas-dev/pandas/blob/master/doc/source/contributing.rst#test-driven-development-code-writing)" section. We also have guidelines regarding coding style that will be enforced during testing, which can be found in the "[Code standards](https://github.com/pandas-dev/pandas/blob/master/doc/source/contributing.rst#code-standards)" section. +Before submitting your changes for review, make sure to check that your changes do not break any tests. You can find more information about our test suites in the "[Test-driven development/code writing](https://github.com/pandas-dev/pandas/blob/master/doc/source/contributing.rst#test-driven-development-code-writing)" section. We also have guidelines regarding coding style that will be enforced during testing, which can be found in the "[Code standards](https://github.com/pandas-dev/pandas/blob/master/doc/source/development/contributing.rst#code-standards)" section. -Once your changes are ready to be submitted, make sure to push your changes to GitHub before creating a pull request. Details about how to do that can be found in the "[Contributing your changes to pandas](https://github.com/pandas-dev/pandas/blob/master/doc/source/contributing.rst#contributing-your-changes-to-pandas)" section. We will review your changes, and you will most likely be asked to make additional changes before it is finally ready to merge. However, once it's ready, we will merge it, and you will have successfully contributed to the codebase! +Once your changes are ready to be submitted, make sure to push your changes to GitHub before creating a pull request. Details about how to do that can be found in the "[Contributing your changes to pandas](https://github.com/pandas-dev/pandas/blob/master/doc/source/development/contributing.rst#contributing-your-changes-to-pandas)" section. 
We will review your changes, and you will most likely be asked to make additional changes before it is finally ready to merge. However, once it's ready, we will merge it, and you will have successfully contributed to the codebase! diff --git a/Makefile b/Makefile index d2bd067950fd0..956ff52338839 100644 --- a/Makefile +++ b/Makefile @@ -23,4 +23,3 @@ doc: cd doc; \ python make.py clean; \ python make.py html - python make.py spellcheck diff --git a/asv_bench/benchmarks/__init__.py b/asv_bench/benchmarks/__init__.py index e69de29bb2d1d..eada147852fe1 100644 --- a/asv_bench/benchmarks/__init__.py +++ b/asv_bench/benchmarks/__init__.py @@ -0,0 +1 @@ +"""Pandas benchmarks.""" diff --git a/asv_bench/benchmarks/algorithms.py b/asv_bench/benchmarks/algorithms.py index 34fb161e5afcb..74849d330f2bc 100644 --- a/asv_bench/benchmarks/algorithms.py +++ b/asv_bench/benchmarks/algorithms.py @@ -5,7 +5,6 @@ import pandas as pd from pandas.util import testing as tm - for imp in ['pandas.util', 'pandas.tools.hashing']: try: hashing = import_module(imp) @@ -142,4 +141,4 @@ def time_quantile(self, quantile, interpolation, dtype): self.idx.quantile(quantile, interpolation=interpolation) -from .pandas_vb_common import setup # noqa: F401 +from .pandas_vb_common import setup # noqa: F401 isort:skip diff --git a/asv_bench/benchmarks/groupby.py b/asv_bench/benchmarks/groupby.py index 59e43ee22afde..27d279bb90a31 100644 --- a/asv_bench/benchmarks/groupby.py +++ b/asv_bench/benchmarks/groupby.py @@ -14,7 +14,7 @@ method_blacklist = { 'object': {'median', 'prod', 'sem', 'cumsum', 'sum', 'cummin', 'mean', 'max', 'skew', 'cumprod', 'cummax', 'rank', 'pct_change', 'min', - 'var', 'mad', 'describe', 'std'}, + 'var', 'mad', 'describe', 'std', 'quantile'}, 'datetime': {'median', 'prod', 'sem', 'cumsum', 'sum', 'mean', 'skew', 'cumprod', 'cummax', 'pct_change', 'var', 'mad', 'describe', 'std'} @@ -316,8 +316,9 @@ class GroupByMethods(object): ['all', 'any', 'bfill', 'count', 'cumcount', 'cummax', 
'cummin', 'cumprod', 'cumsum', 'describe', 'ffill', 'first', 'head', 'last', 'mad', 'max', 'min', 'median', 'mean', 'nunique', - 'pct_change', 'prod', 'rank', 'sem', 'shift', 'size', 'skew', - 'std', 'sum', 'tail', 'unique', 'value_counts', 'var'], + 'pct_change', 'prod', 'quantile', 'rank', 'sem', 'shift', + 'size', 'skew', 'std', 'sum', 'tail', 'unique', 'value_counts', + 'var'], ['direct', 'transformation']] def setup(self, dtype, method, application): diff --git a/asv_bench/benchmarks/io/hdf.py b/asv_bench/benchmarks/io/hdf.py index f08904ba70a5f..a5dc28eb9508c 100644 --- a/asv_bench/benchmarks/io/hdf.py +++ b/asv_bench/benchmarks/io/hdf.py @@ -1,7 +1,5 @@ -import warnings - import numpy as np -from pandas import DataFrame, Panel, date_range, HDFStore, read_hdf +from pandas import DataFrame, date_range, HDFStore, read_hdf import pandas.util.testing as tm from ..pandas_vb_common import BaseIO @@ -99,31 +97,6 @@ def time_store_info(self): self.store.info() -class HDFStorePanel(BaseIO): - - def setup(self): - self.fname = '__test__.h5' - with warnings.catch_warnings(record=True): - self.p = Panel(np.random.randn(20, 1000, 25), - items=['Item%03d' % i for i in range(20)], - major_axis=date_range('1/1/2000', periods=1000), - minor_axis=['E%03d' % i for i in range(25)]) - self.store = HDFStore(self.fname) - self.store.append('p1', self.p) - - def teardown(self): - self.store.close() - self.remove(self.fname) - - def time_read_store_table_panel(self): - with warnings.catch_warnings(record=True): - self.store.select('p1') - - def time_write_store_table_panel(self): - with warnings.catch_warnings(record=True): - self.store.append('p2', self.p) - - class HDF(BaseIO): params = ['table', 'fixed'] diff --git a/asv_bench/benchmarks/series_methods.py b/asv_bench/benchmarks/series_methods.py index f7d0083b86a01..3303483c50e20 100644 --- a/asv_bench/benchmarks/series_methods.py +++ b/asv_bench/benchmarks/series_methods.py @@ -124,6 +124,25 @@ def time_dropna(self, dtype): 
self.s.dropna() +class SearchSorted(object): + + goal_time = 0.2 + params = ['int8', 'int16', 'int32', 'int64', + 'uint8', 'uint16', 'uint32', 'uint64', + 'float16', 'float32', 'float64', + 'str'] + param_names = ['dtype'] + + def setup(self, dtype): + N = 10**5 + data = np.array([1] * N + [2] * N + [3] * N).astype(dtype) + self.s = Series(data) + + def time_searchsorted(self, dtype): + key = '2' if dtype == 'str' else 2 + self.s.searchsorted(key) + + class Map(object): params = ['dict', 'Series'] diff --git a/asv_bench/benchmarks/strings.py b/asv_bench/benchmarks/strings.py index e9f2727f64e15..b5b2c955f0133 100644 --- a/asv_bench/benchmarks/strings.py +++ b/asv_bench/benchmarks/strings.py @@ -102,10 +102,10 @@ def setup(self, repeats): N = 10**5 self.s = Series(tm.makeStringIndex(N)) repeat = {'int': 1, 'array': np.random.randint(1, 3, N)} - self.repeat = repeat[repeats] + self.values = repeat[repeats] def time_repeat(self, repeats): - self.s.str.repeat(self.repeat) + self.s.str.repeat(self.values) class Cat(object): diff --git a/ci/code_checks.sh b/ci/code_checks.sh index c8bfc564e7573..c4840f1e836c4 100755 --- a/ci/code_checks.sh +++ b/ci/code_checks.sh @@ -93,7 +93,7 @@ if [[ -z "$CHECK" || "$CHECK" == "lint" ]]; then # this particular codebase (e.g. src/headers, src/klib, src/msgpack). However, # we can lint all header files since they aren't "generated" like C files are. 
MSG='Linting .c and .h' ; echo $MSG - cpplint --quiet --extensions=c,h --headers=h --recursive --filter=-readability/casting,-runtime/int,-build/include_subdir pandas/_libs/src/*.h pandas/_libs/src/parser pandas/_libs/ujson pandas/_libs/tslibs/src/datetime + cpplint --quiet --extensions=c,h --headers=h --recursive --filter=-readability/casting,-runtime/int,-build/include_subdir pandas/_libs/src/*.h pandas/_libs/src/parser pandas/_libs/ujson pandas/_libs/tslibs/src/datetime pandas/io/msgpack pandas/_libs/*.cpp pandas/util RET=$(($RET + $?)) ; echo $MSG "DONE" echo "isort --version-number" @@ -174,9 +174,10 @@ if [[ -z "$CHECK" || "$CHECK" == "patterns" ]]; then MSG='Check that no file in the repo contains tailing whitespaces' ; echo $MSG set -o pipefail if [[ "$AZURE" == "true" ]]; then - ! grep -n --exclude="*.svg" -RI "\s$" * | awk -F ":" '{print "##vso[task.logissue type=error;sourcepath=" $1 ";linenumber=" $2 ";] Tailing whitespaces found: " $3}' + # we exclude all c/cpp files as the c/cpp files of pandas code base are tested when Linting .c and .h files + ! grep -n '--exclude=*.'{svg,c,cpp,html} -RI "\s$" * | awk -F ":" '{print "##vso[task.logissue type=error;sourcepath=" $1 ";linenumber=" $2 ";] Tailing whitespaces found: " $3}' else - ! grep -n --exclude="*.svg" -RI "\s$" * | awk -F ":" '{print $1 ":" $2 ":Tailing whitespaces found: " $3}' + ! 
grep -n '--exclude=*.'{svg,c,cpp,html} -RI "\s$" * | awk -F ":" '{print $1 ":" $2 ":Tailing whitespaces found: " $3}' fi RET=$(($RET + $?)) ; echo $MSG "DONE" fi @@ -206,7 +207,7 @@ if [[ -z "$CHECK" || "$CHECK" == "doctests" ]]; then MSG='Doctests frame.py' ; echo $MSG pytest -q --doctest-modules pandas/core/frame.py \ - -k"-axes -combine -itertuples -join -pivot_table -query -reindex -reindex_axis -round" + -k" -itertuples -join -reindex -reindex_axis -round" RET=$(($RET + $?)) ; echo $MSG "DONE" MSG='Doctests series.py' ; echo $MSG @@ -240,8 +241,8 @@ fi ### DOCSTRINGS ### if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then - MSG='Validate docstrings (GL06, GL07, GL09, SS04, PR03, PR05, EX04)' ; echo $MSG - $BASE_DIR/scripts/validate_docstrings.py --format=azure --errors=GL06,GL07,GL09,SS04,PR03,PR05,EX04 + MSG='Validate docstrings (GL06, GL07, GL09, SS04, SS05, PR03, PR04, PR05, PR10, EX04, RT04, RT05, SA05)' ; echo $MSG + $BASE_DIR/scripts/validate_docstrings.py --format=azure --errors=GL06,GL07,GL09,SS04,SS05,PR03,PR04,PR05,PR10,EX04,RT04,RT05,SA05 RET=$(($RET + $?)) ; echo $MSG "DONE" fi diff --git a/ci/deps/azure-27-compat.yaml b/ci/deps/azure-27-compat.yaml index 8899e22bdf6cf..a7784f17d1956 100644 --- a/ci/deps/azure-27-compat.yaml +++ b/ci/deps/azure-27-compat.yaml @@ -18,8 +18,10 @@ dependencies: - xlsxwriter=0.5.2 - xlwt=0.7.5 # universal - - pytest + - pytest>=4.0.2 - pytest-xdist + - pytest-mock + - isort - pip: - html5lib==1.0b2 - beautifulsoup4==4.2.1 diff --git a/ci/deps/azure-27-locale.yaml b/ci/deps/azure-27-locale.yaml index 0846ef5e8264e..8636a63d02fed 100644 --- a/ci/deps/azure-27-locale.yaml +++ b/ci/deps/azure-27-locale.yaml @@ -20,9 +20,11 @@ dependencies: - xlsxwriter=0.5.2 - xlwt=0.7.5 # universal - - pytest + - pytest>=4.0.2 - pytest-xdist + - pytest-mock - hypothesis>=3.58.0 + - isort - pip: - html5lib==1.0b2 - beautifulsoup4==4.2.1 diff --git a/ci/deps/azure-36-locale_slow.yaml b/ci/deps/azure-36-locale_slow.yaml index 
c7d2334623501..3f788e5ddcf39 100644 --- a/ci/deps/azure-36-locale_slow.yaml +++ b/ci/deps/azure-36-locale_slow.yaml @@ -26,8 +26,10 @@ dependencies: - xlsxwriter - xlwt # universal - - pytest + - pytest>=4.0.2 - pytest-xdist + - pytest-mock - moto + - isort - pip: - hypothesis>=3.58.0 diff --git a/ci/deps/azure-37-locale.yaml b/ci/deps/azure-37-locale.yaml index b5a05c49b8083..9d598cddce91a 100644 --- a/ci/deps/azure-37-locale.yaml +++ b/ci/deps/azure-37-locale.yaml @@ -25,8 +25,10 @@ dependencies: - xlsxwriter - xlwt # universal - - pytest + - pytest>=4.0.2 - pytest-xdist + - pytest-mock + - isort - pip: - hypothesis>=3.58.0 - moto # latest moto in conda-forge fails with 3.7, move to conda dependencies when this is fixed diff --git a/ci/deps/azure-37-numpydev.yaml b/ci/deps/azure-37-numpydev.yaml index 99ae228f25de3..e58c1f599279c 100644 --- a/ci/deps/azure-37-numpydev.yaml +++ b/ci/deps/azure-37-numpydev.yaml @@ -6,9 +6,11 @@ dependencies: - pytz - Cython>=0.28.2 # universal - - pytest + - pytest>=4.0.2 - pytest-xdist + - pytest-mock - hypothesis>=3.58.0 + - isort - pip: - "git+git://github.com/dateutil/dateutil.git" - "-f https://7933911d6844c6c53a7d-47bd50c35cd79bd838daf386af554a83.ssl.cf2.rackcdn.com" diff --git a/ci/deps/azure-macos-35.yaml b/ci/deps/azure-macos-35.yaml index 58abbabce3d86..2326e8092cc85 100644 --- a/ci/deps/azure-macos-35.yaml +++ b/ci/deps/azure-macos-35.yaml @@ -21,9 +21,11 @@ dependencies: - xlrd - xlsxwriter - xlwt - # universal - - pytest - - pytest-xdist + - isort - pip: - python-dateutil==2.5.3 + # universal + - pytest>=4.0.2 + - pytest-xdist + - pytest-mock - hypothesis>=3.58.0 diff --git a/ci/deps/azure-windows-27.yaml b/ci/deps/azure-windows-27.yaml index b1533b071fa74..f40efdfca3cbd 100644 --- a/ci/deps/azure-windows-27.yaml +++ b/ci/deps/azure-windows-27.yaml @@ -25,7 +25,9 @@ dependencies: - xlwt # universal - cython>=0.28.2 - - pytest + - pytest>=4.0.2 - pytest-xdist + - pytest-mock - moto - hypothesis>=3.58.0 + - isort diff 
--git a/ci/deps/azure-windows-36.yaml b/ci/deps/azure-windows-36.yaml index 7b132a134c44e..8517d340f2ba8 100644 --- a/ci/deps/azure-windows-36.yaml +++ b/ci/deps/azure-windows-36.yaml @@ -23,6 +23,8 @@ dependencies: - xlwt # universal - cython>=0.28.2 - - pytest + - pytest>=4.0.2 - pytest-xdist + - pytest-mock - hypothesis>=3.58.0 + - isort diff --git a/ci/deps/travis-27.yaml b/ci/deps/travis-27.yaml index 2624797b24fa1..a910af36a6b10 100644 --- a/ci/deps/travis-27.yaml +++ b/ci/deps/travis-27.yaml @@ -39,10 +39,12 @@ dependencies: - xlsxwriter=0.5.2 - xlwt=0.7.5 # universal - - pytest + - pytest>=4.0.2 - pytest-xdist + - pytest-mock - moto==1.3.4 - hypothesis>=3.58.0 + - isort - pip: - backports.lzma - pandas-gbq diff --git a/ci/deps/travis-36-doc.yaml b/ci/deps/travis-36-doc.yaml index 26f3a17432ab2..6f33bc58a8b21 100644 --- a/ci/deps/travis-36-doc.yaml +++ b/ci/deps/travis-36-doc.yaml @@ -41,5 +41,6 @@ dependencies: - xlsxwriter - xlwt # universal - - pytest + - pytest>=4.0.2 - pytest-xdist + - isort diff --git a/ci/deps/travis-36-locale.yaml b/ci/deps/travis-36-locale.yaml index 2b38465c04512..34b289e6c0c2f 100644 --- a/ci/deps/travis-36-locale.yaml +++ b/ci/deps/travis-36-locale.yaml @@ -28,8 +28,10 @@ dependencies: - xlsxwriter - xlwt # universal - - pytest + - pytest>=4.0.2 - pytest-xdist + - pytest-mock - moto + - isort - pip: - hypothesis>=3.58.0 diff --git a/ci/deps/travis-36-slow.yaml b/ci/deps/travis-36-slow.yaml index a6ffdb95e5e7c..46875d59411d9 100644 --- a/ci/deps/travis-36-slow.yaml +++ b/ci/deps/travis-36-slow.yaml @@ -25,7 +25,9 @@ dependencies: - xlsxwriter - xlwt # universal - - pytest + - pytest>=4.0.2 - pytest-xdist + - pytest-mock - moto - hypothesis>=3.58.0 + - isort diff --git a/ci/deps/travis-36.yaml b/ci/deps/travis-36.yaml index 74db888d588f4..06fc0d76a3d16 100644 --- a/ci/deps/travis-36.yaml +++ b/ci/deps/travis-36.yaml @@ -33,10 +33,12 @@ dependencies: - xlsxwriter - xlwt # universal - - pytest + - pytest>=4.0.2 - pytest-xdist - 
pytest-cov + - pytest-mock - hypothesis>=3.58.0 + - isort - pip: - brotlipy - coverage diff --git a/ci/deps/travis-37.yaml b/ci/deps/travis-37.yaml index c503124d8cd26..f71d29fe13378 100644 --- a/ci/deps/travis-37.yaml +++ b/ci/deps/travis-37.yaml @@ -12,9 +12,11 @@ dependencies: - nomkl - pyarrow - pytz - - pytest + - pytest>=4.0.2 - pytest-xdist + - pytest-mock - hypothesis>=3.58.0 - s3fs + - isort - pip: - moto diff --git a/doc/cheatsheet/Pandas_Cheat_Sheet.pdf b/doc/cheatsheet/Pandas_Cheat_Sheet.pdf index 696ed288cf7a6..48da05d053b96 100644 Binary files a/doc/cheatsheet/Pandas_Cheat_Sheet.pdf and b/doc/cheatsheet/Pandas_Cheat_Sheet.pdf differ diff --git a/doc/cheatsheet/Pandas_Cheat_Sheet.pptx b/doc/cheatsheet/Pandas_Cheat_Sheet.pptx index f8b98a6f1f8e4..039b3898fa301 100644 Binary files a/doc/cheatsheet/Pandas_Cheat_Sheet.pptx and b/doc/cheatsheet/Pandas_Cheat_Sheet.pptx differ diff --git a/doc/cheatsheet/Pandas_Cheat_Sheet_JA.pdf b/doc/cheatsheet/Pandas_Cheat_Sheet_JA.pdf index daa65a944e68a..cf1e40e627f33 100644 Binary files a/doc/cheatsheet/Pandas_Cheat_Sheet_JA.pdf and b/doc/cheatsheet/Pandas_Cheat_Sheet_JA.pdf differ diff --git a/doc/cheatsheet/Pandas_Cheat_Sheet_JA.pptx b/doc/cheatsheet/Pandas_Cheat_Sheet_JA.pptx index 6270a71e20ee8..564d92ddbb56a 100644 Binary files a/doc/cheatsheet/Pandas_Cheat_Sheet_JA.pptx and b/doc/cheatsheet/Pandas_Cheat_Sheet_JA.pptx differ diff --git a/doc/make.py b/doc/make.py index 438c4a04a3f08..6ffbd3ef86e68 100755 --- a/doc/make.py +++ b/doc/make.py @@ -294,14 +294,16 @@ def main(): help='number of jobs used by sphinx-build') argparser.add_argument('--no-api', default=False, - help='ommit api and autosummary', + help='omit api and autosummary', action='store_true') argparser.add_argument('--single', metavar='FILENAME', type=str, default=None, - help=('filename of section or method name to ' - 'compile, e.g. 
"indexing", "DataFrame.join"')) + help=('filename (relative to the "source" folder)' + ' of section or method name to compile, e.g. ' + '"development/contributing.rst",' + ' "ecosystem.rst", "pandas.DataFrame.join"')) argparser.add_argument('--python-path', type=str, default=os.path.dirname(DOC_PATH), @@ -323,7 +325,7 @@ def main(): # the import of `python_path` correctly. The latter is used to resolve # the import within the module, injecting it into the global namespace os.environ['PYTHONPATH'] = args.python_path - sys.path.append(args.python_path) + sys.path.insert(0, args.python_path) globals()['pandas'] = importlib.import_module('pandas') # Set the matplotlib backend to the non-interactive Agg backend for all diff --git a/doc/source/conf.py b/doc/source/conf.py index 776b1bfa7bdd7..c59d28a6dc3ea 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -98,9 +98,9 @@ if (fname == 'index.rst' and os.path.abspath(dirname) == source_path): continue - elif pattern == '-api' and dirname == 'api': + elif pattern == '-api' and dirname == 'reference': exclude_patterns.append(fname) - elif fname != pattern: + elif pattern != '-api' and fname != pattern: exclude_patterns.append(fname) with open(os.path.join(source_path, 'index.rst.template')) as f: diff --git a/doc/source/development/contributing.rst b/doc/source/development/contributing.rst index c9d6845107dfc..434df772ae9d1 100644 --- a/doc/source/development/contributing.rst +++ b/doc/source/development/contributing.rst @@ -54,7 +54,7 @@ Bug reports must: ... ``` -#. Include the full version string of *pandas* and its dependencies. You can use the built in function:: +#. Include the full version string of *pandas* and its dependencies. 
You can use the built-in function:: >>> import pandas as pd >>> pd.show_versions() @@ -178,6 +178,7 @@ We'll now kick off a three-step process: # Create and activate the build environment conda env create -f environment.yml conda activate pandas-dev + conda uninstall --force pandas # or with older versions of Anaconda: source activate pandas-dev @@ -211,7 +212,7 @@ See the full conda docs `here <http://conda.pydata.org/docs>`__. Creating a Python Environment (pip) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -If you aren't using conda for you development environment, follow these instructions. +If you aren't using conda for your development environment, follow these instructions. You'll need to have at least python3.5 installed on your system. .. code-block:: none @@ -428,14 +429,14 @@ reducing the turn-around time for checking your changes. python make.py clean python make.py --no-api - # compile the docs with only a single - # section, that which is in indexing.rst + # compile the docs with only a single section, relative to the "source" folder. + # For example, compiling only this guide (docs/source/development/contributing.rst) python make.py clean - python make.py --single indexing + python make.py --single development/contributing.rst # compile the reference docs for a single function python make.py clean - python make.py --single DataFrame.join + python make.py --single pandas.DataFrame.join For comparison, a full documentation build may take 15 minutes, but a single section may take 15 seconds. Subsequent builds, which only process portions @@ -484,7 +485,7 @@ contributing them to the project:: ./ci/code_checks.sh -The script verify the linting of code files, it looks for common mistake patterns +The script verifies the linting of code files, it looks for common mistake patterns (like missing spaces around sphinx directives that make the documentation not being rendered properly) and it also validates the doctests. 
It is possible to run the checks independently by using the parameters ``lint``, ``patterns`` and @@ -675,7 +676,7 @@ Otherwise, you need to do it manually: You'll also need to -1. write a new test that asserts a warning is issued when calling with the deprecated argument +1. Write a new test that asserts a warning is issued when calling with the deprecated argument 2. Update all of pandas existing tests and code to use the new argument See :ref:`contributing.warnings` for more. @@ -731,7 +732,7 @@ extensions in `numpy.testing .. note:: - The earliest supported pytest version is 3.6.0. + The earliest supported pytest version is 4.0.2. Writing tests ~~~~~~~~~~~~~ diff --git a/doc/source/development/extending.rst b/doc/source/development/extending.rst index e6928d9efde06..9e5034f6d3db0 100644 --- a/doc/source/development/extending.rst +++ b/doc/source/development/extending.rst @@ -33,8 +33,9 @@ decorate a class, providing the name of attribute to add. The class's @staticmethod def _validate(obj): - if 'lat' not in obj.columns or 'lon' not in obj.columns: - raise AttributeError("Must have 'lat' and 'lon'.") + # verify there is a column latitude and a column longitude + if 'latitude' not in obj.columns or 'longitude' not in obj.columns: + raise AttributeError("Must have 'latitude' and 'longitude'.") @property def center(self): diff --git a/doc/source/getting_started/basics.rst b/doc/source/getting_started/basics.rst index 02cbc7e2c3b6d..bbec7b5de1d2e 100644 --- a/doc/source/getting_started/basics.rst +++ b/doc/source/getting_started/basics.rst @@ -505,7 +505,7 @@ So, for instance, to reproduce :meth:`~DataFrame.combine_first` as above: .. ipython:: python def combiner(x, y): - np.where(pd.isna(x), y, x) + return np.where(pd.isna(x), y, x) df1.combine(df2, combiner) .. 
_basics.stats: diff --git a/doc/source/install.rst b/doc/source/install.rst index 92364fcc9ebd2..5310667c403e5 100644 --- a/doc/source/install.rst +++ b/doc/source/install.rst @@ -202,7 +202,7 @@ pandas is equipped with an exhaustive set of unit tests, covering about 97% of the code base as of this writing. To run it on your machine to verify that everything is working (and that you have all of the dependencies, soft and hard, installed), make sure you have `pytest -<http://docs.pytest.org/en/latest/>`__ >= 3.6 and `Hypothesis +<http://docs.pytest.org/en/latest/>`__ >= 4.0.2 and `Hypothesis <https://hypothesis.readthedocs.io/>`__ >= 3.58, then run: :: diff --git a/doc/source/reference/arrays.rst b/doc/source/reference/arrays.rst index 1dc74ad83b7e6..a129b75636536 100644 --- a/doc/source/reference/arrays.rst +++ b/doc/source/reference/arrays.rst @@ -120,6 +120,7 @@ Methods Timestamp.timetuple Timestamp.timetz Timestamp.to_datetime64 + Timestamp.to_numpy Timestamp.to_julian_date Timestamp.to_period Timestamp.to_pydatetime @@ -191,6 +192,7 @@ Methods Timedelta.round Timedelta.to_pytimedelta Timedelta.to_timedelta64 + Timedelta.to_numpy Timedelta.total_seconds A collection of timedeltas may be stored in a :class:`TimedeltaArray`. diff --git a/doc/source/reference/groupby.rst b/doc/source/reference/groupby.rst index 6ed85ff2fac43..c7f9113b53c22 100644 --- a/doc/source/reference/groupby.rst +++ b/doc/source/reference/groupby.rst @@ -99,6 +99,7 @@ application to columns of a specific data type. DataFrameGroupBy.idxmax DataFrameGroupBy.idxmin DataFrameGroupBy.mad + DataFrameGroupBy.nunique DataFrameGroupBy.pct_change DataFrameGroupBy.plot DataFrameGroupBy.quantile diff --git a/doc/source/reference/series.rst b/doc/source/reference/series.rst index a6ac40b5203bf..b406893e3414a 100644 --- a/doc/source/reference/series.rst +++ b/doc/source/reference/series.rst @@ -409,6 +409,7 @@ strings and apply several methods to it. 
These can be accessed like :template: autosummary/accessor_method.rst Series.str.capitalize + Series.str.casefold Series.str.cat Series.str.center Series.str.contains diff --git a/doc/source/styled.xlsx b/doc/source/styled.xlsx new file mode 100644 index 0000000000000..1233ff2b8692b Binary files /dev/null and b/doc/source/styled.xlsx differ diff --git a/doc/source/user_guide/groupby.rst b/doc/source/user_guide/groupby.rst index 953f40d1afebe..e4dd82afcdf65 100644 --- a/doc/source/user_guide/groupby.rst +++ b/doc/source/user_guide/groupby.rst @@ -15,7 +15,7 @@ steps: Out of these, the split step is the most straightforward. In fact, in many situations we may wish to split the data set into groups and do something with -those groups. In the apply step, we might wish to one of the +those groups. In the apply step, we might wish to do one of the following: * **Aggregation**: compute a summary statistic (or statistics) for each @@ -1317,7 +1317,7 @@ arbitrary function, for example: df.groupby(['Store', 'Product']).pipe(mean) where ``mean`` takes a GroupBy object and finds the mean of the Revenue and Quantity -columns repectively for each Store-Product combination. The ``mean`` function can +columns respectively for each Store-Product combination. The ``mean`` function can be any function that takes in a GroupBy object; the ``.pipe`` will pass the GroupBy object as a parameter into the function you specify. diff --git a/doc/source/user_guide/indexing.rst b/doc/source/user_guide/indexing.rst index be1745e2664a1..00d4dc9efc8cc 100644 --- a/doc/source/user_guide/indexing.rst +++ b/doc/source/user_guide/indexing.rst @@ -435,7 +435,7 @@ Selection By Position This is sometimes called ``chained assignment`` and should be avoided. See :ref:`Returning a View versus Copy <indexing.view_versus_copy>`. -Pandas provides a suite of methods in order to get **purely integer based indexing**. The semantics follow closely Python and NumPy slicing. These are ``0-based`` indexing. 
When slicing, the start bounds is *included*, while the upper bound is *excluded*. Trying to use a non-integer, even a **valid** label will raise an ``IndexError``. +Pandas provides a suite of methods in order to get **purely integer based indexing**. The semantics follow closely Python and NumPy slicing. These are ``0-based`` indexing. When slicing, the start bound is *included*, while the upper bound is *excluded*. Trying to use a non-integer, even a **valid** label will raise an ``IndexError``. The ``.iloc`` attribute is the primary access method. The following are valid inputs: @@ -545,7 +545,7 @@ Selection By Callable .. versionadded:: 0.18.1 ``.loc``, ``.iloc``, and also ``[]`` indexing can accept a ``callable`` as indexer. -The ``callable`` must be a function with one argument (the calling Series, DataFrame or Panel) and that returns valid output for indexing. +The ``callable`` must be a function with one argument (the calling Series, DataFrame or Panel) that returns valid output for indexing. .. ipython:: python @@ -569,7 +569,7 @@ You can use callable indexing in ``Series``. df1.A.loc[lambda s: s > 0] Using these methods / indexers, you can chain data selection operations -without using temporary variable. +without using a temporary variable. .. ipython:: python @@ -907,7 +907,7 @@ of the DataFrame): df[df['A'] > 0] -List comprehensions and ``map`` method of Series can also be used to produce +List comprehensions and the ``map`` method of Series can also be used to produce more complex criteria: .. ipython:: python @@ -1556,7 +1556,7 @@ See :ref:`Advanced Indexing <advanced>` for usage of MultiIndexes. ind ``set_names``, ``set_levels``, and ``set_codes`` also take an optional -`level`` argument +``level`` argument .. 
ipython:: python diff --git a/doc/source/user_guide/io.rst b/doc/source/user_guide/io.rst index 58e1b2370c7c8..b23a0f10e9e2b 100644 --- a/doc/source/user_guide/io.rst +++ b/doc/source/user_guide/io.rst @@ -989,6 +989,36 @@ a single date rather than the entire array. os.remove('tmp.csv') + +.. _io.csv.mixed_timezones: + +Parsing a CSV with mixed Timezones +++++++++++++++++++++++++++++++++++ + +Pandas cannot natively represent a column or index with mixed timezones. If your CSV +file contains columns with a mixture of timezones, the default result will be +an object-dtype column with strings, even with ``parse_dates``. + + +.. ipython:: python + + content = """\ + a + 2000-01-01T00:00:00+05:00 + 2000-01-01T00:00:00+06:00""" + df = pd.read_csv(StringIO(content), parse_dates=['a']) + df['a'] + +To parse the mixed-timezone values as a datetime column, pass a partially-applied +:func:`to_datetime` with ``utc=True`` as the ``date_parser``. + +.. ipython:: python + + df = pd.read_csv(StringIO(content), parse_dates=['a'], + date_parser=lambda col: pd.to_datetime(col, utc=True)) + df['a'] + + .. _io.dayfirst: diff --git a/doc/source/user_guide/missing_data.rst b/doc/source/user_guide/missing_data.rst index a462f01dcd14f..7883814e91c94 100644 --- a/doc/source/user_guide/missing_data.rst +++ b/doc/source/user_guide/missing_data.rst @@ -335,7 +335,7 @@ examined :ref:`in the API <api.dataframe.missing>`. Interpolation ~~~~~~~~~~~~~ -.. versionadded:: 0.21.0 +.. versionadded:: 0.23.0 The ``limit_area`` keyword argument was added. 
diff --git a/doc/source/user_guide/text.rst b/doc/source/user_guide/text.rst index e4f60a761750d..6f21a7d9beb36 100644 --- a/doc/source/user_guide/text.rst +++ b/doc/source/user_guide/text.rst @@ -600,6 +600,7 @@ Method Summary :meth:`~Series.str.partition`;Equivalent to ``str.partition`` :meth:`~Series.str.rpartition`;Equivalent to ``str.rpartition`` :meth:`~Series.str.lower`;Equivalent to ``str.lower`` + :meth:`~Series.str.casefold`;Equivalent to ``str.casefold`` :meth:`~Series.str.upper`;Equivalent to ``str.upper`` :meth:`~Series.str.find`;Equivalent to ``str.find`` :meth:`~Series.str.rfind`;Equivalent to ``str.rfind`` diff --git a/doc/source/user_guide/timeseries.rst b/doc/source/user_guide/timeseries.rst index f56ad710973dd..4e2c428415926 100644 --- a/doc/source/user_guide/timeseries.rst +++ b/doc/source/user_guide/timeseries.rst @@ -321,6 +321,15 @@ which can be specified. These are computed from the starting point specified by pd.to_datetime([1349720105100, 1349720105200, 1349720105300, 1349720105400, 1349720105500], unit='ms') +Constructing a :class:`Timestamp` or :class:`DatetimeIndex` with an epoch timestamp +with the ``tz`` argument specified will localize the epoch timestamps to UTC +first then convert the result to the specified time zone. + +.. ipython:: python + + pd.Timestamp(1262347200000000000, tz='US/Pacific') + pd.DatetimeIndex([1262347200000000000], tz='US/Pacific') + .. note:: Epoch times will be rounded to the nearest nanosecond. @@ -624,6 +633,16 @@ We are stopping on the included end-point as it is part of the index: dft2 = dft2.swaplevel(0, 1).sort_index() dft2.loc[idx[:, '2013-01-05'], :] +.. versionadded:: 0.25.0 + +Slicing with string indexing also honors UTC offset. + +.. ipython:: python + + df = pd.DataFrame([0], index=pd.DatetimeIndex(['2019-01-01'], tz='US/Pacific')) + df + df['2019-01-01 12:00:00+04:00':'2019-01-01 13:00:00+04:00'] + .. _timeseries.slice_vs_exact_match: Slice vs. 
Exact Match @@ -2129,11 +2148,13 @@ These can easily be converted to a ``PeriodIndex``: Time Zone Handling ------------------ -Pandas provides rich support for working with timestamps in different time -zones using ``pytz`` and ``dateutil`` libraries. ``dateutil`` currently is only -supported for fixed offset and tzfile zones. The default library is ``pytz``. -Support for ``dateutil`` is provided for compatibility with other -applications e.g. if you use ``dateutil`` in other Python packages. +pandas provides rich support for working with timestamps in different time +zones using the ``pytz`` and ``dateutil`` libraries. + +.. note:: + + pandas does not yet support ``datetime.timezone`` objects from the standard + library. Working with Time Zones ~~~~~~~~~~~~~~~~~~~~~~~ @@ -2145,13 +2166,16 @@ By default, pandas objects are time zone unaware: rng = pd.date_range('3/6/2012 00:00', periods=15, freq='D') rng.tz is None -To supply the time zone, you can use the ``tz`` keyword to ``date_range`` and -other functions. Dateutil time zone strings are distinguished from ``pytz`` -time zones by starting with ``dateutil/``. +To localize these dates to a time zone (assign a particular time zone to a naive date), +you can use the ``tz_localize`` method or the ``tz`` keyword argument in +:func:`date_range`, :class:`Timestamp`, or :class:`DatetimeIndex`. +You can either pass ``pytz`` or ``dateutil`` time zone objects or Olson time zone database strings. +Olson time zone strings will return ``pytz`` time zone objects by default. +To return ``dateutil`` time zone objects, append ``dateutil/`` before the string. * In ``pytz`` you can find a list of common (and less common) time zones using ``from pytz import common_timezones, all_timezones``. -* ``dateutil`` uses the OS timezones so there isn't a fixed list available. For +* ``dateutil`` uses the OS time zones so there isn't a fixed list available. For common zones, the names are the same as ``pytz``. .. 
ipython:: python @@ -2159,23 +2183,23 @@ time zones by starting with ``dateutil/``. import dateutil # pytz - rng_pytz = pd.date_range('3/6/2012 00:00', periods=10, freq='D', + rng_pytz = pd.date_range('3/6/2012 00:00', periods=3, freq='D', tz='Europe/London') rng_pytz.tz # dateutil - rng_dateutil = pd.date_range('3/6/2012 00:00', periods=10, freq='D', - tz='dateutil/Europe/London') + rng_dateutil = pd.date_range('3/6/2012 00:00', periods=3, freq='D') + rng_dateutil = rng_dateutil.tz_localize('dateutil/Europe/London') rng_dateutil.tz # dateutil - utc special case - rng_utc = pd.date_range('3/6/2012 00:00', periods=10, freq='D', + rng_utc = pd.date_range('3/6/2012 00:00', periods=3, freq='D', tz=dateutil.tz.tzutc()) rng_utc.tz -Note that the ``UTC`` timezone is a special case in ``dateutil`` and should be constructed explicitly -as an instance of ``dateutil.tz.tzutc``. You can also construct other timezones explicitly first, -which gives you more control over which time zone is used: +Note that the ``UTC`` time zone is a special case in ``dateutil`` and should be constructed explicitly +as an instance of ``dateutil.tz.tzutc``. You can also construct other time +zones objects explicitly first. .. ipython:: python @@ -2183,56 +2207,61 @@ which gives you more control over which time zone is used: # pytz tz_pytz = pytz.timezone('Europe/London') - rng_pytz = pd.date_range('3/6/2012 00:00', periods=10, freq='D', - tz=tz_pytz) + rng_pytz = pd.date_range('3/6/2012 00:00', periods=3, freq='D') + rng_pytz = rng_pytz.tz_localize(tz_pytz) rng_pytz.tz == tz_pytz # dateutil tz_dateutil = dateutil.tz.gettz('Europe/London') - rng_dateutil = pd.date_range('3/6/2012 00:00', periods=10, freq='D', + rng_dateutil = pd.date_range('3/6/2012 00:00', periods=3, freq='D', tz=tz_dateutil) rng_dateutil.tz == tz_dateutil -Timestamps, like Python's ``datetime.datetime`` object can be either time zone -naive or time zone aware. 
Naive time series and ``DatetimeIndex`` objects can be -*localized* using ``tz_localize``: +To convert a time zone aware pandas object from one time zone to another, +you can use the ``tz_convert`` method. .. ipython:: python - ts = pd.Series(np.random.randn(len(rng)), rng) + rng_pytz.tz_convert('US/Eastern') - ts_utc = ts.tz_localize('UTC') - ts_utc +.. note:: -Again, you can explicitly construct the timezone object first. -You can use the ``tz_convert`` method to convert pandas objects to convert -tz-aware data to another time zone: + When using ``pytz`` time zones, :class:`DatetimeIndex` will construct a different + time zone object than a :class:`Timestamp` for the same time zone input. A :class:`DatetimeIndex` + can hold a collection of :class:`Timestamp` objects that may have different UTC offsets and cannot be + succinctly represented by one ``pytz`` time zone instance while one :class:`Timestamp` + represents one point in time with a specific UTC offset. -.. ipython:: python + .. ipython:: python - ts_utc.tz_convert('US/Eastern') + dti = pd.date_range('2019-01-01', periods=3, freq='D', tz='US/Pacific') + dti.tz + ts = pd.Timestamp('2019-01-01', tz='US/Pacific') + ts.tz .. warning:: - Be wary of conversions between libraries. For some zones ``pytz`` and ``dateutil`` have different - definitions of the zone. This is more of a problem for unusual timezones than for + Be wary of conversions between libraries. For some time zones, ``pytz`` and ``dateutil`` have different + definitions of the zone. This is more of a problem for unusual time zones than for 'standard' zones like ``US/Eastern``. .. warning:: - Be aware that a timezone definition across versions of timezone libraries may not - be considered equal. This may cause problems when working with stored data that - is localized using one version and operated on with a different version. - See :ref:`here<io.hdf5-notes>` for how to handle such a situation. 
+ Be aware that a time zone definition across versions of time zone libraries may not + be considered equal. This may cause problems when working with stored data that + is localized using one version and operated on with a different version. + See :ref:`here<io.hdf5-notes>` for how to handle such a situation. .. warning:: - It is incorrect to pass a timezone directly into the ``datetime.datetime`` constructor (e.g., - ``datetime.datetime(2011, 1, 1, tz=timezone('US/Eastern'))``. Instead, the datetime - needs to be localized using the localize method on the timezone. + For ``pytz`` time zones, it is incorrect to pass a time zone object directly into + the ``datetime.datetime`` constructor + (e.g., ``datetime.datetime(2011, 1, 1, tz=pytz.timezone('US/Eastern'))``. + Instead, the datetime needs to be localized using the ``localize`` method + on the ``pytz`` time zone object. -Under the hood, all timestamps are stored in UTC. Scalar values from a -``DatetimeIndex`` with a time zone will have their fields (day, hour, minute) +Under the hood, all timestamps are stored in UTC. Values from a time zone aware +:class:`DatetimeIndex` or :class:`Timestamp` will have their fields (day, hour, minute, etc.) localized to the time zone. However, timestamps with the same UTC value are still considered to be equal even if they are in different time zones: @@ -2241,51 +2270,35 @@ still considered to be equal even if they are in different time zones: rng_eastern = rng_utc.tz_convert('US/Eastern') rng_berlin = rng_utc.tz_convert('Europe/Berlin') - rng_eastern[5] - rng_berlin[5] - rng_eastern[5] == rng_berlin[5] - -Like ``Series``, ``DataFrame``, and ``DatetimeIndex``; ``Timestamp`` objects -can be converted to other time zones using ``tz_convert``: - -.. 
ipython:: python - - rng_eastern[5] - rng_berlin[5] - rng_eastern[5].tz_convert('Europe/Berlin') + rng_eastern[2] + rng_berlin[2] + rng_eastern[2] == rng_berlin[2] -Localization of ``Timestamp`` functions just like ``DatetimeIndex`` and ``Series``: - -.. ipython:: python - - rng[5] - rng[5].tz_localize('Asia/Shanghai') - - -Operations between ``Series`` in different time zones will yield UTC -``Series``, aligning the data on the UTC timestamps: +Operations between :class:`Series` in different time zones will yield UTC +:class:`Series`, aligning the data on the UTC timestamps: .. ipython:: python + ts_utc = pd.Series(range(3), pd.date_range('20130101', periods=3, tz='UTC')) eastern = ts_utc.tz_convert('US/Eastern') berlin = ts_utc.tz_convert('Europe/Berlin') result = eastern + berlin result result.index -To remove timezone from tz-aware ``DatetimeIndex``, use ``tz_localize(None)`` or ``tz_convert(None)``. -``tz_localize(None)`` will remove timezone holding local time representations. -``tz_convert(None)`` will remove timezone after converting to UTC time. +To remove time zone information, use ``tz_localize(None)`` or ``tz_convert(None)``. +``tz_localize(None)`` will remove the time zone yielding the local time representation. +``tz_convert(None)`` will remove the time zone after converting to UTC time. .. ipython:: python didx = pd.date_range(start='2014-08-01 09:00', freq='H', - periods=10, tz='US/Eastern') + periods=3, tz='US/Eastern') didx didx.tz_localize(None) didx.tz_convert(None) - # tz_convert(None) is identical with tz_convert('UTC').tz_localize(None) + # tz_convert(None) is identical to tz_convert('UTC').tz_localize(None) didx.tz_convert('UTC').tz_localize(None) .. _timeseries.timezone_ambiguous: @@ -2293,54 +2306,34 @@ To remove timezone from tz-aware ``DatetimeIndex``, use ``tz_localize(None)`` or Ambiguous Times when Localizing ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -In some cases, localize cannot determine the DST and non-DST hours when there are -duplicates. 
This often happens when reading files or database records that simply -duplicate the hours. Passing ``ambiguous='infer'`` into ``tz_localize`` will -attempt to determine the right offset. Below the top example will fail as it -contains ambiguous times and the bottom will infer the right offset. +``tz_localize`` may not be able to determine the UTC offset of a timestamp +because daylight savings time (DST) in a local time zone causes some times to occur +twice within one day ("clocks fall back"). The following options are available: + +* ``'raise'``: Raises a ``pytz.AmbiguousTimeError`` (the default behavior) +* ``'infer'``: Attempt to determine the correct offset base on the monotonicity of the timestamps +* ``'NaT'``: Replaces ambiguous times with ``NaT`` +* ``bool``: ``True`` represents a DST time, ``False`` represents non-DST time. An array-like of ``bool`` values is supported for a sequence of times. .. ipython:: python rng_hourly = pd.DatetimeIndex(['11/06/2011 00:00', '11/06/2011 01:00', - '11/06/2011 01:00', '11/06/2011 02:00', - '11/06/2011 03:00']) + '11/06/2011 01:00', '11/06/2011 02:00']) -This will fail as there are ambiguous times +This will fail as there are ambiguous times (``'11/06/2011 01:00'``) .. code-block:: ipython In [2]: rng_hourly.tz_localize('US/Eastern') AmbiguousTimeError: Cannot infer dst time from Timestamp('2011-11-06 01:00:00'), try using the 'ambiguous' argument -Infer the ambiguous times - -.. ipython:: python - - rng_hourly_eastern = rng_hourly.tz_localize('US/Eastern', ambiguous='infer') - rng_hourly_eastern.to_list() - -In addition to 'infer', there are several other arguments supported. Passing -an array-like of bools or 0s/1s where True represents a DST hour and False a -non-DST hour, allows for distinguishing more than one DST -transition (e.g., if you have multiple records in a database each with their -own DST transition). Or passing 'NaT' will fill in transition times -with not-a-time values. 
These methods are available in the ``DatetimeIndex`` -constructor as well as ``tz_localize``. +Handle these ambiguous times by specifying the following. .. ipython:: python - rng_hourly_dst = np.array([1, 1, 0, 0, 0]) - rng_hourly.tz_localize('US/Eastern', ambiguous=rng_hourly_dst).to_list() - rng_hourly.tz_localize('US/Eastern', ambiguous='NaT').to_list() - - didx = pd.date_range(start='2014-08-01 09:00', freq='H', - periods=10, tz='US/Eastern') - didx - didx.tz_localize(None) - didx.tz_convert(None) - - # tz_convert(None) is identical with tz_convert('UTC').tz_localize(None) - didx.tz_convert('UCT').tz_localize(None) + rng_hourly.tz_localize('US/Eastern', ambiguous='infer') + rng_hourly.tz_localize('US/Eastern', ambiguous='NaT') + rng_hourly.tz_localize('US/Eastern', ambiguous=[True, True, False, False]) .. _timeseries.timezone_nonexistent: @@ -2348,7 +2341,7 @@ Nonexistent Times when Localizing ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ A DST transition may also shift the local time ahead by 1 hour creating nonexistent -local times. The behavior of localizing a timeseries with nonexistent times +local times ("clocks spring forward"). The behavior of localizing a timeseries with nonexistent times can be controlled by the ``nonexistent`` argument. The following options are available: * ``'raise'``: Raises a ``pytz.NonExistentTimeError`` (the default behavior) @@ -2382,58 +2375,61 @@ Transform nonexistent times to ``NaT`` or shift the times. .. _timeseries.timezone_series: -TZ Aware Dtypes -~~~~~~~~~~~~~~~ +Time Zone Series Operations +~~~~~~~~~~~~~~~~~~~~~~~~~~~ -``Series/DatetimeIndex`` with a timezone **naive** value are represented with a dtype of ``datetime64[ns]``. +A :class:`Series` with time zone **naive** values is +represented with a dtype of ``datetime64[ns]``. .. ipython:: python s_naive = pd.Series(pd.date_range('20130101', periods=3)) s_naive -``Series/DatetimeIndex`` with a timezone **aware** value are represented with a dtype of ``datetime64[ns, tz]``. 
+A :class:`Series` with a time zone **aware** values is +represented with a dtype of ``datetime64[ns, tz]`` where ``tz`` is the time zone .. ipython:: python s_aware = pd.Series(pd.date_range('20130101', periods=3, tz='US/Eastern')) s_aware -Both of these ``Series`` can be manipulated via the ``.dt`` accessor, see :ref:`here <basics.dt_accessors>`. +Both of these :class:`Series` time zone information +can be manipulated via the ``.dt`` accessor, see :ref:`the dt accessor section <basics.dt_accessors>`. -For example, to localize and convert a naive stamp to timezone aware. +For example, to localize and convert a naive stamp to time zone aware. .. ipython:: python s_naive.dt.tz_localize('UTC').dt.tz_convert('US/Eastern') - -Further more you can ``.astype(...)`` timezone aware (and naive). This operation is effectively a localize AND convert on a naive stamp, and -a convert on an aware stamp. +Time zone information can also be manipulated using the ``astype`` method. +This method can localize and convert time zone naive timestamps or +convert time zone aware timestamps. .. ipython:: python - # localize and convert a naive timezone + # localize and convert a naive time zone s_naive.astype('datetime64[ns, US/Eastern]') # make an aware tz naive s_aware.astype('datetime64[ns]') - # convert to a new timezone + # convert to a new time zone s_aware.astype('datetime64[ns, CET]') .. note:: Using :meth:`Series.to_numpy` on a ``Series``, returns a NumPy array of the data. - NumPy does not currently support timezones (even though it is *printing* in the local timezone!), - therefore an object array of Timestamps is returned for timezone aware data: + NumPy does not currently support time zones (even though it is *printing* in the local time zone!), + therefore an object array of Timestamps is returned for time zone aware data: .. 
ipython:: python s_naive.to_numpy() s_aware.to_numpy() - By converting to an object array of Timestamps, it preserves the timezone + By converting to an object array of Timestamps, it preserves the time zone information. For example, when converting back to a Series: .. ipython:: python diff --git a/doc/source/whatsnew/v0.10.0.rst b/doc/source/whatsnew/v0.10.0.rst index bc2a4918bc27b..2d6550bb6888d 100644 --- a/doc/source/whatsnew/v0.10.0.rst +++ b/doc/source/whatsnew/v0.10.0.rst @@ -370,7 +370,7 @@ Updated PyTables Support df1.get_dtype_counts() - performance improvements on table writing -- support for arbitrarly indexed dimensions +- support for arbitrarily indexed dimensions - ``SparseSeries`` now has a ``density`` property (:issue:`2384`) - enable ``Series.str.strip/lstrip/rstrip`` methods to take an input argument to strip arbitrary characters (:issue:`2411`) diff --git a/doc/source/whatsnew/v0.16.1.rst b/doc/source/whatsnew/v0.16.1.rst index 7621cb9c1e27c..cbcb23e356577 100644 --- a/doc/source/whatsnew/v0.16.1.rst +++ b/doc/source/whatsnew/v0.16.1.rst @@ -136,7 +136,7 @@ groupby operations on the index will preserve the index nature as well reindexing operations, will return a resulting index based on the type of the passed indexer, meaning that passing a list will return a plain-old-``Index``; indexing with a ``Categorical`` will return a ``CategoricalIndex``, indexed according to the categories -of the PASSED ``Categorical`` dtype. This allows one to arbitrarly index these even with +of the PASSED ``Categorical`` dtype. This allows one to arbitrarily index these even with values NOT in the categories, similarly to how you can reindex ANY pandas index. .. 
code-block:: ipython diff --git a/doc/source/whatsnew/v0.24.0.rst b/doc/source/whatsnew/v0.24.0.rst index 16319a3b83ca4..a49ea2cf493a6 100644 --- a/doc/source/whatsnew/v0.24.0.rst +++ b/doc/source/whatsnew/v0.24.0.rst @@ -648,6 +648,52 @@ that the dates have been converted to UTC pd.to_datetime(["2015-11-18 15:30:00+05:30", "2015-11-18 16:30:00+06:30"], utc=True) + +.. _whatsnew_0240.api_breaking.read_csv_mixed_tz: + +Parsing mixed-timezones with :func:`read_csv` +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +:func:`read_csv` no longer silently converts mixed-timezone columns to UTC (:issue:`24987`). + +*Previous Behavior* + +.. code-block:: python + + >>> import io + >>> content = """\ + ... a + ... 2000-01-01T00:00:00+05:00 + ... 2000-01-01T00:00:00+06:00""" + >>> df = pd.read_csv(io.StringIO(content), parse_dates=['a']) + >>> df.a + 0 1999-12-31 19:00:00 + 1 1999-12-31 18:00:00 + Name: a, dtype: datetime64[ns] + +*New Behavior* + +.. ipython:: python + + import io + content = """\ + a + 2000-01-01T00:00:00+05:00 + 2000-01-01T00:00:00+06:00""" + df = pd.read_csv(io.StringIO(content), parse_dates=['a']) + df.a + +As can be seen, the ``dtype`` is object; each value in the column is a string. +To convert the strings to an array of datetimes, the ``date_parser`` argument + +.. ipython:: python + + df = pd.read_csv(io.StringIO(content), parse_dates=['a'], + date_parser=lambda col: pd.to_datetime(col, utc=True)) + df.a + +See :ref:`whatsnew_0240.api.timezone_offset_parsing` for more. + .. _whatsnew_0240.api_breaking.period_end_time: Time values in ``dt.end_time`` and ``to_timestamp(how='end')`` diff --git a/doc/source/whatsnew/v0.24.1.rst b/doc/source/whatsnew/v0.24.1.rst index 3ac2ed73ea53f..be0a2eb682e87 100644 --- a/doc/source/whatsnew/v0.24.1.rst +++ b/doc/source/whatsnew/v0.24.1.rst @@ -2,8 +2,8 @@ .. 
_whatsnew_0241: -Whats New in 0.24.1 (February XX, 2019) ---------------------------------------- +Whats New in 0.24.1 (February 3, 2019) +-------------------------------------- .. warning:: @@ -13,64 +13,69 @@ Whats New in 0.24.1 (February XX, 2019) {{ header }} These are the changes in pandas 0.24.1. See :ref:`release` for a full changelog -including other versions of pandas. +including other versions of pandas. See :ref:`whatsnew_0240` for the 0.24.0 changelog. +.. _whatsnew_0241.api: -.. _whatsnew_0241.enhancements: +API Changes +~~~~~~~~~~~ -Enhancements -^^^^^^^^^^^^ +Changing the ``sort`` parameter for :class:`Index` set operations +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +The default ``sort`` value for :meth:`Index.union` has changed from ``True`` to ``None`` (:issue:`24959`). +The default *behavior*, however, remains the same: the result is sorted, unless -.. _whatsnew_0241.bug_fixes: - -Bug Fixes -~~~~~~~~~ - -**Conversion** +1. ``self`` and ``other`` are identical +2. ``self`` or ``other`` is empty +3. ``self`` or ``other`` contain values that can not be compared (a ``RuntimeWarning`` is raised). -- -- -- +This change will allow ``sort=True`` to mean "always sort" in a future release. -**Indexing** +The same change applies to :meth:`Index.difference` and :meth:`Index.symmetric_difference`, which +would not sort the result when the values could not be compared. -- -- -- +The `sort` option for :meth:`Index.intersection` has changed in three ways. -**I/O** +1. The default has changed from ``True`` to ``False``, to restore the + pandas 0.23.4 and earlier behavior of not sorting by default. +2. The behavior of ``sort=True`` can now be obtained with ``sort=None``. + This will sort the result only if the values in ``self`` and ``other`` + are not identical. +3. The value ``sort=True`` is no longer allowed. A future version of pandas + will properly support ``sort=True`` meaning "always sort". -- -- -- +.. 
_whatsnew_0241.regressions: -**Categorical** +Fixed Regressions +~~~~~~~~~~~~~~~~~ -- -- -- +- Fixed regression in :meth:`DataFrame.to_dict` with ``records`` orient raising an + ``AttributeError`` when the ``DataFrame`` contained more than 255 columns, or + wrongly converting column names that were not valid python identifiers (:issue:`24939`, :issue:`24940`). +- Fixed regression in :func:`read_sql` when passing certain queries with MySQL/pymysql (:issue:`24988`). +- Fixed regression in :class:`Index.intersection` incorrectly sorting the values by default (:issue:`24959`). +- Fixed regression in :func:`merge` when merging an empty ``DataFrame`` with multiple timezone-aware columns on one of the timezone-aware columns (:issue:`25014`). +- Fixed regression in :meth:`Series.rename_axis` and :meth:`DataFrame.rename_axis` where passing ``None`` failed to remove the axis name (:issue:`25034`) +- Fixed regression in :func:`to_timedelta` with `box=False` incorrectly returning a ``datetime64`` object instead of a ``timedelta64`` object (:issue:`24961`) +- Fixed regression where custom hashable types could not be used as column keys in :meth:`DataFrame.set_index` (:issue:`24969`) -**Timezones** +.. _whatsnew_0241.bug_fixes: -- -- -- +Bug Fixes +~~~~~~~~~ -**Timedelta** +**Reshaping** -- -- -- +- Bug in :meth:`DataFrame.groupby` with :class:`Grouper` when there is a time change (DST) and grouping frequency is ``'1d'`` (:issue:`24972`) -**Reshaping** +**Visualization** -- Bug in :func:`merge` when merging by index name would sometimes result in an incorrectly numbered index (:issue:`24212`) +- Fixed the warning for implicitly registered matplotlib converters not showing. See :ref:`whatsnew_0211.converters` for more (:issue:`24963`). **Other** -- -- +- Fixed AttributeError when printing a DataFrame's HTML repr after accessing the IPython config object (:issue:`25036`) .. 
_whatsnew_0.241.contributors: diff --git a/doc/source/whatsnew/v0.24.2.rst b/doc/source/whatsnew/v0.24.2.rst new file mode 100644 index 0000000000000..e80b1060e867d --- /dev/null +++ b/doc/source/whatsnew/v0.24.2.rst @@ -0,0 +1,111 @@ +:orphan: + +.. _whatsnew_0242: + +Whats New in 0.24.2 (February XX, 2019) +--------------------------------------- + +.. warning:: + + The 0.24.x series of releases will be the last to support Python 2. Future feature + releases will support Python 3 only. See :ref:`install.dropping-27` for more. + +{{ header }} + +These are the changes in pandas 0.24.2. See :ref:`release` for a full changelog +including other versions of pandas. + +.. _whatsnew_0242.regressions: + +Fixed Regressions +^^^^^^^^^^^^^^^^^ + +- Fixed regression in :meth:`DataFrame.all` and :meth:`DataFrame.any` where ``bool_only=True`` was ignored (:issue:`25101`) +- Fixed issue in ``DataFrame`` construction with passing a mixed list of mixed types could segfault. (:issue:`25075`) +- Fixed regression in :meth:`DataFrame.apply` causing ``RecursionError`` when ``dict``-like classes were passed as argument. (:issue:`25196`) +- Fixed regression in :meth:`DataFrame.replace` where ``regex=True`` was only replacing patterns matching the start of the string (:issue:`25259`) + +- Fixed regression in :meth:`DataFrame.duplicated()`, where empty dataframe was not returning a boolean dtyped Series. 
(:issue:`25184`) +- Fixed regression in :meth:`Series.min` and :meth:`Series.max` where ``numeric_only=True`` was ignored when the ``Series`` contained ```Categorical`` data (:issue:`25299`) +- Fixed regression in subtraction between :class:`Series` objects with ``datetime64[ns]`` dtype incorrectly raising ``OverflowError`` when the `Series` on the right contains null values (:issue:`25317`) +- Fixed regression in :class:`TimedeltaIndex` where `np.sum(index)` incorrectly returned a zero-dimensional object instead of a scalar (:issue:`25282`) +- Fixed regression in ``IntervalDtype`` construction where passing an incorrect string with 'Interval' as a prefix could result in a ``RecursionError``. (:issue:`25338`) + +- Fixed regression in :class:`Categorical`, where constructing it from a categorical ``Series`` and an explicit ``categories=`` that differed from that in the ``Series`` created an invalid object which could trigger segfaults. (:issue:`25318`) + +.. _whatsnew_0242.enhancements: + +Enhancements +^^^^^^^^^^^^ + +- +- + +.. 
_whatsnew_0242.bug_fixes: + +Bug Fixes +~~~~~~~~~ + +**Conversion** + +- +- +- + +**Indexing** + +- +- +- + +**I/O** + +- Better handling of terminal printing when the terminal dimensions are not known (:issue:`25080`); +- Bug in reading a HDF5 table-format ``DataFrame`` created in Python 2, in Python 3 (:issue:`24925`) +- Bug in reading a JSON with ``orient='table'`` generated by :meth:`DataFrame.to_json` with ``index=False`` (:issue:`25170`) +- Bug where float indexes could have misaligned values when printing (:issue:`25061`) +- + +**Categorical** + +- +- +- + +**Timezones** + +- +- +- + +**Timedelta** + +- +- +- + +**Reshaping** + +- Bug in :meth:`pandas.core.groupby.GroupBy.transform` where applying a function to a timezone aware column would return a timezone naive result (:issue:`24198`) +- Bug in :func:`DataFrame.join` when joining on a timezone aware :class:`DatetimeIndex` (:issue:`23931`) +- + +**Visualization** + +- +- +- + +**Other** + +- Bug in :meth:`Series.is_unique` where single occurrences of ``NaN`` were not considered unique (:issue:`25180`) +- Bug in :func:`merge` when merging an empty ``DataFrame`` with an ``Int64`` column or a non-empty ``DataFrame`` with an ``Int64`` column that is all ``NaN`` (:issue:`25183`) +- Bug in ``IntervalTree`` where a ``RecursionError`` occurs upon construction due to an overflow when adding endpoints, which also causes :class:`IntervalIndex` to crash during indexing operations (:issue:`25485`) +- + +.. _whatsnew_0.242.contributors: + +Contributors +~~~~~~~~~~~~ + +.. contributors:: v0.24.1..v0.24.2 diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst index 5129449e4fdf3..ce6f259f97062 100644 --- a/doc/source/whatsnew/v0.25.0.rst +++ b/doc/source/whatsnew/v0.25.0.rst @@ -19,23 +19,73 @@ including other versions of pandas. 
Other Enhancements ^^^^^^^^^^^^^^^^^^ +- Indexing of ``DataFrame`` and ``Series`` now accepts zerodim ``np.ndarray`` (:issue:`24919`) +- :meth:`Timestamp.replace` now supports the ``fold`` argument to disambiguate DST transition times (:issue:`25017`) +- :meth:`DataFrame.at_time` and :meth:`Series.at_time` now support :meth:`datetime.time` objects with timezones (:issue:`24043`) +- ``Series.str`` has gained :meth:`Series.str.casefold` method to remove all case distinctions present in a string (:issue:`25405`) +- :meth:`DataFrame.set_index` now works for instances of ``abc.Iterator``, provided their output is of the same length as the calling frame (:issue:`22484`, :issue:`24984`) +- :meth:`DatetimeIndex.union` now supports the ``sort`` argument. The behaviour of the sort parameter matches that of :meth:`Index.union` (:issue:`24994`) - -- -- - .. _whatsnew_0250.api_breaking: Backwards incompatible API changes ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +.. _whatsnew_0250.api_breaking.utc_offset_indexing: + +Indexing with date strings with UTC offsets +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Indexing a :class:`DataFrame` or :class:`Series` with a :class:`DatetimeIndex` with a +date string with a UTC offset would previously ignore the UTC offset. Now, the UTC offset +is respected in indexing. (:issue:`24076`, :issue:`16785`) + +*Previous Behavior*: + +.. code-block:: ipython + + In [1]: df = pd.DataFrame([0], index=pd.DatetimeIndex(['2019-01-01'], tz='US/Pacific')) + + In [2]: df + Out[2]: + 0 + 2019-01-01 00:00:00-08:00 0 + + In [3]: df['2019-01-01 00:00:00+04:00':'2019-01-01 01:00:00+04:00'] + Out[3]: + 0 + 2019-01-01 00:00:00-08:00 0 + +*New Behavior*: + +.. ipython:: python + + df = pd.DataFrame([0], index=pd.DatetimeIndex(['2019-01-01'], tz='US/Pacific')) + df['2019-01-01 12:00:00+04:00':'2019-01-01 13:00:00+04:00'] + ..
_whatsnew_0250.api_breaking.deps: + +Increased minimum versions for dependencies +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +We have updated our minimum supported versions of dependencies (:issue:`23519`). +If installed, we now require: + ++-----------------+-----------------+----------+ +| Package | Minimum Version | Required | ++=================+=================+==========+ +| pytest (dev) | 4.0.2 | | ++-----------------+-----------------+----------+ + .. _whatsnew_0250.api.other: Other API Changes ^^^^^^^^^^^^^^^^^ -- -- +- :class:`DatetimeTZDtype` will now standardize pytz timezones to a common timezone instance (:issue:`24713`) +- ``Timestamp`` and ``Timedelta`` scalars now implement the :meth:`to_numpy` method as aliases to :meth:`Timestamp.to_datetime64` and :meth:`Timedelta.to_timedelta64`, respectively. (:issue:`24653`) +- :meth:`Timestamp.strptime` will now raise a ``NotImplementedError`` (:issue:`25016`) - .. _whatsnew_0250.deprecations: @@ -43,16 +93,13 @@ Other API Changes Deprecations ~~~~~~~~~~~~ -- -- -- - +- Deprecated the `M (months)` and `Y (year)` `units` parameter of :func: `pandas.to_timedelta`, :func: `pandas.Timedelta` and :func: `pandas.TimedeltaIndex` (:issue:`16344`) .. _whatsnew_0250.prior_deprecations: Removal of prior version deprecations/changes ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - +- Removed (parts of) :class:`Panel` (:issue:`25047`,:issue:`25191`,:issue:`25231`) - - - @@ -62,15 +109,20 @@ Removal of prior version deprecations/changes Performance Improvements ~~~~~~~~~~~~~~~~~~~~~~~~ -- -- -- +- Significant speedup in `SparseArray` initialization that benefits most operations, fixing performance regression introduced in v0.20.0 (:issue:`24985`) +- `DataFrame.to_stata()` is now faster when outputting data with any string or non-native endian columns (:issue:`25045`) +- Improved performance of :meth:`Series.searchsorted`.
The speedup is especially large when the dtype is + int8/int16/int32 and the searched key is within the integer bounds for the dtype (:issue:`22034`) +- Improved performance of :meth:`pandas.core.groupby.GroupBy.quantile` (:issue:`20405`) .. _whatsnew_0250.bug_fixes: Bug Fixes ~~~~~~~~~ +- Bug in :func:`to_datetime` which would raise an (incorrect) ``ValueError`` when called with a date far into the future and the ``format`` argument specified instead of raising ``OutOfBoundsDatetime`` (:issue:`23830`) +- +- Categorical ^^^^^^^^^^^ @@ -82,7 +134,7 @@ Categorical Datetimelike ^^^^^^^^^^^^ -- +- Added support for ISO week year format ('%G-%V-%u') when parsing datetimes using :meth: `to_datetime` (:issue:`16607`) - - @@ -96,13 +148,15 @@ Timedelta Timezones ^^^^^^^^^ -- -- +- Bug in :func:`to_datetime` with ``utc=True`` and datetime strings that would apply previously parsed UTC offsets to subsequent arguments (:issue:`24992`) +- Bug in :func:`Timestamp.tz_localize` and :func:`Timestamp.tz_convert` does not propagate ``freq`` (:issue:`25241`) - Numeric ^^^^^^^ +- Bug in :meth:`to_numeric` in which large negative numbers were being improperly handled (:issue:`24910`) +- Bug in :meth:`to_numeric` in which numbers were being coerced to float, even though ``errors`` was not ``coerce`` (:issue:`24910`) - - - @@ -141,21 +195,25 @@ Indexing Missing ^^^^^^^ -- +- Fixed misleading exception message in :meth:`Series.missing` if argument ``order`` is required, but omitted (:issue:`10633`, :issue:`24014`). 
- - MultiIndex ^^^^^^^^^^ +- Bug in which incorrect exception raised by :meth:`pd.Timedelta` when testing the membership of :class:`MultiIndex` (:issue:`24570`) - - -- - I/O ^^^ +- Bug in :func:`DataFrame.to_html()` where values were truncated using display options instead of outputting the full content (:issue:`17004`) +- Fixed bug in missing text when using :meth:`to_clipboard` if copying utf-16 characters in Python 3 on Windows (:issue:`25040`) +- Bug in :func:`read_json` for ``orient='table'`` when it tries to infer dtypes by default, which is not applicable as dtypes are already defined in the JSON schema (:issue:`21345`) +- Bug in :func:`read_json` for ``orient='table'`` and float index, as it infers index dtype by default, which is not applicable because index dtype is already defined in the JSON schema (:issue:`25433`) +- Bug in :func:`read_json` for ``orient='table'`` and string of float column names, as it makes a column name type conversion to Timestamp, which is not applicable because column names are already defined in the JSON schema (:issue:`25435`) - - - @@ -171,24 +229,25 @@ Plotting Groupby/Resample/Rolling ^^^^^^^^^^^^^^^^^^^^^^^^ -- -- -- +- Bug in :meth:`pandas.core.resample.Resampler.agg` with a timezone aware index where ``OverflowError`` would raise when passing a list of functions (:issue:`22660`) +- Bug in :meth:`pandas.core.groupby.DataFrameGroupBy.nunique` in which the names of column levels were lost (:issue:`23222`) +- Bug in :func:`pandas.core.groupby.GroupBy.agg` when applying a aggregation function to timezone aware data (:issue:`23683`) +- Bug in :func:`pandas.core.groupby.GroupBy.first` and :func:`pandas.core.groupby.GroupBy.last` where timezone information would be dropped (:issue:`21603`) Reshaping ^^^^^^^^^ -- -- -- +- Bug in :func:`pandas.merge` adds a string of ``None`` if ``None`` is assigned in suffixes instead of remain the column name as-is (:issue:`24782`). 
+- Bug in :func:`merge` when merging by index name would sometimes result in an incorrectly numbered index (:issue:`24212`) +- :func:`to_records` now accepts dtypes to its `column_dtypes` parameter (:issue:`24895`) Sparse ^^^^^^ -- -- +- Significant speedup in `SparseArray` initialization that benefits most operations, fixing performance regression introduced in v0.20.0 (:issue:`24985`) +- Bug in :class:`SparseFrame` constructor where passing ``None`` as the data would cause ``default_fill_value`` to be ignored (:issue:`16807`) - @@ -206,4 +265,3 @@ Contributors ~~~~~~~~~~~~ .. contributors:: v0.24.x..HEAD - diff --git a/environment.yml b/environment.yml index 47fe8e4c2a640..c1669c9f49017 100644 --- a/environment.yml +++ b/environment.yml @@ -19,7 +19,8 @@ dependencies: - hypothesis>=3.82 - isort - moto - - pytest>=4.0 + - pytest>=4.0.2 + - pytest-mock - sphinx - numpydoc diff --git a/pandas/_libs/groupby.pxd b/pandas/_libs/groupby.pxd new file mode 100644 index 0000000000000..70ad8a62871e9 --- /dev/null +++ b/pandas/_libs/groupby.pxd @@ -0,0 +1,6 @@ +cdef enum InterpolationEnumType: + INTERPOLATION_LINEAR, + INTERPOLATION_LOWER, + INTERPOLATION_HIGHER, + INTERPOLATION_NEAREST, + INTERPOLATION_MIDPOINT diff --git a/pandas/_libs/groupby.pyx b/pandas/_libs/groupby.pyx index e6036654c71c3..71e25c3955a6d 100644 --- a/pandas/_libs/groupby.pyx +++ b/pandas/_libs/groupby.pyx @@ -2,6 +2,7 @@ import cython from cython import Py_ssize_t +from cython cimport floating from libc.stdlib cimport malloc, free @@ -381,6 +382,368 @@ def group_any_all(uint8_t[:] out, if values[i] == flag_val: out[lab] = flag_val +# ---------------------------------------------------------------------- +# group_add, group_prod, group_var, group_mean, group_ohlc +# ---------------------------------------------------------------------- + + +@cython.wraparound(False) +@cython.boundscheck(False) +def _group_add(floating[:, :] out, + int64_t[:] counts, + floating[:, :] values, + const int64_t[:] labels, + 
Py_ssize_t min_count=0): + """ + Only aggregates on axis=0 + """ + cdef: + Py_ssize_t i, j, N, K, lab, ncounts = len(counts) + floating val, count + floating[:, :] sumx, nobs + + if len(values) != len(labels): + raise AssertionError("len(index) != len(labels)") + + nobs = np.zeros_like(out) + sumx = np.zeros_like(out) + + N, K = (<object>values).shape + + with nogil: + for i in range(N): + lab = labels[i] + if lab < 0: + continue + + counts[lab] += 1 + for j in range(K): + val = values[i, j] + + # not nan + if val == val: + nobs[lab, j] += 1 + sumx[lab, j] += val + + for i in range(ncounts): + for j in range(K): + if nobs[i, j] < min_count: + out[i, j] = NAN + else: + out[i, j] = sumx[i, j] + + +group_add_float32 = _group_add['float'] +group_add_float64 = _group_add['double'] + + +@cython.wraparound(False) +@cython.boundscheck(False) +def _group_prod(floating[:, :] out, + int64_t[:] counts, + floating[:, :] values, + const int64_t[:] labels, + Py_ssize_t min_count=0): + """ + Only aggregates on axis=0 + """ + cdef: + Py_ssize_t i, j, N, K, lab, ncounts = len(counts) + floating val, count + floating[:, :] prodx, nobs + + if not len(values) == len(labels): + raise AssertionError("len(index) != len(labels)") + + nobs = np.zeros_like(out) + prodx = np.ones_like(out) + + N, K = (<object>values).shape + + with nogil: + for i in range(N): + lab = labels[i] + if lab < 0: + continue + + counts[lab] += 1 + for j in range(K): + val = values[i, j] + + # not nan + if val == val: + nobs[lab, j] += 1 + prodx[lab, j] *= val + + for i in range(ncounts): + for j in range(K): + if nobs[i, j] < min_count: + out[i, j] = NAN + else: + out[i, j] = prodx[i, j] + + +group_prod_float32 = _group_prod['float'] +group_prod_float64 = _group_prod['double'] + + +@cython.wraparound(False) +@cython.boundscheck(False) +@cython.cdivision(True) +def _group_var(floating[:, :] out, + int64_t[:] counts, + floating[:, :] values, + const int64_t[:] labels, + Py_ssize_t min_count=-1): + cdef: + Py_ssize_t 
i, j, N, K, lab, ncounts = len(counts) + floating val, ct, oldmean + floating[:, :] nobs, mean + + assert min_count == -1, "'min_count' only used in add and prod" + + if not len(values) == len(labels): + raise AssertionError("len(index) != len(labels)") + + nobs = np.zeros_like(out) + mean = np.zeros_like(out) + + N, K = (<object>values).shape + + out[:, :] = 0.0 + + with nogil: + for i in range(N): + lab = labels[i] + if lab < 0: + continue + + counts[lab] += 1 + + for j in range(K): + val = values[i, j] + + # not nan + if val == val: + nobs[lab, j] += 1 + oldmean = mean[lab, j] + mean[lab, j] += (val - oldmean) / nobs[lab, j] + out[lab, j] += (val - mean[lab, j]) * (val - oldmean) + + for i in range(ncounts): + for j in range(K): + ct = nobs[i, j] + if ct < 2: + out[i, j] = NAN + else: + out[i, j] /= (ct - 1) + + +group_var_float32 = _group_var['float'] +group_var_float64 = _group_var['double'] + + +@cython.wraparound(False) +@cython.boundscheck(False) +def _group_mean(floating[:, :] out, + int64_t[:] counts, + floating[:, :] values, + const int64_t[:] labels, + Py_ssize_t min_count=-1): + cdef: + Py_ssize_t i, j, N, K, lab, ncounts = len(counts) + floating val, count + floating[:, :] sumx, nobs + + assert min_count == -1, "'min_count' only used in add and prod" + + if not len(values) == len(labels): + raise AssertionError("len(index) != len(labels)") + + nobs = np.zeros_like(out) + sumx = np.zeros_like(out) + + N, K = (<object>values).shape + + with nogil: + for i in range(N): + lab = labels[i] + if lab < 0: + continue + + counts[lab] += 1 + for j in range(K): + val = values[i, j] + # not nan + if val == val: + nobs[lab, j] += 1 + sumx[lab, j] += val + + for i in range(ncounts): + for j in range(K): + count = nobs[i, j] + if nobs[i, j] == 0: + out[i, j] = NAN + else: + out[i, j] = sumx[i, j] / count + + +group_mean_float32 = _group_mean['float'] +group_mean_float64 = _group_mean['double'] + + +@cython.wraparound(False) +@cython.boundscheck(False) +def 
_group_ohlc(floating[:, :] out, + int64_t[:] counts, + floating[:, :] values, + const int64_t[:] labels, + Py_ssize_t min_count=-1): + """ + Only aggregates on axis=0 + """ + cdef: + Py_ssize_t i, j, N, K, lab + floating val, count + Py_ssize_t ngroups = len(counts) + + assert min_count == -1, "'min_count' only used in add and prod" + + if len(labels) == 0: + return + + N, K = (<object>values).shape + + if out.shape[1] != 4: + raise ValueError('Output array must have 4 columns') + + if K > 1: + raise NotImplementedError("Argument 'values' must have only " + "one dimension") + out[:] = np.nan + + with nogil: + for i in range(N): + lab = labels[i] + if lab == -1: + continue + + counts[lab] += 1 + val = values[i, 0] + if val != val: + continue + + if out[lab, 0] != out[lab, 0]: + out[lab, 0] = out[lab, 1] = out[lab, 2] = out[lab, 3] = val + else: + out[lab, 1] = max(out[lab, 1], val) + out[lab, 2] = min(out[lab, 2], val) + out[lab, 3] = val + + +group_ohlc_float32 = _group_ohlc['float'] +group_ohlc_float64 = _group_ohlc['double'] + + +@cython.boundscheck(False) +@cython.wraparound(False) +def group_quantile(ndarray[float64_t] out, + ndarray[int64_t] labels, + numeric[:] values, + ndarray[uint8_t] mask, + float64_t q, + object interpolation): + """ + Calculate the quantile per group. + + Parameters + ---------- + out : ndarray + Array of aggregated values that will be written to. + labels : ndarray + Array containing the unique group labels. + values : ndarray + Array containing the values to apply the function against. + q : float + The quantile value to search for. + + Notes + ----- + Rather than explicitly returning a value, this function modifies the + provided `out` parameter. 
+ """ + cdef: + Py_ssize_t i, N=len(labels), ngroups, grp_sz, non_na_sz + Py_ssize_t grp_start=0, idx=0 + int64_t lab + uint8_t interp + float64_t q_idx, frac, val, next_val + ndarray[int64_t] counts, non_na_counts, sort_arr + + assert values.shape[0] == N + inter_methods = { + 'linear': INTERPOLATION_LINEAR, + 'lower': INTERPOLATION_LOWER, + 'higher': INTERPOLATION_HIGHER, + 'nearest': INTERPOLATION_NEAREST, + 'midpoint': INTERPOLATION_MIDPOINT, + } + interp = inter_methods[interpolation] + + counts = np.zeros_like(out, dtype=np.int64) + non_na_counts = np.zeros_like(out, dtype=np.int64) + ngroups = len(counts) + + # First figure out the size of every group + with nogil: + for i in range(N): + lab = labels[i] + counts[lab] += 1 + if not mask[i]: + non_na_counts[lab] += 1 + + # Get an index of values sorted by labels and then values + order = (values, labels) + sort_arr = np.lexsort(order).astype(np.int64, copy=False) + + with nogil: + for i in range(ngroups): + # Figure out how many group elements there are + grp_sz = counts[i] + non_na_sz = non_na_counts[i] + + if non_na_sz == 0: + out[i] = NaN + else: + # Calculate where to retrieve the desired value + # Casting to int will intentionaly truncate result + idx = grp_start + <int64_t>(q * <float64_t>(non_na_sz - 1)) + + val = values[sort_arr[idx]] + # If requested quantile falls evenly on a particular index + # then write that index's value out. Otherwise interpolate + q_idx = q * (non_na_sz - 1) + frac = q_idx % 1 + + if frac == 0.0 or interp == INTERPOLATION_LOWER: + out[i] = val + else: + next_val = values[sort_arr[idx + 1]] + if interp == INTERPOLATION_LINEAR: + out[i] = val + (next_val - val) * frac + elif interp == INTERPOLATION_HIGHER: + out[i] = next_val + elif interp == INTERPOLATION_MIDPOINT: + out[i] = (val + next_val) / 2.0 + elif interp == INTERPOLATION_NEAREST: + if frac > .5 or (frac == .5 and q > .5): # Always OK? 
+ out[i] = next_val + else: + out[i] = val + + # Increment the index reference in sorted_arr for the next group + grp_start += grp_sz + # generated from template include "groupby_helper.pxi" diff --git a/pandas/_libs/groupby_helper.pxi.in b/pandas/_libs/groupby_helper.pxi.in index 858039f038d02..63cd4d6ac6ff2 100644 --- a/pandas/_libs/groupby_helper.pxi.in +++ b/pandas/_libs/groupby_helper.pxi.in @@ -8,266 +8,6 @@ cdef extern from "numpy/npy_math.h": float64_t NAN "NPY_NAN" _int64_max = np.iinfo(np.int64).max -# ---------------------------------------------------------------------- -# group_add, group_prod, group_var, group_mean, group_ohlc -# ---------------------------------------------------------------------- - -{{py: - -# name, c_type -dtypes = [('float64', 'float64_t'), - ('float32', 'float32_t')] - -def get_dispatch(dtypes): - - for name, c_type in dtypes: - yield name, c_type -}} - -{{for name, c_type in get_dispatch(dtypes)}} - - -@cython.wraparound(False) -@cython.boundscheck(False) -def group_add_{{name}}({{c_type}}[:, :] out, - int64_t[:] counts, - {{c_type}}[:, :] values, - const int64_t[:] labels, - Py_ssize_t min_count=0): - """ - Only aggregates on axis=0 - """ - cdef: - Py_ssize_t i, j, N, K, lab, ncounts = len(counts) - {{c_type}} val, count - ndarray[{{c_type}}, ndim=2] sumx, nobs - - if not len(values) == len(labels): - raise AssertionError("len(index) != len(labels)") - - nobs = np.zeros_like(out) - sumx = np.zeros_like(out) - - N, K = (<object>values).shape - - with nogil: - - for i in range(N): - lab = labels[i] - if lab < 0: - continue - - counts[lab] += 1 - for j in range(K): - val = values[i, j] - - # not nan - if val == val: - nobs[lab, j] += 1 - sumx[lab, j] += val - - for i in range(ncounts): - for j in range(K): - if nobs[i, j] < min_count: - out[i, j] = NAN - else: - out[i, j] = sumx[i, j] - - -@cython.wraparound(False) -@cython.boundscheck(False) -def group_prod_{{name}}({{c_type}}[:, :] out, - int64_t[:] counts, - {{c_type}}[:, :] 
values, - const int64_t[:] labels, - Py_ssize_t min_count=0): - """ - Only aggregates on axis=0 - """ - cdef: - Py_ssize_t i, j, N, K, lab, ncounts = len(counts) - {{c_type}} val, count - ndarray[{{c_type}}, ndim=2] prodx, nobs - - if not len(values) == len(labels): - raise AssertionError("len(index) != len(labels)") - - nobs = np.zeros_like(out) - prodx = np.ones_like(out) - - N, K = (<object>values).shape - - with nogil: - for i in range(N): - lab = labels[i] - if lab < 0: - continue - - counts[lab] += 1 - for j in range(K): - val = values[i, j] - - # not nan - if val == val: - nobs[lab, j] += 1 - prodx[lab, j] *= val - - for i in range(ncounts): - for j in range(K): - if nobs[i, j] < min_count: - out[i, j] = NAN - else: - out[i, j] = prodx[i, j] - - -@cython.wraparound(False) -@cython.boundscheck(False) -@cython.cdivision(True) -def group_var_{{name}}({{c_type}}[:, :] out, - int64_t[:] counts, - {{c_type}}[:, :] values, - const int64_t[:] labels, - Py_ssize_t min_count=-1): - cdef: - Py_ssize_t i, j, N, K, lab, ncounts = len(counts) - {{c_type}} val, ct, oldmean - ndarray[{{c_type}}, ndim=2] nobs, mean - - assert min_count == -1, "'min_count' only used in add and prod" - - if not len(values) == len(labels): - raise AssertionError("len(index) != len(labels)") - - nobs = np.zeros_like(out) - mean = np.zeros_like(out) - - N, K = (<object>values).shape - - out[:, :] = 0.0 - - with nogil: - for i in range(N): - lab = labels[i] - if lab < 0: - continue - - counts[lab] += 1 - - for j in range(K): - val = values[i, j] - - # not nan - if val == val: - nobs[lab, j] += 1 - oldmean = mean[lab, j] - mean[lab, j] += (val - oldmean) / nobs[lab, j] - out[lab, j] += (val - mean[lab, j]) * (val - oldmean) - - for i in range(ncounts): - for j in range(K): - ct = nobs[i, j] - if ct < 2: - out[i, j] = NAN - else: - out[i, j] /= (ct - 1) -# add passing bin edges, instead of labels - - -@cython.wraparound(False) -@cython.boundscheck(False) -def group_mean_{{name}}({{c_type}}[:, :] 
out, - int64_t[:] counts, - {{c_type}}[:, :] values, - const int64_t[:] labels, - Py_ssize_t min_count=-1): - cdef: - Py_ssize_t i, j, N, K, lab, ncounts = len(counts) - {{c_type}} val, count - ndarray[{{c_type}}, ndim=2] sumx, nobs - - assert min_count == -1, "'min_count' only used in add and prod" - - if not len(values) == len(labels): - raise AssertionError("len(index) != len(labels)") - - nobs = np.zeros_like(out) - sumx = np.zeros_like(out) - - N, K = (<object>values).shape - - with nogil: - for i in range(N): - lab = labels[i] - if lab < 0: - continue - - counts[lab] += 1 - for j in range(K): - val = values[i, j] - # not nan - if val == val: - nobs[lab, j] += 1 - sumx[lab, j] += val - - for i in range(ncounts): - for j in range(K): - count = nobs[i, j] - if nobs[i, j] == 0: - out[i, j] = NAN - else: - out[i, j] = sumx[i, j] / count - - -@cython.wraparound(False) -@cython.boundscheck(False) -def group_ohlc_{{name}}({{c_type}}[:, :] out, - int64_t[:] counts, - {{c_type}}[:, :] values, - const int64_t[:] labels, - Py_ssize_t min_count=-1): - """ - Only aggregates on axis=0 - """ - cdef: - Py_ssize_t i, j, N, K, lab - {{c_type}} val, count - Py_ssize_t ngroups = len(counts) - - assert min_count == -1, "'min_count' only used in add and prod" - - if len(labels) == 0: - return - - N, K = (<object>values).shape - - if out.shape[1] != 4: - raise ValueError('Output array must have 4 columns') - - if K > 1: - raise NotImplementedError("Argument 'values' must have only " - "one dimension") - out[:] = np.nan - - with nogil: - for i in range(N): - lab = labels[i] - if lab == -1: - continue - - counts[lab] += 1 - val = values[i, 0] - if val != val: - continue - - if out[lab, 0] != out[lab, 0]: - out[lab, 0] = out[lab, 1] = out[lab, 2] = out[lab, 3] = val - else: - out[lab, 1] = max(out[lab, 1], val) - out[lab, 2] = min(out[lab, 2], val) - out[lab, 3] = val - -{{endfor}} - # ---------------------------------------------------------------------- # group_nth, group_last, 
group_rank # ---------------------------------------------------------------------- diff --git a/pandas/_libs/interval.pyx b/pandas/_libs/interval.pyx index 3147f36dcc835..e86b692e9915e 100644 --- a/pandas/_libs/interval.pyx +++ b/pandas/_libs/interval.pyx @@ -18,7 +18,6 @@ cnp.import_array() cimport pandas._libs.util as util -util.import_array() from pandas._libs.hashtable cimport Int64Vector, Int64VectorData @@ -151,9 +150,6 @@ cdef class Interval(IntervalMixin): Left bound for the interval. right : orderable scalar Right bound for the interval. - closed : {'left', 'right', 'both', 'neither'}, default 'right' - Whether the interval is closed on the left-side, right-side, both or - neither. closed : {'right', 'left', 'both', 'neither'}, default 'right' Whether the interval is closed on the left-side, right-side, both or neither. See the Notes for more detailed explanation. diff --git a/pandas/_libs/intervaltree.pxi.in b/pandas/_libs/intervaltree.pxi.in index fb6f30c030f11..196841f35ed8d 100644 --- a/pandas/_libs/intervaltree.pxi.in +++ b/pandas/_libs/intervaltree.pxi.in @@ -284,7 +284,7 @@ cdef class {{dtype_title}}Closed{{closed_title}}IntervalNode: else: # calculate a pivot so we can create child nodes self.is_leaf_node = False - self.pivot = np.median(left + right) / 2 + self.pivot = np.median(left / 2 + right / 2) left_set, right_set, center_set = self.classify_intervals( left, right) diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx index 4745916eb0ce2..34ceeb20e260e 100644 --- a/pandas/_libs/lib.pyx +++ b/pandas/_libs/lib.pyx @@ -233,10 +233,11 @@ def fast_unique_multiple(list arrays, sort: bool=True): if val not in table: table[val] = stub uniques.append(val) - if sort: + if sort is None: try: uniques.sort() except Exception: + # TODO: RuntimeWarning? 
pass return uniques @@ -938,6 +939,7 @@ _TYPE_MAP = { 'float32': 'floating', 'float64': 'floating', 'f': 'floating', + 'complex64': 'complex', 'complex128': 'complex', 'c': 'complex', 'string': 'string' if PY2 else 'bytes', @@ -1304,6 +1306,9 @@ def infer_dtype(value: object, skipna: object=None) -> str: elif is_decimal(val): return 'decimal' + elif is_complex(val): + return 'complex' + elif util.is_float_object(val): if is_float_array(values): return 'floating' @@ -1828,7 +1833,7 @@ def maybe_convert_numeric(ndarray[object] values, set na_values, except (ValueError, OverflowError, TypeError): pass - # otherwise, iterate and do full infererence + # Otherwise, iterate and do full inference. cdef: int status, maybe_int Py_ssize_t i, n = values.size @@ -1865,10 +1870,10 @@ def maybe_convert_numeric(ndarray[object] values, set na_values, else: seen.float_ = True - if val <= oINT64_MAX: + if oINT64_MIN <= val <= oINT64_MAX: ints[i] = val - if seen.sint_ and seen.uint_: + if val < oINT64_MIN or (seen.sint_ and seen.uint_): seen.float_ = True elif util.is_bool_object(val): @@ -1910,23 +1915,28 @@ def maybe_convert_numeric(ndarray[object] values, set na_values, else: seen.saw_int(as_int) - if not (seen.float_ or as_int in na_values): + if as_int not in na_values: if as_int < oINT64_MIN or as_int > oUINT64_MAX: - raise ValueError('Integer out of range.') + if seen.coerce_numeric: + seen.float_ = True + else: + raise ValueError("Integer out of range.") + else: + if as_int >= 0: + uints[i] = as_int - if as_int >= 0: - uints[i] = as_int - if as_int <= oINT64_MAX: - ints[i] = as_int + if as_int <= oINT64_MAX: + ints[i] = as_int seen.float_ = seen.float_ or (seen.uint_ and seen.sint_) else: seen.float_ = True except (TypeError, ValueError) as e: if not seen.coerce_numeric: - raise type(e)(str(e) + ' at position {pos}'.format(pos=i)) + raise type(e)(str(e) + " at position {pos}".format(pos=i)) elif "uint64" in str(e): # Exception from check functions. 
raise + seen.saw_null() floats[i] = NaN @@ -2271,7 +2281,7 @@ def to_object_array(rows: object, int min_width=0): result = np.empty((n, k), dtype=object) for i in range(n): - row = <list>input_rows[i] + row = list(input_rows[i]) for j in range(len(row)): result[i, j] = row[j] diff --git a/pandas/_libs/reduction.pyx b/pandas/_libs/reduction.pyx index 507567cf480d7..517d59c399179 100644 --- a/pandas/_libs/reduction.pyx +++ b/pandas/_libs/reduction.pyx @@ -342,7 +342,9 @@ cdef class SeriesGrouper: index = None else: values = dummy.values - if dummy.dtype != self.arr.dtype: + # GH 23683: datetimetz types are equivalent to datetime types here + if (dummy.dtype != self.arr.dtype + and values.dtype != self.arr.dtype): raise ValueError('Dummy array must be same dtype') if not values.flags.contiguous: values = values.copy() diff --git a/pandas/_libs/sparse.pyx b/pandas/_libs/sparse.pyx index f5980998f6db4..5471c8184e458 100644 --- a/pandas/_libs/sparse.pyx +++ b/pandas/_libs/sparse.pyx @@ -72,9 +72,6 @@ cdef class IntIndex(SparseIndex): A ValueError is raised if any of these conditions is violated. """ - cdef: - int32_t index, prev = -1 - if self.npoints > self.length: msg = ("Too many indices. 
Expected " "{exp} but found {act}").format( @@ -86,17 +83,15 @@ cdef class IntIndex(SparseIndex): if self.npoints == 0: return - if min(self.indices) < 0: + if self.indices.min() < 0: raise ValueError("No index can be less than zero") - if max(self.indices) >= self.length: + if self.indices.max() >= self.length: raise ValueError("All indices must be less than the length") - for index in self.indices: - if prev != -1 and index <= prev: - raise ValueError("Indices must be strictly increasing") - - prev = index + monotonic = np.all(self.indices[:-1] < self.indices[1:]) + if not monotonic: + raise ValueError("Indices must be strictly increasing") def equals(self, other): if not isinstance(other, IntIndex): diff --git a/pandas/_libs/src/compat_helper.h b/pandas/_libs/src/compat_helper.h index 462f53392adee..078069fb48af2 100644 --- a/pandas/_libs/src/compat_helper.h +++ b/pandas/_libs/src/compat_helper.h @@ -29,8 +29,8 @@ the macro, which restores compat. #ifndef PYPY_VERSION # if PY_VERSION_HEX < 0x03070000 && defined(PySlice_GetIndicesEx) # undef PySlice_GetIndicesEx -# endif -#endif +# endif // PY_VERSION_HEX +#endif // PYPY_VERSION PANDAS_INLINE int slice_get_indices(PyObject *s, Py_ssize_t length, @@ -44,7 +44,7 @@ PANDAS_INLINE int slice_get_indices(PyObject *s, #else return PySlice_GetIndicesEx((PySliceObject *)s, length, start, stop, step, slicelength); -#endif +#endif // PY_VERSION_HEX } #endif // PANDAS__LIBS_SRC_COMPAT_HELPER_H_ diff --git a/pandas/_libs/src/inline_helper.h b/pandas/_libs/src/inline_helper.h index 397ec8e7b2cb8..e203a05d2eb56 100644 --- a/pandas/_libs/src/inline_helper.h +++ b/pandas/_libs/src/inline_helper.h @@ -19,7 +19,7 @@ The full license is in the LICENSE file, distributed with this software. 
#define PANDAS_INLINE static inline #else #define PANDAS_INLINE - #endif -#endif + #endif // __GNUC__ +#endif // PANDAS_INLINE #endif // PANDAS__LIBS_SRC_INLINE_HELPER_H_ diff --git a/pandas/_libs/src/parse_helper.h b/pandas/_libs/src/parse_helper.h index b71131bee7008..6fcd2ed0a9ea0 100644 --- a/pandas/_libs/src/parse_helper.h +++ b/pandas/_libs/src/parse_helper.h @@ -30,7 +30,7 @@ int to_double(char *item, double *p_value, char sci, char decimal, #if PY_VERSION_HEX < 0x02060000 #define PyBytes_Check PyString_Check #define PyBytes_AS_STRING PyString_AS_STRING -#endif +#endif // PY_VERSION_HEX int floatify(PyObject *str, double *result, int *maybe_int) { int status; diff --git a/pandas/_libs/src/parser/io.c b/pandas/_libs/src/parser/io.c index 19271c78501ba..f578ce138e274 100644 --- a/pandas/_libs/src/parser/io.c +++ b/pandas/_libs/src/parser/io.c @@ -15,7 +15,7 @@ The full license is in the LICENSE file, distributed with this software. #ifndef O_BINARY #define O_BINARY 0 -#endif /* O_BINARY */ +#endif // O_BINARY /* On-disk FILE, uncompressed @@ -277,4 +277,4 @@ void *buffer_mmap_bytes(void *source, size_t nbytes, size_t *bytes_read, return NULL; } -#endif +#endif // HAVE_MMAP diff --git a/pandas/_libs/src/parser/io.h b/pandas/_libs/src/parser/io.h index d22e8ddaea88d..074322c7bdf78 100644 --- a/pandas/_libs/src/parser/io.h +++ b/pandas/_libs/src/parser/io.h @@ -25,7 +25,7 @@ typedef struct _file_source { #if !defined(_WIN32) && !defined(HAVE_MMAP) #define HAVE_MMAP -#endif +#endif // HAVE_MMAP typedef struct _memory_map { int fd; diff --git a/pandas/_libs/src/parser/tokenizer.c b/pandas/_libs/src/parser/tokenizer.c index a86af7c5416de..6acf3c3de0c91 100644 --- a/pandas/_libs/src/parser/tokenizer.c +++ b/pandas/_libs/src/parser/tokenizer.c @@ -1480,7 +1480,7 @@ int main(int argc, char *argv[]) { return 0; } -#endif +#endif // TEST // --------------------------------------------------------------------------- // Implementation of xstrtod diff --git 
a/pandas/_libs/src/parser/tokenizer.h b/pandas/_libs/src/parser/tokenizer.h index c32c061c7fa89..ce9dd39b16222 100644 --- a/pandas/_libs/src/parser/tokenizer.h +++ b/pandas/_libs/src/parser/tokenizer.h @@ -42,7 +42,7 @@ See LICENSE for the license #if defined(_MSC_VER) #define strtoll _strtoi64 -#endif +#endif // _MSC_VER /* @@ -75,7 +75,7 @@ See LICENSE for the license #define TRACE(X) printf X; #else #define TRACE(X) -#endif +#endif // VERBOSE #define PARSER_OUT_OF_MEMORY -1 diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx index 798e338d5581b..624872c1c56c6 100644 --- a/pandas/_libs/tslib.pyx +++ b/pandas/_libs/tslib.pyx @@ -645,6 +645,8 @@ cpdef array_to_datetime(ndarray[object] values, str errors='raise', out_tzoffset_vals.add(out_tzoffset * 60.) tz = pytz.FixedOffset(out_tzoffset) value = tz_convert_single(value, tz, UTC) + out_local = 0 + out_tzoffset = 0 else: # Add a marker for naive string, to track if we are # parsing mixed naive and aware strings @@ -668,9 +670,11 @@ cpdef array_to_datetime(ndarray[object] values, str errors='raise', # dateutil parser will return incorrect result because # it will ignore nanoseconds if is_raise: - raise ValueError("time data {val} doesn't " - "match format specified" - .format(val=val)) + + # Still raise OutOfBoundsDatetime, + # as error message is informative. + raise + assert is_ignore return values, tz_out raise diff --git a/pandas/_libs/tslibs/nattype.pyx b/pandas/_libs/tslibs/nattype.pyx index a55d15a7c4e85..79e2e256c501d 100644 --- a/pandas/_libs/tslibs/nattype.pyx +++ b/pandas/_libs/tslibs/nattype.pyx @@ -183,9 +183,31 @@ cdef class _NaT(datetime): return np.datetime64(NPY_NAT, 'ns') def to_datetime64(self): - """ Returns a numpy.datetime64 object with 'ns' precision """ + """ + Return a numpy.datetime64 object with 'ns' precision. + """ return np.datetime64('NaT', 'ns') + def to_numpy(self, dtype=None, copy=False): + """ + Convert the Timestamp to a NumPy datetime64. + + .. 
versionadded:: 0.25.0 + + This is an alias method for `Timestamp.to_datetime64()`. The dtype and + copy parameters are available here only for compatibility. Their values + will not affect the return value. + + Returns + ------- + numpy.datetime64 + + See Also + -------- + DatetimeIndex.to_numpy : Similar method for DatetimeIndex. + """ + return self.to_datetime64() + def __repr__(self): return 'NaT' @@ -352,7 +374,6 @@ class NaTType(_NaT): utctimetuple = _make_error_func('utctimetuple', datetime) timetz = _make_error_func('timetz', datetime) timetuple = _make_error_func('timetuple', datetime) - strptime = _make_error_func('strptime', datetime) strftime = _make_error_func('strftime', datetime) isocalendar = _make_error_func('isocalendar', datetime) dst = _make_error_func('dst', datetime) @@ -366,6 +387,14 @@ class NaTType(_NaT): # The remaining methods have docstrings copy/pasted from the analogous # Timestamp methods. + strptime = _make_error_func('strptime', # noqa:E128 + """ + Timestamp.strptime(string, format) + + Function is not implemented. Use pd.to_datetime(). + """ + ) + utcfromtimestamp = _make_error_func('utcfromtimestamp', # noqa:E128 """ Timestamp.utcfromtimestamp(ts) @@ -382,7 +411,7 @@ class NaTType(_NaT): ) combine = _make_error_func('combine', # noqa:E128 """ - Timsetamp.combine(date, time) + Timestamp.combine(date, time) date, time -> datetime with same date and time fields """ @@ -448,7 +477,7 @@ class NaTType(_NaT): """ Timestamp.now(tz=None) - Returns new Timestamp object representing current time local to + Return new Timestamp object representing current time local to tz. 
Parameters @@ -669,7 +698,6 @@ class NaTType(_NaT): nanosecond : int, optional tzinfo : tz-convertible, optional fold : int, optional, default is 0 - added in 3.6, NotImplemented Returns ------- diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx index 856aa52f82cf5..e28462f7103b9 100644 --- a/pandas/_libs/tslibs/offsets.pyx +++ b/pandas/_libs/tslibs/offsets.pyx @@ -18,6 +18,7 @@ from numpy cimport int64_t cnp.import_array() +from pandas._libs.tslibs cimport util from pandas._libs.tslibs.util cimport is_string_object, is_integer_object from pandas._libs.tslibs.ccalendar import MONTHS, DAYS @@ -408,6 +409,10 @@ class _BaseOffset(object): return self.apply(other) def __mul__(self, other): + if hasattr(other, "_typ"): + return NotImplemented + if util.is_array(other): + return np.array([self * x for x in other]) return type(self)(n=other * self.n, normalize=self.normalize, **self.kwds) @@ -458,6 +463,9 @@ class _BaseOffset(object): TypeError if `int(n)` raises ValueError if n != int(n) """ + if util.is_timedelta64_object(n): + raise TypeError('`n` argument must be an integer, ' + 'got {ntype}'.format(ntype=type(n))) try: nint = int(n) except (ValueError, TypeError): @@ -533,12 +541,20 @@ class _Tick(object): can do isinstance checks on _Tick and avoid importing tseries.offsets """ + # ensure that reversed-ops with numpy scalars return NotImplemented + __array_priority__ = 1000 + def __truediv__(self, other): result = self.delta.__truediv__(other) return _wrap_timedelta_result(result) + def __rtruediv__(self, other): + result = self.delta.__rtruediv__(other) + return _wrap_timedelta_result(result) + if PY2: __div__ = __truediv__ + __rdiv__ = __rtruediv__ # ---------------------------------------------------------------------- diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx index e38e9a1ca5df6..a5a50ea59753d 100644 --- a/pandas/_libs/tslibs/period.pyx +++ b/pandas/_libs/tslibs/period.pyx @@ -138,11 +138,11 @@ cdef 
int64_t get_daytime_conversion_factor(int from_index, int to_index) nogil: return daytime_conversion_factor_matrix[row - 6][col - 6] -cdef int64_t nofunc(int64_t ordinal, asfreq_info *af_info): - return np.iinfo(np.int32).min +cdef int64_t nofunc(int64_t ordinal, asfreq_info *af_info) nogil: + return INT32_MIN -cdef int64_t no_op(int64_t ordinal, asfreq_info *af_info): +cdef int64_t no_op(int64_t ordinal, asfreq_info *af_info) nogil: return ordinal @@ -270,7 +270,8 @@ cdef int64_t DtoB_weekday(int64_t unix_date) nogil: return ((unix_date + 4) // 7) * 5 + ((unix_date + 4) % 7) - 4 -cdef int64_t DtoB(npy_datetimestruct *dts, int roll_back, int64_t unix_date): +cdef int64_t DtoB(npy_datetimestruct *dts, int roll_back, + int64_t unix_date) nogil: cdef: int day_of_week = dayofweek(dts.year, dts.month, dts.day) @@ -286,21 +287,23 @@ cdef int64_t DtoB(npy_datetimestruct *dts, int roll_back, int64_t unix_date): return DtoB_weekday(unix_date) -cdef inline int64_t upsample_daytime(int64_t ordinal, asfreq_info *af_info): +cdef inline int64_t upsample_daytime(int64_t ordinal, + asfreq_info *af_info) nogil: if (af_info.is_end): return (ordinal + 1) * af_info.intraday_conversion_factor - 1 else: return ordinal * af_info.intraday_conversion_factor -cdef inline int64_t downsample_daytime(int64_t ordinal, asfreq_info *af_info): +cdef inline int64_t downsample_daytime(int64_t ordinal, + asfreq_info *af_info) nogil: return ordinal // (af_info.intraday_conversion_factor) cdef inline int64_t transform_via_day(int64_t ordinal, asfreq_info *af_info, freq_conv_func first_func, - freq_conv_func second_func): + freq_conv_func second_func) nogil: cdef: int64_t result @@ -313,7 +316,7 @@ cdef inline int64_t transform_via_day(int64_t ordinal, # Conversion _to_ Daily Freq cdef void AtoD_ym(int64_t ordinal, int64_t *year, - int *month, asfreq_info *af_info): + int *month, asfreq_info *af_info) nogil: year[0] = ordinal + 1970 month[0] = 1 @@ -327,7 +330,7 @@ cdef void AtoD_ym(int64_t ordinal, 
int64_t *year, year[0] -= 1 -cdef int64_t asfreq_AtoDT(int64_t ordinal, asfreq_info *af_info): +cdef int64_t asfreq_AtoDT(int64_t ordinal, asfreq_info *af_info) nogil: cdef: int64_t unix_date, year int month @@ -341,7 +344,7 @@ cdef int64_t asfreq_AtoDT(int64_t ordinal, asfreq_info *af_info): cdef void QtoD_ym(int64_t ordinal, int *year, - int *month, asfreq_info *af_info): + int *month, asfreq_info *af_info) nogil: year[0] = ordinal // 4 + 1970 month[0] = (ordinal % 4) * 3 + 1 @@ -353,7 +356,7 @@ cdef void QtoD_ym(int64_t ordinal, int *year, year[0] -= 1 -cdef int64_t asfreq_QtoDT(int64_t ordinal, asfreq_info *af_info): +cdef int64_t asfreq_QtoDT(int64_t ordinal, asfreq_info *af_info) nogil: cdef: int64_t unix_date int year, month @@ -366,12 +369,12 @@ cdef int64_t asfreq_QtoDT(int64_t ordinal, asfreq_info *af_info): return upsample_daytime(unix_date, af_info) -cdef void MtoD_ym(int64_t ordinal, int *year, int *month): +cdef void MtoD_ym(int64_t ordinal, int *year, int *month) nogil: year[0] = ordinal // 12 + 1970 month[0] = ordinal % 12 + 1 -cdef int64_t asfreq_MtoDT(int64_t ordinal, asfreq_info *af_info): +cdef int64_t asfreq_MtoDT(int64_t ordinal, asfreq_info *af_info) nogil: cdef: int64_t unix_date int year, month @@ -384,7 +387,7 @@ cdef int64_t asfreq_MtoDT(int64_t ordinal, asfreq_info *af_info): return upsample_daytime(unix_date, af_info) -cdef int64_t asfreq_WtoDT(int64_t ordinal, asfreq_info *af_info): +cdef int64_t asfreq_WtoDT(int64_t ordinal, asfreq_info *af_info) nogil: ordinal = (ordinal * 7 + af_info.from_end - 4 + (7 - 1) * (af_info.is_end - 1)) return upsample_daytime(ordinal, af_info) @@ -393,7 +396,7 @@ cdef int64_t asfreq_WtoDT(int64_t ordinal, asfreq_info *af_info): # -------------------------------------------------------------------- # Conversion _to_ BusinessDay Freq -cdef int64_t asfreq_AtoB(int64_t ordinal, asfreq_info *af_info): +cdef int64_t asfreq_AtoB(int64_t ordinal, asfreq_info *af_info) nogil: cdef: int roll_back npy_datetimestruct 
dts @@ -404,7 +407,7 @@ cdef int64_t asfreq_AtoB(int64_t ordinal, asfreq_info *af_info): return DtoB(&dts, roll_back, unix_date) -cdef int64_t asfreq_QtoB(int64_t ordinal, asfreq_info *af_info): +cdef int64_t asfreq_QtoB(int64_t ordinal, asfreq_info *af_info) nogil: cdef: int roll_back npy_datetimestruct dts @@ -415,7 +418,7 @@ cdef int64_t asfreq_QtoB(int64_t ordinal, asfreq_info *af_info): return DtoB(&dts, roll_back, unix_date) -cdef int64_t asfreq_MtoB(int64_t ordinal, asfreq_info *af_info): +cdef int64_t asfreq_MtoB(int64_t ordinal, asfreq_info *af_info) nogil: cdef: int roll_back npy_datetimestruct dts @@ -426,7 +429,7 @@ cdef int64_t asfreq_MtoB(int64_t ordinal, asfreq_info *af_info): return DtoB(&dts, roll_back, unix_date) -cdef int64_t asfreq_WtoB(int64_t ordinal, asfreq_info *af_info): +cdef int64_t asfreq_WtoB(int64_t ordinal, asfreq_info *af_info) nogil: cdef: int roll_back npy_datetimestruct dts @@ -437,7 +440,7 @@ cdef int64_t asfreq_WtoB(int64_t ordinal, asfreq_info *af_info): return DtoB(&dts, roll_back, unix_date) -cdef int64_t asfreq_DTtoB(int64_t ordinal, asfreq_info *af_info): +cdef int64_t asfreq_DTtoB(int64_t ordinal, asfreq_info *af_info) nogil: cdef: int roll_back npy_datetimestruct dts @@ -452,7 +455,7 @@ cdef int64_t asfreq_DTtoB(int64_t ordinal, asfreq_info *af_info): # ---------------------------------------------------------------------- # Conversion _from_ Daily Freq -cdef int64_t asfreq_DTtoA(int64_t ordinal, asfreq_info *af_info): +cdef int64_t asfreq_DTtoA(int64_t ordinal, asfreq_info *af_info) nogil: cdef: npy_datetimestruct dts @@ -464,7 +467,7 @@ cdef int64_t asfreq_DTtoA(int64_t ordinal, asfreq_info *af_info): return <int64_t>(dts.year - 1970) -cdef int DtoQ_yq(int64_t ordinal, asfreq_info *af_info, int *year): +cdef int DtoQ_yq(int64_t ordinal, asfreq_info *af_info, int *year) nogil: cdef: npy_datetimestruct dts int quarter @@ -485,7 +488,7 @@ cdef int DtoQ_yq(int64_t ordinal, asfreq_info *af_info, int *year): return quarter 
-cdef int64_t asfreq_DTtoQ(int64_t ordinal, asfreq_info *af_info): +cdef int64_t asfreq_DTtoQ(int64_t ordinal, asfreq_info *af_info) nogil: cdef: int year, quarter @@ -495,7 +498,7 @@ cdef int64_t asfreq_DTtoQ(int64_t ordinal, asfreq_info *af_info): return <int64_t>((year - 1970) * 4 + quarter - 1) -cdef int64_t asfreq_DTtoM(int64_t ordinal, asfreq_info *af_info): +cdef int64_t asfreq_DTtoM(int64_t ordinal, asfreq_info *af_info) nogil: cdef: npy_datetimestruct dts @@ -504,7 +507,7 @@ cdef int64_t asfreq_DTtoM(int64_t ordinal, asfreq_info *af_info): return <int64_t>((dts.year - 1970) * 12 + dts.month - 1) -cdef int64_t asfreq_DTtoW(int64_t ordinal, asfreq_info *af_info): +cdef int64_t asfreq_DTtoW(int64_t ordinal, asfreq_info *af_info) nogil: ordinal = downsample_daytime(ordinal, af_info) return (ordinal + 3 - af_info.to_end) // 7 + 1 @@ -512,30 +515,30 @@ cdef int64_t asfreq_DTtoW(int64_t ordinal, asfreq_info *af_info): # -------------------------------------------------------------------- # Conversion _from_ BusinessDay Freq -cdef int64_t asfreq_BtoDT(int64_t ordinal, asfreq_info *af_info): +cdef int64_t asfreq_BtoDT(int64_t ordinal, asfreq_info *af_info) nogil: ordinal = ((ordinal + 3) // 5) * 7 + (ordinal + 3) % 5 -3 return upsample_daytime(ordinal, af_info) -cdef int64_t asfreq_BtoA(int64_t ordinal, asfreq_info *af_info): +cdef int64_t asfreq_BtoA(int64_t ordinal, asfreq_info *af_info) nogil: return transform_via_day(ordinal, af_info, <freq_conv_func>asfreq_BtoDT, <freq_conv_func>asfreq_DTtoA) -cdef int64_t asfreq_BtoQ(int64_t ordinal, asfreq_info *af_info): +cdef int64_t asfreq_BtoQ(int64_t ordinal, asfreq_info *af_info) nogil: return transform_via_day(ordinal, af_info, <freq_conv_func>asfreq_BtoDT, <freq_conv_func>asfreq_DTtoQ) -cdef int64_t asfreq_BtoM(int64_t ordinal, asfreq_info *af_info): +cdef int64_t asfreq_BtoM(int64_t ordinal, asfreq_info *af_info) nogil: return transform_via_day(ordinal, af_info, <freq_conv_func>asfreq_BtoDT, 
<freq_conv_func>asfreq_DTtoM) -cdef int64_t asfreq_BtoW(int64_t ordinal, asfreq_info *af_info): +cdef int64_t asfreq_BtoW(int64_t ordinal, asfreq_info *af_info) nogil: return transform_via_day(ordinal, af_info, <freq_conv_func>asfreq_BtoDT, <freq_conv_func>asfreq_DTtoW) @@ -544,25 +547,25 @@ cdef int64_t asfreq_BtoW(int64_t ordinal, asfreq_info *af_info): # ---------------------------------------------------------------------- # Conversion _from_ Annual Freq -cdef int64_t asfreq_AtoA(int64_t ordinal, asfreq_info *af_info): +cdef int64_t asfreq_AtoA(int64_t ordinal, asfreq_info *af_info) nogil: return transform_via_day(ordinal, af_info, <freq_conv_func>asfreq_AtoDT, <freq_conv_func>asfreq_DTtoA) -cdef int64_t asfreq_AtoQ(int64_t ordinal, asfreq_info *af_info): +cdef int64_t asfreq_AtoQ(int64_t ordinal, asfreq_info *af_info) nogil: return transform_via_day(ordinal, af_info, <freq_conv_func>asfreq_AtoDT, <freq_conv_func>asfreq_DTtoQ) -cdef int64_t asfreq_AtoM(int64_t ordinal, asfreq_info *af_info): +cdef int64_t asfreq_AtoM(int64_t ordinal, asfreq_info *af_info) nogil: return transform_via_day(ordinal, af_info, <freq_conv_func>asfreq_AtoDT, <freq_conv_func>asfreq_DTtoM) -cdef int64_t asfreq_AtoW(int64_t ordinal, asfreq_info *af_info): +cdef int64_t asfreq_AtoW(int64_t ordinal, asfreq_info *af_info) nogil: return transform_via_day(ordinal, af_info, <freq_conv_func>asfreq_AtoDT, <freq_conv_func>asfreq_DTtoW) @@ -571,25 +574,25 @@ cdef int64_t asfreq_AtoW(int64_t ordinal, asfreq_info *af_info): # ---------------------------------------------------------------------- # Conversion _from_ Quarterly Freq -cdef int64_t asfreq_QtoQ(int64_t ordinal, asfreq_info *af_info): +cdef int64_t asfreq_QtoQ(int64_t ordinal, asfreq_info *af_info) nogil: return transform_via_day(ordinal, af_info, <freq_conv_func>asfreq_QtoDT, <freq_conv_func>asfreq_DTtoQ) -cdef int64_t asfreq_QtoA(int64_t ordinal, asfreq_info *af_info): +cdef int64_t asfreq_QtoA(int64_t ordinal, asfreq_info *af_info) 
nogil: return transform_via_day(ordinal, af_info, <freq_conv_func>asfreq_QtoDT, <freq_conv_func>asfreq_DTtoA) -cdef int64_t asfreq_QtoM(int64_t ordinal, asfreq_info *af_info): +cdef int64_t asfreq_QtoM(int64_t ordinal, asfreq_info *af_info) nogil: return transform_via_day(ordinal, af_info, <freq_conv_func>asfreq_QtoDT, <freq_conv_func>asfreq_DTtoM) -cdef int64_t asfreq_QtoW(int64_t ordinal, asfreq_info *af_info): +cdef int64_t asfreq_QtoW(int64_t ordinal, asfreq_info *af_info) nogil: return transform_via_day(ordinal, af_info, <freq_conv_func>asfreq_QtoDT, <freq_conv_func>asfreq_DTtoW) @@ -598,19 +601,19 @@ cdef int64_t asfreq_QtoW(int64_t ordinal, asfreq_info *af_info): # ---------------------------------------------------------------------- # Conversion _from_ Monthly Freq -cdef int64_t asfreq_MtoA(int64_t ordinal, asfreq_info *af_info): +cdef int64_t asfreq_MtoA(int64_t ordinal, asfreq_info *af_info) nogil: return transform_via_day(ordinal, af_info, <freq_conv_func>asfreq_MtoDT, <freq_conv_func>asfreq_DTtoA) -cdef int64_t asfreq_MtoQ(int64_t ordinal, asfreq_info *af_info): +cdef int64_t asfreq_MtoQ(int64_t ordinal, asfreq_info *af_info) nogil: return transform_via_day(ordinal, af_info, <freq_conv_func>asfreq_MtoDT, <freq_conv_func>asfreq_DTtoQ) -cdef int64_t asfreq_MtoW(int64_t ordinal, asfreq_info *af_info): +cdef int64_t asfreq_MtoW(int64_t ordinal, asfreq_info *af_info) nogil: return transform_via_day(ordinal, af_info, <freq_conv_func>asfreq_MtoDT, <freq_conv_func>asfreq_DTtoW) @@ -619,25 +622,25 @@ cdef int64_t asfreq_MtoW(int64_t ordinal, asfreq_info *af_info): # ---------------------------------------------------------------------- # Conversion _from_ Weekly Freq -cdef int64_t asfreq_WtoA(int64_t ordinal, asfreq_info *af_info): +cdef int64_t asfreq_WtoA(int64_t ordinal, asfreq_info *af_info) nogil: return transform_via_day(ordinal, af_info, <freq_conv_func>asfreq_WtoDT, <freq_conv_func>asfreq_DTtoA) -cdef int64_t asfreq_WtoQ(int64_t ordinal, asfreq_info 
*af_info): +cdef int64_t asfreq_WtoQ(int64_t ordinal, asfreq_info *af_info) nogil: return transform_via_day(ordinal, af_info, <freq_conv_func>asfreq_WtoDT, <freq_conv_func>asfreq_DTtoQ) -cdef int64_t asfreq_WtoM(int64_t ordinal, asfreq_info *af_info): +cdef int64_t asfreq_WtoM(int64_t ordinal, asfreq_info *af_info) nogil: return transform_via_day(ordinal, af_info, <freq_conv_func>asfreq_WtoDT, <freq_conv_func>asfreq_DTtoM) -cdef int64_t asfreq_WtoW(int64_t ordinal, asfreq_info *af_info): +cdef int64_t asfreq_WtoW(int64_t ordinal, asfreq_info *af_info) nogil: return transform_via_day(ordinal, af_info, <freq_conv_func>asfreq_WtoDT, <freq_conv_func>asfreq_DTtoW) @@ -971,7 +974,7 @@ cdef int get_yq(int64_t ordinal, int freq, int *quarter, int *year): return qtr_freq -cdef inline int month_to_quarter(int month): +cdef inline int month_to_quarter(int month) nogil: return (month - 1) // 3 + 1 @@ -1024,9 +1027,6 @@ def periodarr_to_dt64arr(int64_t[:] periodarr, int freq): with nogil: for i in range(l): - if periodarr[i] == NPY_NAT: - out[i] = NPY_NAT - continue out[i] = period_ordinal_to_dt64(periodarr[i], freq) return out.base # .base to access underlying np.ndarray diff --git a/pandas/_libs/tslibs/src/datetime/np_datetime.c b/pandas/_libs/tslibs/src/datetime/np_datetime.c index 866c9ca9d3ac7..87866d804503e 100644 --- a/pandas/_libs/tslibs/src/datetime/np_datetime.c +++ b/pandas/_libs/tslibs/src/datetime/np_datetime.c @@ -30,7 +30,7 @@ This file is derived from NumPy 1.7. 
See NUMPY_LICENSE.txt #if PY_MAJOR_VERSION >= 3 #define PyInt_AsLong PyLong_AsLong -#endif +#endif // PyInt_AsLong const npy_datetimestruct _NS_MIN_DTS = { 1677, 9, 21, 0, 12, 43, 145225, 0, 0}; diff --git a/pandas/_libs/tslibs/src/datetime/np_datetime_strings.c b/pandas/_libs/tslibs/src/datetime/np_datetime_strings.c index 05ccdd13598fb..207da4b8f8340 100644 --- a/pandas/_libs/tslibs/src/datetime/np_datetime_strings.c +++ b/pandas/_libs/tslibs/src/datetime/np_datetime_strings.c @@ -609,7 +609,7 @@ int make_iso_8601_datetime(npy_datetimestruct *dts, char *outstr, int outlen, tmplen = _snprintf(substr, sublen, "%04" NPY_INT64_FMT, dts->year); #else tmplen = snprintf(substr, sublen, "%04" NPY_INT64_FMT, dts->year); -#endif +#endif // _WIN32 /* If it ran out of space or there isn't space for the NULL terminator */ if (tmplen < 0 || tmplen > sublen) { goto string_too_short; diff --git a/pandas/_libs/tslibs/strptime.pyx b/pandas/_libs/tslibs/strptime.pyx index 87658ae92175e..e7674c5c364a1 100644 --- a/pandas/_libs/tslibs/strptime.pyx +++ b/pandas/_libs/tslibs/strptime.pyx @@ -54,7 +54,10 @@ cdef dict _parse_code_table = {'y': 0, 'W': 16, 'Z': 17, 'p': 18, # an additional key, only with I - 'z': 19} + 'z': 19, + 'G': 20, + 'V': 21, + 'u': 22} def array_strptime(object[:] values, object fmt, @@ -77,6 +80,7 @@ def array_strptime(object[:] values, object fmt, object[:] result_timezone int year, month, day, minute, hour, second, weekday, julian int week_of_year, week_of_year_start, parse_code, ordinal + int iso_week, iso_year int64_t us, ns object val, group_key, ampm, found, timezone dict found_key @@ -169,13 +173,14 @@ def array_strptime(object[:] values, object fmt, raise ValueError("time data %r does not match format " "%r (search)" % (values[i], fmt)) + iso_year = -1 year = 1900 month = day = 1 hour = minute = second = ns = us = 0 timezone = None # Default to -1 to signify that values not known; not critical to have, # though - week_of_year = -1 + iso_week = 
week_of_year = -1 week_of_year_start = -1 # weekday and julian defaulted to -1 so as to signal need to calculate # values @@ -265,13 +270,44 @@ def array_strptime(object[:] values, object fmt, timezone = pytz.timezone(found_dict['Z']) elif parse_code == 19: timezone = parse_timezone_directive(found_dict['z']) + elif parse_code == 20: + iso_year = int(found_dict['G']) + elif parse_code == 21: + iso_week = int(found_dict['V']) + elif parse_code == 22: + weekday = int(found_dict['u']) + weekday -= 1 + + # don't assume default values for ISO week/year + if iso_year != -1: + if iso_week == -1 or weekday == -1: + raise ValueError("ISO year directive '%G' must be used with " + "the ISO week directive '%V' and a weekday " + "directive '%A', '%a', '%w', or '%u'.") + if julian != -1: + raise ValueError("Day of the year directive '%j' is not " + "compatible with ISO year directive '%G'. " + "Use '%Y' instead.") + elif year != -1 and week_of_year == -1 and iso_week != -1: + if weekday == -1: + raise ValueError("ISO week directive '%V' must be used with " + "the ISO year directive '%G' and a weekday " + "directive '%A', '%a', '%w', or '%u'.") + else: + raise ValueError("ISO week directive '%V' is incompatible with" + " the year directive '%Y'. Use the ISO year " + "'%G' instead.") # If we know the wk of the year and what day of that wk, we can figure # out the Julian day of the year. 
- if julian == -1 and week_of_year != -1 and weekday != -1: - week_starts_Mon = True if week_of_year_start == 0 else False - julian = _calc_julian_from_U_or_W(year, week_of_year, weekday, - week_starts_Mon) + if julian == -1 and weekday != -1: + if week_of_year != -1: + week_starts_Mon = week_of_year_start == 0 + julian = _calc_julian_from_U_or_W(year, week_of_year, weekday, + week_starts_Mon) + elif iso_year != -1 and iso_week != -1: + year, julian = _calc_julian_from_V(iso_year, iso_week, + weekday + 1) # Cannot pre-calculate datetime_date() since can change in Julian # calculation and thus could have different value for the day of the wk # calculation. @@ -511,6 +547,7 @@ class TimeRE(dict): # The " \d" part of the regex is to make %c from ANSI C work 'd': r"(?P<d>3[0-1]|[1-2]\d|0[1-9]|[1-9]| [1-9])", 'f': r"(?P<f>[0-9]{1,9})", + 'G': r"(?P<G>\d\d\d\d)", 'H': r"(?P<H>2[0-3]|[0-1]\d|\d)", 'I': r"(?P<I>1[0-2]|0[1-9]|[1-9])", 'j': (r"(?P<j>36[0-6]|3[0-5]\d|[1-2]\d\d|0[1-9]\d|00[1-9]|" @@ -518,7 +555,9 @@ class TimeRE(dict): 'm': r"(?P<m>1[0-2]|0[1-9]|[1-9])", 'M': r"(?P<M>[0-5]\d|\d)", 'S': r"(?P<S>6[0-1]|[0-5]\d|\d)", + 'u': r"(?P<u>[1-7])", 'U': r"(?P<U>5[0-3]|[0-4]\d|\d)", + 'V': r"(?P<V>5[0-3]|0[1-9]|[1-4]\d|\d)", 'w': r"(?P<w>[0-6])", # W is set below by using 'U' 'y': r"(?P<y>\d\d)", @@ -597,7 +636,16 @@ cdef _calc_julian_from_U_or_W(int year, int week_of_year, int day_of_week, int week_starts_Mon): """Calculate the Julian day based on the year, week of the year, and day of the week, with week_start_day representing whether the week of the year - assumes the week starts on Sunday or Monday (6 or 0).""" + assumes the week starts on Sunday or Monday (6 or 0). + + :param year: the year + :param week_of_year: week taken from format U or W + :param day_of_week: weekday + :param week_starts_Mon: represent whether the week of the year + assumes the week starts on Sunday or Monday (6 or 0) + :returns: converted julian day. 
+ :rtype: int + """ cdef: int first_weekday, week_0_length, days_to_week @@ -620,6 +668,31 @@ cdef _calc_julian_from_U_or_W(int year, int week_of_year, return 1 + days_to_week + day_of_week +cdef _calc_julian_from_V(int iso_year, int iso_week, int iso_weekday): + """Calculate the Julian day based on the ISO 8601 year, week, and weekday. + ISO weeks start on Mondays, with week 01 being the week containing 4 Jan. + ISO week days range from 1 (Monday) to 7 (Sunday). + + :param iso_year: the year taken from format %G + :param iso_week: the week taken from format %V + :param iso_weekday: weekday taken from format %u + :returns: the passed in year and the Gregorian ordinal date / julian date system + :rtype: (int, int) + """ + + cdef: + int correction, ordinal + correction = datetime_date(iso_year, 1, 4).isoweekday() + 3 + ordinal = (iso_week * 7) + iso_weekday - correction + # ordinal may be negative or 0 now, which means the date is in the previous + # calendar year + if ordinal < 1: + ordinal += datetime_date(iso_year, 1, 1).toordinal() + iso_year -= 1 + ordinal -= datetime_date(iso_year, 1, 1).toordinal() + return iso_year, ordinal + + cdef parse_timezone_directive(object z): """ Parse the '%z' directive and return a pytz.FixedOffset diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx index 0a19d8749fc7c..6e40063fb925a 100644 --- a/pandas/_libs/tslibs/timedeltas.pyx +++ b/pandas/_libs/tslibs/timedeltas.pyx @@ -824,6 +824,26 @@ cdef class _Timedelta(timedelta): """ Returns a numpy.timedelta64 object with 'ns' precision """ return np.timedelta64(self.value, 'ns') + def to_numpy(self, dtype=None, copy=False): + """ + Convert the Timestamp to a NumPy timedelta64. + + .. versionadded:: 0.25.0 + + This is an alias method for `Timedelta.to_timedelta64()`. The dtype and + copy parameters are available here only for compatibility. Their values + will not affect the return value. 
+ + Returns + ------- + numpy.timedelta64 + + See Also + -------- + Series.to_numpy : Similar method for Series. + """ + return self.to_timedelta64() + def total_seconds(self): """ Total duration of timedelta in seconds (to ns precision) @@ -1127,10 +1147,11 @@ class Timedelta(_Timedelta): 'ms', 'milliseconds', 'millisecond', 'milli', 'millis', 'L', 'us', 'microseconds', 'microsecond', 'micro', 'micros', 'U', 'ns', 'nanoseconds', 'nano', 'nanos', 'nanosecond', 'N'} - days, seconds, microseconds, - milliseconds, minutes, hours, weeks : numeric, optional + **kwargs + Available kwargs: {days, seconds, microseconds, + milliseconds, minutes, hours, weeks}. Values for construction in compat with datetime.timedelta. - np ints and floats will be coerced to python ints and floats. + Numpy ints and floats will be coerced to python ints and floats. Notes ----- @@ -1158,6 +1179,11 @@ class Timedelta(_Timedelta): "[weeks, days, hours, minutes, seconds, " "milliseconds, microseconds, nanoseconds]") + if unit in {'Y', 'y', 'M'}: + warnings.warn("M and Y units are deprecated and " + "will be removed in a future version.", + FutureWarning, stacklevel=1) + if isinstance(value, Timedelta): value = value.value elif is_string_object(value): diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx index fe0564cb62c30..8d825e0a6179e 100644 --- a/pandas/_libs/tslibs/timestamps.pyx +++ b/pandas/_libs/tslibs/timestamps.pyx @@ -1,4 +1,5 @@ # -*- coding: utf-8 -*- +import sys import warnings from cpython cimport (PyObject_RichCompareBool, PyObject_RichCompare, @@ -43,10 +44,11 @@ from pandas._libs.tslibs.timezones import UTC # Constants _zero_time = datetime_time(0, 0) _no_input = object() - +PY36 = sys.version_info >= (3, 6) # ---------------------------------------------------------------------- + def maybe_integer_op_deprecated(obj): # GH#22535 add/sub of integers and int-arrays is deprecated if obj.freq is not None: @@ -197,7 +199,7 @@ def round_nsint64(values, 
mode, freq): # This is PITA. Because we inherit from datetime, which has very specific # construction requirements, we need to do object instantiation in python -# (see Timestamp class above). This will serve as a C extension type that +# (see Timestamp class below). This will serve as a C extension type that # shadows the python class, where we do any heavy lifting. cdef class _Timestamp(datetime): @@ -338,9 +340,31 @@ cdef class _Timestamp(datetime): self.microsecond, self.tzinfo) cpdef to_datetime64(self): - """ Returns a numpy.datetime64 object with 'ns' precision """ + """ + Return a numpy.datetime64 object with 'ns' precision. + """ return np.datetime64(self.value, 'ns') + def to_numpy(self, dtype=None, copy=False): + """ + Convert the Timestamp to a NumPy datetime64. + + .. versionadded:: 0.25.0 + + This is an alias method for `Timestamp.to_datetime64()`. The dtype and + copy parameters are available here only for compatibility. Their values + will not affect the return value. + + Returns + ------- + numpy.datetime64 + + See Also + -------- + DatetimeIndex.to_numpy : Similar method for DatetimeIndex. + """ + return self.to_datetime64() + def __add__(self, other): cdef: int64_t other_int, nanos @@ -500,6 +524,9 @@ cdef class _Timestamp(datetime): @property def asm8(self): + """ + Return numpy datetime64 format in nanoseconds. 
+ """ return np.datetime64(self.value, 'ns') @property @@ -566,15 +593,18 @@ class Timestamp(_Timestamp): Using the primary calling convention: This converts a datetime-like string + >>> pd.Timestamp('2017-01-01T12') Timestamp('2017-01-01 12:00:00') This converts a float representing a Unix epoch in units of seconds + >>> pd.Timestamp(1513393355.5, unit='s') Timestamp('2017-12-16 03:02:35.500000') This converts an int representing a Unix-epoch in units of seconds and for a particular timezone + >>> pd.Timestamp(1513393355, unit='s', tz='US/Pacific') Timestamp('2017-12-15 19:02:35-0800', tz='US/Pacific') @@ -612,7 +642,7 @@ class Timestamp(_Timestamp): """ Timestamp.now(tz=None) - Returns new Timestamp object representing current time local to + Return new Timestamp object representing current time local to tz. Parameters @@ -667,10 +697,21 @@ class Timestamp(_Timestamp): """ return cls(datetime.fromtimestamp(ts)) + # Issue 25016. + @classmethod + def strptime(cls, date_string, format): + """ + Timestamp.strptime(string, format) + + Function is not implemented. Use pd.to_datetime(). + """ + raise NotImplementedError("Timestamp.strptime() is not implmented." + "Use to_datetime() to parse date strings.") + @classmethod def combine(cls, date, time): """ - Timsetamp.combine(date, time) + Timestamp.combine(date, time) date, time -> datetime with same date and time fields """ @@ -930,6 +971,9 @@ class Timestamp(_Timestamp): @property def dayofweek(self): + """ + Return day of whe week. + """ return self.weekday() def day_name(self, locale=None): @@ -979,30 +1023,48 @@ class Timestamp(_Timestamp): @property def dayofyear(self): + """ + Return the day of the year. + """ return ccalendar.get_day_of_year(self.year, self.month, self.day) @property def week(self): + """ + Return the week number of the year. + """ return ccalendar.get_week_of_year(self.year, self.month, self.day) weekofyear = week @property def quarter(self): + """ + Return the quarter of the year. 
+ """ return ((self.month - 1) // 3) + 1 @property def days_in_month(self): + """ + Return the number of days in the month. + """ return ccalendar.get_days_in_month(self.year, self.month) daysinmonth = days_in_month @property def freqstr(self): + """ + Return the total number of days in the month. + """ return getattr(self.freq, 'freqstr', self.freq) @property def is_month_start(self): + """ + Return True if date is first day of month. + """ if self.freq is None: # fast-path for non-business frequencies return self.day == 1 @@ -1010,6 +1072,9 @@ class Timestamp(_Timestamp): @property def is_month_end(self): + """ + Return True if date is last day of month. + """ if self.freq is None: # fast-path for non-business frequencies return self.day == self.days_in_month @@ -1017,6 +1082,9 @@ class Timestamp(_Timestamp): @property def is_quarter_start(self): + """ + Return True if date is first day of the quarter. + """ if self.freq is None: # fast-path for non-business frequencies return self.day == 1 and self.month % 3 == 1 @@ -1024,6 +1092,9 @@ class Timestamp(_Timestamp): @property def is_quarter_end(self): + """ + Return True if date is last day of the quarter. + """ if self.freq is None: # fast-path for non-business frequencies return (self.month % 3) == 0 and self.day == self.days_in_month @@ -1031,6 +1102,9 @@ class Timestamp(_Timestamp): @property def is_year_start(self): + """ + Return True if date is first day of the year. + """ if self.freq is None: # fast-path for non-business frequencies return self.day == self.month == 1 @@ -1038,6 +1112,9 @@ class Timestamp(_Timestamp): @property def is_year_end(self): + """ + Return True if date is last day of the year. + """ if self.freq is None: # fast-path for non-business frequencies return self.month == 12 and self.day == 31 @@ -1045,6 +1122,9 @@ class Timestamp(_Timestamp): @property def is_leap_year(self): + """ + Return True if year is a leap year. 
+ """ return bool(ccalendar.is_leapyear(self.year)) def tz_localize(self, tz, ambiguous='raise', nonexistent='raise', @@ -1138,12 +1218,12 @@ class Timestamp(_Timestamp): value = tz_localize_to_utc(np.array([self.value], dtype='i8'), tz, ambiguous=ambiguous, nonexistent=nonexistent)[0] - return Timestamp(value, tz=tz) + return Timestamp(value, tz=tz, freq=self.freq) else: if tz is None: # reset tz value = tz_convert_single(self.value, UTC, self.tz) - return Timestamp(value, tz=None) + return Timestamp(value, tz=tz, freq=self.freq) else: raise TypeError('Cannot localize tz-aware Timestamp, use ' 'tz_convert for conversions') @@ -1173,7 +1253,7 @@ class Timestamp(_Timestamp): 'tz_localize to localize') else: # Same UTC timestamp, different time zone - return Timestamp(self.value, tz=tz) + return Timestamp(self.value, tz=tz, freq=self.freq) astimezone = tz_convert @@ -1195,7 +1275,6 @@ class Timestamp(_Timestamp): nanosecond : int, optional tzinfo : tz-convertible, optional fold : int, optional, default is 0 - added in 3.6, NotImplemented Returns ------- @@ -1252,12 +1331,16 @@ class Timestamp(_Timestamp): # see GH#18319 ts_input = _tzinfo.localize(datetime(dts.year, dts.month, dts.day, dts.hour, dts.min, dts.sec, - dts.us)) + dts.us), + is_dst=not bool(fold)) _tzinfo = ts_input.tzinfo else: - ts_input = datetime(dts.year, dts.month, dts.day, - dts.hour, dts.min, dts.sec, dts.us, - tzinfo=_tzinfo) + kwargs = {'year': dts.year, 'month': dts.month, 'day': dts.day, + 'hour': dts.hour, 'minute': dts.min, 'second': dts.sec, + 'microsecond': dts.us, 'tzinfo': _tzinfo} + if PY36: + kwargs['fold'] = fold + ts_input = datetime(**kwargs) ts = convert_datetime_to_tsobject(ts_input, _tzinfo) value = ts.value + (dts.ps // 1000) diff --git a/pandas/compat/__init__.py b/pandas/compat/__init__.py index f9c659106a516..4036af85b7212 100644 --- a/pandas/compat/__init__.py +++ b/pandas/compat/__init__.py @@ -9,7 +9,6 @@ * lists: lrange(), lmap(), lzip(), lfilter() * unicode: u() [no 
unicode builtin in Python 3] * longs: long (int in Python 3) -* callable * iterable method compatibility: iteritems, iterkeys, itervalues * Uses the original method if available, otherwise uses items, keys, values. * types: @@ -138,6 +137,7 @@ def lfilter(*args, **kwargs): reload = reload Hashable = collections.abc.Hashable Iterable = collections.abc.Iterable + Iterator = collections.abc.Iterator Mapping = collections.abc.Mapping MutableMapping = collections.abc.MutableMapping Sequence = collections.abc.Sequence @@ -200,6 +200,7 @@ def get_range_parameters(data): Hashable = collections.Hashable Iterable = collections.Iterable + Iterator = collections.Iterator Mapping = collections.Mapping MutableMapping = collections.MutableMapping Sequence = collections.Sequence @@ -378,14 +379,6 @@ class ResourceWarning(Warning): string_and_binary_types = string_types + (binary_type,) -try: - # callable reintroduced in later versions of Python - callable = callable -except NameError: - def callable(obj): - return any("__call__" in klass.__dict__ for klass in type(obj).__mro__) - - if PY2: # In PY2 functools.wraps doesn't provide metadata pytest needs to generate # decorated tests using parametrization. 
See pytest GH issue #2782 @@ -411,8 +404,6 @@ def wrapper(cls): return metaclass(cls.__name__, cls.__bases__, orig_vars) return wrapper -from collections import OrderedDict, Counter - if PY3: def raise_with_traceback(exc, traceback=Ellipsis): if traceback == Ellipsis: diff --git a/pandas/compat/numpy/__init__.py b/pandas/compat/numpy/__init__.py index 5e67cf2ee2837..6e9f768d8bd68 100644 --- a/pandas/compat/numpy/__init__.py +++ b/pandas/compat/numpy/__init__.py @@ -12,6 +12,8 @@ _np_version_under1p13 = _nlv < LooseVersion('1.13') _np_version_under1p14 = _nlv < LooseVersion('1.14') _np_version_under1p15 = _nlv < LooseVersion('1.15') +_np_version_under1p16 = _nlv < LooseVersion('1.16') +_np_version_under1p17 = _nlv < LooseVersion('1.17') if _nlv < '1.12': @@ -64,5 +66,7 @@ def np_array_datetime64_compat(arr, *args, **kwargs): __all__ = ['np', '_np_version_under1p13', '_np_version_under1p14', - '_np_version_under1p15' + '_np_version_under1p15', + '_np_version_under1p16', + '_np_version_under1p17' ] diff --git a/pandas/compat/numpy/function.py b/pandas/compat/numpy/function.py index 417ddd0d8af17..f15783ad642b4 100644 --- a/pandas/compat/numpy/function.py +++ b/pandas/compat/numpy/function.py @@ -17,10 +17,10 @@ and methods that are spread throughout the codebase. This module will make it easier to adjust to future upstream changes in the analogous numpy signatures. 
""" +from collections import OrderedDict from numpy import ndarray -from pandas.compat import OrderedDict from pandas.errors import UnsupportedFunctionCall from pandas.util._validators import ( validate_args, validate_args_and_kwargs, validate_kwargs) diff --git a/pandas/compat/pickle_compat.py b/pandas/compat/pickle_compat.py index 61295b8249f58..8f16f8154b952 100644 --- a/pandas/compat/pickle_compat.py +++ b/pandas/compat/pickle_compat.py @@ -201,7 +201,7 @@ def load_newobj_ex(self): pass -def load(fh, encoding=None, compat=False, is_verbose=False): +def load(fh, encoding=None, is_verbose=False): """load a pickle, with a provided encoding if compat is True: @@ -212,7 +212,6 @@ def load(fh, encoding=None, compat=False, is_verbose=False): ---------- fh : a filelike object encoding : an optional encoding - compat : provide Series compatibility mode, boolean, default False is_verbose : show exception output """ diff --git a/pandas/core/accessor.py b/pandas/core/accessor.py index 961488ff12e58..050749741e7bd 100644 --- a/pandas/core/accessor.py +++ b/pandas/core/accessor.py @@ -16,11 +16,15 @@ class DirNamesMixin(object): ['asobject', 'base', 'data', 'flags', 'itemsize', 'strides']) def _dir_deletions(self): - """ delete unwanted __dir__ for this object """ + """ + Delete unwanted __dir__ for this object. + """ return self._accessors | self._deprecations def _dir_additions(self): - """ add additional __dir__ for this object """ + """ + Add additional __dir__ for this object. + """ rv = set() for accessor in self._accessors: try: @@ -33,7 +37,7 @@ def _dir_additions(self): def __dir__(self): """ Provide method name lookup and completion - Only provide 'public' methods + Only provide 'public' methods. 
""" rv = set(dir(type(self))) rv = (rv - self._dir_deletions()) | self._dir_additions() @@ -42,7 +46,7 @@ def __dir__(self): class PandasDelegate(object): """ - an abstract base class for delegating methods/properties + An abstract base class for delegating methods/properties. """ def _delegate_property_get(self, name, *args, **kwargs): @@ -65,10 +69,10 @@ def _add_delegate_accessors(cls, delegate, accessors, typ, ---------- cls : the class to add the methods/properties to delegate : the class to get methods/properties & doc-strings - acccessors : string list of accessors to add + accessors : string list of accessors to add typ : 'property' or 'method' overwrite : boolean, default False - overwrite the method/property in the target class if it exists + overwrite the method/property in the target class if it exists. """ def _create_delegator_property(name): @@ -117,7 +121,7 @@ def delegate_names(delegate, accessors, typ, overwrite=False): ---------- delegate : object the class to get methods/properties & doc-strings - acccessors : Sequence[str] + accessors : Sequence[str] List of accessor to add typ : {'property', 'method'} overwrite : boolean, default False diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index b473a7aef929e..4a71951e2435e 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -19,7 +19,7 @@ ensure_float64, ensure_int64, ensure_object, ensure_platform_int, ensure_uint64, is_array_like, is_bool_dtype, is_categorical_dtype, is_complex_dtype, is_datetime64_any_dtype, is_datetime64tz_dtype, - is_datetimelike, is_extension_array_dtype, is_float_dtype, + is_datetimelike, is_extension_array_dtype, is_float_dtype, is_integer, is_integer_dtype, is_interval_dtype, is_list_like, is_numeric_dtype, is_object_dtype, is_period_dtype, is_scalar, is_signed_integer_dtype, is_sparse, is_timedelta64_dtype, is_unsigned_integer_dtype, @@ -288,15 +288,20 @@ def unique(values): Returns ------- - unique values. 
- - If the input is an Index, the return is an Index - - If the input is a Categorical dtype, the return is a Categorical - - If the input is a Series/ndarray, the return will be an ndarray + numpy.ndarray or ExtensionArray + + The return can be: + + * Index : when the input is an Index + * Categorical : when the input is a Categorical dtype + * ndarray : when the input is a Series/ndarray + + Return numpy.ndarray or ExtensionArray. See Also -------- - pandas.Index.unique - pandas.Series.unique + Index.unique + Series.unique Examples -------- @@ -563,7 +568,7 @@ def _factorize_array(values, na_sentinel=-1, size_hint=None, coerced to ndarrays before factorization. """), order=dedent("""\ - order + order : None .. deprecated:: 0.23.0 This parameter has no effect and is deprecated. @@ -1724,6 +1729,89 @@ def func(arr, indexer, out, fill_value=np.nan): return out +# ------------ # +# searchsorted # +# ------------ # + +def searchsorted(arr, value, side="left", sorter=None): + """ + Find indices where elements should be inserted to maintain order. + + .. versionadded:: 0.25.0 + + Find the indices into a sorted array `arr` (a) such that, if the + corresponding elements in `value` were inserted before the indices, + the order of `arr` would be preserved. + + Assuming that `arr` is sorted: + + ====== ================================ + `side` returned index `i` satisfies + ====== ================================ + left ``arr[i-1] < value <= self[i]`` + right ``arr[i-1] <= value < self[i]`` + ====== ================================ + + Parameters + ---------- + arr: array-like + Input array. If `sorter` is None, then it must be sorted in + ascending order, otherwise `sorter` must be an array of indices + that sort it. + value : array_like + Values to insert into `arr`. + side : {'left', 'right'}, optional + If 'left', the index of the first suitable location found is given. + If 'right', return the last such index. 
If there is no suitable + index, return either 0 or N (where N is the length of `self`). + sorter : 1-D array_like, optional + Optional array of integer indices that sort array a into ascending + order. They are typically the result of argsort. + + Returns + ------- + array of ints + Array of insertion points with the same shape as `value`. + + See Also + -------- + numpy.searchsorted : Similar method from NumPy. + """ + if sorter is not None: + sorter = ensure_platform_int(sorter) + + if isinstance(arr, np.ndarray) and is_integer_dtype(arr) and ( + is_integer(value) or is_integer_dtype(value)): + from .arrays.array_ import array + # if `arr` and `value` have different dtypes, `arr` would be + # recast by numpy, causing a slow search. + # Before searching below, we therefore try to give `value` the + # same dtype as `arr`, while guarding against integer overflows. + iinfo = np.iinfo(arr.dtype.type) + value_arr = np.array([value]) if is_scalar(value) else np.array(value) + if (value_arr >= iinfo.min).all() and (value_arr <= iinfo.max).all(): + # value within bounds, so no overflow, so can convert value dtype + # to dtype of arr + dtype = arr.dtype + else: + dtype = value_arr.dtype + + if is_scalar(value): + value = dtype.type(value) + else: + value = array(value, dtype=dtype) + elif not (is_object_dtype(arr) or is_numeric_dtype(arr) or + is_categorical_dtype(arr)): + from pandas.core.series import Series + # E.g. 
if `arr` is an array with dtype='datetime64[ns]' + # and `value` is a pd.Timestamp, we may need to convert value + value_ser = Series(value)._values + value = value_ser[0] if is_scalar(value) else value_ser + + result = arr.searchsorted(value, side=side, sorter=sorter) + return result + + # ---- # # diff # # ---- # diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py index 7aaefef3d03e5..e770281596134 100644 --- a/pandas/core/arrays/base.py +++ b/pandas/core/arrays/base.py @@ -555,17 +555,17 @@ def searchsorted(self, value, side="left", sorter=None): .. versionadded:: 0.24.0 Find the indices into a sorted array `self` (a) such that, if the - corresponding elements in `v` were inserted before the indices, the - order of `self` would be preserved. + corresponding elements in `value` were inserted before the indices, + the order of `self` would be preserved. - Assuming that `a` is sorted: + Assuming that `self` is sorted: - ====== ============================ + ====== ================================ `side` returned index `i` satisfies - ====== ============================ - left ``self[i-1] < v <= self[i]`` - right ``self[i-1] <= v < self[i]`` - ====== ============================ + ====== ================================ + left ``self[i-1] < value <= self[i]`` + right ``self[i-1] <= value < self[i]`` + ====== ================================ Parameters ---------- @@ -581,7 +581,7 @@ def searchsorted(self, value, side="left", sorter=None): Returns ------- - indices : array of ints + array of ints Array of insertion points with the same shape as `value`. 
See Also diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index 35b662eaae9a5..7f77a5dcce613 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -214,7 +214,7 @@ def contains(cat, key, container): class Categorical(ExtensionArray, PandasObject): """ - Represents a categorical variable in classic R / S-plus fashion + Represent a categorical variable in classic R / S-plus fashion. `Categoricals` can only take on only a limited, and usually fixed, number of possible values (`categories`). In contrast to statistical categorical @@ -235,7 +235,7 @@ class Categorical(ExtensionArray, PandasObject): The unique categories for this categorical. If not given, the categories are assumed to be the unique values of `values` (sorted, if possible, otherwise in the order in which they appear). - ordered : boolean, (default False) + ordered : bool, default False Whether or not this categorical is treated as a ordered categorical. If True, the resulting categorical will be ordered. An ordered categorical respects, when sorted, the order of its @@ -253,7 +253,7 @@ class Categorical(ExtensionArray, PandasObject): codes : ndarray The codes (integer positions, which point to the categories) of this categorical, read only. - ordered : boolean + ordered : bool Whether or not this Categorical is ordered. dtype : CategoricalDtype The instance of ``CategoricalDtype`` storing the ``categories`` @@ -276,7 +276,7 @@ class Categorical(ExtensionArray, PandasObject): See Also -------- - pandas.api.types.CategoricalDtype : Type for categorical data. + api.types.CategoricalDtype : Type for categorical data. CategoricalIndex : An Index with an underlying ``Categorical``. Notes @@ -297,7 +297,7 @@ class Categorical(ExtensionArray, PandasObject): Ordered `Categoricals` can be sorted according to the custom order of the categories and can have a min and max value. 
- >>> c = pd.Categorical(['a','b','c','a','b','c'], ordered=True, + >>> c = pd.Categorical(['a', 'b', 'c', 'a', 'b', 'c'], ordered=True, ... categories=['c', 'b', 'a']) >>> c [a, b, c, a, b, c] @@ -323,14 +323,6 @@ def __init__(self, values, categories=None, ordered=None, dtype=None, # we may have dtype.categories be None, and we need to # infer categories in a factorization step futher below - if is_categorical(values): - # GH23814, for perf, if values._values already an instance of - # Categorical, set values to codes, and run fastpath - if (isinstance(values, (ABCSeries, ABCIndexClass)) and - isinstance(values._values, type(self))): - values = values._values.codes.copy() - fastpath = True - if fastpath: self._codes = coerce_indexer_dtype(values, dtype.categories) self._dtype = self._dtype.update_dtype(dtype) @@ -382,7 +374,7 @@ def __init__(self, values, categories=None, ordered=None, dtype=None, dtype = CategoricalDtype(categories, dtype.ordered) elif is_categorical_dtype(values): - old_codes = (values.cat.codes if isinstance(values, ABCSeries) + old_codes = (values._values.codes if isinstance(values, ABCSeries) else values.codes) codes = _recode_for_categories(old_codes, values.dtype.categories, dtype.categories) @@ -618,7 +610,7 @@ def from_codes(cls, codes, categories=None, ordered=None, dtype=None): ---------- codes : array-like, integers An integer array, where each integer points to a category in - categories or dtype.categories, or else is -1 for NaN + categories or dtype.categories, or else is -1 for NaN. categories : index-like, optional The categories for the categorical. Items need to be unique. 
If the categories are not given here, then they must be provided @@ -700,7 +692,7 @@ def _set_categories(self, categories, fastpath=False): Parameters ---------- - fastpath : boolean (default: False) + fastpath : bool, default False Don't perform validation of the categories for uniqueness or nulls Examples @@ -747,15 +739,15 @@ def _set_dtype(self, dtype): def set_ordered(self, value, inplace=False): """ - Sets the ordered attribute to the boolean value + Set the ordered attribute to the boolean value. Parameters ---------- - value : boolean to set whether this categorical is ordered (True) or - not (False) - inplace : boolean (default: False) - Whether or not to set the ordered attribute inplace or return a copy - of this categorical with ordered set to the value + value : bool + Set whether this categorical is ordered (True) or not (False). + inplace : bool, default False + Whether or not to set the ordered attribute in-place or return + a copy of this categorical with ordered set to the value. """ inplace = validate_bool_kwarg(inplace, 'inplace') new_dtype = CategoricalDtype(self.categories, ordered=value) @@ -770,9 +762,9 @@ def as_ordered(self, inplace=False): Parameters ---------- - inplace : boolean (default: False) - Whether or not to set the ordered attribute inplace or return a copy - of this categorical with ordered set to True + inplace : bool, default False + Whether or not to set the ordered attribute in-place or return + a copy of this categorical with ordered set to True. 
""" inplace = validate_bool_kwarg(inplace, 'inplace') return self.set_ordered(True, inplace=inplace) @@ -783,9 +775,9 @@ def as_unordered(self, inplace=False): Parameters ---------- - inplace : boolean (default: False) - Whether or not to set the ordered attribute inplace or return a copy - of this categorical with ordered set to False + inplace : bool, default False + Whether or not to set the ordered attribute in-place or return + a copy of this categorical with ordered set to False. """ inplace = validate_bool_kwarg(inplace, 'inplace') return self.set_ordered(False, inplace=inplace) @@ -793,7 +785,7 @@ def as_unordered(self, inplace=False): def set_categories(self, new_categories, ordered=None, rename=False, inplace=False): """ - Sets the categories to the specified new_categories. + Set the categories to the specified new_categories. `new_categories` can include new categories (which will result in unused categories) or remove old categories (which results in values @@ -815,19 +807,19 @@ def set_categories(self, new_categories, ordered=None, rename=False, ---------- new_categories : Index-like The categories in new order. - ordered : boolean, (default: False) + ordered : bool, default False Whether or not the categorical is treated as a ordered categorical. If not given, do not change the ordered information. - rename : boolean (default: False) + rename : bool, default False Whether or not the new_categories should be considered as a rename of the old categories or as reordered categories. - inplace : boolean (default: False) - Whether or not to reorder the categories inplace or return a copy of - this categorical with reordered categories. + inplace : bool, default False + Whether or not to reorder the categories in-place or return a copy + of this categorical with reordered categories. Returns ------- - cat : Categorical with reordered categories or None if inplace. + Categorical with reordered categories or None if inplace. 
Raises ------ @@ -864,7 +856,7 @@ def set_categories(self, new_categories, ordered=None, rename=False, def rename_categories(self, new_categories, inplace=False): """ - Renames categories. + Rename categories. Parameters ---------- @@ -890,7 +882,7 @@ def rename_categories(self, new_categories, inplace=False): Currently, Series are considered list like. In a future version of pandas they'll be considered dict-like. - inplace : boolean (default: False) + inplace : bool, default False Whether or not to rename the categories inplace or return a copy of this categorical with renamed categories. @@ -958,7 +950,7 @@ def rename_categories(self, new_categories, inplace=False): def reorder_categories(self, new_categories, ordered=None, inplace=False): """ - Reorders categories as specified in new_categories. + Reorder categories as specified in new_categories. `new_categories` need to include all old categories and no new category items. @@ -967,10 +959,10 @@ def reorder_categories(self, new_categories, ordered=None, inplace=False): ---------- new_categories : Index-like The categories in new order. - ordered : boolean, optional + ordered : bool, optional Whether or not the categorical is treated as a ordered categorical. If not given, do not change the ordered information. - inplace : boolean (default: False) + inplace : bool, default False Whether or not to reorder the categories inplace or return a copy of this categorical with reordered categories. @@ -1010,7 +1002,7 @@ def add_categories(self, new_categories, inplace=False): ---------- new_categories : category or list-like of category The new categories to be included. - inplace : boolean (default: False) + inplace : bool, default False Whether or not to add the categories inplace or return a copy of this categorical with added categories. @@ -1051,7 +1043,7 @@ def add_categories(self, new_categories, inplace=False): def remove_categories(self, removals, inplace=False): """ - Removes the specified categories. 
+ Remove the specified categories. `removals` must be included in the old categories. Values which were in the removed categories will be set to NaN @@ -1060,7 +1052,7 @@ def remove_categories(self, removals, inplace=False): ---------- removals : category or list of categories The categories which should be removed. - inplace : boolean (default: False) + inplace : bool, default False Whether or not to remove the categories inplace or return a copy of this categorical with removed categories. @@ -1104,11 +1096,11 @@ def remove_categories(self, removals, inplace=False): def remove_unused_categories(self, inplace=False): """ - Removes categories which are not used. + Remove categories which are not used. Parameters ---------- - inplace : boolean (default: False) + inplace : bool, default False Whether or not to drop unused categories inplace or return a copy of this categorical with unused categories dropped. @@ -1289,10 +1281,10 @@ def __array__(self, dtype=None): Returns ------- - values : numpy array + numpy.array A numpy array of either the specified dtype or, if dtype==None (default), the same dtype as - categorical.categories.dtype + categorical.categories.dtype. """ ret = take_1d(self.categories.values, self._codes) if dtype and not is_dtype_equal(dtype, self.categories.dtype): @@ -1454,13 +1446,13 @@ def dropna(self): def value_counts(self, dropna=True): """ - Returns a Series containing counts of each category. + Return a Series containing counts of each category. Every category will have an entry, even those with a count of 0. Parameters ---------- - dropna : boolean, default True + dropna : bool, default True Don't include counts of NaN. Returns @@ -1499,9 +1491,9 @@ def get_values(self): Returns ------- - values : numpy array + numpy.array A numpy array of the same dtype as categorical.categories.dtype or - Index if datetime / periods + Index if datetime / periods. 
""" # if we are a datetime and period index, return Index to keep metadata if is_datetimelike(self.categories): @@ -1540,7 +1532,7 @@ def argsort(self, *args, **kwargs): Returns ------- - argsorted : numpy array + numpy.array See Also -------- @@ -1570,7 +1562,7 @@ def argsort(self, *args, **kwargs): def sort_values(self, inplace=False, ascending=True, na_position='last'): """ - Sorts the Categorical by category value returning a new + Sort the Categorical by category value returning a new Categorical by default. While an ordering is applied to the category values, sorting in this @@ -1581,9 +1573,9 @@ def sort_values(self, inplace=False, ascending=True, na_position='last'): Parameters ---------- - inplace : boolean, default False + inplace : bool, default False Do operation in place. - ascending : boolean, default True + ascending : bool, default True Order ascending. Passing False orders descending. The ordering parameter provides the method by which the category values are organized. 
@@ -1593,7 +1585,7 @@ def sort_values(self, inplace=False, ascending=True, na_position='last'): Returns ------- - y : Categorical or None + Categorical or None See Also -------- @@ -1667,7 +1659,7 @@ def _values_for_rank(self): Returns ------- - numpy array + numpy.array """ from pandas import Series @@ -1695,7 +1687,7 @@ def ravel(self, order='C'): Returns ------- - raveled : numpy array + numpy.array """ return np.array(self) @@ -2167,13 +2159,12 @@ def _reverse_indexer(self): r, counts = libalgos.groupsort_indexer(self.codes.astype('int64'), categories.size) counts = counts.cumsum() - result = [r[counts[indexer]:counts[indexer + 1]] - for indexer in range(len(counts) - 1)] + result = (r[start:end] for start, end in zip(counts, counts[1:])) result = dict(zip(categories, result)) return result # reduction ops # - def _reduce(self, name, axis=0, skipna=True, **kwargs): + def _reduce(self, name, axis=0, **kwargs): func = getattr(self, name, None) if func is None: msg = 'Categorical cannot perform the operation {op}' @@ -2240,7 +2231,7 @@ def mode(self, dropna=True): Parameters ---------- - dropna : boolean, default True + dropna : bool, default True Don't consider counts of NaN/NaT. .. 
versionadded:: 0.24.0 @@ -2321,8 +2312,7 @@ def _values_for_factorize(self): @classmethod def _from_factorized(cls, uniques, original): return original._constructor(original.categories.take(uniques), - categories=original.categories, - ordered=original.ordered) + dtype=original.dtype) def equals(self, other): """ @@ -2334,7 +2324,7 @@ def equals(self, other): Returns ------- - are_equal : boolean + bool """ if self.is_dtype_equal(other): if self.categories.equals(other.categories): @@ -2358,7 +2348,7 @@ def is_dtype_equal(self, other): Returns ------- - are_equal : boolean + bool """ try: @@ -2627,6 +2617,9 @@ def _recode_for_categories(codes, old_categories, new_categories): if len(old_categories) == 0: # All null anyway, so just retain the nulls return codes.copy() + elif new_categories.equals(old_categories): + # Same categories, so no need to actually recode + return codes.copy() indexer = coerce_indexer_dtype(new_categories.get_indexer(old_categories), new_categories) new_codes = take_1d(indexer, codes.copy(), fill_value=-1) @@ -2674,9 +2667,7 @@ def _factorize_from_iterable(values): if is_categorical(values): if isinstance(values, (ABCCategoricalIndex, ABCSeries)): values = values._values - categories = CategoricalIndex(values.categories, - categories=values.categories, - ordered=values.ordered) + categories = CategoricalIndex(values.categories, dtype=values.dtype) codes = values.codes else: # The value of ordered is irrelevant since we don't use cat as such, diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index 73e799f9e0a36..94668c74c1693 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -144,7 +144,7 @@ def strftime(self, date_format): Return an Index of formatted strings specified by date_format, which supports the same string format as the python standard library. Details of the string format can be found in `python string format - doc <%(URL)s>`__ + doc <%(URL)s>`__. 
Parameters ---------- @@ -154,7 +154,7 @@ def strftime(self, date_format): Returns ------- Index - Index of formatted strings + Index of formatted strings. See Also -------- @@ -748,7 +748,7 @@ def _maybe_mask_results(self, result, fill_value=iNaT, convert=None): mask the result if needed, convert to the provided dtype if its not None - This is an internal routine + This is an internal routine. """ if self._hasnans: @@ -1047,7 +1047,7 @@ def _sub_period_array(self, other): Returns ------- result : np.ndarray[object] - Array of DateOffset objects; nulls represented by NaT + Array of DateOffset objects; nulls represented by NaT. """ if not is_period_dtype(self): raise TypeError("cannot subtract {dtype}-dtype from {cls}" diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index d7a8417a71be2..75cf658423210 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -128,7 +128,7 @@ def _dt_array_cmp(cls, op): Wrap comparison operations to convert datetime-like to datetime64 """ opname = '__{name}__'.format(name=op.__name__) - nat_result = True if opname == '__ne__' else False + nat_result = opname == '__ne__' def wrapper(self, other): if isinstance(other, (ABCDataFrame, ABCSeries, ABCIndexClass)): @@ -720,11 +720,11 @@ def _sub_datetime_arraylike(self, other): self_i8 = self.asi8 other_i8 = other.asi8 + arr_mask = self._isnan | other._isnan new_values = checked_add_with_arr(self_i8, -other_i8, - arr_mask=self._isnan) + arr_mask=arr_mask) if self._hasnans or other._hasnans: - mask = (self._isnan) | (other._isnan) - new_values[mask] = iNaT + new_values[arr_mask] = iNaT return new_values.view('timedelta64[ns]') def _add_offset(self, offset): @@ -799,14 +799,14 @@ def tz_convert(self, tz): Parameters ---------- - tz : string, pytz.timezone, dateutil.tz.tzfile or None + tz : str, pytz.timezone, dateutil.tz.tzfile or None Time zone for time. 
Corresponding timestamps would be converted to this time zone of the Datetime Array/Index. A `tz` of None will convert to UTC and remove the timezone information. Returns ------- - normalized : same type as self + Array or Index Raises ------ @@ -842,7 +842,7 @@ def tz_convert(self, tz): With the ``tz=None``, we can remove the timezone (after converting to UTC if necessary): - >>> dti = pd.date_range(start='2014-08-01 09:00',freq='H', + >>> dti = pd.date_range(start='2014-08-01 09:00', freq='H', ... periods=3, tz='Europe/Berlin') >>> dti @@ -882,7 +882,7 @@ def tz_localize(self, tz, ambiguous='raise', nonexistent='raise', Parameters ---------- - tz : string, pytz.timezone, dateutil.tz.tzfile or None + tz : str, pytz.timezone, dateutil.tz.tzfile or None Time zone to convert timestamps to. Passing ``None`` will remove the time zone information preserving local time. ambiguous : 'infer', 'NaT', bool array, default 'raise' @@ -930,7 +930,7 @@ def tz_localize(self, tz, ambiguous='raise', nonexistent='raise', Returns ------- - result : same type as self + Same type as self Array/Index converted to the specified time zone. Raises @@ -970,43 +970,39 @@ def tz_localize(self, tz, ambiguous='raise', nonexistent='raise', Be careful with DST changes. When there is sequential data, pandas can infer the DST time: - >>> s = pd.to_datetime(pd.Series([ - ... '2018-10-28 01:30:00', - ... '2018-10-28 02:00:00', - ... '2018-10-28 02:30:00', - ... '2018-10-28 02:00:00', - ... '2018-10-28 02:30:00', - ... '2018-10-28 03:00:00', - ... '2018-10-28 03:30:00'])) + >>> s = pd.to_datetime(pd.Series(['2018-10-28 01:30:00', + ... '2018-10-28 02:00:00', + ... '2018-10-28 02:30:00', + ... '2018-10-28 02:00:00', + ... '2018-10-28 02:30:00', + ... '2018-10-28 03:00:00', + ... 
'2018-10-28 03:30:00'])) >>> s.dt.tz_localize('CET', ambiguous='infer') - 2018-10-28 01:30:00+02:00 0 - 2018-10-28 02:00:00+02:00 1 - 2018-10-28 02:30:00+02:00 2 - 2018-10-28 02:00:00+01:00 3 - 2018-10-28 02:30:00+01:00 4 - 2018-10-28 03:00:00+01:00 5 - 2018-10-28 03:30:00+01:00 6 - dtype: int64 + 0 2018-10-28 01:30:00+02:00 + 1 2018-10-28 02:00:00+02:00 + 2 2018-10-28 02:30:00+02:00 + 3 2018-10-28 02:00:00+01:00 + 4 2018-10-28 02:30:00+01:00 + 5 2018-10-28 03:00:00+01:00 + 6 2018-10-28 03:30:00+01:00 + dtype: datetime64[ns, CET] In some cases, inferring the DST is impossible. In such cases, you can pass an ndarray to the ambiguous parameter to set the DST explicitly - >>> s = pd.to_datetime(pd.Series([ - ... '2018-10-28 01:20:00', - ... '2018-10-28 02:36:00', - ... '2018-10-28 03:46:00'])) + >>> s = pd.to_datetime(pd.Series(['2018-10-28 01:20:00', + ... '2018-10-28 02:36:00', + ... '2018-10-28 03:46:00'])) >>> s.dt.tz_localize('CET', ambiguous=np.array([True, True, False])) - 0 2018-10-28 01:20:00+02:00 - 1 2018-10-28 02:36:00+02:00 - 2 2018-10-28 03:46:00+01:00 - dtype: datetime64[ns, CET] + 0 2015-03-29 03:00:00+02:00 + 1 2015-03-29 03:30:00+02:00 + dtype: datetime64[ns, Europe/Warsaw] If the DST transition causes nonexistent times, you can shift these dates forward or backwards with a timedelta object or `'shift_forward'` or `'shift_backwards'`. - >>> s = pd.to_datetime(pd.Series([ - ... '2015-03-29 02:30:00', - ... '2015-03-29 03:30:00'])) + >>> s = pd.to_datetime(pd.Series(['2015-03-29 02:30:00', + ... '2015-03-29 03:30:00'])) >>> s.dt.tz_localize('Europe/Warsaw', nonexistent='shift_forward') 0 2015-03-29 03:00:00+02:00 1 2015-03-29 03:30:00+02:00 @@ -1129,7 +1125,7 @@ def to_period(self, freq=None): Parameters ---------- - freq : string or Offset, optional + freq : str or Offset, optional One of pandas' :ref:`offset strings <timeseries.offset_aliases>` or an Offset object. Will be inferred by default. 
@@ -1150,7 +1146,7 @@ def to_period(self, freq=None): Examples -------- - >>> df = pd.DataFrame({"y": [1,2,3]}, + >>> df = pd.DataFrame({"y": [1, 2, 3]}, ... index=pd.to_datetime(["2000-03-31 00:00:00", ... "2000-05-31 00:00:00", ... "2000-08-31 00:00:00"])) @@ -2058,7 +2054,7 @@ def validate_tz_from_dtype(dtype, tz): # tz-naive dtype (i.e. datetime64[ns]) if tz is not None and not timezones.tz_compare(tz, dtz): raise ValueError("cannot supply both a tz and a " - "timezone-naive dtype (i.e. datetime64[ns]") + "timezone-naive dtype (i.e. datetime64[ns])") return tz diff --git a/pandas/core/arrays/integer.py b/pandas/core/arrays/integer.py index a6a4a49d3a939..fd90aec3b5e8c 100644 --- a/pandas/core/arrays/integer.py +++ b/pandas/core/arrays/integer.py @@ -561,7 +561,7 @@ def cmp_method(self, other): else: mask = self._mask | mask - result[mask] = True if op_name == 'ne' else False + result[mask] = op_name == 'ne' return result name = '__{name}__'.format(name=op.__name__) diff --git a/pandas/core/arrays/numpy_.py b/pandas/core/arrays/numpy_.py index 47517782e2bbf..8e2ab586cacb6 100644 --- a/pandas/core/arrays/numpy_.py +++ b/pandas/core/arrays/numpy_.py @@ -4,6 +4,7 @@ from pandas._libs import lib from pandas.compat.numpy import function as nv +from pandas.util._decorators import Appender from pandas.util._validators import validate_fillna_kwargs from pandas.core.dtypes.dtypes import ExtensionDtype @@ -12,6 +13,7 @@ from pandas import compat from pandas.core import nanops +from pandas.core.algorithms import searchsorted from pandas.core.missing import backfill_1d, pad_1d from .base import ExtensionArray, ExtensionOpsMixin @@ -222,7 +224,7 @@ def __getitem__(self, item): item = item._ndarray result = self._ndarray[item] - if not lib.is_scalar(result): + if not lib.is_scalar(item): result = type(self)(result) return result @@ -423,6 +425,11 @@ def to_numpy(self, dtype=None, copy=False): return result + @Appender(ExtensionArray.searchsorted.__doc__) + def 
searchsorted(self, value, side='left', sorter=None): + return searchsorted(self.to_numpy(), value, + side=side, sorter=sorter) + # ------------------------------------------------------------------------ # Ops diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py index e0c71b5609096..3ddceb8c2839d 100644 --- a/pandas/core/arrays/period.py +++ b/pandas/core/arrays/period.py @@ -46,7 +46,7 @@ def _period_array_cmp(cls, op): Wrap comparison operations to convert Period-like to PeriodDtype """ opname = '__{name}__'.format(name=op.__name__) - nat_result = True if opname == '__ne__' else False + nat_result = opname == '__ne__' def wrapper(self, other): op = getattr(self.asi8, opname) diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py index 4f0c96f7927da..74fe8072e6924 100644 --- a/pandas/core/arrays/timedeltas.py +++ b/pandas/core/arrays/timedeltas.py @@ -62,7 +62,7 @@ def _td_array_cmp(cls, op): Wrap comparison operations to convert timedelta-like to timedelta64 """ opname = '__{name}__'.format(name=op.__name__) - nat_result = True if opname == '__ne__' else False + nat_result = opname == '__ne__' def wrapper(self, other): if isinstance(other, (ABCDataFrame, ABCSeries, ABCIndexClass)): @@ -190,6 +190,8 @@ def __init__(self, values, dtype=_TD_DTYPE, freq=None, copy=False): "ndarray, or Series or Index containing one of those." 
) raise ValueError(msg.format(type(values).__name__)) + if values.ndim != 1: + raise ValueError("Only 1-dimensional input arrays are supported.") if values.dtype == 'i8': # for compat with datetime/timedelta/period shared methods, @@ -945,6 +947,9 @@ def sequence_to_td64ns(data, copy=False, unit="ns", errors="raise"): .format(dtype=data.dtype)) data = np.array(data, copy=copy) + if data.ndim != 1: + raise ValueError("Only 1-dimensional input arrays are supported.") + assert data.dtype == 'm8[ns]', data return data, inferred_freq diff --git a/pandas/core/base.py b/pandas/core/base.py index c02ba88ea7fda..f896596dd5216 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -1,6 +1,7 @@ """ Base and utility classes for pandas objects. """ +from collections import OrderedDict import textwrap import warnings @@ -8,7 +9,7 @@ import pandas._libs.lib as lib import pandas.compat as compat -from pandas.compat import PYPY, OrderedDict, builtins, map, range +from pandas.compat import PYPY, builtins, map, range from pandas.compat.numpy import function as nv from pandas.errors import AbstractMethodError from pandas.util._decorators import Appender, Substitution, cache_readonly @@ -376,7 +377,7 @@ def nested_renaming_depr(level=4): # eg. 
{'A' : ['mean']}, normalize all to # be list-likes if any(is_aggregator(x) for x in compat.itervalues(arg)): - new_arg = compat.OrderedDict() + new_arg = OrderedDict() for k, v in compat.iteritems(arg): if not isinstance(v, (tuple, list, dict)): new_arg[k] = [v] @@ -444,14 +445,14 @@ def _agg(arg, func): run the aggregations over the arg with func return an OrderedDict """ - result = compat.OrderedDict() + result = OrderedDict() for fname, agg_how in compat.iteritems(arg): result[fname] = func(fname, agg_how) return result # set the final keys keys = list(compat.iterkeys(arg)) - result = compat.OrderedDict() + result = OrderedDict() # nested renamer if is_nested_renamer: @@ -459,7 +460,7 @@ def _agg(arg, func): if all(isinstance(r, dict) for r in result): - result, results = compat.OrderedDict(), result + result, results = OrderedDict(), result for r in results: result.update(r) keys = list(compat.iterkeys(result)) @@ -793,7 +794,7 @@ def array(self): Returns ------- - array : ExtensionArray + ExtensionArray An ExtensionArray of the values stored within. For extension types, this is the actual array. For NumPy native types, this is a thin (no copy) wrapper around :class:`numpy.ndarray`. @@ -1021,7 +1022,7 @@ def max(self, axis=None, skipna=True): def argmax(self, axis=None, skipna=True): """ - Return a ndarray of the maximum argument indexer. + Return an ndarray of the maximum argument indexer. 
Parameters ---------- @@ -1086,6 +1087,10 @@ def argmin(self, axis=None, skipna=True): Dummy argument for consistency with Series skipna : bool, default True + Returns + ------- + numpy.ndarray + See Also -------- numpy.ndarray.argmin @@ -1101,6 +1106,10 @@ def tolist(self): (for str, int, float) or a pandas scalar (for Timestamp/Timedelta/Interval/Period) + Returns + ------- + list + See Also -------- numpy.ndarray.tolist @@ -1161,7 +1170,7 @@ def _map_values(self, mapper, na_action=None): Returns ------- - applied : Union[Index, MultiIndex], inferred + Union[Index, MultiIndex], inferred The output of the mapping function applied to the index. If the function returns a tuple with more than one element a MultiIndex will be returned. @@ -1234,7 +1243,7 @@ def value_counts(self, normalize=False, sort=True, ascending=False, If True then the object returned will contain the relative frequencies of the unique values. sort : boolean, default True - Sort by values. + Sort by frequencies. ascending : boolean, default False Sort in ascending order. bins : integer, optional @@ -1245,7 +1254,7 @@ def value_counts(self, normalize=False, sort=True, ascending=False, Returns ------- - counts : Series + Series See Also -------- @@ -1323,12 +1332,31 @@ def nunique(self, dropna=True): Parameters ---------- - dropna : boolean, default True + dropna : bool, default True Don't include NaN in the count. Returns ------- - nunique : int + int + + See Also + -------- + DataFrame.nunique: Method nunique for DataFrame. + Series.count: Count non-NA/null observations in the Series. 
+ + Examples + -------- + >>> s = pd.Series([1, 3, 5, 7, 7]) + >>> s + 0 1 + 1 3 + 2 5 + 3 7 + 4 7 + dtype: int64 + + >>> s.nunique() + 4 """ uniqs = self.unique() n = len(uniqs) @@ -1343,9 +1371,9 @@ def is_unique(self): Returns ------- - is_unique : boolean + bool """ - return self.nunique() == len(self) + return self.nunique(dropna=False) == len(self) @property def is_monotonic(self): @@ -1357,7 +1385,7 @@ def is_monotonic(self): Returns ------- - is_monotonic : boolean + bool """ from pandas import Index return Index(self).is_monotonic @@ -1374,7 +1402,7 @@ def is_monotonic_decreasing(self): Returns ------- - is_monotonic_decreasing : boolean + bool """ from pandas import Index return Index(self).is_monotonic_decreasing @@ -1494,11 +1522,11 @@ def factorize(self, sort=False, na_sentinel=-1): array([3]) """) - @Substitution(klass='IndexOpsMixin') + @Substitution(klass='Index') @Appender(_shared_docs['searchsorted']) def searchsorted(self, value, side='left', sorter=None): - # needs coercion on the key (DatetimeIndex does already) - return self._values.searchsorted(value, side=side, sorter=sorter) + return algorithms.searchsorted(self._values, value, + side=side, sorter=sorter) def drop_duplicates(self, keep='first', inplace=False): inplace = validate_bool_kwarg(inplace, 'inplace') diff --git a/pandas/core/common.py b/pandas/core/common.py index b4de0daa13b16..5b83cb344b1e7 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -5,6 +5,7 @@ """ import collections +from collections import OrderedDict from datetime import datetime, timedelta from functools import partial import inspect @@ -13,7 +14,7 @@ from pandas._libs import lib, tslibs import pandas.compat as compat -from pandas.compat import PY36, OrderedDict, iteritems +from pandas.compat import PY36, iteritems from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike from pandas.core.dtypes.common import ( @@ -32,7 +33,8 @@ class SettingWithCopyWarning(Warning): def flatten(l): - 
"""Flatten an arbitrarily nested sequence. + """ + Flatten an arbitrarily nested sequence. Parameters ---------- @@ -160,12 +162,16 @@ def cast_scalar_indexer(val): def _not_none(*args): - """Returns a generator consisting of the arguments that are not None""" + """ + Returns a generator consisting of the arguments that are not None. + """ return (arg for arg in args if arg is not None) def _any_none(*args): - """Returns a boolean indicating if any argument is None""" + """ + Returns a boolean indicating if any argument is None. + """ for arg in args: if arg is None: return True @@ -173,7 +179,9 @@ def _any_none(*args): def _all_none(*args): - """Returns a boolean indicating if all arguments are None""" + """ + Returns a boolean indicating if all arguments are None. + """ for arg in args: if arg is not None: return False @@ -181,7 +189,9 @@ def _all_none(*args): def _any_not_none(*args): - """Returns a boolean indicating if any argument is not None""" + """ + Returns a boolean indicating if any argument is not None. + """ for arg in args: if arg is not None: return True @@ -189,7 +199,9 @@ def _any_not_none(*args): def _all_not_none(*args): - """Returns a boolean indicating if all arguments are not None""" + """ + Returns a boolean indicating if all arguments are not None. + """ for arg in args: if arg is None: return False @@ -197,7 +209,9 @@ def _all_not_none(*args): def count_not_none(*args): - """Returns the count of arguments that are not None""" + """ + Returns the count of arguments that are not None. + """ return sum(x is not None for x in args) @@ -277,7 +291,9 @@ def maybe_make_list(obj): def is_null_slice(obj): - """ we have a null slice """ + """ + We have a null slice. + """ return (isinstance(obj, slice) and obj.start is None and obj.stop is None and obj.step is None) @@ -291,7 +307,9 @@ def is_true_slices(l): # TODO: used only once in indexing; belongs elsewhere? 
def is_full_slice(obj, l): - """ we have a full length slice """ + """ + We have a full length slice. + """ return (isinstance(obj, slice) and obj.start == 0 and obj.stop == l and obj.step is None) @@ -316,7 +334,7 @@ def get_callable_name(obj): def apply_if_callable(maybe_callable, obj, **kwargs): """ Evaluate possibly callable input using obj and kwargs if it is callable, - otherwise return as it is + otherwise return as it is. Parameters ---------- @@ -333,7 +351,8 @@ def apply_if_callable(maybe_callable, obj, **kwargs): def dict_compat(d): """ - Helper function to convert datetimelike-keyed dicts to Timestamp-keyed dict + Helper function to convert datetimelike-keyed dicts + to Timestamp-keyed dict. Parameters ---------- @@ -383,13 +402,6 @@ def standardize_mapping(into): return into -def sentinel_factory(): - class Sentinel(object): - pass - - return Sentinel() - - def random_state(state=None): """ Helper function for processing random_state arguments. diff --git a/pandas/core/computation/eval.py b/pandas/core/computation/eval.py index b768ed6df303e..23c3e0eaace81 100644 --- a/pandas/core/computation/eval.py +++ b/pandas/core/computation/eval.py @@ -205,7 +205,7 @@ def eval(expr, parser='pandas', engine=None, truediv=True, A list of objects implementing the ``__getitem__`` special method that you can use to inject an additional collection of namespaces to use for variable lookup. For example, this is used in the - :meth:`~pandas.DataFrame.query` method to inject the + :meth:`~DataFrame.query` method to inject the ``DataFrame.index`` and ``DataFrame.columns`` variables that refer to their respective :class:`~pandas.DataFrame` instance attributes. 
@@ -248,8 +248,8 @@ def eval(expr, parser='pandas', engine=None, truediv=True, See Also -------- - pandas.DataFrame.query - pandas.DataFrame.eval + DataFrame.query + DataFrame.eval Notes ----- diff --git a/pandas/core/computation/ops.py b/pandas/core/computation/ops.py index 8c3218a976b6b..5c70255982e54 100644 --- a/pandas/core/computation/ops.py +++ b/pandas/core/computation/ops.py @@ -8,11 +8,11 @@ import numpy as np +from pandas._libs.tslibs import Timestamp from pandas.compat import PY3, string_types, text_type from pandas.core.dtypes.common import is_list_like, is_scalar -import pandas as pd from pandas.core.base import StringMixin import pandas.core.common as com from pandas.core.computation.common import _ensure_decoded, _result_type_many @@ -399,8 +399,9 @@ def evaluate(self, env, engine, parser, term_type, eval_in_python): if self.op in eval_in_python: res = self.func(left.value, right.value) else: - res = pd.eval(self, local_dict=env, engine=engine, - parser=parser) + from pandas.core.computation.eval import eval + res = eval(self, local_dict=env, engine=engine, + parser=parser) name = env.add_tmp(res) return term_type(name, env=env) @@ -422,7 +423,7 @@ def stringify(value): v = rhs.value if isinstance(v, (int, float)): v = stringify(v) - v = pd.Timestamp(_ensure_decoded(v)) + v = Timestamp(_ensure_decoded(v)) if v.tz is not None: v = v.tz_convert('UTC') self.rhs.update(v) @@ -431,7 +432,7 @@ def stringify(value): v = lhs.value if isinstance(v, (int, float)): v = stringify(v) - v = pd.Timestamp(_ensure_decoded(v)) + v = Timestamp(_ensure_decoded(v)) if v.tz is not None: v = v.tz_convert('UTC') self.lhs.update(v) diff --git a/pandas/core/computation/pytables.py b/pandas/core/computation/pytables.py index 00de29b07c75d..18f13e17c046e 100644 --- a/pandas/core/computation/pytables.py +++ b/pandas/core/computation/pytables.py @@ -5,6 +5,7 @@ import numpy as np +from pandas._libs.tslibs import Timedelta, Timestamp from pandas.compat import DeepChainMap, 
string_types, u from pandas.core.dtypes.common import is_list_like @@ -185,12 +186,12 @@ def stringify(value): if isinstance(v, (int, float)): v = stringify(v) v = _ensure_decoded(v) - v = pd.Timestamp(v) + v = Timestamp(v) if v.tz is not None: v = v.tz_convert('UTC') return TermValue(v, v.value, kind) elif kind == u('timedelta64') or kind == u('timedelta'): - v = pd.Timedelta(v, unit='s').value + v = Timedelta(v, unit='s').value return TermValue(int(v), v, kind) elif meta == u('category'): metadata = com.values_from_object(self.metadata) @@ -251,7 +252,7 @@ def evaluate(self): .format(slf=self)) rhs = self.conform(self.rhs) - values = [TermValue(v, v, self.kind) for v in rhs] + values = [TermValue(v, v, self.kind).value for v in rhs] if self.is_in_table: @@ -262,7 +263,7 @@ def evaluate(self): self.filter = ( self.lhs, filter_op, - pd.Index([v.value for v in values])) + pd.Index(values)) return self return None @@ -274,7 +275,7 @@ def evaluate(self): self.filter = ( self.lhs, filter_op, - pd.Index([v.value for v in values])) + pd.Index(values)) else: raise TypeError("passing a filterable condition to a non-table " diff --git a/pandas/core/computation/scope.py b/pandas/core/computation/scope.py index 33c5a1c2e0f0a..e158bc8c568eb 100644 --- a/pandas/core/computation/scope.py +++ b/pandas/core/computation/scope.py @@ -11,9 +11,9 @@ import numpy as np +from pandas._libs.tslibs import Timestamp from pandas.compat import DeepChainMap, StringIO, map -import pandas as pd # noqa from pandas.core.base import StringMixin import pandas.core.computation as compu @@ -48,7 +48,7 @@ def _raw_hex_id(obj): _DEFAULT_GLOBALS = { - 'Timestamp': pd._libs.tslib.Timestamp, + 'Timestamp': Timestamp, 'datetime': datetime.datetime, 'True': True, 'False': False, diff --git a/pandas/core/config.py b/pandas/core/config.py index 0f43ca65d187a..01664fffb1e27 100644 --- a/pandas/core/config.py +++ b/pandas/core/config.py @@ -282,8 +282,8 @@ def __doc__(self): Note: partial matches are supported 
for convenience, but unless you use the full option name (e.g. x.y.z.option_name), your code may break in future versions if new options with similar names are introduced. -value : - new value of option. +value : object + New value of option. Returns ------- diff --git a/pandas/core/dtypes/base.py b/pandas/core/dtypes/base.py index ab1cb9cf2499a..88bbdcf342d66 100644 --- a/pandas/core/dtypes/base.py +++ b/pandas/core/dtypes/base.py @@ -153,8 +153,8 @@ class ExtensionDtype(_DtypeOpsMixin): See Also -------- - pandas.api.extensions.register_extension_dtype - pandas.api.extensions.ExtensionArray + extensions.register_extension_dtype + extensions.ExtensionArray Notes ----- @@ -173,7 +173,7 @@ class ExtensionDtype(_DtypeOpsMixin): Optionally one can override construct_array_type for construction with the name of this dtype via the Registry. See - :meth:`pandas.api.extensions.register_extension_dtype`. + :meth:`extensions.register_extension_dtype`. * construct_array_type diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index ad62146dda268..f6561948df99a 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -1111,11 +1111,9 @@ def find_common_type(types): # this is different from numpy, which casts bool with float/int as int has_bools = any(is_bool_dtype(t) for t in types) if has_bools: - has_ints = any(is_integer_dtype(t) for t in types) - has_floats = any(is_float_dtype(t) for t in types) - has_complex = any(is_complex_dtype(t) for t in types) - if has_ints or has_floats or has_complex: - return np.object + for t in types: + if is_integer_dtype(t) or is_float_dtype(t) or is_complex_dtype(t): + return np.object return np.find_common_type(types, []) diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py index e9bf0f87088db..4be7eb8ddb890 100644 --- a/pandas/core/dtypes/common.py +++ b/pandas/core/dtypes/common.py @@ -139,7 +139,8 @@ def is_object_dtype(arr_or_dtype): Returns ------- - boolean : Whether or not the 
array-like or dtype is of the object dtype.

     Examples
     --------
@@ -230,8 +231,8 @@ def is_scipy_sparse(arr):

     Returns
     -------
-    boolean : Whether or not the array-like is a
-              scipy.sparse.spmatrix instance.
+    boolean
+        Whether or not the array-like is a scipy.sparse.spmatrix instance.

     Notes
     -----
@@ -270,7 +271,8 @@ def is_categorical(arr):

     Returns
     -------
-    boolean : Whether or not the array-like is of a Categorical instance.
+    boolean
+        Whether or not the array-like is of a Categorical instance.

     Examples
     --------
@@ -305,8 +307,9 @@ def is_datetimetz(arr):

     Returns
     -------
-    boolean : Whether or not the array-like is a datetime array-like with
-              a timezone component in its dtype.
+    boolean
+        Whether or not the array-like is a datetime array-like with a
+        timezone component in its dtype.

     Examples
     --------
@@ -347,7 +350,8 @@ def is_offsetlike(arr_or_obj):

     Returns
     -------
-    boolean : Whether the object is a DateOffset or listlike of DatetOffsets
+    boolean
+        Whether the object is a DateOffset or listlike of DateOffsets

     Examples
     --------
@@ -381,7 +385,8 @@ def is_period(arr):

     Returns
     -------
-    boolean : Whether or not the array-like is a periodical index.
+    boolean
+        Whether or not the array-like is a periodical index.

     Examples
     --------
@@ -411,8 +416,8 @@ def is_datetime64_dtype(arr_or_dtype):

     Returns
     -------
-    boolean : Whether or not the array-like or dtype is of
-              the datetime64 dtype.
+    boolean
+        Whether or not the array-like or dtype is of the datetime64 dtype.

     Examples
     --------
@@ -442,8 +447,8 @@ def is_datetime64tz_dtype(arr_or_dtype):

     Returns
     -------
-    boolean : Whether or not the array-like or dtype is of
-              a DatetimeTZDtype dtype.
+    boolean
+        Whether or not the array-like or dtype is of a DatetimeTZDtype dtype.
Examples -------- @@ -480,8 +485,8 @@ def is_timedelta64_dtype(arr_or_dtype): Returns ------- - boolean : Whether or not the array-like or dtype is - of the timedelta64 dtype. + boolean + Whether or not the array-like or dtype is of the timedelta64 dtype. Examples -------- @@ -511,7 +516,8 @@ def is_period_dtype(arr_or_dtype): Returns ------- - boolean : Whether or not the array-like or dtype is of the Period dtype. + boolean + Whether or not the array-like or dtype is of the Period dtype. Examples -------- @@ -544,8 +550,8 @@ def is_interval_dtype(arr_or_dtype): Returns ------- - boolean : Whether or not the array-like or dtype is - of the Interval dtype. + boolean + Whether or not the array-like or dtype is of the Interval dtype. Examples -------- @@ -580,8 +586,8 @@ def is_categorical_dtype(arr_or_dtype): Returns ------- - boolean : Whether or not the array-like or dtype is - of the Categorical dtype. + boolean + Whether or not the array-like or dtype is of the Categorical dtype. Examples -------- @@ -613,7 +619,8 @@ def is_string_dtype(arr_or_dtype): Returns ------- - boolean : Whether or not the array or dtype is of the string dtype. + boolean + Whether or not the array or dtype is of the string dtype. Examples -------- @@ -647,8 +654,9 @@ def is_period_arraylike(arr): Returns ------- - boolean : Whether or not the array-like is a periodical - array-like or PeriodIndex instance. + boolean + Whether or not the array-like is a periodical array-like or + PeriodIndex instance. Examples -------- @@ -678,8 +686,9 @@ def is_datetime_arraylike(arr): Returns ------- - boolean : Whether or not the array-like is a datetime - array-like or DatetimeIndex. + boolean + Whether or not the array-like is a datetime array-like or + DatetimeIndex. Examples -------- @@ -713,7 +722,8 @@ def is_datetimelike(arr): Returns ------- - boolean : Whether or not the array-like is a datetime-like array-like. + boolean + Whether or not the array-like is a datetime-like array-like. 
Examples -------- @@ -754,7 +764,8 @@ def is_dtype_equal(source, target): Returns ---------- - boolean : Whether or not the two dtypes are equal. + boolean + Whether or not the two dtypes are equal. Examples -------- @@ -794,7 +805,8 @@ def is_dtype_union_equal(source, target): Returns ---------- - boolean : Whether or not the two dtypes are equal. + boolean + Whether or not the two dtypes are equal. >>> is_dtype_equal("int", int) True @@ -835,7 +847,8 @@ def is_any_int_dtype(arr_or_dtype): Returns ------- - boolean : Whether or not the array or dtype is of an integer dtype. + boolean + Whether or not the array or dtype is of an integer dtype. Examples -------- @@ -883,8 +896,9 @@ def is_integer_dtype(arr_or_dtype): Returns ------- - boolean : Whether or not the array or dtype is of an integer dtype - and not an instance of timedelta64. + boolean + Whether or not the array or dtype is of an integer dtype and + not an instance of timedelta64. Examples -------- @@ -938,8 +952,9 @@ def is_signed_integer_dtype(arr_or_dtype): Returns ------- - boolean : Whether or not the array or dtype is of a signed integer dtype - and not an instance of timedelta64. + boolean + Whether or not the array or dtype is of a signed integer dtype + and not an instance of timedelta64. Examples -------- @@ -993,8 +1008,8 @@ def is_unsigned_integer_dtype(arr_or_dtype): Returns ------- - boolean : Whether or not the array or dtype is of an - unsigned integer dtype. + boolean + Whether or not the array or dtype is of an unsigned integer dtype. Examples -------- @@ -1036,7 +1051,8 @@ def is_int64_dtype(arr_or_dtype): Returns ------- - boolean : Whether or not the array or dtype is of the int64 dtype. + boolean + Whether or not the array or dtype is of the int64 dtype. Notes ----- @@ -1086,7 +1102,8 @@ def is_datetime64_any_dtype(arr_or_dtype): Returns ------- - boolean : Whether or not the array or dtype is of the datetime64 dtype. 
+ boolean + Whether or not the array or dtype is of the datetime64 dtype. Examples -------- @@ -1126,7 +1143,8 @@ def is_datetime64_ns_dtype(arr_or_dtype): Returns ------- - boolean : Whether or not the array or dtype is of the datetime64[ns] dtype. + boolean + Whether or not the array or dtype is of the datetime64[ns] dtype. Examples -------- @@ -1178,8 +1196,8 @@ def is_timedelta64_ns_dtype(arr_or_dtype): Returns ------- - boolean : Whether or not the array or dtype is of the - timedelta64[ns] dtype. + boolean + Whether or not the array or dtype is of the timedelta64[ns] dtype. Examples -------- @@ -1207,8 +1225,9 @@ def is_datetime_or_timedelta_dtype(arr_or_dtype): Returns ------- - boolean : Whether or not the array or dtype is of a - timedelta64, or datetime64 dtype. + boolean + Whether or not the array or dtype is of a timedelta64, + or datetime64 dtype. Examples -------- @@ -1248,7 +1267,8 @@ def _is_unorderable_exception(e): Returns ------- - boolean : Whether or not the exception raised is an unorderable exception. + boolean + Whether or not the exception raised is an unorderable exception. """ if PY36: @@ -1275,8 +1295,8 @@ def is_numeric_v_string_like(a, b): Returns ------- - boolean : Whether we return a comparing a string-like - object to a numeric array. + boolean + Whether we return a comparing a string-like object to a numeric array. Examples -------- @@ -1332,8 +1352,8 @@ def is_datetimelike_v_numeric(a, b): Returns ------- - boolean : Whether we return a comparing a datetime-like - to a numeric object. + boolean + Whether we return a comparing a datetime-like to a numeric object. Examples -------- @@ -1388,8 +1408,8 @@ def is_datetimelike_v_object(a, b): Returns ------- - boolean : Whether we return a comparing a datetime-like - to an object instance. + boolean + Whether we return a comparing a datetime-like to an object instance. 
Examples
     --------
@@ -1442,7 +1462,8 @@ def needs_i8_conversion(arr_or_dtype):

     Returns
     -------
-    boolean : Whether or not the array or dtype should be converted to int64.
+    boolean
+        Whether or not the array or dtype should be converted to int64.

     Examples
     --------
@@ -1480,7 +1501,8 @@ def is_numeric_dtype(arr_or_dtype):

     Returns
     -------
-    boolean : Whether or not the array or dtype is of a numeric dtype.
+    boolean
+        Whether or not the array or dtype is of a numeric dtype.

     Examples
     --------
@@ -1524,7 +1546,8 @@ def is_string_like_dtype(arr_or_dtype):

     Returns
     -------
-    boolean : Whether or not the array or dtype is of the string dtype.
+    boolean
+        Whether or not the array or dtype is of the string dtype.

     Examples
     --------
@@ -1555,7 +1578,8 @@ def is_float_dtype(arr_or_dtype):

     Returns
     -------
-    boolean : Whether or not the array or dtype is of a float dtype.
+    boolean
+        Whether or not the array or dtype is of a float dtype.

     Examples
     --------
@@ -1586,7 +1610,8 @@ def is_bool_dtype(arr_or_dtype):

     Returns
     -------
-    boolean : Whether or not the array or dtype is of a boolean dtype.
+    boolean
+        Whether or not the array or dtype is of a boolean dtype.

     Notes
     -----
@@ -1655,8 +1680,8 @@ def is_extension_type(arr):

     Returns
     -------
-    boolean : Whether or not the array-like is of a pandas
-              extension class instance.
+    boolean
+        Whether or not the array-like is of a pandas extension class instance.

     Examples
     --------
@@ -1760,7 +1785,8 @@ def is_complex_dtype(arr_or_dtype):

     Returns
     -------
-    boolean : Whether or not the array or dtype is of a compex dtype.
+    boolean
+        Whether or not the array or dtype is of a complex dtype.

     Examples
     --------
@@ -1980,7 +2006,7 @@ def _validate_date_like_dtype(dtype):

 def pandas_dtype(dtype):
     """
-    Converts input into a pandas only dtype object or a numpy dtype object.
+    Convert input into a pandas only dtype object or a numpy dtype object.
Parameters ---------- diff --git a/pandas/core/dtypes/concat.py b/pandas/core/dtypes/concat.py index aada777decaa7..10e903acbe538 100644 --- a/pandas/core/dtypes/concat.py +++ b/pandas/core/dtypes/concat.py @@ -123,8 +123,6 @@ def is_nonempty(x): except Exception: return True - nonempty = [x for x in to_concat if is_nonempty(x)] - # If all arrays are empty, there's nothing to convert, just short-cut to # the concatenation, #3121. # @@ -148,11 +146,11 @@ def is_nonempty(x): elif 'sparse' in typs: return _concat_sparse(to_concat, axis=axis, typs=typs) - extensions = [is_extension_array_dtype(x) for x in to_concat] - if any(extensions) and axis == 1: + all_empty = all(not is_nonempty(x) for x in to_concat) + if any(is_extension_array_dtype(x) for x in to_concat) and axis == 1: to_concat = [np.atleast_2d(x.astype('object')) for x in to_concat] - if not nonempty: + if all_empty: # we have all empties, but may need to coerce the result dtype to # object if we have non-numeric type operands (numpy would otherwise # cast this to float) diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py index f84471c3b04e8..11a132c4d14ee 100644 --- a/pandas/core/dtypes/dtypes.py +++ b/pandas/core/dtypes/dtypes.py @@ -8,7 +8,8 @@ from pandas._libs.interval import Interval from pandas._libs.tslibs import NaT, Period, Timestamp, timezones -from pandas.core.dtypes.generic import ABCCategoricalIndex, ABCIndexClass +from pandas.core.dtypes.generic import ( + ABCCategoricalIndex, ABCDateOffset, ABCIndexClass) from pandas import compat @@ -17,7 +18,8 @@ def register_extension_dtype(cls): - """Class decorator to register an ExtensionType with pandas. + """ + Register an ExtensionType with pandas as class decorator. .. 
versionadded:: 0.24.0 @@ -194,7 +196,7 @@ class CategoricalDtype(PandasExtensionDtype, ExtensionDtype): See Also -------- - pandas.Categorical + Categorical Notes ----- @@ -413,8 +415,7 @@ def _hash_categories(categories, ordered=True): cat_array = hash_tuples(categories) else: if categories.dtype == 'O': - types = [type(x) for x in categories] - if not len(set(types)) == 1: + if len({type(x) for x in categories}) != 1: # TODO: hash_array doesn't handle mixed types. It casts # everything to a str first, which means we treat # {'1', '2'} the same as {'1', 2} @@ -639,6 +640,7 @@ def __init__(self, unit="ns", tz=None): if tz: tz = timezones.maybe_get_tz(tz) + tz = timezones.tz_standardize(tz) elif tz is not None: raise pytz.UnknownTimeZoneError(tz) elif tz is None: @@ -757,8 +759,7 @@ def __new__(cls, freq=None): # empty constructor for pickle compat return object.__new__(cls) - from pandas.tseries.offsets import DateOffset - if not isinstance(freq, DateOffset): + if not isinstance(freq, ABCDateOffset): freq = cls._parse_dtype_strict(freq) try: @@ -789,12 +790,10 @@ def construct_from_string(cls, string): Strict construction from a string, raise a TypeError if not possible """ - from pandas.tseries.offsets import DateOffset - if (isinstance(string, compat.string_types) and (string.startswith('period[') or string.startswith('Period[')) or - isinstance(string, DateOffset)): + isinstance(string, ABCDateOffset)): # do not parse string like U as period[U] # avoid tuple to be regarded as freq try: @@ -932,13 +931,18 @@ def construct_from_string(cls, string): attempt to construct this type from a string, raise a TypeError if its not possible """ - if (isinstance(string, compat.string_types) and - (string.startswith('interval') or - string.startswith('Interval'))): - return cls(string) + if not isinstance(string, compat.string_types): + msg = "a string needs to be passed, got type {typ}" + raise TypeError(msg.format(typ=type(string))) - msg = "a string needs to be passed, got 
type {typ}" - raise TypeError(msg.format(typ=type(string))) + if (string.lower() == 'interval' or + cls._match.search(string) is not None): + return cls(string) + + msg = ('Incorrectly formatted string passed to constructor. ' + 'Valid formats include Interval or Interval[dtype] ' + 'where dtype is numeric, datetime, or timedelta') + raise TypeError(msg) @property def type(self): @@ -979,7 +983,7 @@ def is_dtype(cls, dtype): return True else: return False - except ValueError: + except (ValueError, TypeError): return False else: return False diff --git a/pandas/core/dtypes/inference.py b/pandas/core/dtypes/inference.py index b11542622451c..1a02623fa6072 100644 --- a/pandas/core/dtypes/inference.py +++ b/pandas/core/dtypes/inference.py @@ -44,7 +44,7 @@ def is_number(obj): See Also -------- - pandas.api.types.is_integer: Checks a subgroup of numbers. + api.types.is_integer: Checks a subgroup of numbers. Examples -------- @@ -397,12 +397,15 @@ def is_dict_like(obj): True >>> is_dict_like([1, 2, 3]) False + >>> is_dict_like(dict) + False + >>> is_dict_like(dict()) + True """ - for attr in ("__getitem__", "keys", "__contains__"): - if not hasattr(obj, attr): - return False - - return True + dict_like_attrs = ("__getitem__", "keys", "__contains__") + return (all(hasattr(obj, attr) for attr in dict_like_attrs) + # [GH 25196] exclude classes + and not isinstance(obj, type)) def is_named_tuple(obj): diff --git a/pandas/core/dtypes/missing.py b/pandas/core/dtypes/missing.py index 3c6d3f212342b..697c58a365233 100644 --- a/pandas/core/dtypes/missing.py +++ b/pandas/core/dtypes/missing.py @@ -221,8 +221,8 @@ def _isna_ndarraylike(obj): # box if isinstance(obj, ABCSeries): - from pandas import Series - result = Series(result, index=obj.index, name=obj.name, copy=False) + result = obj._constructor( + result, index=obj.index, name=obj.name, copy=False) return result @@ -250,8 +250,8 @@ def _isna_ndarraylike_old(obj): # box if isinstance(obj, ABCSeries): - from pandas import Series 
- result = Series(result, index=obj.index, name=obj.name, copy=False) + result = obj._constructor( + result, index=obj.index, name=obj.name, copy=False) return result diff --git a/pandas/core/frame.py b/pandas/core/frame.py index b4f79bda25517..6b4d95055d06d 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -13,6 +13,7 @@ from __future__ import division import collections +from collections import OrderedDict import functools import itertools import sys @@ -32,7 +33,7 @@ from pandas import compat from pandas.compat import (range, map, zip, lmap, lzip, StringIO, u, - OrderedDict, PY36, raise_with_traceback, + PY36, raise_with_traceback, Iterator, string_and_binary_types) from pandas.compat.numpy import function as nv from pandas.core.dtypes.cast import ( @@ -318,7 +319,7 @@ class DataFrame(NDFrame): DataFrame.from_records : Constructor from tuples, also record arrays. DataFrame.from_dict : From dicts of Series, arrays, or dicts. DataFrame.from_items : From sequence of (key, value) pairs - pandas.read_csv, pandas.read_table, pandas.read_clipboard. + read_csv, pandas.read_table, pandas.read_clipboard. Examples -------- @@ -482,7 +483,7 @@ def axes(self): -------- >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df.axes - [RangeIndex(start=0, stop=2, step=1), Index(['coll', 'col2'], + [RangeIndex(start=0, stop=2, step=1), Index(['col1', 'col2'], dtype='object')] """ return [self.index, self.columns] @@ -640,16 +641,6 @@ def _repr_html_(self): Mainly for IPython notebook. """ - # qtconsole doesn't report its line width, and also - # behaves badly when outputting an HTML table - # that doesn't fit the window, so disable it. - # XXX: In IPython 3.x and above, the Qt console will not attempt to - # display HTML, so this check can be removed when support for - # IPython 2.x is no longer needed. 
- if console.in_qtconsole(): - # 'HTML output is disabled in QtConsole' - return None - if self._info_repr(): buf = StringIO(u("")) self.info(buf=buf) @@ -727,7 +718,7 @@ def style(self): See Also -------- - pandas.io.formats.style.Styler + io.formats.style.Styler """ from pandas.io.formats.style import Styler return Styler(self) @@ -847,7 +838,7 @@ def itertuples(self, index=True, name="Pandas"): ---------- index : bool, default True If True, return the index as the first element of the tuple. - name : str, default "Pandas" + name : str or None, default "Pandas" The name of the returned namedtuples or None to return regular tuples. @@ -1074,7 +1065,7 @@ def from_dict(cls, data, orient='columns', dtype=None, columns=None): Returns ------- - pandas.DataFrame + DataFrame See Also -------- @@ -1154,7 +1145,7 @@ def to_numpy(self, dtype=None, copy=False): Returns ------- - array : numpy.ndarray + numpy.ndarray See Also -------- @@ -1290,23 +1281,26 @@ def to_dict(self, orient='dict', into=dict): ('columns', self.columns.tolist()), ('data', [ list(map(com.maybe_box_datetimelike, t)) - for t in self.itertuples(index=False)] - ))) + for t in self.itertuples(index=False, name=None) + ]))) elif orient.lower().startswith('s'): return into_c((k, com.maybe_box_datetimelike(v)) for k, v in compat.iteritems(self)) elif orient.lower().startswith('r'): + columns = self.columns.tolist() + rows = (dict(zip(columns, row)) + for row in self.itertuples(index=False, name=None)) return [ into_c((k, com.maybe_box_datetimelike(v)) - for k, v in compat.iteritems(row._asdict())) - for row in self.itertuples(index=False)] + for k, v in compat.iteritems(row)) + for row in rows] elif orient.lower().startswith('i'): if not self.index.is_unique: raise ValueError( "DataFrame index must be unique for orient='index'." 
) return into_c((t[0], dict(zip(self.columns, t[1:]))) - for t in self.itertuples()) + for t in self.itertuples(name=None)) else: raise ValueError("orient '{o}' not understood".format(o=orient)) @@ -1406,7 +1400,7 @@ def to_gbq(self, destination_table, project_id=None, chunksize=None, See Also -------- pandas_gbq.to_gbq : This function in the pandas-gbq library. - pandas.read_gbq : Read a DataFrame from Google BigQuery. + read_gbq : Read a DataFrame from Google BigQuery. """ from pandas.io import gbq return gbq.to_gbq( @@ -1445,7 +1439,7 @@ def from_records(cls, data, index=None, exclude=None, columns=None, Returns ------- - df : DataFrame + DataFrame """ # Make a copy of the input columns so we can modify it @@ -1524,8 +1518,8 @@ def from_records(cls, data, index=None, exclude=None, columns=None, result_index = Index([], name=index) else: try: - to_remove = [arr_columns.get_loc(field) for field in index] - index_data = [arrays[i] for i in to_remove] + index_data = [arrays[arr_columns.get_loc(field)] + for field in index] result_index = ensure_index_from_sequences(index_data, names=index) @@ -1716,7 +1710,8 @@ def to_records(self, index=True, convert_datetime64=None, # string naming a type. if dtype_mapping is None: formats.append(v.dtype) - elif isinstance(dtype_mapping, (type, compat.string_types)): + elif isinstance(dtype_mapping, (type, np.dtype, + compat.string_types)): formats.append(dtype_mapping) else: element = "row" if i < index_len else "column" @@ -1760,7 +1755,7 @@ def from_items(cls, items, columns=None, orient='columns'): Returns ------- - frame : DataFrame + DataFrame """ warnings.warn("from_items is deprecated. Please use " @@ -1831,14 +1826,14 @@ def from_csv(cls, path, header=0, sep=',', index_col=0, parse_dates=True, Read CSV file. .. deprecated:: 0.21.0 - Use :func:`pandas.read_csv` instead. + Use :func:`read_csv` instead. 
- It is preferable to use the more powerful :func:`pandas.read_csv` + It is preferable to use the more powerful :func:`read_csv` for most general purposes, but ``from_csv`` makes for an easy roundtrip to and from a file (the exact counterpart of ``to_csv``), especially with a DataFrame of time series data. - This method only differs from the preferred :func:`pandas.read_csv` + This method only differs from the preferred :func:`read_csv` in some defaults: - `index_col` is ``0`` instead of ``None`` (take first column as index @@ -1871,11 +1866,11 @@ def from_csv(cls, path, header=0, sep=',', index_col=0, parse_dates=True, Returns ------- - y : DataFrame + DataFrame See Also -------- - pandas.read_csv + read_csv """ warnings.warn("from_csv is deprecated. Please use read_csv(...) " @@ -1961,47 +1956,9 @@ def to_panel(self): Returns ------- - panel : Panel + Panel """ - # only support this kind for now - if (not isinstance(self.index, MultiIndex) or # pragma: no cover - len(self.index.levels) != 2): - raise NotImplementedError('Only 2-level MultiIndex are supported.') - - if not self.index.is_unique: - raise ValueError("Can't convert non-uniquely indexed " - "DataFrame to Panel") - - self._consolidate_inplace() - - # minor axis must be sorted - if self.index.lexsort_depth < 2: - selfsorted = self.sort_index(level=0) - else: - selfsorted = self - - major_axis, minor_axis = selfsorted.index.levels - major_codes, minor_codes = selfsorted.index.codes - shape = len(major_axis), len(minor_axis) - - # preserve names, if any - major_axis = major_axis.copy() - major_axis.name = self.index.names[0] - - minor_axis = minor_axis.copy() - minor_axis.name = self.index.names[1] - - # create new axes - new_axes = [selfsorted.columns, major_axis, minor_axis] - - # create new manager - new_mgr = selfsorted._data.reshape_nd(axes=new_axes, - labels=[major_codes, - minor_codes], - shape=shape, - ref_items=selfsorted.columns) - - return self._constructor_expanddim(new_mgr) + raise 
NotImplementedError("Panel is being removed in pandas 0.25.0.") @deprecate_kwarg(old_arg_name='encoding', new_arg_name=None) def to_stata(self, fname, convert_dates=None, write_index=True, @@ -2521,7 +2478,7 @@ def memory_usage(self, index=True, deep=False): Returns ------- - sizes : Series + Series A Series whose index is the original column names and whose values is the memory usage of each column in bytes. @@ -2530,7 +2487,7 @@ def memory_usage(self, index=True, deep=False): numpy.ndarray.nbytes : Total bytes consumed by the elements of an ndarray. Series.memory_usage : Bytes consumed by a Series. - pandas.Categorical : Memory-efficient array for string values with + Categorical : Memory-efficient array for string values with many repeated values. DataFrame.info : Concise summary of a DataFrame. @@ -2739,7 +2696,7 @@ def get_value(self, index, col, takeable=False): Returns ------- - value : scalar value + scalar """ warnings.warn("get_value is deprecated and will be removed " @@ -2779,14 +2736,14 @@ def set_value(self, index, col, value, takeable=False): ---------- index : row label col : column label - value : scalar value + value : scalar takeable : interpret the index/col as indexers, default False Returns ------- - frame : DataFrame + DataFrame If label pair is contained, will be reference to calling DataFrame, - otherwise a new object + otherwise a new object. """ warnings.warn("set_value is deprecated and will be removed " "in a future release. Please use " @@ -2881,6 +2838,7 @@ def _ixs(self, i, axis=0): return result def __getitem__(self, key): + key = lib.item_from_zerodim(key) key = com.apply_if_callable(key, self) # shortcut if the key is in columns @@ -3005,28 +2963,30 @@ def query(self, expr, inplace=False, **kwargs): Parameters ---------- - expr : string + expr : str The query string to evaluate. You can refer to variables in the environment by prefixing them with an '@' character like ``@a + b``. 
inplace : bool Whether the query should modify the data in place or return - a modified copy + a modified copy. + **kwargs + See the documentation for :func:`eval` for complete details + on the keyword arguments accepted by :meth:`DataFrame.query`. .. versionadded:: 0.18.0 - kwargs : dict - See the documentation for :func:`pandas.eval` for complete details - on the keyword arguments accepted by :meth:`DataFrame.query`. - Returns ------- - q : DataFrame + DataFrame + DataFrame resulting from the provided query expression. See Also -------- - pandas.eval - DataFrame.eval + eval : Evaluate a string describing operations on + DataFrame columns. + DataFrame.eval : Evaluate a string describing operations on + DataFrame columns. Notes ----- @@ -3035,7 +2995,7 @@ def query(self, expr, inplace=False, **kwargs): multidimensional key (e.g., a DataFrame) then the result will be passed to :meth:`DataFrame.__getitem__`. - This method uses the top-level :func:`pandas.eval` function to + This method uses the top-level :func:`eval` function to evaluate the passed query. The :meth:`~pandas.DataFrame.query` method uses a slightly @@ -3065,9 +3025,23 @@ def query(self, expr, inplace=False, **kwargs): Examples -------- - >>> df = pd.DataFrame(np.random.randn(10, 2), columns=list('ab')) - >>> df.query('a > b') - >>> df[df.a > df.b] # same result as the previous expression + >>> df = pd.DataFrame({'A': range(1, 6), 'B': range(10, 0, -2)}) + >>> df + A B + 0 1 10 + 1 2 8 + 2 3 6 + 3 4 4 + 4 5 2 + >>> df.query('A > B') + A B + 4 5 2 + + The previous expression is equivalent to + + >>> df[df.A > df.B] + A B + 4 5 2 """ inplace = validate_bool_kwarg(inplace, 'inplace') if not isinstance(expr, compat.string_types): @@ -3108,7 +3082,7 @@ def eval(self, expr, inplace=False, **kwargs): .. versionadded:: 0.18.0. 
kwargs : dict - See the documentation for :func:`~pandas.eval` for complete details + See the documentation for :func:`eval` for complete details on the keyword arguments accepted by :meth:`~pandas.DataFrame.query`. @@ -3123,12 +3097,12 @@ def eval(self, expr, inplace=False, **kwargs): of a frame. DataFrame.assign : Can evaluate an expression or function to create new values for a column. - pandas.eval : Evaluate a Python expression as a string using various + eval : Evaluate a Python expression as a string using various backends. Notes ----- - For more details see the API documentation for :func:`~pandas.eval`. + For more details see the API documentation for :func:`~eval`. For detailed examples see :ref:`enhancing performance with eval <enhancingperf.eval>`. @@ -3204,7 +3178,7 @@ def select_dtypes(self, include=None, exclude=None): Returns ------- - subset : DataFrame + DataFrame The subset of the frame including the dtypes in ``include`` and excluding the dtypes in ``exclude``. @@ -3569,7 +3543,7 @@ def _sanitize_column(self, key, value, broadcast=True): Returns ------- - sanitized_column : numpy-array + numpy.ndarray """ def reindexer(value): @@ -3823,7 +3797,12 @@ def drop(self, labels=None, axis=0, index=None, columns=None, axis : {0 or 'index', 1 or 'columns'}, default 0 Whether to drop labels from the index (0 or 'index') or columns (1 or 'columns'). - index, columns : single label or list-like + index : single label or list-like + Alternative to specifying axis (``labels, axis=0`` + is equivalent to ``index=labels``). + + .. versionadded:: 0.21.0 + columns : single label or list-like Alternative to specifying axis (``labels, axis=1`` is equivalent to ``columns=labels``). @@ -3838,12 +3817,13 @@ def drop(self, labels=None, axis=0, index=None, columns=None, Returns ------- - dropped : pandas.DataFrame + DataFrame + DataFrame without the removed index or column labels. 
Raises ------ KeyError - If none of the labels are found in the selected axis + If any of the labels is not found in the selected axis. See Also -------- @@ -3856,7 +3836,7 @@ def drop(self, labels=None, axis=0, index=None, columns=None, Examples -------- - >>> df = pd.DataFrame(np.arange(12).reshape(3,4), + >>> df = pd.DataFrame(np.arange(12).reshape(3, 4), ... columns=['A', 'B', 'C', 'D']) >>> df A B C D @@ -3893,7 +3873,7 @@ def drop(self, labels=None, axis=0, index=None, columns=None, >>> df = pd.DataFrame(index=midx, columns=['big', 'small'], ... data=[[45, 30], [200, 100], [1.5, 1], [30, 20], ... [250, 150], [1.5, 0.8], [320, 250], - ... [1, 0.8], [0.3,0.2]]) + ... [1, 0.8], [0.3, 0.2]]) >>> df big small lama speed 45.0 30.0 @@ -3963,11 +3943,11 @@ def rename(self, *args, **kwargs): Returns ------- - renamed : DataFrame + DataFrame See Also -------- - pandas.DataFrame.rename_axis + DataFrame.rename_axis Examples -------- @@ -4051,7 +4031,8 @@ def set_index(self, keys, drop=True, append=False, inplace=False, This parameter can be either a single column key, a single array of the same length as the calling DataFrame, or a list containing an arbitrary combination of column keys and arrays. Here, "array" - encompasses :class:`Series`, :class:`Index` and ``np.ndarray``. + encompasses :class:`Series`, :class:`Index`, ``np.ndarray``, and + instances of :class:`abc.Iterator`. drop : bool, default True Delete columns to be used as the new index. 
append : bool, default False @@ -4127,33 +4108,34 @@ def set_index(self, keys, drop=True, append=False, inplace=False, 4 16 10 2014 31 """ inplace = validate_bool_kwarg(inplace, 'inplace') + if not isinstance(keys, list): + keys = [keys] err_msg = ('The parameter "keys" may be a column key, one-dimensional ' 'array, or a list containing only valid column keys and ' 'one-dimensional arrays.') - if (is_scalar(keys) or isinstance(keys, tuple) - or isinstance(keys, (ABCIndexClass, ABCSeries, np.ndarray))): - # make sure we have a container of keys/arrays we can iterate over - # tuples can appear as valid column keys! - keys = [keys] - elif not isinstance(keys, list): - raise ValueError(err_msg) - missing = [] for col in keys: - if (is_scalar(col) or isinstance(col, tuple)): - # if col is a valid column key, everything is fine - # tuples are always considered keys, never as list-likes - if col not in self: - missing.append(col) - elif (not isinstance(col, (ABCIndexClass, ABCSeries, - np.ndarray, list)) - or getattr(col, 'ndim', 1) > 1): - raise ValueError(err_msg) + if isinstance(col, (ABCIndexClass, ABCSeries, np.ndarray, + list, Iterator)): + # arrays are fine as long as they are one-dimensional + # iterators get converted to list below + if getattr(col, 'ndim', 1) != 1: + raise ValueError(err_msg) + else: + # everything else gets tried as a key; see GH 24969 + try: + found = col in self.columns + except TypeError: + raise TypeError(err_msg + ' Received column of ' + 'type {}'.format(type(col))) + else: + if not found: + missing.append(col) if missing: - raise KeyError('{}'.format(missing)) + raise KeyError('None of {} are in the columns'.format(missing)) if inplace: frame = self @@ -4183,6 +4165,9 @@ def set_index(self, keys, drop=True, append=False, inplace=False, elif isinstance(col, (list, np.ndarray)): arrays.append(col) names.append(None) + elif isinstance(col, Iterator): + arrays.append(list(col)) + names.append(None) # from here, col can only be a column label 
else: arrays.append(frame[col]._values) @@ -4190,6 +4175,15 @@ def set_index(self, keys, drop=True, append=False, inplace=False, if drop: to_remove.append(col) + if len(arrays[-1]) != len(self): + # check newest element against length of calling frame, since + # ensure_index_from_sequences would not raise for append=False. + raise ValueError('Length mismatch: Expected {len_self} rows, ' + 'received array of length {len_col}'.format( + len_self=len(self), + len_col=len(arrays[-1]) + )) + index = ensure_index_from_sequences(arrays, names) if verify_integrity and not index.is_unique: @@ -4614,7 +4608,8 @@ def dropna(self, axis=0, how='any', thresh=None, subset=None, def drop_duplicates(self, subset=None, keep='first', inplace=False): """ Return DataFrame with duplicate rows removed, optionally only - considering certain columns. + considering certain columns. Indexes, including time indexes + are ignored. Parameters ---------- @@ -4630,7 +4625,7 @@ def drop_duplicates(self, subset=None, keep='first', inplace=False): Returns ------- - deduplicated : DataFrame + DataFrame """ if self.empty: return self.copy() @@ -4664,13 +4659,13 @@ def duplicated(self, subset=None, keep='first'): Returns ------- - duplicated : Series + Series """ from pandas.core.sorting import get_group_index from pandas._libs.hashtable import duplicated_int64, _SIZE_HINT_LIMIT if self.empty: - return Series() + return Series(dtype=bool) def f(vals): labels, shape = algorithms.factorize( @@ -5032,7 +5027,7 @@ def swaplevel(self, i=-2, j=-1, axis=0): Returns ------- - swapped : same type as caller (new object) + DataFrame .. versionchanged:: 0.18.1 @@ -5130,8 +5125,7 @@ def _combine_const(self, other, func): def combine(self, other, func, fill_value=None, overwrite=True): """ - Perform column-wise combine with another DataFrame based on a - passed function. + Perform column-wise combine with another DataFrame. Combines a DataFrame with `other` DataFrame using `func` to element-wise combine columns. 
The row and column indexes of the @@ -5147,13 +5141,14 @@ def combine(self, other, func, fill_value=None, overwrite=True): fill_value : scalar value, default None The value to fill NaNs with prior to passing any column to the merge func. - overwrite : boolean, default True + overwrite : bool, default True If True, columns in `self` that do not exist in `other` will be overwritten with NaNs. Returns ------- - result : DataFrame + DataFrame + Combination of the provided DataFrames. See Also -------- @@ -5197,15 +5192,15 @@ def combine(self, other, func, fill_value=None, overwrite=True): >>> df1 = pd.DataFrame({'A': [0, 0], 'B': [None, 4]}) >>> df2 = pd.DataFrame({'A': [1, 1], 'B': [None, 3]}) >>> df1.combine(df2, take_smaller, fill_value=-5) - A B - 0 0 NaN + A B + 0 0 -5.0 1 0 3.0 Example that demonstrates the use of `overwrite` and behavior when the axis differ between the dataframes. >>> df1 = pd.DataFrame({'A': [0, 0], 'B': [4, 4]}) - >>> df2 = pd.DataFrame({'B': [3, 3], 'C': [-10, 1],}, index=[1, 2]) + >>> df2 = pd.DataFrame({'B': [3, 3], 'C': [-10, 1], }, index=[1, 2]) >>> df1.combine(df2, take_smaller) A B C 0 NaN NaN NaN @@ -5220,7 +5215,7 @@ def combine(self, other, func, fill_value=None, overwrite=True): Demonstrating the preference of the passed in dataframe. - >>> df2 = pd.DataFrame({'B': [3, 3], 'C': [1, 1],}, index=[1, 2]) + >>> df2 = pd.DataFrame({'B': [3, 3], 'C': [1, 1], }, index=[1, 2]) >>> df2.combine(df1, take_smaller) A B C 0 0.0 NaN NaN @@ -5311,7 +5306,7 @@ def combine_first(self, other): Returns ------- - combined : DataFrame + DataFrame See Also -------- @@ -5672,7 +5667,7 @@ def pivot(self, index=None, columns=None, values=None): Returns ------- - table : DataFrame + DataFrame See Also -------- @@ -5704,19 +5699,19 @@ def pivot(self, index=None, columns=None, values=None): This first example aggregates values by taking the sum. 
- >>> table = pivot_table(df, values='D', index=['A', 'B'], + >>> table = pd.pivot_table(df, values='D', index=['A', 'B'], ... columns=['C'], aggfunc=np.sum) >>> table C large small A B - bar one 4 5 - two 7 6 - foo one 4 1 - two NaN 6 + bar one 4.0 5.0 + two 7.0 6.0 + foo one 4.0 1.0 + two NaN 6.0 We can also fill missing values using the `fill_value` parameter. - >>> table = pivot_table(df, values='D', index=['A', 'B'], + >>> table = pd.pivot_table(df, values='D', index=['A', 'B'], ... columns=['C'], aggfunc=np.sum, fill_value=0) >>> table C large small @@ -5728,12 +5723,11 @@ def pivot(self, index=None, columns=None, values=None): The next example aggregates by taking the mean across multiple columns. - >>> table = pivot_table(df, values=['D', 'E'], index=['A', 'C'], + >>> table = pd.pivot_table(df, values=['D', 'E'], index=['A', 'C'], ... aggfunc={'D': np.mean, ... 'E': np.mean}) >>> table - D E - mean mean + D E A C bar large 5.500000 7.500000 small 5.500000 8.500000 @@ -5743,17 +5737,17 @@ def pivot(self, index=None, columns=None, values=None): We can also calculate multiple types of aggregations for any given value column. - >>> table = pivot_table(df, values=['D', 'E'], index=['A', 'C'], + >>> table = pd.pivot_table(df, values=['D', 'E'], index=['A', 'C'], ... aggfunc={'D': np.mean, ... 
'E': [min, max, np.mean]}) >>> table - D E - mean max mean min + D E + mean max mean min A C - bar large 5.500000 9 7.500000 6 - small 5.500000 9 8.500000 8 - foo large 2.000000 5 4.500000 4 - small 2.333333 6 4.333333 2 + bar large 5.500000 9.0 7.500000 6.0 + small 5.500000 9.0 8.500000 8.0 + foo large 2.000000 5.0 4.500000 4.0 + small 2.333333 6.0 4.333333 2.0 """ @Substitution('') @@ -5813,9 +5807,9 @@ def stack(self, level=-1, dropna=True): Notes ----- The function is named by analogy with a collection of books - being re-organised from being side by side on a horizontal + being reorganized from being side by side on a horizontal position (the columns of the dataframe) to being stacked - vertically on top of of each other (in the index of the + vertically on top of each other (in the index of the dataframe). Examples @@ -5959,7 +5953,7 @@ def unstack(self, level=-1, fill_value=None): Returns ------- - unstacked : DataFrame or Series + Series or DataFrame See Also -------- @@ -6001,7 +5995,7 @@ def unstack(self, level=-1, fill_value=None): return unstack(self, level, fill_value) _shared_docs['melt'] = (""" - Unpivots a DataFrame from wide format to long format, optionally + Unpivot a DataFrame from wide format to long format, optionally leaving identifier variables set. This function is useful to massage a DataFrame into a format where one @@ -6125,7 +6119,7 @@ def diff(self, periods=1, axis=0): Returns ------- - diffed : DataFrame + DataFrame See Also -------- @@ -6238,11 +6232,11 @@ def _gotitem(self, -------- DataFrame.apply : Perform any type of operations. DataFrame.transform : Perform transformation type operations. - pandas.core.groupby.GroupBy : Perform operations over groups. - pandas.core.resample.Resampler : Perform operations over resampled bins. - pandas.core.window.Rolling : Perform operations over rolling window. - pandas.core.window.Expanding : Perform operations over expanding window. 
- pandas.core.window.EWM : Perform operation over exponential weighted + core.groupby.GroupBy : Perform operations over groups. + core.resample.Resampler : Perform operations over resampled bins. + core.window.Rolling : Perform operations over rolling window. + core.window.Expanding : Perform operations over expanding window. + core.window.EWM : Perform operation over exponential weighted window. """) @@ -6397,7 +6391,9 @@ def apply(self, func, axis=0, broadcast=None, raw=False, reduce=None, Returns ------- - applied : Series or DataFrame + Series or DataFrame + Result of applying ``func`` along the given axis of the + DataFrame. See Also -------- @@ -6416,7 +6412,7 @@ def apply(self, func, axis=0, broadcast=None, raw=False, reduce=None, Examples -------- - >>> df = pd.DataFrame([[4, 9],] * 3, columns=['A', 'B']) + >>> df = pd.DataFrame([[4, 9]] * 3, columns=['A', 'B']) >>> df A B 0 4 9 @@ -6590,11 +6586,11 @@ def append(self, other, ignore_index=False, Returns ------- - appended : DataFrame + DataFrame See Also -------- - pandas.concat : General function to concatenate DataFrame, Series + concat : General function to concatenate DataFrame, Series or Panel objects. Notes @@ -6891,41 +6887,67 @@ def round(self, decimals=0, *args, **kwargs): columns not included in `decimals` will be left as is. Elements of `decimals` which are not columns of the input will be ignored. + *args + Additional keywords have no effect but might be accepted for + compatibility with numpy. + **kwargs + Additional keywords have no effect but might be accepted for + compatibility with numpy. Returns ------- DataFrame + A DataFrame with the affected columns rounded to the specified + number of decimal places. See Also -------- - numpy.around - Series.round + numpy.around : Round a numpy array to the given number of decimals. + Series.round : Round a Series to the given number of decimals. Examples -------- - >>> df = pd.DataFrame(np.random.random([3, 3]), - ... 
columns=['A', 'B', 'C'], index=['first', 'second', 'third']) + >>> df = pd.DataFrame([(.21, .32), (.01, .67), (.66, .03), (.21, .18)], + ... columns=['dogs', 'cats']) >>> df - A B C - first 0.028208 0.992815 0.173891 - second 0.038683 0.645646 0.577595 - third 0.877076 0.149370 0.491027 - >>> df.round(2) - A B C - first 0.03 0.99 0.17 - second 0.04 0.65 0.58 - third 0.88 0.15 0.49 - >>> df.round({'A': 1, 'C': 2}) - A B C - first 0.0 0.992815 0.17 - second 0.0 0.645646 0.58 - third 0.9 0.149370 0.49 - >>> decimals = pd.Series([1, 0, 2], index=['A', 'B', 'C']) + dogs cats + 0 0.21 0.32 + 1 0.01 0.67 + 2 0.66 0.03 + 3 0.21 0.18 + + By providing an integer each column is rounded to the same number + of decimal places + + >>> df.round(1) + dogs cats + 0 0.2 0.3 + 1 0.0 0.7 + 2 0.7 0.0 + 3 0.2 0.2 + + With a dict, the number of places for specific columns can be + specfified with the column names as key and the number of decimal + places as value + + >>> df.round({'dogs': 1, 'cats': 0}) + dogs cats + 0 0.2 0.0 + 1 0.0 1.0 + 2 0.7 0.0 + 3 0.2 0.0 + + Using a Series, the number of places for specific columns can be + specfified with the column names as index and the number of + decimal places as value + + >>> decimals = pd.Series([0, 1], index=['cats', 'dogs']) >>> df.round(decimals) - A B C - first 0.0 1 0.17 - second 0.0 1 0.58 - third 0.9 0 0.49 + dogs cats + 0 0.2 0.0 + 1 0.0 1.0 + 2 0.7 0.0 + 3 0.2 0.0 """ from pandas.core.reshape.concat import concat @@ -6978,16 +7000,18 @@ def corr(self, method='pearson', min_periods=1): * spearman : Spearman rank correlation * callable: callable with input two 1d ndarrays and returning a float + .. versionadded:: 0.24.0 min_periods : int, optional Minimum number of observations required per pair of columns - to have a valid result. Currently only available for pearson - and spearman correlation + to have a valid result. Currently only available for Pearson + and Spearman correlation. 
Returns ------- - y : DataFrame + DataFrame + Correlation matrix. See Also -------- @@ -6996,14 +7020,15 @@ def corr(self, method='pearson', min_periods=1): Examples -------- - >>> histogram_intersection = lambda a, b: np.minimum(a, b - ... ).sum().round(decimals=1) + >>> def histogram_intersection(a, b): + ... v = np.minimum(a, b).sum().round(decimals=1) + ... return v >>> df = pd.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)], ... columns=['dogs', 'cats']) >>> df.corr(method=histogram_intersection) - dogs cats - dogs 1.0 0.3 - cats 0.3 1.0 + dogs cats + dogs 1.0 0.3 + cats 0.3 1.0 """ numeric_df = self._get_numeric_data() cols = numeric_df.columns @@ -7078,10 +7103,10 @@ def cov(self, min_periods=None): See Also -------- - pandas.Series.cov : Compute covariance with another Series. - pandas.core.window.EWM.cov: Exponential weighted sample covariance. - pandas.core.window.Expanding.cov : Expanding sample covariance. - pandas.core.window.Rolling.cov : Rolling sample covariance. + Series.cov : Compute covariance with another Series. + core.window.EWM.cov: Exponential weighted sample covariance. + core.window.Expanding.cov : Expanding sample covariance. + core.window.Rolling.cov : Rolling sample covariance. Notes ----- @@ -7166,10 +7191,11 @@ def corrwith(self, other, axis=0, drop=False, method='pearson'): Parameters ---------- other : DataFrame, Series + Object with which to compute correlations. axis : {0 or 'index', 1 or 'columns'}, default 0 - 0 or 'index' to compute column-wise, 1 or 'columns' for row-wise - drop : boolean, default False - Drop missing indices from result + 0 or 'index' to compute column-wise, 1 or 'columns' for row-wise. + drop : bool, default False + Drop missing indices from result. 
method : {'pearson', 'kendall', 'spearman'} or callable * pearson : standard correlation coefficient * kendall : Kendall Tau correlation coefficient @@ -7181,7 +7207,8 @@ def corrwith(self, other, axis=0, drop=False, method='pearson'): Returns ------- - correls : Series + Series + Pairwise correlations. See Also ------- @@ -7262,7 +7289,7 @@ def count(self, axis=0, level=None, numeric_only=False): If the axis is a `MultiIndex` (hierarchical), count along a particular `level`, collapsing into a `DataFrame`. A `str` specifies the level name. - numeric_only : boolean, default False + numeric_only : bool, default False Include only `float`, `int` or `boolean` data. Returns @@ -7464,7 +7491,8 @@ def f(x): if filter_type is None or filter_type == 'numeric': data = self._get_numeric_data() elif filter_type == 'bool': - data = self + # GH 25101, # GH 24434 + data = self._get_bool_data() if axis == 0 else self else: # pragma: no cover msg = ("Generating numeric_only data with filter_type {f}" "not supported.".format(f=filter_type)) @@ -7510,7 +7538,7 @@ def nunique(self, axis=0, dropna=True): Returns ------- - nunique : Series + Series See Also -------- @@ -7548,7 +7576,8 @@ def idxmin(self, axis=0, skipna=True): Returns ------- - idxmin : Series + Series + Indexes of minima along the specified axis. Raises ------ @@ -7584,7 +7613,8 @@ def idxmax(self, axis=0, skipna=True): Returns ------- - idxmax : Series + Series + Indexes of maxima along the specified axis. Raises ------ @@ -7731,12 +7761,12 @@ def quantile(self, q=0.5, axis=0, numeric_only=True, Returns ------- - quantiles : Series or DataFrame + Series or DataFrame - - If ``q`` is an array, a DataFrame will be returned where the + If ``q`` is an array, a DataFrame will be returned where the index is ``q``, the columns are the columns of self, and the values are the quantiles. 
- - If ``q`` is a float, a Series will be returned where the + If ``q`` is a float, a Series will be returned where the index is the columns of self and the values are the quantiles. See Also @@ -7801,19 +7831,19 @@ def to_timestamp(self, freq=None, how='start', axis=0, copy=True): Parameters ---------- - freq : string, default frequency of PeriodIndex - Desired frequency + freq : str, default frequency of PeriodIndex + Desired frequency. how : {'s', 'e', 'start', 'end'} Convention for converting period to timestamp; start of period - vs. end + vs. end. axis : {0 or 'index', 1 or 'columns'}, default 0 - The axis to convert (the index by default) - copy : boolean, default True - If false then underlying input data is not copied + The axis to convert (the index by default). + copy : bool, default True + If False then underlying input data is not copied. Returns ------- - df : DataFrame with DatetimeIndex + DataFrame with DatetimeIndex """ new_data = self._data if copy: @@ -7837,15 +7867,16 @@ def to_period(self, freq=None, axis=0, copy=True): Parameters ---------- - freq : string, default + freq : str, default + Frequency of the PeriodIndex. axis : {0 or 'index', 1 or 'columns'}, default 0 - The axis to convert (the index by default) - copy : boolean, default True - If False then underlying input data is not copied + The axis to convert (the index by default). + copy : bool, default True + If False then underlying input data is not copied. Returns ------- - ts : TimeSeries with PeriodIndex + TimeSeries with PeriodIndex """ new_data = self._data if copy: @@ -7918,7 +7949,7 @@ def isin(self, values): match. Note that 'falcon' does not match based on the number of legs in df2. - >>> other = pd.DataFrame({'num_legs': [8, 2],'num_wings': [0, 2]}, + >>> other = pd.DataFrame({'num_legs': [8, 2], 'num_wings': [0, 2]}, ... 
index=['spider', 'falcon']) >>> df.isin(other) num_legs num_wings diff --git a/pandas/core/generic.py b/pandas/core/generic.py index a351233a77465..ee8f9cba951b3 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -61,6 +61,10 @@ by : str or list of str Name or list of names to sort by""") +# sentinel value to use as kwarg in place of None when None has special meaning +# and needs to be distinguished from a user explicitly passing None. +sentinel = object() + def _single_replace(self, to_replace, method, inplace, limit): """ @@ -290,11 +294,16 @@ def _construct_axes_dict_for_slice(self, axes=None, **kwargs): d.update(kwargs) return d - def _construct_axes_from_arguments(self, args, kwargs, require_all=False): + def _construct_axes_from_arguments( + self, args, kwargs, require_all=False, sentinel=None): """Construct and returns axes if supplied in args/kwargs. If require_all, raise if all axis arguments are not supplied return a tuple of (axes, kwargs). + + sentinel specifies the default parameter when an axis is not + supplied; useful to distinguish when a user explicitly passes None + in scenarios where None has special meaning. """ # construct the args @@ -322,7 +331,7 @@ def _construct_axes_from_arguments(self, args, kwargs, require_all=False): raise TypeError("not enough/duplicate arguments " "specified!") - axes = {a: kwargs.pop(a, None) for a in self._AXIS_ORDERS} + axes = {a: kwargs.pop(a, sentinel) for a in self._AXIS_ORDERS} return axes, kwargs @classmethod @@ -639,6 +648,8 @@ def transpose(self, *args, **kwargs): copy : boolean, default False Make a copy of the underlying data. Mixed-dtype data will always result in a copy + **kwargs + Additional keyword arguments will be passed to the function. Returns ------- @@ -763,18 +774,18 @@ def pop(self, item): Parameters ---------- item : str - Column label to be popped + Label of column to be popped. 
Returns ------- - popped : Series + Series Examples -------- - >>> df = pd.DataFrame([('falcon', 'bird', 389.0), - ... ('parrot', 'bird', 24.0), - ... ('lion', 'mammal', 80.5), - ... ('monkey', 'mammal', np.nan)], + >>> df = pd.DataFrame([('falcon', 'bird', 389.0), + ... ('parrot', 'bird', 24.0), + ... ('lion', 'mammal', 80.5), + ... ('monkey','mammal', np.nan)], ... columns=('name', 'class', 'max_speed')) >>> df name class max_speed @@ -926,7 +937,7 @@ def swaplevel(self, i=-2, j=-1, axis=0): Parameters ---------- - i, j : int, string (can be mixed) + i, j : int, str (can be mixed) Level of index to be swapped. Can pass level name as string. Returns @@ -962,9 +973,9 @@ def rename(self, *args, **kwargs): and raise on DataFrame or Panel. dict-like or functions are transformations to apply to that axis' values - copy : boolean, default True - Also copy underlying data - inplace : boolean, default False + copy : bool, default True + Also copy underlying data. + inplace : bool, default False Whether to return a new %(klass)s. If True then value of copy is ignored. level : int or level name, default None @@ -977,7 +988,7 @@ def rename(self, *args, **kwargs): See Also -------- - pandas.NDFrame.rename_axis + NDFrame.rename_axis Examples -------- @@ -1089,7 +1100,7 @@ def rename(self, *args, **kwargs): @rewrite_axis_style_signature('mapper', [('copy', True), ('inplace', False)]) - def rename_axis(self, mapper=None, **kwargs): + def rename_axis(self, mapper=sentinel, **kwargs): """ Set the name of the axis for the index or columns. 
@@ -1218,7 +1229,8 @@ class name cat 4 0 monkey 2 2 """ - axes, kwargs = self._construct_axes_from_arguments((), kwargs) + axes, kwargs = self._construct_axes_from_arguments( + (), kwargs, sentinel=sentinel) copy = kwargs.pop('copy', True) inplace = kwargs.pop('inplace', False) axis = kwargs.pop('axis', 0) @@ -1231,7 +1243,7 @@ class name inplace = validate_bool_kwarg(inplace, 'inplace') - if (mapper is not None): + if (mapper is not sentinel): # Use v0.23 behavior if a scalar or list non_mapper = is_scalar(mapper) or (is_list_like(mapper) and not is_dict_like(mapper)) @@ -1254,7 +1266,7 @@ class name for axis in lrange(self._AXIS_LEN): v = axes.get(self._AXIS_NAMES[axis]) - if v is None: + if v is sentinel: continue non_mapper = is_scalar(v) or (is_list_like(v) and not is_dict_like(v)) @@ -1321,7 +1333,6 @@ def _set_axis_name(self, name, axis=0, inplace=False): cat 4 monkey 2 """ - pd.MultiIndex.from_product([["mammal"], ['dog', 'cat', 'monkey']]) axis = self._get_axis_number(axis) idx = self._get_axis(axis).set_names(name) @@ -1554,14 +1565,14 @@ def _is_label_reference(self, key, axis=0): ------- is_label: bool """ - axis = self._get_axis_number(axis) - other_axes = [ax for ax in range(self._AXIS_LEN) if ax != axis] - if self.ndim > 2: raise NotImplementedError( "_is_label_reference is not implemented for {type}" .format(type=type(self))) + axis = self._get_axis_number(axis) + other_axes = (ax for ax in range(self._AXIS_LEN) if ax != axis) + return (key is not None and is_hashable(key) and any(key in self.axes[ax] for ax in other_axes)) @@ -1613,15 +1624,14 @@ def _check_label_or_level_ambiguity(self, key, axis=0): ------ ValueError: `key` is ambiguous """ - - axis = self._get_axis_number(axis) - other_axes = [ax for ax in range(self._AXIS_LEN) if ax != axis] - if self.ndim > 2: raise NotImplementedError( "_check_label_or_level_ambiguity is not implemented for {type}" .format(type=type(self))) + axis = self._get_axis_number(axis) + other_axes = (ax for ax in 
range(self._AXIS_LEN) if ax != axis) + if (key is not None and is_hashable(key) and key in self.axes[axis].names and @@ -1679,15 +1689,14 @@ def _get_label_or_level_values(self, key, axis=0): if `key` is ambiguous. This will become an ambiguity error in a future version """ - - axis = self._get_axis_number(axis) - other_axes = [ax for ax in range(self._AXIS_LEN) if ax != axis] - if self.ndim > 2: raise NotImplementedError( "_get_label_or_level_values is not implemented for {type}" .format(type=type(self))) + axis = self._get_axis_number(axis) + other_axes = [ax for ax in range(self._AXIS_LEN) if ax != axis] + if self._is_label_reference(key, axis=axis): self._check_label_or_level_ambiguity(key, axis=axis) values = self.xs(key, axis=other_axes[0])._values @@ -1743,14 +1752,13 @@ def _drop_labels_or_levels(self, keys, axis=0): ValueError if any `keys` match neither a label nor a level """ - - axis = self._get_axis_number(axis) - if self.ndim > 2: raise NotImplementedError( "_drop_labels_or_levels is not implemented for {type}" .format(type=type(self))) + axis = self._get_axis_number(axis) + # Validate keys keys = com.maybe_make_list(keys) invalid_keys = [k for k in keys if not @@ -1807,7 +1815,7 @@ def __hash__(self): ' hashed'.format(self.__class__.__name__)) def __iter__(self): - """Iterate over infor axis""" + """Iterate over info axis""" return iter(self._info_axis) # can we get a better explanation of this? @@ -1851,8 +1859,8 @@ def empty(self): See Also -------- - pandas.Series.dropna - pandas.DataFrame.dropna + Series.dropna + DataFrame.dropna Notes ----- @@ -2799,14 +2807,17 @@ def to_latex(self, buf=None, columns=None, col_space=None, header=True, defaults to 'ascii' on Python 2 and 'utf-8' on Python 3. decimal : str, default '.' Character recognized as decimal separator, e.g. ',' in Europe. + .. versionadded:: 0.18.0 multicolumn : bool, default True Use \multicolumn to enhance MultiIndex columns. The default will be read from the config module. + .. 
versionadded:: 0.20.0 multicolumn_format : str, default 'l' The alignment for multicolumns, similar to `column_format` The default will be read from the config module. + .. versionadded:: 0.20.0 multirow : bool, default False Use \multirow to enhance MultiIndex rows. Requires adding a @@ -2814,6 +2825,7 @@ def to_latex(self, buf=None, columns=None, col_space=None, header=True, centered labels (instead of top-aligned) across the contained rows, separating groups via clines. The default will be read from the pandas config module. + .. versionadded:: 0.20.0 Returns @@ -2938,7 +2950,7 @@ def to_csv(self, path_or_buf=None, sep=",", na_rep='', float_format=None, will treat them as non-numeric. quotechar : str, default '\"' String of length 1. Character used to quote fields. - line_terminator : string, optional + line_terminator : str, optional The newline character or character sequence to use in the output file. Defaults to `os.linesep`, which depends on the OS in which this method is called ('\n' for linux, '\r\n' for Windows, i.e.). @@ -4940,11 +4952,15 @@ def pipe(self, func, *args, **kwargs): Returns ------- - DataFrame, Series or scalar - if DataFrame.agg is called with a single function, returns a Series - if DataFrame.agg is called with several functions, returns a DataFrame - if Series.agg is called with single function, returns a scalar - if Series.agg is called with several functions, returns a Series + scalar, Series or DataFrame + + The return can be: + + * scalar : when Series.agg is called with single function + * Series : when DataFrame.agg is called with a single function + * DataFrame : when DataFrame.agg is called with several functions + + Return scalar, Series or DataFrame. %(see_also)s @@ -5262,8 +5278,8 @@ def values(self): See Also -------- DataFrame.to_numpy : Recommended alternative to this method. - pandas.DataFrame.index : Retrieve the index labels. - pandas.DataFrame.columns : Retrieving the column names. 
+ DataFrame.index : Retrieve the index labels. + DataFrame.columns : Retrieving the column names. Notes ----- @@ -5334,18 +5350,18 @@ def get_values(self): Return an ndarray after converting sparse values to dense. This is the same as ``.values`` for non-sparse data. For sparse - data contained in a `pandas.SparseArray`, the data are first + data contained in a `SparseArray`, the data are first converted to a dense representation. Returns ------- numpy.ndarray - Numpy representation of DataFrame + Numpy representation of DataFrame. See Also -------- values : Numpy representation of DataFrame. - pandas.SparseArray : Container for sparse data. + SparseArray : Container for sparse data. Examples -------- @@ -5419,7 +5435,7 @@ def get_ftype_counts(self): ------- dtype : Series Series with the count of columns with each type and - sparsity (dense/sparse) + sparsity (dense/sparse). See Also -------- @@ -5466,7 +5482,7 @@ def dtypes(self): See Also -------- - pandas.DataFrame.ftypes : Dtype and sparsity information. + DataFrame.ftypes : Dtype and sparsity information. Examples -------- @@ -5502,8 +5518,8 @@ def ftypes(self): See Also -------- - pandas.DataFrame.dtypes: Series with just dtype information. - pandas.SparseDataFrame : Container for sparse tabular data. + DataFrame.dtypes: Series with just dtype information. + SparseDataFrame : Container for sparse tabular data. Notes ----- @@ -5950,17 +5966,18 @@ def fillna(self, value=None, method=None, axis=None, inplace=False, value : scalar, dict, Series, or DataFrame Value to use to fill holes (e.g. 0), alternately a dict/Series/DataFrame of values specifying which value to use for - each index (for a Series) or column (for a DataFrame). (values not - in the dict/Series/DataFrame will not be filled). This value cannot + each index (for a Series) or column (for a DataFrame). Values not + in the dict/Series/DataFrame will not be filled. This value cannot be a list. 
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None Method to use for filling holes in reindexed Series pad / ffill: propagate last valid observation forward to next valid - backfill / bfill: use NEXT valid observation to fill gap + backfill / bfill: use next valid observation to fill gap. axis : %(axes_single_arg)s - inplace : boolean, default False - If True, fill in place. Note: this will modify any - other views on this object, (e.g. a no-copy slice for a column in a + Axis along which to fill missing values. + inplace : bool, default False + If True, fill in-place. Note: this will modify any + other views on this object (e.g., a no-copy slice for a column in a DataFrame). limit : int, default None If method is specified, this is the maximum number of consecutive @@ -5970,18 +5987,20 @@ def fillna(self, value=None, method=None, axis=None, inplace=False, maximum number of entries along the entire axis where NaNs will be filled. Must be greater than 0 if not None. downcast : dict, default is None - a dict of item->dtype of what to downcast if possible, + A dict of item->dtype of what to downcast if possible, or the string 'infer' which will try to downcast to an appropriate - equal type (e.g. float64 to int64 if possible) + equal type (e.g. float64 to int64 if possible). Returns ------- - filled : %(klass)s + %(klass)s + Object with missing values filled. See Also -------- interpolate : Fill NaN values using interpolation. - reindex, asfreq + reindex : Conform object to new index. + asfreq : Convert TimeSeries to specified frequency. Examples -------- @@ -5989,7 +6008,7 @@ def fillna(self, value=None, method=None, axis=None, inplace=False, ... [3, 4, np.nan, 1], ... [np.nan, np.nan, np.nan, 5], ... [np.nan, 3, np.nan, 4]], - ... columns=list('ABCD')) + ... columns=list('ABCD')) >>> df A B C D 0 NaN 2.0 NaN 0 @@ -6599,10 +6618,10 @@ def replace(self, to_replace=None, value=None, inplace=False, limit=None, * 'pad': Fill in NaNs using existing values. 
* 'nearest', 'zero', 'slinear', 'quadratic', 'cubic', 'spline', 'barycentric', 'polynomial': Passed to - `scipy.interpolate.interp1d`. Both 'polynomial' and 'spline' - require that you also specify an `order` (int), - e.g. ``df.interpolate(method='polynomial', order=4)``. - These use the numerical values of the index. + `scipy.interpolate.interp1d`. These methods use the numerical + values of the index. Both 'polynomial' and 'spline' require that + you also specify an `order` (int), e.g. + ``df.interpolate(method='polynomial', order=5)``. * 'krogh', 'piecewise_polynomial', 'spline', 'pchip', 'akima': Wrappers around the SciPy interpolation methods of similar names. See `Notes`. @@ -6637,7 +6656,7 @@ def replace(self, to_replace=None, value=None, inplace=False, limit=None, (interpolate). * 'outside': Only fill NaNs outside valid values (extrapolate). - .. versionadded:: 0.21.0 + .. versionadded:: 0.23.0 downcast : optional, 'infer' or None, defaults to None Downcast dtypes if possible. @@ -6648,7 +6667,7 @@ def replace(self, to_replace=None, value=None, inplace=False, limit=None, ------- Series or DataFrame Returns the same object type as the caller, interpolated at - some or all ``NaN`` values + some or all ``NaN`` values. See Also -------- @@ -6743,7 +6762,7 @@ def replace(self, to_replace=None, value=None, inplace=False, limit=None, Note how the first entry in column 'b' remains ``NaN``, because there is no entry befofe it to use for interpolation. - >>> df = pd.DataFrame([(0.0, np.nan, -1.0, 1.0), + >>> df = pd.DataFrame([(0.0, np.nan, -1.0, 1.0), ... (np.nan, 2.0, np.nan, np.nan), ... (2.0, 3.0, np.nan, 9.0), ... 
(np.nan, 4.0, -4.0, 16.0)], @@ -6868,11 +6887,15 @@ def asof(self, where, subset=None): ------- scalar, Series, or DataFrame - * scalar : when `self` is a Series and `where` is a scalar - * Series: when `self` is a Series and `where` is an array-like, - or when `self` is a DataFrame and `where` is a scalar - * DataFrame : when `self` is a DataFrame and `where` is an - array-like + The return can be: + + * scalar : when `self` is a Series and `where` is a scalar + * Series: when `self` is a Series and `where` is an array-like, + or when `self` is a DataFrame and `where` is a scalar + * DataFrame : when `self` is a DataFrame and `where` is an + array-like + + Return scalar, Series, or DataFrame. See Also -------- @@ -7212,9 +7235,9 @@ def clip(self, lower=None, upper=None, axis=None, inplace=False, upper : float or array_like, default None Maximum threshold value. All values above this threshold will be set to it. - axis : int or string axis name, optional + axis : int or str axis name, optional Align object with lower and upper along the given axis. - inplace : boolean, default False + inplace : bool, default False Whether to perform the operation in place on the data. .. versionadded:: 0.21.0 @@ -7226,7 +7249,7 @@ def clip(self, lower=None, upper=None, axis=None, inplace=False, ------- Series or DataFrame Same type as calling object with the values outside the - clip boundaries replaced + clip boundaries replaced. Examples -------- @@ -7336,7 +7359,7 @@ def clip_upper(self, threshold, axis=None, inplace=False): axis : {0 or 'index', 1 or 'columns'}, default 0 Align object with `threshold` along the given axis. - inplace : boolean, default False + inplace : bool, default False Whether to perform the operation in place on the data. .. versionadded:: 0.21.0 @@ -7417,7 +7440,7 @@ def clip_lower(self, threshold, axis=None, inplace=False): axis : {0 or 'index', 1 or 'columns'}, default 0 Align `self` with `threshold` along the given axis. 
- inplace : boolean, default False + inplace : bool, default False Whether to perform the operation in place on the data. .. versionadded:: 0.21.0 @@ -7574,9 +7597,9 @@ def groupby(self, by=None, axis=0, level=None, as_index=True, sort=True, Examples -------- - >>> df = pd.DataFrame({'Animal' : ['Falcon', 'Falcon', - ... 'Parrot', 'Parrot'], - ... 'Max Speed' : [380., 370., 24., 26.]}) + >>> df = pd.DataFrame({'Animal': ['Falcon', 'Falcon', + ... 'Parrot', 'Parrot'], + ... 'Max Speed': [380., 370., 24., 26.]}) >>> df Animal Max Speed 0 Falcon 380.0 @@ -7595,16 +7618,16 @@ def groupby(self, by=None, axis=0, level=None, as_index=True, sort=True, using the `level` parameter: >>> arrays = [['Falcon', 'Falcon', 'Parrot', 'Parrot'], - ... ['Capitve', 'Wild', 'Capitve', 'Wild']] + ... ['Captive', 'Wild', 'Captive', 'Wild']] >>> index = pd.MultiIndex.from_arrays(arrays, names=('Animal', 'Type')) - >>> df = pd.DataFrame({'Max Speed' : [390., 350., 30., 20.]}, - ... index=index) + >>> df = pd.DataFrame({'Max Speed': [390., 350., 30., 20.]}, + ... index=index) >>> df Max Speed Animal Type - Falcon Capitve 390.0 + Falcon Captive 390.0 Wild 350.0 - Parrot Capitve 30.0 + Parrot Captive 30.0 Wild 20.0 >>> df.groupby(level=0).mean() Max Speed @@ -7614,7 +7637,7 @@ def groupby(self, by=None, axis=0, level=None, as_index=True, sort=True, >>> df.groupby(level=1).mean() Max Speed Type - Capitve 210.0 + Captive 210.0 Wild 185.0 """ from pandas.core.groupby.groupby import groupby @@ -7731,14 +7754,14 @@ def at_time(self, time, asof=False, axis=None): Parameters ---------- - time : datetime.time or string + time : datetime.time or str axis : {0 or 'index', 1 or 'columns'}, default 0 .. 
versionadded:: 0.24.0 Returns ------- - values_at_time : same type as caller + Series or DataFrame Raises ------ @@ -7756,7 +7779,7 @@ def at_time(self, time, asof=False, axis=None): Examples -------- >>> i = pd.date_range('2018-04-09', periods=4, freq='12H') - >>> ts = pd.DataFrame({'A': [1,2,3,4]}, index=i) + >>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i) >>> ts A 2018-04-09 00:00:00 1 @@ -7791,17 +7814,17 @@ def between_time(self, start_time, end_time, include_start=True, Parameters ---------- - start_time : datetime.time or string - end_time : datetime.time or string - include_start : boolean, default True - include_end : boolean, default True + start_time : datetime.time or str + end_time : datetime.time or str + include_start : bool, default True + include_end : bool, default True axis : {0 or 'index', 1 or 'columns'}, default 0 .. versionadded:: 0.24.0 Returns ------- - values_between_time : same type as caller + Series or DataFrame Raises ------ @@ -7819,7 +7842,7 @@ def between_time(self, start_time, end_time, include_start=True, Examples -------- >>> i = pd.date_range('2018-04-09', periods=4, freq='1D20min') - >>> ts = pd.DataFrame({'A': [1,2,3,4]}, index=i) + >>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i) >>> ts A 2018-04-09 00:00:00 1 @@ -8377,7 +8400,7 @@ def ranker(data): Returns ------- (left, right) : (%(klass)s, type of other) - Aligned objects + Aligned objects. 
""") @Appender(_shared_docs['align'] % _shared_doc_kwargs) @@ -8569,7 +8592,7 @@ def _where(self, cond, other=np.nan, inplace=False, axis=None, level=None, cond = self._constructor(cond, **self._construct_axes_dict()) # make sure we are boolean - fill_value = True if inplace else False + fill_value = bool(inplace) cond = cond.fillna(fill_value) msg = "Boolean array expected for the condition, not {dtype}" @@ -9984,8 +10007,7 @@ def _add_numeric_operations(cls): cls, 'all', name, name2, axis_descr, _all_desc, nanops.nanall, _all_see_also, _all_examples, empty_value=True) - @Substitution(outname='mad', - desc="Return the mean absolute deviation of the values " + @Substitution(desc="Return the mean absolute deviation of the values " "for the requested axis.", name1=name, name2=name2, axis_descr=axis_descr, min_count='', see_also='', examples='') @@ -10026,8 +10048,7 @@ def mad(self, axis=None, skipna=None, level=None): "ddof argument", nanops.nanstd) - @Substitution(outname='compounded', - desc="Return the compound percentage of the values for " + @Substitution(desc="Return the compound percentage of the values for " "the requested axis.", name1=name, name2=name2, axis_descr=axis_descr, min_count='', see_also='', examples='') @@ -10117,7 +10138,7 @@ def nanptp(values, axis=0, skipna=True): cls.ptp = _make_stat_function( cls, 'ptp', name, name2, axis_descr, - """Returns the difference between the maximum value and the + """Return the difference between the maximum value and the minimum value in the object. This is the equivalent of the ``numpy.ndarray`` method ``ptp``.\n\n.. 
deprecated:: 0.24.0 Use numpy.ptp instead""", @@ -10235,8 +10256,8 @@ def last_valid_index(self): def _doc_parms(cls): """Return a tuple of the doc parms.""" - axis_descr = "{%s}" % ', '.join(["{0} ({1})".format(a, i) - for i, a in enumerate(cls._AXIS_ORDERS)]) + axis_descr = "{%s}" % ', '.join("{0} ({1})".format(a, i) + for i, a in enumerate(cls._AXIS_ORDERS)) name = (cls._constructor_sliced.__name__ if cls._AXIS_LEN > 1 else 'scalar') name2 = cls.__name__ @@ -10264,7 +10285,7 @@ def _doc_parms(cls): Returns ------- -%(outname)s : %(name1)s or %(name2)s (if level specified) +%(name1)s or %(name2)s (if level specified) %(see_also)s %(examples)s\ """ @@ -10275,7 +10296,7 @@ def _doc_parms(cls): Parameters ---------- axis : %(axis_descr)s -skipna : boolean, default True +skipna : bool, default True Exclude NA/null values. If an entire row/column is NA, the result will be NA level : int or level name, default None @@ -10284,13 +10305,13 @@ def _doc_parms(cls): ddof : int, default 1 Delta Degrees of Freedom. The divisor used in calculations is N - ddof, where N represents the number of elements. -numeric_only : boolean, default None +numeric_only : bool, default None Include only float, int, boolean columns. If None, will attempt to use everything, then use only numeric data. Not implemented for Series. Returns ------- -%(outname)s : %(name1)s or %(name2)s (if level specified)\n""" +%(name1)s or %(name2)s (if level specified)\n""" _bool_doc = """ %(desc)s @@ -10409,7 +10430,7 @@ def _doc_parms(cls): Returns ------- -%(outname)s : %(name1)s or %(name2)s\n +%(name1)s or %(name2)s\n See Also -------- core.window.Expanding.%(accum_func_name)s : Similar functionality @@ -10857,7 +10878,7 @@ def _doc_parms(cls): Series.max : Return the maximum. Series.idxmin : Return the index of the minimum. Series.idxmax : Return the index of the maximum. -DataFrame.min : Return the sum over the requested axis. +DataFrame.sum : Return the sum over the requested axis. 
DataFrame.min : Return the minimum over the requested axis. DataFrame.max : Return the maximum over the requested axis. DataFrame.idxmin : Return the index of the minimum over the requested axis. @@ -10902,7 +10923,7 @@ def _doc_parms(cls): def _make_min_count_stat_function(cls, name, name1, name2, axis_descr, desc, f, see_also='', examples=''): - @Substitution(outname=name, desc=desc, name1=name1, name2=name2, + @Substitution(desc=desc, name1=name1, name2=name2, axis_descr=axis_descr, min_count=_min_count_stub, see_also=see_also, examples=examples) @Appender(_num_doc) @@ -10930,7 +10951,7 @@ def stat_func(self, axis=None, skipna=None, level=None, numeric_only=None, def _make_stat_function(cls, name, name1, name2, axis_descr, desc, f, see_also='', examples=''): - @Substitution(outname=name, desc=desc, name1=name1, name2=name2, + @Substitution(desc=desc, name1=name1, name2=name2, axis_descr=axis_descr, min_count='', see_also=see_also, examples=examples) @Appender(_num_doc) @@ -10954,7 +10975,7 @@ def stat_func(self, axis=None, skipna=None, level=None, numeric_only=None, def _make_stat_function_ddof(cls, name, name1, name2, axis_descr, desc, f): - @Substitution(outname=name, desc=desc, name1=name1, name2=name2, + @Substitution(desc=desc, name1=name1, name2=name2, axis_descr=axis_descr) @Appender(_num_ddof_doc) def stat_func(self, axis=None, skipna=None, level=None, ddof=1, @@ -10975,7 +10996,7 @@ def stat_func(self, axis=None, skipna=None, level=None, ddof=1, def _make_cum_function(cls, name, name1, name2, axis_descr, desc, accum_func, accum_func_name, mask_a, mask_b, examples): - @Substitution(outname=name, desc=desc, name1=name1, name2=name2, + @Substitution(desc=desc, name1=name1, name2=name2, axis_descr=axis_descr, accum_func_name=accum_func_name, examples=examples) @Appender(_cnum_doc) @@ -11010,7 +11031,7 @@ def cum_func(self, axis=None, skipna=True, *args, **kwargs): def _make_logical_function(cls, name, name1, name2, axis_descr, desc, f, see_also, examples, 
empty_value): - @Substitution(outname=name, desc=desc, name1=name1, name2=name2, + @Substitution(desc=desc, name1=name1, name2=name2, axis_descr=axis_descr, see_also=see_also, examples=examples, empty_value=empty_value) @Appender(_bool_doc) diff --git a/pandas/core/groupby/__init__.py b/pandas/core/groupby/__init__.py index 9c15a5ebfe0f2..ac35f3825e5e8 100644 --- a/pandas/core/groupby/__init__.py +++ b/pandas/core/groupby/__init__.py @@ -1,4 +1,4 @@ from pandas.core.groupby.groupby import GroupBy # noqa: F401 from pandas.core.groupby.generic import ( # noqa: F401 - SeriesGroupBy, DataFrameGroupBy, PanelGroupBy) + SeriesGroupBy, DataFrameGroupBy) from pandas.core.groupby.grouper import Grouper # noqa: F401 diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index c5142a4ee98cc..683c21f7bd47a 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -1,5 +1,5 @@ """ -Define the SeriesGroupBy, DataFrameGroupBy, and PanelGroupBy +Define the SeriesGroupBy and DataFrameGroupBy classes that hold the groupby interfaces (and some implementations). 
These are user facing as the result of the ``df.groupby(...)`` operations, @@ -39,7 +39,6 @@ from pandas.core.index import CategoricalIndex, Index, MultiIndex import pandas.core.indexes.base as ibase from pandas.core.internals import BlockManager, make_block -from pandas.core.panel import Panel from pandas.core.series import Series from pandas.plotting._core import boxplot_frame_groupby @@ -965,7 +964,7 @@ def _transform_fast(self, func, func_nm): ids, _, ngroup = self.grouper.group_info cast = self._transform_should_cast(func_nm) - out = algorithms.take_1d(func().values, ids) + out = algorithms.take_1d(func()._values, ids) if cast: out = self._try_cast(out, self.obj) return Series(out, index=self.obj.index, name=self.obj.name) @@ -1021,7 +1020,9 @@ def true_and_notna(x, *args, **kwargs): return filtered def nunique(self, dropna=True): - """ Returns number of unique elements in the group """ + """ + Return number of unique elements in the group. + """ ids, _, _ = self.grouper.group_info val = self.obj.get_values() @@ -1461,8 +1462,8 @@ def _reindex_output(self, result): # reindex `result`, and then reset the in-axis grouper columns. 
# Select in-axis groupers - in_axis_grps = [(i, ping.name) for (i, ping) - in enumerate(groupings) if ping.in_axis] + in_axis_grps = ((i, ping.name) for (i, ping) + in enumerate(groupings) if ping.in_axis) g_nums, g_names = zip(*in_axis_grps) result = result.drop(labels=list(g_names), axis=1) @@ -1578,96 +1579,10 @@ def groupby_series(obj, col=None): from pandas.core.reshape.concat import concat results = [groupby_series(obj[col], col) for col in obj.columns] results = concat(results, axis=1) + results.columns.names = obj.columns.names if not self.as_index: results.index = ibase.default_index(len(results)) return results boxplot = boxplot_frame_groupby - - -class PanelGroupBy(NDFrameGroupBy): - - def aggregate(self, arg, *args, **kwargs): - return super(PanelGroupBy, self).aggregate(arg, *args, **kwargs) - - agg = aggregate - - def _iterate_slices(self): - if self.axis == 0: - # kludge - if self._selection is None: - slice_axis = self._selected_obj.items - else: - slice_axis = self._selection_list - slicer = lambda x: self._selected_obj[x] - else: - raise NotImplementedError("axis other than 0 is not supported") - - for val in slice_axis: - if val in self.exclusions: - continue - - yield val, slicer(val) - - def aggregate(self, arg, *args, **kwargs): - """ - Aggregate using input function or dict of {column -> function} - - Parameters - ---------- - arg : function or dict - Function to use for aggregating groups. If a function, must either - work when passed a Panel or when passed to Panel.apply. 
If - pass a dict, the keys must be DataFrame column names - - Returns - ------- - aggregated : Panel - """ - if isinstance(arg, compat.string_types): - return getattr(self, arg)(*args, **kwargs) - - return self._aggregate_generic(arg, *args, **kwargs) - - def _wrap_generic_output(self, result, obj): - if self.axis == 0: - new_axes = list(obj.axes) - new_axes[0] = self.grouper.result_index - elif self.axis == 1: - x, y, z = obj.axes - new_axes = [self.grouper.result_index, z, x] - else: - x, y, z = obj.axes - new_axes = [self.grouper.result_index, y, x] - - result = Panel._from_axes(result, new_axes) - - if self.axis == 1: - result = result.swapaxes(0, 1).swapaxes(0, 2) - elif self.axis == 2: - result = result.swapaxes(0, 2) - - return result - - def _aggregate_item_by_item(self, func, *args, **kwargs): - obj = self._obj_with_exclusions - result = {} - - if self.axis > 0: - for item in obj: - try: - itemg = DataFrameGroupBy(obj[item], - axis=self.axis - 1, - grouper=self.grouper) - result[item] = itemg.aggregate(func, *args, **kwargs) - except (ValueError, TypeError): - raise - new_axes = list(obj.axes) - new_axes[self.axis] = self.grouper.result_index - return Panel._from_axes(result, new_axes) - else: - raise ValueError("axis value must be greater than 0") - - def _wrap_aggregated_output(self, output, names=None): - raise AbstractMethodError(self) diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index 8766fdbc29755..926da40deaff2 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -18,7 +18,7 @@ class providing the base-class of operations. 
from pandas._libs import Timestamp, groupby as libgroupby import pandas.compat as compat -from pandas.compat import callable, range, set_function_name, zip +from pandas.compat import range, set_function_name, zip from pandas.compat.numpy import function as nv from pandas.errors import AbstractMethodError from pandas.util._decorators import Appender, Substitution, cache_readonly @@ -26,9 +26,12 @@ class providing the base-class of operations. from pandas.core.dtypes.cast import maybe_downcast_to_dtype from pandas.core.dtypes.common import ( - ensure_float, is_extension_array_dtype, is_numeric_dtype, is_scalar) + ensure_float, is_datetime64tz_dtype, is_extension_array_dtype, + is_numeric_dtype, is_scalar) from pandas.core.dtypes.missing import isna, notna +from pandas.api.types import ( + is_datetime64_dtype, is_integer_dtype, is_object_dtype) import pandas.core.algorithms as algorithms from pandas.core.base import ( DataError, GroupByError, PandasObject, SelectionMixin, SpecificationError) @@ -44,9 +47,9 @@ class providing the base-class of operations. _common_see_also = """ See Also -------- - pandas.Series.%(name)s - pandas.DataFrame.%(name)s - pandas.Panel.%(name)s + Series.%(name)s + DataFrame.%(name)s + Panel.%(name)s """ _apply_docs = dict( @@ -206,8 +209,8 @@ class providing the base-class of operations. See Also -------- -pandas.Series.pipe : Apply a function with arguments to a series. -pandas.DataFrame.pipe: Apply a function with arguments to a dataframe. +Series.pipe : Apply a function with arguments to a series. +DataFrame.pipe: Apply a function with arguments to a dataframe. apply : Apply function to each group instead of to the full %(klass)s object. 
@@ -443,12 +446,12 @@ def get_converter(s): raise ValueError(msg) converters = [get_converter(s) for s in index_sample] - names = [tuple(f(n) for f, n in zip(converters, name)) - for name in names] + names = (tuple(f(n) for f, n in zip(converters, name)) + for name in names) else: converter = get_converter(index_sample) - names = [converter(name) for name in names] + names = (converter(name) for name in names) return [self.indices.get(name, []) for name in names] @@ -625,7 +628,7 @@ def curried(x): def get_group(self, name, obj=None): """ - Constructs NDFrame from group with provided name. + Construct NDFrame from group with provided name. Parameters ---------- @@ -764,7 +767,21 @@ def _try_cast(self, result, obj, numeric_only=False): dtype = obj.dtype if not is_scalar(result): - if is_extension_array_dtype(dtype): + if is_datetime64tz_dtype(dtype): + # GH 23683 + # Prior results _may_ have been generated in UTC. + # Ensure we localize to UTC first before converting + # to the target timezone + try: + result = obj._values._from_sequence( + result, dtype='datetime64[ns, UTC]' + ) + result = result.astype(dtype) + except TypeError: + # _try_cast was called at a point where the result + # was already tz-aware + pass + elif is_extension_array_dtype(dtype): # The function can return something of any type, so check # if the type is compatible with the calling EA. 
try: @@ -1024,15 +1041,17 @@ def _bool_agg(self, val_test, skipna): """ def objs_to_bool(vals): - try: - vals = vals.astype(np.bool) - except ValueError: # for objects + # type: np.ndarray -> (np.ndarray, typing.Type) + if is_object_dtype(vals): vals = np.array([bool(x) for x in vals]) + else: + vals = vals.astype(np.bool) - return vals.view(np.uint8) + return vals.view(np.uint8), np.bool - def result_to_bool(result): - return result.astype(np.bool, copy=False) + def result_to_bool(result, inference): + # type: (np.ndarray, typing.Type) -> np.ndarray + return result.astype(inference, copy=False) return self._get_cythonized_result('group_any_all', self.grouper, aggregate=True, @@ -1047,7 +1066,7 @@ def result_to_bool(result): @Appender(_common_see_also) def any(self, skipna=True): """ - Returns True if any value in the group is truthful, else False. + Return True if any value in the group is truthful, else False. Parameters ---------- @@ -1060,7 +1079,7 @@ def any(self, skipna=True): @Appender(_common_see_also) def all(self, skipna=True): """ - Returns True if all values in the group are truthful, else False. + Return True if all values in the group are truthful, else False. Parameters ---------- @@ -1351,7 +1370,7 @@ def resample(self, rule, *args, **kwargs): See Also -------- - pandas.Grouper : Specify a frequency to resample with when + Grouper : Specify a frequency to resample with when grouping by a key. DatetimeIndex.resample : Frequency conversion and resampling of time series. @@ -1688,6 +1707,75 @@ def nth(self, n, dropna=None): return result + def quantile(self, q=0.5, interpolation='linear'): + """ + Return group values at the given quantile, a la numpy.percentile. + + Parameters + ---------- + q : float or array-like, default 0.5 (50% quantile) + Value(s) between 0 and 1 providing the quantile(s) to compute. + interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'} + Method to use when the desired quantile falls between two points. 
+ + Returns + ------- + Series or DataFrame + Return type determined by caller of GroupBy object. + + See Also + -------- + Series.quantile : Similar method for Series. + DataFrame.quantile : Similar method for DataFrame. + numpy.percentile : NumPy method to compute qth percentile. + + Examples + -------- + >>> df = pd.DataFrame([ + ... ['a', 1], ['a', 2], ['a', 3], + ... ['b', 1], ['b', 3], ['b', 5] + ... ], columns=['key', 'val']) + >>> df.groupby('key').quantile() + val + key + a 2.0 + b 3.0 + """ + + def pre_processor(vals): + # type: np.ndarray -> (np.ndarray, Optional[typing.Type]) + if is_object_dtype(vals): + raise TypeError("'quantile' cannot be performed against " + "'object' dtypes!") + + inference = None + if is_integer_dtype(vals): + inference = np.int64 + elif is_datetime64_dtype(vals): + inference = 'datetime64[ns]' + vals = vals.astype(np.float) + + return vals, inference + + def post_processor(vals, inference): + # type: (np.ndarray, Optional[typing.Type]) -> np.ndarray + if inference: + # Check for edge case + if not (is_integer_dtype(inference) and + interpolation in {'linear', 'midpoint'}): + vals = vals.astype(inference) + + return vals + + return self._get_cythonized_result('group_quantile', self.grouper, + aggregate=True, + needs_values=True, + needs_mask=True, + cython_dtype=np.float64, + pre_processing=pre_processor, + post_processing=post_processor, + q=q, interpolation=interpolation) + @Substitution(name='groupby') def ngroup(self, ascending=True): """ @@ -1813,7 +1901,7 @@ def cumcount(self, ascending=True): def rank(self, method='average', ascending=True, na_option='keep', pct=False, axis=0): """ - Provides the rank of values within each group. + Provide the rank of values within each group. Parameters ---------- @@ -1835,7 +1923,7 @@ def rank(self, method='average', ascending=True, na_option='keep', The axis of the object over which to compute the rank. 
Returns - ----- + ------- DataFrame with ranking of values within each group """ if na_option not in {'keep', 'top', 'bottom'}: @@ -1924,10 +2012,16 @@ def _get_cythonized_result(self, how, grouper, aggregate=False, Whether the result of the Cython operation is an index of values to be retrieved, instead of the actual values themselves pre_processing : function, default None - Function to be applied to `values` prior to passing to Cython - Raises if `needs_values` is False + Function to be applied to `values` prior to passing to Cython. + Function should return a tuple where the first element is the + values to be passed to Cython and the second element is an optional + type which the values should be converted to after being returned + by the Cython operation. Raises if `needs_values` is False. post_processing : function, default None - Function to be applied to result of Cython function + Function to be applied to result of Cython function. Should accept + an array of values as the first argument and type inferences as its + second argument, i.e. the signature should be + (ndarray, typing.Type). 
**kwargs : dict Extra arguments to be passed back to Cython funcs @@ -1963,10 +2057,12 @@ def _get_cythonized_result(self, how, grouper, aggregate=False, result = np.zeros(result_sz, dtype=cython_dtype) func = partial(base_func, result, labels) + inferences = None + if needs_values: vals = obj.values if pre_processing: - vals = pre_processing(vals) + vals, inferences = pre_processing(vals) func = partial(func, vals) if needs_mask: @@ -1982,7 +2078,7 @@ def _get_cythonized_result(self, how, grouper, aggregate=False, result = algorithms.take_nd(obj.values, result) if post_processing: - result = post_processing(result) + result = post_processing(result, inferences) output[name] = result @@ -2039,7 +2135,7 @@ def pct_change(self, periods=1, fill_method='pad', limit=None, freq=None, @Substitution(name='groupby', see_also=_common_see_also) def head(self, n=5): """ - Returns first n rows of each group. + Return first n rows of each group. Essentially equivalent to ``.apply(lambda x: x.head(n))``, except ignores as_index flag. @@ -2067,7 +2163,7 @@ def head(self, n=5): @Substitution(name='groupby', see_also=_common_see_also) def tail(self, n=5): """ - Returns last n rows of each group. + Return last n rows of each group. Essentially equivalent to ``.apply(lambda x: x.tail(n))``, except ignores as_index flag. 
diff --git a/pandas/core/groupby/grouper.py b/pandas/core/groupby/grouper.py index 260417bc0d598..d1ebb9cbe8ac4 100644 --- a/pandas/core/groupby/grouper.py +++ b/pandas/core/groupby/grouper.py @@ -8,7 +8,7 @@ import numpy as np import pandas.compat as compat -from pandas.compat import callable, zip +from pandas.compat import zip from pandas.util._decorators import cache_readonly from pandas.core.dtypes.common import ( @@ -54,14 +54,17 @@ class Grouper(object): axis : number/name of the axis, defaults to 0 sort : boolean, default to False whether to sort the resulting labels - - additional kwargs to control time-like groupers (when `freq` is passed) - - closed : closed end of interval; 'left' or 'right' - label : interval boundary to use for labeling; 'left' or 'right' + closed : {'left' or 'right'} + Closed end of interval. Only when `freq` parameter is passed. + label : {'left' or 'right'} + Interval boundary to use for labeling. + Only when `freq` parameter is passed. convention : {'start', 'end', 'e', 's'} - If grouper is PeriodIndex - base, loffset + If grouper is PeriodIndex and `freq` parameter is passed. + base : int, default 0 + Only when `freq` parameter is passed. + loffset : string, DateOffset, timedelta object + Only when `freq` parameter is passed. 
Returns ------- @@ -195,9 +198,9 @@ def groups(self): return self.grouper.groups def __repr__(self): - attrs_list = ["{}={!r}".format(attr_name, getattr(self, attr_name)) + attrs_list = ("{}={!r}".format(attr_name, getattr(self, attr_name)) for attr_name in self._attributes - if getattr(self, attr_name) is not None] + if getattr(self, attr_name) is not None) attrs = ", ".join(attrs_list) cls_name = self.__class__.__name__ return "{}({})".format(cls_name, attrs) diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py index 87f48d5a40554..78c9aa9187135 100644 --- a/pandas/core/groupby/ops.py +++ b/pandas/core/groupby/ops.py @@ -380,7 +380,7 @@ def get_func(fname): # otherwise find dtype-specific version, falling back to object for dt in [dtype_str, 'object']: f = getattr(libgroupby, "{fname}_{dtype_str}".format( - fname=fname, dtype_str=dtype_str), None) + fname=fname, dtype_str=dt), None) if f is not None: return f diff --git a/pandas/core/indexes/accessors.py b/pandas/core/indexes/accessors.py index c43469d3c3a81..602e11a08b4ed 100644 --- a/pandas/core/indexes/accessors.py +++ b/pandas/core/indexes/accessors.py @@ -140,7 +140,7 @@ def to_pydatetime(self): Returns ------- numpy.ndarray - object dtype array containing native Python datetime objects. + Object dtype array containing native Python datetime objects. See Also -------- @@ -208,7 +208,7 @@ def to_pytimedelta(self): Returns ------- a : numpy.ndarray - 1D array containing data with `datetime.timedelta` type. + Array of 1D containing data with `datetime.timedelta` type. 
See Also -------- diff --git a/pandas/core/indexes/api.py b/pandas/core/indexes/api.py index 684a19c56c92f..6299fc482d0df 100644 --- a/pandas/core/indexes/api.py +++ b/pandas/core/indexes/api.py @@ -112,7 +112,7 @@ def _get_combined_index(indexes, intersect=False, sort=False): elif intersect: index = indexes[0] for other in indexes[1:]: - index = index.intersection(other, sort=sort) + index = index.intersection(other) else: index = _union_indexes(indexes, sort=sort) index = ensure_index(index) diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 767da81c5c43a..dee181fc1c569 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -6,9 +6,10 @@ import numpy as np from pandas._libs import ( - Timedelta, algos as libalgos, index as libindex, join as libjoin, lib, - tslibs) + algos as libalgos, index as libindex, join as libjoin, lib) from pandas._libs.lib import is_datetime_array +from pandas._libs.tslibs import OutOfBoundsDatetime, Timedelta, Timestamp +from pandas._libs.tslibs.timezones import tz_compare import pandas.compat as compat from pandas.compat import range, set_function_name, u from pandas.compat.numpy import function as nv @@ -447,7 +448,7 @@ def __new__(cls, data=None, dtype=None, copy=False, name=None, try: return DatetimeIndex(subarr, copy=copy, name=name, **kwargs) - except tslibs.OutOfBoundsDatetime: + except OutOfBoundsDatetime: pass elif inferred.startswith('timedelta'): @@ -665,7 +666,8 @@ def __array_wrap__(self, result, context=None): """ Gets called after a ufunc. 
""" - if is_bool_dtype(result): + result = lib.item_from_zerodim(result) + if is_bool_dtype(result) or lib.is_scalar(result): return result attrs = self._get_attributes_dict() @@ -1441,7 +1443,7 @@ def sortlevel(self, level=None, ascending=True, sort_remaining=None): Returns ------- - sorted_index : Index + Index """ return self.sort_values(return_indexer=True, ascending=ascending) @@ -1459,7 +1461,7 @@ def _get_level_values(self, level): Returns ------- - values : Index + Index Calling object, as there is only one level in the Index. See Also @@ -1504,7 +1506,7 @@ def droplevel(self, level=0): Returns ------- - index : Index or MultiIndex + Index or MultiIndex """ if not isinstance(level, (tuple, list)): level = [level] @@ -1556,11 +1558,11 @@ def droplevel(self, level=0): Returns ------- grouper : Index - Index of values to group on + Index of values to group on. labels : ndarray of int or None - Array of locations in level_index + Array of locations in level_index. uniques : Index or None - Index of unique values for level + Index of unique values for level. """ @Appender(_index_shared_docs['_get_grouper_for_level']) @@ -1828,13 +1830,13 @@ def isna(self): Returns ------- numpy.ndarray - A boolean array of whether my values are NA + A boolean array of whether my values are NA. See Also -------- - pandas.Index.notna : Boolean inverse of isna. - pandas.Index.dropna : Omit entries with missing values. - pandas.isna : Top-level isna. + Index.notna : Boolean inverse of isna. + Index.dropna : Omit entries with missing values. + isna : Top-level isna. Series.isna : Detect missing values in Series object. Examples @@ -1892,7 +1894,7 @@ def notna(self): -------- Index.notnull : Alias of notna. Index.isna: Inverse of notna. - pandas.notna : Top-level notna. + notna : Top-level notna. Examples -------- @@ -2074,9 +2076,9 @@ def duplicated(self, keep='first'): See Also -------- - pandas.Series.duplicated : Equivalent method on pandas.Series. 
- pandas.DataFrame.duplicated : Equivalent method on pandas.DataFrame. - pandas.Index.drop_duplicates : Remove duplicate values from Index. + Series.duplicated : Equivalent method on pandas.Series. + DataFrame.duplicated : Equivalent method on pandas.DataFrame. + Index.drop_duplicates : Remove duplicate values from Index. Examples -------- @@ -2245,18 +2247,37 @@ def _get_reconciled_name_object(self, other): return self._shallow_copy(name=name) return self - def union(self, other, sort=True): + def _validate_sort_keyword(self, sort): + if sort not in [None, False]: + raise ValueError("The 'sort' keyword only takes the values of " + "None or False; {0} was passed.".format(sort)) + + def union(self, other, sort=None): """ Form the union of two Index objects. Parameters ---------- other : Index or array-like - sort : bool, default True - Sort the resulting index if possible + sort : bool or None, default None + Whether to sort the resulting Index. + + * None : Sort the result, except when + + 1. `self` and `other` are equal. + 2. `self` or `other` has length 0. + 3. Some values in `self` or `other` cannot be compared. + A RuntimeWarning is issued in this case. + + * False : do not sort the result. .. versionadded:: 0.24.0 + .. versionchanged:: 0.24.1 + + Changed the default value from ``True`` to ``None`` + (without change in behaviour). 
+ Returns ------- union : Index @@ -2269,6 +2290,7 @@ def union(self, other, sort=True): >>> idx1.union(idx2) Int64Index([1, 2, 3, 4, 5, 6], dtype='int64') """ + self._validate_sort_keyword(sort) self._assert_can_do_setop(other) other = ensure_index(other) @@ -2319,7 +2341,7 @@ def union(self, other, sort=True): else: result = lvals - if sort: + if sort is None: try: result = sorting.safe_sort(result) except TypeError as e: @@ -2333,7 +2355,7 @@ def union(self, other, sort=True): def _wrap_setop_result(self, other, result): return self._constructor(result, name=get_op_result_name(self, other)) - def intersection(self, other, sort=True): + def intersection(self, other, sort=False): """ Form the intersection of two Index objects. @@ -2342,11 +2364,20 @@ def intersection(self, other, sort=True): Parameters ---------- other : Index or array-like - sort : bool, default True - Sort the resulting index if possible + sort : False or None, default False + Whether to sort the resulting index. + + * False : do not sort the result. + * None : sort the result, except when `self` and `other` are equal + or when the values cannot be compared. .. versionadded:: 0.24.0 + .. versionchanged:: 0.24.1 + + Changed the default from ``True`` to ``False``, to match + the behaviour of 0.23.4 and earlier. 
+ Returns ------- intersection : Index @@ -2359,6 +2390,7 @@ def intersection(self, other, sort=True): >>> idx1.intersection(idx2) Int64Index([3, 4], dtype='int64') """ + self._validate_sort_keyword(sort) self._assert_can_do_setop(other) other = ensure_index(other) @@ -2398,7 +2430,7 @@ def intersection(self, other, sort=True): taken = other.take(indexer) - if sort: + if sort is None: taken = sorting.safe_sort(taken.values) if self.name != other.name: name = None @@ -2411,7 +2443,7 @@ def intersection(self, other, sort=True): return taken - def difference(self, other, sort=True): + def difference(self, other, sort=None): """ Return a new Index with elements from the index that are not in `other`. @@ -2421,11 +2453,22 @@ def difference(self, other, sort=True): Parameters ---------- other : Index or array-like - sort : bool, default True - Sort the resulting index if possible + sort : False or None, default None + Whether to sort the resulting index. By default, the + values are attempted to be sorted, but any TypeError from + incomparable elements is caught by pandas. + + * None : Attempt to sort the result, but catch any TypeErrors + from comparing incomparable elements. + * False : Do not sort the result. .. versionadded:: 0.24.0 + .. versionchanged:: 0.24.1 + + Changed the default value from ``True`` to ``None`` + (without change in behaviour). 
+ Returns ------- difference : Index @@ -2440,6 +2483,7 @@ def difference(self, other, sort=True): >>> idx1.difference(idx2, sort=False) Int64Index([2, 1], dtype='int64') """ + self._validate_sort_keyword(sort) self._assert_can_do_setop(other) if self.equals(other): @@ -2456,7 +2500,7 @@ def difference(self, other, sort=True): label_diff = np.setdiff1d(np.arange(this.size), indexer, assume_unique=True) the_diff = this.values.take(label_diff) - if sort: + if sort is None: try: the_diff = sorting.safe_sort(the_diff) except TypeError: @@ -2464,7 +2508,7 @@ def difference(self, other, sort=True): return this._shallow_copy(the_diff, name=result_name, freq=None) - def symmetric_difference(self, other, result_name=None, sort=True): + def symmetric_difference(self, other, result_name=None, sort=None): """ Compute the symmetric difference of two Index objects. @@ -2472,11 +2516,22 @@ def symmetric_difference(self, other, result_name=None, sort=True): ---------- other : Index or array-like result_name : str - sort : bool, default True - Sort the resulting index if possible + sort : False or None, default None + Whether to sort the resulting index. By default, the + values are attempted to be sorted, but any TypeError from + incomparable elements is caught by pandas. + + * None : Attempt to sort the result, but catch any TypeErrors + from comparing incomparable elements. + * False : Do not sort the result. .. versionadded:: 0.24.0 + .. versionchanged:: 0.24.1 + + Changed the default value from ``True`` to ``None`` + (without change in behaviour). 
+ Returns ------- symmetric_difference : Index @@ -2500,6 +2555,7 @@ def symmetric_difference(self, other, result_name=None, sort=True): >>> idx1 ^ idx2 Int64Index([1, 5], dtype='int64') """ + self._validate_sort_keyword(sort) self._assert_can_do_setop(other) other, result_name_update = self._convert_can_do_setop(other) if result_name is None: @@ -2520,7 +2576,7 @@ def symmetric_difference(self, other, result_name=None, sort=True): right_diff = other.values.take(right_indexer) the_diff = _concat._concat_compat([left_diff, right_diff]) - if sort: + if sort is None: try: the_diff = sorting.safe_sort(the_diff) except TypeError: @@ -2916,9 +2972,10 @@ def _convert_listlike_indexer(self, keyarr, kind=None): Returns ------- - tuple (indexer, keyarr) - indexer is an ndarray or None if cannot convert - keyarr are tuple-safe keys + indexer : numpy.ndarray or None + Return an ndarray or None if cannot convert. + keyarr : numpy.ndarray + Return tuple-safe keys. """ if isinstance(keyarr, Index): keyarr = self._convert_index_indexer(keyarr) @@ -3044,9 +3101,9 @@ def reindex(self, target, method=None, level=None, limit=None, Returns ------- new_index : pd.Index - Resulting index + Resulting index. indexer : np.ndarray or None - Indices of output values in original index + Indices of output values in original index. """ # GH6552: preserve names when reindexing to non-named target @@ -3102,9 +3159,9 @@ def _reindex_non_unique(self, target): Returns ------- new_index : pd.Index - Resulting index + Resulting index. indexer : np.ndarray or None - Indices of output values in original index + Indices of output values in original index. """ @@ -3995,7 +4052,7 @@ def putmask(self, mask, value): def equals(self, other): """ - Determines if two Index objects contain the same elements. + Determine if two Index objects contain the same elements. 
""" if self.is_(other): return True @@ -4090,7 +4147,7 @@ def asof(self, label): def asof_locs(self, where, mask): """ - Finds the locations (indices) of the labels from the index for + Find the locations (indices) of the labels from the index for every entry in the `where` argument. As in the `asof` function, if the label (a particular entry in @@ -4150,8 +4207,8 @@ def sort_values(self, return_indexer=False, ascending=True): See Also -------- - pandas.Series.sort_values : Sort values of a Series. - pandas.DataFrame.sort_values : Sort values in a DataFrame. + Series.sort_values : Sort values of a Series. + DataFrame.sort_values : Sort values in a DataFrame. Examples -------- @@ -4205,7 +4262,7 @@ def shift(self, periods=1, freq=None): Returns ------- pandas.Index - shifted index + Shifted index. See Also -------- @@ -4368,7 +4425,7 @@ def set_value(self, arr, key, value): in the target are marked by -1. missing : ndarray of int An indexer into the target of the values not found. - These correspond to the -1 in the indexer array + These correspond to the -1 in the indexer array. """ @Appender(_index_shared_docs['get_indexer_non_unique'] % _index_doc_kwargs) @@ -4812,6 +4869,20 @@ def slice_locs(self, start=None, end=None, step=None, kind=None): # If it's a reverse slice, temporarily swap bounds. 
start, end = end, start + # GH 16785: If start and end happen to be date strings with UTC offsets + # attempt to parse and check that the offsets are the same + if (isinstance(start, (compat.string_types, datetime)) + and isinstance(end, (compat.string_types, datetime))): + try: + ts_start = Timestamp(start) + ts_end = Timestamp(end) + except (ValueError, TypeError): + pass + else: + if not tz_compare(ts_start.tzinfo, ts_end.tzinfo): + raise ValueError("Both dates must have the " + "same UTC offset") + start_slice = None if start is not None: start_slice = self.get_slice_bound(start, 'left', kind) @@ -5123,9 +5194,9 @@ def _add_logical_methods(cls): See Also -------- - pandas.Index.any : Return whether any element in an Index is True. - pandas.Series.any : Return whether any element in a Series is True. - pandas.Series.all : Return whether all elements in a Series are True. + Index.any : Return whether any element in an Index is True. + Series.any : Return whether any element in a Series is True. + Series.all : Return whether all elements in a Series are True. Notes ----- @@ -5163,8 +5234,8 @@ def _add_logical_methods(cls): See Also -------- - pandas.Index.all : Return whether all elements are True. - pandas.Series.all : Return whether all elements are True. + Index.all : Return whether all elements are True. + Series.all : Return whether all elements are True. Notes ----- diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index e43b64827d02a..b494c41c3b58c 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -42,20 +42,35 @@ typ='method', overwrite=True) class CategoricalIndex(Index, accessor.PandasDelegate): """ - Immutable Index implementing an ordered, sliceable set. CategoricalIndex - represents a sparsely populated Index with an underlying Categorical. + Index based on an underlying :class:`Categorical`. 
+ + CategoricalIndex, like Categorical, can only take on a limited, + and usually fixed, number of possible values (`categories`). Also, + like Categorical, it might have an order, but numerical operations + (additions, divisions, ...) are not possible. Parameters ---------- - data : array-like or Categorical, (1-dimensional) - categories : optional, array-like - categories for the CategoricalIndex - ordered : boolean, - designating if the categories are ordered - copy : bool - Make a copy of input ndarray - name : object - Name to be stored in the index + data : array-like (1-dimensional) + The values of the categorical. If `categories` are given, values not in + `categories` will be replaced with NaN. + categories : index-like, optional + The categories for the categorical. Items need to be unique. + If the categories are not given here (and also not in `dtype`), they + will be inferred from the `data`. + ordered : bool, optional + Whether or not this categorical is treated as an ordered + categorical. If not given here or in `dtype`, the resulting + categorical will be unordered. + dtype : CategoricalDtype or the string "category", optional + If :class:`CategoricalDtype`, cannot be used together with + `categories` or `ordered`. + + .. versionadded:: 0.21.0 + copy : bool, default False + Make a copy of input ndarray. + name : object, optional + Name to be stored in the index. Attributes ---------- @@ -75,9 +90,45 @@ class CategoricalIndex(Index, accessor.PandasDelegate): as_unordered map + Raises + ------ + ValueError + If the categories do not validate. + TypeError + If an explicit ``ordered=True`` is given but no `categories` and the + `values` are not sortable. + See Also -------- - Categorical, Index + Index : The base pandas Index type. + Categorical : A categorical array. + CategoricalDtype : Type for categorical data. 
+ + Notes + ----- + See the `user guide + <http://pandas.pydata.org/pandas-docs/stable/user_guide/advanced.html#categoricalindex>`_ + for more. + + Examples + -------- + >>> pd.CategoricalIndex(['a', 'b', 'c', 'a', 'b', 'c']) + CategoricalIndex(['a', 'b', 'c', 'a', 'b', 'c'], categories=['a', 'b', 'c'], ordered=False, dtype='category') # noqa + + ``CategoricalIndex`` can also be instantiated from a ``Categorical``: + + >>> c = pd.Categorical(['a', 'b', 'c', 'a', 'b', 'c']) + >>> pd.CategoricalIndex(c) + CategoricalIndex(['a', 'b', 'c', 'a', 'b', 'c'], categories=['a', 'b', 'c'], ordered=False, dtype='category') # noqa + + Ordered ``CategoricalIndex`` can have a min and max value. + + >>> ci = pd.CategoricalIndex(['a','b','c','a','b','c'], ordered=True, + ... categories=['c', 'b', 'a']) + >>> ci + CategoricalIndex(['a', 'b', 'c', 'a', 'b', 'c'], categories=['c', 'b', 'a'], ordered=True, dtype='category') # noqa + >>> ci.min() + 'c' """ _typ = 'categoricalindex' @@ -232,7 +283,7 @@ def _is_dtype_compat(self, other): def equals(self, other): """ - Determines if two CategorialIndex objects contain the same elements. + Determine if two CategorialIndex objects contain the same elements. 
""" if self.is_(other): return True @@ -780,8 +831,8 @@ def _concat_same_dtype(self, to_concat, name): Concatenate to_concat which has the same class ValueError if other is not in the categories """ - to_concat = [self._is_dtype_compat(c) for c in to_concat] - codes = np.concatenate([c.codes for c in to_concat]) + codes = np.concatenate([self._is_dtype_compat(c).codes + for c in to_concat]) result = self._create_from_codes(codes, name=name) # if name is None, _create_from_codes sets self.name result.name = name diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index cc373c06efcc9..b8d052ce7be04 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -32,9 +32,8 @@ from pandas.core.ops import get_op_result_name import pandas.core.tools.datetimes as tools -from pandas.tseries import offsets from pandas.tseries.frequencies import Resolution, to_offset -from pandas.tseries.offsets import CDay, prefix_mapping +from pandas.tseries.offsets import CDay, Nano, prefix_mapping def _new_DatetimeIndex(cls, d): @@ -460,7 +459,7 @@ def _formatter_func(self): # -------------------------------------------------------------------- # Set Operation Methods - def union(self, other): + def union(self, other, sort=None): """ Specialized union for DatetimeIndex objects. If combine overlapping ranges with the same DateOffset, will be much @@ -469,15 +468,29 @@ def union(self, other): Parameters ---------- other : DatetimeIndex or array-like + sort : bool or None, default None + Whether to sort the resulting Index. + + * None : Sort the result, except when + + 1. `self` and `other` are equal. + 2. `self` or `other` has length 0. + 3. Some values in `self` or `other` cannot be compared. + A RuntimeWarning is issued in this case. + + * False : do not sort the result + + .. 
versionadded:: 0.25.0 Returns ------- y : Index or DatetimeIndex """ + self._validate_sort_keyword(sort) self._assert_can_do_setop(other) if len(other) == 0 or self.equals(other) or len(self) == 0: - return super(DatetimeIndex, self).union(other) + return super(DatetimeIndex, self).union(other, sort=sort) if not isinstance(other, DatetimeIndex): try: @@ -488,9 +501,9 @@ def union(self, other): this, other = self._maybe_utc_convert(other) if this._can_fast_union(other): - return this._fast_union(other) + return this._fast_union(other, sort=sort) else: - result = Index.union(this, other) + result = Index.union(this, other, sort=sort) if isinstance(result, DatetimeIndex): # TODO: we shouldn't be setting attributes like this; # in all the tests this equality already holds @@ -563,16 +576,28 @@ def _can_fast_union(self, other): # this will raise return False - def _fast_union(self, other): + def _fast_union(self, other, sort=None): if len(other) == 0: return self.view(type(self)) if len(self) == 0: return other.view(type(self)) - # to make our life easier, "sort" the two ranges + # Both DTIs are monotonic. Check if they are already + # in the "correct" order if self[0] <= other[0]: left, right = self, other + # DTIs are not in the "correct" order and we don't want + # to sort but want to remove overlaps + elif sort is False: + left, right = self, other + left_start = left[0] + loc = right.searchsorted(left_start, side='left') + right_chunk = right.values[:loc] + dates = _concat._concat_compat((left.values, right_chunk)) + return self._shallow_copy(dates) + # DTIs are not in the "correct" order and we want + # to sort else: left, right = other, self @@ -594,7 +619,7 @@ def _wrap_setop_result(self, other, result): name = get_op_result_name(self, other) return self._shallow_copy(result, name=name, freq=None, tz=self.tz) - def intersection(self, other, sort=True): + def intersection(self, other, sort=False): """ Specialized intersection for DatetimeIndex objects. 
May be much faster than Index.intersection @@ -602,11 +627,21 @@ def intersection(self, other, sort=True): Parameters ---------- other : DatetimeIndex or array-like + sort : False or None, default False + Sort the resulting index if possible. + + .. versionadded:: 0.24.0 + + .. versionchanged:: 0.24.1 + + Changed the default to ``False`` to match the behaviour + from before 0.24.0. Returns ------- y : Index or DatetimeIndex """ + self._validate_sort_keyword(sort) self._assert_can_do_setop(other) if self.equals(other): @@ -816,54 +851,57 @@ def _parsed_string_to_bounds(self, reso, parsed): lower, upper: pd.Timestamp """ + valid_resos = {'year', 'month', 'quarter', 'day', 'hour', 'minute', + 'second', 'minute', 'second', 'microsecond'} + if reso not in valid_resos: + raise KeyError if reso == 'year': - return (Timestamp(datetime(parsed.year, 1, 1), tz=self.tz), - Timestamp(datetime(parsed.year, 12, 31, 23, - 59, 59, 999999), tz=self.tz)) + start = Timestamp(parsed.year, 1, 1) + end = Timestamp(parsed.year, 12, 31, 23, 59, 59, 999999) elif reso == 'month': d = ccalendar.get_days_in_month(parsed.year, parsed.month) - return (Timestamp(datetime(parsed.year, parsed.month, 1), - tz=self.tz), - Timestamp(datetime(parsed.year, parsed.month, d, 23, - 59, 59, 999999), tz=self.tz)) + start = Timestamp(parsed.year, parsed.month, 1) + end = Timestamp(parsed.year, parsed.month, d, 23, 59, 59, 999999) elif reso == 'quarter': qe = (((parsed.month - 1) + 2) % 12) + 1 # two months ahead d = ccalendar.get_days_in_month(parsed.year, qe) # at end of month - return (Timestamp(datetime(parsed.year, parsed.month, 1), - tz=self.tz), - Timestamp(datetime(parsed.year, qe, d, 23, 59, - 59, 999999), tz=self.tz)) + start = Timestamp(parsed.year, parsed.month, 1) + end = Timestamp(parsed.year, qe, d, 23, 59, 59, 999999) elif reso == 'day': - st = datetime(parsed.year, parsed.month, parsed.day) - return (Timestamp(st, tz=self.tz), - Timestamp(Timestamp(st + offsets.Day(), - tz=self.tz).value - 1)) 
+ start = Timestamp(parsed.year, parsed.month, parsed.day) + end = start + timedelta(days=1) - Nano(1) elif reso == 'hour': - st = datetime(parsed.year, parsed.month, parsed.day, - hour=parsed.hour) - return (Timestamp(st, tz=self.tz), - Timestamp(Timestamp(st + offsets.Hour(), - tz=self.tz).value - 1)) + start = Timestamp(parsed.year, parsed.month, parsed.day, + parsed.hour) + end = start + timedelta(hours=1) - Nano(1) elif reso == 'minute': - st = datetime(parsed.year, parsed.month, parsed.day, - hour=parsed.hour, minute=parsed.minute) - return (Timestamp(st, tz=self.tz), - Timestamp(Timestamp(st + offsets.Minute(), - tz=self.tz).value - 1)) + start = Timestamp(parsed.year, parsed.month, parsed.day, + parsed.hour, parsed.minute) + end = start + timedelta(minutes=1) - Nano(1) elif reso == 'second': - st = datetime(parsed.year, parsed.month, parsed.day, - hour=parsed.hour, minute=parsed.minute, - second=parsed.second) - return (Timestamp(st, tz=self.tz), - Timestamp(Timestamp(st + offsets.Second(), - tz=self.tz).value - 1)) + start = Timestamp(parsed.year, parsed.month, parsed.day, + parsed.hour, parsed.minute, parsed.second) + end = start + timedelta(seconds=1) - Nano(1) elif reso == 'microsecond': - st = datetime(parsed.year, parsed.month, parsed.day, - parsed.hour, parsed.minute, parsed.second, - parsed.microsecond) - return (Timestamp(st, tz=self.tz), Timestamp(st, tz=self.tz)) - else: - raise KeyError + start = Timestamp(parsed.year, parsed.month, parsed.day, + parsed.hour, parsed.minute, parsed.second, + parsed.microsecond) + end = start + timedelta(microseconds=1) - Nano(1) + # GH 24076 + # If an incoming date string contained a UTC offset, need to localize + # the parsed date to this offset first before aligning with the index's + # timezone + if parsed.tzinfo is not None: + if self.tz is None: + raise ValueError("The index must be timezone aware " + "when indexing with a date string with a " + "UTC offset") + start = 
start.tz_localize(parsed.tzinfo).tz_convert(self.tz) + end = end.tz_localize(parsed.tzinfo).tz_convert(self.tz) + elif self.tz is not None: + start = start.tz_localize(self.tz) + end = end.tz_localize(self.tz) + return start, end def _partial_date_slice(self, reso, parsed, use_lhs=True, use_rhs=True): is_monotonic = self.is_monotonic @@ -1000,7 +1038,7 @@ def get_loc(self, key, method=None, tolerance=None): except (KeyError, ValueError, TypeError): try: return self._get_string_slice(key) - except (TypeError, KeyError, ValueError): + except (TypeError, KeyError, ValueError, OverflowError): pass try: @@ -1274,7 +1312,7 @@ def delete(self, loc): def indexer_at_time(self, time, asof=False): """ - Returns index locations of index values at particular time of day + Return index locations of index values at particular time of day (e.g. 9:30AM). Parameters @@ -1292,20 +1330,19 @@ def indexer_at_time(self, time, asof=False): -------- indexer_between_time, DataFrame.at_time """ - from dateutil.parser import parse - if asof: raise NotImplementedError("'asof' argument is not supported") if isinstance(time, compat.string_types): + from dateutil.parser import parse time = parse(time).time() if time.tzinfo: - # TODO - raise NotImplementedError("argument 'time' with timezone info is " - "not supported") - - time_micros = self._get_time_micros() + if self.tz is None: + raise ValueError("Index must be timezone aware.") + time_micros = self.tz_convert(time.tzinfo)._get_time_micros() + else: + time_micros = self._get_time_micros() micros = _time_to_micros(time) return (micros == time_micros).nonzero()[0] @@ -1403,10 +1440,10 @@ def date_range(start=None, end=None, periods=None, freq=None, tz=None, See Also -------- - pandas.DatetimeIndex : An immutable container for datetimes. - pandas.timedelta_range : Return a fixed frequency TimedeltaIndex. - pandas.period_range : Return a fixed frequency PeriodIndex. - pandas.interval_range : Return a fixed frequency IntervalIndex. 
+ DatetimeIndex : An immutable container for datetimes. + timedelta_range : Return a fixed frequency TimedeltaIndex. + period_range : Return a fixed frequency PeriodIndex. + interval_range : Return a fixed frequency IntervalIndex. Notes ----- diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index 0210560aaa21f..2c63fe33c57fe 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -1093,8 +1093,8 @@ def equals(self, other): def overlaps(self, other): return self._data.overlaps(other) - def _setop(op_name): - def func(self, other, sort=True): + def _setop(op_name, sort=None): + def func(self, other, sort=sort): other = self._as_like_interval_index(other) # GH 19016: ensure set op will not return a prohibited dtype @@ -1128,7 +1128,7 @@ def is_all_dates(self): return False union = _setop('union') - intersection = _setop('intersection') + intersection = _setop('intersection', sort=False) difference = _setop('difference') symmetric_difference = _setop('symmetric_difference') diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index e4d01a40bd181..616c17cd16f9a 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -61,7 +61,7 @@ def _codes_to_ints(self, codes): Returns ------ int_keys : scalar or 1-dimensional array, of dtype uint64 - Integer(s) representing one combination (each) + Integer(s) representing one combination (each). """ # Shift the representation of each level by the pre-calculated number # of bits: @@ -101,7 +101,7 @@ def _codes_to_ints(self, codes): Returns ------ int_keys : int, or 1-dimensional array of dtype object - Integer(s) representing one combination (each) + Integer(s) representing one combination (each). 
""" # Shift the representation of each level by the pre-calculated number @@ -324,11 +324,17 @@ def from_arrays(cls, arrays, sortorder=None, names=None): codes=[[0, 0, 1, 1], [1, 0, 1, 0]], names=['number', 'color']) """ + error_msg = "Input must be a list / sequence of array-likes." if not is_list_like(arrays): - raise TypeError("Input must be a list / sequence of array-likes.") + raise TypeError(error_msg) elif is_iterator(arrays): arrays = list(arrays) + # Check if elements of array are list-like + for array in arrays: + if not is_list_like(array): + raise TypeError(error_msg) + # Check if lengths of all arrays are equal or not, # raise ValueError, if not for i in range(1, len(arrays)): @@ -840,7 +846,7 @@ def __contains__(self, key): try: self.get_loc(key) return True - except (LookupError, TypeError): + except (LookupError, TypeError, ValueError): return False contains = __contains__ @@ -1391,7 +1397,7 @@ def get_level_values(self, level): Returns ------- values : Index - ``values`` is a level of this MultiIndex converted to + Values is a level of this MultiIndex converted to a single :class:`Index` (or subclass thereof). Examples @@ -1956,7 +1962,7 @@ def swaplevel(self, i=-2, j=-1): Returns ------- MultiIndex - A new MultiIndex + A new MultiIndex. .. versionchanged:: 0.18.1 @@ -2053,9 +2059,9 @@ def sortlevel(self, level=0, ascending=True, sort_remaining=True): Returns ------- sorted_index : pd.MultiIndex - Resulting index + Resulting index. indexer : np.ndarray - Indices of output values in original index + Indices of output values in original index. """ from pandas.core.sorting import indexer_from_factorized @@ -2189,7 +2195,7 @@ def reindex(self, target, method=None, level=None, limit=None, new_index : pd.MultiIndex Resulting index indexer : np.ndarray or None - Indices of output values in original index + Indices of output values in original index. 
""" # GH6552: preserve names when reindexing to non-named target @@ -2879,30 +2885,47 @@ def equal_levels(self, other): return False return True - def union(self, other, sort=True): + def union(self, other, sort=None): """ Form the union of two MultiIndex objects Parameters ---------- other : MultiIndex or array / Index of tuples - sort : bool, default True - Sort the resulting MultiIndex if possible + sort : False or None, default None + Whether to sort the resulting Index. + + * None : Sort the result, except when + + 1. `self` and `other` are equal. + 2. `self` has length 0. + 3. Some values in `self` or `other` cannot be compared. + A RuntimeWarning is issued in this case. + + * False : do not sort the result. .. versionadded:: 0.24.0 + .. versionchanged:: 0.24.1 + + Changed the default value from ``True`` to ``None`` + (without change in behaviour). + Returns ------- Index >>> index.union(index2) """ + self._validate_sort_keyword(sort) self._assert_can_do_setop(other) other, result_names = self._convert_can_do_setop(other) if len(other) == 0 or self.equals(other): return self + # TODO: Index.union returns other when `len(self)` is 0. + uniq_tuples = lib.fast_unique_multiple([self._ndarray_values, other._ndarray_values], sort=sort) @@ -2910,22 +2933,28 @@ def union(self, other, sort=True): return MultiIndex.from_arrays(lzip(*uniq_tuples), sortorder=0, names=result_names) - def intersection(self, other, sort=True): + def intersection(self, other, sort=False): """ Form the intersection of two MultiIndex objects. Parameters ---------- other : MultiIndex or array / Index of tuples - sort : bool, default True + sort : False or None, default False Sort the resulting MultiIndex if possible .. versionadded:: 0.24.0 + .. 
versionchanged:: 0.24.1 + + Changed the default from ``True`` to ``False``, to match + behaviour from before 0.24.0 + Returns ------- Index """ + self._validate_sort_keyword(sort) self._assert_can_do_setop(other) other, result_names = self._convert_can_do_setop(other) @@ -2936,7 +2965,7 @@ def intersection(self, other, sort=True): other_tuples = other._ndarray_values uniq_tuples = set(self_tuples) & set(other_tuples) - if sort: + if sort is None: uniq_tuples = sorted(uniq_tuples) if len(uniq_tuples) == 0: @@ -2947,22 +2976,28 @@ def intersection(self, other, sort=True): return MultiIndex.from_arrays(lzip(*uniq_tuples), sortorder=0, names=result_names) - def difference(self, other, sort=True): + def difference(self, other, sort=None): """ Compute set difference of two MultiIndex objects Parameters ---------- other : MultiIndex - sort : bool, default True + sort : False or None, default None Sort the resulting MultiIndex if possible .. versionadded:: 0.24.0 + .. versionchanged:: 0.24.1 + + Changed the default value from ``True`` to ``None`` + (without change in behaviour). + Returns ------- diff : MultiIndex """ + self._validate_sort_keyword(sort) self._assert_can_do_setop(other) other, result_names = self._convert_can_do_setop(other) @@ -2982,7 +3017,7 @@ def difference(self, other, sort=True): label_diff = np.setdiff1d(np.arange(this.size), indexer, assume_unique=True) difference = this.values.take(label_diff) - if sort: + if sort is None: difference = sorted(difference) if len(difference) == 0: diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py index ebf5b279563cf..5aafe9734b6a0 100644 --- a/pandas/core/indexes/range.py +++ b/pandas/core/indexes/range.py @@ -343,22 +343,28 @@ def equals(self, other): return super(RangeIndex, self).equals(other) - def intersection(self, other, sort=True): + def intersection(self, other, sort=False): """ Form the intersection of two Index objects. 
Parameters ---------- other : Index or array-like - sort : bool, default True + sort : False or None, default False Sort the resulting index if possible .. versionadded:: 0.24.0 + .. versionchanged:: 0.24.1 + + Changed the default to ``False`` to match the behaviour + from before 0.24.0. + Returns ------- intersection : Index """ + self._validate_sort_keyword(sort) if self.equals(other): return self._get_reconciled_name_object(other) @@ -401,7 +407,7 @@ def intersection(self, other, sort=True): if (self._step < 0 and other._step < 0) is not (new_index._step < 0): new_index = new_index[::-1] - if sort: + if sort is None: new_index = new_index.sort_values() return new_index diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py index cbe5ae198838f..830925535dab1 100644 --- a/pandas/core/indexes/timedeltas.py +++ b/pandas/core/indexes/timedeltas.py @@ -207,6 +207,11 @@ def __new__(cls, data=None, unit=None, freq=None, start=None, end=None, 'collection of some kind, {data} was passed' .format(cls=cls.__name__, data=repr(data))) + if unit in {'Y', 'y', 'M'}: + warnings.warn("M and Y units are deprecated and " + "will be removed in a future version.", + FutureWarning, stacklevel=2) + if isinstance(data, TimedeltaArray): if copy: data = data.copy() diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index bbcde8f3b3305..623a48acdd48b 100755 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -5,6 +5,7 @@ import numpy as np from pandas._libs.indexing import _NDFrameIndexerBase +from pandas._libs.lib import item_from_zerodim import pandas.compat as compat from pandas.compat import range, zip from pandas.errors import AbstractMethodError @@ -347,10 +348,10 @@ def _setitem_with_indexer(self, indexer, value): # must have all defined axes if we have a scalar # or a list-like on the non-info axes if we have a # list-like - len_non_info_axes = [ + len_non_info_axes = ( len(_ax) for _i, _ax in enumerate(self.obj.axes) if _i != i - 
] + ) if any(not l for l in len_non_info_axes): if not is_list_like_indexer(value): raise ValueError("cannot set a frame with no " @@ -1856,6 +1857,7 @@ def _getitem_axis(self, key, axis=None): if axis is None: axis = self.axis or 0 + key = item_from_zerodim(key) if is_iterator(key): key = list(key) @@ -2222,6 +2224,7 @@ def _getitem_axis(self, key, axis=None): # a single integer else: + key = item_from_zerodim(key) if not is_integer(key): raise TypeError("Cannot index by location index with a " "non-integer key") diff --git a/pandas/core/internals/__init__.py b/pandas/core/internals/__init__.py index 7878613a8b1b1..a662e1d3ae197 100644 --- a/pandas/core/internals/__init__.py +++ b/pandas/core/internals/__init__.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- from .blocks import ( # noqa:F401 - _block2d_to_blocknd, _factor_indexer, _block_shape, # io.pytables + _block_shape, # io.pytables _safe_reshape, # io.packers make_block, # io.pytables, io.packers FloatBlock, IntBlock, ComplexBlock, BoolBlock, ObjectBlock, diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index df764aa4ba666..4e2c04dba8b04 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -87,7 +87,8 @@ def __init__(self, values, placement, ndim=None): '{mgr}'.format(val=len(self.values), mgr=len(self.mgr_locs))) def _check_ndim(self, values, ndim): - """ndim inference and validation. + """ + ndim inference and validation. Infers ndim from 'values' if not provided to __init__. 
Validates that values.ndim and ndim are consistent if and only if @@ -267,20 +268,6 @@ def _slice(self, slicer): """ return a slice of my values """ return self.values[slicer] - def reshape_nd(self, labels, shape, ref_items): - """ - Parameters - ---------- - labels : list of new axis labels - shape : new shape - ref_items : new ref_items - - return a new block that is transformed to a nd block - """ - return _block2d_to_blocknd(values=self.get_values().T, - placement=self.mgr_locs, shape=shape, - labels=labels, ref_items=ref_items) - def getitem_block(self, slicer, new_mgr_locs=None): """ Perform __getitem__-like, return result as block. @@ -1128,24 +1115,18 @@ def check_int_bool(self, inplace): fill_value=fill_value, coerce=coerce, downcast=downcast) - # try an interp method - try: - m = missing.clean_interp_method(method, **kwargs) - except ValueError: - m = None - - if m is not None: - r = check_int_bool(self, inplace) - if r is not None: - return r - return self._interpolate(method=m, index=index, values=values, - axis=axis, limit=limit, - limit_direction=limit_direction, - limit_area=limit_area, - fill_value=fill_value, inplace=inplace, - downcast=downcast, **kwargs) - - raise ValueError("invalid method '{0}' to interpolate.".format(method)) + # validate the interp method + m = missing.clean_interp_method(method, **kwargs) + + r = check_int_bool(self, inplace) + if r is not None: + return r + return self._interpolate(method=m, index=index, values=values, + axis=axis, limit=limit, + limit_direction=limit_direction, + limit_area=limit_area, + fill_value=fill_value, inplace=inplace, + downcast=downcast, **kwargs) def _interpolate_with_fill(self, method='pad', axis=0, inplace=False, limit=None, fill_value=None, coerce=False, @@ -2072,17 +2053,9 @@ def get_values(self, dtype=None): return object dtype as boxed values, such as Timestamps/Timedelta """ if is_object_dtype(dtype): - values = self.values - - if self.ndim > 1: - values = values.ravel() - - values = 
lib.map_infer(values, self._box_func) - - if self.ndim > 1: - values = values.reshape(self.values.shape) - - return values + values = self.values.ravel() + result = self._holder(values).astype(object) + return result.reshape(self.values.shape) return self.values @@ -3155,31 +3128,6 @@ def _merge_blocks(blocks, dtype=None, _can_consolidate=True): return blocks -def _block2d_to_blocknd(values, placement, shape, labels, ref_items): - """ pivot to the labels shape """ - panel_shape = (len(placement),) + shape - - # TODO: lexsort depth needs to be 2!! - - # Create observation selection vector using major and minor - # labels, for converting to panel format. - selector = _factor_indexer(shape[1:], labels) - mask = np.zeros(np.prod(shape), dtype=bool) - mask.put(selector, True) - - if mask.all(): - pvalues = np.empty(panel_shape, dtype=values.dtype) - else: - dtype, fill_value = maybe_promote(values.dtype) - pvalues = np.empty(panel_shape, dtype=dtype) - pvalues.fill(fill_value) - - for i in range(len(placement)): - pvalues[i].flat[mask] = values[:, i] - - return make_block(pvalues, placement=placement) - - def _safe_reshape(arr, new_shape): """ If possible, reshape `arr` to have shape `new_shape`, @@ -3202,16 +3150,6 @@ def _safe_reshape(arr, new_shape): return arr -def _factor_indexer(shape, labels): - """ - given a tuple of shape and a list of Categorical labels, return the - expanded label indexer - """ - mult = np.array(shape)[::-1].cumprod()[::-1] - return ensure_platform_int( - np.sum(np.array(labels).T * np.append(mult, [1]), axis=1).T) - - def _putmask_smart(v, m, n): """ Return a new ndarray, try to preserve dtype if possible. 
diff --git a/pandas/core/internals/concat.py b/pandas/core/internals/concat.py index 4a16707a376e9..cb98274962656 100644 --- a/pandas/core/internals/concat.py +++ b/pandas/core/internals/concat.py @@ -183,13 +183,15 @@ def get_reindexed_values(self, empty_dtype, upcasted_na): is_datetime64tz_dtype(empty_dtype)): if self.block is None: array = empty_dtype.construct_array_type() - return array(np.full(self.shape[1], fill_value), + return array(np.full(self.shape[1], fill_value.value), dtype=empty_dtype) pass elif getattr(self.block, 'is_categorical', False): pass elif getattr(self.block, 'is_sparse', False): pass + elif getattr(self.block, 'is_extension', False): + pass else: missing_arr = np.empty(self.shape, dtype=empty_dtype) missing_arr.fill(fill_value) @@ -335,8 +337,10 @@ def get_empty_dtype_and_na(join_units): elif 'category' in upcast_classes: return np.dtype(np.object_), np.nan elif 'datetimetz' in upcast_classes: + # GH-25014. We use NaT instead of iNaT, since this eventually + # ends up in DatetimeArray.take, which does not allow iNaT. 
dtype = upcast_classes['datetimetz'] - return dtype[0], tslibs.iNaT + return dtype[0], tslibs.NaT elif 'datetime' in upcast_classes: return np.dtype('M8[ns]'), tslibs.iNaT elif 'timedelta' in upcast_classes: diff --git a/pandas/core/internals/construction.py b/pandas/core/internals/construction.py index c05a9a0f8f3c7..7e97512682720 100644 --- a/pandas/core/internals/construction.py +++ b/pandas/core/internals/construction.py @@ -197,18 +197,12 @@ def init_dict(data, index, columns, dtype=None): arrays.loc[missing] = [val] * missing.sum() else: - - for key in data: - if (isinstance(data[key], ABCDatetimeIndex) and - data[key].tz is not None): - # GH#24096 need copy to be deep for datetime64tz case - # TODO: See if we can avoid these copies - data[key] = data[key].copy(deep=True) - keys = com.dict_keys_to_ordered_list(data) columns = data_names = Index(keys) - arrays = [data[k] for k in keys] - + # GH#24096 need copy to be deep for datetime64tz case + # TODO: See if we can avoid these copies + arrays = [data[k] if not is_datetime64tz_dtype(data[k]) else + data[k].copy(deep=True) for k in keys] return arrays_to_mgr(arrays, data_names, index, columns, dtype=dtype) diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index 050c3d3e87fc6..407db772d73e8 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -552,9 +552,9 @@ def comp(s, regex=False): if isna(s): return isna(values) if hasattr(s, 'asm8'): - return _compare_or_regex_match(maybe_convert_objects(values), - getattr(s, 'asm8'), regex) - return _compare_or_regex_match(values, s, regex) + return _compare_or_regex_search(maybe_convert_objects(values), + getattr(s, 'asm8'), regex) + return _compare_or_regex_search(values, s, regex) masks = [comp(s, regex) for i, s in enumerate(src_list)] @@ -584,10 +584,6 @@ def comp(s, regex=False): bm._consolidate_inplace() return bm - def reshape_nd(self, axes, **kwargs): - """ a 2d-nd reshape operation on a 
BlockManager """ - return self.apply('reshape_nd', axes=axes, **kwargs) - def is_consolidated(self): """ Return True if more than one block with the same dtype @@ -1901,11 +1897,11 @@ def _consolidate(blocks): return new_blocks -def _compare_or_regex_match(a, b, regex=False): +def _compare_or_regex_search(a, b, regex=False): """ Compare two array_like inputs of the same shape or two scalar values - Calls operator.eq or re.match, depending on regex argument. If regex is + Calls operator.eq or re.search, depending on regex argument. If regex is True, perform an element-wise regex matching. Parameters @@ -1921,7 +1917,7 @@ def _compare_or_regex_match(a, b, regex=False): if not regex: op = lambda x: operator.eq(x, b) else: - op = np.vectorize(lambda x: bool(re.match(b, x)) if isinstance(x, str) + op = np.vectorize(lambda x: bool(re.search(b, x)) if isinstance(x, str) else False) is_a_array = isinstance(a, np.ndarray) @@ -1971,16 +1967,28 @@ def items_overlap_with_suffix(left, lsuffix, right, rsuffix): raise ValueError('columns overlap but no suffix specified: ' '{rename}'.format(rename=to_rename)) - def lrenamer(x): - if x in to_rename: - return '{x}{lsuffix}'.format(x=x, lsuffix=lsuffix) - return x + def renamer(x, suffix): + """Rename the left and right indices. + + If there is overlap, and suffix is not None, add + suffix, otherwise, leave it as-is. 
- def rrenamer(x): - if x in to_rename: - return '{x}{rsuffix}'.format(x=x, rsuffix=rsuffix) + Parameters + ---------- + x : original column name + suffix : str or None + + Returns + ------- + x : renamed column name + """ + if x in to_rename and suffix is not None: + return '{x}{suffix}'.format(x=x, suffix=suffix) return x + lrenamer = partial(renamer, suffix=lsuffix) + rrenamer = partial(renamer, suffix=rsuffix) + return (_transform_index(left, lrenamer), _transform_index(right, rrenamer)) diff --git a/pandas/core/missing.py b/pandas/core/missing.py index 15538b8196684..9acdb1a06b2d1 100644 --- a/pandas/core/missing.py +++ b/pandas/core/missing.py @@ -1,5 +1,5 @@ """ -Routines for filling missing data +Routines for filling missing data. """ from distutils.version import LooseVersion import operator @@ -116,7 +116,7 @@ def interpolate_1d(xvalues, yvalues, method='linear', limit=None, xvalues and yvalues will each be 1-d arrays of the same length. Bounds_error is currently hardcoded to False since non-scipy ones don't - take it as an argumnet. + take it as an argument. """ # Treat the original, non-scipy methods first. @@ -244,9 +244,9 @@ def interpolate_1d(xvalues, yvalues, method='linear', limit=None, def _interpolate_scipy_wrapper(x, y, new_x, method, fill_value=None, bounds_error=False, order=None, **kwargs): """ - passed off to scipy.interpolate.interp1d. method is scipy's kind. + Passed off to scipy.interpolate.interp1d. method is scipy's kind. Returns an array interpolated at new_x. Add any new methods to - the list in _clean_interp_method + the list in _clean_interp_method. 
""" try: from scipy import interpolate @@ -293,9 +293,10 @@ def _interpolate_scipy_wrapper(x, y, new_x, method, fill_value=None, bounds_error=bounds_error) new_y = terp(new_x) elif method == 'spline': - # GH #10633 - if not order: - raise ValueError("order needs to be specified and greater than 0") + # GH #10633, #24014 + if isna(order) or (order <= 0): + raise ValueError("order needs to be specified and greater than 0; " + "got order: {}".format(order)) terp = interpolate.UnivariateSpline(x, y, k=order, **kwargs) new_y = terp(new_x) else: @@ -314,7 +315,7 @@ def _interpolate_scipy_wrapper(x, y, new_x, method, fill_value=None, def _from_derivatives(xi, yi, x, order=None, der=0, extrapolate=False): """ - Convenience function for interpolate.BPoly.from_derivatives + Convenience function for interpolate.BPoly.from_derivatives. Construct a piecewise polynomial in the Bernstein basis, compatible with the specified values and derivatives at breakpoints. @@ -325,7 +326,7 @@ def _from_derivatives(xi, yi, x, order=None, der=0, extrapolate=False): sorted 1D array of x-coordinates yi : array_like or list of array-likes yi[i][j] is the j-th derivative known at xi[i] - orders : None or int or array_like of ints. Default: None. + order: None or int or array_like of ints. Default: None. Specifies the degree of local polynomials. If not None, some derivatives are ignored. der : int or list @@ -344,8 +345,7 @@ def _from_derivatives(xi, yi, x, order=None, der=0, extrapolate=False): Returns ------- y : scalar or array_like - The result, of length R or length M or M by R, - + The result, of length R or length M or M by R. 
""" import scipy from scipy import interpolate @@ -418,8 +418,9 @@ def _akima_interpolate(xi, yi, x, der=0, axis=0): def interpolate_2d(values, method='pad', axis=0, limit=None, fill_value=None, dtype=None): - """ perform an actual interpolation of values, values will be make 2-d if - needed fills inplace, returns the result + """ + Perform an actual interpolation of values, values will be make 2-d if + needed fills inplace, returns the result. """ transf = (lambda x: x) if axis == 0 else (lambda x: x.T) @@ -533,13 +534,13 @@ def clean_reindex_fill_method(method): def fill_zeros(result, x, y, name, fill): """ - if this is a reversed op, then flip x,y + If this is a reversed op, then flip x,y - if we have an integer value (or array in y) + If we have an integer value (or array in y) and we have 0's, fill them with the fill, - return the result + return the result. - mask the nan's from x + Mask the nan's from x. """ if fill is None or is_float_dtype(result): return result diff --git a/pandas/core/ops.py b/pandas/core/ops.py index 10cebc6f94b92..dbdabecafae3a 100644 --- a/pandas/core/ops.py +++ b/pandas/core/ops.py @@ -447,7 +447,7 @@ def _get_op_name(op, special): _op_descriptions[reverse_op]['reverse'] = key _flex_doc_SERIES = """ -{desc} of series and other, element-wise (binary operator `{op_name}`). +Return {desc} of series and other, element-wise (binary operator `{op_name}`). Equivalent to ``{equiv}``, but with support to substitute a fill_value for missing data in one of the inputs. @@ -459,14 +459,15 @@ def _get_op_name(op, special): Fill existing missing (NaN) values, and any new element needed for successful Series alignment, with this value before computation. If data in both corresponding Series locations is missing - the result will be missing + the result will be missing. level : int or name Broadcast across a level, matching Index values on the - passed MultiIndex level + passed MultiIndex level. 
Returns ------- -result : Series +Series + The result of the operation. See Also -------- @@ -495,6 +496,27 @@ def _get_op_name(op, special): d 1.0 e NaN dtype: float64 +>>> a.subtract(b, fill_value=0) +a 0.0 +b 1.0 +c 1.0 +d -1.0 +e NaN +dtype: float64 +>>> a.multiply(b) +a 1.0 +b NaN +c NaN +d NaN +e NaN +dtype: float64 +>>> a.divide(b, fill_value=0) +a 1.0 +b inf +c inf +d 0.0 +e NaN +dtype: float64 """ _arith_doc_FRAME = """ @@ -525,7 +547,7 @@ def _get_op_name(op, special): """ _flex_doc_FRAME = """ -{desc} of dataframe and other, element-wise (binary operator `{op_name}`). +Get {desc} of dataframe and other, element-wise (binary operator `{op_name}`). Equivalent to ``{equiv}``, but with support to substitute a fill_value for missing data in one of the inputs. With reverse version, `{reverse}`. @@ -679,7 +701,7 @@ def _get_op_name(op, special): """ _flex_comp_doc_FRAME = """ -{desc} of dataframe and other, element-wise (binary operator `{op_name}`). +Get {desc} of dataframe and other, element-wise (binary operator `{op_name}`). Among flexible wrappers (`eq`, `ne`, `le`, `lt`, `ge`, `gt`) to comparison operators. @@ -825,7 +847,7 @@ def _get_op_name(op, special): """ _flex_doc_PANEL = """ -{desc} of series and other, element-wise (binary operator `{op_name}`). +Return {desc} of series and other, element-wise (binary operator `{op_name}`). Equivalent to ``{equiv}``. 
Parameters diff --git a/pandas/core/panel.py b/pandas/core/panel.py index 540192d1a592c..1555542079d80 100644 --- a/pandas/core/panel.py +++ b/pandas/core/panel.py @@ -4,12 +4,13 @@ # pylint: disable=E1103,W0231,W0212,W0621 from __future__ import division +from collections import OrderedDict import warnings import numpy as np import pandas.compat as compat -from pandas.compat import OrderedDict, map, range, u, zip +from pandas.compat import map, range, u, zip from pandas.compat.numpy import function as nv from pandas.util._decorators import Appender, Substitution, deprecate_kwarg from pandas.util._validators import validate_axis_style_args @@ -42,7 +43,7 @@ axes_single_arg="{0, 1, 2, 'items', 'major_axis', 'minor_axis'}", optional_mapper='', optional_axis='', optional_labels='') _shared_doc_kwargs['args_transpose'] = ( - "three positional arguments: each one of\n{ax_single}".format( + "{ax_single}\n\tThree positional arguments from given options.".format( ax_single=_shared_doc_kwargs['axes_single_arg'])) @@ -539,7 +540,7 @@ def set_value(self, *args, **kwargs): ------- panel : Panel If label combo is contained, will be reference to calling Panel, - otherwise a new object + otherwise a new object. """ warnings.warn("set_value is deprecated and will be removed " "in a future release. Please use " @@ -802,7 +803,7 @@ def major_xs(self, key): Returns ------- y : DataFrame - index -> minor axis, columns -> items + Index -> minor axis, columns -> items. Notes ----- @@ -826,7 +827,7 @@ def minor_xs(self, key): Returns ------- y : DataFrame - index -> major axis, columns -> items + Index -> major axis, columns -> items. 
Notes ----- @@ -917,9 +918,7 @@ def groupby(self, function, axis='major'): ------- grouped : PanelGroupBy """ - from pandas.core.groupby import PanelGroupBy - axis = self._get_axis_number(axis) - return PanelGroupBy(self, function, axis=axis) + raise NotImplementedError("Panel is removed in pandas 0.25.0") def to_frame(self, filter_observations=True): """ @@ -999,7 +998,7 @@ def construct_index_parts(idx, major=True): def apply(self, func, axis='major', **kwargs): """ - Applies function along axis (or axes) of the Panel. + Apply function along axis (or axes) of the Panel. Parameters ---------- @@ -1010,7 +1009,8 @@ def apply(self, func, axis='major', **kwargs): DataFrames of items & major axis will be passed axis : {'items', 'minor', 'major'}, or {0, 1, 2}, or a tuple with two axes - Additional keyword arguments will be passed as keywords to the function + **kwargs + Additional keyword arguments will be passed to the function. Returns ------- diff --git a/pandas/core/resample.py b/pandas/core/resample.py index 6822225273906..b3b28d7772713 100644 --- a/pandas/core/resample.py +++ b/pandas/core/resample.py @@ -20,7 +20,7 @@ import pandas.core.algorithms as algos from pandas.core.generic import _shared_docs from pandas.core.groupby.base import GroupByMixin -from pandas.core.groupby.generic import PanelGroupBy, SeriesGroupBy +from pandas.core.groupby.generic import SeriesGroupBy from pandas.core.groupby.groupby import ( GroupBy, _GroupBy, _pipe_template, groupby) from pandas.core.groupby.grouper import Grouper @@ -30,14 +30,12 @@ from pandas.core.indexes.timedeltas import TimedeltaIndex, timedelta_range from pandas.tseries.frequencies import to_offset -from pandas.tseries.offsets import ( - DateOffset, Day, Nano, Tick, delta_to_nanoseconds) +from pandas.tseries.offsets import DateOffset, Day, Nano, Tick _shared_docs_kwargs = dict() class Resampler(_GroupBy): - """ Class for resampling datetimelike data, a groupby-like operation. 
See aggregate, transform, and apply functions on this object. @@ -85,9 +83,9 @@ def __unicode__(self): """ Provide a nice str repr of our rolling object. """ - attrs = ["{k}={v}".format(k=k, v=getattr(self.groupby, k)) + attrs = ("{k}={v}".format(k=k, v=getattr(self.groupby, k)) for k in self._attributes if - getattr(self.groupby, k, None) is not None] + getattr(self.groupby, k, None) is not None) return "{klass} [{attrs}]".format(klass=self.__class__.__name__, attrs=', '.join(attrs)) @@ -108,7 +106,7 @@ def __iter__(self): Returns ------- Generator yielding sequence of (name, subsetted object) - for each group + for each group. See Also -------- @@ -215,9 +213,9 @@ def pipe(self, func, *args, **kwargs): _agg_see_also_doc = dedent(""" See Also -------- - pandas.DataFrame.groupby.aggregate - pandas.DataFrame.resample.transform - pandas.DataFrame.aggregate + DataFrame.groupby.aggregate + DataFrame.resample.transform + DataFrame.aggregate """) _agg_examples_doc = dedent(""" @@ -287,8 +285,8 @@ def transform(self, arg, *args, **kwargs): Parameters ---------- - func : function - To apply to each group. Should return a Series with the same index + arg : function + To apply to each group. Should return a Series with the same index. Returns ------- @@ -342,15 +340,10 @@ def _groupby_and_aggregate(self, how, grouper=None, *args, **kwargs): obj = self._selected_obj - try: - grouped = groupby(obj, by=None, grouper=grouper, axis=self.axis) - except TypeError: - - # panel grouper - grouped = PanelGroupBy(obj, grouper=grouper, axis=self.axis) + grouped = groupby(obj, by=None, grouper=grouper, axis=self.axis) try: - if isinstance(obj, ABCDataFrame) and compat.callable(how): + if isinstance(obj, ABCDataFrame) and callable(how): # Check if the function is reducing or not. result = grouped._aggregate_item_by_item(how, *args, **kwargs) else: @@ -424,7 +417,7 @@ def pad(self, limit=None): Returns ------- - an upsampled Series + An upsampled Series. 
See Also -------- @@ -524,9 +517,9 @@ def backfill(self, limit=None): 'backfill'. nearest : Fill NaN values with nearest neighbor starting from center. pad : Forward fill NaN values. - pandas.Series.fillna : Fill NaN values in the Series using the + Series.fillna : Fill NaN values in the Series using the specified method, which can be 'backfill'. - pandas.DataFrame.fillna : Fill NaN values in the DataFrame using the + DataFrame.fillna : Fill NaN values in the DataFrame using the specified method, which can be 'backfill'. References @@ -637,9 +630,9 @@ def fillna(self, method, limit=None): nearest : Fill NaN values in the resampled data with nearest neighbor starting from center. interpolate : Fill NaN values using interpolation. - pandas.Series.fillna : Fill NaN values in the Series using the + Series.fillna : Fill NaN values in the Series using the specified method, which can be 'bfill' and 'ffill'. - pandas.DataFrame.fillna : Fill NaN values in the DataFrame using the + DataFrame.fillna : Fill NaN values in the DataFrame using the specified method, which can be 'bfill' and 'ffill'. References @@ -802,7 +795,7 @@ def std(self, ddof=1, *args, **kwargs): Parameters ---------- ddof : integer, default 1 - degrees of freedom + Degrees of freedom. """ nv.validate_resampler_func('std', args, kwargs) return self._downsample('std', ddof=ddof) @@ -1613,20 +1606,20 @@ def _get_timestamp_range_edges(first, last, offset, closed='left', base=0): A tuple of length 2, containing the adjusted pd.Timestamp objects. """ if isinstance(offset, Tick): - is_day = isinstance(offset, Day) - day_nanos = delta_to_nanoseconds(timedelta(1)) - - # #1165 and #24127 - if (is_day and not offset.nanos % day_nanos) or not is_day: - first, last = _adjust_dates_anchored(first, last, offset, - closed=closed, base=base) - if is_day and first.tz is not None: - # _adjust_dates_anchored assumes 'D' means 24H, but first/last - # might contain a DST transition (23H, 24H, or 25H). 
- # Ensure first/last snap to midnight. - first = first.normalize() - last = last.normalize() - return first, last + if isinstance(offset, Day): + # _adjust_dates_anchored assumes 'D' means 24H, but first/last + # might contain a DST transition (23H, 24H, or 25H). + # So "pretend" the dates are naive when adjusting the endpoints + tz = first.tz + first = first.tz_localize(None) + last = last.tz_localize(None) + + first, last = _adjust_dates_anchored(first, last, offset, + closed=closed, base=base) + if isinstance(offset, Day): + first = first.tz_localize(tz) + last = last.tz_localize(tz) + return first, last else: first = first.normalize() diff --git a/pandas/core/reshape/concat.py b/pandas/core/reshape/concat.py index 53671e00e88b4..a6c945ac2e464 100644 --- a/pandas/core/reshape/concat.py +++ b/pandas/core/reshape/concat.py @@ -38,15 +38,15 @@ def concat(objs, axis=0, join='outer', join_axes=None, ignore_index=False, If a dict is passed, the sorted keys will be used as the `keys` argument, unless it is passed, in which case the values will be selected (see below). Any None objects will be dropped silently unless - they are all None in which case a ValueError will be raised + they are all None in which case a ValueError will be raised. axis : {0/'index', 1/'columns'}, default 0 - The axis to concatenate along + The axis to concatenate along. join : {'inner', 'outer'}, default 'outer' - How to handle indexes on other axis(es) + How to handle indexes on other axis (or axes). join_axes : list of Index objects Specific indexes to use for the other n - 1 axes instead of performing - inner/outer set logic - ignore_index : boolean, default False + inner/outer set logic. + ignore_index : bool, default False If True, do not use the index values along the concatenation axis. The resulting axis will be labeled 0, ..., n - 1. 
This is useful if you are concatenating objects where the concatenation axis does not have @@ -54,16 +54,16 @@ def concat(objs, axis=0, join='outer', join_axes=None, ignore_index=False, axes are still respected in the join. keys : sequence, default None If multiple levels passed, should contain tuples. Construct - hierarchical index using the passed keys as the outermost level + hierarchical index using the passed keys as the outermost level. levels : list of sequences, default None Specific levels (unique values) to use for constructing a - MultiIndex. Otherwise they will be inferred from the keys + MultiIndex. Otherwise they will be inferred from the keys. names : list, default None - Names for the levels in the resulting hierarchical index - verify_integrity : boolean, default False + Names for the levels in the resulting hierarchical index. + verify_integrity : bool, default False Check whether the new concatenated axis contains duplicates. This can - be very expensive relative to the actual data concatenation - sort : boolean, default None + be very expensive relative to the actual data concatenation. + sort : bool, default None Sort non-concatenation axis if it is not already aligned when `join` is 'outer'. The current default of sorting is deprecated and will change to not-sorting in a future version of pandas. @@ -76,12 +76,12 @@ def concat(objs, axis=0, join='outer', join_axes=None, ignore_index=False, .. versionadded:: 0.23.0 - copy : boolean, default True - If False, do not copy data unnecessarily + copy : bool, default True + If False, do not copy data unnecessarily. Returns ------- - concatenated : object, type of objs + object, type of objs When concatenating all ``Series`` along the index (axis=0), a ``Series`` is returned. When ``objs`` contains at least one ``DataFrame``, a ``DataFrame`` is returned. 
When concatenating along @@ -89,10 +89,10 @@ def concat(objs, axis=0, join='outer', join_axes=None, ignore_index=False, See Also -------- - Series.append - DataFrame.append - DataFrame.join - DataFrame.merge + Series.append : Concatenate Series. + DataFrame.append : Concatenate DataFrames. + DataFrame.join : Join DataFrames using indexes. + DataFrame.merge : Merge DataFrames by indexes or columns. Notes ----- @@ -128,7 +128,7 @@ def concat(objs, axis=0, join='outer', join_axes=None, ignore_index=False, Add a hierarchical index at the outermost level of the data with the ``keys`` option. - >>> pd.concat([s1, s2], keys=['s1', 's2',]) + >>> pd.concat([s1, s2], keys=['s1', 's2']) s1 0 a 1 b s2 0 c diff --git a/pandas/core/reshape/melt.py b/pandas/core/reshape/melt.py index 312a108ad3380..0fa80de812c5f 100644 --- a/pandas/core/reshape/melt.py +++ b/pandas/core/reshape/melt.py @@ -230,7 +230,7 @@ def wide_to_long(df, stubnames, i, j, sep="", suffix=r'\d+'): ------- DataFrame A DataFrame that contains each stub name as a variable, with new index - (i, j) + (i, j). Notes ----- diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index 1dd19a7c1514e..fb50a3c60f705 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -159,9 +159,15 @@ def merge_ordered(left, right, on=None, left DataFrame fill_method : {'ffill', None}, default None Interpolation method for data - suffixes : 2-length sequence (tuple, list, ...) - Suffix to apply to overlapping column names in the left and right - side, respectively + suffixes : Sequence, default is ("_x", "_y") + A length-2 sequence where each element is optionally a string + indicating the suffix to add to overlapping column names in + `left` and `right` respectively. Pass a value of `None` instead + of a string to indicate that the column name from `left` or + `right` should be left as-is, with no suffix. At least one of the + values must not be None. + + .. 
versionchanged:: 0.25.0 how : {'left', 'right', 'outer', 'inner'}, default 'outer' * left: use only keys from left frame (SQL: left outer join) * right: use only keys from right frame (SQL: right outer join) @@ -903,7 +909,7 @@ def _get_merge_keys(self): in zip(self.right.index.levels, self.right.index.codes)] else: - right_keys = [self.right.index.values] + right_keys = [self.right.index._values] elif _any(self.right_on): for k in self.right_on: if is_rkey(k): diff --git a/pandas/core/reshape/pivot.py b/pandas/core/reshape/pivot.py index c7c447d18b6b1..8d7616c4b6b61 100644 --- a/pandas/core/reshape/pivot.py +++ b/pandas/core/reshape/pivot.py @@ -88,9 +88,9 @@ def pivot_table(data, values=None, index=None, columns=None, aggfunc='mean', # the original values are ints # as we grouped with a NaN value # and then dropped, coercing to floats - for v in [v for v in values if v in data and v in agged]: - if (is_integer_dtype(data[v]) and - not is_integer_dtype(agged[v])): + for v in values: + if (v in data and is_integer_dtype(data[v]) and + v in agged and not is_integer_dtype(agged[v])): agged[v] = maybe_downcast_to_dtype(agged[v], data[v].dtype) table = agged @@ -392,36 +392,36 @@ def crosstab(index, columns, values=None, rownames=None, colnames=None, aggfunc=None, margins=False, margins_name='All', dropna=True, normalize=False): """ - Compute a simple cross-tabulation of two (or more) factors. By default + Compute a simple cross tabulation of two (or more) factors. By default computes a frequency table of the factors unless an array of values and an - aggregation function are passed + aggregation function are passed. Parameters ---------- index : array-like, Series, or list of arrays/Series - Values to group by in the rows + Values to group by in the rows. columns : array-like, Series, or list of arrays/Series - Values to group by in the columns + Values to group by in the columns. values : array-like, optional Array of values to aggregate according to the factors. 
Requires `aggfunc` be specified. rownames : sequence, default None - If passed, must match number of row arrays passed + If passed, must match number of row arrays passed. colnames : sequence, default None - If passed, must match number of column arrays passed + If passed, must match number of column arrays passed. aggfunc : function, optional - If specified, requires `values` be specified as well - margins : boolean, default False - Add row/column margins (subtotals) - margins_name : string, default 'All' - Name of the row / column that will contain the totals + If specified, requires `values` be specified as well. + margins : bool, default False + Add row/column margins (subtotals). + margins_name : str, default 'All' + Name of the row/column that will contain the totals when margins is True. .. versionadded:: 0.21.0 - dropna : boolean, default True - Do not include columns whose entries are all NaN - normalize : boolean, {'all', 'index', 'columns'}, or {0,1}, default False + dropna : bool, default True + Do not include columns whose entries are all NaN. + normalize : bool, {'all', 'index', 'columns'}, or {0,1}, default False Normalize by dividing all values by the sum of values. - If passed 'all' or `True`, will normalize over all values. @@ -433,7 +433,13 @@ def crosstab(index, columns, values=None, rownames=None, colnames=None, Returns ------- - crosstab : DataFrame + DataFrame + Cross tabulation of the data. + + See Also + -------- + DataFrame.pivot : Reshape data based on column values. + pivot_table : Create a pivot table as a DataFrame. Notes ----- @@ -455,32 +461,26 @@ def crosstab(index, columns, values=None, rownames=None, colnames=None, ... "one", "two", "two", "two", "one"], dtype=object) >>> c = np.array(["dull", "dull", "shiny", "dull", "dull", "shiny", ... "shiny", "dull", "shiny", "shiny", "shiny"], - ... dtype=object) - + ... dtype=object) >>> pd.crosstab(a, [b, c], rownames=['a'], colnames=['b', 'c']) - ... 
# doctest: +NORMALIZE_WHITESPACE b one two c dull shiny dull shiny a bar 1 2 1 0 foo 2 2 1 2 + Here 'c' and 'f' are not represented in the data and will not be + shown in the output because dropna is True by default. Set + dropna=False to preserve categories with no data. + >>> foo = pd.Categorical(['a', 'b'], categories=['a', 'b', 'c']) >>> bar = pd.Categorical(['d', 'e'], categories=['d', 'e', 'f']) - >>> crosstab(foo, bar) # 'c' and 'f' are not represented in the data, - # and will not be shown in the output because - # dropna is True by default. Set 'dropna=False' - # to preserve categories with no data - ... # doctest: +SKIP + >>> pd.crosstab(foo, bar) col_0 d e row_0 a 1 0 b 0 1 - - >>> crosstab(foo, bar, dropna=False) # 'c' and 'f' are not represented - # in the data, but they still will be counted - # and shown in the output - ... # doctest: +SKIP + >>> pd.crosstab(foo, bar, dropna=False) col_0 d e f row_0 a 1 0 0 diff --git a/pandas/core/reshape/reshape.py b/pandas/core/reshape/reshape.py index f436b3b92a359..6ba33301753d6 100644 --- a/pandas/core/reshape/reshape.py +++ b/pandas/core/reshape/reshape.py @@ -701,19 +701,20 @@ def _convert_level_number(level_num, columns): def get_dummies(data, prefix=None, prefix_sep='_', dummy_na=False, columns=None, sparse=False, drop_first=False, dtype=None): """ - Convert categorical variable into dummy/indicator variables + Convert categorical variable into dummy/indicator variables. Parameters ---------- data : array-like, Series, or DataFrame - prefix : string, list of strings, or dict of strings, default None + Data of which to get dummy indicators. + prefix : str, list of str, or dict of str, default None String to append DataFrame column names. Pass a list with length equal to the number of columns when calling get_dummies on a DataFrame. Alternatively, `prefix` can be a dictionary mapping column names to prefixes. 
- prefix_sep : string, default '_' + prefix_sep : str, default '_' If appending prefix, separator/delimiter to use. Or pass a - list or dictionary as with `prefix.` + list or dictionary as with `prefix`. dummy_na : bool, default False Add a column to indicate NaNs, if False NaNs are ignored. columns : list-like, default None @@ -736,11 +737,12 @@ def get_dummies(data, prefix=None, prefix_sep='_', dummy_na=False, Returns ------- - dummies : DataFrame + DataFrame + Dummy-coded data. See Also -------- - Series.str.get_dummies + Series.str.get_dummies : Convert Series to dummy codes. Examples -------- diff --git a/pandas/core/reshape/tile.py b/pandas/core/reshape/tile.py index c107ed51226b0..f99fd9004bb31 100644 --- a/pandas/core/reshape/tile.py +++ b/pandas/core/reshape/tile.py @@ -35,7 +35,7 @@ def cut(x, bins, right=True, labels=None, retbins=False, precision=3, ---------- x : array-like The input array to be binned. Must be 1-dimensional. - bins : int, sequence of scalars, or pandas.IntervalIndex + bins : int, sequence of scalars, or IntervalIndex The criteria to bin by. * int : Defines the number of equal-width bins in the range of `x`. The @@ -70,16 +70,16 @@ def cut(x, bins, right=True, labels=None, retbins=False, precision=3, Returns ------- - out : pandas.Categorical, Series, or ndarray + out : Categorical, Series, or ndarray An array-like object representing the respective bin for each value of `x`. The type depends on the value of `labels`. * True (default) : returns a Series for Series `x` or a - pandas.Categorical for all other inputs. The values stored within + Categorical for all other inputs. The values stored within are Interval dtype. * sequence of scalars : returns a Series for Series `x` or a - pandas.Categorical for all other inputs. The values stored within + Categorical for all other inputs. The values stored within are whatever the type in the sequence is. * False : returns an ndarray of integers. 
@@ -94,16 +94,15 @@ def cut(x, bins, right=True, labels=None, retbins=False, precision=3, -------- qcut : Discretize variable into equal-sized buckets based on rank or based on sample quantiles. - pandas.Categorical : Array type for storing data that come from a + Categorical : Array type for storing data that come from a fixed set of values. Series : One-dimensional array with axis labels (including time series). - pandas.IntervalIndex : Immutable Index implementing an ordered, - sliceable set. + IntervalIndex : Immutable Index implementing an ordered, sliceable set. Notes ----- Any NA values will be NA in the result. Out of bounds values will be NA in - the resulting Series or pandas.Categorical object. + the resulting Series or Categorical object. Examples -------- @@ -164,7 +163,7 @@ def cut(x, bins, right=True, labels=None, retbins=False, precision=3, Use `drop` optional when bins is not unique >>> pd.cut(s, [0, 2, 4, 6, 10, 10], labels=False, retbins=True, - ... right=False, duplicates='drop') + ... right=False, duplicates='drop') ... 
# doctest: +ELLIPSIS (a 0.0 b 1.0 @@ -373,14 +372,6 @@ def _bins_to_cuts(x, bins, right=True, labels=None, return result, bins -def _trim_zeros(x): - while len(x) > 1 and x[-1] == '0': - x = x[:-1] - if len(x) > 1 and x[-1] == '.': - x = x[:-1] - return x - - def _coerce_to_type(x): """ if the passed data is of datetime/timedelta type, diff --git a/pandas/core/series.py b/pandas/core/series.py index a25aa86a47927..f6598ed1ee614 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -3,6 +3,7 @@ """ from __future__ import division +from collections import OrderedDict from textwrap import dedent import warnings @@ -10,7 +11,7 @@ from pandas._libs import iNaT, index as libindex, lib, tslibs import pandas.compat as compat -from pandas.compat import PY36, OrderedDict, StringIO, u, zip +from pandas.compat import PY36, StringIO, u, zip from pandas.compat.numpy import function as nv from pandas.util._decorators import Appender, Substitution, deprecate from pandas.util._validators import validate_bool_kwarg @@ -129,7 +130,7 @@ class Series(base.IndexOpsMixin, generic.NDFrame): sequence are used, the index will override the keys found in the dict. dtype : str, numpy.dtype, or ExtensionDtype, optional - dtype for the output Series. If not specified, this will be + Data type for the output Series. If not specified, this will be inferred from `data`. See the :ref:`user guide <basics.dtypes>` for more usages. copy : bool, default False @@ -444,7 +445,7 @@ def values(self): Returns ------- - arr : numpy.ndarray or ndarray-like + numpy.ndarray or ndarray-like See Also -------- @@ -513,6 +514,11 @@ def ravel(self, order='C'): """ Return the flattened underlying data as an ndarray. + Returns + ------- + numpy.ndarray or ndarray-like + Flattened data of the Series. + See Also -------- numpy.ndarray.ravel @@ -580,7 +586,7 @@ def nonzero(self): def put(self, *args, **kwargs): """ - Applies the `put` method to its `values` attribute if it has one. 
+ Apply the `put` method to its `values` attribute if it has one. See Also -------- @@ -687,7 +693,7 @@ def __array__(self, dtype=None): See Also -------- - pandas.array : Create a new array from data. + array : Create a new array from data. Series.array : Zero-copy view to the array backing the Series. Series.to_numpy : Series method for similar behavior. @@ -830,7 +836,7 @@ def _ixs(self, i, axis=0): Returns ------- - value : scalar (int) or Series (slice, sequence) + scalar (int) or Series (slice, sequence) """ try: @@ -1120,7 +1126,7 @@ def repeat(self, repeats, axis=None): Returns ------- - repeated_series : Series + Series Newly created Series with repeated elements. See Also @@ -1173,7 +1179,7 @@ def get_value(self, label, takeable=False): Returns ------- - value : scalar value + scalar value """ warnings.warn("get_value is deprecated and will be removed " "in a future release. Please use " @@ -1207,9 +1213,9 @@ def set_value(self, label, value, takeable=False): Returns ------- - series : Series + Series If label is contained, will be reference to calling Series, - otherwise a new object + otherwise a new object. """ warnings.warn("set_value is deprecated and will be removed " "in a future release. Please use " @@ -1394,29 +1400,30 @@ def to_string(self, buf=None, na_rep='NaN', float_format=None, header=True, Parameters ---------- buf : StringIO-like, optional - buffer to write to - na_rep : string, optional - string representation of NAN to use, default 'NaN' + Buffer to write to. + na_rep : str, optional + String representation of NaN to use, default 'NaN'. float_format : one-parameter function, optional - formatter function to apply to columns' elements if they are floats - default None - header : boolean, default True - Add the Series header (index name) + Formatter function to apply to columns' elements if they are + floats, default None. + header : bool, default True + Add the Series header (index name). 
index : bool, optional - Add index (row) labels, default True - length : boolean, default False - Add the Series length - dtype : boolean, default False - Add the Series dtype - name : boolean, default False - Add the Series name if not None + Add index (row) labels, default True. + length : bool, default False + Add the Series length. + dtype : bool, default False + Add the Series dtype. + name : bool, default False + Add the Series name if not None. max_rows : int, optional Maximum number of rows to show before truncating. If None, show all. Returns ------- - formatted : string (if not buffer passed) + str or None + String representation of Series if ``buf=None``, otherwise None. """ formatter = fmt.SeriesFormatter(self, name=name, length=length, @@ -1456,7 +1463,7 @@ def iteritems(self): def keys(self): """ - Alias for index. + Return alias for index. """ return self.index @@ -1476,7 +1483,8 @@ def to_dict(self, into=dict): Returns ------- - value_dict : collections.Mapping + collections.Mapping + Key-value representation of Series. Examples -------- @@ -1488,7 +1496,7 @@ def to_dict(self, into=dict): OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)]) >>> dd = defaultdict(list) >>> s.to_dict(dd) - defaultdict(<type 'list'>, {0: 1, 1: 2, 2: 3, 3: 4}) + defaultdict(<class 'list'>, {0: 1, 1: 2, 2: 3, 3: 4}) """ # GH16122 into_c = com.standardize_mapping(into) @@ -1506,7 +1514,18 @@ def to_frame(self, name=None): Returns ------- - data_frame : DataFrame + DataFrame + DataFrame representation of Series. + + Examples + -------- + >>> s = pd.Series(["a", "b", "c"], + ... name="vals") + >>> s.to_frame() + vals + 0 a + 1 b + 2 c """ if name is None: df = self._constructor_expanddim(self) @@ -1521,12 +1540,14 @@ def to_sparse(self, kind='block', fill_value=None): Parameters ---------- - kind : {'block', 'integer'} + kind : {'block', 'integer'}, default 'block' fill_value : float, defaults to NaN (missing) + Value to use for filling NaN values. 
Returns ------- - sp : SparseSeries + SparseSeries + Sparse representation of the Series. """ # TODO: deprecate from pandas.core.sparse.series import SparseSeries @@ -1564,11 +1585,18 @@ def count(self, level=None): ---------- level : int or level name, default None If the axis is a MultiIndex (hierarchical), count along a - particular level, collapsing into a smaller Series + particular level, collapsing into a smaller Series. Returns ------- - nobs : int or Series (if level specified) + int or Series (if level specified) + Number of non-null values in the Series. + + Examples + -------- + >>> s = pd.Series([0.0, 1.0, np.nan]) + >>> s.count() + 2 """ if level is None: return notna(com.values_from_object(self)).sum() @@ -1597,14 +1625,15 @@ def mode(self, dropna=True): Parameters ---------- - dropna : boolean, default True + dropna : bool, default True Don't consider counts of NaN/NaT. .. versionadded:: 0.24.0 Returns ------- - modes : Series (sorted) + Series + Modes of the Series in sorted order. """ # TODO: Add option for bins like value_counts() return algorithms.mode(self, dropna=dropna) @@ -1619,10 +1648,19 @@ def unique(self): Returns ------- ndarray or ExtensionArray - The unique values returned as a NumPy array. In case of an - extension-array backed Series, a new - :class:`~api.extensions.ExtensionArray` of that type with just - the unique values is returned. This includes + The unique values returned as a NumPy array. See Notes. + + See Also + -------- + unique : Top-level unique method for any 1-d array-like object. + Index.unique : Return Index with unique values from an Index object. + + Notes + ----- + Returns the unique values as a NumPy array. In case of an + extension-array backed Series, a new + :class:`~api.extensions.ExtensionArray` of that type with just + the unique values is returned. 
This includes * Categorical * Period @@ -1631,10 +1669,7 @@ def unique(self): * Sparse * IntegerNA - See Also - -------- - unique : Top-level unique method for any 1-d array-like object. - Index.unique : Return Index with unique values from an Index object. + See Examples section. Examples -------- @@ -1677,12 +1712,13 @@ def drop_duplicates(self, keep='first', inplace=False): - 'first' : Drop duplicates except for the first occurrence. - 'last' : Drop duplicates except for the last occurrence. - ``False`` : Drop all duplicates. - inplace : boolean, default ``False`` + inplace : bool, default ``False`` If ``True``, performs operation inplace and returns None. Returns ------- - deduplicated : Series + Series + Series with duplicates dropped. See Also -------- @@ -1759,7 +1795,9 @@ def duplicated(self, keep='first'): Returns ------- - pandas.core.series.Series + Series + Series indicating whether each value has occurred in the + preceding values. See Also -------- @@ -1823,7 +1861,7 @@ def idxmin(self, axis=0, skipna=True, *args, **kwargs): Parameters ---------- - skipna : boolean, default True + skipna : bool, default True Exclude NA/null values. If the entire Series is NA, the result will be NA. axis : int, default 0 @@ -1835,7 +1873,8 @@ def idxmin(self, axis=0, skipna=True, *args, **kwargs): Returns ------- - idxmin : Index of minimum of values. + Index + Label of the minimum value. Raises ------ @@ -1860,7 +1899,7 @@ def idxmin(self, axis=0, skipna=True, *args, **kwargs): Examples -------- >>> s = pd.Series(data=[1, None, 4, 1], - ... index=['A' ,'B' ,'C' ,'D']) + ... index=['A', 'B', 'C', 'D']) >>> s A 1.0 B NaN @@ -1892,7 +1931,7 @@ def idxmax(self, axis=0, skipna=True, *args, **kwargs): Parameters ---------- - skipna : boolean, default True + skipna : bool, default True Exclude NA/null values. If the entire Series is NA, the result will be NA. 
axis : int, default 0 @@ -1904,7 +1943,8 @@ def idxmax(self, axis=0, skipna=True, *args, **kwargs): Returns ------- - idxmax : Index of maximum of values. + Index + Label of the maximum value. Raises ------ @@ -1988,12 +2028,22 @@ def round(self, decimals=0, *args, **kwargs): Returns ------- - Series object + Series + Rounded values of the Series. See Also -------- - numpy.around - DataFrame.round + numpy.around : Round values of an np.array. + DataFrame.round : Round values of a DataFrame. + + Examples + -------- + >>> s = pd.Series([0.1, 1.3, 2.7]) + >>> s.round() + 0 0.0 + 1 1.0 + 2 3.0 + dtype: float64 """ nv.validate_round(args, kwargs) result = com.values_from_object(self).round(decimals) @@ -2008,7 +2058,7 @@ def quantile(self, q=0.5, interpolation='linear'): Parameters ---------- q : float or array-like, default 0.5 (50% quantile) - 0 <= q <= 1, the quantile(s) to compute + 0 <= q <= 1, the quantile(s) to compute. interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'} .. versionadded:: 0.18.0 @@ -2024,9 +2074,10 @@ def quantile(self, q=0.5, interpolation='linear'): Returns ------- - quantile : float or Series - if ``q`` is an array, a Series will be returned where the - index is ``q`` and the values are the quantiles. + float or Series + If ``q`` is an array, a Series will be returned where the + index is ``q`` and the values are the quantiles, otherwise + a float will be returned. See Also -------- @@ -2072,6 +2123,7 @@ def corr(self, other, method='pearson', min_periods=None): Parameters ---------- other : Series + Series with which to compute the correlation. method : {'pearson', 'kendall', 'spearman'} or callable * pearson : standard correlation coefficient * kendall : Kendall Tau correlation coefficient @@ -2081,16 +2133,18 @@ def corr(self, other, method='pearson', min_periods=None): .. 
versionadded:: 0.24.0 min_periods : int, optional - Minimum number of observations needed to have a valid result + Minimum number of observations needed to have a valid result. Returns ------- - correlation : float + float + Correlation with other. Examples -------- - >>> histogram_intersection = lambda a, b: np.minimum(a, b - ... ).sum().round(decimals=1) + >>> def histogram_intersection(a, b): + ... v = np.minimum(a, b).sum().round(decimals=1) + ... return v >>> s1 = pd.Series([.2, .0, .6, .2]) >>> s2 = pd.Series([.3, .6, .0, .1]) >>> s1.corr(s2, method=histogram_intersection) @@ -2115,14 +2169,22 @@ def cov(self, other, min_periods=None): Parameters ---------- other : Series + Series with which to compute the covariance. min_periods : int, optional - Minimum number of observations needed to have a valid result + Minimum number of observations needed to have a valid result. Returns ------- - covariance : float + float + Covariance between Series and other normalized by N-1 + (unbiased estimator). - Normalized by N-1 (unbiased estimator). + Examples + -------- + >>> s1 = pd.Series([0.90010907, 0.13484424, 0.62036035]) + >>> s2 = pd.Series([0.12528585, 0.26962463, 0.51111198]) + >>> s1.cov(s2) + -0.01685762652715874 """ this, other = self.align(other, join='inner', copy=False) if len(this) == 0: @@ -2145,7 +2207,8 @@ def diff(self, periods=1): Returns ------- - diffed : Series + Series + First differences of the Series. 
See Also -------- @@ -2279,7 +2342,7 @@ def dot(self, other): 8 >>> s @ other 8 - >>> df = pd.DataFrame([[0 ,1], [-2, 3], [4, -5], [6, 7]]) + >>> df = pd.DataFrame([[0, 1], [-2, 3], [4, -5], [6, 7]]) >>> s.dot(df) 0 24 1 14 @@ -2331,12 +2394,8 @@ def __rmatmul__(self, other): @Substitution(klass='Series') @Appender(base._shared_docs['searchsorted']) def searchsorted(self, value, side='left', sorter=None): - if sorter is not None: - sorter = ensure_platform_int(sorter) - result = self._values.searchsorted(Series(value)._values, - side=side, sorter=sorter) - - return result[0] if is_scalar(value) else result + return algorithms.searchsorted(self._values, value, + side=side, sorter=sorter) # ------------------------------------------------------------------- # Combination @@ -2348,17 +2407,19 @@ def append(self, to_append, ignore_index=False, verify_integrity=False): Parameters ---------- to_append : Series or list/tuple of Series - ignore_index : boolean, default False + Series to append with self. + ignore_index : bool, default False If True, do not use the index labels. .. versionadded:: 0.19.0 - verify_integrity : boolean, default False - If True, raise Exception on creating index with duplicates + verify_integrity : bool, default False + If True, raise Exception on creating index with duplicates. Returns ------- - appended : Series + Series + Concatenated Series. 
See Also -------- @@ -2376,7 +2437,7 @@ def append(self, to_append, ignore_index=False, verify_integrity=False): -------- >>> s1 = pd.Series([1, 2, 3]) >>> s2 = pd.Series([4, 5, 6]) - >>> s3 = pd.Series([4, 5, 6], index=[3,4,5]) + >>> s3 = pd.Series([4, 5, 6], index=[3, 4, 5]) >>> s1.append(s2) 0 1 1 2 @@ -2439,7 +2500,7 @@ def _binop(self, other, func, level=None, fill_value=None): Returns ------- - combined : Series + Series """ if not isinstance(other, Series): raise AssertionError('Other operand must be Series') @@ -2862,7 +2923,7 @@ def sort_index(self, axis=0, level=None, ascending=True, inplace=False, Returns ------- - pandas.Series + Series The original Series sorted by the labels. See Also @@ -2987,7 +3048,7 @@ def sort_index(self, axis=0, level=None, ascending=True, inplace=False, def argsort(self, axis=0, kind='quicksort', order=None): """ - Overrides ndarray.argsort. Argsorts the value, omitting NA/null values, + Override ndarray.argsort. Argsorts the value, omitting NA/null values, and places the result in the same locations as the non-NA values. Parameters @@ -3002,7 +3063,9 @@ def argsort(self, axis=0, kind='quicksort', order=None): Returns ------- - argsorted : Series, with -1 indicated where nan values are present + Series + Positions of values within the sort order with -1 indicating + nan values. See Also -------- @@ -3035,8 +3098,10 @@ def nlargest(self, n=5, keep='first'): When there are duplicate values that cannot all fit in a Series of `n` elements: - - ``first`` : take the first occurrences based on the index order - - ``last`` : take the last occurrences based on the index order + - ``first`` : return the first `n` occurrences in order + of appearance. + - ``last`` : return the last `n` occurrences in reverse + order of appearance. - ``all`` : keep all occurrences. This can result in a Series of size larger than `n`. 
@@ -3131,8 +3196,10 @@ def nsmallest(self, n=5, keep='first'): When there are duplicate values that cannot all fit in a Series of `n` elements: - - ``first`` : take the first occurrences based on the index order - - ``last`` : take the last occurrences based on the index order + - ``first`` : return the first `n` occurrences in order + of appearance. + - ``last`` : return the last `n` occurrences in reverse + order of appearance. - ``all`` : keep all occurrences. This can result in a Series of size larger than `n`. @@ -3173,7 +3240,7 @@ def nsmallest(self, n=5, keep='first'): Monserat 5200 dtype: int64 - The `n` largest elements where ``n=5`` by default. + The `n` smallest elements where ``n=5`` by default. >>> s.nsmallest() Monserat 5200 @@ -3220,12 +3287,13 @@ def swaplevel(self, i=-2, j=-1, copy=True): Parameters ---------- - i, j : int, string (can be mixed) + i, j : int, str (can be mixed) Level of index to be swapped. Can pass level name as string. Returns ------- - swapped : Series + Series + Series with levels swapped in MultiIndex. .. versionchanged:: 0.18.1 @@ -3265,21 +3333,23 @@ def unstack(self, level=-1, fill_value=None): Parameters ---------- - level : int, string, or list of these, default last level - Level(s) to unstack, can pass level name - fill_value : replace NaN with this value if the unstack produces - missing values + level : int, str, or list of these, default last level + Level(s) to unstack, can pass level name. + fill_value : scalar value, default None + Value to use when replacing NaN values. .. versionadded:: 0.18.0 Returns ------- - unstacked : DataFrame + DataFrame + Unstacked Series. Examples -------- >>> s = pd.Series([1, 2, 3, 4], - ... index=pd.MultiIndex.from_product([['one', 'two'], ['a', 'b']])) + ... index=pd.MultiIndex.from_product([['one', 'two'], + ... 
['a', 'b']])) >>> s one a 1 b 2 @@ -3610,8 +3680,12 @@ def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None, if axis is not None: self._get_axis_number(axis) - # dispatch to ExtensionArray interface - if isinstance(delegate, ExtensionArray): + if isinstance(delegate, Categorical): + # TODO deprecate numeric_only argument for Categorical and use + # skipna as well, see GH25303 + return delegate._reduce(name, numeric_only=numeric_only, **kwds) + elif isinstance(delegate, ExtensionArray): + # dispatch to ExtensionArray interface return delegate._reduce(name, skipna=skipna, **kwds) elif is_datetime64_dtype(delegate): # use DatetimeIndex implementation to handle skipna correctly @@ -3679,7 +3753,7 @@ def rename(self, index=None, **kwargs): Scalar or hashable sequence-like will alter the ``Series.name`` attribute. copy : bool, default True - Also copy underlying data + Whether to copy underlying data. inplace : bool, default False Whether to return a new Series. If True then value of copy is ignored. @@ -3689,11 +3763,12 @@ def rename(self, index=None, **kwargs): Returns ------- - renamed : Series (new object) + Series + Series with index labels or name altered. See Also -------- - Series.rename_axis + Series.rename_axis : Set the name of the axis. Examples -------- @@ -3703,7 +3778,7 @@ def rename(self, index=None, **kwargs): 1 2 2 3 dtype: int64 - >>> s.rename("my_name") # scalar, changes Series.name + >>> s.rename("my_name") # scalar, changes Series.name 0 1 1 2 2 3 @@ -3762,7 +3837,8 @@ def drop(self, labels=None, axis=0, index=None, columns=None, Returns ------- - dropped : pandas.Series + Series + Series with specified index labels removed. 
Raises ------ @@ -3778,7 +3854,7 @@ def drop(self, labels=None, axis=0, index=None, columns=None, Examples -------- - >>> s = pd.Series(data=np.arange(3), index=['A','B','C']) + >>> s = pd.Series(data=np.arange(3), index=['A', 'B', 'C']) >>> s A 0 B 1 @@ -3787,7 +3863,7 @@ def drop(self, labels=None, axis=0, index=None, columns=None, Drop labels B en C - >>> s.drop(labels=['B','C']) + >>> s.drop(labels=['B', 'C']) A 0 dtype: int64 @@ -3960,7 +4036,8 @@ def isin(self, values): Returns ------- - isin : Series (bool dtype) + Series + Series of booleans indicating if each element is in values. Raises ------ @@ -4019,7 +4096,8 @@ def between(self, left, right, inclusive=True): Returns ------- Series - Each element will be a boolean. + Series representing whether each element is between left and + right (inclusive). See Also -------- @@ -4101,27 +4179,27 @@ def from_csv(cls, path, sep=',', parse_dates=True, header=None, Parameters ---------- - path : string file path or file handle / StringIO - sep : string, default ',' - Field delimiter - parse_dates : boolean, default True - Parse dates. Different default from read_table + path : str, file path, or file handle / StringIO + sep : str, default ',' + Field delimiter. + parse_dates : bool, default True + Parse dates. Different default from read_table. header : int, default None - Row to use as header (skip prior rows) + Row to use as header (skip prior rows). index_col : int or sequence, default 0 Column to use for index. If a sequence is given, a MultiIndex - is used. Different default from read_table - encoding : string, optional - a string representing the encoding to use if the contents are - non-ascii, for python versions prior to 3 - infer_datetime_format : boolean, default False + is used. Different default from read_table. + encoding : str, optional + A string representing the encoding to use if the contents are + non-ascii, for python versions prior to 3. 
+ infer_datetime_format : bool, default False If True and `parse_dates` is True for a column, try to infer the datetime format based on the first datetime string. If the format can be inferred, there often will be a large parsing speed-up. Returns ------- - y : Series + Series See Also -------- @@ -4322,19 +4400,21 @@ def valid(self, inplace=False, **kwargs): def to_timestamp(self, freq=None, how='start', copy=True): """ - Cast to datetimeindex of timestamps, at *beginning* of period. + Cast to DatetimeIndex of Timestamps, at *beginning* of period. Parameters ---------- - freq : string, default frequency of PeriodIndex - Desired frequency + freq : str, default frequency of PeriodIndex + Desired frequency. how : {'s', 'e', 'start', 'end'} Convention for converting period to timestamp; start of period - vs. end + vs. end. + copy : bool, default True + Whether or not to return a copy. Returns ------- - ts : Series with DatetimeIndex + Series with DatetimeIndex """ new_values = self._values if copy: @@ -4351,11 +4431,15 @@ def to_period(self, freq=None, copy=True): Parameters ---------- - freq : string, default + freq : str, default None + Frequency associated with the PeriodIndex. + copy : bool, default True + Whether or not to return a copy. Returns ------- - ts : Series with PeriodIndex + Series + Series with index converted to PeriodIndex. 
""" new_values = self._values if copy: diff --git a/pandas/core/sparse/frame.py b/pandas/core/sparse/frame.py index 586193fe11850..2d54b82a3c844 100644 --- a/pandas/core/sparse/frame.py +++ b/pandas/core/sparse/frame.py @@ -124,8 +124,8 @@ def __init__(self, data=None, index=None, columns=None, default_kind=None, columns = Index([]) else: for c in columns: - data[c] = SparseArray(np.nan, index=index, - kind=self._default_kind, + data[c] = SparseArray(self._default_fill_value, + index=index, kind=self._default_kind, fill_value=self._default_fill_value) mgr = to_manager(data, columns, index) if dtype is not None: @@ -194,7 +194,9 @@ def sp_maker(x): return to_manager(sdict, columns, index) def _init_matrix(self, data, index, columns, dtype=None): - """ Init self from ndarray or list of lists """ + """ + Init self from ndarray or list of lists. + """ data = prep_ndarray(data, copy=False) index, columns = self._prep_index(data, index, columns) data = {idx: data[:, i] for i, idx in enumerate(columns)} @@ -202,7 +204,9 @@ def _init_matrix(self, data, index, columns, dtype=None): def _init_spmatrix(self, data, index, columns, dtype=None, fill_value=None): - """ Init self from scipy.sparse matrix """ + """ + Init self from scipy.sparse matrix. 
+ """ index, columns = self._prep_index(data, index, columns) data = data.tocoo() N = len(index) @@ -302,7 +306,9 @@ def __getstate__(self): _default_kind=self._default_kind) def _unpickle_sparse_frame_compat(self, state): - """ original pickle format """ + """ + Original pickle format + """ series, cols, idx, fv, kind = state if not isinstance(cols, Index): # pragma: no cover @@ -338,7 +344,9 @@ def to_dense(self): return DataFrame(data, index=self.index, columns=self.columns) def _apply_columns(self, func): - """ get new SparseDataFrame applying func to each columns """ + """ + Get new SparseDataFrame applying func to each columns + """ new_data = {col: func(series) for col, series in compat.iteritems(self)} diff --git a/pandas/core/sparse/scipy_sparse.py b/pandas/core/sparse/scipy_sparse.py index 2d0ce2d5e5951..5a39a1529a33a 100644 --- a/pandas/core/sparse/scipy_sparse.py +++ b/pandas/core/sparse/scipy_sparse.py @@ -3,7 +3,9 @@ Currently only includes SparseSeries.to_coo helpers. """ -from pandas.compat import OrderedDict, lmap +from collections import OrderedDict + +from pandas.compat import lmap from pandas.core.index import Index, MultiIndex from pandas.core.series import Series @@ -90,7 +92,8 @@ def _get_index_subset_to_coord_dict(index, subset, sort_labels=False): def _sparse_series_to_coo(ss, row_levels=(0, ), column_levels=(1, ), sort_labels=False): - """ Convert a SparseSeries to a scipy.sparse.coo_matrix using index + """ + Convert a SparseSeries to a scipy.sparse.coo_matrix using index levels row_levels, column_levels as the row and column labels respectively. Returns the sparse_matrix, row and column labels. """ @@ -116,7 +119,8 @@ def _sparse_series_to_coo(ss, row_levels=(0, ), column_levels=(1, ), def _coo_to_sparse_series(A, dense_index=False): - """ Convert a scipy.sparse.coo_matrix to a SparseSeries. + """ + Convert a scipy.sparse.coo_matrix to a SparseSeries. Use the defaults given in the SparseSeries constructor. 
""" s = Series(A.data, MultiIndex.from_arrays((A.row, A.col))) diff --git a/pandas/core/strings.py b/pandas/core/strings.py index ca79dcd9408d8..9577b07360f65 100644 --- a/pandas/core/strings.py +++ b/pandas/core/strings.py @@ -120,7 +120,7 @@ def str_count(arr, pat, flags=0): Returns ------- - counts : Series or Index + Series or Index Same type as the calling object containing the integer counts. See Also @@ -283,7 +283,7 @@ def str_contains(arr, pat, case=True, flags=0, na=np.nan, regex=True): return `True`. However, '.0' as a regex matches any character followed by a 0. - >>> s2 = pd.Series(['40','40.0','41','41.0','35']) + >>> s2 = pd.Series(['40', '40.0', '41', '41.0', '35']) >>> s2.str.contains('.0', regex=True) 0 True 1 True @@ -433,13 +433,13 @@ def str_replace(arr, pat, repl, n=-1, case=None, flags=0, regex=True): Parameters ---------- - pat : string or compiled regex + pat : str or compiled regex String can be a character sequence or regular expression. .. versionadded:: 0.20.0 `pat` also accepts a compiled regex. - repl : string or callable + repl : str or callable Replacement string or a callable. The callable is passed the regex match object and must return a replacement string to be used. See :func:`re.sub`. @@ -448,15 +448,15 @@ def str_replace(arr, pat, repl, n=-1, case=None, flags=0, regex=True): `repl` also accepts a callable. n : int, default -1 (all) - Number of replacements to make from start - case : boolean, default None + Number of replacements to make from start. + case : bool, default None - If True, case sensitive (the default if `pat` is a string) - Set to False for case insensitive - Cannot be set if `pat` is a compiled regex flags : int, default 0 (no flags) - re module flags, e.g. re.IGNORECASE - Cannot be set if `pat` is a compiled regex - regex : boolean, default True + regex : bool, default True - If True, assumes the passed-in pattern is a regular expression. 
- If False, treats the pattern as a literal string - Cannot be set to False if `pat` is a compiled regex or `repl` is @@ -537,6 +537,7 @@ def str_replace(arr, pat, repl, n=-1, case=None, flags=0, regex=True): Using a compiled regex with flags + >>> import re >>> regex_pat = re.compile(r'FUZ', flags=re.IGNORECASE) >>> pd.Series(['foo', 'fuz', np.nan]).str.replace(regex_pat, 'bar') 0 foo @@ -604,6 +605,7 @@ def str_repeat(arr, repeats): 0 a 1 b 2 c + dtype: object Single int repeats string in Series @@ -611,6 +613,7 @@ def str_repeat(arr, repeats): 0 aa 1 bb 2 cc + dtype: object Sequence of int repeats corresponding string in Series @@ -618,6 +621,7 @@ def str_repeat(arr, repeats): 0 a 1 bb 2 ccc + dtype: object """ if is_scalar(repeats): def rep(x): @@ -646,13 +650,14 @@ def str_match(arr, pat, case=True, flags=0, na=np.nan): Parameters ---------- - pat : string - Character sequence or regular expression - case : boolean, default True - If True, case sensitive + pat : str + Character sequence or regular expression. + case : bool, default True + If True, case sensitive. flags : int, default 0 (no flags) - re module flags, e.g. re.IGNORECASE - na : default NaN, fill value for missing values + re module flags, e.g. re.IGNORECASE. + na : default NaN + Fill value for missing values. Returns ------- @@ -768,7 +773,7 @@ def str_extract(arr, pat, flags=0, expand=True): Parameters ---------- - pat : string + pat : str Regular expression pattern with capturing groups. flags : int, default 0 (no flags) Flags from the ``re`` module, e.g. ``re.IGNORECASE``, that @@ -966,21 +971,23 @@ def str_extractall(arr, pat, flags=0): def str_get_dummies(arr, sep='|'): """ - Split each string in the Series by sep and return a frame of - dummy/indicator variables. + Split each string in the Series by sep and return a DataFrame + of dummy/indicator variables. Parameters ---------- - sep : string, default "|" + sep : str, default "|" String to split on. 
Returns ------- - dummies : DataFrame + DataFrame + Dummy variables corresponding to values of the Series. See Also -------- - get_dummies + get_dummies : Convert categorical variable into dummy/indicator + variables. Examples -------- @@ -1089,11 +1096,11 @@ def str_findall(arr, pat, flags=0): Parameters ---------- - pat : string + pat : str Pattern or regular expression. flags : int, default 0 - ``re`` module flags, e.g. `re.IGNORECASE` (default is 0, which means - no flags). + Flags from ``re`` module, e.g. `re.IGNORECASE` (default is 0, which + means no flags). Returns ------- @@ -1182,17 +1189,18 @@ def str_find(arr, sub, start=0, end=None, side='left'): Parameters ---------- sub : str - Substring being searched + Substring being searched. start : int - Left edge index + Left edge index. end : int - Right edge index + Right edge index. side : {'left', 'right'}, default 'left' - Specifies a starting side, equivalent to ``find`` or ``rfind`` + Specifies a starting side, equivalent to ``find`` or ``rfind``. Returns ------- - found : Series/Index of integer values + Series or Index + Indexes where substring is found. """ if not isinstance(sub, compat.string_types): @@ -1430,7 +1438,7 @@ def str_slice_replace(arr, start=None, stop=None, repl=None): Returns ------- - replaced : Series or Index + Series or Index Same type as the original object. See Also @@ -1513,7 +1521,7 @@ def str_strip(arr, to_strip=None, side='both'): Returns ------- - stripped : Series/Index of objects + Series or Index """ if side == 'both': f = lambda x: x.strip(to_strip) @@ -1537,30 +1545,30 @@ def str_wrap(arr, width, **kwargs): Parameters ---------- width : int - Maximum line-width + Maximum line width. expand_tabs : bool, optional - If true, tab characters will be expanded to spaces (default: True) + If True, tab characters will be expanded to spaces (default: True). 
replace_whitespace : bool, optional - If true, each whitespace character (as defined by string.whitespace) + If True, each whitespace character (as defined by string.whitespace) remaining after tab expansion will be replaced by a single space - (default: True) + (default: True). drop_whitespace : bool, optional - If true, whitespace that, after wrapping, happens to end up at the - beginning or end of a line is dropped (default: True) + If True, whitespace that, after wrapping, happens to end up at the + beginning or end of a line is dropped (default: True). break_long_words : bool, optional - If true, then words longer than width will be broken in order to ensure + If True, then words longer than width will be broken in order to ensure that no lines are longer than width. If it is false, long words will - not be broken, and some lines may be longer than width. (default: True) + not be broken, and some lines may be longer than width (default: True). break_on_hyphens : bool, optional - If true, wrapping will occur preferably on whitespace and right after + If True, wrapping will occur preferably on whitespace and right after hyphens in compound words, as it is customary in English. If false, only whitespaces will be considered as potentially good places for line breaks, but you need to set break_long_words to false if you want truly - insecable words. (default: True) + insecable words (default: True). 
Returns ------- - wrapped : Series/Index of objects + Series or Index Notes ----- @@ -1581,6 +1589,7 @@ def str_wrap(arr, width, **kwargs): >>> s.str.wrap(12) 0 line to be\nwrapped 1 another line\nto be\nwrapped + dtype: object """ kwargs['width'] = width @@ -1613,7 +1622,7 @@ def str_translate(arr, table, deletechars=None): Returns ------- - translated : Series/Index of objects + Series or Index """ if deletechars is None: f = lambda x: x.translate(table) @@ -1641,15 +1650,16 @@ def str_get(arr, i): Returns ------- - items : Series/Index of objects + Series or Index Examples -------- >>> s = pd.Series(["String", - (1, 2, 3), - ["a", "b", "c"], - 123, -456, - {1:"Hello", "2":"World"}]) + ... (1, 2, 3), + ... ["a", "b", "c"], + ... 123, + ... -456, + ... {1: "Hello", "2": "World"}]) >>> s 0 String 1 (1, 2, 3) @@ -1674,7 +1684,7 @@ def str_get(arr, i): 2 c 3 NaN 4 NaN - 5 NaN + 5 None dtype: object """ def f(x): @@ -1699,7 +1709,7 @@ def str_decode(arr, encoding, errors="strict"): Returns ------- - decoded : Series/Index of objects + Series or Index """ if encoding in _cpython_optimized_decoders: # CPython optimized implementation @@ -1872,7 +1882,7 @@ def _wrap_result(self, result, use_codes=True, if expand is None: # infer from ndim if expand is not specified - expand = False if result.ndim == 1 else True + expand = result.ndim != 1 elif expand is True and not isinstance(self._orig, Index): # required when expand=True is explicitly specified @@ -2091,7 +2101,7 @@ def cat(self, others=None, sep=None, na_rep=None, join=None): Returns ------- - concat : str or Series/Index of objects + str, Series or Index If `others` is None, `str` is returned, otherwise a `Series/Index` (same type as caller) of objects is returned. @@ -2869,7 +2879,7 @@ def rindex(self, sub, start=0, end=None): return self._wrap_result(result) _shared_docs['len'] = (""" - Computes the length of each element in the Series/Index. 
The element may be + Compute the length of each element in the Series/Index. The element may be a sequence (such as a string, tuple or list) or a collection (such as a dictionary). @@ -2916,7 +2926,7 @@ def rindex(self, sub, start=0, end=None): _shared_docs['casemethods'] = (""" Convert strings in the Series/Index to %(type)s. - + %(version)s Equivalent to :meth:`str.%(method)s`. Returns @@ -2933,6 +2943,7 @@ def rindex(self, sub, start=0, end=None): remaining to lowercase. Series.str.swapcase : Converts uppercase to lowercase and lowercase to uppercase. + Series.str.casefold: Removes all case distinctions in the string. Examples -------- @@ -2979,12 +2990,15 @@ def rindex(self, sub, start=0, end=None): 3 sWaPcAsE dtype: object """) - _shared_docs['lower'] = dict(type='lowercase', method='lower') - _shared_docs['upper'] = dict(type='uppercase', method='upper') - _shared_docs['title'] = dict(type='titlecase', method='title') + _shared_docs['lower'] = dict(type='lowercase', method='lower', version='') + _shared_docs['upper'] = dict(type='uppercase', method='upper', version='') + _shared_docs['title'] = dict(type='titlecase', method='title', version='') _shared_docs['capitalize'] = dict(type='be capitalized', - method='capitalize') - _shared_docs['swapcase'] = dict(type='be swapcased', method='swapcase') + method='capitalize', version='') + _shared_docs['swapcase'] = dict(type='be swapcased', method='swapcase', + version='') + _shared_docs['casefold'] = dict(type='be casefolded', method='casefold', + version='\n .. 
versionadded:: 0.25.0\n') lower = _noarg_wrapper(lambda x: x.lower(), docstring=_shared_docs['casemethods'] % _shared_docs['lower']) @@ -3000,6 +3014,9 @@ def rindex(self, sub, start=0, end=None): swapcase = _noarg_wrapper(lambda x: x.swapcase(), docstring=_shared_docs['casemethods'] % _shared_docs['swapcase']) + casefold = _noarg_wrapper(lambda x: x.casefold(), + docstring=_shared_docs['casemethods'] % + _shared_docs['casefold']) _shared_docs['ismethods'] = (""" Check whether all characters in each string are %(type)s. diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py index e6478da400d76..0c76ac6cd75ac 100644 --- a/pandas/core/tools/datetimes.py +++ b/pandas/core/tools/datetimes.py @@ -497,8 +497,8 @@ def to_datetime(arg, errors='raise', dayfirst=False, yearfirst=False, See Also -------- - pandas.DataFrame.astype : Cast argument to a specified dtype. - pandas.to_timedelta : Convert argument to timedelta. + DataFrame.astype : Cast argument to a specified dtype. + to_timedelta : Convert argument to timedelta. 
Examples -------- @@ -588,9 +588,8 @@ def to_datetime(arg, errors='raise', dayfirst=False, yearfirst=False, if not cache_array.empty: result = arg.map(cache_array) else: - from pandas import Series values = convert_listlike(arg._values, True, format) - result = Series(values, index=arg.index, name=arg.name) + result = arg._constructor(values, index=arg.index, name=arg.name) elif isinstance(arg, (ABCDataFrame, compat.MutableMapping)): result = _assemble_from_unit_mappings(arg, errors, box, tz) elif isinstance(arg, ABCIndexClass): @@ -827,7 +826,6 @@ def to_time(arg, format=None, infer_time_format=False, errors='raise'): ------- datetime.time """ - from pandas.core.series import Series def _convert_listlike(arg, format): @@ -892,9 +890,9 @@ def _convert_listlike(arg, format): return arg elif isinstance(arg, time): return arg - elif isinstance(arg, Series): + elif isinstance(arg, ABCSeries): values = _convert_listlike(arg._values, format) - return Series(values, index=arg.index, name=arg.name) + return arg._constructor(values, index=arg.index, name=arg.name) elif isinstance(arg, ABCIndexClass): return _convert_listlike(arg, format) elif is_list_like(arg): diff --git a/pandas/core/tools/numeric.py b/pandas/core/tools/numeric.py index 79d8ee38637f9..08ce649d8602c 100644 --- a/pandas/core/tools/numeric.py +++ b/pandas/core/tools/numeric.py @@ -19,6 +19,14 @@ def to_numeric(arg, errors='raise', downcast=None): depending on the data supplied. Use the `downcast` parameter to obtain other dtypes. + Please note that precision loss may occur if really large numbers + are passed in. Due to the internal limitations of `ndarray`, if + numbers smaller than `-9223372036854775808` (np.iinfo(np.int64).min) + or larger than `18446744073709551615` (np.iinfo(np.uint64).max) are + passed in, it is very likely they will be converted to float so that + they can stored in an `ndarray`. These warnings apply similarly to + `Series` since it internally leverages `ndarray`. 
+ Parameters ---------- arg : scalar, list, tuple, 1-d array, or Series @@ -51,13 +59,13 @@ def to_numeric(arg, errors='raise', downcast=None): Returns ------- ret : numeric if parsing succeeded. - Return type depends on input. Series if Series, otherwise ndarray + Return type depends on input. Series if Series, otherwise ndarray. See Also -------- - pandas.DataFrame.astype : Cast argument to a specified dtype. - pandas.to_datetime : Convert argument to datetime. - pandas.to_timedelta : Convert argument to timedelta. + DataFrame.astype : Cast argument to a specified dtype. + to_datetime : Convert argument to datetime. + to_timedelta : Convert argument to timedelta. numpy.ndarray.astype : Cast a numpy array to a specified type. Examples @@ -130,7 +138,7 @@ def to_numeric(arg, errors='raise', downcast=None): values = values.astype(np.int64) else: values = ensure_object(values) - coerce_numeric = False if errors in ('ignore', 'raise') else True + coerce_numeric = errors not in ('ignore', 'raise') values = lib.maybe_convert_numeric(values, set(), coerce_numeric=coerce_numeric) diff --git a/pandas/core/tools/timedeltas.py b/pandas/core/tools/timedeltas.py index e3428146b91d8..7ebaf3056e79e 100644 --- a/pandas/core/tools/timedeltas.py +++ b/pandas/core/tools/timedeltas.py @@ -2,14 +2,16 @@ timedelta support tools """ +import warnings + import numpy as np +from pandas._libs.tslibs import NaT from pandas._libs.tslibs.timedeltas import Timedelta, parse_timedelta_unit from pandas.core.dtypes.common import is_list_like from pandas.core.dtypes.generic import ABCIndexClass, ABCSeries -import pandas as pd from pandas.core.arrays.timedeltas import sequence_to_td64ns @@ -90,13 +92,17 @@ def to_timedelta(arg, unit='ns', box=True, errors='raise'): raise ValueError("errors must be one of 'ignore', " "'raise', or 'coerce'}") + if unit in {'Y', 'y', 'M'}: + warnings.warn("M and Y units are deprecated and " + "will be removed in a future version.", + FutureWarning, stacklevel=2) + if 
arg is None: return arg elif isinstance(arg, ABCSeries): - from pandas import Series values = _convert_listlike(arg._values, unit=unit, box=False, errors=errors) - return Series(values, index=arg.index, name=arg.name) + return arg._constructor(values, index=arg.index, name=arg.name) elif isinstance(arg, ABCIndexClass): return _convert_listlike(arg, unit=unit, box=box, errors=errors, name=arg.name) @@ -120,7 +126,8 @@ def _coerce_scalar_to_timedelta_type(r, unit='ns', box=True, errors='raise'): try: result = Timedelta(r, unit) if not box: - result = result.asm8 + # explicitly view as timedelta64 for case when result is pd.NaT + result = result.asm8.view('timedelta64[ns]') except ValueError: if errors == 'raise': raise @@ -128,7 +135,7 @@ def _coerce_scalar_to_timedelta_type(r, unit='ns', box=True, errors='raise'): return r # coerce - result = pd.NaT + result = NaT return result diff --git a/pandas/core/window.py b/pandas/core/window.py index 5a9157b43ecd6..9e29fdb94c1e0 100644 --- a/pandas/core/window.py +++ b/pandas/core/window.py @@ -164,9 +164,9 @@ def __unicode__(self): Provide a nice str repr of our rolling object. """ - attrs = ["{k}={v}".format(k=k, v=getattr(self, k)) + attrs = ("{k}={v}".format(k=k, v=getattr(self, k)) for k in self._attributes - if getattr(self, k, None) is not None] + if getattr(self, k, None) is not None) return "{klass} [{attrs}]".format(klass=self._window_type, attrs=','.join(attrs)) @@ -438,7 +438,7 @@ def aggregate(self, arg, *args, **kwargs): class Window(_Window): """ - Provides rolling window calculations. + Provide rolling window calculations. .. versionadded:: 0.18.0 @@ -900,9 +900,9 @@ class _Rolling_and_Expanding(_Rolling): See Also -------- - pandas.Series.%(name)s : Calling object with Series data. - pandas.DataFrame.%(name)s : Calling object with DataFrames. - pandas.DataFrame.count : Count of the full DataFrame. + Series.%(name)s : Calling object with Series data. + DataFrame.%(name)s : Calling object with DataFrames. 
+ DataFrame.count : Count of the full DataFrame. Examples -------- @@ -1271,7 +1271,7 @@ def skew(self, **kwargs): ------- Series or DataFrame Returned object type is determined by the caller of the %(name)s - calculation + calculation. See Also -------- @@ -1322,9 +1322,9 @@ def kurt(self, **kwargs): See Also -------- - pandas.Series.quantile : Computes value at the given quantile over all data + Series.quantile : Computes value at the given quantile over all data in Series. - pandas.DataFrame.quantile : Computes values at the given quantile over + DataFrame.quantile : Computes values at the given quantile over requested axis in DataFrame. Examples @@ -1626,8 +1626,8 @@ def _validate_freq(self): _agg_see_also_doc = dedent(""" See Also -------- - pandas.Series.rolling - pandas.DataFrame.rolling + Series.rolling + DataFrame.rolling """) _agg_examples_doc = dedent(""" @@ -1803,7 +1803,7 @@ def corr(self, other=None, pairwise=None, **kwargs): class RollingGroupby(_GroupByMixin, Rolling): """ - Provides a rolling groupby implementation. + Provide a rolling groupby implementation. .. versionadded:: 0.18.1 @@ -1834,7 +1834,7 @@ def _validate_monotonic(self): class Expanding(_Rolling_and_Expanding): """ - Provides expanding transformations. + Provide expanding transformations. .. versionadded:: 0.18.0 @@ -1916,9 +1916,9 @@ def _get_window(self, other=None): _agg_see_also_doc = dedent(""" See Also -------- - pandas.DataFrame.expanding.aggregate - pandas.DataFrame.rolling.aggregate - pandas.DataFrame.aggregate + DataFrame.expanding.aggregate + DataFrame.rolling.aggregate + DataFrame.aggregate """) _agg_examples_doc = dedent(""" @@ -2076,7 +2076,7 @@ def corr(self, other=None, pairwise=None, **kwargs): class ExpandingGroupby(_GroupByMixin, Expanding): """ - Provides a expanding groupby implementation. + Provide a expanding groupby implementation. .. 
versionadded:: 0.18.1 @@ -2117,7 +2117,7 @@ def _constructor(self): class EWM(_Rolling): r""" - Provides exponential weighted functions. + Provide exponential weighted functions. .. versionadded:: 0.18.0 @@ -2125,16 +2125,17 @@ class EWM(_Rolling): ---------- com : float, optional Specify decay in terms of center of mass, - :math:`\alpha = 1 / (1 + com),\text{ for } com \geq 0` + :math:`\alpha = 1 / (1 + com),\text{ for } com \geq 0`. span : float, optional Specify decay in terms of span, - :math:`\alpha = 2 / (span + 1),\text{ for } span \geq 1` + :math:`\alpha = 2 / (span + 1),\text{ for } span \geq 1`. halflife : float, optional Specify decay in terms of half-life, - :math:`\alpha = 1 - exp(log(0.5) / halflife),\text{ for } halflife > 0` + :math:`\alpha = 1 - exp(log(0.5) / halflife),\text{ for } + halflife > 0`. alpha : float, optional Specify smoothing factor :math:`\alpha` directly, - :math:`0 < \alpha \leq 1` + :math:`0 < \alpha \leq 1`. .. versionadded:: 0.18.0 @@ -2143,14 +2144,19 @@ class EWM(_Rolling): (otherwise result is NA). adjust : bool, default True Divide by decaying adjustment factor in beginning periods to account - for imbalance in relative weightings (viewing EWMA as a moving average) + for imbalance in relative weightings + (viewing EWMA as a moving average). ignore_na : bool, default False Ignore missing values when calculating weights; - specify True to reproduce pre-0.15.0 behavior + specify True to reproduce pre-0.15.0 behavior. + axis : {0 or 'index', 1 or 'columns'}, default 0 + The axis to use. The value 0 identifies the rows, and 1 + identifies the columns. Returns ------- - a Window sub-classed for the particular operation + DataFrame + A Window sub-classed for the particular operation. 
See Also -------- @@ -2188,6 +2194,7 @@ class EWM(_Rolling): -------- >>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]}) + >>> df B 0 0.0 1 1.0 diff --git a/pandas/errors/__init__.py b/pandas/errors/__init__.py index eb6a4674a7497..7d5a7f1a99e41 100644 --- a/pandas/errors/__init__.py +++ b/pandas/errors/__init__.py @@ -9,10 +9,10 @@ class PerformanceWarning(Warning): """ - Warning raised when there is a possible - performance impact. + Warning raised when there is a possible performance impact. """ + class UnsupportedFunctionCall(ValueError): """ Exception raised when attempting to call a numpy function @@ -20,6 +20,7 @@ class UnsupportedFunctionCall(ValueError): the object e.g. ``np.cumsum(groupby_object)``. """ + class UnsortedIndexError(KeyError): """ Error raised when attempting to get a slice of a MultiIndex, @@ -31,7 +32,15 @@ class UnsortedIndexError(KeyError): class ParserError(ValueError): """ - Exception that is raised by an error encountered in `pd.read_csv`. + Exception that is raised by an error encountered in parsing file contents. + + This is a generic error raised for errors encountered when functions like + `read_csv` or `read_html` are parsing contents of a file. + + See Also + -------- + read_csv : Read CSV (comma-separated) file into a DataFrame. + read_html : Read HTML table into a DataFrame. """ @@ -45,8 +54,8 @@ class DtypeWarning(Warning): See Also -------- - pandas.read_csv : Read CSV (comma-separated) file into a DataFrame. - pandas.read_table : Read general delimited file into a DataFrame. + read_csv : Read CSV (comma-separated) file into a DataFrame. + read_table : Read general delimited file into a DataFrame. 
Notes ----- @@ -180,4 +189,4 @@ def __str__(self): else: name = self.class_instance.__class__.__name__ msg = "This {methodtype} must be defined in the concrete class {name}" - return (msg.format(methodtype=self.methodtype, name=name)) + return msg.format(methodtype=self.methodtype, name=name) diff --git a/pandas/io/clipboard/windows.py b/pandas/io/clipboard/windows.py index 3d979a61b5f2d..4f5275af693b7 100644 --- a/pandas/io/clipboard/windows.py +++ b/pandas/io/clipboard/windows.py @@ -29,6 +29,7 @@ def init_windows_clipboard(): HINSTANCE, HMENU, BOOL, UINT, HANDLE) windll = ctypes.windll + msvcrt = ctypes.CDLL('msvcrt') safeCreateWindowExA = CheckedCall(windll.user32.CreateWindowExA) safeCreateWindowExA.argtypes = [DWORD, LPCSTR, LPCSTR, DWORD, INT, INT, @@ -71,6 +72,10 @@ def init_windows_clipboard(): safeGlobalUnlock.argtypes = [HGLOBAL] safeGlobalUnlock.restype = BOOL + wcslen = CheckedCall(msvcrt.wcslen) + wcslen.argtypes = [c_wchar_p] + wcslen.restype = UINT + GMEM_MOVEABLE = 0x0002 CF_UNICODETEXT = 13 @@ -129,13 +134,13 @@ def copy_windows(text): # If the hMem parameter identifies a memory object, # the object must have been allocated using the # function with the GMEM_MOVEABLE flag. 
- count = len(text) + 1 + count = wcslen(text) + 1 handle = safeGlobalAlloc(GMEM_MOVEABLE, count * sizeof(c_wchar)) locked_handle = safeGlobalLock(handle) - ctypes.memmove(c_wchar_p(locked_handle), - c_wchar_p(text), count * sizeof(c_wchar)) + ctypes.memmove(c_wchar_p(locked_handle), c_wchar_p(text), + count * sizeof(c_wchar)) safeGlobalUnlock(handle) safeSetClipboardData(CF_UNICODETEXT, handle) diff --git a/pandas/io/excel.py b/pandas/io/excel.py deleted file mode 100644 index 3d85ae7fd1f46..0000000000000 --- a/pandas/io/excel.py +++ /dev/null @@ -1,2027 +0,0 @@ -""" -Module parse to/from Excel -""" - -# --------------------------------------------------------------------- -# ExcelFile class -import abc -from datetime import date, datetime, time, timedelta -from distutils.version import LooseVersion -from io import UnsupportedOperation -import os -from textwrap import fill -import warnings - -import numpy as np - -import pandas._libs.json as json -import pandas.compat as compat -from pandas.compat import ( - OrderedDict, add_metaclass, lrange, map, range, string_types, u, zip) -from pandas.errors import EmptyDataError -from pandas.util._decorators import Appender, deprecate_kwarg - -from pandas.core.dtypes.common import ( - is_bool, is_float, is_integer, is_list_like) - -from pandas.core import config -from pandas.core.frame import DataFrame - -from pandas.io.common import ( - _NA_VALUES, _is_url, _stringify_path, _urlopen, _validate_header_arg, - get_filepath_or_buffer) -from pandas.io.formats.printing import pprint_thing -from pandas.io.parsers import TextParser - -__all__ = ["read_excel", "ExcelWriter", "ExcelFile"] - -_writer_extensions = ["xlsx", "xls", "xlsm"] -_writers = {} - -_read_excel_doc = """ -Read an Excel file into a pandas DataFrame. - -Support both `xls` and `xlsx` file extensions from a local filesystem or URL. -Support an option to read a single sheet or a list of sheets. 
- -Parameters ----------- -io : str, file descriptor, pathlib.Path, ExcelFile or xlrd.Book - The string could be a URL. Valid URL schemes include http, ftp, s3, - gcs, and file. For file URLs, a host is expected. For instance, a local - file could be /path/to/workbook.xlsx. -sheet_name : str, int, list, or None, default 0 - Strings are used for sheet names. Integers are used in zero-indexed - sheet positions. Lists of strings/integers are used to request - multiple sheets. Specify None to get all sheets. - - Available cases: - - * Defaults to ``0``: 1st sheet as a `DataFrame` - * ``1``: 2nd sheet as a `DataFrame` - * ``"Sheet1"``: Load sheet with name "Sheet1" - * ``[0, 1, "Sheet5"]``: Load first, second and sheet named "Sheet5" - as a dict of `DataFrame` - * None: All sheets. - -header : int, list of int, default 0 - Row (0-indexed) to use for the column labels of the parsed - DataFrame. If a list of integers is passed those row positions will - be combined into a ``MultiIndex``. Use None if there is no header. -names : array-like, default None - List of column names to use. If file contains no header row, - then you should explicitly pass header=None. -index_col : int, list of int, default None - Column (0-indexed) to use as the row labels of the DataFrame. - Pass None if there is no such column. If a list is passed, - those columns will be combined into a ``MultiIndex``. If a - subset of data is selected with ``usecols``, index_col - is based on the subset. -parse_cols : int or list, default None - Alias of `usecols`. - - .. deprecated:: 0.21.0 - Use `usecols` instead. - -usecols : int, str, list-like, or callable default None - Return a subset of the columns. - * If None, then parse all columns. - * If int, then indicates last column to be parsed. - - .. deprecated:: 0.24.0 - Pass in a list of int instead from 0 to `usecols` inclusive. - - * If str, then indicates comma separated list of Excel column letters - and column ranges (e.g. "A:E" or "A,C,E:F"). 
Ranges are inclusive of - both sides. - * If list of int, then indicates list of column numbers to be parsed. - * If list of string, then indicates list of column names to be parsed. - - .. versionadded:: 0.24.0 - - * If callable, then evaluate each column name against it and parse the - column if the callable returns ``True``. - - .. versionadded:: 0.24.0 - -squeeze : bool, default False - If the parsed data only contains one column then return a Series. -dtype : Type name or dict of column -> type, default None - Data type for data or columns. E.g. {'a': np.float64, 'b': np.int32} - Use `object` to preserve data as stored in Excel and not interpret dtype. - If converters are specified, they will be applied INSTEAD - of dtype conversion. - - .. versionadded:: 0.20.0 - -engine : str, default None - If io is not a buffer or path, this must be set to identify io. - Acceptable values are None or xlrd. -converters : dict, default None - Dict of functions for converting values in certain columns. Keys can - either be integers or column labels, values are functions that take one - input argument, the Excel cell content, and return the transformed - content. -true_values : list, default None - Values to consider as True. - - .. versionadded:: 0.19.0 - -false_values : list, default None - Values to consider as False. - - .. versionadded:: 0.19.0 - -skiprows : list-like - Rows to skip at the beginning (0-indexed). -nrows : int, default None - Number of rows to parse. - - .. versionadded:: 0.23.0 - -na_values : scalar, str, list-like, or dict, default None - Additional strings to recognize as NA/NaN. If dict passed, specific - per-column NA values. By default the following values are interpreted - as NaN: '""" + fill("', '".join(sorted(_NA_VALUES)), 70, subsequent_indent=" ") + """'. -keep_default_na : bool, default True - If na_values are specified and keep_default_na is False the default NaN - values are overridden, otherwise they're appended to. 
-verbose : bool, default False - Indicate number of NA values placed in non-numeric columns. -parse_dates : bool, list-like, or dict, default False - The behavior is as follows: - - * bool. If True -> try parsing the index. - * list of int or names. e.g. If [1, 2, 3] -> try parsing columns 1, 2, 3 - each as a separate date column. - * list of lists. e.g. If [[1, 3]] -> combine columns 1 and 3 and parse as - a single date column. - * dict, e.g. {{'foo' : [1, 3]}} -> parse columns 1, 3 as date and call - result 'foo' - - If a column or index contains an unparseable date, the entire column or - index will be returned unaltered as an object data type. For non-standard - datetime parsing, use ``pd.to_datetime`` after ``pd.read_csv`` - - Note: A fast-path exists for iso8601-formatted dates. -date_parser : function, optional - Function to use for converting a sequence of string columns to an array of - datetime instances. The default uses ``dateutil.parser.parser`` to do the - conversion. Pandas will try to call `date_parser` in three different ways, - advancing to the next if an exception occurs: 1) Pass one or more arrays - (as defined by `parse_dates`) as arguments; 2) concatenate (row-wise) the - string values from the columns defined by `parse_dates` into a single array - and pass that; and 3) call `date_parser` once for each row using one or - more strings (corresponding to the columns defined by `parse_dates`) as - arguments. -thousands : str, default None - Thousands separator for parsing string columns to numeric. Note that - this parameter is only necessary for columns stored as TEXT in Excel, - any numeric columns will automatically be parsed, regardless of display - format. -comment : str, default None - Comments out remainder of line. Pass a character or characters to this - argument to indicate comments in the input file. Any data between the - comment string and the end of the current line is ignored. -skip_footer : int, default 0 - Alias of `skipfooter`. 
- - .. deprecated:: 0.23.0 - Use `skipfooter` instead. -skipfooter : int, default 0 - Rows at the end to skip (0-indexed). -convert_float : bool, default True - Convert integral floats to int (i.e., 1.0 --> 1). If False, all numeric - data will be read in as floats: Excel stores all numbers as floats - internally. -mangle_dupe_cols : bool, default True - Duplicate columns will be specified as 'X', 'X.1', ...'X.N', rather than - 'X'...'X'. Passing in False will cause data to be overwritten if there - are duplicate names in the columns. -**kwds : optional - Optional keyword arguments can be passed to ``TextFileReader``. - -Returns -------- -DataFrame or dict of DataFrames - DataFrame from the passed in Excel file. See notes in sheet_name - argument for more information on when a dict of DataFrames is returned. - -See Also --------- -to_excel : Write DataFrame to an Excel file. -to_csv : Write DataFrame to a comma-separated values (csv) file. -read_csv : Read a comma-separated values (csv) file into DataFrame. -read_fwf : Read a table of fixed-width formatted lines into DataFrame. - -Examples --------- -The file can be read using the file name as string or an open file object: - ->>> pd.read_excel('tmp.xlsx', index_col=0) # doctest: +SKIP - Name Value -0 string1 1 -1 string2 2 -2 #Comment 3 - ->>> pd.read_excel(open('tmp.xlsx', 'rb'), -... sheet_name='Sheet3') # doctest: +SKIP - Unnamed: 0 Name Value -0 0 string1 1 -1 1 string2 2 -2 2 #Comment 3 - -Index and header can be specified via the `index_col` and `header` arguments - ->>> pd.read_excel('tmp.xlsx', index_col=None, header=None) # doctest: +SKIP - 0 1 2 -0 NaN Name Value -1 0.0 string1 1 -2 1.0 string2 2 -3 2.0 #Comment 3 - -Column types are inferred but can be explicitly specified - ->>> pd.read_excel('tmp.xlsx', index_col=0, -... 
dtype={'Name': str, 'Value': float}) # doctest: +SKIP - Name Value -0 string1 1.0 -1 string2 2.0 -2 #Comment 3.0 - -True, False, and NA values, and thousands separators have defaults, -but can be explicitly specified, too. Supply the values you would like -as strings or lists of strings! - ->>> pd.read_excel('tmp.xlsx', index_col=0, -... na_values=['string1', 'string2']) # doctest: +SKIP - Name Value -0 NaN 1 -1 NaN 2 -2 #Comment 3 - -Comment lines in the excel input file can be skipped using the `comment` kwarg - ->>> pd.read_excel('tmp.xlsx', index_col=0, comment='#') # doctest: +SKIP - Name Value -0 string1 1.0 -1 string2 2.0 -2 None NaN -""" - - -def register_writer(klass): - """Adds engine to the excel writer registry. You must use this method to - integrate with ``to_excel``. Also adds config options for any new - ``supported_extensions`` defined on the writer.""" - if not compat.callable(klass): - raise ValueError("Can only register callables as engines") - engine_name = klass.engine - _writers[engine_name] = klass - for ext in klass.supported_extensions: - if ext.startswith('.'): - ext = ext[1:] - if ext not in _writer_extensions: - config.register_option("io.excel.{ext}.writer".format(ext=ext), - engine_name, validator=str) - _writer_extensions.append(ext) - - -def _get_default_writer(ext): - _default_writers = {'xlsx': 'openpyxl', 'xlsm': 'openpyxl', 'xls': 'xlwt'} - try: - import xlsxwriter # noqa - _default_writers['xlsx'] = 'xlsxwriter' - except ImportError: - pass - return _default_writers[ext] - - -def get_writer(engine_name): - try: - return _writers[engine_name] - except KeyError: - raise ValueError("No Excel writer '{engine}'" - .format(engine=engine_name)) - - -@Appender(_read_excel_doc) -@deprecate_kwarg("parse_cols", "usecols") -@deprecate_kwarg("skip_footer", "skipfooter") -def read_excel(io, - sheet_name=0, - header=0, - names=None, - index_col=None, - parse_cols=None, - usecols=None, - squeeze=False, - dtype=None, - engine=None, - 
converters=None, - true_values=None, - false_values=None, - skiprows=None, - nrows=None, - na_values=None, - keep_default_na=True, - verbose=False, - parse_dates=False, - date_parser=None, - thousands=None, - comment=None, - skip_footer=0, - skipfooter=0, - convert_float=True, - mangle_dupe_cols=True, - **kwds): - - # Can't use _deprecate_kwarg since sheetname=None has a special meaning - if is_integer(sheet_name) and sheet_name == 0 and 'sheetname' in kwds: - warnings.warn("The `sheetname` keyword is deprecated, use " - "`sheet_name` instead", FutureWarning, stacklevel=2) - sheet_name = kwds.pop("sheetname") - - if 'sheet' in kwds: - raise TypeError("read_excel() got an unexpected keyword argument " - "`sheet`") - - if not isinstance(io, ExcelFile): - io = ExcelFile(io, engine=engine) - - return io.parse( - sheet_name=sheet_name, - header=header, - names=names, - index_col=index_col, - usecols=usecols, - squeeze=squeeze, - dtype=dtype, - converters=converters, - true_values=true_values, - false_values=false_values, - skiprows=skiprows, - nrows=nrows, - na_values=na_values, - keep_default_na=keep_default_na, - verbose=verbose, - parse_dates=parse_dates, - date_parser=date_parser, - thousands=thousands, - comment=comment, - skipfooter=skipfooter, - convert_float=convert_float, - mangle_dupe_cols=mangle_dupe_cols, - **kwds) - - -@add_metaclass(abc.ABCMeta) -class _BaseExcelReader(object): - - @property - @abc.abstractmethod - def sheet_names(self): - pass - - @abc.abstractmethod - def get_sheet_by_name(self, name): - pass - - @abc.abstractmethod - def get_sheet_by_index(self, index): - pass - - @abc.abstractmethod - def get_sheet_data(self, sheet, convert_float): - pass - - def parse(self, - sheet_name=0, - header=0, - names=None, - index_col=None, - usecols=None, - squeeze=False, - dtype=None, - true_values=None, - false_values=None, - skiprows=None, - nrows=None, - na_values=None, - verbose=False, - parse_dates=False, - date_parser=None, - thousands=None, - 
comment=None, - skipfooter=0, - convert_float=True, - mangle_dupe_cols=True, - **kwds): - - _validate_header_arg(header) - - ret_dict = False - - # Keep sheetname to maintain backwards compatibility. - if isinstance(sheet_name, list): - sheets = sheet_name - ret_dict = True - elif sheet_name is None: - sheets = self.sheet_names - ret_dict = True - else: - sheets = [sheet_name] - - # handle same-type duplicates. - sheets = list(OrderedDict.fromkeys(sheets).keys()) - - output = OrderedDict() - - for asheetname in sheets: - if verbose: - print("Reading sheet {sheet}".format(sheet=asheetname)) - - if isinstance(asheetname, compat.string_types): - sheet = self.get_sheet_by_name(asheetname) - else: # assume an integer if not a string - sheet = self.get_sheet_by_index(asheetname) - - data = self.get_sheet_data(sheet, convert_float) - usecols = _maybe_convert_usecols(usecols) - - if sheet.nrows == 0: - output[asheetname] = DataFrame() - continue - - if is_list_like(header) and len(header) == 1: - header = header[0] - - # forward fill and pull out names for MultiIndex column - header_names = None - if header is not None and is_list_like(header): - header_names = [] - control_row = [True] * len(data[0]) - - for row in header: - if is_integer(skiprows): - row += skiprows - - data[row], control_row = _fill_mi_header(data[row], - control_row) - - if index_col is not None: - header_name, _ = _pop_header_name(data[row], index_col) - header_names.append(header_name) - - if is_list_like(index_col): - # Forward fill values for MultiIndex index. - if not is_list_like(header): - offset = 1 + header - else: - offset = 1 + max(header) - - # Check if we have an empty dataset - # before trying to collect data. 
- if offset < len(data): - for col in index_col: - last = data[offset][col] - - for row in range(offset + 1, len(data)): - if data[row][col] == '' or data[row][col] is None: - data[row][col] = last - else: - last = data[row][col] - - has_index_names = is_list_like(header) and len(header) > 1 - - # GH 12292 : error when read one empty column from excel file - try: - parser = TextParser(data, - names=names, - header=header, - index_col=index_col, - has_index_names=has_index_names, - squeeze=squeeze, - dtype=dtype, - true_values=true_values, - false_values=false_values, - skiprows=skiprows, - nrows=nrows, - na_values=na_values, - parse_dates=parse_dates, - date_parser=date_parser, - thousands=thousands, - comment=comment, - skipfooter=skipfooter, - usecols=usecols, - mangle_dupe_cols=mangle_dupe_cols, - **kwds) - - output[asheetname] = parser.read(nrows=nrows) - - if not squeeze or isinstance(output[asheetname], DataFrame): - if header_names: - output[asheetname].columns = output[ - asheetname].columns.set_names(header_names) - elif compat.PY2: - output[asheetname].columns = _maybe_convert_to_string( - output[asheetname].columns) - - except EmptyDataError: - # No Data, return an empty DataFrame - output[asheetname] = DataFrame() - - if ret_dict: - return output - else: - return output[asheetname] - - -class _XlrdReader(_BaseExcelReader): - - def __init__(self, filepath_or_buffer): - """Reader using xlrd engine. - - Parameters - ---------- - filepath_or_buffer : string, path object or Workbook - Object to be parsed. - """ - err_msg = "Install xlrd >= 1.0.0 for Excel support" - - try: - import xlrd - except ImportError: - raise ImportError(err_msg) - else: - if xlrd.__VERSION__ < LooseVersion("1.0.0"): - raise ImportError(err_msg + - ". 
Current version " + xlrd.__VERSION__) - - # If filepath_or_buffer is a url, want to keep the data as bytes so - # can't pass to get_filepath_or_buffer() - if _is_url(filepath_or_buffer): - filepath_or_buffer = _urlopen(filepath_or_buffer) - elif not isinstance(filepath_or_buffer, (ExcelFile, xlrd.Book)): - filepath_or_buffer, _, _, _ = get_filepath_or_buffer( - filepath_or_buffer) - - if isinstance(filepath_or_buffer, xlrd.Book): - self.book = filepath_or_buffer - elif hasattr(filepath_or_buffer, "read"): - # N.B. xlrd.Book has a read attribute too - if hasattr(filepath_or_buffer, 'seek'): - try: - # GH 19779 - filepath_or_buffer.seek(0) - except UnsupportedOperation: - # HTTPResponse does not support seek() - # GH 20434 - pass - - data = filepath_or_buffer.read() - self.book = xlrd.open_workbook(file_contents=data) - elif isinstance(filepath_or_buffer, compat.string_types): - self.book = xlrd.open_workbook(filepath_or_buffer) - else: - raise ValueError('Must explicitly set engine if not passing in' - ' buffer or path for io.') - - @property - def sheet_names(self): - return self.book.sheet_names() - - def get_sheet_by_name(self, name): - return self.book.sheet_by_name(name) - - def get_sheet_by_index(self, index): - return self.book.sheet_by_index(index) - - def get_sheet_data(self, sheet, convert_float): - from xlrd import (xldate, XL_CELL_DATE, - XL_CELL_ERROR, XL_CELL_BOOLEAN, - XL_CELL_NUMBER) - - epoch1904 = self.book.datemode - - def _parse_cell(cell_contents, cell_typ): - """converts the contents of the cell into a pandas - appropriate object""" - - if cell_typ == XL_CELL_DATE: - - # Use the newer xlrd datetime handling. - try: - cell_contents = xldate.xldate_as_datetime( - cell_contents, epoch1904) - except OverflowError: - return cell_contents - - # Excel doesn't distinguish between dates and time, - # so we treat dates on the epoch as times only. - # Also, Excel supports 1900 and 1904 epochs. 
- year = (cell_contents.timetuple())[0:3] - if ((not epoch1904 and year == (1899, 12, 31)) or - (epoch1904 and year == (1904, 1, 1))): - cell_contents = time(cell_contents.hour, - cell_contents.minute, - cell_contents.second, - cell_contents.microsecond) - - elif cell_typ == XL_CELL_ERROR: - cell_contents = np.nan - elif cell_typ == XL_CELL_BOOLEAN: - cell_contents = bool(cell_contents) - elif convert_float and cell_typ == XL_CELL_NUMBER: - # GH5394 - Excel 'numbers' are always floats - # it's a minimal perf hit and less surprising - val = int(cell_contents) - if val == cell_contents: - cell_contents = val - return cell_contents - - data = [] - - for i in range(sheet.nrows): - row = [_parse_cell(value, typ) - for value, typ in zip(sheet.row_values(i), - sheet.row_types(i))] - data.append(row) - - return data - - -class ExcelFile(object): - """ - Class for parsing tabular excel sheets into DataFrame objects. - Uses xlrd. See read_excel for more documentation - - Parameters - ---------- - io : string, path object (pathlib.Path or py._path.local.LocalPath), - file-like object or xlrd workbook - If a string or path object, expected to be a path to xls or xlsx file. - engine : string, default None - If io is not a buffer or path, this must be set to identify io. - Acceptable values are None or ``xlrd``. - """ - - _engines = { - 'xlrd': _XlrdReader, - } - - def __init__(self, io, engine=None): - if engine is None: - engine = 'xlrd' - if engine not in self._engines: - raise ValueError("Unknown engine: {engine}".format(engine=engine)) - - # could be a str, ExcelFile, Book, etc. 
- self.io = io - # Always a string - self._io = _stringify_path(io) - - self._reader = self._engines[engine](self._io) - - def __fspath__(self): - return self._io - - def parse(self, - sheet_name=0, - header=0, - names=None, - index_col=None, - usecols=None, - squeeze=False, - converters=None, - true_values=None, - false_values=None, - skiprows=None, - nrows=None, - na_values=None, - parse_dates=False, - date_parser=None, - thousands=None, - comment=None, - skipfooter=0, - convert_float=True, - mangle_dupe_cols=True, - **kwds): - """ - Parse specified sheet(s) into a DataFrame - - Equivalent to read_excel(ExcelFile, ...) See the read_excel - docstring for more info on accepted parameters - """ - - # Can't use _deprecate_kwarg since sheetname=None has a special meaning - if is_integer(sheet_name) and sheet_name == 0 and 'sheetname' in kwds: - warnings.warn("The `sheetname` keyword is deprecated, use " - "`sheet_name` instead", FutureWarning, stacklevel=2) - sheet_name = kwds.pop("sheetname") - elif 'sheetname' in kwds: - raise TypeError("Cannot specify both `sheet_name` " - "and `sheetname`. 
Use just `sheet_name`") - - if 'chunksize' in kwds: - raise NotImplementedError("chunksize keyword of read_excel " - "is not implemented") - - return self._reader.parse(sheet_name=sheet_name, - header=header, - names=names, - index_col=index_col, - usecols=usecols, - squeeze=squeeze, - converters=converters, - true_values=true_values, - false_values=false_values, - skiprows=skiprows, - nrows=nrows, - na_values=na_values, - parse_dates=parse_dates, - date_parser=date_parser, - thousands=thousands, - comment=comment, - skipfooter=skipfooter, - convert_float=convert_float, - mangle_dupe_cols=mangle_dupe_cols, - **kwds) - - @property - def book(self): - return self._reader.book - - @property - def sheet_names(self): - return self._reader.sheet_names - - def close(self): - """close io if necessary""" - if hasattr(self.io, 'close'): - self.io.close() - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_value, traceback): - self.close() - - -def _excel2num(x): - """ - Convert Excel column name like 'AB' to 0-based column index. - - Parameters - ---------- - x : str - The Excel column name to convert to a 0-based column index. - - Returns - ------- - num : int - The column index corresponding to the name. - - Raises - ------ - ValueError - Part of the Excel column name was invalid. - """ - index = 0 - - for c in x.upper().strip(): - cp = ord(c) - - if cp < ord("A") or cp > ord("Z"): - raise ValueError("Invalid column name: {x}".format(x=x)) - - index = index * 26 + cp - ord("A") + 1 - - return index - 1 - - -def _range2cols(areas): - """ - Convert comma separated list of column names and ranges to indices. - - Parameters - ---------- - areas : str - A string containing a sequence of column ranges (or areas). - - Returns - ------- - cols : list - A list of 0-based column indices. 
- - Examples - -------- - >>> _range2cols('A:E') - [0, 1, 2, 3, 4] - >>> _range2cols('A,C,Z:AB') - [0, 2, 25, 26, 27] - """ - cols = [] - - for rng in areas.split(","): - if ":" in rng: - rng = rng.split(":") - cols.extend(lrange(_excel2num(rng[0]), _excel2num(rng[1]) + 1)) - else: - cols.append(_excel2num(rng)) - - return cols - - -def _maybe_convert_usecols(usecols): - """ - Convert `usecols` into a compatible format for parsing in `parsers.py`. - - Parameters - ---------- - usecols : object - The use-columns object to potentially convert. - - Returns - ------- - converted : object - The compatible format of `usecols`. - """ - if usecols is None: - return usecols - - if is_integer(usecols): - warnings.warn(("Passing in an integer for `usecols` has been " - "deprecated. Please pass in a list of int from " - "0 to `usecols` inclusive instead."), - FutureWarning, stacklevel=2) - return lrange(usecols + 1) - - if isinstance(usecols, compat.string_types): - return _range2cols(usecols) - - return usecols - - -def _validate_freeze_panes(freeze_panes): - if freeze_panes is not None: - if ( - len(freeze_panes) == 2 and - all(isinstance(item, int) for item in freeze_panes) - ): - return True - - raise ValueError("freeze_panes must be of form (row, column)" - " where row and column are integers") - - # freeze_panes wasn't specified, return False so it won't be applied - # to output sheet - return False - - -def _trim_excel_header(row): - # trim header row so auto-index inference works - # xlrd uses '' , openpyxl None - while len(row) > 0 and (row[0] == '' or row[0] is None): - row = row[1:] - return row - - -def _maybe_convert_to_string(row): - """ - Convert elements in a row to string from Unicode. - - This is purely a Python 2.x patch and is performed ONLY when all - elements of the row are string-like. - - Parameters - ---------- - row : array-like - The row of data to convert. 
- - Returns - ------- - converted : array-like - """ - if compat.PY2: - converted = [] - - for i in range(len(row)): - if isinstance(row[i], compat.string_types): - try: - converted.append(str(row[i])) - except UnicodeEncodeError: - break - else: - break - else: - row = converted - - return row - - -def _fill_mi_header(row, control_row): - """Forward fills blank entries in row, but only inside the same parent index - - Used for creating headers in Multiindex. - Parameters - ---------- - row : list - List of items in a single row. - control_row : list of bool - Helps to determine if particular column is in same parent index as the - previous value. Used to stop propagation of empty cells between - different indexes. - - Returns - ---------- - Returns changed row and control_row - """ - last = row[0] - for i in range(1, len(row)): - if not control_row[i]: - last = row[i] - - if row[i] == '' or row[i] is None: - row[i] = last - else: - control_row[i] = False - last = row[i] - - return _maybe_convert_to_string(row), control_row - -# fill blank if index_col not None - - -def _pop_header_name(row, index_col): - """ - Pop the header name for MultiIndex parsing. - - Parameters - ---------- - row : list - The data row to parse for the header name. - index_col : int, list - The index columns for our data. Assumed to be non-null. - - Returns - ------- - header_name : str - The extracted header name. - trimmed_row : list - The original data row with the header name removed. - """ - # Pop out header name and fill w/blank. - i = index_col if not is_list_like(index_col) else max(index_col) - - header_name = row[i] - header_name = None if header_name == "" else header_name - - return header_name, row[:i] + [''] + row[i + 1:] - - -@add_metaclass(abc.ABCMeta) -class ExcelWriter(object): - """ - Class for writing DataFrame objects into excel sheets, default is to use - xlwt for xls, openpyxl for xlsx. See DataFrame.to_excel for typical usage. 
- - Parameters - ---------- - path : string - Path to xls or xlsx file. - engine : string (optional) - Engine to use for writing. If None, defaults to - ``io.excel.<extension>.writer``. NOTE: can only be passed as a keyword - argument. - date_format : string, default None - Format string for dates written into Excel files (e.g. 'YYYY-MM-DD') - datetime_format : string, default None - Format string for datetime objects written into Excel files - (e.g. 'YYYY-MM-DD HH:MM:SS') - mode : {'w' or 'a'}, default 'w' - File mode to use (write or append). - - .. versionadded:: 0.24.0 - - Attributes - ---------- - None - - Methods - ------- - None - - Notes - ----- - None of the methods and properties are considered public. - - For compatibility with CSV writers, ExcelWriter serializes lists - and dicts to strings before writing. - - Examples - -------- - Default usage: - - >>> with ExcelWriter('path_to_file.xlsx') as writer: - ... df.to_excel(writer) - - To write to separate sheets in a single file: - - >>> with ExcelWriter('path_to_file.xlsx') as writer: - ... df1.to_excel(writer, sheet_name='Sheet1') - ... df2.to_excel(writer, sheet_name='Sheet2') - - You can set the date format or datetime format: - - >>> with ExcelWriter('path_to_file.xlsx', - date_format='YYYY-MM-DD', - datetime_format='YYYY-MM-DD HH:MM:SS') as writer: - ... df.to_excel(writer) - - You can also append to an existing Excel file: - - >>> with ExcelWriter('path_to_file.xlsx', mode='a') as writer: - ... df.to_excel(writer, sheet_name='Sheet3') - """ - # Defining an ExcelWriter implementation (see abstract methods for more...) - - # - Mandatory - # - ``write_cells(self, cells, sheet_name=None, startrow=0, startcol=0)`` - # --> called to write additional DataFrames to disk - # - ``supported_extensions`` (tuple of supported extensions), used to - # check that engine supports the given extension. - # - ``engine`` - string that gives the engine name. 
Necessary to - # instantiate class directly and bypass ``ExcelWriterMeta`` engine - # lookup. - # - ``save(self)`` --> called to save file to disk - # - Mostly mandatory (i.e. should at least exist) - # - book, cur_sheet, path - - # - Optional: - # - ``__init__(self, path, engine=None, **kwargs)`` --> always called - # with path as first argument. - - # You also need to register the class with ``register_writer()``. - # Technically, ExcelWriter implementations don't need to subclass - # ExcelWriter. - def __new__(cls, path, engine=None, **kwargs): - # only switch class if generic(ExcelWriter) - - if issubclass(cls, ExcelWriter): - if engine is None or (isinstance(engine, string_types) and - engine == 'auto'): - if isinstance(path, string_types): - ext = os.path.splitext(path)[-1][1:] - else: - ext = 'xlsx' - - try: - engine = config.get_option('io.excel.{ext}.writer' - .format(ext=ext)) - if engine == 'auto': - engine = _get_default_writer(ext) - except KeyError: - error = ValueError("No engine for filetype: '{ext}'" - .format(ext=ext)) - raise error - cls = get_writer(engine) - - return object.__new__(cls) - - # declare external properties you can count on - book = None - curr_sheet = None - path = None - - @abc.abstractproperty - def supported_extensions(self): - "extensions that writer engine supports" - pass - - @abc.abstractproperty - def engine(self): - "name of engine" - pass - - @abc.abstractmethod - def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0, - freeze_panes=None): - """ - Write given formatted cells into Excel an excel sheet - - Parameters - ---------- - cells : generator - cell of formatted data to save to Excel sheet - sheet_name : string, default None - Name of Excel sheet, if None, then use self.cur_sheet - startrow : upper left cell row to dump data frame - startcol : upper left cell column to dump data frame - freeze_panes: integer tuple of length 2 - contains the bottom-most row and right-most column to freeze - """ - pass 
- - @abc.abstractmethod - def save(self): - """ - Save workbook to disk. - """ - pass - - def __init__(self, path, engine=None, - date_format=None, datetime_format=None, mode='w', - **engine_kwargs): - # validate that this engine can handle the extension - if isinstance(path, string_types): - ext = os.path.splitext(path)[-1] - else: - ext = 'xls' if engine == 'xlwt' else 'xlsx' - - self.check_extension(ext) - - self.path = path - self.sheets = {} - self.cur_sheet = None - - if date_format is None: - self.date_format = 'YYYY-MM-DD' - else: - self.date_format = date_format - if datetime_format is None: - self.datetime_format = 'YYYY-MM-DD HH:MM:SS' - else: - self.datetime_format = datetime_format - - self.mode = mode - - def __fspath__(self): - return _stringify_path(self.path) - - def _get_sheet_name(self, sheet_name): - if sheet_name is None: - sheet_name = self.cur_sheet - if sheet_name is None: # pragma: no cover - raise ValueError('Must pass explicit sheet_name or set ' - 'cur_sheet property') - return sheet_name - - def _value_with_fmt(self, val): - """Convert numpy types to Python types for the Excel writers. - - Parameters - ---------- - val : object - Value to be written into cells - - Returns - ------- - Tuple with the first element being the converted value and the second - being an optional format - """ - fmt = None - - if is_integer(val): - val = int(val) - elif is_float(val): - val = float(val) - elif is_bool(val): - val = bool(val) - elif isinstance(val, datetime): - fmt = self.datetime_format - elif isinstance(val, date): - fmt = self.date_format - elif isinstance(val, timedelta): - val = val.total_seconds() / float(86400) - fmt = '0' - else: - val = compat.to_str(val) - - return val, fmt - - @classmethod - def check_extension(cls, ext): - """checks that path's extension against the Writer's supported - extensions. 
If it isn't supported, raises UnsupportedFiletypeError.""" - if ext.startswith('.'): - ext = ext[1:] - if not any(ext in extension for extension in cls.supported_extensions): - msg = (u("Invalid extension for engine '{engine}': '{ext}'") - .format(engine=pprint_thing(cls.engine), - ext=pprint_thing(ext))) - raise ValueError(msg) - else: - return True - - # Allow use as a contextmanager - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_value, traceback): - self.close() - - def close(self): - """synonym for save, to make it more file-like""" - return self.save() - - -class _OpenpyxlWriter(ExcelWriter): - engine = 'openpyxl' - supported_extensions = ('.xlsx', '.xlsm') - - def __init__(self, path, engine=None, mode='w', **engine_kwargs): - # Use the openpyxl module as the Excel writer. - from openpyxl.workbook import Workbook - - super(_OpenpyxlWriter, self).__init__(path, mode=mode, **engine_kwargs) - - if self.mode == 'a': # Load from existing workbook - from openpyxl import load_workbook - book = load_workbook(self.path) - self.book = book - else: - # Create workbook object with default optimized_write=True. - self.book = Workbook() - - if self.book.worksheets: - try: - self.book.remove(self.book.worksheets[0]) - except AttributeError: - - # compat - for openpyxl <= 2.4 - self.book.remove_sheet(self.book.worksheets[0]) - - def save(self): - """ - Save workbook to disk. 
- """ - return self.book.save(self.path) - - @classmethod - def _convert_to_style(cls, style_dict): - """ - converts a style_dict to an openpyxl style object - Parameters - ---------- - style_dict : style dictionary to convert - """ - - from openpyxl.style import Style - xls_style = Style() - for key, value in style_dict.items(): - for nk, nv in value.items(): - if key == "borders": - (xls_style.borders.__getattribute__(nk) - .__setattr__('border_style', nv)) - else: - xls_style.__getattribute__(key).__setattr__(nk, nv) - - return xls_style - - @classmethod - def _convert_to_style_kwargs(cls, style_dict): - """ - Convert a style_dict to a set of kwargs suitable for initializing - or updating-on-copy an openpyxl v2 style object - Parameters - ---------- - style_dict : dict - A dict with zero or more of the following keys (or their synonyms). - 'font' - 'fill' - 'border' ('borders') - 'alignment' - 'number_format' - 'protection' - Returns - ------- - style_kwargs : dict - A dict with the same, normalized keys as ``style_dict`` but each - value has been replaced with a native openpyxl style object of the - appropriate class. - """ - - _style_key_map = { - 'borders': 'border', - } - - style_kwargs = {} - for k, v in style_dict.items(): - if k in _style_key_map: - k = _style_key_map[k] - _conv_to_x = getattr(cls, '_convert_to_{k}'.format(k=k), - lambda x: None) - new_v = _conv_to_x(v) - if new_v: - style_kwargs[k] = new_v - - return style_kwargs - - @classmethod - def _convert_to_color(cls, color_spec): - """ - Convert ``color_spec`` to an openpyxl v2 Color object - Parameters - ---------- - color_spec : str, dict - A 32-bit ARGB hex string, or a dict with zero or more of the - following keys. 
- 'rgb' - 'indexed' - 'auto' - 'theme' - 'tint' - 'index' - 'type' - Returns - ------- - color : openpyxl.styles.Color - """ - - from openpyxl.styles import Color - - if isinstance(color_spec, str): - return Color(color_spec) - else: - return Color(**color_spec) - - @classmethod - def _convert_to_font(cls, font_dict): - """ - Convert ``font_dict`` to an openpyxl v2 Font object - Parameters - ---------- - font_dict : dict - A dict with zero or more of the following keys (or their synonyms). - 'name' - 'size' ('sz') - 'bold' ('b') - 'italic' ('i') - 'underline' ('u') - 'strikethrough' ('strike') - 'color' - 'vertAlign' ('vertalign') - 'charset' - 'scheme' - 'family' - 'outline' - 'shadow' - 'condense' - Returns - ------- - font : openpyxl.styles.Font - """ - - from openpyxl.styles import Font - - _font_key_map = { - 'sz': 'size', - 'b': 'bold', - 'i': 'italic', - 'u': 'underline', - 'strike': 'strikethrough', - 'vertalign': 'vertAlign', - } - - font_kwargs = {} - for k, v in font_dict.items(): - if k in _font_key_map: - k = _font_key_map[k] - if k == 'color': - v = cls._convert_to_color(v) - font_kwargs[k] = v - - return Font(**font_kwargs) - - @classmethod - def _convert_to_stop(cls, stop_seq): - """ - Convert ``stop_seq`` to a list of openpyxl v2 Color objects, - suitable for initializing the ``GradientFill`` ``stop`` parameter. - Parameters - ---------- - stop_seq : iterable - An iterable that yields objects suitable for consumption by - ``_convert_to_color``. 
- Returns - ------- - stop : list of openpyxl.styles.Color - """ - - return map(cls._convert_to_color, stop_seq) - - @classmethod - def _convert_to_fill(cls, fill_dict): - """ - Convert ``fill_dict`` to an openpyxl v2 Fill object - Parameters - ---------- - fill_dict : dict - A dict with one or more of the following keys (or their synonyms), - 'fill_type' ('patternType', 'patterntype') - 'start_color' ('fgColor', 'fgcolor') - 'end_color' ('bgColor', 'bgcolor') - or one or more of the following keys (or their synonyms). - 'type' ('fill_type') - 'degree' - 'left' - 'right' - 'top' - 'bottom' - 'stop' - Returns - ------- - fill : openpyxl.styles.Fill - """ - - from openpyxl.styles import PatternFill, GradientFill - - _pattern_fill_key_map = { - 'patternType': 'fill_type', - 'patterntype': 'fill_type', - 'fgColor': 'start_color', - 'fgcolor': 'start_color', - 'bgColor': 'end_color', - 'bgcolor': 'end_color', - } - - _gradient_fill_key_map = { - 'fill_type': 'type', - } - - pfill_kwargs = {} - gfill_kwargs = {} - for k, v in fill_dict.items(): - pk = gk = None - if k in _pattern_fill_key_map: - pk = _pattern_fill_key_map[k] - if k in _gradient_fill_key_map: - gk = _gradient_fill_key_map[k] - if pk in ['start_color', 'end_color']: - v = cls._convert_to_color(v) - if gk == 'stop': - v = cls._convert_to_stop(v) - if pk: - pfill_kwargs[pk] = v - elif gk: - gfill_kwargs[gk] = v - else: - pfill_kwargs[k] = v - gfill_kwargs[k] = v - - try: - return PatternFill(**pfill_kwargs) - except TypeError: - return GradientFill(**gfill_kwargs) - - @classmethod - def _convert_to_side(cls, side_spec): - """ - Convert ``side_spec`` to an openpyxl v2 Side object - Parameters - ---------- - side_spec : str, dict - A string specifying the border style, or a dict with zero or more - of the following keys (or their synonyms). 
- 'style' ('border_style') - 'color' - Returns - ------- - side : openpyxl.styles.Side - """ - - from openpyxl.styles import Side - - _side_key_map = { - 'border_style': 'style', - } - - if isinstance(side_spec, str): - return Side(style=side_spec) - - side_kwargs = {} - for k, v in side_spec.items(): - if k in _side_key_map: - k = _side_key_map[k] - if k == 'color': - v = cls._convert_to_color(v) - side_kwargs[k] = v - - return Side(**side_kwargs) - - @classmethod - def _convert_to_border(cls, border_dict): - """ - Convert ``border_dict`` to an openpyxl v2 Border object - Parameters - ---------- - border_dict : dict - A dict with zero or more of the following keys (or their synonyms). - 'left' - 'right' - 'top' - 'bottom' - 'diagonal' - 'diagonal_direction' - 'vertical' - 'horizontal' - 'diagonalUp' ('diagonalup') - 'diagonalDown' ('diagonaldown') - 'outline' - Returns - ------- - border : openpyxl.styles.Border - """ - - from openpyxl.styles import Border - - _border_key_map = { - 'diagonalup': 'diagonalUp', - 'diagonaldown': 'diagonalDown', - } - - border_kwargs = {} - for k, v in border_dict.items(): - if k in _border_key_map: - k = _border_key_map[k] - if k == 'color': - v = cls._convert_to_color(v) - if k in ['left', 'right', 'top', 'bottom', 'diagonal']: - v = cls._convert_to_side(v) - border_kwargs[k] = v - - return Border(**border_kwargs) - - @classmethod - def _convert_to_alignment(cls, alignment_dict): - """ - Convert ``alignment_dict`` to an openpyxl v2 Alignment object - Parameters - ---------- - alignment_dict : dict - A dict with zero or more of the following keys (or their synonyms). 
- 'horizontal' - 'vertical' - 'text_rotation' - 'wrap_text' - 'shrink_to_fit' - 'indent' - Returns - ------- - alignment : openpyxl.styles.Alignment - """ - - from openpyxl.styles import Alignment - - return Alignment(**alignment_dict) - - @classmethod - def _convert_to_number_format(cls, number_format_dict): - """ - Convert ``number_format_dict`` to an openpyxl v2.1.0 number format - initializer. - Parameters - ---------- - number_format_dict : dict - A dict with zero or more of the following keys. - 'format_code' : str - Returns - ------- - number_format : str - """ - return number_format_dict['format_code'] - - @classmethod - def _convert_to_protection(cls, protection_dict): - """ - Convert ``protection_dict`` to an openpyxl v2 Protection object. - Parameters - ---------- - protection_dict : dict - A dict with zero or more of the following keys. - 'locked' - 'hidden' - Returns - ------- - """ - - from openpyxl.styles import Protection - - return Protection(**protection_dict) - - def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0, - freeze_panes=None): - # Write the frame cells using openpyxl. 
- sheet_name = self._get_sheet_name(sheet_name) - - _style_cache = {} - - if sheet_name in self.sheets: - wks = self.sheets[sheet_name] - else: - wks = self.book.create_sheet() - wks.title = sheet_name - self.sheets[sheet_name] = wks - - if _validate_freeze_panes(freeze_panes): - wks.freeze_panes = wks.cell(row=freeze_panes[0] + 1, - column=freeze_panes[1] + 1) - - for cell in cells: - xcell = wks.cell( - row=startrow + cell.row + 1, - column=startcol + cell.col + 1 - ) - xcell.value, fmt = self._value_with_fmt(cell.val) - if fmt: - xcell.number_format = fmt - - style_kwargs = {} - if cell.style: - key = str(cell.style) - style_kwargs = _style_cache.get(key) - if style_kwargs is None: - style_kwargs = self._convert_to_style_kwargs(cell.style) - _style_cache[key] = style_kwargs - - if style_kwargs: - for k, v in style_kwargs.items(): - setattr(xcell, k, v) - - if cell.mergestart is not None and cell.mergeend is not None: - - wks.merge_cells( - start_row=startrow + cell.row + 1, - start_column=startcol + cell.col + 1, - end_column=startcol + cell.mergeend + 1, - end_row=startrow + cell.mergestart + 1 - ) - - # When cells are merged only the top-left cell is preserved - # The behaviour of the other cells in a merged range is - # undefined - if style_kwargs: - first_row = startrow + cell.row + 1 - last_row = startrow + cell.mergestart + 1 - first_col = startcol + cell.col + 1 - last_col = startcol + cell.mergeend + 1 - - for row in range(first_row, last_row + 1): - for col in range(first_col, last_col + 1): - if row == first_row and col == first_col: - # Ignore first cell. It is already handled. - continue - xcell = wks.cell(column=col, row=row) - for k, v in style_kwargs.items(): - setattr(xcell, k, v) - - -register_writer(_OpenpyxlWriter) - - -class _XlwtWriter(ExcelWriter): - engine = 'xlwt' - supported_extensions = ('.xls',) - - def __init__(self, path, engine=None, encoding=None, mode='w', - **engine_kwargs): - # Use the xlwt module as the Excel writer. 
- import xlwt - engine_kwargs['engine'] = engine - - if mode == 'a': - raise ValueError('Append mode is not supported with xlwt!') - - super(_XlwtWriter, self).__init__(path, mode=mode, **engine_kwargs) - - if encoding is None: - encoding = 'ascii' - self.book = xlwt.Workbook(encoding=encoding) - self.fm_datetime = xlwt.easyxf(num_format_str=self.datetime_format) - self.fm_date = xlwt.easyxf(num_format_str=self.date_format) - - def save(self): - """ - Save workbook to disk. - """ - return self.book.save(self.path) - - def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0, - freeze_panes=None): - # Write the frame cells using xlwt. - - sheet_name = self._get_sheet_name(sheet_name) - - if sheet_name in self.sheets: - wks = self.sheets[sheet_name] - else: - wks = self.book.add_sheet(sheet_name) - self.sheets[sheet_name] = wks - - if _validate_freeze_panes(freeze_panes): - wks.set_panes_frozen(True) - wks.set_horz_split_pos(freeze_panes[0]) - wks.set_vert_split_pos(freeze_panes[1]) - - style_dict = {} - - for cell in cells: - val, fmt = self._value_with_fmt(cell.val) - - stylekey = json.dumps(cell.style) - if fmt: - stylekey += fmt - - if stylekey in style_dict: - style = style_dict[stylekey] - else: - style = self._convert_to_style(cell.style, fmt) - style_dict[stylekey] = style - - if cell.mergestart is not None and cell.mergeend is not None: - wks.write_merge(startrow + cell.row, - startrow + cell.mergestart, - startcol + cell.col, - startcol + cell.mergeend, - val, style) - else: - wks.write(startrow + cell.row, - startcol + cell.col, - val, style) - - @classmethod - def _style_to_xlwt(cls, item, firstlevel=True, field_sep=',', - line_sep=';'): - """helper which recursively generate an xlwt easy style string - for example: - - hstyle = {"font": {"bold": True}, - "border": {"top": "thin", - "right": "thin", - "bottom": "thin", - "left": "thin"}, - "align": {"horiz": "center"}} - will be converted to - font: bold on; \ - border: top thin, right thin, 
bottom thin, left thin; \ - align: horiz center; - """ - if hasattr(item, 'items'): - if firstlevel: - it = ["{key}: {val}" - .format(key=key, val=cls._style_to_xlwt(value, False)) - for key, value in item.items()] - out = "{sep} ".format(sep=(line_sep).join(it)) - return out - else: - it = ["{key} {val}" - .format(key=key, val=cls._style_to_xlwt(value, False)) - for key, value in item.items()] - out = "{sep} ".format(sep=(field_sep).join(it)) - return out - else: - item = "{item}".format(item=item) - item = item.replace("True", "on") - item = item.replace("False", "off") - return item - - @classmethod - def _convert_to_style(cls, style_dict, num_format_str=None): - """ - converts a style_dict to an xlwt style object - Parameters - ---------- - style_dict : style dictionary to convert - num_format_str : optional number format string - """ - import xlwt - - if style_dict: - xlwt_stylestr = cls._style_to_xlwt(style_dict) - style = xlwt.easyxf(xlwt_stylestr, field_sep=',', line_sep=';') - else: - style = xlwt.XFStyle() - if num_format_str is not None: - style.num_format_str = num_format_str - - return style - - -register_writer(_XlwtWriter) - - -class _XlsxStyler(object): - # Map from openpyxl-oriented styles to flatter xlsxwriter representation - # Ordering necessary for both determinism and because some are keyed by - # prefixes of others. 
- STYLE_MAPPING = { - 'font': [ - (('name',), 'font_name'), - (('sz',), 'font_size'), - (('size',), 'font_size'), - (('color', 'rgb',), 'font_color'), - (('color',), 'font_color'), - (('b',), 'bold'), - (('bold',), 'bold'), - (('i',), 'italic'), - (('italic',), 'italic'), - (('u',), 'underline'), - (('underline',), 'underline'), - (('strike',), 'font_strikeout'), - (('vertAlign',), 'font_script'), - (('vertalign',), 'font_script'), - ], - 'number_format': [ - (('format_code',), 'num_format'), - ((), 'num_format',), - ], - 'protection': [ - (('locked',), 'locked'), - (('hidden',), 'hidden'), - ], - 'alignment': [ - (('horizontal',), 'align'), - (('vertical',), 'valign'), - (('text_rotation',), 'rotation'), - (('wrap_text',), 'text_wrap'), - (('indent',), 'indent'), - (('shrink_to_fit',), 'shrink'), - ], - 'fill': [ - (('patternType',), 'pattern'), - (('patterntype',), 'pattern'), - (('fill_type',), 'pattern'), - (('start_color', 'rgb',), 'fg_color'), - (('fgColor', 'rgb',), 'fg_color'), - (('fgcolor', 'rgb',), 'fg_color'), - (('start_color',), 'fg_color'), - (('fgColor',), 'fg_color'), - (('fgcolor',), 'fg_color'), - (('end_color', 'rgb',), 'bg_color'), - (('bgColor', 'rgb',), 'bg_color'), - (('bgcolor', 'rgb',), 'bg_color'), - (('end_color',), 'bg_color'), - (('bgColor',), 'bg_color'), - (('bgcolor',), 'bg_color'), - ], - 'border': [ - (('color', 'rgb',), 'border_color'), - (('color',), 'border_color'), - (('style',), 'border'), - (('top', 'color', 'rgb',), 'top_color'), - (('top', 'color',), 'top_color'), - (('top', 'style',), 'top'), - (('top',), 'top'), - (('right', 'color', 'rgb',), 'right_color'), - (('right', 'color',), 'right_color'), - (('right', 'style',), 'right'), - (('right',), 'right'), - (('bottom', 'color', 'rgb',), 'bottom_color'), - (('bottom', 'color',), 'bottom_color'), - (('bottom', 'style',), 'bottom'), - (('bottom',), 'bottom'), - (('left', 'color', 'rgb',), 'left_color'), - (('left', 'color',), 'left_color'), - (('left', 'style',), 'left'), - 
(('left',), 'left'), - ], - } - - @classmethod - def convert(cls, style_dict, num_format_str=None): - """ - converts a style_dict to an xlsxwriter format dict - - Parameters - ---------- - style_dict : style dictionary to convert - num_format_str : optional number format string - """ - - # Create a XlsxWriter format object. - props = {} - - if num_format_str is not None: - props['num_format'] = num_format_str - - if style_dict is None: - return props - - if 'borders' in style_dict: - style_dict = style_dict.copy() - style_dict['border'] = style_dict.pop('borders') - - for style_group_key, style_group in style_dict.items(): - for src, dst in cls.STYLE_MAPPING.get(style_group_key, []): - # src is a sequence of keys into a nested dict - # dst is a flat key - if dst in props: - continue - v = style_group - for k in src: - try: - v = v[k] - except (KeyError, TypeError): - break - else: - props[dst] = v - - if isinstance(props.get('pattern'), string_types): - # TODO: support other fill patterns - props['pattern'] = 0 if props['pattern'] == 'none' else 1 - - for k in ['border', 'top', 'right', 'bottom', 'left']: - if isinstance(props.get(k), string_types): - try: - props[k] = ['none', 'thin', 'medium', 'dashed', 'dotted', - 'thick', 'double', 'hair', 'mediumDashed', - 'dashDot', 'mediumDashDot', 'dashDotDot', - 'mediumDashDotDot', - 'slantDashDot'].index(props[k]) - except ValueError: - props[k] = 2 - - if isinstance(props.get('font_script'), string_types): - props['font_script'] = ['baseline', 'superscript', - 'subscript'].index(props['font_script']) - - if isinstance(props.get('underline'), string_types): - props['underline'] = {'none': 0, 'single': 1, 'double': 2, - 'singleAccounting': 33, - 'doubleAccounting': 34}[props['underline']] - - return props - - -class _XlsxWriter(ExcelWriter): - engine = 'xlsxwriter' - supported_extensions = ('.xlsx',) - - def __init__(self, path, engine=None, - date_format=None, datetime_format=None, mode='w', - **engine_kwargs): - # Use 
the xlsxwriter module as the Excel writer. - import xlsxwriter - - if mode == 'a': - raise ValueError('Append mode is not supported with xlsxwriter!') - - super(_XlsxWriter, self).__init__(path, engine=engine, - date_format=date_format, - datetime_format=datetime_format, - mode=mode, - **engine_kwargs) - - self.book = xlsxwriter.Workbook(path, **engine_kwargs) - - def save(self): - """ - Save workbook to disk. - """ - - return self.book.close() - - def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0, - freeze_panes=None): - # Write the frame cells using xlsxwriter. - sheet_name = self._get_sheet_name(sheet_name) - - if sheet_name in self.sheets: - wks = self.sheets[sheet_name] - else: - wks = self.book.add_worksheet(sheet_name) - self.sheets[sheet_name] = wks - - style_dict = {'null': None} - - if _validate_freeze_panes(freeze_panes): - wks.freeze_panes(*(freeze_panes)) - - for cell in cells: - val, fmt = self._value_with_fmt(cell.val) - - stylekey = json.dumps(cell.style) - if fmt: - stylekey += fmt - - if stylekey in style_dict: - style = style_dict[stylekey] - else: - style = self.book.add_format( - _XlsxStyler.convert(cell.style, fmt)) - style_dict[stylekey] = style - - if cell.mergestart is not None and cell.mergeend is not None: - wks.merge_range(startrow + cell.row, - startcol + cell.col, - startrow + cell.mergestart, - startcol + cell.mergeend, - cell.val, style) - else: - wks.write(startrow + cell.row, - startcol + cell.col, - val, style) - - -register_writer(_XlsxWriter) diff --git a/pandas/io/excel/__init__.py b/pandas/io/excel/__init__.py new file mode 100644 index 0000000000000..704789cb6061e --- /dev/null +++ b/pandas/io/excel/__init__.py @@ -0,0 +1,16 @@ +from pandas.io.excel._base import read_excel, ExcelWriter, ExcelFile +from pandas.io.excel._openpyxl import _OpenpyxlWriter +from pandas.io.excel._util import register_writer +from pandas.io.excel._xlsxwriter import _XlsxWriter +from pandas.io.excel._xlwt import _XlwtWriter + 
+__all__ = ["read_excel", "ExcelWriter", "ExcelFile"] + + +register_writer(_OpenpyxlWriter) + + +register_writer(_XlwtWriter) + + +register_writer(_XlsxWriter) diff --git a/pandas/io/excel/_base.py b/pandas/io/excel/_base.py new file mode 100644 index 0000000000000..c6d390692c789 --- /dev/null +++ b/pandas/io/excel/_base.py @@ -0,0 +1,852 @@ +import abc +from collections import OrderedDict +from datetime import date, datetime, timedelta +import os +from textwrap import fill +import warnings + +import pandas.compat as compat +from pandas.compat import add_metaclass, range, string_types, u +from pandas.errors import EmptyDataError +from pandas.util._decorators import Appender, deprecate_kwarg + +from pandas.core.dtypes.common import ( + is_bool, is_float, is_integer, is_list_like) + +from pandas.core import config +from pandas.core.frame import DataFrame + +from pandas.io.common import _NA_VALUES, _stringify_path, _validate_header_arg +from pandas.io.excel._util import ( + _fill_mi_header, _get_default_writer, _maybe_convert_to_string, + _maybe_convert_usecols, _pop_header_name, get_writer) +from pandas.io.formats.printing import pprint_thing +from pandas.io.parsers import TextParser + +_read_excel_doc = """ +Read an Excel file into a pandas DataFrame. + +Support both `xls` and `xlsx` file extensions from a local filesystem or URL. +Support an option to read a single sheet or a list of sheets. + +Parameters +---------- +io : str, file descriptor, pathlib.Path, ExcelFile or xlrd.Book + The string could be a URL. Valid URL schemes include http, ftp, s3, + gcs, and file. For file URLs, a host is expected. For instance, a local + file could be /path/to/workbook.xlsx. +sheet_name : str, int, list, or None, default 0 + Strings are used for sheet names. Integers are used in zero-indexed + sheet positions. Lists of strings/integers are used to request + multiple sheets. Specify None to get all sheets. 
+ + Available cases: + + * Defaults to ``0``: 1st sheet as a `DataFrame` + * ``1``: 2nd sheet as a `DataFrame` + * ``"Sheet1"``: Load sheet with name "Sheet1" + * ``[0, 1, "Sheet5"]``: Load first, second and sheet named "Sheet5" + as a dict of `DataFrame` + * None: All sheets. + +header : int, list of int, default 0 + Row (0-indexed) to use for the column labels of the parsed + DataFrame. If a list of integers is passed those row positions will + be combined into a ``MultiIndex``. Use None if there is no header. +names : array-like, default None + List of column names to use. If file contains no header row, + then you should explicitly pass header=None. +index_col : int, list of int, default None + Column (0-indexed) to use as the row labels of the DataFrame. + Pass None if there is no such column. If a list is passed, + those columns will be combined into a ``MultiIndex``. If a + subset of data is selected with ``usecols``, index_col + is based on the subset. +parse_cols : int or list, default None + Alias of `usecols`. + + .. deprecated:: 0.21.0 + Use `usecols` instead. + +usecols : int, str, list-like, or callable default None + Return a subset of the columns. + * If None, then parse all columns. + * If int, then indicates last column to be parsed. + + .. deprecated:: 0.24.0 + Pass in a list of int instead from 0 to `usecols` inclusive. + + * If str, then indicates comma separated list of Excel column letters + and column ranges (e.g. "A:E" or "A,C,E:F"). Ranges are inclusive of + both sides. + * If list of int, then indicates list of column numbers to be parsed. + * If list of string, then indicates list of column names to be parsed. + + .. versionadded:: 0.24.0 + + * If callable, then evaluate each column name against it and parse the + column if the callable returns ``True``. + + .. versionadded:: 0.24.0 + +squeeze : bool, default False + If the parsed data only contains one column then return a Series. 
+dtype : Type name or dict of column -> type, default None + Data type for data or columns. E.g. {'a': np.float64, 'b': np.int32} + Use `object` to preserve data as stored in Excel and not interpret dtype. + If converters are specified, they will be applied INSTEAD + of dtype conversion. + + .. versionadded:: 0.20.0 + +engine : str, default None + If io is not a buffer or path, this must be set to identify io. + Acceptable values are None or xlrd. +converters : dict, default None + Dict of functions for converting values in certain columns. Keys can + either be integers or column labels, values are functions that take one + input argument, the Excel cell content, and return the transformed + content. +true_values : list, default None + Values to consider as True. + + .. versionadded:: 0.19.0 + +false_values : list, default None + Values to consider as False. + + .. versionadded:: 0.19.0 + +skiprows : list-like + Rows to skip at the beginning (0-indexed). +nrows : int, default None + Number of rows to parse. + + .. versionadded:: 0.23.0 + +na_values : scalar, str, list-like, or dict, default None + Additional strings to recognize as NA/NaN. If dict passed, specific + per-column NA values. By default the following values are interpreted + as NaN: '""" + fill("', '".join( + sorted(_NA_VALUES)), 70, subsequent_indent=" ") + """'. +keep_default_na : bool, default True + If na_values are specified and keep_default_na is False the default NaN + values are overridden, otherwise they're appended to. +verbose : bool, default False + Indicate number of NA values placed in non-numeric columns. +parse_dates : bool, list-like, or dict, default False + The behavior is as follows: + + * bool. If True -> try parsing the index. + * list of int or names. e.g. If [1, 2, 3] -> try parsing columns 1, 2, 3 + each as a separate date column. + * list of lists. e.g. If [[1, 3]] -> combine columns 1 and 3 and parse as + a single date column. + * dict, e.g. 
{{'foo' : [1, 3]}} -> parse columns 1, 3 as date and call + result 'foo' + + If a column or index contains an unparseable date, the entire column or + index will be returned unaltered as an object data type. For non-standard + datetime parsing, use ``pd.to_datetime`` after ``pd.read_csv`` + + Note: A fast-path exists for iso8601-formatted dates. +date_parser : function, optional + Function to use for converting a sequence of string columns to an array of + datetime instances. The default uses ``dateutil.parser.parser`` to do the + conversion. Pandas will try to call `date_parser` in three different ways, + advancing to the next if an exception occurs: 1) Pass one or more arrays + (as defined by `parse_dates`) as arguments; 2) concatenate (row-wise) the + string values from the columns defined by `parse_dates` into a single array + and pass that; and 3) call `date_parser` once for each row using one or + more strings (corresponding to the columns defined by `parse_dates`) as + arguments. +thousands : str, default None + Thousands separator for parsing string columns to numeric. Note that + this parameter is only necessary for columns stored as TEXT in Excel, + any numeric columns will automatically be parsed, regardless of display + format. +comment : str, default None + Comments out remainder of line. Pass a character or characters to this + argument to indicate comments in the input file. Any data between the + comment string and the end of the current line is ignored. +skip_footer : int, default 0 + Alias of `skipfooter`. + + .. deprecated:: 0.23.0 + Use `skipfooter` instead. +skipfooter : int, default 0 + Rows at the end to skip (0-indexed). +convert_float : bool, default True + Convert integral floats to int (i.e., 1.0 --> 1). If False, all numeric + data will be read in as floats: Excel stores all numbers as floats + internally. +mangle_dupe_cols : bool, default True + Duplicate columns will be specified as 'X', 'X.1', ...'X.N', rather than + 'X'...'X'. 
Passing in False will cause data to be overwritten if there + are duplicate names in the columns. +**kwds : optional + Optional keyword arguments can be passed to ``TextFileReader``. + +Returns +------- +DataFrame or dict of DataFrames + DataFrame from the passed in Excel file. See notes in sheet_name + argument for more information on when a dict of DataFrames is returned. + +See Also +-------- +to_excel : Write DataFrame to an Excel file. +to_csv : Write DataFrame to a comma-separated values (csv) file. +read_csv : Read a comma-separated values (csv) file into DataFrame. +read_fwf : Read a table of fixed-width formatted lines into DataFrame. + +Examples +-------- +The file can be read using the file name as string or an open file object: + +>>> pd.read_excel('tmp.xlsx', index_col=0) # doctest: +SKIP + Name Value +0 string1 1 +1 string2 2 +2 #Comment 3 + +>>> pd.read_excel(open('tmp.xlsx', 'rb'), +... sheet_name='Sheet3') # doctest: +SKIP + Unnamed: 0 Name Value +0 0 string1 1 +1 1 string2 2 +2 2 #Comment 3 + +Index and header can be specified via the `index_col` and `header` arguments + +>>> pd.read_excel('tmp.xlsx', index_col=None, header=None) # doctest: +SKIP + 0 1 2 +0 NaN Name Value +1 0.0 string1 1 +2 1.0 string2 2 +3 2.0 #Comment 3 + +Column types are inferred but can be explicitly specified + +>>> pd.read_excel('tmp.xlsx', index_col=0, +... dtype={'Name': str, 'Value': float}) # doctest: +SKIP + Name Value +0 string1 1.0 +1 string2 2.0 +2 #Comment 3.0 + +True, False, and NA values, and thousands separators have defaults, +but can be explicitly specified, too. Supply the values you would like +as strings or lists of strings! + +>>> pd.read_excel('tmp.xlsx', index_col=0, +... 
na_values=['string1', 'string2']) # doctest: +SKIP + Name Value +0 NaN 1 +1 NaN 2 +2 #Comment 3 + +Comment lines in the excel input file can be skipped using the `comment` kwarg + +>>> pd.read_excel('tmp.xlsx', index_col=0, comment='#') # doctest: +SKIP + Name Value +0 string1 1.0 +1 string2 2.0 +2 None NaN +""" + + +@Appender(_read_excel_doc) +@deprecate_kwarg("parse_cols", "usecols") +@deprecate_kwarg("skip_footer", "skipfooter") +def read_excel(io, + sheet_name=0, + header=0, + names=None, + index_col=None, + parse_cols=None, + usecols=None, + squeeze=False, + dtype=None, + engine=None, + converters=None, + true_values=None, + false_values=None, + skiprows=None, + nrows=None, + na_values=None, + keep_default_na=True, + verbose=False, + parse_dates=False, + date_parser=None, + thousands=None, + comment=None, + skip_footer=0, + skipfooter=0, + convert_float=True, + mangle_dupe_cols=True, + **kwds): + + # Can't use _deprecate_kwarg since sheetname=None has a special meaning + if is_integer(sheet_name) and sheet_name == 0 and 'sheetname' in kwds: + warnings.warn("The `sheetname` keyword is deprecated, use " + "`sheet_name` instead", FutureWarning, stacklevel=2) + sheet_name = kwds.pop("sheetname") + + if 'sheet' in kwds: + raise TypeError("read_excel() got an unexpected keyword argument " + "`sheet`") + + if not isinstance(io, ExcelFile): + io = ExcelFile(io, engine=engine) + + return io.parse( + sheet_name=sheet_name, + header=header, + names=names, + index_col=index_col, + usecols=usecols, + squeeze=squeeze, + dtype=dtype, + converters=converters, + true_values=true_values, + false_values=false_values, + skiprows=skiprows, + nrows=nrows, + na_values=na_values, + keep_default_na=keep_default_na, + verbose=verbose, + parse_dates=parse_dates, + date_parser=date_parser, + thousands=thousands, + comment=comment, + skipfooter=skipfooter, + convert_float=convert_float, + mangle_dupe_cols=mangle_dupe_cols, + **kwds) + + +@add_metaclass(abc.ABCMeta) +class 
_BaseExcelReader(object): + + @property + @abc.abstractmethod + def sheet_names(self): + pass + + @abc.abstractmethod + def get_sheet_by_name(self, name): + pass + + @abc.abstractmethod + def get_sheet_by_index(self, index): + pass + + @abc.abstractmethod + def get_sheet_data(self, sheet, convert_float): + pass + + def parse(self, + sheet_name=0, + header=0, + names=None, + index_col=None, + usecols=None, + squeeze=False, + dtype=None, + true_values=None, + false_values=None, + skiprows=None, + nrows=None, + na_values=None, + verbose=False, + parse_dates=False, + date_parser=None, + thousands=None, + comment=None, + skipfooter=0, + convert_float=True, + mangle_dupe_cols=True, + **kwds): + + _validate_header_arg(header) + + ret_dict = False + + # Keep sheetname to maintain backwards compatibility. + if isinstance(sheet_name, list): + sheets = sheet_name + ret_dict = True + elif sheet_name is None: + sheets = self.sheet_names + ret_dict = True + else: + sheets = [sheet_name] + + # handle same-type duplicates. 
+ sheets = list(OrderedDict.fromkeys(sheets).keys()) + + output = OrderedDict() + + for asheetname in sheets: + if verbose: + print("Reading sheet {sheet}".format(sheet=asheetname)) + + if isinstance(asheetname, compat.string_types): + sheet = self.get_sheet_by_name(asheetname) + else: # assume an integer if not a string + sheet = self.get_sheet_by_index(asheetname) + + data = self.get_sheet_data(sheet, convert_float) + usecols = _maybe_convert_usecols(usecols) + + if sheet.nrows == 0: + output[asheetname] = DataFrame() + continue + + if is_list_like(header) and len(header) == 1: + header = header[0] + + # forward fill and pull out names for MultiIndex column + header_names = None + if header is not None and is_list_like(header): + header_names = [] + control_row = [True] * len(data[0]) + + for row in header: + if is_integer(skiprows): + row += skiprows + + data[row], control_row = _fill_mi_header(data[row], + control_row) + + if index_col is not None: + header_name, _ = _pop_header_name(data[row], index_col) + header_names.append(header_name) + + if is_list_like(index_col): + # Forward fill values for MultiIndex index. + if not is_list_like(header): + offset = 1 + header + else: + offset = 1 + max(header) + + # Check if we have an empty dataset + # before trying to collect data. 
+ if offset < len(data): + for col in index_col: + last = data[offset][col] + + for row in range(offset + 1, len(data)): + if data[row][col] == '' or data[row][col] is None: + data[row][col] = last + else: + last = data[row][col] + + has_index_names = is_list_like(header) and len(header) > 1 + + # GH 12292 : error when read one empty column from excel file + try: + parser = TextParser(data, + names=names, + header=header, + index_col=index_col, + has_index_names=has_index_names, + squeeze=squeeze, + dtype=dtype, + true_values=true_values, + false_values=false_values, + skiprows=skiprows, + nrows=nrows, + na_values=na_values, + parse_dates=parse_dates, + date_parser=date_parser, + thousands=thousands, + comment=comment, + skipfooter=skipfooter, + usecols=usecols, + mangle_dupe_cols=mangle_dupe_cols, + **kwds) + + output[asheetname] = parser.read(nrows=nrows) + + if not squeeze or isinstance(output[asheetname], DataFrame): + if header_names: + output[asheetname].columns = output[ + asheetname].columns.set_names(header_names) + elif compat.PY2: + output[asheetname].columns = _maybe_convert_to_string( + output[asheetname].columns) + + except EmptyDataError: + # No Data, return an empty DataFrame + output[asheetname] = DataFrame() + + if ret_dict: + return output + else: + return output[asheetname] + + +@add_metaclass(abc.ABCMeta) +class ExcelWriter(object): + """ + Class for writing DataFrame objects into excel sheets, default is to use + xlwt for xls, openpyxl for xlsx. See DataFrame.to_excel for typical usage. + + Parameters + ---------- + path : string + Path to xls or xlsx file. + engine : string (optional) + Engine to use for writing. If None, defaults to + ``io.excel.<extension>.writer``. NOTE: can only be passed as a keyword + argument. + date_format : string, default None + Format string for dates written into Excel files (e.g. 'YYYY-MM-DD') + datetime_format : string, default None + Format string for datetime objects written into Excel files + (e.g. 
'YYYY-MM-DD HH:MM:SS') + mode : {'w' or 'a'}, default 'w' + File mode to use (write or append). + + .. versionadded:: 0.24.0 + + Attributes + ---------- + None + + Methods + ------- + None + + Notes + ----- + None of the methods and properties are considered public. + + For compatibility with CSV writers, ExcelWriter serializes lists + and dicts to strings before writing. + + Examples + -------- + Default usage: + + >>> with ExcelWriter('path_to_file.xlsx') as writer: + ... df.to_excel(writer) + + To write to separate sheets in a single file: + + >>> with ExcelWriter('path_to_file.xlsx') as writer: + ... df1.to_excel(writer, sheet_name='Sheet1') + ... df2.to_excel(writer, sheet_name='Sheet2') + + You can set the date format or datetime format: + + >>> with ExcelWriter('path_to_file.xlsx', + date_format='YYYY-MM-DD', + datetime_format='YYYY-MM-DD HH:MM:SS') as writer: + ... df.to_excel(writer) + + You can also append to an existing Excel file: + + >>> with ExcelWriter('path_to_file.xlsx', mode='a') as writer: + ... df.to_excel(writer, sheet_name='Sheet3') + """ + # Defining an ExcelWriter implementation (see abstract methods for more...) + + # - Mandatory + # - ``write_cells(self, cells, sheet_name=None, startrow=0, startcol=0)`` + # --> called to write additional DataFrames to disk + # - ``supported_extensions`` (tuple of supported extensions), used to + # check that engine supports the given extension. + # - ``engine`` - string that gives the engine name. Necessary to + # instantiate class directly and bypass ``ExcelWriterMeta`` engine + # lookup. + # - ``save(self)`` --> called to save file to disk + # - Mostly mandatory (i.e. should at least exist) + # - book, cur_sheet, path + + # - Optional: + # - ``__init__(self, path, engine=None, **kwargs)`` --> always called + # with path as first argument. + + # You also need to register the class with ``register_writer()``. + # Technically, ExcelWriter implementations don't need to subclass + # ExcelWriter. 
+ def __new__(cls, path, engine=None, **kwargs): + # only switch class if generic(ExcelWriter) + + if issubclass(cls, ExcelWriter): + if engine is None or (isinstance(engine, string_types) and + engine == 'auto'): + if isinstance(path, string_types): + ext = os.path.splitext(path)[-1][1:] + else: + ext = 'xlsx' + + try: + engine = config.get_option('io.excel.{ext}.writer' + .format(ext=ext)) + if engine == 'auto': + engine = _get_default_writer(ext) + except KeyError: + raise ValueError("No engine for filetype: '{ext}'" + .format(ext=ext)) + cls = get_writer(engine) + + return object.__new__(cls) + + # declare external properties you can count on + book = None + curr_sheet = None + path = None + + @abc.abstractproperty + def supported_extensions(self): + "extensions that writer engine supports" + pass + + @abc.abstractproperty + def engine(self): + "name of engine" + pass + + @abc.abstractmethod + def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0, + freeze_panes=None): + """ + Write given formatted cells into Excel an excel sheet + + Parameters + ---------- + cells : generator + cell of formatted data to save to Excel sheet + sheet_name : string, default None + Name of Excel sheet, if None, then use self.cur_sheet + startrow : upper left cell row to dump data frame + startcol : upper left cell column to dump data frame + freeze_panes: integer tuple of length 2 + contains the bottom-most row and right-most column to freeze + """ + pass + + @abc.abstractmethod + def save(self): + """ + Save workbook to disk. 
+ """ + pass + + def __init__(self, path, engine=None, + date_format=None, datetime_format=None, mode='w', + **engine_kwargs): + # validate that this engine can handle the extension + if isinstance(path, string_types): + ext = os.path.splitext(path)[-1] + else: + ext = 'xls' if engine == 'xlwt' else 'xlsx' + + self.check_extension(ext) + + self.path = path + self.sheets = {} + self.cur_sheet = None + + if date_format is None: + self.date_format = 'YYYY-MM-DD' + else: + self.date_format = date_format + if datetime_format is None: + self.datetime_format = 'YYYY-MM-DD HH:MM:SS' + else: + self.datetime_format = datetime_format + + self.mode = mode + + def __fspath__(self): + return _stringify_path(self.path) + + def _get_sheet_name(self, sheet_name): + if sheet_name is None: + sheet_name = self.cur_sheet + if sheet_name is None: # pragma: no cover + raise ValueError('Must pass explicit sheet_name or set ' + 'cur_sheet property') + return sheet_name + + def _value_with_fmt(self, val): + """Convert numpy types to Python types for the Excel writers. + + Parameters + ---------- + val : object + Value to be written into cells + + Returns + ------- + Tuple with the first element being the converted value and the second + being an optional format + """ + fmt = None + + if is_integer(val): + val = int(val) + elif is_float(val): + val = float(val) + elif is_bool(val): + val = bool(val) + elif isinstance(val, datetime): + fmt = self.datetime_format + elif isinstance(val, date): + fmt = self.date_format + elif isinstance(val, timedelta): + val = val.total_seconds() / float(86400) + fmt = '0' + else: + val = compat.to_str(val) + + return val, fmt + + @classmethod + def check_extension(cls, ext): + """checks that path's extension against the Writer's supported + extensions. 
If it isn't supported, raises UnsupportedFiletypeError.""" + if ext.startswith('.'): + ext = ext[1:] + if not any(ext in extension for extension in cls.supported_extensions): + msg = (u("Invalid extension for engine '{engine}': '{ext}'") + .format(engine=pprint_thing(cls.engine), + ext=pprint_thing(ext))) + raise ValueError(msg) + else: + return True + + # Allow use as a contextmanager + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, traceback): + self.close() + + def close(self): + """synonym for save, to make it more file-like""" + return self.save() + + +class ExcelFile(object): + """ + Class for parsing tabular excel sheets into DataFrame objects. + Uses xlrd. See read_excel for more documentation + + Parameters + ---------- + io : string, path object (pathlib.Path or py._path.local.LocalPath), + file-like object or xlrd workbook + If a string or path object, expected to be a path to xls or xlsx file. + engine : string, default None + If io is not a buffer or path, this must be set to identify io. + Acceptable values are None or ``xlrd``. + """ + + from pandas.io.excel._xlrd import _XlrdReader + + _engines = { + 'xlrd': _XlrdReader, + } + + def __init__(self, io, engine=None): + if engine is None: + engine = 'xlrd' + if engine not in self._engines: + raise ValueError("Unknown engine: {engine}".format(engine=engine)) + + # could be a str, ExcelFile, Book, etc. 
+ self.io = io + # Always a string + self._io = _stringify_path(io) + + self._reader = self._engines[engine](self._io) + + def __fspath__(self): + return self._io + + def parse(self, + sheet_name=0, + header=0, + names=None, + index_col=None, + usecols=None, + squeeze=False, + converters=None, + true_values=None, + false_values=None, + skiprows=None, + nrows=None, + na_values=None, + parse_dates=False, + date_parser=None, + thousands=None, + comment=None, + skipfooter=0, + convert_float=True, + mangle_dupe_cols=True, + **kwds): + """ + Parse specified sheet(s) into a DataFrame + + Equivalent to read_excel(ExcelFile, ...) See the read_excel + docstring for more info on accepted parameters + """ + + # Can't use _deprecate_kwarg since sheetname=None has a special meaning + if is_integer(sheet_name) and sheet_name == 0 and 'sheetname' in kwds: + warnings.warn("The `sheetname` keyword is deprecated, use " + "`sheet_name` instead", FutureWarning, stacklevel=2) + sheet_name = kwds.pop("sheetname") + elif 'sheetname' in kwds: + raise TypeError("Cannot specify both `sheet_name` " + "and `sheetname`. 
Use just `sheet_name`") + + if 'chunksize' in kwds: + raise NotImplementedError("chunksize keyword of read_excel " + "is not implemented") + + return self._reader.parse(sheet_name=sheet_name, + header=header, + names=names, + index_col=index_col, + usecols=usecols, + squeeze=squeeze, + converters=converters, + true_values=true_values, + false_values=false_values, + skiprows=skiprows, + nrows=nrows, + na_values=na_values, + parse_dates=parse_dates, + date_parser=date_parser, + thousands=thousands, + comment=comment, + skipfooter=skipfooter, + convert_float=convert_float, + mangle_dupe_cols=mangle_dupe_cols, + **kwds) + + @property + def book(self): + return self._reader.book + + @property + def sheet_names(self): + return self._reader.sheet_names + + def close(self): + """close io if necessary""" + if hasattr(self.io, 'close'): + self.io.close() + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, traceback): + self.close() diff --git a/pandas/io/excel/_openpyxl.py b/pandas/io/excel/_openpyxl.py new file mode 100644 index 0000000000000..8d79c13a65c97 --- /dev/null +++ b/pandas/io/excel/_openpyxl.py @@ -0,0 +1,453 @@ +from pandas.io.excel._base import ExcelWriter +from pandas.io.excel._util import _validate_freeze_panes + + +class _OpenpyxlWriter(ExcelWriter): + engine = 'openpyxl' + supported_extensions = ('.xlsx', '.xlsm') + + def __init__(self, path, engine=None, mode='w', **engine_kwargs): + # Use the openpyxl module as the Excel writer. + from openpyxl.workbook import Workbook + + super(_OpenpyxlWriter, self).__init__(path, mode=mode, **engine_kwargs) + + if self.mode == 'a': # Load from existing workbook + from openpyxl import load_workbook + book = load_workbook(self.path) + self.book = book + else: + # Create workbook object with default optimized_write=True. 
+ self.book = Workbook() + + if self.book.worksheets: + try: + self.book.remove(self.book.worksheets[0]) + except AttributeError: + + # compat - for openpyxl <= 2.4 + self.book.remove_sheet(self.book.worksheets[0]) + + def save(self): + """ + Save workbook to disk. + """ + return self.book.save(self.path) + + @classmethod + def _convert_to_style(cls, style_dict): + """ + converts a style_dict to an openpyxl style object + Parameters + ---------- + style_dict : style dictionary to convert + """ + + from openpyxl.style import Style + xls_style = Style() + for key, value in style_dict.items(): + for nk, nv in value.items(): + if key == "borders": + (xls_style.borders.__getattribute__(nk) + .__setattr__('border_style', nv)) + else: + xls_style.__getattribute__(key).__setattr__(nk, nv) + + return xls_style + + @classmethod + def _convert_to_style_kwargs(cls, style_dict): + """ + Convert a style_dict to a set of kwargs suitable for initializing + or updating-on-copy an openpyxl v2 style object + Parameters + ---------- + style_dict : dict + A dict with zero or more of the following keys (or their synonyms). + 'font' + 'fill' + 'border' ('borders') + 'alignment' + 'number_format' + 'protection' + Returns + ------- + style_kwargs : dict + A dict with the same, normalized keys as ``style_dict`` but each + value has been replaced with a native openpyxl style object of the + appropriate class. + """ + + _style_key_map = { + 'borders': 'border', + } + + style_kwargs = {} + for k, v in style_dict.items(): + if k in _style_key_map: + k = _style_key_map[k] + _conv_to_x = getattr(cls, '_convert_to_{k}'.format(k=k), + lambda x: None) + new_v = _conv_to_x(v) + if new_v: + style_kwargs[k] = new_v + + return style_kwargs + + @classmethod + def _convert_to_color(cls, color_spec): + """ + Convert ``color_spec`` to an openpyxl v2 Color object + Parameters + ---------- + color_spec : str, dict + A 32-bit ARGB hex string, or a dict with zero or more of the + following keys. 
+ 'rgb' + 'indexed' + 'auto' + 'theme' + 'tint' + 'index' + 'type' + Returns + ------- + color : openpyxl.styles.Color + """ + + from openpyxl.styles import Color + + if isinstance(color_spec, str): + return Color(color_spec) + else: + return Color(**color_spec) + + @classmethod + def _convert_to_font(cls, font_dict): + """ + Convert ``font_dict`` to an openpyxl v2 Font object + Parameters + ---------- + font_dict : dict + A dict with zero or more of the following keys (or their synonyms). + 'name' + 'size' ('sz') + 'bold' ('b') + 'italic' ('i') + 'underline' ('u') + 'strikethrough' ('strike') + 'color' + 'vertAlign' ('vertalign') + 'charset' + 'scheme' + 'family' + 'outline' + 'shadow' + 'condense' + Returns + ------- + font : openpyxl.styles.Font + """ + + from openpyxl.styles import Font + + _font_key_map = { + 'sz': 'size', + 'b': 'bold', + 'i': 'italic', + 'u': 'underline', + 'strike': 'strikethrough', + 'vertalign': 'vertAlign', + } + + font_kwargs = {} + for k, v in font_dict.items(): + if k in _font_key_map: + k = _font_key_map[k] + if k == 'color': + v = cls._convert_to_color(v) + font_kwargs[k] = v + + return Font(**font_kwargs) + + @classmethod + def _convert_to_stop(cls, stop_seq): + """ + Convert ``stop_seq`` to a list of openpyxl v2 Color objects, + suitable for initializing the ``GradientFill`` ``stop`` parameter. + Parameters + ---------- + stop_seq : iterable + An iterable that yields objects suitable for consumption by + ``_convert_to_color``. 
+ Returns + ------- + stop : list of openpyxl.styles.Color + """ + + return map(cls._convert_to_color, stop_seq) + + @classmethod + def _convert_to_fill(cls, fill_dict): + """ + Convert ``fill_dict`` to an openpyxl v2 Fill object + Parameters + ---------- + fill_dict : dict + A dict with one or more of the following keys (or their synonyms), + 'fill_type' ('patternType', 'patterntype') + 'start_color' ('fgColor', 'fgcolor') + 'end_color' ('bgColor', 'bgcolor') + or one or more of the following keys (or their synonyms). + 'type' ('fill_type') + 'degree' + 'left' + 'right' + 'top' + 'bottom' + 'stop' + Returns + ------- + fill : openpyxl.styles.Fill + """ + + from openpyxl.styles import PatternFill, GradientFill + + _pattern_fill_key_map = { + 'patternType': 'fill_type', + 'patterntype': 'fill_type', + 'fgColor': 'start_color', + 'fgcolor': 'start_color', + 'bgColor': 'end_color', + 'bgcolor': 'end_color', + } + + _gradient_fill_key_map = { + 'fill_type': 'type', + } + + pfill_kwargs = {} + gfill_kwargs = {} + for k, v in fill_dict.items(): + pk = gk = None + if k in _pattern_fill_key_map: + pk = _pattern_fill_key_map[k] + if k in _gradient_fill_key_map: + gk = _gradient_fill_key_map[k] + if pk in ['start_color', 'end_color']: + v = cls._convert_to_color(v) + if gk == 'stop': + v = cls._convert_to_stop(v) + if pk: + pfill_kwargs[pk] = v + elif gk: + gfill_kwargs[gk] = v + else: + pfill_kwargs[k] = v + gfill_kwargs[k] = v + + try: + return PatternFill(**pfill_kwargs) + except TypeError: + return GradientFill(**gfill_kwargs) + + @classmethod + def _convert_to_side(cls, side_spec): + """ + Convert ``side_spec`` to an openpyxl v2 Side object + Parameters + ---------- + side_spec : str, dict + A string specifying the border style, or a dict with zero or more + of the following keys (or their synonyms). 
+ 'style' ('border_style') + 'color' + Returns + ------- + side : openpyxl.styles.Side + """ + + from openpyxl.styles import Side + + _side_key_map = { + 'border_style': 'style', + } + + if isinstance(side_spec, str): + return Side(style=side_spec) + + side_kwargs = {} + for k, v in side_spec.items(): + if k in _side_key_map: + k = _side_key_map[k] + if k == 'color': + v = cls._convert_to_color(v) + side_kwargs[k] = v + + return Side(**side_kwargs) + + @classmethod + def _convert_to_border(cls, border_dict): + """ + Convert ``border_dict`` to an openpyxl v2 Border object + Parameters + ---------- + border_dict : dict + A dict with zero or more of the following keys (or their synonyms). + 'left' + 'right' + 'top' + 'bottom' + 'diagonal' + 'diagonal_direction' + 'vertical' + 'horizontal' + 'diagonalUp' ('diagonalup') + 'diagonalDown' ('diagonaldown') + 'outline' + Returns + ------- + border : openpyxl.styles.Border + """ + + from openpyxl.styles import Border + + _border_key_map = { + 'diagonalup': 'diagonalUp', + 'diagonaldown': 'diagonalDown', + } + + border_kwargs = {} + for k, v in border_dict.items(): + if k in _border_key_map: + k = _border_key_map[k] + if k == 'color': + v = cls._convert_to_color(v) + if k in ['left', 'right', 'top', 'bottom', 'diagonal']: + v = cls._convert_to_side(v) + border_kwargs[k] = v + + return Border(**border_kwargs) + + @classmethod + def _convert_to_alignment(cls, alignment_dict): + """ + Convert ``alignment_dict`` to an openpyxl v2 Alignment object + Parameters + ---------- + alignment_dict : dict + A dict with zero or more of the following keys (or their synonyms). 
+ 'horizontal' + 'vertical' + 'text_rotation' + 'wrap_text' + 'shrink_to_fit' + 'indent' + Returns + ------- + alignment : openpyxl.styles.Alignment + """ + + from openpyxl.styles import Alignment + + return Alignment(**alignment_dict) + + @classmethod + def _convert_to_number_format(cls, number_format_dict): + """ + Convert ``number_format_dict`` to an openpyxl v2.1.0 number format + initializer. + Parameters + ---------- + number_format_dict : dict + A dict with zero or more of the following keys. + 'format_code' : str + Returns + ------- + number_format : str + """ + return number_format_dict['format_code'] + + @classmethod + def _convert_to_protection(cls, protection_dict): + """ + Convert ``protection_dict`` to an openpyxl v2 Protection object. + Parameters + ---------- + protection_dict : dict + A dict with zero or more of the following keys. + 'locked' + 'hidden' + Returns + ------- + """ + + from openpyxl.styles import Protection + + return Protection(**protection_dict) + + def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0, + freeze_panes=None): + # Write the frame cells using openpyxl. 
+ sheet_name = self._get_sheet_name(sheet_name) + + _style_cache = {} + + if sheet_name in self.sheets: + wks = self.sheets[sheet_name] + else: + wks = self.book.create_sheet() + wks.title = sheet_name + self.sheets[sheet_name] = wks + + if _validate_freeze_panes(freeze_panes): + wks.freeze_panes = wks.cell(row=freeze_panes[0] + 1, + column=freeze_panes[1] + 1) + + for cell in cells: + xcell = wks.cell( + row=startrow + cell.row + 1, + column=startcol + cell.col + 1 + ) + xcell.value, fmt = self._value_with_fmt(cell.val) + if fmt: + xcell.number_format = fmt + + style_kwargs = {} + if cell.style: + key = str(cell.style) + style_kwargs = _style_cache.get(key) + if style_kwargs is None: + style_kwargs = self._convert_to_style_kwargs(cell.style) + _style_cache[key] = style_kwargs + + if style_kwargs: + for k, v in style_kwargs.items(): + setattr(xcell, k, v) + + if cell.mergestart is not None and cell.mergeend is not None: + + wks.merge_cells( + start_row=startrow + cell.row + 1, + start_column=startcol + cell.col + 1, + end_column=startcol + cell.mergeend + 1, + end_row=startrow + cell.mergestart + 1 + ) + + # When cells are merged only the top-left cell is preserved + # The behaviour of the other cells in a merged range is + # undefined + if style_kwargs: + first_row = startrow + cell.row + 1 + last_row = startrow + cell.mergestart + 1 + first_col = startcol + cell.col + 1 + last_col = startcol + cell.mergeend + 1 + + for row in range(first_row, last_row + 1): + for col in range(first_col, last_col + 1): + if row == first_row and col == first_col: + # Ignore first cell. It is already handled. 
+ continue + xcell = wks.cell(column=col, row=row) + for k, v in style_kwargs.items(): + setattr(xcell, k, v) diff --git a/pandas/io/excel/_util.py b/pandas/io/excel/_util.py new file mode 100644 index 0000000000000..49255d83d1cd3 --- /dev/null +++ b/pandas/io/excel/_util.py @@ -0,0 +1,265 @@ +import warnings + +import pandas.compat as compat +from pandas.compat import lrange, range + +from pandas.core.dtypes.common import is_integer, is_list_like + +_writers = {} + + +def register_writer(klass): + """ + Add engine to the excel writer registry.io.excel. + + You must use this method to integrate with ``to_excel``. + + Parameters + ---------- + klass : ExcelWriter + """ + if not callable(klass): + raise ValueError("Can only register callables as engines") + engine_name = klass.engine + _writers[engine_name] = klass + + +def _get_default_writer(ext): + """ + Return the default writer for the given extension. + + Parameters + ---------- + ext : str + The excel file extension for which to get the default engine. + + Returns + ------- + str + The default engine for the extension. + """ + _default_writers = {'xlsx': 'openpyxl', 'xlsm': 'openpyxl', 'xls': 'xlwt'} + try: + import xlsxwriter # noqa + _default_writers['xlsx'] = 'xlsxwriter' + except ImportError: + pass + return _default_writers[ext] + + +def get_writer(engine_name): + try: + return _writers[engine_name] + except KeyError: + raise ValueError("No Excel writer '{engine}'" + .format(engine=engine_name)) + + +def _excel2num(x): + """ + Convert Excel column name like 'AB' to 0-based column index. + + Parameters + ---------- + x : str + The Excel column name to convert to a 0-based column index. + + Returns + ------- + num : int + The column index corresponding to the name. + + Raises + ------ + ValueError + Part of the Excel column name was invalid. 
+ """ + index = 0 + + for c in x.upper().strip(): + cp = ord(c) + + if cp < ord("A") or cp > ord("Z"): + raise ValueError("Invalid column name: {x}".format(x=x)) + + index = index * 26 + cp - ord("A") + 1 + + return index - 1 + + +def _range2cols(areas): + """ + Convert comma separated list of column names and ranges to indices. + + Parameters + ---------- + areas : str + A string containing a sequence of column ranges (or areas). + + Returns + ------- + cols : list + A list of 0-based column indices. + + Examples + -------- + >>> _range2cols('A:E') + [0, 1, 2, 3, 4] + >>> _range2cols('A,C,Z:AB') + [0, 2, 25, 26, 27] + """ + cols = [] + + for rng in areas.split(","): + if ":" in rng: + rng = rng.split(":") + cols.extend(lrange(_excel2num(rng[0]), _excel2num(rng[1]) + 1)) + else: + cols.append(_excel2num(rng)) + + return cols + + +def _maybe_convert_usecols(usecols): + """ + Convert `usecols` into a compatible format for parsing in `parsers.py`. + + Parameters + ---------- + usecols : object + The use-columns object to potentially convert. + + Returns + ------- + converted : object + The compatible format of `usecols`. + """ + if usecols is None: + return usecols + + if is_integer(usecols): + warnings.warn(("Passing in an integer for `usecols` has been " + "deprecated. 
Please pass in a list of int from " + "0 to `usecols` inclusive instead."), + FutureWarning, stacklevel=2) + return lrange(usecols + 1) + + if isinstance(usecols, compat.string_types): + return _range2cols(usecols) + + return usecols + + +def _validate_freeze_panes(freeze_panes): + if freeze_panes is not None: + if ( + len(freeze_panes) == 2 and + all(isinstance(item, int) for item in freeze_panes) + ): + return True + + raise ValueError("freeze_panes must be of form (row, column)" + " where row and column are integers") + + # freeze_panes wasn't specified, return False so it won't be applied + # to output sheet + return False + + +def _trim_excel_header(row): + # trim header row so auto-index inference works + # xlrd uses '' , openpyxl None + while len(row) > 0 and (row[0] == '' or row[0] is None): + row = row[1:] + return row + + +def _maybe_convert_to_string(row): + """ + Convert elements in a row to string from Unicode. + + This is purely a Python 2.x patch and is performed ONLY when all + elements of the row are string-like. + + Parameters + ---------- + row : array-like + The row of data to convert. + + Returns + ------- + converted : array-like + """ + if compat.PY2: + converted = [] + + for i in range(len(row)): + if isinstance(row[i], compat.string_types): + try: + converted.append(str(row[i])) + except UnicodeEncodeError: + break + else: + break + else: + row = converted + + return row + + +def _fill_mi_header(row, control_row): + """Forward fill blank entries in row but only inside the same parent index. + + Used for creating headers in Multiindex. + Parameters + ---------- + row : list + List of items in a single row. + control_row : list of bool + Helps to determine if particular column is in same parent index as the + previous value. Used to stop propagation of empty cells between + different indexes. 
+ + Returns + ---------- + Returns changed row and control_row + """ + last = row[0] + for i in range(1, len(row)): + if not control_row[i]: + last = row[i] + + if row[i] == '' or row[i] is None: + row[i] = last + else: + control_row[i] = False + last = row[i] + + return _maybe_convert_to_string(row), control_row + + +def _pop_header_name(row, index_col): + """ + Pop the header name for MultiIndex parsing. + + Parameters + ---------- + row : list + The data row to parse for the header name. + index_col : int, list + The index columns for our data. Assumed to be non-null. + + Returns + ------- + header_name : str + The extracted header name. + trimmed_row : list + The original data row with the header name removed. + """ + # Pop out header name and fill w/blank. + i = index_col if not is_list_like(index_col) else max(index_col) + + header_name = row[i] + header_name = None if header_name == "" else header_name + + return header_name, row[:i] + [''] + row[i + 1:] diff --git a/pandas/io/excel/_xlrd.py b/pandas/io/excel/_xlrd.py new file mode 100644 index 0000000000000..60f7d8f94a399 --- /dev/null +++ b/pandas/io/excel/_xlrd.py @@ -0,0 +1,126 @@ +from datetime import time +from distutils.version import LooseVersion +from io import UnsupportedOperation + +import numpy as np + +import pandas.compat as compat +from pandas.compat import range, zip + +from pandas.io.common import _is_url, _urlopen, get_filepath_or_buffer +from pandas.io.excel._base import _BaseExcelReader + + +class _XlrdReader(_BaseExcelReader): + + def __init__(self, filepath_or_buffer): + """Reader using xlrd engine. + + Parameters + ---------- + filepath_or_buffer : string, path object or Workbook + Object to be parsed. + """ + err_msg = "Install xlrd >= 1.0.0 for Excel support" + + try: + import xlrd + except ImportError: + raise ImportError(err_msg) + else: + if xlrd.__VERSION__ < LooseVersion("1.0.0"): + raise ImportError(err_msg + + ". 
Current version " + xlrd.__VERSION__) + + from pandas.io.excel._base import ExcelFile + # If filepath_or_buffer is a url, want to keep the data as bytes so + # can't pass to get_filepath_or_buffer() + if _is_url(filepath_or_buffer): + filepath_or_buffer = _urlopen(filepath_or_buffer) + elif not isinstance(filepath_or_buffer, (ExcelFile, xlrd.Book)): + filepath_or_buffer, _, _, _ = get_filepath_or_buffer( + filepath_or_buffer) + + if isinstance(filepath_or_buffer, xlrd.Book): + self.book = filepath_or_buffer + elif hasattr(filepath_or_buffer, "read"): + # N.B. xlrd.Book has a read attribute too + if hasattr(filepath_or_buffer, 'seek'): + try: + # GH 19779 + filepath_or_buffer.seek(0) + except UnsupportedOperation: + # HTTPResponse does not support seek() + # GH 20434 + pass + + data = filepath_or_buffer.read() + self.book = xlrd.open_workbook(file_contents=data) + elif isinstance(filepath_or_buffer, compat.string_types): + self.book = xlrd.open_workbook(filepath_or_buffer) + else: + raise ValueError('Must explicitly set engine if not passing in' + ' buffer or path for io.') + + @property + def sheet_names(self): + return self.book.sheet_names() + + def get_sheet_by_name(self, name): + return self.book.sheet_by_name(name) + + def get_sheet_by_index(self, index): + return self.book.sheet_by_index(index) + + def get_sheet_data(self, sheet, convert_float): + from xlrd import (xldate, XL_CELL_DATE, + XL_CELL_ERROR, XL_CELL_BOOLEAN, + XL_CELL_NUMBER) + + epoch1904 = self.book.datemode + + def _parse_cell(cell_contents, cell_typ): + """converts the contents of the cell into a pandas + appropriate object""" + + if cell_typ == XL_CELL_DATE: + + # Use the newer xlrd datetime handling. + try: + cell_contents = xldate.xldate_as_datetime( + cell_contents, epoch1904) + except OverflowError: + return cell_contents + + # Excel doesn't distinguish between dates and time, + # so we treat dates on the epoch as times only. + # Also, Excel supports 1900 and 1904 epochs. 
+ year = (cell_contents.timetuple())[0:3] + if ((not epoch1904 and year == (1899, 12, 31)) or + (epoch1904 and year == (1904, 1, 1))): + cell_contents = time(cell_contents.hour, + cell_contents.minute, + cell_contents.second, + cell_contents.microsecond) + + elif cell_typ == XL_CELL_ERROR: + cell_contents = np.nan + elif cell_typ == XL_CELL_BOOLEAN: + cell_contents = bool(cell_contents) + elif convert_float and cell_typ == XL_CELL_NUMBER: + # GH5394 - Excel 'numbers' are always floats + # it's a minimal perf hit and less surprising + val = int(cell_contents) + if val == cell_contents: + cell_contents = val + return cell_contents + + data = [] + + for i in range(sheet.nrows): + row = [_parse_cell(value, typ) + for value, typ in zip(sheet.row_values(i), + sheet.row_types(i))] + data.append(row) + + return data diff --git a/pandas/io/excel/_xlsxwriter.py b/pandas/io/excel/_xlsxwriter.py new file mode 100644 index 0000000000000..531a3657cac6f --- /dev/null +++ b/pandas/io/excel/_xlsxwriter.py @@ -0,0 +1,218 @@ +import pandas._libs.json as json +from pandas.compat import string_types + +from pandas.io.excel._base import ExcelWriter +from pandas.io.excel._util import _validate_freeze_panes + + +class _XlsxStyler(object): + # Map from openpyxl-oriented styles to flatter xlsxwriter representation + # Ordering necessary for both determinism and because some are keyed by + # prefixes of others. 
+ STYLE_MAPPING = { + 'font': [ + (('name',), 'font_name'), + (('sz',), 'font_size'), + (('size',), 'font_size'), + (('color', 'rgb',), 'font_color'), + (('color',), 'font_color'), + (('b',), 'bold'), + (('bold',), 'bold'), + (('i',), 'italic'), + (('italic',), 'italic'), + (('u',), 'underline'), + (('underline',), 'underline'), + (('strike',), 'font_strikeout'), + (('vertAlign',), 'font_script'), + (('vertalign',), 'font_script'), + ], + 'number_format': [ + (('format_code',), 'num_format'), + ((), 'num_format',), + ], + 'protection': [ + (('locked',), 'locked'), + (('hidden',), 'hidden'), + ], + 'alignment': [ + (('horizontal',), 'align'), + (('vertical',), 'valign'), + (('text_rotation',), 'rotation'), + (('wrap_text',), 'text_wrap'), + (('indent',), 'indent'), + (('shrink_to_fit',), 'shrink'), + ], + 'fill': [ + (('patternType',), 'pattern'), + (('patterntype',), 'pattern'), + (('fill_type',), 'pattern'), + (('start_color', 'rgb',), 'fg_color'), + (('fgColor', 'rgb',), 'fg_color'), + (('fgcolor', 'rgb',), 'fg_color'), + (('start_color',), 'fg_color'), + (('fgColor',), 'fg_color'), + (('fgcolor',), 'fg_color'), + (('end_color', 'rgb',), 'bg_color'), + (('bgColor', 'rgb',), 'bg_color'), + (('bgcolor', 'rgb',), 'bg_color'), + (('end_color',), 'bg_color'), + (('bgColor',), 'bg_color'), + (('bgcolor',), 'bg_color'), + ], + 'border': [ + (('color', 'rgb',), 'border_color'), + (('color',), 'border_color'), + (('style',), 'border'), + (('top', 'color', 'rgb',), 'top_color'), + (('top', 'color',), 'top_color'), + (('top', 'style',), 'top'), + (('top',), 'top'), + (('right', 'color', 'rgb',), 'right_color'), + (('right', 'color',), 'right_color'), + (('right', 'style',), 'right'), + (('right',), 'right'), + (('bottom', 'color', 'rgb',), 'bottom_color'), + (('bottom', 'color',), 'bottom_color'), + (('bottom', 'style',), 'bottom'), + (('bottom',), 'bottom'), + (('left', 'color', 'rgb',), 'left_color'), + (('left', 'color',), 'left_color'), + (('left', 'style',), 'left'), + 
(('left',), 'left'), + ], + } + + @classmethod + def convert(cls, style_dict, num_format_str=None): + """ + converts a style_dict to an xlsxwriter format dict + + Parameters + ---------- + style_dict : style dictionary to convert + num_format_str : optional number format string + """ + + # Create a XlsxWriter format object. + props = {} + + if num_format_str is not None: + props['num_format'] = num_format_str + + if style_dict is None: + return props + + if 'borders' in style_dict: + style_dict = style_dict.copy() + style_dict['border'] = style_dict.pop('borders') + + for style_group_key, style_group in style_dict.items(): + for src, dst in cls.STYLE_MAPPING.get(style_group_key, []): + # src is a sequence of keys into a nested dict + # dst is a flat key + if dst in props: + continue + v = style_group + for k in src: + try: + v = v[k] + except (KeyError, TypeError): + break + else: + props[dst] = v + + if isinstance(props.get('pattern'), string_types): + # TODO: support other fill patterns + props['pattern'] = 0 if props['pattern'] == 'none' else 1 + + for k in ['border', 'top', 'right', 'bottom', 'left']: + if isinstance(props.get(k), string_types): + try: + props[k] = ['none', 'thin', 'medium', 'dashed', 'dotted', + 'thick', 'double', 'hair', 'mediumDashed', + 'dashDot', 'mediumDashDot', 'dashDotDot', + 'mediumDashDotDot', + 'slantDashDot'].index(props[k]) + except ValueError: + props[k] = 2 + + if isinstance(props.get('font_script'), string_types): + props['font_script'] = ['baseline', 'superscript', + 'subscript'].index(props['font_script']) + + if isinstance(props.get('underline'), string_types): + props['underline'] = {'none': 0, 'single': 1, 'double': 2, + 'singleAccounting': 33, + 'doubleAccounting': 34}[props['underline']] + + return props + + +class _XlsxWriter(ExcelWriter): + engine = 'xlsxwriter' + supported_extensions = ('.xlsx',) + + def __init__(self, path, engine=None, + date_format=None, datetime_format=None, mode='w', + **engine_kwargs): + # Use 
the xlsxwriter module as the Excel writer. + import xlsxwriter + + if mode == 'a': + raise ValueError('Append mode is not supported with xlsxwriter!') + + super(_XlsxWriter, self).__init__(path, engine=engine, + date_format=date_format, + datetime_format=datetime_format, + mode=mode, + **engine_kwargs) + + self.book = xlsxwriter.Workbook(path, **engine_kwargs) + + def save(self): + """ + Save workbook to disk. + """ + + return self.book.close() + + def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0, + freeze_panes=None): + # Write the frame cells using xlsxwriter. + sheet_name = self._get_sheet_name(sheet_name) + + if sheet_name in self.sheets: + wks = self.sheets[sheet_name] + else: + wks = self.book.add_worksheet(sheet_name) + self.sheets[sheet_name] = wks + + style_dict = {'null': None} + + if _validate_freeze_panes(freeze_panes): + wks.freeze_panes(*(freeze_panes)) + + for cell in cells: + val, fmt = self._value_with_fmt(cell.val) + + stylekey = json.dumps(cell.style) + if fmt: + stylekey += fmt + + if stylekey in style_dict: + style = style_dict[stylekey] + else: + style = self.book.add_format( + _XlsxStyler.convert(cell.style, fmt)) + style_dict[stylekey] = style + + if cell.mergestart is not None and cell.mergeend is not None: + wks.merge_range(startrow + cell.row, + startcol + cell.col, + startrow + cell.mergestart, + startcol + cell.mergeend, + cell.val, style) + else: + wks.write(startrow + cell.row, + startcol + cell.col, + val, style) diff --git a/pandas/io/excel/_xlwt.py b/pandas/io/excel/_xlwt.py new file mode 100644 index 0000000000000..191fbe914b750 --- /dev/null +++ b/pandas/io/excel/_xlwt.py @@ -0,0 +1,132 @@ +import pandas._libs.json as json + +from pandas.io.excel._base import ExcelWriter +from pandas.io.excel._util import _validate_freeze_panes + + +class _XlwtWriter(ExcelWriter): + engine = 'xlwt' + supported_extensions = ('.xls',) + + def __init__(self, path, engine=None, encoding=None, mode='w', + **engine_kwargs): + # Use 
the xlwt module as the Excel writer. + import xlwt + engine_kwargs['engine'] = engine + + if mode == 'a': + raise ValueError('Append mode is not supported with xlwt!') + + super(_XlwtWriter, self).__init__(path, mode=mode, **engine_kwargs) + + if encoding is None: + encoding = 'ascii' + self.book = xlwt.Workbook(encoding=encoding) + self.fm_datetime = xlwt.easyxf(num_format_str=self.datetime_format) + self.fm_date = xlwt.easyxf(num_format_str=self.date_format) + + def save(self): + """ + Save workbook to disk. + """ + return self.book.save(self.path) + + def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0, + freeze_panes=None): + # Write the frame cells using xlwt. + + sheet_name = self._get_sheet_name(sheet_name) + + if sheet_name in self.sheets: + wks = self.sheets[sheet_name] + else: + wks = self.book.add_sheet(sheet_name) + self.sheets[sheet_name] = wks + + if _validate_freeze_panes(freeze_panes): + wks.set_panes_frozen(True) + wks.set_horz_split_pos(freeze_panes[0]) + wks.set_vert_split_pos(freeze_panes[1]) + + style_dict = {} + + for cell in cells: + val, fmt = self._value_with_fmt(cell.val) + + stylekey = json.dumps(cell.style) + if fmt: + stylekey += fmt + + if stylekey in style_dict: + style = style_dict[stylekey] + else: + style = self._convert_to_style(cell.style, fmt) + style_dict[stylekey] = style + + if cell.mergestart is not None and cell.mergeend is not None: + wks.write_merge(startrow + cell.row, + startrow + cell.mergestart, + startcol + cell.col, + startcol + cell.mergeend, + val, style) + else: + wks.write(startrow + cell.row, + startcol + cell.col, + val, style) + + @classmethod + def _style_to_xlwt(cls, item, firstlevel=True, field_sep=',', + line_sep=';'): + """helper which recursively generate an xlwt easy style string + for example: + + hstyle = {"font": {"bold": True}, + "border": {"top": "thin", + "right": "thin", + "bottom": "thin", + "left": "thin"}, + "align": {"horiz": "center"}} + will be converted to + font: bold 
on; \ + border: top thin, right thin, bottom thin, left thin; \ + align: horiz center; + """ + if hasattr(item, 'items'): + if firstlevel: + it = ["{key}: {val}" + .format(key=key, val=cls._style_to_xlwt(value, False)) + for key, value in item.items()] + out = "{sep} ".format(sep=(line_sep).join(it)) + return out + else: + it = ["{key} {val}" + .format(key=key, val=cls._style_to_xlwt(value, False)) + for key, value in item.items()] + out = "{sep} ".format(sep=(field_sep).join(it)) + return out + else: + item = "{item}".format(item=item) + item = item.replace("True", "on") + item = item.replace("False", "off") + return item + + @classmethod + def _convert_to_style(cls, style_dict, num_format_str=None): + """ + converts a style_dict to an xlwt style object + Parameters + ---------- + style_dict : style dictionary to convert + num_format_str : optional number format string + """ + import xlwt + + if style_dict: + xlwt_stylestr = cls._style_to_xlwt(style_dict) + style = xlwt.easyxf(xlwt_stylestr, field_sep=',', line_sep=';') + else: + style = xlwt.XFStyle() + if num_format_str is not None: + style.num_format_str = num_format_str + + return style diff --git a/pandas/io/formats/console.py b/pandas/io/formats/console.py index d5ef9f61bc132..ad63b3efdd832 100644 --- a/pandas/io/formats/console.py +++ b/pandas/io/formats/console.py @@ -108,44 +108,6 @@ def check_main(): return check_main() -def in_qtconsole(): - """ - check if we're inside an IPython qtconsole - - .. deprecated:: 0.14.1 - This is no longer needed, or working, in IPython 3 and above. - """ - try: - ip = get_ipython() # noqa - front_end = ( - ip.config.get('KernelApp', {}).get('parent_appname', "") or - ip.config.get('IPKernelApp', {}).get('parent_appname', "")) - if 'qtconsole' in front_end.lower(): - return True - except NameError: - return False - return False - - -def in_ipnb(): - """ - check if we're inside an IPython Notebook - - .. 
deprecated:: 0.14.1 - This is no longer needed, or working, in IPython 3 and above. - """ - try: - ip = get_ipython() # noqa - front_end = ( - ip.config.get('KernelApp', {}).get('parent_appname', "") or - ip.config.get('IPKernelApp', {}).get('parent_appname', "")) - if 'notebook' in front_end.lower(): - return True - except NameError: - return False - return False - - def in_ipython_frontend(): """ check if we're inside an an IPython zmq frontend diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py index 62fa04e784072..f68ef2cc39006 100644 --- a/pandas/io/formats/format.py +++ b/pandas/io/formats/format.py @@ -1060,19 +1060,26 @@ def get_result_as_array(self): def format_values_with(float_format): formatter = self._value_formatter(float_format, threshold) + # default formatter leaves a space to the left when formatting + # floats, must be consistent for left-justifying NaNs (GH #25061) + if self.justify == 'left': + na_rep = ' ' + self.na_rep + else: + na_rep = self.na_rep + # separate the wheat from the chaff values = self.values mask = isna(values) if hasattr(values, 'to_dense'): # sparse numpy ndarray values = values.to_dense() values = np.array(values, dtype='object') - values[mask] = self.na_rep + values[mask] = na_rep imask = (~mask).ravel() values.flat[imask] = np.array([formatter(val) for val in values.ravel()[imask]]) if self.fixed_width: - return _trim_zeros(values, self.na_rep) + return _trim_zeros(values, na_rep) return values diff --git a/pandas/io/formats/html.py b/pandas/io/formats/html.py index f41749e0a7745..66d13bf2668f9 100644 --- a/pandas/io/formats/html.py +++ b/pandas/io/formats/html.py @@ -5,14 +5,14 @@ from __future__ import print_function +from collections import OrderedDict from textwrap import dedent -from pandas.compat import OrderedDict, lzip, map, range, u, unichr, zip +from pandas.compat import lzip, map, range, u, unichr, zip from pandas.core.dtypes.generic import ABCMultiIndex -from pandas import compat -import 
pandas.core.common as com +from pandas import compat, option_context from pandas.core.config import get_option from pandas.io.common import _is_url @@ -190,7 +190,7 @@ def _write_col_header(self, indent): if self.fmt.sparsify: # GH3547 - sentinel = com.sentinel_factory() + sentinel = object() else: sentinel = False levels = self.columns.format(sparsify=sentinel, adjoin=False, @@ -320,9 +320,15 @@ def _write_header(self, indent): self.write('</thead>', indent) + def _get_formatted_values(self): + with option_context('display.max_colwidth', 999999): + fmt_values = {i: self.fmt._format_col(i) + for i in range(self.ncols)} + return fmt_values + def _write_body(self, indent): self.write('<tbody>', indent) - fmt_values = {i: self.fmt._format_col(i) for i in range(self.ncols)} + fmt_values = self._get_formatted_values() # write values if self.fmt.index and isinstance(self.frame.index, ABCMultiIndex): @@ -386,7 +392,7 @@ def _write_hierarchical_rows(self, fmt_values, indent): if self.fmt.sparsify: # GH3547 - sentinel = com.sentinel_factory() + sentinel = object() levels = frame.index.format(sparsify=sentinel, adjoin=False, names=False) @@ -486,6 +492,9 @@ class NotebookFormatter(HTMLFormatter): DataFrame._repr_html_() and DataFrame.to_html(notebook=True) """ + def _get_formatted_values(self): + return {i: self.fmt._format_col(i) for i in range(self.ncols)} + def write_style(self): # We use the "scoped" attribute here so that the desired # style properties for the data frame are not then applied diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py index 598453eb92d25..c8b5dc6b9b7c0 100644 --- a/pandas/io/formats/style.py +++ b/pandas/io/formats/style.py @@ -81,7 +81,7 @@ class Styler(object): See Also -------- - pandas.DataFrame.style + DataFrame.style Notes ----- @@ -424,16 +424,18 @@ def render(self, **kwargs): Parameters ---------- - `**kwargs` : Any additional keyword arguments are passed through - to ``self.template.render``. 
This is useful when you need to provide - additional variables for a custom template. + **kwargs + Any additional keyword arguments are passed + through to ``self.template.render``. + This is useful when you need to provide + additional variables for a custom template. .. versionadded:: 0.20 Returns ------- rendered : str - the rendered HTML + The rendered HTML. Notes ----- @@ -1223,7 +1225,7 @@ def from_custom_template(cls, searchpath, name): Returns ------- MyStyler : subclass of Styler - has the correct ``env`` and ``template`` class attributes set. + Has the correct ``env`` and ``template`` class attributes set. """ loader = ChoiceLoader([ FileSystemLoader(searchpath), @@ -1322,7 +1324,7 @@ def _get_level_lengths(index, hidden_elements=None): Result is a dictionary of (level, inital_position): span """ - sentinel = com.sentinel_factory() + sentinel = object() levels = index.format(sparsify=sentinel, adjoin=False, names=False) if hidden_elements is None: diff --git a/pandas/io/formats/terminal.py b/pandas/io/formats/terminal.py index bb34259d710c7..cf2383955d593 100644 --- a/pandas/io/formats/terminal.py +++ b/pandas/io/formats/terminal.py @@ -15,6 +15,7 @@ import os import shutil +import subprocess from pandas.compat import PY3 @@ -94,22 +95,29 @@ def _get_terminal_size_tput(): # get terminal width # src: http://stackoverflow.com/questions/263890/how-do-i-find-the-width # -height-of-a-terminal-window + try: - import subprocess proc = subprocess.Popen(["tput", "cols"], stdin=subprocess.PIPE, stdout=subprocess.PIPE) - output = proc.communicate(input=None) - cols = int(output[0]) + output_cols = proc.communicate(input=None) proc = subprocess.Popen(["tput", "lines"], stdin=subprocess.PIPE, stdout=subprocess.PIPE) - output = proc.communicate(input=None) - rows = int(output[0]) - return (cols, rows) + output_rows = proc.communicate(input=None) except OSError: return None + try: + # Some terminals (e.g. 
spyder) may report a terminal size of '', + # making the `int` fail. + + cols = int(output_cols[0]) + rows = int(output_rows[0]) + return cols, rows + except (ValueError, IndexError): + return None + def _get_terminal_size_linux(): def ioctl_GWINSZ(fd): diff --git a/pandas/io/gbq.py b/pandas/io/gbq.py index 639b68d433ac6..a6cec7ea8fb16 100644 --- a/pandas/io/gbq.py +++ b/pandas/io/gbq.py @@ -127,7 +127,7 @@ def read_gbq(query, project_id=None, index_col=None, col_order=None, See Also -------- pandas_gbq.read_gbq : This function in the pandas-gbq library. - pandas.DataFrame.to_gbq : Write a DataFrame to Google BigQuery. + DataFrame.to_gbq : Write a DataFrame to Google BigQuery. """ pandas_gbq = _try_import() diff --git a/pandas/io/html.py b/pandas/io/html.py index 74934740a6957..347bb3eec54af 100644 --- a/pandas/io/html.py +++ b/pandas/io/html.py @@ -988,7 +988,7 @@ def read_html(io, match='.+', flavor=None, header=None, index_col=None, latest information on table attributes for the modern web. parse_dates : bool, optional - See :func:`~pandas.read_csv` for more details. + See :func:`~read_csv` for more details. tupleize_cols : bool, optional If ``False`` try to parse multiple header rows into a @@ -1043,7 +1043,7 @@ def read_html(io, match='.+', flavor=None, header=None, index_col=None, See Also -------- - pandas.read_csv + read_csv Notes ----- @@ -1066,7 +1066,7 @@ def read_html(io, match='.+', flavor=None, header=None, index_col=None, .. versionadded:: 0.21.0 - Similar to :func:`~pandas.read_csv` the `header` argument is applied + Similar to :func:`~read_csv` the `header` argument is applied **after** `skiprows` is applied. 
This function will *always* return a list of :class:`DataFrame` *or* diff --git a/pandas/io/json/json.py b/pandas/io/json/json.py index 4bbccc8339d7c..4bae067ee5196 100644 --- a/pandas/io/json/json.py +++ b/pandas/io/json/json.py @@ -226,8 +226,8 @@ def _write(self, obj, orient, double_precision, ensure_ascii, return serialized -def read_json(path_or_buf=None, orient=None, typ='frame', dtype=True, - convert_axes=True, convert_dates=True, keep_default_dates=True, +def read_json(path_or_buf=None, orient=None, typ='frame', dtype=None, + convert_axes=None, convert_dates=True, keep_default_dates=True, numpy=False, precise_float=False, date_unit=None, encoding=None, lines=False, chunksize=None, compression='infer'): """ @@ -277,11 +277,25 @@ def read_json(path_or_buf=None, orient=None, typ='frame', dtype=True, 'table' as an allowed value for the ``orient`` argument typ : type of object to recover (series or frame), default 'frame' - dtype : boolean or dict, default True - If True, infer dtypes, if a dict of column to dtype, then use those, + dtype : boolean or dict, default None + If True, infer dtypes; if a dict of column to dtype, then use those; if False, then don't infer dtypes at all, applies only to the data. - convert_axes : boolean, default True + + For all ``orient`` values except ``'table'``, default is True. + + .. versionchanged:: 0.25.0 + + Not applicable for ``orient='table'``. + + convert_axes : boolean, default None Try to convert the axes to the proper dtypes. + + For all ``orient`` values except ``'table'``, default is True. + + .. versionchanged:: 0.25.0 + + Not applicable for ``orient='table'``. 
+ convert_dates : boolean, default True List of columns to parse for dates; If True, then try to parse datelike columns default is True; a column label is datelike if @@ -408,6 +422,16 @@ def read_json(path_or_buf=None, orient=None, typ='frame', dtype=True, {"index": "row 2", "col 1": "c", "col 2": "d"}]}' """ + if orient == 'table' and dtype: + raise ValueError("cannot pass both dtype and orient='table'") + if orient == 'table' and convert_axes: + raise ValueError("cannot pass both convert_axes and orient='table'") + + if dtype is None and orient != 'table': + dtype = True + if convert_axes is None and orient != 'table': + convert_axes = True + compression = _infer_compression(path_or_buf, compression) filepath_or_buffer, _, compression, should_close = get_filepath_or_buffer( path_or_buf, encoding=encoding, compression=compression, @@ -600,15 +624,15 @@ class Parser(object): 'us': long(31536000000000), 'ns': long(31536000000000000)} - def __init__(self, json, orient, dtype=True, convert_axes=True, + def __init__(self, json, orient, dtype=None, convert_axes=True, convert_dates=True, keep_default_dates=False, numpy=False, precise_float=False, date_unit=None): self.json = json if orient is None: orient = self._default_orient - self.orient = orient + self.dtype = dtype if orient == "split": @@ -680,7 +704,7 @@ def _try_convert_data(self, name, data, use_dtypes=True, # don't try to coerce, unless a force conversion if use_dtypes: - if self.dtype is False: + if not self.dtype: return data, False elif self.dtype is True: pass diff --git a/pandas/io/json/table_schema.py b/pandas/io/json/table_schema.py index 2bd93b19d4225..971386c91944e 100644 --- a/pandas/io/json/table_schema.py +++ b/pandas/io/json/table_schema.py @@ -314,12 +314,13 @@ def parse_table_schema(json, precise_float): df = df.astype(dtypes) - df = df.set_index(table['schema']['primaryKey']) - if len(df.index.names) == 1: - if df.index.name == 'index': - df.index.name = None - else: - df.index.names = [None 
if x.startswith('level_') else x for x in - df.index.names] + if 'primaryKey' in table['schema']: + df = df.set_index(table['schema']['primaryKey']) + if len(df.index.names) == 1: + if df.index.name == 'index': + df.index.name = None + else: + df.index.names = [None if x.startswith('level_') else x for x in + df.index.names] return df diff --git a/pandas/io/packers.py b/pandas/io/packers.py index efe4e3a91c69c..588d63d73515f 100644 --- a/pandas/io/packers.py +++ b/pandas/io/packers.py @@ -219,7 +219,7 @@ def read(fh): finally: if fh is not None: fh.close() - elif hasattr(path_or_buf, 'read') and compat.callable(path_or_buf.read): + elif hasattr(path_or_buf, 'read') and callable(path_or_buf.read): # treat as a buffer like return read(path_or_buf) diff --git a/pandas/io/parquet.py b/pandas/io/parquet.py index dada9000d901a..ba322f42c07c1 100644 --- a/pandas/io/parquet.py +++ b/pandas/io/parquet.py @@ -262,16 +262,17 @@ def read_parquet(path, engine='auto', columns=None, **kwargs): ---------- path : string File path - columns : list, default=None - If not None, only these columns will be read from the file. - - .. versionadded 0.21.1 engine : {'auto', 'pyarrow', 'fastparquet'}, default 'auto' Parquet library to use. If 'auto', then the option ``io.parquet.engine`` is used. The default ``io.parquet.engine`` behavior is to try 'pyarrow', falling back to 'fastparquet' if 'pyarrow' is unavailable. - kwargs are passed to the engine + columns : list, default=None + If not None, only these columns will be read from the file. + + .. versionadded 0.21.1 + **kwargs + Any additional kwargs are passed to the engine. Returns ------- diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index b31d3f665f47f..4163a571df800 100755 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -203,9 +203,14 @@ * dict, e.g. 
{{'foo' : [1, 3]}} -> parse columns 1, 3 as date and call result 'foo' - If a column or index contains an unparseable date, the entire column or - index will be returned unaltered as an object data type. For non-standard - datetime parsing, use ``pd.to_datetime`` after ``pd.read_csv`` + If a column or index cannot be represented as an array of datetimes, + say because of an unparseable value or a mixture of timezones, the column + or index will be returned unaltered as an object data type. For + non-standard datetime parsing, use ``pd.to_datetime`` after + ``pd.read_csv``. To parse an index or column with a mixture of timezones, + specify ``date_parser`` to be a partially-applied + :func:`pandas.to_datetime` with ``utc=True``. See + :ref:`io.csv.mixed_timezones` for more. Note: A fast-path exists for iso8601-formatted dates. infer_datetime_format : bool, default False diff --git a/pandas/io/pickle.py b/pandas/io/pickle.py index 789f55a62dc58..ab4a266853a78 100644 --- a/pandas/io/pickle.py +++ b/pandas/io/pickle.py @@ -1,8 +1,7 @@ """ pickle compat """ import warnings -import numpy as np -from numpy.lib.format import read_array, write_array +from numpy.lib.format import read_array from pandas.compat import PY3, BytesIO, cPickle as pkl, pickle_compat as pc @@ -76,6 +75,7 @@ def to_pickle(obj, path, compression='infer', protocol=pkl.HIGHEST_PROTOCOL): try: f.write(pkl.dumps(obj, protocol=protocol)) finally: + f.close() for _f in fh: _f.close() @@ -138,63 +138,32 @@ def read_pickle(path, compression='infer'): >>> os.remove("./dummy.pkl") """ path = _stringify_path(path) + f, fh = _get_handle(path, 'rb', compression=compression, is_text=False) + + # 1) try with cPickle + # 2) try with the compat pickle to handle subclass changes + # 3) pass encoding only if its not None as py2 doesn't handle the param - def read_wrapper(func): - # wrapper file handle open/close operation - f, fh = _get_handle(path, 'rb', - compression=compression, - is_text=False) - try: - return 
func(f) - finally: - for _f in fh: - _f.close() - - def try_read(path, encoding=None): - # try with cPickle - # try with current pickle, if we have a Type Error then - # try with the compat pickle to handle subclass changes - # pass encoding only if its not None as py2 doesn't handle - # the param - - # cpickle - # GH 6899 - try: - with warnings.catch_warnings(record=True): - # We want to silence any warnings about, e.g. moved modules. - warnings.simplefilter("ignore", Warning) - return read_wrapper(lambda f: pkl.load(f)) - except Exception: # noqa: E722 - # reg/patched pickle - # compat not used in pandas/compat/pickle_compat.py::load - # TODO: remove except block OR modify pc.load to use compat - try: - return read_wrapper( - lambda f: pc.load(f, encoding=encoding, compat=False)) - # compat pickle - except Exception: # noqa: E722 - return read_wrapper( - lambda f: pc.load(f, encoding=encoding, compat=True)) try: - return try_read(path) + with warnings.catch_warnings(record=True): + # We want to silence any warnings about, e.g. moved modules. 
+ warnings.simplefilter("ignore", Warning) + return pkl.load(f) except Exception: # noqa: E722 - if PY3: - return try_read(path, encoding='latin1') - raise - + try: + return pc.load(f, encoding=None) + except Exception: # noqa: E722 + if PY3: + return pc.load(f, encoding='latin1') + raise + finally: + f.close() + for _f in fh: + _f.close() # compat with sparse pickle / unpickle -def _pickle_array(arr): - arr = arr.view(np.ndarray) - - buf = BytesIO() - write_array(buf, arr) - - return buf.getvalue() - - def _unpickle_array(bytes): arr = read_array(BytesIO(bytes)) diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index 4e103482f48a2..2ee8759b9bdd8 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -15,34 +15,29 @@ import numpy as np -from pandas._libs import algos, lib, writers as libwriters +from pandas._libs import lib, writers as libwriters from pandas._libs.tslibs import timezones from pandas.compat import PY3, filter, lrange, range, string_types from pandas.errors import PerformanceWarning from pandas.core.dtypes.common import ( - ensure_int64, ensure_object, ensure_platform_int, is_categorical_dtype, - is_datetime64_dtype, is_datetime64tz_dtype, is_list_like, - is_timedelta64_dtype) + ensure_object, is_categorical_dtype, is_datetime64_dtype, + is_datetime64tz_dtype, is_list_like, is_timedelta64_dtype) from pandas.core.dtypes.missing import array_equivalent from pandas import ( - DataFrame, DatetimeIndex, Index, Int64Index, MultiIndex, Panel, - PeriodIndex, Series, SparseDataFrame, SparseSeries, TimedeltaIndex, compat, - concat, isna, to_datetime) + DataFrame, DatetimeIndex, Index, Int64Index, MultiIndex, PeriodIndex, + Series, SparseDataFrame, SparseSeries, TimedeltaIndex, compat, concat, + isna, to_datetime) from pandas.core import config -from pandas.core.algorithms import match, unique -from pandas.core.arrays.categorical import ( - Categorical, _factorize_from_iterables) +from pandas.core.arrays.categorical import Categorical from 
pandas.core.arrays.sparse import BlockIndex, IntIndex from pandas.core.base import StringMixin import pandas.core.common as com from pandas.core.computation.pytables import Expr, maybe_expression from pandas.core.config import get_option from pandas.core.index import ensure_index -from pandas.core.internals import ( - BlockManager, _block2d_to_blocknd, _block_shape, _factor_indexer, - make_block) +from pandas.core.internals import BlockManager, _block_shape, make_block from pandas.io.common import _stringify_path from pandas.io.formats.printing import adjoin, pprint_thing @@ -175,7 +170,6 @@ class DuplicateWarning(Warning): SparseSeries: u'sparse_series', DataFrame: u'frame', SparseDataFrame: u'sparse_frame', - Panel: u'wide', } # storer class map @@ -187,7 +181,6 @@ class DuplicateWarning(Warning): u'sparse_series': 'SparseSeriesFixed', u'frame': 'FrameFixed', u'sparse_frame': 'SparseFrameFixed', - u'wide': 'PanelFixed', } # table class map @@ -197,16 +190,12 @@ class DuplicateWarning(Warning): u'appendable_multiseries': 'AppendableMultiSeriesTable', u'appendable_frame': 'AppendableFrameTable', u'appendable_multiframe': 'AppendableMultiFrameTable', - u'appendable_panel': 'AppendablePanelTable', u'worm': 'WORMTable', - u'legacy_frame': 'LegacyFrameTable', - u'legacy_panel': 'LegacyPanelTable', } # axes map _AXES_MAP = { DataFrame: [0], - Panel: [1, 2] } # register our configuration options @@ -326,8 +315,8 @@ def read_hdf(path_or_buf, key=None, mode='r', **kwargs): See Also -------- - pandas.DataFrame.to_hdf : Write a HDF file from a DataFrame. - pandas.HDFStore : Low-level access to HDF files. + DataFrame.to_hdf : Write a HDF file from a DataFrame. + HDFStore : Low-level access to HDF files. 
Examples -------- @@ -865,7 +854,7 @@ def put(self, key, value, format=None, append=False, **kwargs): Parameters ---------- key : object - value : {Series, DataFrame, Panel} + value : {Series, DataFrame} format : 'fixed(f)|table(t)', default is 'fixed' fixed(f) : Fixed format Fast writing/reading. Not-appendable, nor searchable @@ -947,7 +936,7 @@ def append(self, key, value, format=None, append=True, columns=None, Parameters ---------- key : object - value : {Series, DataFrame, Panel} + value : {Series, DataFrame} format : 'table' is the default table(t) : table format Write as a PyTables Table structure which may perform @@ -3028,16 +3017,6 @@ class FrameFixed(BlockManagerFixed): obj_type = DataFrame -class PanelFixed(BlockManagerFixed): - pandas_kind = u'wide' - obj_type = Panel - is_shape_reversed = True - - def write(self, obj, **kwargs): - obj._consolidate_inplace() - return super(PanelFixed, self).write(obj, **kwargs) - - class Table(Fixed): """ represent a table: @@ -3288,7 +3267,7 @@ def get_attrs(self): self.nan_rep = getattr(self.attrs, 'nan_rep', None) self.encoding = _ensure_encoding( getattr(self.attrs, 'encoding', None)) - self.errors = getattr(self.attrs, 'errors', 'strict') + self.errors = _ensure_decoded(getattr(self.attrs, 'errors', 'strict')) self.levels = getattr( self.attrs, 'levels', None) or [] self.index_axes = [ @@ -3900,107 +3879,11 @@ def read(self, where=None, columns=None, **kwargs): if not self.read_axes(where=where, **kwargs): return None - lst_vals = [a.values for a in self.index_axes] - labels, levels = _factorize_from_iterables(lst_vals) - # labels and levels are tuples but lists are expected - labels = list(labels) - levels = list(levels) - N = [len(lvl) for lvl in levels] - - # compute the key - key = _factor_indexer(N[1:], labels) - - objs = [] - if len(unique(key)) == len(key): - - sorter, _ = algos.groupsort_indexer( - ensure_int64(key), np.prod(N)) - sorter = ensure_platform_int(sorter) - - # create the objs - for c in 
self.values_axes: - - # the data need to be sorted - sorted_values = c.take_data().take(sorter, axis=0) - if sorted_values.ndim == 1: - sorted_values = sorted_values.reshape( - (sorted_values.shape[0], 1)) - - take_labels = [l.take(sorter) for l in labels] - items = Index(c.values) - block = _block2d_to_blocknd( - values=sorted_values, placement=np.arange(len(items)), - shape=tuple(N), labels=take_labels, ref_items=items) - - # create the object - mgr = BlockManager([block], [items] + levels) - obj = self.obj_type(mgr) - - # permute if needed - if self.is_transposed: - obj = obj.transpose( - *tuple(Series(self.data_orientation).argsort())) - - objs.append(obj) - - else: - warnings.warn(duplicate_doc, DuplicateWarning, stacklevel=5) - - # reconstruct - long_index = MultiIndex.from_arrays( - [i.values for i in self.index_axes]) - - for c in self.values_axes: - lp = DataFrame(c.data, index=long_index, columns=c.values) - - # need a better algorithm - tuple_index = long_index.values - - unique_tuples = unique(tuple_index) - unique_tuples = com.asarray_tuplesafe(unique_tuples) - - indexer = match(unique_tuples, tuple_index) - indexer = ensure_platform_int(indexer) - - new_index = long_index.take(indexer) - new_values = lp.values.take(indexer, axis=0) - - lp = DataFrame(new_values, index=new_index, columns=lp.columns) - objs.append(lp.to_panel()) - - # create the composite object - if len(objs) == 1: - wp = objs[0] - else: - wp = concat(objs, axis=0, verify_integrity=False)._consolidate() - - # apply the selection filters & axis orderings - wp = self.process_axes(wp, columns=columns) - - return wp - - -class LegacyFrameTable(LegacyTable): - - """ support the legacy frame table """ - pandas_kind = u'frame_table' - table_type = u'legacy_frame' - obj_type = Panel - - def read(self, *args, **kwargs): - return super(LegacyFrameTable, self).read(*args, **kwargs)['value'] - - -class LegacyPanelTable(LegacyTable): - - """ support the legacy panel table """ - table_type = 
u'legacy_panel' - obj_type = Panel + raise NotImplementedError("Panel is removed in pandas 0.25.0") class AppendableTable(LegacyTable): - - """ suppor the new appendable table formats """ + """ support the new appendable table formats """ _indexables = None table_type = u'appendable' @@ -4232,8 +4115,7 @@ def delete(self, where=None, start=None, stop=None, **kwargs): class AppendableFrameTable(AppendableTable): - - """ suppor the new appendable table formats """ + """ support the new appendable table formats """ pandas_kind = u'frame_table' table_type = u'appendable_frame' ndim = 2 @@ -4442,24 +4324,6 @@ def read(self, **kwargs): return df -class AppendablePanelTable(AppendableTable): - - """ suppor the new appendable table formats """ - table_type = u'appendable_panel' - ndim = 3 - obj_type = Panel - - def get_object(self, obj): - """ these are written transposed """ - if self.is_transposed: - obj = obj.transpose(*self.data_orientation) - return obj - - @property - def is_transposed(self): - return self.data_orientation != tuple(range(self.ndim)) - - def _reindex_axis(obj, axis, labels, other=None): ax = obj._get_axis(axis) labels = ensure_index(labels) @@ -4875,16 +4739,3 @@ def select_coords(self): return self.coordinates return np.arange(start, stop) - -# utilities ### - - -def timeit(key, df, fn=None, remove=True, **kwargs): - if fn is None: - fn = 'timeit.h5' - store = HDFStore(fn, mode='w') - store.append(key, df, **kwargs) - store.close() - - if remove: - os.remove(fn) diff --git a/pandas/io/sql.py b/pandas/io/sql.py index 5d1163b3e0024..02fba52eac7f7 100644 --- a/pandas/io/sql.py +++ b/pandas/io/sql.py @@ -182,26 +182,29 @@ def execute(sql, con, cur=None, params=None): def read_sql_table(table_name, con, schema=None, index_col=None, coerce_float=True, parse_dates=None, columns=None, chunksize=None): - """Read SQL database table into a DataFrame. + """ + Read SQL database table into a DataFrame. 
Given a table name and a SQLAlchemy connectable, returns a DataFrame. This function does not support DBAPI connections. Parameters ---------- - table_name : string + table_name : str Name of SQL table in database. - con : SQLAlchemy connectable (or database string URI) + con : SQLAlchemy connectable or str + A database URI could be provided as as str. SQLite DBAPI connection mode not supported. - schema : string, default None + schema : str, default None Name of SQL schema in database to query (if database flavor supports this). Uses default schema if None (default). - index_col : string or list of strings, optional, default: None + index_col : str or list of str, optional, default: None Column(s) to set as index(MultiIndex). - coerce_float : boolean, default True + coerce_float : bool, default True Attempts to convert values of non-string, non-numeric objects (like decimal.Decimal) to floating point. Can result in loss of Precision. - parse_dates : list or dict, default: None + parse_dates : list or dict, default None + The behavior is as follows: - List of column names to parse as dates. - Dict of ``{column_name: format string}`` where format string is strftime compatible in case of parsing string times or is one of @@ -210,8 +213,8 @@ def read_sql_table(table_name, con, schema=None, index_col=None, to the keyword arguments of :func:`pandas.to_datetime` Especially useful with databases without native Datetime support, such as SQLite. - columns : list, default: None - List of column names to select from SQL table + columns : list, default None + List of column names to select from SQL table. chunksize : int, default None If specified, returns an iterator where `chunksize` is the number of rows to include in each chunk. @@ -219,15 +222,21 @@ def read_sql_table(table_name, con, schema=None, index_col=None, Returns ------- DataFrame + A SQL table is returned as two-dimensional data structure with labeled + axes. 
See Also -------- read_sql_query : Read SQL query into a DataFrame. - read_sql + read_sql : Read SQL query or database table into a DataFrame. Notes ----- Any datetime values with time zone information will be converted to UTC. + + Examples + -------- + >>> pd.read_sql_table('table_name', 'postgres:///db_name') # doctest:+SKIP """ con = _engine_builder(con) @@ -381,7 +390,8 @@ def read_sql(sql, con, index_col=None, coerce_float=True, params=None, try: _is_table_name = pandas_sql.has_table(sql) - except (ImportError, AttributeError): + except Exception: + # using generic exception to catch errors from sql drivers (GH24988) _is_table_name = False if _is_table_name: diff --git a/pandas/io/stata.py b/pandas/io/stata.py index 1b0660171ecac..62a9dbdc4657e 100644 --- a/pandas/io/stata.py +++ b/pandas/io/stata.py @@ -100,8 +100,8 @@ See Also -------- -pandas.io.stata.StataReader : Low-level reader for Stata data files. -pandas.DataFrame.to_stata: Export Stata data files. +io.stata.StataReader : Low-level reader for Stata data files. +DataFrame.to_stata: Export Stata data files. Examples -------- @@ -119,7 +119,7 @@ _iterator_params) _data_method_doc = """\ -Reads observations from Stata file, converting them into a dataframe +Read observations from Stata file, converting them into a dataframe .. deprecated:: This is a legacy method. Use `read` in new code. @@ -1726,18 +1726,22 @@ def _do_convert_categoricals(self, data, value_label_dict, lbllist, return data def data_label(self): - """Returns data label of Stata file""" + """ + Return data label of Stata file. + """ return self.data_label def variable_labels(self): - """Returns variable labels as a dict, associating each variable name - with corresponding label + """ + Return variable labels as a dict, associating each variable name + with corresponding label. 
""" return dict(zip(self.varlist, self._variable_labels)) def value_labels(self): - """Returns a dict, associating each variable name a dict, associating - each value its corresponding label + """ + Return a dict, associating each variable name a dict, associating + each value its corresponding label. """ if not self._value_labels_read: self._read_value_labels() @@ -1747,7 +1751,7 @@ def value_labels(self): def _open_file_binary_write(fname): """ - Open a binary file or no-op if file-like + Open a binary file or no-op if file-like. Parameters ---------- @@ -1778,14 +1782,14 @@ def _set_endianness(endianness): def _pad_bytes(name, length): """ - Takes a char string and pads it with null bytes until it's length chars + Take a char string and pads it with null bytes until it's length chars. """ return name + "\x00" * (length - len(name)) def _convert_datetime_to_stata_type(fmt): """ - Converts from one of the stata date formats to a type in TYPE_MAP + Convert from one of the stata date formats to a type in TYPE_MAP. """ if fmt in ["tc", "%tc", "td", "%td", "tw", "%tw", "tm", "%tm", "tq", "%tq", "th", "%th", "ty", "%ty"]: @@ -1812,7 +1816,7 @@ def _maybe_convert_to_int_keys(convert_dates, varlist): def _dtype_to_stata_type(dtype, column): """ - Converts dtype types to stata types. Returns the byte of the given ordinal. + Convert dtype types to stata types. Returns the byte of the given ordinal. See TYPE_MAP and comments for an explanation. This is also explained in the dta spec. 1 - 244 are strings of this length @@ -1850,7 +1854,7 @@ def _dtype_to_stata_type(dtype, column): def _dtype_to_default_stata_fmt(dtype, column, dta_version=114, force_strl=False): """ - Maps numpy dtype to stata's default format for this type. Not terribly + Map numpy dtype to stata's default format for this type. Not terribly important since users can change this in Stata. Semantics are object -> "%DDs" where DD is the length of the string. 
If not a string, @@ -2385,32 +2389,22 @@ def _prepare_data(self): data = self._convert_strls(data) # 3. Convert bad string data to '' and pad to correct length - dtypes = [] - data_cols = [] - has_strings = False + dtypes = {} native_byteorder = self._byteorder == _set_endianness(sys.byteorder) for i, col in enumerate(data): typ = typlist[i] if typ <= self._max_string_length: - has_strings = True data[col] = data[col].fillna('').apply(_pad_bytes, args=(typ,)) stype = 'S{type}'.format(type=typ) - dtypes.append(('c' + str(i), stype)) - string = data[col].str.encode(self._encoding) - data_cols.append(string.values.astype(stype)) + dtypes[col] = stype + data[col] = data[col].str.encode(self._encoding).astype(stype) else: - values = data[col].values dtype = data[col].dtype if not native_byteorder: dtype = dtype.newbyteorder(self._byteorder) - dtypes.append(('c' + str(i), dtype)) - data_cols.append(values) - dtypes = np.dtype(dtypes) + dtypes[col] = dtype - if has_strings or not native_byteorder: - self.data = np.fromiter(zip(*data_cols), dtype=dtypes) - else: - self.data = data.to_records(index=False) + self.data = data.to_records(index=False, column_dtypes=dtypes) def _write_data(self): data = self.data diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py index e543ab88f53b2..48d870bfc2e03 100644 --- a/pandas/plotting/_core.py +++ b/pandas/plotting/_core.py @@ -39,7 +39,7 @@ else: _HAS_MPL = True if get_option('plotting.matplotlib.register_converters'): - _converter.register(explicit=True) + _converter.register(explicit=False) def _raise_if_no_mpl(): @@ -1413,7 +1413,7 @@ def orientation(self): Returns ------- - axes : matplotlib.axes.Axes or numpy.ndarray of them + matplotlib.axes.Axes or numpy.ndarray of them See Also -------- @@ -1809,26 +1809,26 @@ def _plot(data, x=None, y=None, subplots=False, Allows plotting of one column versus another""" series_coord = "" -df_unique = """stacked : boolean, default False in line and +df_unique = """stacked : bool, 
default False in line and bar plots, and True in area plot. If True, create stacked plot. - sort_columns : boolean, default False + sort_columns : bool, default False Sort column names to determine plot ordering - secondary_y : boolean or sequence, default False + secondary_y : bool or sequence, default False Whether to plot on the secondary y-axis If a list/tuple, which columns to plot on secondary y-axis""" series_unique = """label : label argument to provide to plot - secondary_y : boolean or sequence of ints, default False + secondary_y : bool or sequence of ints, default False If True then y-axis will be on the right""" df_ax = """ax : matplotlib axes object, default None - subplots : boolean, default False + subplots : bool, default False Make separate subplots for each column - sharex : boolean, default True if ax is None else False + sharex : bool, default True if ax is None else False In case subplots=True, share x axis and set some x axis labels to invisible; defaults to True if ax is None otherwise False if an ax is passed in; Be aware, that passing in both an ax and sharex=True will alter all x axis labels for all axis in a figure! - sharey : boolean, default False + sharey : bool, default False In case subplots=True, share y axis and set some y axis labels to invisible layout : tuple (optional) @@ -1882,23 +1882,23 @@ def _plot(data, x=None, y=None, subplots=False, %(klass_kind)s %(klass_ax)s figsize : a tuple (width, height) in inches - use_index : boolean, default True + use_index : bool, default True Use index as ticks for x axis title : string or list Title to use for the plot. If a string is passed, print the string at the top of the figure. If a list is passed and `subplots` is True, print each item in the list above the corresponding subplot. 
- grid : boolean, default None (matlab style default) + grid : bool, default None (matlab style default) Axis grid lines legend : False/True/'reverse' Place legend on axis subplots style : list or dict matplotlib line style per column - logx : boolean, default False + logx : bool, default False Use log scaling on x axis - logy : boolean, default False + logy : bool, default False Use log scaling on y axis - loglog : boolean, default False + loglog : bool, default False Use log scaling on both x and y axes xticks : sequence Values to use for the xticks @@ -1913,12 +1913,12 @@ def _plot(data, x=None, y=None, subplots=False, colormap : str or matplotlib colormap object, default None Colormap to select colors from. If string, load colormap with that name from matplotlib. - colorbar : boolean, optional + colorbar : bool, optional If True, plot colorbar (only relevant for 'scatter' and 'hexbin' plots) position : float Specify relative alignments for bar plot layout. From 0 (left/bottom-end) to 1 (right/top-end). Default is 0.5 (center) - table : boolean, Series or DataFrame, default False + table : bool, Series or DataFrame, default False If True, draw a table using the data in the DataFrame and the data will be transposed to meet matplotlib's default layout. If a Series or DataFrame is passed, use passed data to draw a table. @@ -1927,7 +1927,7 @@ def _plot(data, x=None, y=None, subplots=False, detail. xerr : same types as yerr. 
%(klass_unique)s - mark_right : boolean, default True + mark_right : bool, default True When using a secondary_y axis, automatically mark the column labels with "(right)" in the legend `**kwds` : keywords @@ -1935,7 +1935,7 @@ def _plot(data, x=None, y=None, subplots=False, Returns ------- - axes : :class:`matplotlib.axes.Axes` or numpy.ndarray of them + :class:`matplotlib.axes.Axes` or numpy.ndarray of them Notes ----- @@ -2025,7 +2025,7 @@ def plot_series(data, kind='line', ax=None, # Series unique rot : int or float, default 0 The rotation angle of labels (in degrees) with respect to the screen coordinate system. - grid : boolean, default True + grid : bool, default True Setting this to True will show the grid. figsize : A tuple (width, height) in inches The size of the figure to create in matplotlib. @@ -2050,9 +2050,17 @@ def plot_series(data, kind='line', ax=None, # Series unique Returns ------- - result : + result + See Notes. + + See Also + -------- + Series.plot.hist: Make a histogram. + matplotlib.pyplot.boxplot : Matplotlib equivalent plot. - The return type depends on the `return_type` parameter: + Notes + ----- + The return type depends on the `return_type` parameter: * 'axes' : object of class matplotlib.axes.Axes * 'dict' : dict of matplotlib.lines.Line2D objects @@ -2062,14 +2070,8 @@ def plot_series(data, kind='line', ax=None, # Series unique * :class:`~pandas.Series` * :class:`~numpy.array` (for ``return_type = None``) + Return Series or numpy.array. - See Also - -------- - Series.plot.hist: Make a histogram. - matplotlib.pyplot.boxplot : Matplotlib equivalent plot. - - Notes - ----- Use ``return_type='dict'`` when you want to tweak the appearance of the lines after plotting. In this case a dict containing the Lines making up the boxes, caps, fliers, medians, and whiskers is returned. 
@@ -2271,7 +2273,7 @@ def scatter_plot(data, x, y, by=None, ax=None, figsize=None, grid=False, Returns ------- - fig : matplotlib.Figure + matplotlib.Figure """ import matplotlib.pyplot as plt @@ -2320,7 +2322,7 @@ def hist_frame(data, column=None, by=None, grid=True, xlabelsize=None, If passed, will be used to limit data to a subset of columns. by : object, optional If passed, then used to form histograms for separate groups. - grid : boolean, default True + grid : bool, default True Whether to show axis grid lines. xlabelsize : int, default None If specified changes the x-axis label size. @@ -2334,13 +2336,13 @@ def hist_frame(data, column=None, by=None, grid=True, xlabelsize=None, y labels rotated 90 degrees clockwise. ax : Matplotlib axes object, default None The axes to plot the histogram on. - sharex : boolean, default True if ax is None else False + sharex : bool, default True if ax is None else False In case subplots=True, share x axis and set some x axis labels to invisible; defaults to True if ax is None otherwise False if an ax is passed in. Note that passing in both an ax and sharex=True will alter all x axis labels for all subplots in a figure. - sharey : boolean, default False + sharey : bool, default False In case subplots=True, share y axis and set some y axis labels to invisible. 
figsize : tuple @@ -2359,7 +2361,7 @@ def hist_frame(data, column=None, by=None, grid=True, xlabelsize=None, Returns ------- - axes : matplotlib.AxesSubplot or numpy.ndarray of them + matplotlib.AxesSubplot or numpy.ndarray of them See Also -------- @@ -2427,7 +2429,7 @@ def hist_series(self, by=None, ax=None, grid=True, xlabelsize=None, If passed, then used to form histograms for separate groups ax : matplotlib axis object If not passed, uses gca() - grid : boolean, default True + grid : bool, default True Whether to show axis grid lines xlabelsize : int, default None If specified changes the x-axis label size @@ -2509,15 +2511,15 @@ def grouped_hist(data, column=None, by=None, ax=None, bins=50, figsize=None, bins : int, default 50 figsize : tuple, optional layout : optional - sharex : boolean, default False - sharey : boolean, default False + sharex : bool, default False + sharey : bool, default False rot : int, default 90 grid : bool, default True kwargs : dict, keyword arguments passed to matplotlib.Axes.hist Returns ------- - axes : collection of Matplotlib Axes + collection of Matplotlib Axes """ _raise_if_no_mpl() _converter._WARN = False @@ -2548,7 +2550,7 @@ def boxplot_frame_groupby(grouped, subplots=True, column=None, fontsize=None, Parameters ---------- grouped : Grouped DataFrame - subplots : + subplots : bool * ``False`` - no subplots will be used * ``True`` - create a subplot for each group column : column name or list of names, or vector @@ -2751,7 +2753,7 @@ def line(self, **kwds): Returns ------- - axes : :class:`matplotlib.axes.Axes` or numpy.ndarray of them + :class:`matplotlib.axes.Axes` or numpy.ndarray of them Examples -------- @@ -2776,7 +2778,7 @@ def bar(self, **kwds): Returns ------- - axes : :class:`matplotlib.axes.Axes` or numpy.ndarray of them + :class:`matplotlib.axes.Axes` or numpy.ndarray of them """ return self(kind='bar', **kwds) @@ -2792,7 +2794,7 @@ def barh(self, **kwds): Returns ------- - axes : :class:`matplotlib.axes.Axes` 
or numpy.ndarray of them + :class:`matplotlib.axes.Axes` or numpy.ndarray of them """ return self(kind='barh', **kwds) @@ -2808,7 +2810,7 @@ def box(self, **kwds): Returns ------- - axes : :class:`matplotlib.axes.Axes` or numpy.ndarray of them + :class:`matplotlib.axes.Axes` or numpy.ndarray of them """ return self(kind='box', **kwds) @@ -2826,7 +2828,7 @@ def hist(self, bins=10, **kwds): Returns ------- - axes : :class:`matplotlib.axes.Axes` or numpy.ndarray of them + :class:`matplotlib.axes.Axes` or numpy.ndarray of them """ return self(kind='hist', bins=bins, **kwds) @@ -2885,7 +2887,7 @@ def area(self, **kwds): Returns ------- - axes : :class:`matplotlib.axes.Axes` or numpy.ndarray of them + :class:`matplotlib.axes.Axes` or numpy.ndarray of them """ return self(kind='area', **kwds) @@ -2901,7 +2903,7 @@ def pie(self, **kwds): Returns ------- - axes : :class:`matplotlib.axes.Axes` or numpy.ndarray of them + :class:`matplotlib.axes.Axes` or numpy.ndarray of them """ return self(kind='pie', **kwds) @@ -2957,12 +2959,12 @@ def line(self, x=None, y=None, **kwds): Either the location or the label of the columns to be used. By default, it will use the remaining DataFrame numeric columns. **kwds - Keyword arguments to pass on to :meth:`pandas.DataFrame.plot`. + Keyword arguments to pass on to :meth:`DataFrame.plot`. Returns ------- - axes : :class:`matplotlib.axes.Axes` or :class:`numpy.ndarray` - Returns an ndarray when ``subplots=True``. + :class:`matplotlib.axes.Axes` or :class:`numpy.ndarray` + Return an ndarray when ``subplots=True``. See Also -------- @@ -3022,18 +3024,18 @@ def bar(self, x=None, y=None, **kwds): all numerical columns are used. **kwds Additional keyword arguments are documented in - :meth:`pandas.DataFrame.plot`. + :meth:`DataFrame.plot`. Returns ------- - axes : matplotlib.axes.Axes or np.ndarray of them + matplotlib.axes.Axes or np.ndarray of them An ndarray is returned with one :class:`matplotlib.axes.Axes` per column when ``subplots=True``. 
See Also -------- - pandas.DataFrame.plot.barh : Horizontal bar plot. - pandas.DataFrame.plot : Make plots of a DataFrame. + DataFrame.plot.barh : Horizontal bar plot. + DataFrame.plot : Make plots of a DataFrame. matplotlib.pyplot.bar : Make a bar plot with matplotlib. Examples @@ -3104,16 +3106,16 @@ def barh(self, x=None, y=None, **kwds): y : label or position, default All numeric columns in dataframe Columns to be plotted from the DataFrame. **kwds - Keyword arguments to pass on to :meth:`pandas.DataFrame.plot`. + Keyword arguments to pass on to :meth:`DataFrame.plot`. Returns ------- - axes : :class:`matplotlib.axes.Axes` or numpy.ndarray of them. + :class:`matplotlib.axes.Axes` or numpy.ndarray of them See Also -------- - pandas.DataFrame.plot.bar: Vertical bar plot. - pandas.DataFrame.plot : Make plots of DataFrame using matplotlib. + DataFrame.plot.bar: Vertical bar plot. + DataFrame.plot : Make plots of DataFrame using matplotlib. matplotlib.axes.Axes.bar : Plot a vertical bar plot using matplotlib. Examples @@ -3191,16 +3193,16 @@ def box(self, by=None, **kwds): Column in the DataFrame to group by. **kwds : optional Additional keywords are documented in - :meth:`pandas.DataFrame.plot`. + :meth:`DataFrame.plot`. Returns ------- - axes : :class:`matplotlib.axes.Axes` or numpy.ndarray of them + :class:`matplotlib.axes.Axes` or numpy.ndarray of them See Also -------- - pandas.DataFrame.boxplot: Another method to draw a box plot. - pandas.Series.plot.box: Draw a box plot from a Series object. + DataFrame.boxplot: Another method to draw a box plot. + Series.plot.box: Draw a box plot from a Series object. matplotlib.pyplot.boxplot: Draw a box plot in matplotlib. Examples @@ -3234,11 +3236,12 @@ def hist(self, by=None, bins=10, **kwds): Number of histogram bins to be used. **kwds Additional keyword arguments are documented in - :meth:`pandas.DataFrame.plot`. + :meth:`DataFrame.plot`. Returns ------- - axes : matplotlib.AxesSubplot histogram. 
+ class:`matplotlib.AxesSubplot` + Return a histogram plot. See Also -------- @@ -3327,12 +3330,12 @@ def area(self, x=None, y=None, **kwds): unstacked plot. **kwds : optional Additional keyword arguments are documented in - :meth:`pandas.DataFrame.plot`. + :meth:`DataFrame.plot`. Returns ------- matplotlib.axes.Axes or numpy.ndarray - Area plot, or array of area plots if subplots is True + Area plot, or array of area plots if subplots is True. See Also -------- @@ -3398,11 +3401,11 @@ def pie(self, y=None, **kwds): Label or position of the column to plot. If not provided, ``subplots=True`` argument must be passed. **kwds - Keyword arguments to pass on to :meth:`pandas.DataFrame.plot`. + Keyword arguments to pass on to :meth:`DataFrame.plot`. Returns ------- - axes : matplotlib.axes.Axes or np.ndarray of them. + matplotlib.axes.Axes or np.ndarray of them A NumPy array is returned when `subplots` is True. See Also @@ -3474,11 +3477,11 @@ def scatter(self, x, y, s=None, c=None, **kwds): marker points according to a colormap. **kwds - Keyword arguments to pass on to :meth:`pandas.DataFrame.plot`. + Keyword arguments to pass on to :meth:`DataFrame.plot`. Returns ------- - axes : :class:`matplotlib.axes.Axes` or numpy.ndarray of them + :class:`matplotlib.axes.Axes` or numpy.ndarray of them See Also -------- @@ -3548,7 +3551,7 @@ def hexbin(self, x, y, C=None, reduce_C_function=None, gridsize=None, y-direction. **kwds Additional keyword arguments are documented in - :meth:`pandas.DataFrame.plot`. + :meth:`DataFrame.plot`. Returns ------- diff --git a/pandas/plotting/_misc.py b/pandas/plotting/_misc.py index 1c69c03025e00..5171ea68fd497 100644 --- a/pandas/plotting/_misc.py +++ b/pandas/plotting/_misc.py @@ -178,11 +178,11 @@ def radviz(frame, class_column, ax=None, color=None, colormap=None, **kwds): Returns ------- - axes : :class:`matplotlib.axes.Axes` + class:`matplotlib.axes.Axes` See Also -------- - pandas.plotting.andrews_curves : Plot clustering visualization. 
+ plotting.andrews_curves : Plot clustering visualization. Examples -------- @@ -273,7 +273,7 @@ def normalize(series): def andrews_curves(frame, class_column, ax=None, samples=200, color=None, colormap=None, **kwds): """ - Generates a matplotlib plot of Andrews curves, for visualising clusters of + Generate a matplotlib plot of Andrews curves, for visualising clusters of multivariate data. Andrews curves have the functional form: @@ -302,7 +302,7 @@ def andrews_curves(frame, class_column, ax=None, samples=200, color=None, Returns ------- - ax : Matplotlib axis object + class:`matplotlip.axis.Axes` """ from math import sqrt, pi @@ -389,13 +389,13 @@ def bootstrap_plot(series, fig=None, size=50, samples=500, **kwds): Returns ------- - fig : matplotlib.figure.Figure - Matplotlib figure + matplotlib.figure.Figure + Matplotlib figure. See Also -------- - pandas.DataFrame.plot : Basic plotting for DataFrame objects. - pandas.Series.plot : Basic plotting for Series objects. + DataFrame.plot : Basic plotting for DataFrame objects. + Series.plot : Basic plotting for Series objects. Examples -------- @@ -490,7 +490,7 @@ def parallel_coordinates(frame, class_column, cols=None, ax=None, color=None, Returns ------- - ax: matplotlib axis object + class:`matplotlib.axis.Axes` Examples -------- @@ -579,7 +579,7 @@ def lag_plot(series, lag=1, ax=None, **kwds): Returns ------- - ax: Matplotlib axis object + class:`matplotlib.axis.Axes` """ import matplotlib.pyplot as plt @@ -598,7 +598,8 @@ def lag_plot(series, lag=1, ax=None, **kwds): def autocorrelation_plot(series, ax=None, **kwds): - """Autocorrelation plot for time series. + """ + Autocorrelation plot for time series. 
Parameters: ----------- @@ -609,7 +610,7 @@ def autocorrelation_plot(series, ax=None, **kwds): Returns: ----------- - ax: Matplotlib axis object + class:`matplotlib.axis.Axes` """ import matplotlib.pyplot as plt n = len(series) diff --git a/pandas/tests/arithmetic/test_datetime64.py b/pandas/tests/arithmetic/test_datetime64.py index 405dc0805a285..c81a371f37dc1 100644 --- a/pandas/tests/arithmetic/test_datetime64.py +++ b/pandas/tests/arithmetic/test_datetime64.py @@ -1440,6 +1440,20 @@ def test_dt64arr_add_sub_offset_ndarray(self, tz_naive_fixture, class TestDatetime64OverflowHandling(object): # TODO: box + de-duplicate + def test_dt64_overflow_masking(self, box_with_array): + # GH#25317 + left = Series([Timestamp('1969-12-31')]) + right = Series([NaT]) + + left = tm.box_expected(left, box_with_array) + right = tm.box_expected(right, box_with_array) + + expected = TimedeltaIndex([NaT]) + expected = tm.box_expected(expected, box_with_array) + + result = left - right + tm.assert_equal(result, expected) + def test_dt64_series_arith_overflow(self): # GH#12534, fixed by GH#19024 dt = pd.Timestamp('1700-01-31') diff --git a/pandas/tests/arithmetic/test_timedelta64.py b/pandas/tests/arithmetic/test_timedelta64.py index c31d7acad3111..0faed74d4a021 100644 --- a/pandas/tests/arithmetic/test_timedelta64.py +++ b/pandas/tests/arithmetic/test_timedelta64.py @@ -205,10 +205,20 @@ def test_subtraction_ops(self): td = Timedelta('1 days') dt = Timestamp('20130101') - pytest.raises(TypeError, lambda: tdi - dt) - pytest.raises(TypeError, lambda: tdi - dti) - pytest.raises(TypeError, lambda: td - dt) - pytest.raises(TypeError, lambda: td - dti) + msg = "cannot subtract a datelike from a TimedeltaArray" + with pytest.raises(TypeError, match=msg): + tdi - dt + with pytest.raises(TypeError, match=msg): + tdi - dti + + msg = (r"descriptor '__sub__' requires a 'datetime\.datetime' object" + " but received a 'Timedelta'") + with pytest.raises(TypeError, match=msg): + td - dt + + msg = 
"bad operand type for unary -: 'DatetimeArray'" + with pytest.raises(TypeError, match=msg): + td - dti result = dt - dti expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'], name='bar') @@ -265,19 +275,38 @@ def _check(result, expected): _check(result, expected) # tz mismatches - pytest.raises(TypeError, lambda: dt_tz - ts) - pytest.raises(TypeError, lambda: dt_tz - dt) - pytest.raises(TypeError, lambda: dt_tz - ts_tz2) - pytest.raises(TypeError, lambda: dt - dt_tz) - pytest.raises(TypeError, lambda: ts - dt_tz) - pytest.raises(TypeError, lambda: ts_tz2 - ts) - pytest.raises(TypeError, lambda: ts_tz2 - dt) - pytest.raises(TypeError, lambda: ts_tz - ts_tz2) + msg = ("Timestamp subtraction must have the same timezones or no" + " timezones") + with pytest.raises(TypeError, match=msg): + dt_tz - ts + msg = "can't subtract offset-naive and offset-aware datetimes" + with pytest.raises(TypeError, match=msg): + dt_tz - dt + msg = ("Timestamp subtraction must have the same timezones or no" + " timezones") + with pytest.raises(TypeError, match=msg): + dt_tz - ts_tz2 + msg = "can't subtract offset-naive and offset-aware datetimes" + with pytest.raises(TypeError, match=msg): + dt - dt_tz + msg = ("Timestamp subtraction must have the same timezones or no" + " timezones") + with pytest.raises(TypeError, match=msg): + ts - dt_tz + with pytest.raises(TypeError, match=msg): + ts_tz2 - ts + with pytest.raises(TypeError, match=msg): + ts_tz2 - dt + with pytest.raises(TypeError, match=msg): + ts_tz - ts_tz2 # with dti - pytest.raises(TypeError, lambda: dti - ts_tz) - pytest.raises(TypeError, lambda: dti_tz - ts) - pytest.raises(TypeError, lambda: dti_tz - ts_tz2) + with pytest.raises(TypeError, match=msg): + dti - ts_tz + with pytest.raises(TypeError, match=msg): + dti_tz - ts + with pytest.raises(TypeError, match=msg): + dti_tz - ts_tz2 result = dti_tz - dt_tz expected = TimedeltaIndex(['0 days', '1 days', '2 days']) @@ -349,8 +378,11 @@ def test_addition_ops(self): 
tm.assert_index_equal(result, expected) # unequal length - pytest.raises(ValueError, lambda: tdi + dti[0:1]) - pytest.raises(ValueError, lambda: tdi[0:1] + dti) + msg = "cannot add indices of unequal length" + with pytest.raises(ValueError, match=msg): + tdi + dti[0:1] + with pytest.raises(ValueError, match=msg): + tdi[0:1] + dti # random indexes with pytest.raises(NullFrequencyError): diff --git a/pandas/tests/arrays/categorical/test_analytics.py b/pandas/tests/arrays/categorical/test_analytics.py index 5efcd527de8d8..7ce82d5bcdded 100644 --- a/pandas/tests/arrays/categorical/test_analytics.py +++ b/pandas/tests/arrays/categorical/test_analytics.py @@ -18,8 +18,11 @@ def test_min_max(self): # unordered cats have no min/max cat = Categorical(["a", "b", "c", "d"], ordered=False) - pytest.raises(TypeError, lambda: cat.min()) - pytest.raises(TypeError, lambda: cat.max()) + msg = "Categorical is not ordered for operation {}" + with pytest.raises(TypeError, match=msg.format('min')): + cat.min() + with pytest.raises(TypeError, match=msg.format('max')): + cat.max() cat = Categorical(["a", "b", "c", "d"], ordered=True) _min = cat.min() @@ -108,18 +111,24 @@ def test_searchsorted(self): tm.assert_numpy_array_equal(res_ser, exp) # Searching for a single value that is not from the Categorical - pytest.raises(KeyError, lambda: c1.searchsorted('cucumber')) - pytest.raises(KeyError, lambda: s1.searchsorted('cucumber')) + msg = r"Value\(s\) to be inserted must be in categories" + with pytest.raises(KeyError, match=msg): + c1.searchsorted('cucumber') + with pytest.raises(KeyError, match=msg): + s1.searchsorted('cucumber') # Searching for multiple values one of each is not from the Categorical - pytest.raises(KeyError, - lambda: c1.searchsorted(['bread', 'cucumber'])) - pytest.raises(KeyError, - lambda: s1.searchsorted(['bread', 'cucumber'])) + with pytest.raises(KeyError, match=msg): + c1.searchsorted(['bread', 'cucumber']) + with pytest.raises(KeyError, match=msg): + 
s1.searchsorted(['bread', 'cucumber']) # searchsorted call for unordered Categorical - pytest.raises(ValueError, lambda: c2.searchsorted('apple')) - pytest.raises(ValueError, lambda: s2.searchsorted('apple')) + msg = "Categorical not ordered" + with pytest.raises(ValueError, match=msg): + c2.searchsorted('apple') + with pytest.raises(ValueError, match=msg): + s2.searchsorted('apple') def test_unique(self): # categories are reordered based on value when ordered=False diff --git a/pandas/tests/arrays/categorical/test_constructors.py b/pandas/tests/arrays/categorical/test_constructors.py index 25c299692ceca..f07e3aba53cd4 100644 --- a/pandas/tests/arrays/categorical/test_constructors.py +++ b/pandas/tests/arrays/categorical/test_constructors.py @@ -212,6 +212,18 @@ def test_constructor(self): c = Categorical(np.array([], dtype='int64'), # noqa categories=[3, 2, 1], ordered=True) + def test_constructor_with_existing_categories(self): + # GH25318: constructing with pd.Series used to bogusly skip recoding + # categories + c0 = Categorical(["a", "b", "c", "a"]) + c1 = Categorical(["a", "b", "c", "a"], categories=["b", "c"]) + + c2 = Categorical(c0, categories=c1.categories) + tm.assert_categorical_equal(c1, c2) + + c3 = Categorical(Series(c0), categories=c1.categories) + tm.assert_categorical_equal(c1, c3) + def test_constructor_not_sequence(self): # https://github.com/pandas-dev/pandas/issues/16022 msg = r"^Parameter 'categories' must be list-like, was" diff --git a/pandas/tests/arrays/categorical/test_operators.py b/pandas/tests/arrays/categorical/test_operators.py index b2965bbcc456a..e1264722aedcd 100644 --- a/pandas/tests/arrays/categorical/test_operators.py +++ b/pandas/tests/arrays/categorical/test_operators.py @@ -4,6 +4,8 @@ import numpy as np import pytest +from pandas.compat import PY2 + import pandas as pd from pandas import Categorical, DataFrame, Series, date_range from pandas.tests.arrays.categorical.common import TestCategorical @@ -17,6 +19,7 @@ def 
test_categories_none_comparisons(self): 'a', 'c', 'c', 'c'], ordered=True) tm.assert_categorical_equal(factor, self.factor) + @pytest.mark.skipif(PY2, reason="pytest.raises match regex fails") def test_comparisons(self): result = self.factor[self.factor == 'a'] @@ -95,16 +98,24 @@ def test_comparisons(self): # comparison (in both directions) with Series will raise s = Series(["b", "b", "b"]) - pytest.raises(TypeError, lambda: cat > s) - pytest.raises(TypeError, lambda: cat_rev > s) - pytest.raises(TypeError, lambda: s < cat) - pytest.raises(TypeError, lambda: s < cat_rev) + msg = ("Cannot compare a Categorical for op __gt__ with type" + r" <class 'numpy\.ndarray'>") + with pytest.raises(TypeError, match=msg): + cat > s + with pytest.raises(TypeError, match=msg): + cat_rev > s + with pytest.raises(TypeError, match=msg): + s < cat + with pytest.raises(TypeError, match=msg): + s < cat_rev # comparison with numpy.array will raise in both direction, but only on # newer numpy versions a = np.array(["b", "b", "b"]) - pytest.raises(TypeError, lambda: cat > a) - pytest.raises(TypeError, lambda: cat_rev > a) + with pytest.raises(TypeError, match=msg): + cat > a + with pytest.raises(TypeError, match=msg): + cat_rev > a # Make sure that unequal comparison take the categories order in # account @@ -163,16 +174,23 @@ def test_comparison_with_unknown_scalars(self): # for unequal comps, but not for equal/not equal cat = Categorical([1, 2, 3], ordered=True) - pytest.raises(TypeError, lambda: cat < 4) - pytest.raises(TypeError, lambda: cat > 4) - pytest.raises(TypeError, lambda: 4 < cat) - pytest.raises(TypeError, lambda: 4 > cat) + msg = ("Cannot compare a Categorical for op __{}__ with a scalar," + " which is not a category") + with pytest.raises(TypeError, match=msg.format('lt')): + cat < 4 + with pytest.raises(TypeError, match=msg.format('gt')): + cat > 4 + with pytest.raises(TypeError, match=msg.format('gt')): + 4 < cat + with pytest.raises(TypeError, match=msg.format('lt')): + 
4 > cat tm.assert_numpy_array_equal(cat == 4, np.array([False, False, False])) tm.assert_numpy_array_equal(cat != 4, np.array([True, True, True])) + @pytest.mark.skipif(PY2, reason="pytest.raises match regex fails") @pytest.mark.parametrize('data,reverse,base', [ (list("abc"), list("cba"), list("bbb")), ([1, 2, 3], [3, 2, 1], [2, 2, 2])] @@ -219,16 +237,26 @@ def test_comparisons(self, data, reverse, base): # categorical cannot be compared to Series or numpy array, and also # not the other way around - pytest.raises(TypeError, lambda: cat > s) - pytest.raises(TypeError, lambda: cat_rev > s) - pytest.raises(TypeError, lambda: cat > a) - pytest.raises(TypeError, lambda: cat_rev > a) + msg = ("Cannot compare a Categorical for op __gt__ with type" + r" <class 'numpy\.ndarray'>") + with pytest.raises(TypeError, match=msg): + cat > s + with pytest.raises(TypeError, match=msg): + cat_rev > s + with pytest.raises(TypeError, match=msg): + cat > a + with pytest.raises(TypeError, match=msg): + cat_rev > a - pytest.raises(TypeError, lambda: s < cat) - pytest.raises(TypeError, lambda: s < cat_rev) + with pytest.raises(TypeError, match=msg): + s < cat + with pytest.raises(TypeError, match=msg): + s < cat_rev - pytest.raises(TypeError, lambda: a < cat) - pytest.raises(TypeError, lambda: a < cat_rev) + with pytest.raises(TypeError, match=msg): + a < cat + with pytest.raises(TypeError, match=msg): + a < cat_rev @pytest.mark.parametrize('ctor', [ lambda *args, **kwargs: Categorical(*args, **kwargs), @@ -287,16 +315,21 @@ def test_numeric_like_ops(self): right=False, labels=cat_labels) # numeric ops should not succeed - for op in ['__add__', '__sub__', '__mul__', '__truediv__']: - pytest.raises(TypeError, - lambda: getattr(df, op)(df)) + for op, str_rep in [('__add__', r'\+'), + ('__sub__', '-'), + ('__mul__', r'\*'), + ('__truediv__', '/')]: + msg = r"Series cannot perform the operation {}".format(str_rep) + with pytest.raises(TypeError, match=msg): + getattr(df, op)(df) # reduction 
ops should not succeed (unless specifically defined, e.g. # min/max) s = df['value_group'] for op in ['kurt', 'skew', 'var', 'std', 'mean', 'sum', 'median']: - pytest.raises(TypeError, - lambda: getattr(s, op)(numeric_only=False)) + msg = "Categorical cannot perform the operation {}".format(op) + with pytest.raises(TypeError, match=msg): + getattr(s, op)(numeric_only=False) # mad technically works because it takes always the numeric data @@ -306,8 +339,13 @@ def test_numeric_like_ops(self): np.sum(s) # numeric ops on a Series - for op in ['__add__', '__sub__', '__mul__', '__truediv__']: - pytest.raises(TypeError, lambda: getattr(s, op)(2)) + for op, str_rep in [('__add__', r'\+'), + ('__sub__', '-'), + ('__mul__', r'\*'), + ('__truediv__', '/')]: + msg = r"Series cannot perform the operation {}".format(str_rep) + with pytest.raises(TypeError, match=msg): + getattr(s, op)(2) # invalid ufunc with pytest.raises(TypeError): diff --git a/pandas/tests/arrays/sparse/test_libsparse.py b/pandas/tests/arrays/sparse/test_libsparse.py index 6e9d790bf85f3..2cbe7d9ea084c 100644 --- a/pandas/tests/arrays/sparse/test_libsparse.py +++ b/pandas/tests/arrays/sparse/test_libsparse.py @@ -449,11 +449,13 @@ def test_check_integrity(self): # also OK even though empty index = BlockIndex(1, locs, lengths) # noqa - # block extend beyond end - pytest.raises(Exception, BlockIndex, 10, [5], [10]) + msg = "Block 0 extends beyond end" + with pytest.raises(ValueError, match=msg): + BlockIndex(10, [5], [10]) - # block overlap - pytest.raises(Exception, BlockIndex, 10, [2, 5], [5, 3]) + msg = "Block 0 overlaps" + with pytest.raises(ValueError, match=msg): + BlockIndex(10, [2, 5], [5, 3]) def test_to_int_index(self): locs = [0, 10] diff --git a/pandas/tests/arrays/test_array.py b/pandas/tests/arrays/test_array.py index 9fea1989e46df..b68ec2bf348b4 100644 --- a/pandas/tests/arrays/test_array.py +++ b/pandas/tests/arrays/test_array.py @@ -9,6 +9,7 @@ import pandas as pd from pandas.api.extensions 
import register_extension_dtype +from pandas.api.types import is_scalar from pandas.core.arrays import PandasArray, integer_array, period_array from pandas.tests.extension.decimal import ( DecimalArray, DecimalDtype, to_decimal) @@ -254,3 +255,51 @@ def test_array_not_registered(registry_without_decimal): result = pd.array(data, dtype=DecimalDtype) expected = DecimalArray._from_sequence(data) tm.assert_equal(result, expected) + + +class TestArrayAnalytics(object): + def test_searchsorted(self, string_dtype): + arr = pd.array(['a', 'b', 'c'], dtype=string_dtype) + + result = arr.searchsorted('a', side='left') + assert is_scalar(result) + assert result == 0 + + result = arr.searchsorted('a', side='right') + assert is_scalar(result) + assert result == 1 + + def test_searchsorted_numeric_dtypes_scalar(self, any_real_dtype): + arr = pd.array([1, 3, 90], dtype=any_real_dtype) + result = arr.searchsorted(30) + assert is_scalar(result) + assert result == 2 + + result = arr.searchsorted([30]) + expected = np.array([2], dtype=np.intp) + tm.assert_numpy_array_equal(result, expected) + + def test_searchsorted_numeric_dtypes_vector(self, any_real_dtype): + arr = pd.array([1, 3, 90], dtype=any_real_dtype) + result = arr.searchsorted([2, 30]) + expected = np.array([1, 2], dtype=np.intp) + tm.assert_numpy_array_equal(result, expected) + + @pytest.mark.parametrize('arr, val', [ + [pd.date_range('20120101', periods=10, freq='2D'), + pd.Timestamp('20120102')], + [pd.date_range('20120101', periods=10, freq='2D', tz='Asia/Hong_Kong'), + pd.Timestamp('20120102', tz='Asia/Hong_Kong')], + [pd.timedelta_range(start='1 day', end='10 days', periods=10), + pd.Timedelta('2 days')]]) + def test_search_sorted_datetime64_scalar(self, arr, val): + arr = pd.array(arr) + result = arr.searchsorted(val) + assert is_scalar(result) + assert result == 1 + + def test_searchsorted_sorter(self, any_real_dtype): + arr = pd.array([3, 1, 2], dtype=any_real_dtype) + result = arr.searchsorted([0, 3], 
sorter=np.argsort(arr)) + expected = np.array([0, 2], dtype=np.intp) + tm.assert_numpy_array_equal(result, expected) diff --git a/pandas/tests/arrays/test_integer.py b/pandas/tests/arrays/test_integer.py index 09298bb5cd08d..67e7db5460e6d 100644 --- a/pandas/tests/arrays/test_integer.py +++ b/pandas/tests/arrays/test_integer.py @@ -339,7 +339,7 @@ def _compare_other(self, data, op_name, other): expected = pd.Series(op(data._data, other)) # fill the nan locations - expected[data._mask] = True if op_name == '__ne__' else False + expected[data._mask] = op_name == '__ne__' tm.assert_series_equal(result, expected) @@ -351,7 +351,7 @@ def _compare_other(self, data, op_name, other): expected = op(expected, other) # fill the nan locations - expected[data._mask] = True if op_name == '__ne__' else False + expected[data._mask] = op_name == '__ne__' tm.assert_series_equal(result, expected) diff --git a/pandas/tests/arrays/test_timedeltas.py b/pandas/tests/arrays/test_timedeltas.py index 6b4662ca02e80..1fec533a14a6f 100644 --- a/pandas/tests/arrays/test_timedeltas.py +++ b/pandas/tests/arrays/test_timedeltas.py @@ -9,6 +9,18 @@ class TestTimedeltaArrayConstructor(object): + def test_only_1dim_accepted(self): + # GH#25282 + arr = np.array([0, 1, 2, 3], dtype='m8[h]').astype('m8[ns]') + + with pytest.raises(ValueError, match="Only 1-dimensional"): + # 2-dim + TimedeltaArray(arr.reshape(2, 2)) + + with pytest.raises(ValueError, match="Only 1-dimensional"): + # 0-dim + TimedeltaArray(arr[[0]].squeeze()) + def test_freq_validation(self): # ensure that the public constructor cannot create an invalid instance arr = np.array([0, 0, 1], dtype=np.int64) * 3600 * 10**9 @@ -51,6 +63,16 @@ def test_copy(self): class TestTimedeltaArray(object): + def test_np_sum(self): + # GH#25282 + vals = np.arange(5, dtype=np.int64).view('m8[h]').astype('m8[ns]') + arr = TimedeltaArray(vals) + result = np.sum(arr) + assert result == vals.sum() + + result = np.sum(pd.TimedeltaIndex(arr)) + assert result == 
vals.sum() + def test_from_sequence_dtype(self): msg = "dtype .*object.* cannot be converted to timedelta64" with pytest.raises(ValueError, match=msg): diff --git a/pandas/tests/computation/test_eval.py b/pandas/tests/computation/test_eval.py index c1ba15f428eb7..a14d8e4471c23 100644 --- a/pandas/tests/computation/test_eval.py +++ b/pandas/tests/computation/test_eval.py @@ -285,10 +285,14 @@ def check_operands(left, right, cmp_op): def check_simple_cmp_op(self, lhs, cmp1, rhs): ex = 'lhs {0} rhs'.format(cmp1) + msg = (r"only list-like( or dict-like)? objects are allowed to be" + r" passed to (DataFrame\.)?isin\(\), you passed a" + r" (\[|')bool(\]|')|" + "argument of type 'bool' is not iterable") if cmp1 in ('in', 'not in') and not is_list_like(rhs): - pytest.raises(TypeError, pd.eval, ex, engine=self.engine, - parser=self.parser, local_dict={'lhs': lhs, - 'rhs': rhs}) + with pytest.raises(TypeError, match=msg): + pd.eval(ex, engine=self.engine, parser=self.parser, + local_dict={'lhs': lhs, 'rhs': rhs}) else: expected = _eval_single_bin(lhs, cmp1, rhs, self.engine) result = pd.eval(ex, engine=self.engine, parser=self.parser) @@ -341,9 +345,11 @@ def check_floor_division(self, lhs, arith1, rhs): expected = lhs // rhs self.check_equal(res, expected) else: - pytest.raises(TypeError, pd.eval, ex, - local_dict={'lhs': lhs, 'rhs': rhs}, - engine=self.engine, parser=self.parser) + msg = (r"unsupported operand type\(s\) for //: 'VariableNode' and" + " 'VariableNode'") + with pytest.raises(TypeError, match=msg): + pd.eval(ex, local_dict={'lhs': lhs, 'rhs': rhs}, + engine=self.engine, parser=self.parser) def get_expected_pow_result(self, lhs, rhs): try: @@ -396,10 +402,14 @@ def check_compound_invert_op(self, lhs, cmp1, rhs): skip_these = 'in', 'not in' ex = '~(lhs {0} rhs)'.format(cmp1) + msg = (r"only list-like( or dict-like)? 
objects are allowed to be" + r" passed to (DataFrame\.)?isin\(\), you passed a" + r" (\[|')float(\]|')|" + "argument of type 'float' is not iterable") if is_scalar(rhs) and cmp1 in skip_these: - pytest.raises(TypeError, pd.eval, ex, engine=self.engine, - parser=self.parser, local_dict={'lhs': lhs, - 'rhs': rhs}) + with pytest.raises(TypeError, match=msg): + pd.eval(ex, engine=self.engine, parser=self.parser, + local_dict={'lhs': lhs, 'rhs': rhs}) else: # compound if is_scalar(lhs) and is_scalar(rhs): @@ -1101,8 +1111,9 @@ def test_simple_arith_ops(self): ex3 = '1 {0} (x + 1)'.format(op) if op in ('in', 'not in'): - pytest.raises(TypeError, pd.eval, ex, - engine=self.engine, parser=self.parser) + msg = "argument of type 'int' is not iterable" + with pytest.raises(TypeError, match=msg): + pd.eval(ex, engine=self.engine, parser=self.parser) else: expec = _eval_single_bin(1, op, 1, self.engine) x = self.eval(ex, engine=self.engine, parser=self.parser) @@ -1236,19 +1247,25 @@ def test_assignment_fails(self): df = DataFrame(np.random.randn(5, 3), columns=list('abc')) df2 = DataFrame(np.random.randn(5, 3)) expr1 = 'df = df2' - pytest.raises(ValueError, self.eval, expr1, - local_dict={'df': df, 'df2': df2}) + msg = "cannot assign without a target object" + with pytest.raises(ValueError, match=msg): + self.eval(expr1, local_dict={'df': df, 'df2': df2}) def test_assignment_column(self): df = DataFrame(np.random.randn(5, 2), columns=list('ab')) orig_df = df.copy() # multiple assignees - pytest.raises(SyntaxError, df.eval, 'd c = a + b') + with pytest.raises(SyntaxError, match="invalid syntax"): + df.eval('d c = a + b') # invalid assignees - pytest.raises(SyntaxError, df.eval, 'd,c = a + b') - pytest.raises(SyntaxError, df.eval, 'Timestamp("20131001") = a + b') + msg = "left hand side of an assignment must be a single name" + with pytest.raises(SyntaxError, match=msg): + df.eval('d,c = a + b') + msg = "can't assign to function call" + with pytest.raises(SyntaxError, 
match=msg): + df.eval('Timestamp("20131001") = a + b') # single assignment - existing variable expected = orig_df.copy() @@ -1291,7 +1308,9 @@ def f(): # multiple assignment df = orig_df.copy() df.eval('c = a + b', inplace=True) - pytest.raises(SyntaxError, df.eval, 'c = a = b') + msg = "can only assign a single expression" + with pytest.raises(SyntaxError, match=msg): + df.eval('c = a = b') # explicit targets df = orig_df.copy() @@ -1545,21 +1564,24 @@ def test_check_many_exprs(self): def test_fails_and(self): df = DataFrame(np.random.randn(5, 3)) - pytest.raises(NotImplementedError, pd.eval, 'df > 2 and df > 3', - local_dict={'df': df}, parser=self.parser, - engine=self.engine) + msg = "'BoolOp' nodes are not implemented" + with pytest.raises(NotImplementedError, match=msg): + pd.eval('df > 2 and df > 3', local_dict={'df': df}, + parser=self.parser, engine=self.engine) def test_fails_or(self): df = DataFrame(np.random.randn(5, 3)) - pytest.raises(NotImplementedError, pd.eval, 'df > 2 or df > 3', - local_dict={'df': df}, parser=self.parser, - engine=self.engine) + msg = "'BoolOp' nodes are not implemented" + with pytest.raises(NotImplementedError, match=msg): + pd.eval('df > 2 or df > 3', local_dict={'df': df}, + parser=self.parser, engine=self.engine) def test_fails_not(self): df = DataFrame(np.random.randn(5, 3)) - pytest.raises(NotImplementedError, pd.eval, 'not df > 2', - local_dict={'df': df}, parser=self.parser, - engine=self.engine) + msg = "'Not' nodes are not implemented" + with pytest.raises(NotImplementedError, match=msg): + pd.eval('not df > 2', local_dict={'df': df}, parser=self.parser, + engine=self.engine) def test_fails_ampersand(self): df = DataFrame(np.random.randn(5, 3)) # noqa diff --git a/pandas/tests/dtypes/test_common.py b/pandas/tests/dtypes/test_common.py index 62e96fd39a759..5c1f6ff405b3b 100644 --- a/pandas/tests/dtypes/test_common.py +++ b/pandas/tests/dtypes/test_common.py @@ -607,13 +607,16 @@ def test__get_dtype(input_param, result): 
assert com._get_dtype(input_param) == result -@pytest.mark.parametrize('input_param', [None, - 1, 1.2, - 'random string', - pd.DataFrame([1, 2])]) -def test__get_dtype_fails(input_param): +@pytest.mark.parametrize('input_param,expected_error_message', [ + (None, "Cannot deduce dtype from null object"), + (1, "data type not understood"), + (1.2, "data type not understood"), + ('random string', "data type 'random string' not understood"), + (pd.DataFrame([1, 2]), "data type not understood")]) +def test__get_dtype_fails(input_param, expected_error_message): # python objects - pytest.raises(TypeError, com._get_dtype, input_param) + with pytest.raises(TypeError, match=expected_error_message): + com._get_dtype(input_param) @pytest.mark.parametrize('input_param,result', [ diff --git a/pandas/tests/dtypes/test_dtypes.py b/pandas/tests/dtypes/test_dtypes.py index 0fe0a845f5129..4366f610871ff 100644 --- a/pandas/tests/dtypes/test_dtypes.py +++ b/pandas/tests/dtypes/test_dtypes.py @@ -3,6 +3,7 @@ import numpy as np import pytest +import pytz from pandas.core.dtypes.common import ( is_bool_dtype, is_categorical, is_categorical_dtype, @@ -37,7 +38,8 @@ def test_equality_invalid(self): assert not is_dtype_equal(self.dtype, np.int64) def test_numpy_informed(self): - pytest.raises(TypeError, np.dtype, self.dtype) + with pytest.raises(TypeError, match="data type not understood"): + np.dtype(self.dtype) assert not self.dtype == np.str_ assert not np.str_ == self.dtype @@ -86,8 +88,9 @@ def test_equality(self): def test_construction_from_string(self): result = CategoricalDtype.construct_from_string('category') assert is_dtype_equal(self.dtype, result) - pytest.raises( - TypeError, lambda: CategoricalDtype.construct_from_string('foo')) + msg = "cannot construct a CategoricalDtype" + with pytest.raises(TypeError, match=msg): + CategoricalDtype.construct_from_string('foo') def test_constructor_invalid(self): msg = "Parameter 'categories' must be list-like" @@ -201,8 +204,9 @@ def 
test_hash_vs_equality(self): assert hash(dtype2) != hash(dtype4) def test_construction(self): - pytest.raises(ValueError, - lambda: DatetimeTZDtype('ms', 'US/Eastern')) + msg = "DatetimeTZDtype only supports ns units" + with pytest.raises(ValueError, match=msg): + DatetimeTZDtype('ms', 'US/Eastern') def test_subclass(self): a = DatetimeTZDtype.construct_from_string('datetime64[ns, US/Eastern]') @@ -225,8 +229,9 @@ def test_construction_from_string(self): result = DatetimeTZDtype.construct_from_string( 'datetime64[ns, US/Eastern]') assert is_dtype_equal(self.dtype, result) - pytest.raises(TypeError, - lambda: DatetimeTZDtype.construct_from_string('foo')) + msg = "Could not construct DatetimeTZDtype from 'foo'" + with pytest.raises(TypeError, match=msg): + DatetimeTZDtype.construct_from_string('foo') def test_construct_from_string_raises(self): with pytest.raises(TypeError, match="notatz"): @@ -302,6 +307,15 @@ def test_empty(self): with pytest.raises(TypeError, match="A 'tz' is required."): DatetimeTZDtype() + def test_tz_standardize(self): + # GH 24713 + tz = pytz.timezone('US/Eastern') + dr = date_range('2013-01-01', periods=3, tz='US/Eastern') + dtype = DatetimeTZDtype('ns', dr.tz) + assert dtype.tz == tz + dtype = DatetimeTZDtype('ns', dr[0].tz) + assert dtype.tz == tz + class TestPeriodDtype(Base): @@ -501,10 +515,11 @@ def test_construction_not_supported(self, subtype): with pytest.raises(TypeError, match=msg): IntervalDtype(subtype) - def test_construction_errors(self): + @pytest.mark.parametrize('subtype', ['xx', 'IntervalA', 'Interval[foo]']) + def test_construction_errors(self, subtype): msg = 'could not construct IntervalDtype' with pytest.raises(TypeError, match=msg): - IntervalDtype('xx') + IntervalDtype(subtype) def test_construction_from_string(self): result = IntervalDtype('interval[int64]') @@ -513,7 +528,7 @@ def test_construction_from_string(self): assert is_dtype_equal(self.dtype, result) @pytest.mark.parametrize('string', [ - 'foo', 
'foo[int64]', 0, 3.14, ('a', 'b'), None]) + 0, 3.14, ('a', 'b'), None]) def test_construction_from_string_errors(self, string): # these are invalid entirely msg = 'a string needs to be passed, got type' @@ -522,10 +537,12 @@ def test_construction_from_string_errors(self, string): IntervalDtype.construct_from_string(string) @pytest.mark.parametrize('string', [ - 'interval[foo]']) + 'foo', 'foo[int64]', 'IntervalA']) def test_construction_from_string_error_subtype(self, string): # this is an invalid subtype - msg = 'could not construct IntervalDtype' + msg = ("Incorrectly formatted string passed to constructor. " + r"Valid formats include Interval or Interval\[dtype\] " + "where dtype is numeric, datetime, or timedelta") with pytest.raises(TypeError, match=msg): IntervalDtype.construct_from_string(string) @@ -549,6 +566,7 @@ def test_is_dtype(self): assert not IntervalDtype.is_dtype('U') assert not IntervalDtype.is_dtype('S') assert not IntervalDtype.is_dtype('foo') + assert not IntervalDtype.is_dtype('IntervalA') assert not IntervalDtype.is_dtype(np.object_) assert not IntervalDtype.is_dtype(np.int64) assert not IntervalDtype.is_dtype(np.float64) diff --git a/pandas/tests/dtypes/test_generic.py b/pandas/tests/dtypes/test_generic.py index 1622088d05f4d..2bb3559d56d61 100644 --- a/pandas/tests/dtypes/test_generic.py +++ b/pandas/tests/dtypes/test_generic.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- -from warnings import catch_warnings, simplefilter +from warnings import catch_warnings import numpy as np @@ -39,9 +39,6 @@ def test_abc_types(self): assert isinstance(pd.Int64Index([1, 2, 3]), gt.ABCIndexClass) assert isinstance(pd.Series([1, 2, 3]), gt.ABCSeries) assert isinstance(self.df, gt.ABCDataFrame) - with catch_warnings(record=True): - simplefilter('ignore', FutureWarning) - assert isinstance(self.df.to_panel(), gt.ABCPanel) assert isinstance(self.sparse_series, gt.ABCSparseSeries) assert isinstance(self.sparse_array, gt.ABCSparseArray) assert 
isinstance(self.sparse_frame, gt.ABCSparseDataFrame) diff --git a/pandas/tests/dtypes/test_inference.py b/pandas/tests/dtypes/test_inference.py index 89662b70a39ad..187b37d4f788e 100644 --- a/pandas/tests/dtypes/test_inference.py +++ b/pandas/tests/dtypes/test_inference.py @@ -159,13 +159,15 @@ def test_is_nested_list_like_fails(obj): @pytest.mark.parametrize( - "ll", [{}, {'A': 1}, Series([1])]) + "ll", [{}, {'A': 1}, Series([1]), collections.defaultdict()]) def test_is_dict_like_passes(ll): assert inference.is_dict_like(ll) -@pytest.mark.parametrize( - "ll", ['1', 1, [1, 2], (1, 2), range(2), Index([1])]) +@pytest.mark.parametrize("ll", [ + '1', 1, [1, 2], (1, 2), range(2), Index([1]), + dict, collections.defaultdict, Series +]) def test_is_dict_like_fails(ll): assert not inference.is_dict_like(ll) @@ -616,6 +618,37 @@ def test_decimals(self): result = lib.infer_dtype(arr, skipna=True) assert result == 'decimal' + # complex is compatible with nan, so skipna has no effect + @pytest.mark.parametrize('skipna', [True, False]) + def test_complex(self, skipna): + # gets cast to complex on array construction + arr = np.array([1.0, 2.0, 1 + 1j]) + result = lib.infer_dtype(arr, skipna=skipna) + assert result == 'complex' + + arr = np.array([1.0, 2.0, 1 + 1j], dtype='O') + result = lib.infer_dtype(arr, skipna=skipna) + assert result == 'mixed' + + # gets cast to complex on array construction + arr = np.array([1, np.nan, 1 + 1j]) + result = lib.infer_dtype(arr, skipna=skipna) + assert result == 'complex' + + arr = np.array([1.0, np.nan, 1 + 1j], dtype='O') + result = lib.infer_dtype(arr, skipna=skipna) + assert result == 'mixed' + + # complex with nans stays complex + arr = np.array([1 + 1j, np.nan, 3 + 3j], dtype='O') + result = lib.infer_dtype(arr, skipna=skipna) + assert result == 'complex' + + # test smaller complex dtype; will pass through _try_infer_map fastpath + arr = np.array([1 + 1j, np.nan, 3 + 3j], dtype=np.complex64) + result = lib.infer_dtype(arr, 
skipna=skipna) + assert result == 'complex' + def test_string(self): pass diff --git a/pandas/tests/dtypes/test_missing.py b/pandas/tests/dtypes/test_missing.py index d913d2ad299ce..7ca01e13a33a9 100644 --- a/pandas/tests/dtypes/test_missing.py +++ b/pandas/tests/dtypes/test_missing.py @@ -2,7 +2,7 @@ from datetime import datetime from decimal import Decimal -from warnings import catch_warnings, filterwarnings, simplefilter +from warnings import catch_warnings, filterwarnings import numpy as np import pytest @@ -94,15 +94,6 @@ def test_isna_isnull(self, isna_f): expected = df.apply(isna_f) tm.assert_frame_equal(result, expected) - # panel - with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - for p in [tm.makePanel(), tm.makePeriodPanel(), - tm.add_nans(tm.makePanel())]: - result = isna_f(p) - expected = p.apply(isna_f) - tm.assert_panel_equal(result, expected) - def test_isna_lists(self): result = isna([[False]]) exp = np.array([[False]]) diff --git a/pandas/tests/extension/base/groupby.py b/pandas/tests/extension/base/groupby.py index dd406ca0cd5ed..1929dad075695 100644 --- a/pandas/tests/extension/base/groupby.py +++ b/pandas/tests/extension/base/groupby.py @@ -55,19 +55,14 @@ def test_groupby_extension_transform(self, data_for_grouping): self.assert_series_equal(result, expected) - @pytest.mark.parametrize('op', [ - lambda x: 1, - lambda x: [1] * len(x), - lambda x: pd.Series([1] * len(x)), - lambda x: x, - ], ids=['scalar', 'list', 'series', 'object']) - def test_groupby_extension_apply(self, data_for_grouping, op): + def test_groupby_extension_apply( + self, data_for_grouping, groupby_apply_op): df = pd.DataFrame({"A": [1, 1, 2, 2, 3, 3, 1, 4], "B": data_for_grouping}) - df.groupby("B").apply(op) - df.groupby("B").A.apply(op) - df.groupby("A").apply(op) - df.groupby("A").B.apply(op) + df.groupby("B").apply(groupby_apply_op) + df.groupby("B").A.apply(groupby_apply_op) + df.groupby("A").apply(groupby_apply_op) + 
df.groupby("A").B.apply(groupby_apply_op) def test_in_numeric_groupby(self, data_for_grouping): df = pd.DataFrame({"A": [1, 1, 2, 2, 3, 3, 1, 4], diff --git a/pandas/tests/extension/base/methods.py b/pandas/tests/extension/base/methods.py index f64df7a84b7c0..1852edaa9e748 100644 --- a/pandas/tests/extension/base/methods.py +++ b/pandas/tests/extension/base/methods.py @@ -240,7 +240,6 @@ def test_shift_fill_value(self, data): expected = data.take([2, 3, 0, 0]) self.assert_extension_array_equal(result, expected) - @pytest.mark.parametrize("as_frame", [True, False]) def test_hash_pandas_object_works(self, data, as_frame): # https://github.com/pandas-dev/pandas/issues/23066 data = pd.Series(data) @@ -250,7 +249,6 @@ def test_hash_pandas_object_works(self, data, as_frame): b = pd.util.hash_pandas_object(data) self.assert_equal(a, b) - @pytest.mark.parametrize("as_series", [True, False]) def test_searchsorted(self, data_for_sorting, as_series): b, c, a = data_for_sorting arr = type(data_for_sorting)._from_sequence([a, b, c]) @@ -275,7 +273,6 @@ def test_searchsorted(self, data_for_sorting, as_series): sorter = np.array([1, 2, 0]) assert data_for_sorting.searchsorted(a, sorter=sorter) == 0 - @pytest.mark.parametrize("as_frame", [True, False]) def test_where_series(self, data, na_value, as_frame): assert data[0] != data[1] cls = type(data) @@ -309,8 +306,6 @@ def test_where_series(self, data, na_value, as_frame): expected = expected.to_frame(name='a') self.assert_equal(result, expected) - @pytest.mark.parametrize("use_numpy", [True, False]) - @pytest.mark.parametrize("as_series", [True, False]) @pytest.mark.parametrize("repeats", [0, 1, 2, [1, 2, 3]]) def test_repeat(self, data, repeats, as_series, use_numpy): arr = type(data)._from_sequence(data[:3], dtype=data.dtype) @@ -327,7 +322,6 @@ def test_repeat(self, data, repeats, as_series, use_numpy): self.assert_equal(result, expected) - @pytest.mark.parametrize("use_numpy", [True, False]) @pytest.mark.parametrize('repeats, 
kwargs, error, msg', [ (2, dict(axis=1), ValueError, "'axis"), (-1, dict(), ValueError, "negative"), diff --git a/pandas/tests/extension/base/missing.py b/pandas/tests/extension/base/missing.py index 2fe547e50a34b..834f49f0461f0 100644 --- a/pandas/tests/extension/base/missing.py +++ b/pandas/tests/extension/base/missing.py @@ -1,5 +1,4 @@ import numpy as np -import pytest import pandas as pd import pandas.util.testing as tm @@ -89,14 +88,13 @@ def test_fillna_series(self, data_missing): result = ser.fillna(ser) self.assert_series_equal(result, ser) - @pytest.mark.parametrize('method', ['ffill', 'bfill']) - def test_fillna_series_method(self, data_missing, method): + def test_fillna_series_method(self, data_missing, fillna_method): fill_value = data_missing[1] - if method == 'ffill': + if fillna_method == 'ffill': data_missing = data_missing[::-1] - result = pd.Series(data_missing).fillna(method=method) + result = pd.Series(data_missing).fillna(method=fillna_method) expected = pd.Series(data_missing._from_sequence( [fill_value, fill_value], dtype=data_missing.dtype)) diff --git a/pandas/tests/extension/base/setitem.py b/pandas/tests/extension/base/setitem.py index 42fda982f7339..db6328e39e6cc 100644 --- a/pandas/tests/extension/base/setitem.py +++ b/pandas/tests/extension/base/setitem.py @@ -24,7 +24,6 @@ def test_setitem_sequence(self, data, box_in_series): assert data[0] == original[1] assert data[1] == original[0] - @pytest.mark.parametrize('as_array', [True, False]) def test_setitem_sequence_mismatched_length_raises(self, data, as_array): ser = pd.Series(data) original = ser.copy() diff --git a/pandas/tests/extension/conftest.py b/pandas/tests/extension/conftest.py index 5349dd919f2a2..3cc2d313b09f5 100644 --- a/pandas/tests/extension/conftest.py +++ b/pandas/tests/extension/conftest.py @@ -2,6 +2,8 @@ import pytest +from pandas import Series + @pytest.fixture def dtype(): @@ -108,3 +110,58 @@ def data_for_grouping(): def box_in_series(request): """Whether to 
box the data in a Series""" return request.param + + +@pytest.fixture(params=[ + lambda x: 1, + lambda x: [1] * len(x), + lambda x: Series([1] * len(x)), + lambda x: x, +], ids=['scalar', 'list', 'series', 'object']) +def groupby_apply_op(request): + """ + Functions to test groupby.apply(). + """ + return request.param + + +@pytest.fixture(params=[True, False]) +def as_frame(request): + """ + Boolean fixture to support Series and Series.to_frame() comparison testing. + """ + return request.param + + +@pytest.fixture(params=[True, False]) +def as_series(request): + """ + Boolean fixture to support arr and Series(arr) comparison testing. + """ + return request.param + + +@pytest.fixture(params=[True, False]) +def use_numpy(request): + """ + Boolean fixture to support comparison testing of ExtensionDtype array + and numpy array. + """ + return request.param + + +@pytest.fixture(params=['ffill', 'bfill']) +def fillna_method(request): + """ + Parametrized fixture giving method parameters 'ffill' and 'bfill' for + Series.fillna(method=<method>) testing. + """ + return request.param + + +@pytest.fixture(params=[True, False]) +def as_array(request): + """ + Boolean fixture to support ExtensionDtype _from_sequence method testing. + """ + return request.param diff --git a/pandas/tests/extension/test_numpy.py b/pandas/tests/extension/test_numpy.py index 7ca6882c7441b..41f5beb8c885d 100644 --- a/pandas/tests/extension/test_numpy.py +++ b/pandas/tests/extension/test_numpy.py @@ -1,6 +1,8 @@ import numpy as np import pytest +from pandas.compat.numpy import _np_version_under1p16 + import pandas as pd from pandas import compat from pandas.core.arrays.numpy_ import PandasArray, PandasDtype @@ -9,9 +11,9 @@ from . 
import base -@pytest.fixture -def dtype(): - return PandasDtype(np.dtype('float')) +@pytest.fixture(params=['float', 'object']) +def dtype(request): + return PandasDtype(np.dtype(request.param)) @pytest.fixture @@ -38,11 +40,19 @@ def allow_in_pandas(monkeypatch): @pytest.fixture def data(allow_in_pandas, dtype): + if dtype.numpy_dtype == 'object': + return pd.Series([(i,) for i in range(100)]).array return PandasArray(np.arange(1, 101, dtype=dtype._dtype)) @pytest.fixture -def data_missing(allow_in_pandas): +def data_missing(allow_in_pandas, dtype): + # For NumPy <1.16, np.array([np.nan, (1,)]) raises + # ValueError: setting an array element with a sequence. + if dtype.numpy_dtype == 'object': + if _np_version_under1p16: + raise pytest.skip("Skipping for NumPy <1.16") + return PandasArray(np.array([np.nan, (1,)])) return PandasArray(np.array([np.nan, 1.0])) @@ -59,49 +69,84 @@ def cmp(a, b): @pytest.fixture -def data_for_sorting(allow_in_pandas): +def data_for_sorting(allow_in_pandas, dtype): """Length-3 array with a known sort order. This should be three items [B, C, A] with A < B < C """ + if dtype.numpy_dtype == 'object': + # Use an empty tuple for first element, then remove, + # to disable np.array's shape inference. + return PandasArray( + np.array([(), (2,), (3,), (1,)])[1:] + ) return PandasArray( np.array([1, 2, 0]) ) @pytest.fixture -def data_missing_for_sorting(allow_in_pandas): +def data_missing_for_sorting(allow_in_pandas, dtype): """Length-3 array with a known sort order. This should be three items [B, NA, A] with A < B and NA missing. """ + if dtype.numpy_dtype == 'object': + return PandasArray( + np.array([(1,), np.nan, (0,)]) + ) return PandasArray( np.array([1, np.nan, 0]) ) @pytest.fixture -def data_for_grouping(allow_in_pandas): +def data_for_grouping(allow_in_pandas, dtype): """Data for factorization, grouping, and unique tests. 
Expected to be like [B, B, NA, NA, A, A, B, C] Where A < B < C and NA is missing """ - a, b, c = np.arange(3) + if dtype.numpy_dtype == 'object': + a, b, c = (1,), (2,), (3,) + else: + a, b, c = np.arange(3) return PandasArray(np.array( [b, b, np.nan, np.nan, a, a, b, c] )) +@pytest.fixture +def skip_numpy_object(dtype): + """ + Tests for PandasArray with nested data. Users typically won't create + these objects via `pd.array`, but they can show up through `.array` + on a Series with nested data. Many of the base tests fail, as they aren't + appropriate for nested data. + + This fixture allows these tests to be skipped when used as a usefixtures + marker to either an individual test or a test class. + """ + if dtype == 'object': + raise pytest.skip("Skipping for object dtype.") + + +skip_nested = pytest.mark.usefixtures('skip_numpy_object') + + class BaseNumPyTests(object): pass class TestCasting(BaseNumPyTests, base.BaseCastingTests): - pass + + @skip_nested + def test_astype_str(self, data): + # ValueError: setting an array element with a sequence + super(TestCasting, self).test_astype_str(data) class TestConstructors(BaseNumPyTests, base.BaseConstructorsTests): @@ -110,6 +155,11 @@ class TestConstructors(BaseNumPyTests, base.BaseConstructorsTests): def test_from_dtype(self, data): pass + @skip_nested + def test_array_from_scalars(self, data): + # ValueError: PandasArray must be 1-dimensional. + super(TestConstructors, self).test_array_from_scalars(data) + class TestDtype(BaseNumPyTests, base.BaseDtypeTests): @@ -120,15 +170,32 @@ def test_check_dtype(self, data): class TestGetitem(BaseNumPyTests, base.BaseGetitemTests): - pass + + @skip_nested + def test_getitem_scalar(self, data): + # AssertionError + super(TestGetitem, self).test_getitem_scalar(data) + + @skip_nested + def test_take_series(self, data): + # ValueError: PandasArray must be 1-dimensional. 
+ super(TestGetitem, self).test_take_series(data) class TestGroupby(BaseNumPyTests, base.BaseGroupbyTests): - pass + @skip_nested + def test_groupby_extension_apply( + self, data_for_grouping, groupby_apply_op): + # ValueError: Names should be list-like for a MultiIndex + super(TestGroupby, self).test_groupby_extension_apply( + data_for_grouping, groupby_apply_op) class TestInterface(BaseNumPyTests, base.BaseInterfaceTests): - pass + @skip_nested + def test_array_interface(self, data): + # NumPy array shape inference + super(TestInterface, self).test_array_interface(data) class TestMethods(BaseNumPyTests, base.BaseMethodsTests): @@ -143,7 +210,57 @@ def test_value_counts(self, all_data, dropna): def test_combine_le(self, data_repeated): super(TestMethods, self).test_combine_le(data_repeated) - + @skip_nested + def test_combine_add(self, data_repeated): + # Not numeric + super(TestMethods, self).test_combine_add(data_repeated) + + @skip_nested + def test_shift_fill_value(self, data): + # np.array shape inference. Shift implementation fails. + super(TestMethods, self).test_shift_fill_value(data) + + @skip_nested + @pytest.mark.parametrize('box', [pd.Series, lambda x: x]) + @pytest.mark.parametrize('method', [lambda x: x.unique(), pd.unique]) + def test_unique(self, data, box, method): + # Fails creating expected + super(TestMethods, self).test_unique(data, box, method) + + @skip_nested + def test_fillna_copy_frame(self, data_missing): + # The "scalar" for this array isn't a scalar. + super(TestMethods, self).test_fillna_copy_frame(data_missing) + + @skip_nested + def test_fillna_copy_series(self, data_missing): + # The "scalar" for this array isn't a scalar. 
+ super(TestMethods, self).test_fillna_copy_series(data_missing) + + @skip_nested + def test_hash_pandas_object_works(self, data, as_frame): + # ndarray of tuples not hashable + super(TestMethods, self).test_hash_pandas_object_works(data, as_frame) + + @skip_nested + def test_searchsorted(self, data_for_sorting, as_series): + # Test setup fails. + super(TestMethods, self).test_searchsorted(data_for_sorting, as_series) + + @skip_nested + def test_where_series(self, data, na_value, as_frame): + # Test setup fails. + super(TestMethods, self).test_where_series(data, na_value, as_frame) + + @skip_nested + @pytest.mark.parametrize("repeats", [0, 1, 2, [1, 2, 3]]) + def test_repeat(self, data, repeats, as_series, use_numpy): + # Fails creating expected + super(TestMethods, self).test_repeat( + data, repeats, as_series, use_numpy) + + +@skip_nested class TestArithmetics(BaseNumPyTests, base.BaseArithmeticOpsTests): divmod_exc = None series_scalar_exc = None @@ -183,6 +300,7 @@ class TestPrinting(BaseNumPyTests, base.BasePrintingTests): pass +@skip_nested class TestNumericReduce(BaseNumPyTests, base.BaseNumericReduceTests): def check_reduce(self, s, op_name, skipna): @@ -192,12 +310,33 @@ def check_reduce(self, s, op_name, skipna): tm.assert_almost_equal(result, expected) +@skip_nested class TestBooleanReduce(BaseNumPyTests, base.BaseBooleanReduceTests): pass -class TestMising(BaseNumPyTests, base.BaseMissingTests): - pass +class TestMissing(BaseNumPyTests, base.BaseMissingTests): + + @skip_nested + def test_fillna_scalar(self, data_missing): + # Non-scalar "scalar" values. + super(TestMissing, self).test_fillna_scalar(data_missing) + + @skip_nested + def test_fillna_series_method(self, data_missing, fillna_method): + # Non-scalar "scalar" values. + super(TestMissing, self).test_fillna_series_method( + data_missing, fillna_method) + + @skip_nested + def test_fillna_series(self, data_missing): + # Non-scalar "scalar" values. 
+ super(TestMissing, self).test_fillna_series(data_missing) + + @skip_nested + def test_fillna_frame(self, data_missing): + # Non-scalar "scalar" values. + super(TestMissing, self).test_fillna_frame(data_missing) class TestReshaping(BaseNumPyTests, base.BaseReshapingTests): @@ -207,10 +346,85 @@ class TestReshaping(BaseNumPyTests, base.BaseReshapingTests): def test_concat_mixed_dtypes(self, data): super(TestReshaping, self).test_concat_mixed_dtypes(data) + @skip_nested + def test_merge(self, data, na_value): + # Fails creating expected + super(TestReshaping, self).test_merge(data, na_value) -class TestSetitem(BaseNumPyTests, base.BaseSetitemTests): - pass + @skip_nested + def test_merge_on_extension_array(self, data): + # Fails creating expected + super(TestReshaping, self).test_merge_on_extension_array(data) + @skip_nested + def test_merge_on_extension_array_duplicates(self, data): + # Fails creating expected + super(TestReshaping, self).test_merge_on_extension_array_duplicates( + data) + + +class TestSetitem(BaseNumPyTests, base.BaseSetitemTests): + @skip_nested + def test_setitem_scalar_series(self, data, box_in_series): + # AssertionError + super(TestSetitem, self).test_setitem_scalar_series( + data, box_in_series) + + @skip_nested + def test_setitem_sequence(self, data, box_in_series): + # ValueError: shape mismatch: value array of shape (2,1) could not + # be broadcast to indexing result of shape (2,) + super(TestSetitem, self).test_setitem_sequence(data, box_in_series) + + @skip_nested + def test_setitem_sequence_mismatched_length_raises(self, data, as_array): + # ValueError: PandasArray must be 1-dimensional. + (super(TestSetitem, self). 
+ test_setitem_sequence_mismatched_length_raises(data, as_array)) + + @skip_nested + def test_setitem_sequence_broadcasts(self, data, box_in_series): + # ValueError: cannot set using a list-like indexer with a different + # length than the value + super(TestSetitem, self).test_setitem_sequence_broadcasts( + data, box_in_series) + + @skip_nested + def test_setitem_loc_scalar_mixed(self, data): + # AssertionError + super(TestSetitem, self).test_setitem_loc_scalar_mixed(data) + + @skip_nested + def test_setitem_loc_scalar_multiple_homogoneous(self, data): + # AssertionError + super(TestSetitem, self).test_setitem_loc_scalar_multiple_homogoneous( + data) + + @skip_nested + def test_setitem_iloc_scalar_mixed(self, data): + # AssertionError + super(TestSetitem, self).test_setitem_iloc_scalar_mixed(data) + + @skip_nested + def test_setitem_iloc_scalar_multiple_homogoneous(self, data): + # AssertionError + super(TestSetitem, self).test_setitem_iloc_scalar_multiple_homogoneous( + data) + + @skip_nested + @pytest.mark.parametrize('setter', ['loc', None]) + def test_setitem_mask_broadcast(self, data, setter): + # ValueError: cannot set using a list-like indexer with a different + # length than the value + super(TestSetitem, self).test_setitem_mask_broadcast(data, setter) + + @skip_nested + def test_setitem_scalar_key_sequence_raise(self, data): + # Failed: DID NOT RAISE <class 'ValueError'> + super(TestSetitem, self).test_setitem_scalar_key_sequence_raise(data) + + +@skip_nested class TestParsing(BaseNumPyTests, base.BaseParsingTests): pass diff --git a/pandas/tests/extension/test_sparse.py b/pandas/tests/extension/test_sparse.py index 21dbf9524961c..146dea2b65d83 100644 --- a/pandas/tests/extension/test_sparse.py +++ b/pandas/tests/extension/test_sparse.py @@ -287,11 +287,10 @@ def test_combine_first(self, data): pytest.skip("TODO(SparseArray.__setitem__ will preserve dtype.") super(TestMethods, self).test_combine_first(data) - @pytest.mark.parametrize("as_series", [True, 
False]) def test_searchsorted(self, data_for_sorting, as_series): with tm.assert_produces_warning(PerformanceWarning): super(TestMethods, self).test_searchsorted(data_for_sorting, - as_series=as_series) + as_series) class TestCasting(BaseSparseTests, base.BaseCastingTests): diff --git a/pandas/tests/frame/common.py b/pandas/tests/frame/common.py index 2ea087c0510bf..5624f7c1303b6 100644 --- a/pandas/tests/frame/common.py +++ b/pandas/tests/frame/common.py @@ -85,7 +85,7 @@ def tzframe(self): @cache_readonly def empty(self): - return pd.DataFrame({}) + return pd.DataFrame() @cache_readonly def ts1(self): diff --git a/pandas/tests/frame/conftest.py b/pandas/tests/frame/conftest.py index 377e737a53158..fbe03325a3ad9 100644 --- a/pandas/tests/frame/conftest.py +++ b/pandas/tests/frame/conftest.py @@ -29,16 +29,6 @@ def float_frame_with_na(): return df -@pytest.fixture -def float_frame2(): - """ - Fixture for DataFrame of floats with index of unique strings - - Columns are ['D', 'C', 'B', 'A'] - """ - return DataFrame(tm.getSeriesData(), columns=['D', 'C', 'B', 'A']) - - @pytest.fixture def bool_frame_with_na(): """ @@ -104,21 +94,6 @@ def mixed_float_frame(): return df -@pytest.fixture -def mixed_float_frame2(): - """ - Fixture for DataFrame of different float types with index of unique strings - - Columns are ['A', 'B', 'C', 'D']. - """ - df = DataFrame(tm.getSeriesData()) - df.D = df.D.astype('float32') - df.C = df.C.astype('float32') - df.B = df.B.astype('float16') - df.D = df.D.astype('float64') - return df - - @pytest.fixture def mixed_int_frame(): """ @@ -135,19 +110,6 @@ def mixed_int_frame(): return df -@pytest.fixture -def mixed_type_frame(): - """ - Fixture for DataFrame of float/int/string columns with RangeIndex - - Columns are ['a', 'b', 'c', 'float32', 'int32']. - """ - return DataFrame({'a': 1., 'b': 2, 'c': 'foo', - 'float32': np.array([1.] 
* 10, dtype='float32'), - 'int32': np.array([1] * 10, dtype='int32')}, - index=np.arange(10)) - - @pytest.fixture def timezone_frame(): """ @@ -165,30 +127,6 @@ def timezone_frame(): return df -@pytest.fixture -def empty_frame(): - """ - Fixture for empty DataFrame - """ - return DataFrame({}) - - -@pytest.fixture -def datetime_series(): - """ - Fixture for Series of floats with DatetimeIndex - """ - return tm.makeTimeSeries(nper=30) - - -@pytest.fixture -def datetime_series_short(): - """ - Fixture for Series of floats with DatetimeIndex - """ - return tm.makeTimeSeries(nper=30)[5:] - - @pytest.fixture def simple_frame(): """ diff --git a/pandas/tests/frame/test_alter_axes.py b/pandas/tests/frame/test_alter_axes.py index c2355742199dc..f4a2a5f8032a0 100644 --- a/pandas/tests/frame/test_alter_axes.py +++ b/pandas/tests/frame/test_alter_axes.py @@ -178,10 +178,10 @@ def test_set_index_pass_arrays(self, frame_of_index_cols, # MultiIndex constructor does not work directly on Series -> lambda # We also emulate a "constructor" for the label -> lambda # also test index name if append=True (name is duplicate here for A) - @pytest.mark.parametrize('box2', [Series, Index, np.array, list, + @pytest.mark.parametrize('box2', [Series, Index, np.array, list, iter, lambda x: MultiIndex.from_arrays([x]), lambda x: x.name]) - @pytest.mark.parametrize('box1', [Series, Index, np.array, list, + @pytest.mark.parametrize('box1', [Series, Index, np.array, list, iter, lambda x: MultiIndex.from_arrays([x]), lambda x: x.name]) @pytest.mark.parametrize('append, index_name', [(True, None), @@ -195,6 +195,9 @@ def test_set_index_pass_arrays_duplicate(self, frame_of_index_cols, drop, keys = [box1(df['A']), box2(df['A'])] result = df.set_index(keys, drop=drop, append=append) + # if either box is iter, it has been consumed; re-read + keys = [box1(df['A']), box2(df['A'])] + # need to adapt first drop for case that both keys are 'A' -- # cannot drop the same column twice; # use "is" because == 
would give ambiguous Boolean error for containers @@ -255,21 +258,150 @@ def test_set_index_raise_keys(self, frame_of_index_cols, drop, append): @pytest.mark.parametrize('append', [True, False]) @pytest.mark.parametrize('drop', [True, False]) - @pytest.mark.parametrize('box', [set, iter]) + @pytest.mark.parametrize('box', [set], ids=['set']) def test_set_index_raise_on_type(self, frame_of_index_cols, box, drop, append): df = frame_of_index_cols msg = 'The parameter "keys" may be a column key, .*' - # forbidden type, e.g. set/tuple/iter - with pytest.raises(ValueError, match=msg): + # forbidden type, e.g. set + with pytest.raises(TypeError, match=msg): df.set_index(box(df['A']), drop=drop, append=append) - # forbidden type in list, e.g. set/tuple/iter - with pytest.raises(ValueError, match=msg): + # forbidden type in list, e.g. set + with pytest.raises(TypeError, match=msg): df.set_index(['A', df['A'], box(df['A'])], drop=drop, append=append) + # MultiIndex constructor does not work directly on Series -> lambda + @pytest.mark.parametrize('box', [Series, Index, np.array, iter, + lambda x: MultiIndex.from_arrays([x])], + ids=['Series', 'Index', 'np.array', + 'iter', 'MultiIndex']) + @pytest.mark.parametrize('length', [4, 6], ids=['too_short', 'too_long']) + @pytest.mark.parametrize('append', [True, False]) + @pytest.mark.parametrize('drop', [True, False]) + def test_set_index_raise_on_len(self, frame_of_index_cols, box, length, + drop, append): + # GH 24984 + df = frame_of_index_cols # has length 5 + + values = np.random.randint(0, 10, (length,)) + + msg = 'Length mismatch: Expected 5 rows, received array of length.*' + + # wrong length directly + with pytest.raises(ValueError, match=msg): + df.set_index(box(values), drop=drop, append=append) + + # wrong length in list + with pytest.raises(ValueError, match=msg): + df.set_index(['A', df.A, box(values)], drop=drop, append=append) + + def test_set_index_custom_label_type(self): + # GH 24969 + + class Thing(object): + 
def __init__(self, name, color): + self.name = name + self.color = color + + def __str__(self): + return "<Thing %r>" % (self.name,) + + # necessary for pretty KeyError + __repr__ = __str__ + + thing1 = Thing('One', 'red') + thing2 = Thing('Two', 'blue') + df = DataFrame({thing1: [0, 1], thing2: [2, 3]}) + expected = DataFrame({thing1: [0, 1]}, + index=Index([2, 3], name=thing2)) + + # use custom label directly + result = df.set_index(thing2) + tm.assert_frame_equal(result, expected) + + # custom label wrapped in list + result = df.set_index([thing2]) + tm.assert_frame_equal(result, expected) + + # missing key + thing3 = Thing('Three', 'pink') + msg = "<Thing 'Three'>" + with pytest.raises(KeyError, match=msg): + # missing label directly + df.set_index(thing3) + + with pytest.raises(KeyError, match=msg): + # missing label in list + df.set_index([thing3]) + + def test_set_index_custom_label_hashable_iterable(self): + # GH 24969 + + # actual example discussed in GH 24984 was e.g. for shapely.geometry + # objects (e.g. 
a collection of Points) that can be both hashable and + # iterable; using frozenset as a stand-in for testing here + + class Thing(frozenset): + # need to stabilize repr for KeyError (due to random order in sets) + def __repr__(self): + tmp = sorted(list(self)) + # double curly brace prints one brace in format string + return "frozenset({{{}}})".format(', '.join(map(repr, tmp))) + + thing1 = Thing(['One', 'red']) + thing2 = Thing(['Two', 'blue']) + df = DataFrame({thing1: [0, 1], thing2: [2, 3]}) + expected = DataFrame({thing1: [0, 1]}, + index=Index([2, 3], name=thing2)) + + # use custom label directly + result = df.set_index(thing2) + tm.assert_frame_equal(result, expected) + + # custom label wrapped in list + result = df.set_index([thing2]) + tm.assert_frame_equal(result, expected) + + # missing key + thing3 = Thing(['Three', 'pink']) + msg = r"frozenset\(\{'Three', 'pink'\}\)" + with pytest.raises(KeyError, match=msg): + # missing label directly + df.set_index(thing3) + + with pytest.raises(KeyError, match=msg): + # missing label in list + df.set_index([thing3]) + + def test_set_index_custom_label_type_raises(self): + # GH 24969 + + # purposefully inherit from something unhashable + class Thing(set): + def __init__(self, name, color): + self.name = name + self.color = color + + def __str__(self): + return "<Thing %r>" % (self.name,) + + thing1 = Thing('One', 'red') + thing2 = Thing('Two', 'blue') + df = DataFrame([[0, 2], [1, 3]], columns=[thing1, thing2]) + + msg = 'The parameter "keys" may be a column key, .*' + + with pytest.raises(TypeError, match=msg): + # use custom label directly + df.set_index(thing2) + + with pytest.raises(TypeError, match=msg): + # custom label wrapped in list + df.set_index([thing2]) + def test_construction_with_categorical_index(self): ci = tm.makeCategoricalIndex(10) ci.name = 'B' @@ -501,7 +633,8 @@ def test_rename(self, float_frame): tm.assert_index_equal(renamed.index, Index(['BAR', 'FOO'])) # have to pass something - 
pytest.raises(TypeError, float_frame.rename) + with pytest.raises(TypeError, match="must pass an index to rename"): + float_frame.rename() # partial columns renamed = float_frame.rename(columns={'C': 'foo', 'D': 'bar'}) @@ -600,6 +733,26 @@ def test_rename_axis_mapper(self): with pytest.raises(TypeError, match='bogus'): df.rename_axis(bogus=None) + @pytest.mark.parametrize('kwargs, rename_index, rename_columns', [ + ({'mapper': None, 'axis': 0}, True, False), + ({'mapper': None, 'axis': 1}, False, True), + ({'index': None}, True, False), + ({'columns': None}, False, True), + ({'index': None, 'columns': None}, True, True), + ({}, False, False)]) + def test_rename_axis_none(self, kwargs, rename_index, rename_columns): + # GH 25034 + index = Index(list('abc'), name='foo') + columns = Index(['col1', 'col2'], name='bar') + data = np.arange(6).reshape(3, 2) + df = DataFrame(data, index, columns) + + result = df.rename_axis(**kwargs) + expected_index = index.rename(None) if rename_index else index + expected_columns = columns.rename(None) if rename_columns else columns + expected = DataFrame(data, expected_index, expected_columns) + tm.assert_frame_equal(result, expected) + def test_rename_multiindex(self): tuples_index = [('foo1', 'bar1'), ('foo2', 'bar2')] diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_analytics.py index 386e5f57617cf..3363a45149fff 100644 --- a/pandas/tests/frame/test_analytics.py +++ b/pandas/tests/frame/test_analytics.py @@ -8,7 +8,7 @@ import numpy as np import pytest -from pandas.compat import PY35, lrange +from pandas.compat import PY2, PY35, is_platform_windows, lrange import pandas.util._test_decorators as td import pandas as pd @@ -231,9 +231,9 @@ def assert_bool_op_api(opname, bool_frame_with_na, float_string_frame, getattr(bool_frame_with_na, opname)(axis=1, bool_only=False) -class TestDataFrameAnalytics(): +class TestDataFrameAnalytics(object): - # 
---------------------------------------------------------------------= + # --------------------------------------------------------------------- # Correlation and covariance @td.skip_if_no_scipy @@ -502,6 +502,9 @@ def test_corrwith_kendall(self): expected = Series(np.ones(len(result))) tm.assert_series_equal(result, expected) + # --------------------------------------------------------------------- + # Describe + def test_bool_describe_in_mixed_frame(self): df = DataFrame({ 'string_data': ['a', 'b', 'c', 'd', 'e'], @@ -693,82 +696,113 @@ def test_describe_tz_values(self, tz_naive_fixture): result = df.describe(include='all') tm.assert_frame_equal(result, expected) - def test_reduce_mixed_frame(self): - # GH 6806 - df = DataFrame({ - 'bool_data': [True, True, False, False, False], - 'int_data': [10, 20, 30, 40, 50], - 'string_data': ['a', 'b', 'c', 'd', 'e'], - }) - df.reindex(columns=['bool_data', 'int_data', 'string_data']) - test = df.sum(axis=0) - tm.assert_numpy_array_equal(test.values, - np.array([2, 150, 'abcde'], dtype=object)) - tm.assert_series_equal(test, df.T.sum(axis=1)) + # --------------------------------------------------------------------- + # Reductions - def test_count(self, float_frame_with_na, float_frame, float_string_frame): - f = lambda s: notna(s).sum() - assert_stat_op_calc('count', f, float_frame_with_na, has_skipna=False, - check_dtype=False, check_dates=True) + def test_stat_op_api(self, float_frame, float_string_frame): assert_stat_op_api('count', float_frame, float_string_frame, has_numeric_only=True) + assert_stat_op_api('sum', float_frame, float_string_frame, + has_numeric_only=True) - # corner case - frame = DataFrame() - ct1 = frame.count(1) - assert isinstance(ct1, Series) + assert_stat_op_api('nunique', float_frame, float_string_frame) + assert_stat_op_api('mean', float_frame, float_string_frame) + assert_stat_op_api('product', float_frame, float_string_frame) + assert_stat_op_api('median', float_frame, float_string_frame) + 
assert_stat_op_api('min', float_frame, float_string_frame) + assert_stat_op_api('max', float_frame, float_string_frame) + assert_stat_op_api('mad', float_frame, float_string_frame) + assert_stat_op_api('var', float_frame, float_string_frame) + assert_stat_op_api('std', float_frame, float_string_frame) + assert_stat_op_api('sem', float_frame, float_string_frame) + assert_stat_op_api('median', float_frame, float_string_frame) - ct2 = frame.count(0) - assert isinstance(ct2, Series) + try: + from scipy.stats import skew, kurtosis # noqa:F401 + assert_stat_op_api('skew', float_frame, float_string_frame) + assert_stat_op_api('kurt', float_frame, float_string_frame) + except ImportError: + pass - # GH 423 - df = DataFrame(index=lrange(10)) - result = df.count(1) - expected = Series(0, index=df.index) - tm.assert_series_equal(result, expected) + def test_stat_op_calc(self, float_frame_with_na, mixed_float_frame): - df = DataFrame(columns=lrange(10)) - result = df.count(0) - expected = Series(0, index=df.columns) - tm.assert_series_equal(result, expected) + def count(s): + return notna(s).sum() - df = DataFrame() - result = df.count() - expected = Series(0, index=[]) - tm.assert_series_equal(result, expected) + def nunique(s): + return len(algorithms.unique1d(s.dropna())) + + def mad(x): + return np.abs(x - x.mean()).mean() - def test_nunique(self, float_frame_with_na, float_frame, - float_string_frame): - f = lambda s: len(algorithms.unique1d(s.dropna())) - assert_stat_op_calc('nunique', f, float_frame_with_na, + def var(x): + return np.var(x, ddof=1) + + def std(x): + return np.std(x, ddof=1) + + def sem(x): + return np.std(x, ddof=1) / np.sqrt(len(x)) + + def skewness(x): + from scipy.stats import skew # noqa:F811 + if len(x) < 3: + return np.nan + return skew(x, bias=False) + + def kurt(x): + from scipy.stats import kurtosis # noqa:F811 + if len(x) < 4: + return np.nan + return kurtosis(x, bias=False) + + assert_stat_op_calc('nunique', nunique, float_frame_with_na, 
has_skipna=False, check_dtype=False, check_dates=True) - assert_stat_op_api('nunique', float_frame, float_string_frame) - df = DataFrame({'A': [1, 1, 1], - 'B': [1, 2, 3], - 'C': [1, np.nan, 3]}) - tm.assert_series_equal(df.nunique(), Series({'A': 1, 'B': 3, 'C': 2})) - tm.assert_series_equal(df.nunique(dropna=False), - Series({'A': 1, 'B': 3, 'C': 3})) - tm.assert_series_equal(df.nunique(axis=1), Series({0: 1, 1: 2, 2: 2})) - tm.assert_series_equal(df.nunique(axis=1, dropna=False), - Series({0: 1, 1: 3, 2: 2})) - - def test_sum(self, float_frame_with_na, mixed_float_frame, - float_frame, float_string_frame): - assert_stat_op_api('sum', float_frame, float_string_frame, - has_numeric_only=True) - assert_stat_op_calc('sum', np.sum, float_frame_with_na, - skipna_alternative=np.nansum) # mixed types (with upcasting happening) assert_stat_op_calc('sum', np.sum, mixed_float_frame.astype('float32'), check_dtype=False, check_less_precise=True) + assert_stat_op_calc('sum', np.sum, float_frame_with_na, + skipna_alternative=np.nansum) + assert_stat_op_calc('mean', np.mean, float_frame_with_na, + check_dates=True) + assert_stat_op_calc('product', np.prod, float_frame_with_na) + + assert_stat_op_calc('mad', mad, float_frame_with_na) + assert_stat_op_calc('var', var, float_frame_with_na) + assert_stat_op_calc('std', std, float_frame_with_na) + assert_stat_op_calc('sem', sem, float_frame_with_na) + + assert_stat_op_calc('count', count, float_frame_with_na, + has_skipna=False, check_dtype=False, + check_dates=True) + + try: + from scipy import skew, kurtosis # noqa:F401 + assert_stat_op_calc('skew', skewness, float_frame_with_na) + assert_stat_op_calc('kurt', kurt, float_frame_with_na) + except ImportError: + pass + + # TODO: Ensure warning isn't emitted in the first place + @pytest.mark.filterwarnings("ignore:All-NaN:RuntimeWarning") + def test_median(self, float_frame_with_na, int_frame): + def wrapper(x): + if isna(x).any(): + return np.nan + return np.median(x) + + 
assert_stat_op_calc('median', wrapper, float_frame_with_na, + check_dates=True) + assert_stat_op_calc('median', wrapper, int_frame, check_dtype=False, + check_dates=True) + @pytest.mark.parametrize('method', ['sum', 'mean', 'prod', 'var', 'std', 'skew', 'min', 'max']) def test_stat_operators_attempt_obj_array(self, method): - # GH 676 + # GH#676 data = { 'a': [-0.00049987540199591344, -0.0016467257772919831, 0.00067695870775883013], @@ -789,10 +823,44 @@ def test_stat_operators_attempt_obj_array(self, method): if method in ['sum', 'prod']: tm.assert_series_equal(result, expected) - def test_mean(self, float_frame_with_na, float_frame, float_string_frame): - assert_stat_op_calc('mean', np.mean, float_frame_with_na, - check_dates=True) - assert_stat_op_api('mean', float_frame, float_string_frame) + @pytest.mark.parametrize('op', ['mean', 'std', 'var', + 'skew', 'kurt', 'sem']) + def test_mixed_ops(self, op): + # GH#16116 + df = DataFrame({'int': [1, 2, 3, 4], + 'float': [1., 2., 3., 4.], + 'str': ['a', 'b', 'c', 'd']}) + + result = getattr(df, op)() + assert len(result) == 2 + + with pd.option_context('use_bottleneck', False): + result = getattr(df, op)() + assert len(result) == 2 + + def test_reduce_mixed_frame(self): + # GH 6806 + df = DataFrame({ + 'bool_data': [True, True, False, False, False], + 'int_data': [10, 20, 30, 40, 50], + 'string_data': ['a', 'b', 'c', 'd', 'e'], + }) + df.reindex(columns=['bool_data', 'int_data', 'string_data']) + test = df.sum(axis=0) + tm.assert_numpy_array_equal(test.values, + np.array([2, 150, 'abcde'], dtype=object)) + tm.assert_series_equal(test, df.T.sum(axis=1)) + + def test_nunique(self): + df = DataFrame({'A': [1, 1, 1], + 'B': [1, 2, 3], + 'C': [1, np.nan, 3]}) + tm.assert_series_equal(df.nunique(), Series({'A': 1, 'B': 3, 'C': 2})) + tm.assert_series_equal(df.nunique(dropna=False), + Series({'A': 1, 'B': 3, 'C': 3})) + tm.assert_series_equal(df.nunique(axis=1), Series({0: 1, 1: 2, 2: 2})) + 
tm.assert_series_equal(df.nunique(axis=1, dropna=False), + Series({0: 1, 1: 3, 2: 2})) @pytest.mark.parametrize('tz', [None, 'UTC']) def test_mean_mixed_datetime_numeric(self, tz): @@ -813,103 +881,7 @@ def test_mean_excludeds_datetimes(self, tz): expected = pd.Series() tm.assert_series_equal(result, expected) - def test_product(self, float_frame_with_na, float_frame, - float_string_frame): - assert_stat_op_calc('product', np.prod, float_frame_with_na) - assert_stat_op_api('product', float_frame, float_string_frame) - - # TODO: Ensure warning isn't emitted in the first place - @pytest.mark.filterwarnings("ignore:All-NaN:RuntimeWarning") - def test_median(self, float_frame_with_na, float_frame, - float_string_frame): - def wrapper(x): - if isna(x).any(): - return np.nan - return np.median(x) - - assert_stat_op_calc('median', wrapper, float_frame_with_na, - check_dates=True) - assert_stat_op_api('median', float_frame, float_string_frame) - - def test_min(self, float_frame_with_na, int_frame, - float_frame, float_string_frame): - with warnings.catch_warnings(record=True): - warnings.simplefilter("ignore", RuntimeWarning) - assert_stat_op_calc('min', np.min, float_frame_with_na, - check_dates=True) - assert_stat_op_calc('min', np.min, int_frame) - assert_stat_op_api('min', float_frame, float_string_frame) - - def test_cummin(self, datetime_frame): - datetime_frame.loc[5:10, 0] = np.nan - datetime_frame.loc[10:15, 1] = np.nan - datetime_frame.loc[15:, 2] = np.nan - - # axis = 0 - cummin = datetime_frame.cummin() - expected = datetime_frame.apply(Series.cummin) - tm.assert_frame_equal(cummin, expected) - - # axis = 1 - cummin = datetime_frame.cummin(axis=1) - expected = datetime_frame.apply(Series.cummin, axis=1) - tm.assert_frame_equal(cummin, expected) - - # it works - df = DataFrame({'A': np.arange(20)}, index=np.arange(20)) - result = df.cummin() # noqa - - # fix issue - cummin_xs = datetime_frame.cummin(axis=1) - assert np.shape(cummin_xs) == 
np.shape(datetime_frame) - - def test_cummax(self, datetime_frame): - datetime_frame.loc[5:10, 0] = np.nan - datetime_frame.loc[10:15, 1] = np.nan - datetime_frame.loc[15:, 2] = np.nan - - # axis = 0 - cummax = datetime_frame.cummax() - expected = datetime_frame.apply(Series.cummax) - tm.assert_frame_equal(cummax, expected) - - # axis = 1 - cummax = datetime_frame.cummax(axis=1) - expected = datetime_frame.apply(Series.cummax, axis=1) - tm.assert_frame_equal(cummax, expected) - - # it works - df = DataFrame({'A': np.arange(20)}, index=np.arange(20)) - result = df.cummax() # noqa - - # fix issue - cummax_xs = datetime_frame.cummax(axis=1) - assert np.shape(cummax_xs) == np.shape(datetime_frame) - - def test_max(self, float_frame_with_na, int_frame, - float_frame, float_string_frame): - with warnings.catch_warnings(record=True): - warnings.simplefilter("ignore", RuntimeWarning) - assert_stat_op_calc('max', np.max, float_frame_with_na, - check_dates=True) - assert_stat_op_calc('max', np.max, int_frame) - assert_stat_op_api('max', float_frame, float_string_frame) - - def test_mad(self, float_frame_with_na, float_frame, float_string_frame): - f = lambda x: np.abs(x - x.mean()).mean() - assert_stat_op_calc('mad', f, float_frame_with_na) - assert_stat_op_api('mad', float_frame, float_string_frame) - - def test_var_std(self, float_frame_with_na, datetime_frame, float_frame, - float_string_frame): - alt = lambda x: np.var(x, ddof=1) - assert_stat_op_calc('var', alt, float_frame_with_na) - assert_stat_op_api('var', float_frame, float_string_frame) - - alt = lambda x: np.std(x, ddof=1) - assert_stat_op_calc('std', alt, float_frame_with_na) - assert_stat_op_api('std', float_frame, float_string_frame) - + def test_var_std(self, datetime_frame): result = datetime_frame.std(ddof=4) expected = datetime_frame.apply(lambda x: x.std(ddof=4)) tm.assert_almost_equal(result, expected) @@ -926,6 +898,7 @@ def test_var_std(self, float_frame_with_na, datetime_frame, float_frame, result = 
nanops.nanvar(arr, axis=0) assert not (result < 0).any() + @pytest.mark.skipif(PY2, reason="pytest.raises match regex fails") @pytest.mark.parametrize( "meth", ['sem', 'var', 'std']) def test_numeric_only_flag(self, meth): @@ -947,84 +920,14 @@ def test_numeric_only_flag(self, meth): tm.assert_series_equal(expected, result) # df1 has all numbers, df2 has a letter inside - pytest.raises(TypeError, lambda: getattr(df1, meth)( - axis=1, numeric_only=False)) - pytest.raises(TypeError, lambda: getattr(df2, meth)( - axis=1, numeric_only=False)) - - @pytest.mark.parametrize('op', ['mean', 'std', 'var', - 'skew', 'kurt', 'sem']) - def test_mixed_ops(self, op): - # GH 16116 - df = DataFrame({'int': [1, 2, 3, 4], - 'float': [1., 2., 3., 4.], - 'str': ['a', 'b', 'c', 'd']}) - - result = getattr(df, op)() - assert len(result) == 2 - - with pd.option_context('use_bottleneck', False): - result = getattr(df, op)() - assert len(result) == 2 - - def test_cumsum(self, datetime_frame): - datetime_frame.loc[5:10, 0] = np.nan - datetime_frame.loc[10:15, 1] = np.nan - datetime_frame.loc[15:, 2] = np.nan - - # axis = 0 - cumsum = datetime_frame.cumsum() - expected = datetime_frame.apply(Series.cumsum) - tm.assert_frame_equal(cumsum, expected) - - # axis = 1 - cumsum = datetime_frame.cumsum(axis=1) - expected = datetime_frame.apply(Series.cumsum, axis=1) - tm.assert_frame_equal(cumsum, expected) - - # works - df = DataFrame({'A': np.arange(20)}, index=np.arange(20)) - result = df.cumsum() # noqa - - # fix issue - cumsum_xs = datetime_frame.cumsum(axis=1) - assert np.shape(cumsum_xs) == np.shape(datetime_frame) - - def test_cumprod(self, datetime_frame): - datetime_frame.loc[5:10, 0] = np.nan - datetime_frame.loc[10:15, 1] = np.nan - datetime_frame.loc[15:, 2] = np.nan - - # axis = 0 - cumprod = datetime_frame.cumprod() - expected = datetime_frame.apply(Series.cumprod) - tm.assert_frame_equal(cumprod, expected) - - # axis = 1 - cumprod = datetime_frame.cumprod(axis=1) - expected = 
datetime_frame.apply(Series.cumprod, axis=1) - tm.assert_frame_equal(cumprod, expected) - - # fix issue - cumprod_xs = datetime_frame.cumprod(axis=1) - assert np.shape(cumprod_xs) == np.shape(datetime_frame) - - # ints - df = datetime_frame.fillna(0).astype(int) - df.cumprod(0) - df.cumprod(1) - - # ints32 - df = datetime_frame.fillna(0).astype(np.int32) - df.cumprod(0) - df.cumprod(1) - - def test_sem(self, float_frame_with_na, datetime_frame, - float_frame, float_string_frame): - alt = lambda x: np.std(x, ddof=1) / np.sqrt(len(x)) - assert_stat_op_calc('sem', alt, float_frame_with_na) - assert_stat_op_api('sem', float_frame, float_string_frame) - + msg = r"unsupported operand type\(s\) for -: 'float' and 'str'" + with pytest.raises(TypeError, match=msg): + getattr(df1, meth)(axis=1, numeric_only=False) + msg = "could not convert string to float: 'a'" + with pytest.raises(TypeError, match=msg): + getattr(df2, meth)(axis=1, numeric_only=False) + + def test_sem(self, datetime_frame): result = datetime_frame.sem(ddof=4) expected = datetime_frame.apply( lambda x: x.std(ddof=4) / np.sqrt(len(x))) @@ -1039,29 +942,7 @@ def test_sem(self, float_frame_with_na, datetime_frame, assert not (result < 0).any() @td.skip_if_no_scipy - def test_skew(self, float_frame_with_na, float_frame, float_string_frame): - from scipy.stats import skew - - def alt(x): - if len(x) < 3: - return np.nan - return skew(x, bias=False) - - assert_stat_op_calc('skew', alt, float_frame_with_na) - assert_stat_op_api('skew', float_frame, float_string_frame) - - @td.skip_if_no_scipy - def test_kurt(self, float_frame_with_na, float_frame, float_string_frame): - from scipy.stats import kurtosis - - def alt(x): - if len(x) < 4: - return np.nan - return kurtosis(x, bias=False) - - assert_stat_op_calc('kurt', alt, float_frame_with_na) - assert_stat_op_api('kurt', float_frame, float_string_frame) - + def test_kurt(self): index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]], codes=[[0, 0, 0, 0, 
0, 0], [0, 1, 2, 0, 1, 2], @@ -1218,7 +1099,9 @@ def test_operators_timedelta64(self): assert df['off1'].dtype == 'timedelta64[ns]' assert df['off2'].dtype == 'timedelta64[ns]' - def test_sum_corner(self, empty_frame): + def test_sum_corner(self): + empty_frame = DataFrame() + axis0 = empty_frame.sum(0) axis1 = empty_frame.sum(1) assert isinstance(axis0, Series) @@ -1323,20 +1206,146 @@ def test_stats_mixed_type(self, float_string_frame): float_string_frame.mean(1) float_string_frame.skew(1) - # TODO: Ensure warning isn't emitted in the first place - @pytest.mark.filterwarnings("ignore:All-NaN:RuntimeWarning") - def test_median_corner(self, int_frame, float_frame, float_string_frame): - def wrapper(x): - if isna(x).any(): - return np.nan - return np.median(x) + def test_sum_bools(self): + df = DataFrame(index=lrange(1), columns=lrange(10)) + bools = isna(df) + assert bools.sum(axis=1)[0] == 10 - assert_stat_op_calc('median', wrapper, int_frame, check_dtype=False, - check_dates=True) - assert_stat_op_api('median', float_frame, float_string_frame) + # --------------------------------------------------------------------- + # Cumulative Reductions - cumsum, cummax, ... 
+ + def test_cumsum_corner(self): + dm = DataFrame(np.arange(20).reshape(4, 5), + index=lrange(4), columns=lrange(5)) + # ?(wesm) + result = dm.cumsum() # noqa + + def test_cumsum(self, datetime_frame): + datetime_frame.loc[5:10, 0] = np.nan + datetime_frame.loc[10:15, 1] = np.nan + datetime_frame.loc[15:, 2] = np.nan + + # axis = 0 + cumsum = datetime_frame.cumsum() + expected = datetime_frame.apply(Series.cumsum) + tm.assert_frame_equal(cumsum, expected) + + # axis = 1 + cumsum = datetime_frame.cumsum(axis=1) + expected = datetime_frame.apply(Series.cumsum, axis=1) + tm.assert_frame_equal(cumsum, expected) + # works + df = DataFrame({'A': np.arange(20)}, index=np.arange(20)) + result = df.cumsum() # noqa + + # fix issue + cumsum_xs = datetime_frame.cumsum(axis=1) + assert np.shape(cumsum_xs) == np.shape(datetime_frame) + + def test_cumprod(self, datetime_frame): + datetime_frame.loc[5:10, 0] = np.nan + datetime_frame.loc[10:15, 1] = np.nan + datetime_frame.loc[15:, 2] = np.nan + + # axis = 0 + cumprod = datetime_frame.cumprod() + expected = datetime_frame.apply(Series.cumprod) + tm.assert_frame_equal(cumprod, expected) + + # axis = 1 + cumprod = datetime_frame.cumprod(axis=1) + expected = datetime_frame.apply(Series.cumprod, axis=1) + tm.assert_frame_equal(cumprod, expected) + + # fix issue + cumprod_xs = datetime_frame.cumprod(axis=1) + assert np.shape(cumprod_xs) == np.shape(datetime_frame) + + # ints + df = datetime_frame.fillna(0).astype(int) + df.cumprod(0) + df.cumprod(1) + + # ints32 + df = datetime_frame.fillna(0).astype(np.int32) + df.cumprod(0) + df.cumprod(1) + + def test_cummin(self, datetime_frame): + datetime_frame.loc[5:10, 0] = np.nan + datetime_frame.loc[10:15, 1] = np.nan + datetime_frame.loc[15:, 2] = np.nan + + # axis = 0 + cummin = datetime_frame.cummin() + expected = datetime_frame.apply(Series.cummin) + tm.assert_frame_equal(cummin, expected) + + # axis = 1 + cummin = datetime_frame.cummin(axis=1) + expected = 
datetime_frame.apply(Series.cummin, axis=1) + tm.assert_frame_equal(cummin, expected) + + # it works + df = DataFrame({'A': np.arange(20)}, index=np.arange(20)) + result = df.cummin() # noqa + + # fix issue + cummin_xs = datetime_frame.cummin(axis=1) + assert np.shape(cummin_xs) == np.shape(datetime_frame) + + def test_cummax(self, datetime_frame): + datetime_frame.loc[5:10, 0] = np.nan + datetime_frame.loc[10:15, 1] = np.nan + datetime_frame.loc[15:, 2] = np.nan + + # axis = 0 + cummax = datetime_frame.cummax() + expected = datetime_frame.apply(Series.cummax) + tm.assert_frame_equal(cummax, expected) + + # axis = 1 + cummax = datetime_frame.cummax(axis=1) + expected = datetime_frame.apply(Series.cummax, axis=1) + tm.assert_frame_equal(cummax, expected) + + # it works + df = DataFrame({'A': np.arange(20)}, index=np.arange(20)) + result = df.cummax() # noqa + + # fix issue + cummax_xs = datetime_frame.cummax(axis=1) + assert np.shape(cummax_xs) == np.shape(datetime_frame) + + # --------------------------------------------------------------------- # Miscellanea + def test_count(self): + # corner case + frame = DataFrame() + ct1 = frame.count(1) + assert isinstance(ct1, Series) + + ct2 = frame.count(0) + assert isinstance(ct2, Series) + + # GH#423 + df = DataFrame(index=lrange(10)) + result = df.count(1) + expected = Series(0, index=df.index) + tm.assert_series_equal(result, expected) + + df = DataFrame(columns=lrange(10)) + result = df.count(0) + expected = Series(0, index=df.columns) + tm.assert_series_equal(result, expected) + + df = DataFrame() + result = df.count() + expected = Series(0, index=[]) + tm.assert_series_equal(result, expected) + def test_count_objects(self, float_string_frame): dm = DataFrame(float_string_frame._series) df = DataFrame(float_string_frame._series) @@ -1344,19 +1353,26 @@ def test_count_objects(self, float_string_frame): tm.assert_series_equal(dm.count(), df.count()) tm.assert_series_equal(dm.count(1), df.count(1)) - def 
test_cumsum_corner(self): - dm = DataFrame(np.arange(20).reshape(4, 5), - index=lrange(4), columns=lrange(5)) - # ?(wesm) - result = dm.cumsum() # noqa + def test_pct_change(self): + # GH#11150 + pnl = DataFrame([np.arange(0, 40, 10), + np.arange(0, 40, 10), + np.arange(0, 40, 10)]).astype(np.float64) + pnl.iat[1, 0] = np.nan + pnl.iat[1, 1] = np.nan + pnl.iat[2, 3] = 60 - def test_sum_bools(self): - df = DataFrame(index=lrange(1), columns=lrange(10)) - bools = isna(df) - assert bools.sum(axis=1)[0] == 10 + for axis in range(2): + expected = pnl.ffill(axis=axis) / pnl.ffill(axis=axis).shift( + axis=axis) - 1 + result = pnl.pct_change(axis=axis, fill_method='pad') + + tm.assert_frame_equal(result, expected) + # ---------------------------------------------------------------------- # Index of max / min + @pytest.mark.skipif(PY2, reason="pytest.raises match regex fails") def test_idxmin(self, float_frame, int_frame): frame = float_frame frame.loc[5:10] = np.nan @@ -1369,8 +1385,11 @@ def test_idxmin(self, float_frame, int_frame): skipna=skipna) tm.assert_series_equal(result, expected) - pytest.raises(ValueError, frame.idxmin, axis=2) + msg = "No axis named 2 for object type <class 'type'>" + with pytest.raises(ValueError, match=msg): + frame.idxmin(axis=2) + @pytest.mark.skipif(PY2, reason="pytest.raises match regex fails") def test_idxmax(self, float_frame, int_frame): frame = float_frame frame.loc[5:10] = np.nan @@ -1383,7 +1402,9 @@ def test_idxmax(self, float_frame, int_frame): skipna=skipna) tm.assert_series_equal(result, expected) - pytest.raises(ValueError, frame.idxmax, axis=2) + msg = "No axis named 2 for object type <class 'type'>" + with pytest.raises(ValueError, match=msg): + frame.idxmax(axis=2) # ---------------------------------------------------------------------- # Logical reductions @@ -1442,6 +1463,26 @@ def test_any_datetime(self): expected = Series([True, True, True, False]) tm.assert_series_equal(result, expected) + def 
test_any_all_bool_only(self): + + # GH 25101 + df = DataFrame({"col1": [1, 2, 3], + "col2": [4, 5, 6], + "col3": [None, None, None]}) + + result = df.all(bool_only=True) + expected = Series(dtype=np.bool) + tm.assert_series_equal(result, expected) + + df = DataFrame({"col1": [1, 2, 3], + "col2": [4, 5, 6], + "col3": [None, None, None], + "col4": [False, False, True]}) + + result = df.all(bool_only=True) + expected = Series({"col4": False}) + tm.assert_series_equal(result, expected) + @pytest.mark.parametrize('func, data, expected', [ (np.any, {}, False), (np.all, {}, True), @@ -1680,7 +1721,9 @@ def test_isin_empty_datetimelike(self): result = df1_td.isin(df3) tm.assert_frame_equal(result, expected) + # --------------------------------------------------------------------- # Rounding + def test_round(self): # GH 2665 @@ -1810,6 +1853,17 @@ def test_numpy_round(self): with pytest.raises(ValueError, match=msg): np.round(df, decimals=0, out=df) + @pytest.mark.xfail( + PY2 and is_platform_windows(), reason="numpy/numpy#7882", + raises=AssertionError, strict=True) + def test_numpy_round_nan(self): + # See gh-14197 + df = Series([1.53, np.nan, 0.06]).to_frame() + with tm.assert_produces_warning(None): + result = df.round() + expected = Series([2., np.nan, 0.]).to_frame() + tm.assert_frame_equal(result, expected) + def test_round_mixed_type(self): # GH 11885 df = DataFrame({'col1': [1.1, 2.2, 3.3, 4.4], @@ -1836,7 +1890,9 @@ def test_round_issue(self): tm.assert_index_equal(rounded.index, dfs.index) decimals = pd.Series([1, 0, 2], index=['A', 'B', 'A']) - pytest.raises(ValueError, df.round, decimals) + msg = "Index of decimals must be unique" + with pytest.raises(ValueError, match=msg): + df.round(decimals) def test_built_in_round(self): if not compat.PY3: @@ -1868,22 +1924,9 @@ def test_round_nonunique_categorical(self): tm.assert_frame_equal(result, expected) - def test_pct_change(self): - # GH 11150 - pnl = DataFrame([np.arange(0, 40, 10), np.arange(0, 40, 10), 
np.arange( - 0, 40, 10)]).astype(np.float64) - pnl.iat[1, 0] = np.nan - pnl.iat[1, 1] = np.nan - pnl.iat[2, 3] = 60 - - for axis in range(2): - expected = pnl.ffill(axis=axis) / pnl.ffill(axis=axis).shift( - axis=axis) - 1 - result = pnl.pct_change(axis=axis, fill_method='pad') - - tm.assert_frame_equal(result, expected) - + # --------------------------------------------------------------------- # Clip + def test_clip(self, float_frame): median = float_frame.median().median() original = float_frame.copy() @@ -2056,7 +2099,9 @@ def test_clip_with_na_args(self, float_frame): 'col_2': [np.nan, np.nan, np.nan]}) tm.assert_frame_equal(result, expected) + # --------------------------------------------------------------------- # Matrix-like + def test_dot(self): a = DataFrame(np.random.randn(3, 4), index=['a', 'b', 'c'], columns=['p', 'q', 'r', 's']) diff --git a/pandas/tests/frame/test_api.py b/pandas/tests/frame/test_api.py index 0934dd20638e4..118341276d799 100644 --- a/pandas/tests/frame/test_api.py +++ b/pandas/tests/frame/test_api.py @@ -9,7 +9,7 @@ import numpy as np import pytest -from pandas.compat import long, lrange, range +from pandas.compat import PY2, long, lrange, range import pandas as pd from pandas import ( @@ -142,10 +142,16 @@ def test_tab_completion(self): assert key not in dir(df) assert isinstance(df.__getitem__('A'), pd.DataFrame) - def test_not_hashable(self, empty_frame): + def test_not_hashable(self): + empty_frame = DataFrame() + df = self.klass([1]) - pytest.raises(TypeError, hash, df) - pytest.raises(TypeError, hash, empty_frame) + msg = ("'(Sparse)?DataFrame' objects are mutable, thus they cannot be" + " hashed") + with pytest.raises(TypeError, match=msg): + hash(df) + with pytest.raises(TypeError, match=msg): + hash(empty_frame) def test_new_empty_index(self): df1 = self.klass(np.random.randn(0, 3)) @@ -169,9 +175,12 @@ def test_get_agg_axis(self, float_frame): idx = float_frame._get_agg_axis(1) assert idx is float_frame.index - 
pytest.raises(ValueError, float_frame._get_agg_axis, 2) + msg = r"Axis must be 0 or 1 \(got 2\)" + with pytest.raises(ValueError, match=msg): + float_frame._get_agg_axis(2) - def test_nonzero(self, float_frame, float_string_frame, empty_frame): + def test_nonzero(self, float_frame, float_string_frame): + empty_frame = DataFrame() assert empty_frame.empty assert not float_frame.empty @@ -351,12 +360,15 @@ def test_transpose(self, float_frame): for col, s in compat.iteritems(mixed_T): assert s.dtype == np.object_ + @pytest.mark.skipif(PY2, reason="pytest.raises match regex fails") def test_swapaxes(self): df = self.klass(np.random.randn(10, 5)) self._assert_frame_equal(df.T, df.swapaxes(0, 1)) self._assert_frame_equal(df.T, df.swapaxes(1, 0)) self._assert_frame_equal(df, df.swapaxes(0, 0)) - pytest.raises(ValueError, df.swapaxes, 2, 5) + msg = "No axis named 2 for object type <class 'type'>" + with pytest.raises(ValueError, match=msg): + df.swapaxes(2, 5) def test_axis_aliases(self, float_frame): f = float_frame diff --git a/pandas/tests/frame/test_apply.py b/pandas/tests/frame/test_apply.py index ade527a16c902..4d1e3e7ae1f38 100644 --- a/pandas/tests/frame/test_apply.py +++ b/pandas/tests/frame/test_apply.py @@ -74,8 +74,10 @@ def test_apply_mixed_datetimelike(self): result = df.apply(lambda x: x, axis=1) assert_frame_equal(result, df) - def test_apply_empty(self, float_frame, empty_frame): + def test_apply_empty(self, float_frame): # empty + empty_frame = DataFrame() + applied = empty_frame.apply(np.sqrt) assert applied.empty @@ -97,8 +99,10 @@ def test_apply_empty(self, float_frame, empty_frame): result = expected.apply(lambda x: x['a'], axis=1) assert_frame_equal(expected, result) - def test_apply_with_reduce_empty(self, empty_frame): + def test_apply_with_reduce_empty(self): # reduce with an empty DataFrame + empty_frame = DataFrame() + x = [] result = empty_frame.apply(x.append, axis=1, result_type='expand') assert_frame_equal(result, empty_frame) @@ -116,7 
+120,9 @@ def test_apply_with_reduce_empty(self, empty_frame): # Ensure that x.append hasn't been called assert x == [] - def test_apply_deprecate_reduce(self, empty_frame): + def test_apply_deprecate_reduce(self): + empty_frame = DataFrame() + x = [] with tm.assert_produces_warning(FutureWarning): empty_frame.apply(x.append, axis=1, reduce=True) @@ -318,6 +324,13 @@ def test_apply_reduce_Series(self, float_frame): result = float_frame.apply(np.mean, axis=1) assert_series_equal(result, expected) + def test_apply_reduce_rows_to_dict(self): + # GH 25196 + data = pd.DataFrame([[1, 2], [3, 4]]) + expected = pd.Series([{0: 1, 1: 3}, {0: 2, 1: 4}]) + result = data.apply(dict) + assert_series_equal(result, expected) + def test_apply_differently_indexed(self): df = DataFrame(np.random.randn(20, 10)) diff --git a/pandas/tests/frame/test_axis_select_reindex.py b/pandas/tests/frame/test_axis_select_reindex.py index dea925dcde676..fb00776b33cbb 100644 --- a/pandas/tests/frame/test_axis_select_reindex.py +++ b/pandas/tests/frame/test_axis_select_reindex.py @@ -7,7 +7,7 @@ import numpy as np import pytest -from pandas.compat import lrange, lzip, u +from pandas.compat import PY2, lrange, lzip, u from pandas.errors import PerformanceWarning import pandas as pd @@ -38,8 +38,11 @@ def test_drop_names(self): assert obj.columns.name == 'second' assert list(df.columns) == ['d', 'e', 'f'] - pytest.raises(KeyError, df.drop, ['g']) - pytest.raises(KeyError, df.drop, ['g'], 1) + msg = r"\['g'\] not found in axis" + with pytest.raises(KeyError, match=msg): + df.drop(['g']) + with pytest.raises(KeyError, match=msg): + df.drop(['g'], 1) # errors = 'ignore' dropped = df.drop(['g'], errors='ignore') @@ -84,10 +87,14 @@ def test_drop(self): assert_frame_equal(simple.drop( [0, 3], axis='index'), simple.loc[[1, 2], :]) - pytest.raises(KeyError, simple.drop, 5) - pytest.raises(KeyError, simple.drop, 'C', 1) - pytest.raises(KeyError, simple.drop, [1, 5]) - pytest.raises(KeyError, simple.drop, ['A', 
'C'], 1) + with pytest.raises(KeyError, match=r"\[5\] not found in axis"): + simple.drop(5) + with pytest.raises(KeyError, match=r"\['C'\] not found in axis"): + simple.drop('C', 1) + with pytest.raises(KeyError, match=r"\[5\] not found in axis"): + simple.drop([1, 5]) + with pytest.raises(KeyError, match=r"\['C'\] not found in axis"): + simple.drop(['A', 'C'], 1) # errors = 'ignore' assert_frame_equal(simple.drop(5, errors='ignore'), simple) @@ -444,7 +451,9 @@ def test_reindex_dups(self): assert_frame_equal(result, expected) # reindex fails - pytest.raises(ValueError, df.reindex, index=list(range(len(df)))) + msg = "cannot reindex from a duplicate axis" + with pytest.raises(ValueError, match=msg): + df.reindex(index=list(range(len(df)))) def test_reindex_axis_style(self): # https://github.com/pandas-dev/pandas/issues/12392 @@ -963,10 +972,15 @@ def test_take(self): assert_frame_equal(result, expected, check_names=False) # illegal indices - pytest.raises(IndexError, df.take, [3, 1, 2, 30], axis=0) - pytest.raises(IndexError, df.take, [3, 1, 2, -31], axis=0) - pytest.raises(IndexError, df.take, [3, 1, 2, 5], axis=1) - pytest.raises(IndexError, df.take, [3, 1, 2, -5], axis=1) + msg = "indices are out-of-bounds" + with pytest.raises(IndexError, match=msg): + df.take([3, 1, 2, 30], axis=0) + with pytest.raises(IndexError, match=msg): + df.take([3, 1, 2, -31], axis=0) + with pytest.raises(IndexError, match=msg): + df.take([3, 1, 2, 5], axis=1) + with pytest.raises(IndexError, match=msg): + df.take([3, 1, 2, -5], axis=1) # mixed-dtype order = [4, 1, 2, 0, 3] @@ -1037,6 +1051,7 @@ def test_reindex_corner(self): smaller = self.intframe.reindex(columns=['A', 'B', 'E']) assert smaller['E'].dtype == np.float64 + @pytest.mark.skipif(PY2, reason="pytest.raises match regex fails") def test_reindex_axis(self): cols = ['A', 'B', 'E'] with tm.assert_produces_warning(FutureWarning) as m: @@ -1052,7 +1067,9 @@ def test_reindex_axis(self): reindexed2 = 
self.intframe.reindex(index=rows) assert_frame_equal(reindexed1, reindexed2) - pytest.raises(ValueError, self.intframe.reindex_axis, rows, axis=2) + msg = "No axis named 2 for object type <class 'type'>" + with pytest.raises(ValueError, match=msg): + self.intframe.reindex_axis(rows, axis=2) # no-op case cols = self.frame.columns.copy() diff --git a/pandas/tests/frame/test_block_internals.py b/pandas/tests/frame/test_block_internals.py index 5419f4d5127f6..4b06d2e35cdfc 100644 --- a/pandas/tests/frame/test_block_internals.py +++ b/pandas/tests/frame/test_block_internals.py @@ -274,10 +274,12 @@ def f(dtype): columns=["A", "B", "C"], dtype=dtype) - pytest.raises(NotImplementedError, f, - [("A", "datetime64[h]"), - ("B", "str"), - ("C", "int32")]) + msg = ("compound dtypes are not implemented in the DataFrame" + " constructor") + with pytest.raises(NotImplementedError, match=msg): + f([("A", "datetime64[h]"), + ("B", "str"), + ("C", "int32")]) # these work (though results may be unexpected) f('int64') @@ -347,7 +349,9 @@ def test_copy(self, float_frame, float_string_frame): copy = float_string_frame.copy() assert copy._data is not float_string_frame._data - def test_pickle(self, float_string_frame, empty_frame, timezone_frame): + def test_pickle(self, float_string_frame, timezone_frame): + empty_frame = DataFrame() + unpickled = tm.round_trip_pickle(float_string_frame) assert_frame_equal(float_string_frame, unpickled) diff --git a/pandas/tests/frame/test_combine_concat.py b/pandas/tests/frame/test_combine_concat.py index 59497153c8524..c2364dc135a9a 100644 --- a/pandas/tests/frame/test_combine_concat.py +++ b/pandas/tests/frame/test_combine_concat.py @@ -504,6 +504,16 @@ def test_concat_numerical_names(self): names=[1, 2])) tm.assert_frame_equal(result, expected) + def test_concat_astype_dup_col(self): + # gh 23049 + df = pd.DataFrame([{'a': 'b'}]) + df = pd.concat([df, df], axis=1) + + result = df.astype('category') + expected = pd.DataFrame(np.array(["b", 
"b"]).reshape(1, 2), + columns=["a", "a"]).astype("category") + tm.assert_frame_equal(result, expected) + class TestDataFrameCombineFirst(TestData): diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py index 90ad48cac3a5f..fc642d211b30c 100644 --- a/pandas/tests/frame/test_constructors.py +++ b/pandas/tests/frame/test_constructors.py @@ -2,6 +2,7 @@ from __future__ import print_function +from collections import OrderedDict from datetime import datetime, timedelta import functools import itertools @@ -11,8 +12,8 @@ import pytest from pandas.compat import ( - PY3, PY36, OrderedDict, is_platform_little_endian, lmap, long, lrange, - lzip, range, zip) + PY2, PY3, PY36, is_platform_little_endian, lmap, long, lrange, lzip, range, + zip) from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike from pandas.core.dtypes.common import is_integer_dtype @@ -58,8 +59,9 @@ def test_constructor_cast_failure(self): df['foo'] = np.ones((4, 2)).tolist() # this is not ok - pytest.raises(ValueError, df.__setitem__, tuple(['test']), - np.ones((4, 2))) + msg = "Wrong number of items passed 2, placement implies 1" + with pytest.raises(ValueError, match=msg): + df['test'] = np.ones((4, 2)) # this is ok df['foo2'] = np.ones((4, 2)).tolist() @@ -247,7 +249,7 @@ def test_constructor_dict(self): assert isna(frame['col3']).all() # Corner cases - assert len(DataFrame({})) == 0 + assert len(DataFrame()) == 0 # mix dict and array, wrong size - no spec for which error should raise # first @@ -1183,6 +1185,13 @@ def test_constructor_mixed_dict_and_Series(self): index=['a', 'b']) tm.assert_frame_equal(result, expected) + def test_constructor_mixed_type_rows(self): + # Issue 25075 + data = [[1, 2], (3, 4)] + result = DataFrame(data) + expected = DataFrame([[1, 2], [3, 4]]) + tm.assert_frame_equal(result, expected) + def test_constructor_tuples(self): result = DataFrame({'A': [(1, 2), (3, 4)]}) expected = DataFrame({'A': Series([(1, 2), (3, 
4)])}) @@ -1252,7 +1261,9 @@ def test_constructor_Series_named(self): expected = DataFrame({0: s}) tm.assert_frame_equal(df, expected) - pytest.raises(ValueError, DataFrame, s, columns=[1, 2]) + msg = r"Shape of passed values is \(10, 1\), indices imply \(10, 2\)" + with pytest.raises(ValueError, match=msg): + DataFrame(s, columns=[1, 2]) # #2234 a = Series([], name='x') @@ -1426,8 +1437,10 @@ def test_constructor_column_duplicates(self): tm.assert_frame_equal(idf, edf) - pytest.raises(ValueError, DataFrame.from_dict, - OrderedDict([('b', 8), ('a', 5), ('a', 6)])) + msg = "If using all scalar values, you must pass an index" + with pytest.raises(ValueError, match=msg): + DataFrame.from_dict( + OrderedDict([('b', 8), ('a', 5), ('a', 6)])) def test_constructor_empty_with_string_dtype(self): # GH 9428 @@ -1458,8 +1471,11 @@ def test_constructor_single_value(self): dtype=object), index=[1, 2], columns=['a', 'c'])) - pytest.raises(ValueError, DataFrame, 'a', [1, 2]) - pytest.raises(ValueError, DataFrame, 'a', columns=['a', 'c']) + msg = "DataFrame constructor not properly called!" 
+ with pytest.raises(ValueError, match=msg): + DataFrame('a', [1, 2]) + with pytest.raises(ValueError, match=msg): + DataFrame('a', columns=['a', 'c']) msg = 'incompatible data and dtype' with pytest.raises(TypeError, match=msg): @@ -1685,6 +1701,7 @@ def test_constructor_series_copy(self): assert not (series['A'] == 5).all() + @pytest.mark.skipif(PY2, reason="pytest.raises match regex fails") def test_constructor_with_nas(self): # GH 5016 # na's in indices @@ -1697,9 +1714,11 @@ def check(df): # No NaN found -> error if len(indexer) == 0: - def f(): + msg = ("cannot do label indexing on" + r" <class 'pandas\.core\.indexes\.range\.RangeIndex'>" + r" with these indexers \[nan\] of <class 'float'>") + with pytest.raises(TypeError, match=msg): df.loc[:, np.nan] - pytest.raises(TypeError, f) # single nan should result in Series elif len(indexer) == 1: tm.assert_series_equal(df.iloc[:, indexer[0]], @@ -1775,13 +1794,15 @@ def test_constructor_categorical(self): tm.assert_frame_equal(df, expected) # invalid (shape) - pytest.raises(ValueError, - lambda: DataFrame([Categorical(list('abc')), - Categorical(list('abdefg'))])) + msg = r"Shape of passed values is \(6, 2\), indices imply \(3, 2\)" + with pytest.raises(ValueError, match=msg): + DataFrame([Categorical(list('abc')), + Categorical(list('abdefg'))]) # ndim > 1 - pytest.raises(NotImplementedError, - lambda: Categorical(np.array([list('abcd')]))) + msg = "> 1 ndim Categorical are not supported at this time" + with pytest.raises(NotImplementedError, match=msg): + Categorical(np.array([list('abcd')])) def test_constructor_categorical_series(self): @@ -2157,8 +2178,11 @@ def test_from_records_bad_index_column(self): tm.assert_index_equal(df1.index, Index(df.C)) # should fail - pytest.raises(ValueError, DataFrame.from_records, df, index=[2]) - pytest.raises(KeyError, DataFrame.from_records, df, index=2) + msg = r"Shape of passed values is \(10, 3\), indices imply \(1, 3\)" + with pytest.raises(ValueError, match=msg): + 
DataFrame.from_records(df, index=[2]) + with pytest.raises(KeyError, match=r"^2$"): + DataFrame.from_records(df, index=2) def test_from_records_non_tuple(self): class Record(object): diff --git a/pandas/tests/frame/test_convert_to.py b/pandas/tests/frame/test_convert_to.py index ddf85136126a1..db60fbf0f8563 100644 --- a/pandas/tests/frame/test_convert_to.py +++ b/pandas/tests/frame/test_convert_to.py @@ -10,7 +10,9 @@ from pandas.compat import long -from pandas import DataFrame, MultiIndex, Series, Timestamp, compat, date_range +from pandas import ( + CategoricalDtype, DataFrame, MultiIndex, Series, Timestamp, compat, + date_range) from pandas.tests.frame.common import TestData import pandas.util.testing as tm @@ -73,11 +75,15 @@ def test_to_dict_index_not_unique_with_index_orient(self): # GH22801 # Data loss when indexes are not unique. Raise ValueError. df = DataFrame({'a': [1, 2], 'b': [0.5, 0.75]}, index=['A', 'A']) - pytest.raises(ValueError, df.to_dict, orient='index') + msg = "DataFrame index must be unique for orient='index'" + with pytest.raises(ValueError, match=msg): + df.to_dict(orient='index') def test_to_dict_invalid_orient(self): df = DataFrame({'A': [0, 1]}) - pytest.raises(ValueError, df.to_dict, orient='xinvalid') + msg = "orient 'xinvalid' not understood" + with pytest.raises(ValueError, match=msg): + df.to_dict(orient='xinvalid') def test_to_records_dt64(self): df = DataFrame([["one", "two", "three"], @@ -220,6 +226,12 @@ def test_to_records_with_categorical(self): dtype=[("index", "<i8"), ("A", "<U"), ("B", "<U"), ("C", "<U")])), + # Pass in a dtype instance. + (dict(column_dtypes=np.dtype('unicode')), + np.rec.array([("0", "1", "0.2", "a"), ("1", "2", "1.5", "bc")], + dtype=[("index", "<i8"), ("A", "<U"), + ("B", "<U"), ("C", "<U")])), + # Pass in a dictionary (name-only). 
(dict(column_dtypes={"A": np.int8, "B": np.float32, "C": "<U2"}), np.rec.array([("0", "1", "0.2", "a"), ("1", "2", "1.5", "bc")], @@ -249,6 +261,12 @@ def test_to_records_with_categorical(self): dtype=[("index", "<i8"), ("A", "i1"), ("B", "<f4"), ("C", "O")])), + # Names / indices not in dtype mapping default to array dtype. + (dict(column_dtypes={"A": np.dtype('int8'), "B": np.dtype('float32')}), + np.rec.array([("0", "1", "0.2", "a"), ("1", "2", "1.5", "bc")], + dtype=[("index", "<i8"), ("A", "i1"), + ("B", "<f4"), ("C", "O")])), + # Mixture of everything. (dict(column_dtypes={"A": np.int8, "B": np.float32}, index_dtypes="<U2"), @@ -258,17 +276,26 @@ def test_to_records_with_categorical(self): # Invalid dype values. (dict(index=False, column_dtypes=list()), - "Invalid dtype \\[\\] specified for column A"), + (ValueError, "Invalid dtype \\[\\] specified for column A")), (dict(index=False, column_dtypes={"A": "int32", "B": 5}), - "Invalid dtype 5 specified for column B"), + (ValueError, "Invalid dtype 5 specified for column B")), + + # Numpy can't handle EA types, so check error is raised + (dict(index=False, column_dtypes={"A": "int32", + "B": CategoricalDtype(['a', 'b'])}), + (ValueError, 'Invalid dtype category specified for column B')), + + # Check that bad types raise + (dict(index=False, column_dtypes={"A": "int32", "B": "foo"}), + (TypeError, 'data type "foo" not understood')), ]) def test_to_records_dtype(self, kwargs, expected): # see gh-18146 df = DataFrame({"A": [1, 2], "B": [0.2, 1.5], "C": ["a", "bc"]}) - if isinstance(expected, str): - with pytest.raises(ValueError, match=expected): + if not isinstance(expected, np.recarray): + with pytest.raises(expected[0], match=expected[1]): df.to_records(**kwargs) else: result = df.to_records(**kwargs) @@ -488,3 +515,17 @@ def test_to_dict_index_dtypes(self, into, expected): result = DataFrame.from_dict(result, orient='index')[cols] expected = DataFrame.from_dict(expected, orient='index')[cols] 
tm.assert_frame_equal(result, expected) + + def test_to_dict_numeric_names(self): + # https://github.com/pandas-dev/pandas/issues/24940 + df = DataFrame({str(i): [i] for i in range(5)}) + result = set(df.to_dict('records')[0].keys()) + expected = set(df.columns) + assert result == expected + + def test_to_dict_wide(self): + # https://github.com/pandas-dev/pandas/issues/24939 + df = DataFrame({('A_{:d}'.format(i)): [i] for i in range(256)}) + result = df.to_dict('records')[0] + expected = {'A_{:d}'.format(i): i for i in range(256)} + assert result == expected diff --git a/pandas/tests/frame/test_dtypes.py b/pandas/tests/frame/test_dtypes.py index a9f8ab47b16de..b37bf02a6b8e7 100644 --- a/pandas/tests/frame/test_dtypes.py +++ b/pandas/tests/frame/test_dtypes.py @@ -2,6 +2,7 @@ from __future__ import print_function +from collections import OrderedDict from datetime import timedelta import numpy as np @@ -66,7 +67,7 @@ def test_empty_frame_dtypes_ftypes(self): assert_series_equal(norows_int_df.ftypes, pd.Series( 'int32:dense', index=list("abc"))) - odict = compat.OrderedDict + odict = OrderedDict df = pd.DataFrame(odict([('a', 1), ('b', True), ('c', 1.0)]), index=[1, 2, 3]) ex_dtypes = pd.Series(odict([('a', np.int64), @@ -100,7 +101,7 @@ def test_datetime_with_tz_dtypes(self): def test_dtypes_are_correct_after_column_slice(self): # GH6525 df = pd.DataFrame(index=range(5), columns=list("abc"), dtype=np.float_) - odict = compat.OrderedDict + odict = OrderedDict assert_series_equal(df.dtypes, pd.Series(odict([('a', np.float_), ('b', np.float_), @@ -153,8 +154,8 @@ def test_select_dtypes_include_using_list_like(self): ei = df[['h', 'i']] assert_frame_equal(ri, ei) - pytest.raises(NotImplementedError, - lambda: df.select_dtypes(include=['period'])) + with pytest.raises(NotImplementedError, match=r"^$"): + df.select_dtypes(include=['period']) def test_select_dtypes_exclude_using_list_like(self): df = DataFrame({'a': list('abc'), @@ -217,8 +218,8 @@ def 
test_select_dtypes_include_using_scalars(self): ei = df[['f']] assert_frame_equal(ri, ei) - pytest.raises(NotImplementedError, - lambda: df.select_dtypes(include='period')) + with pytest.raises(NotImplementedError, match=r"^$"): + df.select_dtypes(include='period') def test_select_dtypes_exclude_using_scalars(self): df = DataFrame({'a': list('abc'), @@ -244,8 +245,8 @@ def test_select_dtypes_exclude_using_scalars(self): ei = df[['a', 'b', 'c', 'd', 'e', 'g', 'h', 'i', 'j', 'k']] assert_frame_equal(ri, ei) - pytest.raises(NotImplementedError, - lambda: df.select_dtypes(exclude='period')) + with pytest.raises(NotImplementedError, match=r"^$"): + df.select_dtypes(exclude='period') def test_select_dtypes_include_exclude_using_scalars(self): df = DataFrame({'a': list('abc'), @@ -295,7 +296,7 @@ def test_select_dtypes_include_exclude_mixed_scalars_lists(self): def test_select_dtypes_duplicate_columns(self): # GH20839 - odict = compat.OrderedDict + odict = OrderedDict df = DataFrame(odict([('a', list('abc')), ('b', list(range(1, 4))), ('c', np.arange(3, 6).astype('u1')), @@ -600,8 +601,12 @@ def test_astype_dict_like(self, dtype_class): # in the keys of the dtype dict dt4 = dtype_class({'b': str, 2: str}) dt5 = dtype_class({'e': str}) - pytest.raises(KeyError, df.astype, dt4) - pytest.raises(KeyError, df.astype, dt5) + msg = ("Only a column name can be used for the key in a dtype mappings" + " argument") + with pytest.raises(KeyError, match=msg): + df.astype(dt4) + with pytest.raises(KeyError, match=msg): + df.astype(dt5) assert_frame_equal(df, original) # if the dtypes provided are the same as the original dtypes, the diff --git a/pandas/tests/frame/test_duplicates.py b/pandas/tests/frame/test_duplicates.py index f61dbbdb989e4..3396670fb5879 100644 --- a/pandas/tests/frame/test_duplicates.py +++ b/pandas/tests/frame/test_duplicates.py @@ -182,6 +182,17 @@ def test_drop_duplicates(): assert df.duplicated(keep=keep).sum() == 0 +def test_duplicated_on_empty_frame(): + # GH 
25184 + + df = DataFrame(columns=['a', 'b']) + dupes = df.duplicated('a') + + result = df[dupes] + expected = df.copy() + tm.assert_frame_equal(result, expected) + + def test_drop_duplicates_with_duplicate_column_names(): # GH17836 df = DataFrame([ diff --git a/pandas/tests/frame/test_indexing.py b/pandas/tests/frame/test_indexing.py index 19b8ae4eb6e0f..ffe54f7a94307 100644 --- a/pandas/tests/frame/test_indexing.py +++ b/pandas/tests/frame/test_indexing.py @@ -9,7 +9,7 @@ import pytest from pandas._libs.tslib import iNaT -from pandas.compat import long, lrange, lzip, map, range, zip +from pandas.compat import PY2, long, lrange, lzip, map, range, zip from pandas.core.dtypes.common import is_float_dtype, is_integer, is_scalar from pandas.core.dtypes.dtypes import CategoricalDtype @@ -431,8 +431,9 @@ def test_getitem_setitem_ix_negative_integers(self): def test_getattr(self): assert_series_equal(self.frame.A, self.frame['A']) - pytest.raises(AttributeError, getattr, self.frame, - 'NONEXISTENT_NAME') + msg = "'DataFrame' object has no attribute 'NONEXISTENT_NAME'" + with pytest.raises(AttributeError, match=msg): + self.frame.NONEXISTENT_NAME def test_setattr_column(self): df = DataFrame({'foobar': 1}, index=lrange(10)) @@ -793,7 +794,8 @@ def test_delitem_corner(self): f = self.frame.copy() del f['D'] assert len(f.columns) == 3 - pytest.raises(KeyError, f.__delitem__, 'D') + with pytest.raises(KeyError, match=r"^'D'$"): + del f['D'] del f['B'] assert len(f.columns) == 2 @@ -842,7 +844,9 @@ def test_getitem_fancy_2d(self): with catch_warnings(record=True): simplefilter("ignore", DeprecationWarning) - pytest.raises(ValueError, f.ix.__getitem__, f > 0.5) + msg = "Cannot index with multidimensional key" + with pytest.raises(ValueError, match=msg): + f.ix[f > 0.5] def test_slice_floats(self): index = [52195.504153, 52196.303147, 52198.369883] @@ -865,6 +869,7 @@ def test_getitem_fancy_slice_integers_step(self): df.iloc[:8:2] = np.nan assert isna(df.iloc[:8:2]).values.all() 
+ @pytest.mark.skipif(PY2, reason="pytest.raises match regex fails") def test_getitem_setitem_integer_slice_keyerrors(self): df = DataFrame(np.random.randn(10, 5), index=lrange(0, 20, 2)) @@ -887,8 +892,10 @@ def test_getitem_setitem_integer_slice_keyerrors(self): # non-monotonic, raise KeyError df2 = df.iloc[lrange(5) + lrange(5, 10)[::-1]] - pytest.raises(KeyError, df2.loc.__getitem__, slice(3, 11)) - pytest.raises(KeyError, df2.loc.__setitem__, slice(3, 11), 0) + with pytest.raises(KeyError, match=r"^3$"): + df2.loc[3:11] + with pytest.raises(KeyError, match=r"^3$"): + df2.loc[3:11] = 0 def test_setitem_fancy_2d(self): @@ -1077,6 +1084,7 @@ def test_fancy_getitem_int_labels(self): expected = df[3] assert_series_equal(result, expected) + @pytest.mark.skipif(PY2, reason="pytest.raises match regex fails") def test_fancy_index_int_labels_exceptions(self): df = DataFrame(np.random.randn(10, 5), index=np.arange(0, 20, 2)) @@ -1084,14 +1092,18 @@ def test_fancy_index_int_labels_exceptions(self): simplefilter("ignore", DeprecationWarning) # labels that aren't contained - pytest.raises(KeyError, df.ix.__setitem__, - ([0, 1, 2], [2, 3, 4]), 5) + with pytest.raises(KeyError, match=r"\[1\] not in index"): + df.ix[[0, 1, 2], [2, 3, 4]] = 5 # try to set indices not contained in frame - pytest.raises(KeyError, self.frame.ix.__setitem__, - ['foo', 'bar', 'baz'], 1) - pytest.raises(KeyError, self.frame.ix.__setitem__, - (slice(None, None), ['E']), 1) + msg = (r"None of \[Index\(\['foo', 'bar', 'baz'\]," + r" dtype='object'\)\] are in the \[index\]") + with pytest.raises(KeyError, match=msg): + self.frame.ix[['foo', 'bar', 'baz']] = 1 + msg = (r"None of \[Index\(\['E'\], dtype='object'\)\] are in the" + r" \[columns\]") + with pytest.raises(KeyError, match=msg): + self.frame.ix[:, ['E']] = 1 # partial setting now allows this GH2578 # pytest.raises(KeyError, self.frame.ix.__setitem__, @@ -1504,6 +1516,7 @@ def test_getitem_setitem_boolean_multi(self): expected.loc[[0, 2], [1]] = 5 
assert_frame_equal(df, expected) + @pytest.mark.skipif(PY2, reason="pytest.raises match regex fails") def test_getitem_setitem_float_labels(self): index = Index([1.5, 2, 3, 4, 5]) df = DataFrame(np.random.randn(5, 5), index=index) @@ -1537,7 +1550,11 @@ def test_getitem_setitem_float_labels(self): df = DataFrame(np.random.randn(5, 5), index=index) # positional slicing only via iloc! - pytest.raises(TypeError, lambda: df.iloc[1.0:5]) + msg = ("cannot do slice indexing on" + r" <class 'pandas\.core\.indexes\.numeric\.Float64Index'> with" + r" these indexers \[1.0\] of <class 'float'>") + with pytest.raises(TypeError, match=msg): + df.iloc[1.0:5] result = df.iloc[4:5] expected = df.reindex([5.0]) @@ -1744,11 +1761,16 @@ def test_getitem_setitem_ix_bool_keyerror(self): # #2199 df = DataFrame({'a': [1, 2, 3]}) - pytest.raises(KeyError, df.loc.__getitem__, False) - pytest.raises(KeyError, df.loc.__getitem__, True) + with pytest.raises(KeyError, match=r"^False$"): + df.loc[False] + with pytest.raises(KeyError, match=r"^True$"): + df.loc[True] - pytest.raises(KeyError, df.loc.__setitem__, False, 0) - pytest.raises(KeyError, df.loc.__setitem__, True, 0) + msg = "cannot use a single bool to index into setitem" + with pytest.raises(KeyError, match=msg): + df.loc[False] = 0 + with pytest.raises(KeyError, match=msg): + df.loc[True] = 0 def test_getitem_list_duplicates(self): # #1943 @@ -1813,6 +1835,7 @@ def test_set_value(self): self.frame.set_value(idx, col, 1) assert self.frame[col][idx] == 1 + @pytest.mark.skipif(PY2, reason="pytest.raises match regex fails") def test_set_value_resize(self): with tm.assert_produces_warning(FutureWarning, @@ -1849,7 +1872,9 @@ def test_set_value_resize(self): assert isna(res3['baz'].drop(['foobar'])).all() with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): - pytest.raises(ValueError, res3.set_value, 'foobar', 'baz', 'sam') + msg = "could not convert string to float: 'sam'" + with pytest.raises(ValueError, match=msg): + 
res3.set_value('foobar', 'baz', 'sam') def test_set_value_with_index_dtype_change(self): df_orig = DataFrame(np.random.randn(3, 3), @@ -1888,7 +1913,8 @@ def test_get_set_value_no_partial_indexing(self): df = DataFrame(index=index, columns=lrange(4)) with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): - pytest.raises(KeyError, df.get_value, 0, 1) + with pytest.raises(KeyError, match=r"^0$"): + df.get_value(0, 1) def test_single_element_ix_dont_upcast(self): self.frame['E'] = 1 @@ -2158,10 +2184,15 @@ def test_non_monotonic_reindex_methods(self): df_rev = pd.DataFrame(data, index=dr[[3, 4, 5] + [0, 1, 2]], columns=list('A')) # index is not monotonic increasing or decreasing - pytest.raises(ValueError, df_rev.reindex, df.index, method='pad') - pytest.raises(ValueError, df_rev.reindex, df.index, method='ffill') - pytest.raises(ValueError, df_rev.reindex, df.index, method='bfill') - pytest.raises(ValueError, df_rev.reindex, df.index, method='nearest') + msg = "index must be monotonic increasing or decreasing" + with pytest.raises(ValueError, match=msg): + df_rev.reindex(df.index, method='pad') + with pytest.raises(ValueError, match=msg): + df_rev.reindex(df.index, method='ffill') + with pytest.raises(ValueError, match=msg): + df_rev.reindex(df.index, method='bfill') + with pytest.raises(ValueError, match=msg): + df_rev.reindex(df.index, method='nearest') def test_reindex_level(self): from itertools import permutations @@ -2669,14 +2700,20 @@ def _check_align(df, cond, other, check_dtypes=True): # invalid conditions df = default_frame err1 = (df + 1).values[0:2, :] - pytest.raises(ValueError, df.where, cond, err1) + msg = "other must be the same shape as self when an ndarray" + with pytest.raises(ValueError, match=msg): + df.where(cond, err1) err2 = cond.iloc[:2, :].values other1 = _safe_add(df) - pytest.raises(ValueError, df.where, err2, other1) + msg = "Array conditional must be same shape as self" + with pytest.raises(ValueError, match=msg): + 
df.where(err2, other1) - pytest.raises(ValueError, df.mask, True) - pytest.raises(ValueError, df.mask, 0) + with pytest.raises(ValueError, match=msg): + df.mask(True) + with pytest.raises(ValueError, match=msg): + df.mask(0) # where inplace def _check_set(df, cond, check_dtypes=True): diff --git a/pandas/tests/frame/test_missing.py b/pandas/tests/frame/test_missing.py index 77a3d4785d295..2f3b0a9f76de9 100644 --- a/pandas/tests/frame/test_missing.py +++ b/pandas/tests/frame/test_missing.py @@ -9,7 +9,7 @@ import numpy as np import pytest -from pandas.compat import lrange +from pandas.compat import PY2, lrange import pandas.util._test_decorators as td import pandas as pd @@ -83,6 +83,7 @@ def test_dropIncompleteRows(self): tm.assert_index_equal(samesize_frame.index, self.frame.index) tm.assert_index_equal(inp_frame2.index, self.frame.index) + @pytest.mark.skipif(PY2, reason="pytest.raises match regex fails") def test_dropna(self): df = DataFrame(np.random.randn(6, 4)) df[2][:2] = np.nan @@ -139,7 +140,9 @@ def test_dropna(self): assert_frame_equal(dropped, expected) # bad input - pytest.raises(ValueError, df.dropna, axis=3) + msg = "No axis named 3 for object type <class 'type'>" + with pytest.raises(ValueError, match=msg): + df.dropna(axis=3) def test_drop_and_dropna_caching(self): # tst that cacher updates @@ -158,10 +161,15 @@ def test_drop_and_dropna_caching(self): def test_dropna_corner(self): # bad input - pytest.raises(ValueError, self.frame.dropna, how='foo') - pytest.raises(TypeError, self.frame.dropna, how=None) + msg = "invalid how option: foo" + with pytest.raises(ValueError, match=msg): + self.frame.dropna(how='foo') + msg = "must specify how or thresh" + with pytest.raises(TypeError, match=msg): + self.frame.dropna(how=None) # non-existent column - 8303 - pytest.raises(KeyError, self.frame.dropna, subset=['A', 'X']) + with pytest.raises(KeyError, match=r"^\['X'\]$"): + self.frame.dropna(subset=['A', 'X']) def test_dropna_multiple_axes(self): df = 
DataFrame([[1, np.nan, 2, 3], @@ -226,8 +234,12 @@ def test_fillna(self): result = self.mixed_frame.fillna(value=0) result = self.mixed_frame.fillna(method='pad') - pytest.raises(ValueError, self.tsframe.fillna) - pytest.raises(ValueError, self.tsframe.fillna, 5, method='ffill') + msg = "Must specify a fill 'value' or 'method'" + with pytest.raises(ValueError, match=msg): + self.tsframe.fillna() + msg = "Cannot specify both 'value' and 'method'" + with pytest.raises(ValueError, match=msg): + self.tsframe.fillna(5, method='ffill') # mixed numeric (but no float16) mf = self.mixed_float.reindex(columns=['A', 'B', 'D']) @@ -595,11 +607,18 @@ def test_fillna_invalid_method(self): def test_fillna_invalid_value(self): # list - pytest.raises(TypeError, self.frame.fillna, [1, 2]) + msg = ("\"value\" parameter must be a scalar or dict, but you passed" + " a \"{}\"") + with pytest.raises(TypeError, match=msg.format('list')): + self.frame.fillna([1, 2]) # tuple - pytest.raises(TypeError, self.frame.fillna, (1, 2)) + with pytest.raises(TypeError, match=msg.format('tuple')): + self.frame.fillna((1, 2)) # frame with series - pytest.raises(TypeError, self.frame.iloc[:, 0].fillna, self.frame) + msg = ("\"value\" parameter must be a scalar, dict or Series, but you" + " passed a \"DataFrame\"") + with pytest.raises(TypeError, match=msg): + self.frame.iloc[:, 0].fillna(self.frame) def test_fillna_col_reordering(self): cols = ["COL." 
+ str(i) for i in range(5, 0, -1)] diff --git a/pandas/tests/frame/test_mutate_columns.py b/pandas/tests/frame/test_mutate_columns.py index 1f4da1bbb0470..6bef7e3f65b21 100644 --- a/pandas/tests/frame/test_mutate_columns.py +++ b/pandas/tests/frame/test_mutate_columns.py @@ -177,7 +177,9 @@ def test_insert(self): with pytest.raises(ValueError, match='already exists'): df.insert(1, 'a', df['b']) - pytest.raises(ValueError, df.insert, 1, 'c', df['b']) + msg = "cannot insert c, already exists" + with pytest.raises(ValueError, match=msg): + df.insert(1, 'c', df['b']) df.columns.name = 'some_name' # preserve columns name field diff --git a/pandas/tests/frame/test_nonunique_indexes.py b/pandas/tests/frame/test_nonunique_indexes.py index a5bed14cf06d2..799d548100b5e 100644 --- a/pandas/tests/frame/test_nonunique_indexes.py +++ b/pandas/tests/frame/test_nonunique_indexes.py @@ -187,8 +187,11 @@ def check(result, expected=None): # reindex is invalid! df = DataFrame([[1, 5, 7.], [1, 5, 7.], [1, 5, 7.]], columns=['bar', 'a', 'a']) - pytest.raises(ValueError, df.reindex, columns=['bar']) - pytest.raises(ValueError, df.reindex, columns=['bar', 'foo']) + msg = "cannot reindex from a duplicate axis" + with pytest.raises(ValueError, match=msg): + df.reindex(columns=['bar']) + with pytest.raises(ValueError, match=msg): + df.reindex(columns=['bar', 'foo']) # drop df = DataFrame([[1, 5, 7.], [1, 5, 7.], [1, 5, 7.]], @@ -306,7 +309,9 @@ def check(result, expected=None): # boolean with the duplicate raises df = DataFrame(np.arange(12).reshape(3, 4), columns=dups, dtype='float64') - pytest.raises(ValueError, lambda: df[df.A > 6]) + msg = "cannot reindex from a duplicate axis" + with pytest.raises(ValueError, match=msg): + df[df.A > 6] # dup aligining operations should work # GH 5185 @@ -323,7 +328,9 @@ def check(result, expected=None): columns=['A', 'A']) # not-comparing like-labelled - pytest.raises(ValueError, lambda: df1 == df2) + msg = "Can only compare identically-labeled DataFrame 
objects" + with pytest.raises(ValueError, match=msg): + df1 == df2 df1r = df1.reindex_like(df2) result = df1r == df2 diff --git a/pandas/tests/frame/test_quantile.py b/pandas/tests/frame/test_quantile.py index d1f1299a5202e..19b6636978643 100644 --- a/pandas/tests/frame/test_quantile.py +++ b/pandas/tests/frame/test_quantile.py @@ -5,6 +5,8 @@ import numpy as np import pytest +from pandas.compat import PY2 + import pandas as pd from pandas import DataFrame, Series, Timestamp from pandas.tests.frame.common import TestData @@ -71,6 +73,7 @@ def test_quantile_axis_mixed(self): with pytest.raises(TypeError): df.quantile(.5, axis=1, numeric_only=False) + @pytest.mark.skipif(PY2, reason="pytest.raises match regex fails") def test_quantile_axis_parameter(self): # GH 9543/9544 @@ -92,8 +95,12 @@ def test_quantile_axis_parameter(self): result = df.quantile(.5, axis="columns") assert_series_equal(result, expected) - pytest.raises(ValueError, df.quantile, 0.1, axis=-1) - pytest.raises(ValueError, df.quantile, 0.1, axis="column") + msg = "No axis named -1 for object type <class 'type'>" + with pytest.raises(ValueError, match=msg): + df.quantile(0.1, axis=-1) + msg = "No axis named column for object type <class 'type'>" + with pytest.raises(ValueError, match=msg): + df.quantile(0.1, axis="column") def test_quantile_interpolation(self): # see gh-10174 diff --git a/pandas/tests/frame/test_query_eval.py b/pandas/tests/frame/test_query_eval.py index 9c4d306ea5720..ba02cb54bcea1 100644 --- a/pandas/tests/frame/test_query_eval.py +++ b/pandas/tests/frame/test_query_eval.py @@ -14,7 +14,6 @@ from pandas import DataFrame, Index, MultiIndex, Series, date_range from pandas.core.computation.check import _NUMEXPR_INSTALLED from pandas.tests.frame.common import TestData -import pandas.util.testing as tm from pandas.util.testing import ( assert_frame_equal, assert_series_equal, makeCustomDataframe as mkdf) @@ -79,10 +78,10 @@ def test_query_numexpr(self): result = df.eval('A+1', 
engine='numexpr') assert_series_equal(result, self.expected2, check_names=False) else: - pytest.raises(ImportError, - lambda: df.query('A>0', engine='numexpr')) - pytest.raises(ImportError, - lambda: df.eval('A+1', engine='numexpr')) + with pytest.raises(ImportError): + df.query('A>0', engine='numexpr') + with pytest.raises(ImportError): + df.eval('A+1', engine='numexpr') class TestDataFrameEval(TestData): @@ -355,13 +354,6 @@ def to_series(mi, level): else: raise AssertionError("object must be a Series or Index") - @pytest.mark.filterwarnings("ignore::FutureWarning") - def test_raise_on_panel_with_multiindex(self, parser, engine): - p = tm.makePanel(7) - p.items = tm.makeCustomIndex(len(p.items), nlevels=2) - with pytest.raises(NotImplementedError): - pd.eval('p + 1', parser=parser, engine=engine) - @td.skip_if_no_ne class TestDataFrameQueryNumExprPandas(object): @@ -860,9 +852,10 @@ def test_str_query_method(self, parser, engine): for lhs, op, rhs in zip(lhs, ops, rhs): ex = '{lhs} {op} {rhs}'.format(lhs=lhs, op=op, rhs=rhs) - pytest.raises(NotImplementedError, df.query, ex, - engine=engine, parser=parser, - local_dict={'strings': df.strings}) + msg = r"'(Not)?In' nodes are not implemented" + with pytest.raises(NotImplementedError, match=msg): + df.query(ex, engine=engine, parser=parser, + local_dict={'strings': df.strings}) else: res = df.query('"a" == strings', engine=engine, parser=parser) assert_frame_equal(res, expect) diff --git a/pandas/tests/frame/test_rank.py b/pandas/tests/frame/test_rank.py index 10c42e0d1a1cf..6bb9dea15d1ce 100644 --- a/pandas/tests/frame/test_rank.py +++ b/pandas/tests/frame/test_rank.py @@ -310,6 +310,7 @@ def test_rank_pct_true(self, method, exp): tm.assert_frame_equal(result, expected) @pytest.mark.single + @pytest.mark.high_memory def test_pct_max_many_rows(self): # GH 18271 df = DataFrame({'A': np.arange(2**24 + 1), diff --git a/pandas/tests/frame/test_replace.py b/pandas/tests/frame/test_replace.py index 
219f7a1585fc2..50c66d3f8db00 100644 --- a/pandas/tests/frame/test_replace.py +++ b/pandas/tests/frame/test_replace.py @@ -466,6 +466,13 @@ def test_regex_replace_dict_nested(self): assert_frame_equal(res3, expec) assert_frame_equal(res4, expec) + def test_regex_replace_dict_nested_non_first_character(self): + # GH 25259 + df = pd.DataFrame({'first': ['abc', 'bca', 'cab']}) + expected = pd.DataFrame({'first': ['.bc', 'bc.', 'c.b']}) + result = df.replace({'a': '.'}, regex=True) + assert_frame_equal(result, expected) + def test_regex_replace_dict_nested_gh4115(self): df = pd.DataFrame({'Type': ['Q', 'T', 'Q', 'Q', 'T'], 'tmp': 2}) expected = DataFrame({'Type': [0, 1, 0, 0, 1], 'tmp': 2}) @@ -830,7 +837,9 @@ def test_replace_input_formats_listlike(self): expected.replace(to_rep[i], values[i], inplace=True) assert_frame_equal(result, expected) - pytest.raises(ValueError, df.replace, to_rep, values[1:]) + msg = r"Replacement lists must match in length\. Expecting 3 got 2" + with pytest.raises(ValueError, match=msg): + df.replace(to_rep, values[1:]) def test_replace_input_formats_scalar(self): df = DataFrame({'A': [np.nan, 0, np.inf], 'B': [0, 2, 5], @@ -843,7 +852,9 @@ def test_replace_input_formats_scalar(self): for k, v in compat.iteritems(df)} assert_frame_equal(filled, DataFrame(expected)) - pytest.raises(TypeError, df.replace, to_rep, [np.nan, 0, '']) + msg = "value argument must be scalar, dict, or Series" + with pytest.raises(TypeError, match=msg): + df.replace(to_rep, [np.nan, 0, '']) # list to scalar to_rep = [np.nan, 0, ''] diff --git a/pandas/tests/frame/test_reshape.py b/pandas/tests/frame/test_reshape.py index 28222a82945be..8abf3a6706886 100644 --- a/pandas/tests/frame/test_reshape.py +++ b/pandas/tests/frame/test_reshape.py @@ -4,7 +4,6 @@ from datetime import datetime import itertools -from warnings import catch_warnings, simplefilter import numpy as np import pytest @@ -49,14 +48,6 @@ def test_pivot(self): assert pivoted.index.name == 'index' assert 
pivoted.columns.names == (None, 'columns') - with catch_warnings(record=True): - # pivot multiple columns - simplefilter("ignore", FutureWarning) - wp = tm.makePanel() - lp = wp.to_frame() - df = lp.reset_index() - tm.assert_frame_equal(df.pivot('major', 'minor'), lp.unstack()) - def test_pivot_duplicates(self): data = DataFrame({'a': ['bar', 'bar', 'foo', 'foo', 'foo'], 'b': ['one', 'two', 'one', 'one', 'two'], @@ -67,7 +58,7 @@ def test_pivot_duplicates(self): def test_pivot_empty(self): df = DataFrame({}, columns=['a', 'b', 'c']) result = df.pivot('a', 'b', 'c') - expected = DataFrame({}) + expected = DataFrame() tm.assert_frame_equal(result, expected, check_names=False) def test_pivot_integer_bug(self): @@ -403,7 +394,10 @@ def test_stack_mixed_levels(self): # When mixed types are passed and the ints are not level # names, raise - pytest.raises(ValueError, df2.stack, level=['animal', 0]) + msg = ("level should contain all level names or all level numbers, not" + " a mixture of the two") + with pytest.raises(ValueError, match=msg): + df2.stack(level=['animal', 0]) # GH #8584: Having 0 in the level names could raise a # strange error about lexsort depth diff --git a/pandas/tests/frame/test_sorting.py b/pandas/tests/frame/test_sorting.py index 85e6373b384e4..8b29394bcab84 100644 --- a/pandas/tests/frame/test_sorting.py +++ b/pandas/tests/frame/test_sorting.py @@ -7,7 +7,7 @@ import numpy as np import pytest -from pandas.compat import lrange +from pandas.compat import PY2, lrange import pandas as pd from pandas import ( @@ -21,6 +21,7 @@ class TestDataFrameSorting(TestData): + @pytest.mark.skipif(PY2, reason="pytest.raises match regex fails") def test_sort_values(self): frame = DataFrame([[1, 1, 2], [3, 1, 0], [4, 5, 6]], index=[1, 2, 3], columns=list('ABC')) @@ -54,8 +55,9 @@ def test_sort_values(self): sorted_df = frame.sort_values(by=['B', 'A'], ascending=[True, False]) assert_frame_equal(sorted_df, expected) - pytest.raises(ValueError, lambda: 
frame.sort_values( - by=['A', 'B'], axis=2, inplace=True)) + msg = "No axis named 2 for object type <class 'type'>" + with pytest.raises(ValueError, match=msg): + frame.sort_values(by=['A', 'B'], axis=2, inplace=True) # by row (axis=1): GH 10806 sorted_df = frame.sort_values(by=3, axis=1) diff --git a/pandas/tests/frame/test_subclass.py b/pandas/tests/frame/test_subclass.py index 4f0747c0d6945..2e3696e7e04cc 100644 --- a/pandas/tests/frame/test_subclass.py +++ b/pandas/tests/frame/test_subclass.py @@ -6,7 +6,7 @@ import pytest import pandas as pd -from pandas import DataFrame, Index, MultiIndex, Panel, Series +from pandas import DataFrame, Index, MultiIndex, Series from pandas.tests.frame.common import TestData import pandas.util.testing as tm @@ -125,29 +125,6 @@ def test_indexing_sliced(self): tm.assert_series_equal(res, exp) assert isinstance(res, tm.SubclassedSeries) - @pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning") - def test_to_panel_expanddim(self): - # GH 9762 - - class SubclassedFrame(DataFrame): - - @property - def _constructor_expanddim(self): - return SubclassedPanel - - class SubclassedPanel(Panel): - pass - - index = MultiIndex.from_tuples([(0, 0), (0, 1), (0, 2)]) - df = SubclassedFrame({'X': [1, 2, 3], 'Y': [4, 5, 6]}, index=index) - result = df.to_panel() - assert isinstance(result, SubclassedPanel) - expected = SubclassedPanel([[[1, 2, 3]], [[4, 5, 6]]], - items=['X', 'Y'], major_axis=[0], - minor_axis=[0, 1, 2], - dtype='int64') - tm.assert_panel_equal(result, expected) - def test_subclass_attr_err_propagation(self): # GH 11808 class A(DataFrame): diff --git a/pandas/tests/frame/test_timeseries.py b/pandas/tests/frame/test_timeseries.py index bc37317f72802..716a9e30e4cc3 100644 --- a/pandas/tests/frame/test_timeseries.py +++ b/pandas/tests/frame/test_timeseries.py @@ -6,8 +6,9 @@ import numpy as np import pytest +import pytz -from pandas.compat import product +from pandas.compat import PY2, product import pandas as pd from pandas 
import ( @@ -394,7 +395,9 @@ def test_tshift(self): assert_frame_equal(unshifted, inferred_ts) no_freq = self.tsframe.iloc[[0, 5, 7], :] - pytest.raises(ValueError, no_freq.tshift) + msg = "Freq was not given and was not set in the index" + with pytest.raises(ValueError, match=msg): + no_freq.tshift() def test_truncate(self): ts = self.tsframe[::3] @@ -435,9 +438,10 @@ def test_truncate(self): truncated = ts.truncate(after=end_missing) assert_frame_equal(truncated, expected) - pytest.raises(ValueError, ts.truncate, - before=ts.index[-1] - ts.index.freq, - after=ts.index[0] + ts.index.freq) + msg = "Truncate: 2000-01-06 00:00:00 must be after 2000-02-04 00:00:00" + with pytest.raises(ValueError, match=msg): + ts.truncate(before=ts.index[-1] - ts.index.freq, + after=ts.index[0] + ts.index.freq) def test_truncate_copy(self): index = self.tsframe.index @@ -647,6 +651,28 @@ def test_at_time(self): rs = ts.at_time('16:00') assert len(rs) == 0 + @pytest.mark.parametrize('hour', ['1:00', '1:00AM', time(1), + time(1, tzinfo=pytz.UTC)]) + def test_at_time_errors(self, hour): + # GH 24043 + dti = pd.date_range('2018', periods=3, freq='H') + df = pd.DataFrame(list(range(len(dti))), index=dti) + if getattr(hour, 'tzinfo', None) is None: + result = df.at_time(hour) + expected = df.iloc[1:2] + tm.assert_frame_equal(result, expected) + else: + with pytest.raises(ValueError, match="Index must be timezone"): + df.at_time(hour) + + def test_at_time_tz(self): + # GH 24043 + dti = pd.date_range('2018', periods=3, freq='H', tz='US/Pacific') + df = pd.DataFrame(list(range(len(dti))), index=dti) + result = df.at_time(time(4, tzinfo=pytz.timezone('US/Eastern'))) + expected = df.iloc[1:2] + tm.assert_frame_equal(result, expected) + def test_at_time_raises(self): # GH20725 df = pd.DataFrame([[1, 2, 3], [4, 5, 6]]) @@ -758,14 +784,18 @@ def test_between_time_axis_raises(self, axis): ts = DataFrame(rand_data, index=rng, columns=rng) stime, etime = ('08:00:00', '09:00:00') + msg = "Index must 
be DatetimeIndex" if axis in ['columns', 1]: ts.index = mask - pytest.raises(TypeError, ts.between_time, stime, etime) - pytest.raises(TypeError, ts.between_time, stime, etime, axis=0) + with pytest.raises(TypeError, match=msg): + ts.between_time(stime, etime) + with pytest.raises(TypeError, match=msg): + ts.between_time(stime, etime, axis=0) if axis in ['index', 0]: ts.columns = mask - pytest.raises(TypeError, ts.between_time, stime, etime, axis=1) + with pytest.raises(TypeError, match=msg): + ts.between_time(stime, etime, axis=1) def test_operation_on_NaT(self): # Both NaT and Timestamp are in DataFrame. @@ -806,6 +836,7 @@ def test_datetime_assignment_with_NaT_and_diff_time_units(self): 'new': [1e9, None]}, dtype='datetime64[ns]') tm.assert_frame_equal(result, expected) + @pytest.mark.skipif(PY2, reason="pytest.raises match regex fails") def test_frame_to_period(self): K = 5 @@ -831,7 +862,9 @@ def test_frame_to_period(self): pts = df.to_period('M', axis=1) tm.assert_index_equal(pts.columns, exp.columns.asfreq('M')) - pytest.raises(ValueError, df.to_period, axis=2) + msg = "No axis named 2 for object type <class 'type'>" + with pytest.raises(ValueError, match=msg): + df.to_period(axis=2) @pytest.mark.parametrize("fn", ['tz_localize', 'tz_convert']) def test_tz_convert_and_localize(self, fn): diff --git a/pandas/tests/frame/test_to_csv.py b/pandas/tests/frame/test_to_csv.py index 61eefccede5dd..54a8712a9c645 100644 --- a/pandas/tests/frame/test_to_csv.py +++ b/pandas/tests/frame/test_to_csv.py @@ -109,8 +109,9 @@ def test_to_csv_from_csv2(self): xp.columns = col_aliases assert_frame_equal(xp, rs) - pytest.raises(ValueError, self.frame2.to_csv, path, - header=['AA', 'X']) + msg = "Writing 4 cols but got 2 aliases" + with pytest.raises(ValueError, match=msg): + self.frame2.to_csv(path, header=['AA', 'X']) def test_to_csv_from_csv3(self): diff --git a/pandas/tests/generic/test_generic.py b/pandas/tests/generic/test_generic.py index 7183fea85a069..c2f6cbf4c564c 
100644 --- a/pandas/tests/generic/test_generic.py +++ b/pandas/tests/generic/test_generic.py @@ -14,8 +14,7 @@ import pandas as pd from pandas import DataFrame, MultiIndex, Panel, Series, date_range import pandas.util.testing as tm -from pandas.util.testing import ( - assert_frame_equal, assert_panel_equal, assert_series_equal) +from pandas.util.testing import assert_frame_equal, assert_series_equal import pandas.io.formats.printing as printing @@ -701,16 +700,9 @@ def test_sample(sel): assert_frame_equal(sample1, df[['colString']]) # Test default axes - with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - p = Panel(items=['a', 'b', 'c'], major_axis=[2, 4, 6], - minor_axis=[1, 3, 5]) - assert_panel_equal( - p.sample(n=3, random_state=42), p.sample(n=3, axis=1, - random_state=42)) - assert_frame_equal( - df.sample(n=3, random_state=42), df.sample(n=3, axis=0, - random_state=42)) + assert_frame_equal( + df.sample(n=3, random_state=42), df.sample(n=3, axis=0, + random_state=42)) # Test that function aligns weights with frame df = DataFrame( @@ -740,23 +732,11 @@ def test_squeeze(self): tm.assert_series_equal(s.squeeze(), s) for df in [tm.makeTimeDataFrame()]: tm.assert_frame_equal(df.squeeze(), df) - with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - for p in [tm.makePanel()]: - tm.assert_panel_equal(p.squeeze(), p) # squeezing df = tm.makeTimeDataFrame().reindex(columns=['A']) tm.assert_series_equal(df.squeeze(), df['A']) - with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - p = tm.makePanel().reindex(items=['ItemA']) - tm.assert_frame_equal(p.squeeze(), p['ItemA']) - - p = tm.makePanel().reindex(items=['ItemA'], minor_axis=['A']) - tm.assert_series_equal(p.squeeze(), p.loc['ItemA', :, 'A']) - # don't fail with 0 length dimensions GH11229 & GH8999 empty_series = Series([], name='five') empty_frame = DataFrame([empty_series]) @@ -789,8 +769,6 @@ def test_numpy_squeeze(self): 
tm.assert_series_equal(np.squeeze(df), df['A']) def test_transpose(self): - msg = (r"transpose\(\) got multiple values for " - r"keyword argument 'axes'") for s in [tm.makeFloatSeries(), tm.makeStringSeries(), tm.makeObjectSeries()]: # calls implementation in pandas/core/base.py @@ -798,14 +776,6 @@ def test_transpose(self): for df in [tm.makeTimeDataFrame()]: tm.assert_frame_equal(df.transpose().transpose(), df) - with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - for p in [tm.makePanel()]: - tm.assert_panel_equal(p.transpose(2, 0, 1) - .transpose(1, 2, 0), p) - with pytest.raises(TypeError, match=msg): - p.transpose(2, 0, 1, axes=(2, 0, 1)) - def test_numpy_transpose(self): msg = "the 'axes' parameter is not supported" @@ -821,13 +791,6 @@ def test_numpy_transpose(self): with pytest.raises(ValueError, match=msg): np.transpose(df, axes=1) - with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - p = tm.makePanel() - tm.assert_panel_equal(np.transpose( - np.transpose(p, axes=(2, 0, 1)), - axes=(1, 2, 0)), p) - def test_take(self): indices = [1, 5, -2, 6, 3, -1] for s in [tm.makeFloatSeries(), tm.makeStringSeries(), @@ -843,27 +806,12 @@ def test_take(self): columns=df.columns) tm.assert_frame_equal(out, expected) - indices = [-3, 2, 0, 1] - with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - for p in [tm.makePanel()]: - out = p.take(indices) - expected = Panel(data=p.values.take(indices, axis=0), - items=p.items.take(indices), - major_axis=p.major_axis, - minor_axis=p.minor_axis) - tm.assert_panel_equal(out, expected) - def test_take_invalid_kwargs(self): indices = [-3, 2, 0, 1] s = tm.makeFloatSeries() df = tm.makeTimeDataFrame() - with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - p = tm.makePanel() - - for obj in (s, df, p): + for obj in (s, df): msg = r"take\(\) got an unexpected keyword argument 'foo'" with pytest.raises(TypeError, match=msg): obj.take(indices, foo=2) 
@@ -966,12 +914,6 @@ def test_equals(self): assert a.equals(e) assert e.equals(f) - def test_describe_raises(self): - with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - with pytest.raises(NotImplementedError): - tm.makePanel().describe() - def test_pipe(self): df = DataFrame({'A': [1, 2, 3]}) f = lambda x, y: x ** y @@ -1000,22 +942,6 @@ def test_pipe_tuple_error(self): with pytest.raises(ValueError): df.A.pipe((f, 'y'), x=1, y=0) - def test_pipe_panel(self): - with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - wp = Panel({'r1': DataFrame({"A": [1, 2, 3]})}) - f = lambda x, y: x + y - result = wp.pipe(f, 2) - expected = wp + 2 - assert_panel_equal(result, expected) - - result = wp.pipe((f, 'y'), x=1) - expected = wp + 1 - assert_panel_equal(result, expected) - - with pytest.raises(ValueError): - wp.pipe((f, 'y'), x=1, y=1) - @pytest.mark.parametrize('box', [pd.Series, pd.DataFrame]) def test_axis_classmethods(self, box): obj = box() diff --git a/pandas/tests/generic/test_panel.py b/pandas/tests/generic/test_panel.py deleted file mode 100644 index 8b090d951957e..0000000000000 --- a/pandas/tests/generic/test_panel.py +++ /dev/null @@ -1,59 +0,0 @@ -# -*- coding: utf-8 -*- -# pylint: disable-msg=E1101,W0612 - -from warnings import catch_warnings, simplefilter - -import pandas.util._test_decorators as td - -from pandas import Panel -import pandas.util.testing as tm -from pandas.util.testing import assert_almost_equal, assert_panel_equal - -from .test_generic import Generic - - -class TestPanel(Generic): - _typ = Panel - _comparator = lambda self, x, y: assert_panel_equal(x, y, by_blocks=True) - - @td.skip_if_no('xarray', min_version='0.7.0') - def test_to_xarray(self): - from xarray import DataArray - - with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - p = tm.makePanel() - - result = p.to_xarray() - assert isinstance(result, DataArray) - assert len(result.coords) == 3 - 
assert_almost_equal(list(result.coords.keys()), - ['items', 'major_axis', 'minor_axis']) - assert len(result.dims) == 3 - - # idempotency - assert_panel_equal(result.to_pandas(), p) - - -# run all the tests, but wrap each in a warning catcher -for t in ['test_rename', 'test_get_numeric_data', - 'test_get_default', 'test_nonzero', - 'test_downcast', 'test_constructor_compound_dtypes', - 'test_head_tail', - 'test_size_compat', 'test_split_compat', - 'test_unexpected_keyword', - 'test_stat_unexpected_keyword', 'test_api_compat', - 'test_stat_non_defaults_args', - 'test_truncate_out_of_bounds', - 'test_metadata_propagation', 'test_copy_and_deepcopy', - 'test_pct_change', 'test_sample']: - - def f(): - def tester(self): - f = getattr(super(TestPanel, self), t) - with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - f() - return tester - - setattr(TestPanel, t, f()) diff --git a/pandas/tests/groupby/aggregate/test_aggregate.py b/pandas/tests/groupby/aggregate/test_aggregate.py index 62ec0555f9033..0c2e74c0b735f 100644 --- a/pandas/tests/groupby/aggregate/test_aggregate.py +++ b/pandas/tests/groupby/aggregate/test_aggregate.py @@ -3,12 +3,11 @@ """ test .agg behavior / note that .apply is tested generally in test_groupby.py """ +from collections import OrderedDict import numpy as np import pytest -from pandas.compat import OrderedDict - import pandas as pd from pandas import DataFrame, Index, MultiIndex, Series, concat from pandas.core.base import SpecificationError @@ -287,3 +286,20 @@ def test_multi_function_flexible_mix(df): with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): result = grouped.aggregate(d) tm.assert_frame_equal(result, expected) + + +def test_groupby_agg_coercing_bools(): + # issue 14873 + dat = pd.DataFrame( + {'a': [1, 1, 2, 2], 'b': [0, 1, 2, 3], 'c': [None, None, 1, 1]}) + gp = dat.groupby('a') + + index = Index([1, 2], name='a') + + result = gp['b'].aggregate(lambda x: (x != 0).all()) + expected = 
Series([False, True], index=index, name='b') + tm.assert_series_equal(result, expected) + + result = gp['c'].aggregate(lambda x: x.isnull().all()) + expected = Series([True, False], index=index, name='c') + tm.assert_series_equal(result, expected) diff --git a/pandas/tests/groupby/aggregate/test_other.py b/pandas/tests/groupby/aggregate/test_other.py index b5214b11bddcc..cacfdb7694de1 100644 --- a/pandas/tests/groupby/aggregate/test_other.py +++ b/pandas/tests/groupby/aggregate/test_other.py @@ -512,3 +512,18 @@ def test_agg_list_like_func(): expected = pd.DataFrame({'A': [str(x) for x in range(3)], 'B': [[str(x)] for x in range(3)]}) tm.assert_frame_equal(result, expected) + + +def test_agg_lambda_with_timezone(): + # GH 23683 + df = pd.DataFrame({ + 'tag': [1, 1], + 'date': [ + pd.Timestamp('2018-01-01', tz='UTC'), + pd.Timestamp('2018-01-02', tz='UTC')] + }) + result = df.groupby('tag').agg({'date': lambda e: e.head(1)}) + expected = pd.DataFrame([pd.Timestamp('2018-01-01', tz='UTC')], + index=pd.Index([1], name='tag'), + columns=['date']) + tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/groupby/test_function.py b/pandas/tests/groupby/test_function.py index a884a37840f8a..b5e328ef64424 100644 --- a/pandas/tests/groupby/test_function.py +++ b/pandas/tests/groupby/test_function.py @@ -897,6 +897,15 @@ def test_nunique_with_timegrouper(): tm.assert_series_equal(result, expected) +def test_nunique_preserves_column_level_names(): + # GH 23222 + test = pd.DataFrame([1, 2, 2], + columns=pd.Index(['A'], name="level_0")) + result = test.groupby([0, 0, 0]).nunique() + expected = pd.DataFrame([2], columns=test.columns) + tm.assert_frame_equal(result, expected) + + # count # -------------------------------- @@ -1060,6 +1069,55 @@ def test_size(df): tm.assert_series_equal(df.groupby('A').size(), out) +# quantile +# -------------------------------- +@pytest.mark.parametrize("interpolation", [ + "linear", "lower", "higher", "nearest", "midpoint"]) 
+@pytest.mark.parametrize("a_vals,b_vals", [ + # Ints + ([1, 2, 3, 4, 5], [5, 4, 3, 2, 1]), + ([1, 2, 3, 4], [4, 3, 2, 1]), + ([1, 2, 3, 4, 5], [4, 3, 2, 1]), + # Floats + ([1., 2., 3., 4., 5.], [5., 4., 3., 2., 1.]), + # Missing data + ([1., np.nan, 3., np.nan, 5.], [5., np.nan, 3., np.nan, 1.]), + ([np.nan, 4., np.nan, 2., np.nan], [np.nan, 4., np.nan, 2., np.nan]), + # Timestamps + ([x for x in pd.date_range('1/1/18', freq='D', periods=5)], + [x for x in pd.date_range('1/1/18', freq='D', periods=5)][::-1]), + # All NA + ([np.nan] * 5, [np.nan] * 5), +]) +@pytest.mark.parametrize('q', [0, .25, .5, .75, 1]) +def test_quantile(interpolation, a_vals, b_vals, q): + if interpolation == 'nearest' and q == 0.5 and b_vals == [4, 3, 2, 1]: + pytest.skip("Unclear numpy expectation for nearest result with " + "equidistant data") + + a_expected = pd.Series(a_vals).quantile(q, interpolation=interpolation) + b_expected = pd.Series(b_vals).quantile(q, interpolation=interpolation) + + df = DataFrame({ + 'key': ['a'] * len(a_vals) + ['b'] * len(b_vals), + 'val': a_vals + b_vals}) + + expected = DataFrame([a_expected, b_expected], columns=['val'], + index=Index(['a', 'b'], name='key')) + result = df.groupby('key').quantile(q, interpolation=interpolation) + + tm.assert_frame_equal(result, expected) + + +def test_quantile_raises(): + df = pd.DataFrame([ + ['foo', 'a'], ['foo', 'b'], ['foo', 'c']], columns=['key', 'val']) + + with pytest.raises(TypeError, match="cannot be performed against " + "'object' dtypes"): + df.groupby('key').quantile() + + # pipe # -------------------------------- diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index 98c917a6eca3c..6a11f0ae9b44a 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -1,15 +1,14 @@ # -*- coding: utf-8 -*- from __future__ import print_function -from collections import defaultdict +from collections import OrderedDict, defaultdict from datetime 
import datetime from decimal import Decimal import numpy as np import pytest -from pandas.compat import ( - OrderedDict, StringIO, lmap, lrange, lzip, map, range, zip) +from pandas.compat import StringIO, lmap, lrange, lzip, map, range, zip from pandas.errors import PerformanceWarning import pandas as pd @@ -209,7 +208,7 @@ def f(x, q=None, axis=0): trans_expected = ts_grouped.transform(g) assert_series_equal(apply_result, agg_expected) - assert_series_equal(agg_result, agg_expected, check_names=False) + assert_series_equal(agg_result, agg_expected) assert_series_equal(trans_result, trans_expected) agg_result = ts_grouped.agg(f, q=80) @@ -224,13 +223,13 @@ def f(x, q=None, axis=0): agg_result = df_grouped.agg(np.percentile, 80, axis=0) apply_result = df_grouped.apply(DataFrame.quantile, .8) expected = df_grouped.quantile(.8) - assert_frame_equal(apply_result, expected) - assert_frame_equal(agg_result, expected, check_names=False) + assert_frame_equal(apply_result, expected, check_names=False) + assert_frame_equal(agg_result, expected) agg_result = df_grouped.agg(f, q=80) apply_result = df_grouped.apply(DataFrame.quantile, q=.8) - assert_frame_equal(agg_result, expected, check_names=False) - assert_frame_equal(apply_result, expected) + assert_frame_equal(agg_result, expected) + assert_frame_equal(apply_result, expected, check_names=False) def test_len(): @@ -1219,51 +1218,6 @@ def test_groupby_nat_exclude(): grouped.get_group(pd.NaT) -@pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning") -def test_sparse_friendly(df): - sdf = df[['C', 'D']].to_sparse() - panel = tm.makePanel() - tm.add_nans(panel) - - def _check_work(gp): - gp.mean() - gp.agg(np.mean) - dict(iter(gp)) - - # it works! 
- _check_work(sdf.groupby(lambda x: x // 2)) - _check_work(sdf['C'].groupby(lambda x: x // 2)) - _check_work(sdf.groupby(df['A'])) - - # do this someday - # _check_work(panel.groupby(lambda x: x.month, axis=1)) - - -@pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning") -def test_panel_groupby(): - panel = tm.makePanel() - tm.add_nans(panel) - grouped = panel.groupby({'ItemA': 0, 'ItemB': 0, 'ItemC': 1}, - axis='items') - agged = grouped.mean() - agged2 = grouped.agg(lambda x: x.mean('items')) - - tm.assert_panel_equal(agged, agged2) - - tm.assert_index_equal(agged.items, Index([0, 1])) - - grouped = panel.groupby(lambda x: x.month, axis='major') - agged = grouped.mean() - - exp = Index(sorted(list(set(panel.major_axis.month)))) - tm.assert_index_equal(agged.major_axis, exp) - - grouped = panel.groupby({'A': 0, 'B': 0, 'C': 1, 'D': 1}, - axis='minor') - agged = grouped.mean() - tm.assert_index_equal(agged.minor_axis, Index([0, 1])) - - def test_groupby_2d_malformed(): d = DataFrame(index=lrange(2)) d['group'] = ['g1', 'g2'] @@ -1744,3 +1698,19 @@ def test_groupby_agg_ohlc_non_first(): result = df.groupby(pd.Grouper(freq='D')).agg(['sum', 'ohlc']) tm.assert_frame_equal(result, expected) + + +def test_groupby_multiindex_nat(): + # GH 9236 + values = [ + (pd.NaT, 'a'), + (datetime(2012, 1, 2), 'a'), + (datetime(2012, 1, 2), 'b'), + (datetime(2012, 1, 3), 'a') + ] + mi = pd.MultiIndex.from_tuples(values, names=['date', None]) + ser = pd.Series([3, 2, 2.5, 4], index=mi) + + result = ser.groupby(level=1).mean() + expected = pd.Series([3., 2.5], index=["a", "b"]) + assert_series_equal(result, expected) diff --git a/pandas/tests/groupby/test_grouping.py b/pandas/tests/groupby/test_grouping.py index a509a7cb57c97..44b5bd5f13992 100644 --- a/pandas/tests/groupby/test_grouping.py +++ b/pandas/tests/groupby/test_grouping.py @@ -14,8 +14,7 @@ from pandas.core.groupby.grouper import Grouping import pandas.util.testing as tm from pandas.util.testing import ( - 
assert_almost_equal, assert_frame_equal, assert_panel_equal, - assert_series_equal) + assert_almost_equal, assert_frame_equal, assert_series_equal) # selection # -------------------------------- @@ -563,17 +562,7 @@ def test_list_grouper_with_nat(self): # -------------------------------- class TestGetGroup(): - - @pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning") def test_get_group(self): - wp = tm.makePanel() - grouped = wp.groupby(lambda x: x.month, axis='major') - - gp = grouped.get_group(1) - expected = wp.reindex( - major=[x for x in wp.major_axis if x.month == 1]) - assert_panel_equal(gp, expected) - # GH 5267 # be datelike friendly df = DataFrame({'DATE': pd.to_datetime( @@ -755,19 +744,6 @@ def test_multi_iter_frame(self, three_group): for key, group in grouped: pass - @pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning") - def test_multi_iter_panel(self): - wp = tm.makePanel() - grouped = wp.groupby([lambda x: x.month, lambda x: x.weekday()], - axis=1) - - for (month, wd), group in grouped: - exp_axis = [x - for x in wp.major_axis - if x.month == month and x.weekday() == wd] - expected = wp.reindex(major=exp_axis) - assert_panel_equal(group, expected) - def test_dictify(self, df): dict(iter(df.groupby('A'))) dict(iter(df.groupby(['A', 'B']))) diff --git a/pandas/tests/groupby/test_nth.py b/pandas/tests/groupby/test_nth.py index 255d9a8acf2d0..7a3d189d3020e 100644 --- a/pandas/tests/groupby/test_nth.py +++ b/pandas/tests/groupby/test_nth.py @@ -278,6 +278,26 @@ def test_first_last_tz(data, expected_first, expected_last): assert_frame_equal(result, expected[['id', 'time']]) +@pytest.mark.parametrize('method, ts, alpha', [ + ['first', Timestamp('2013-01-01', tz='US/Eastern'), 'a'], + ['last', Timestamp('2013-01-02', tz='US/Eastern'), 'b'] +]) +def test_first_last_tz_multi_column(method, ts, alpha): + # GH 21603 + df = pd.DataFrame({'group': [1, 1, 2], + 'category_string': pd.Series(list('abc')).astype( + 'category'), + 'datetimetz': 
pd.date_range('20130101', periods=3, + tz='US/Eastern')}) + result = getattr(df.groupby('group'), method)() + expepcted = pd.DataFrame({'category_string': [alpha, 'c'], + 'datetimetz': [ts, + Timestamp('2013-01-03', + tz='US/Eastern')]}, + index=pd.Index([1, 2], name='group')) + assert_frame_equal(result, expepcted) + + def test_nth_multi_index_as_expected(): # PR 9090, related to issue 8979 # test nth on MultiIndex diff --git a/pandas/tests/groupby/test_transform.py b/pandas/tests/groupby/test_transform.py index f120402e6e8ca..b645073fcf72a 100644 --- a/pandas/tests/groupby/test_transform.py +++ b/pandas/tests/groupby/test_transform.py @@ -834,3 +834,14 @@ def demean_rename(x): tm.assert_frame_equal(result, expected) result_single = df.groupby('group').value.transform(demean_rename) tm.assert_series_equal(result_single, expected['value']) + + +@pytest.mark.parametrize('func', [min, max, np.min, np.max, 'first', 'last']) +def test_groupby_transform_timezone_column(func): + # GH 24198 + ts = pd.to_datetime('now', utc=True).tz_convert('Asia/Singapore') + result = pd.DataFrame({'end_time': [ts], 'id': [1]}) + result['max_end_time'] = result.groupby('id').end_time.transform(func) + expected = pd.DataFrame([[ts, 1, ts]], columns=['end_time', 'id', + 'max_end_time']) + tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/indexes/common.py b/pandas/tests/indexes/common.py index 499f01f0e7f7b..6d29c147c4a4a 100644 --- a/pandas/tests/indexes/common.py +++ b/pandas/tests/indexes/common.py @@ -30,7 +30,12 @@ def setup_indices(self): def test_pickle_compat_construction(self): # need an object to create with - pytest.raises(TypeError, self._holder) + msg = (r"Index\(\.\.\.\) must be called with a collection of some" + r" kind, None was passed|" + r"__new__\(\) missing 1 required positional argument: 'data'|" + r"__new__\(\) takes at least 2 arguments \(1 given\)") + with pytest.raises(TypeError, match=msg): + self._holder() def test_to_series(self): # assert that 
we are creating a copy of the index @@ -84,8 +89,11 @@ def test_shift(self): # GH8083 test the base class for shift idx = self.create_index() - pytest.raises(NotImplementedError, idx.shift, 1) - pytest.raises(NotImplementedError, idx.shift, 1, 2) + msg = "Not supported for type {}".format(type(idx).__name__) + with pytest.raises(NotImplementedError, match=msg): + idx.shift(1) + with pytest.raises(NotImplementedError, match=msg): + idx.shift(1, 2) def test_create_index_existing_name(self): @@ -478,7 +486,7 @@ def test_union_base(self): with pytest.raises(TypeError, match=msg): first.union([1, 2, 3]) - @pytest.mark.parametrize("sort", [True, False]) + @pytest.mark.parametrize("sort", [None, False]) def test_difference_base(self, sort): for name, idx in compat.iteritems(self.indices): first = idx[2:] @@ -905,3 +913,24 @@ def test_astype_category(self, copy, name, ordered): result = index.astype('category', copy=copy) expected = CategoricalIndex(index.values, name=name) tm.assert_index_equal(result, expected) + + def test_is_unique(self): + # initialize a unique index + index = self.create_index().drop_duplicates() + assert index.is_unique is True + + # empty index should be unique + index_empty = index[:0] + assert index_empty.is_unique is True + + # test basic dupes + index_dup = index.insert(0, index[0]) + assert index_dup.is_unique is False + + # single NA should be unique + index_na = index.insert(0, np.nan) + assert index_na.is_unique is True + + # multiple NA should not be unique + index_na_dup = index_na.insert(0, np.nan) + assert index_na_dup.is_unique is False diff --git a/pandas/tests/indexes/datetimes/test_construction.py b/pandas/tests/indexes/datetimes/test_construction.py index 7ebebbf6dee28..6893f635c82ac 100644 --- a/pandas/tests/indexes/datetimes/test_construction.py +++ b/pandas/tests/indexes/datetimes/test_construction.py @@ -135,8 +135,10 @@ def test_construction_with_alt_tz_localize(self, kwargs, tz_aware_fixture): tm.assert_index_equal(i2, 
expected) # incompat tz/dtype - pytest.raises(ValueError, lambda: DatetimeIndex( - i.tz_localize(None).asi8, dtype=i.dtype, tz='US/Pacific')) + msg = "cannot supply both a tz and a dtype with a tz" + with pytest.raises(ValueError, match=msg): + DatetimeIndex(i.tz_localize(None).asi8, + dtype=i.dtype, tz='US/Pacific') def test_construction_index_with_mixed_timezones(self): # gh-11488: no tz results in DatetimeIndex @@ -439,14 +441,19 @@ def test_constructor_coverage(self): tm.assert_index_equal(from_ints, expected) # non-conforming - pytest.raises(ValueError, DatetimeIndex, - ['2000-01-01', '2000-01-02', '2000-01-04'], freq='D') + msg = ("Inferred frequency None from passed values does not conform" + " to passed frequency D") + with pytest.raises(ValueError, match=msg): + DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-04'], freq='D') - pytest.raises(ValueError, date_range, start='2011-01-01', - freq='b') - pytest.raises(ValueError, date_range, end='2011-01-01', - freq='B') - pytest.raises(ValueError, date_range, periods=10, freq='D') + msg = ("Of the four parameters: start, end, periods, and freq, exactly" + " three must be specified") + with pytest.raises(ValueError, match=msg): + date_range(start='2011-01-01', freq='b') + with pytest.raises(ValueError, match=msg): + date_range(end='2011-01-01', freq='B') + with pytest.raises(ValueError, match=msg): + date_range(periods=10, freq='D') @pytest.mark.parametrize('freq', ['AS', 'W-SUN']) def test_constructor_datetime64_tzformat(self, freq): @@ -511,18 +518,20 @@ def test_constructor_dtype(self): idx = DatetimeIndex(['2013-01-01', '2013-01-02'], dtype='datetime64[ns, US/Eastern]') - pytest.raises(ValueError, - lambda: DatetimeIndex(idx, - dtype='datetime64[ns]')) + msg = ("cannot supply both a tz and a timezone-naive dtype" + r" \(i\.e\. 
datetime64\[ns\]\)") + with pytest.raises(ValueError, match=msg): + DatetimeIndex(idx, dtype='datetime64[ns]') # this is effectively trying to convert tz's - pytest.raises(TypeError, - lambda: DatetimeIndex(idx, - dtype='datetime64[ns, CET]')) - pytest.raises(ValueError, - lambda: DatetimeIndex( - idx, tz='CET', - dtype='datetime64[ns, US/Eastern]')) + msg = ("data is already tz-aware US/Eastern, unable to set specified" + " tz: CET") + with pytest.raises(TypeError, match=msg): + DatetimeIndex(idx, dtype='datetime64[ns, CET]') + msg = "cannot supply both a tz and a dtype with a tz" + with pytest.raises(ValueError, match=msg): + DatetimeIndex(idx, tz='CET', dtype='datetime64[ns, US/Eastern]') + result = DatetimeIndex(idx, dtype='datetime64[ns, US/Eastern]') tm.assert_index_equal(idx, result) @@ -732,7 +741,9 @@ def test_from_freq_recreate_from_data(self, freq): def test_datetimeindex_constructor_misc(self): arr = ['1/1/2005', '1/2/2005', 'Jn 3, 2005', '2005-01-04'] - pytest.raises(Exception, DatetimeIndex, arr) + msg = r"(\(u?')?Unknown string format(:', 'Jn 3, 2005'\))?" 
+ with pytest.raises(ValueError, match=msg): + DatetimeIndex(arr) arr = ['1/1/2005', '1/2/2005', '1/3/2005', '2005-01-04'] idx1 = DatetimeIndex(arr) diff --git a/pandas/tests/indexes/datetimes/test_date_range.py b/pandas/tests/indexes/datetimes/test_date_range.py index a9bece248e9d0..a38ee264d362c 100644 --- a/pandas/tests/indexes/datetimes/test_date_range.py +++ b/pandas/tests/indexes/datetimes/test_date_range.py @@ -346,8 +346,10 @@ def test_compat_replace(self, f): def test_catch_infinite_loop(self): offset = offsets.DateOffset(minute=5) # blow up, don't loop forever - pytest.raises(Exception, date_range, datetime(2011, 11, 11), - datetime(2011, 11, 12), freq=offset) + msg = "Offset <DateOffset: minute=5> did not increment date" + with pytest.raises(ValueError, match=msg): + date_range(datetime(2011, 11, 11), datetime(2011, 11, 12), + freq=offset) @pytest.mark.parametrize('periods', (1, 2)) def test_wom_len(self, periods): diff --git a/pandas/tests/indexes/datetimes/test_datetime.py b/pandas/tests/indexes/datetimes/test_datetime.py index e1ba0e1708442..c7147e6fe7063 100644 --- a/pandas/tests/indexes/datetimes/test_datetime.py +++ b/pandas/tests/indexes/datetimes/test_datetime.py @@ -100,9 +100,8 @@ def test_hash_error(self): def test_stringified_slice_with_tz(self): # GH#2658 - import datetime - start = datetime.datetime.now() - idx = date_range(start=start, freq="1d", periods=10) + start = '2013-01-07' + idx = date_range(start=start, freq="1d", periods=10, tz='US/Eastern') df = DataFrame(lrange(10), index=idx) df["2013-01-14 23:44:34.437768-05:00":] # no exception here diff --git a/pandas/tests/indexes/datetimes/test_misc.py b/pandas/tests/indexes/datetimes/test_misc.py index cec181161fc11..fc6080e68a803 100644 --- a/pandas/tests/indexes/datetimes/test_misc.py +++ b/pandas/tests/indexes/datetimes/test_misc.py @@ -190,7 +190,9 @@ def test_datetimeindex_accessors(self): # Ensure is_start/end accessors throw ValueError for CustomBusinessDay, bday_egypt = 
offsets.CustomBusinessDay(weekmask='Sun Mon Tue Wed Thu') dti = date_range(datetime(2013, 4, 30), periods=5, freq=bday_egypt) - pytest.raises(ValueError, lambda: dti.is_month_start) + msg = "Custom business days is not supported by is_month_start" + with pytest.raises(ValueError, match=msg): + dti.is_month_start dti = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-03']) diff --git a/pandas/tests/indexes/datetimes/test_ops.py b/pandas/tests/indexes/datetimes/test_ops.py index 2a546af79931e..84085141fcf92 100644 --- a/pandas/tests/indexes/datetimes/test_ops.py +++ b/pandas/tests/indexes/datetimes/test_ops.py @@ -37,15 +37,19 @@ def test_ops_properties_basic(self): # sanity check that the behavior didn't change # GH#7206 + msg = "'Series' object has no attribute '{}'" for op in ['year', 'day', 'second', 'weekday']: - pytest.raises(TypeError, lambda x: getattr(self.dt_series, op)) + with pytest.raises(AttributeError, match=msg.format(op)): + getattr(self.dt_series, op) # attribute access should still work! 
s = Series(dict(year=2000, month=1, day=10)) assert s.year == 2000 assert s.month == 1 assert s.day == 10 - pytest.raises(AttributeError, lambda: s.weekday) + msg = "'Series' object has no attribute 'weekday'" + with pytest.raises(AttributeError, match=msg): + s.weekday def test_repeat_range(self, tz_naive_fixture): tz = tz_naive_fixture diff --git a/pandas/tests/indexes/datetimes/test_partial_slicing.py b/pandas/tests/indexes/datetimes/test_partial_slicing.py index 1b2aab9d370a3..64693324521b3 100644 --- a/pandas/tests/indexes/datetimes/test_partial_slicing.py +++ b/pandas/tests/indexes/datetimes/test_partial_slicing.py @@ -170,7 +170,8 @@ def test_partial_slice(self): result = s['2005-1-1'] assert result == s.iloc[0] - pytest.raises(Exception, s.__getitem__, '2004-12-31') + with pytest.raises(KeyError, match=r"^'2004-12-31'$"): + s['2004-12-31'] def test_partial_slice_daily(self): rng = date_range(freq='H', start=datetime(2005, 1, 31), periods=500) @@ -179,7 +180,8 @@ def test_partial_slice_daily(self): result = s['2005-1-31'] tm.assert_series_equal(result, s.iloc[:24]) - pytest.raises(Exception, s.__getitem__, '2004-12-31 00') + with pytest.raises(KeyError, match=r"^'2004-12-31 00'$"): + s['2004-12-31 00'] def test_partial_slice_hourly(self): rng = date_range(freq='T', start=datetime(2005, 1, 1, 20, 0, 0), @@ -193,7 +195,8 @@ def test_partial_slice_hourly(self): tm.assert_series_equal(result, s.iloc[:60]) assert s['2005-1-1 20:00'] == s.iloc[0] - pytest.raises(Exception, s.__getitem__, '2004-12-31 00:15') + with pytest.raises(KeyError, match=r"^'2004-12-31 00:15'$"): + s['2004-12-31 00:15'] def test_partial_slice_minutely(self): rng = date_range(freq='S', start=datetime(2005, 1, 1, 23, 59, 0), @@ -207,7 +210,8 @@ def test_partial_slice_minutely(self): tm.assert_series_equal(result, s.iloc[:60]) assert s[Timestamp('2005-1-1 23:59:00')] == s.iloc[0] - pytest.raises(Exception, s.__getitem__, '2004-12-31 00:00:00') + with pytest.raises(KeyError, match=r"^'2004-12-31 
00:00:00'$"): + s['2004-12-31 00:00:00'] def test_partial_slice_second_precision(self): rng = date_range(start=datetime(2005, 1, 1, 0, 0, 59, @@ -255,7 +259,9 @@ def test_partial_slicing_dataframe(self): result = df['a'][ts_string] assert isinstance(result, np.int64) assert result == expected - pytest.raises(KeyError, df.__getitem__, ts_string) + msg = r"^'{}'$".format(ts_string) + with pytest.raises(KeyError, match=msg): + df[ts_string] # Timestamp with resolution less precise than index for fmt in formats[:rnum]: @@ -282,15 +288,20 @@ def test_partial_slicing_dataframe(self): result = df['a'][ts_string] assert isinstance(result, np.int64) assert result == 2 - pytest.raises(KeyError, df.__getitem__, ts_string) + msg = r"^'{}'$".format(ts_string) + with pytest.raises(KeyError, match=msg): + df[ts_string] # Not compatible with existing key # Should raise KeyError for fmt, res in list(zip(formats, resolutions))[rnum + 1:]: ts = index[1] + Timedelta("1 " + res) ts_string = ts.strftime(fmt) - pytest.raises(KeyError, df['a'].__getitem__, ts_string) - pytest.raises(KeyError, df.__getitem__, ts_string) + msg = r"^'{}'$".format(ts_string) + with pytest.raises(KeyError, match=msg): + df['a'][ts_string] + with pytest.raises(KeyError, match=msg): + df[ts_string] def test_partial_slicing_with_multiindex(self): @@ -316,11 +327,10 @@ def test_partial_slicing_with_multiindex(self): # this is an IndexingError as we don't do partial string selection on # multi-levels. 
- def f(): + msg = "Too many indexers" + with pytest.raises(IndexingError, match=msg): df_multi.loc[('2013-06-19', 'ACCT1', 'ABC')] - pytest.raises(IndexingError, f) - # GH 4294 # partial slice on a series mi s = pd.DataFrame(np.random.rand(1000, 1000), index=pd.date_range( @@ -386,3 +396,30 @@ def test_selection_by_datetimelike(self, datetimelike, op, expected): result = op(df.A, datetimelike) expected = Series(expected, name='A') tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize('start', [ + '2018-12-02 21:50:00+00:00', pd.Timestamp('2018-12-02 21:50:00+00:00'), + pd.Timestamp('2018-12-02 21:50:00+00:00').to_pydatetime() + ]) + @pytest.mark.parametrize('end', [ + '2018-12-02 21:52:00+00:00', pd.Timestamp('2018-12-02 21:52:00+00:00'), + pd.Timestamp('2018-12-02 21:52:00+00:00').to_pydatetime() + ]) + def test_getitem_with_datestring_with_UTC_offset(self, start, end): + # GH 24076 + idx = pd.date_range(start='2018-12-02 14:50:00-07:00', + end='2018-12-02 14:50:00-07:00', freq='1min') + df = pd.DataFrame(1, index=idx, columns=['A']) + result = df[start:end] + expected = df.iloc[0:3, :] + tm.assert_frame_equal(result, expected) + + # GH 16785 + start = str(start) + end = str(end) + with pytest.raises(ValueError, match="Both dates must"): + df[start:end[:-4] + '1:00'] + + with pytest.raises(ValueError, match="The index must be timezone"): + df = df.tz_localize(None) + df[start:end] diff --git a/pandas/tests/indexes/datetimes/test_scalar_compat.py b/pandas/tests/indexes/datetimes/test_scalar_compat.py index 680eddd27cf9f..42338a751e0fc 100644 --- a/pandas/tests/indexes/datetimes/test_scalar_compat.py +++ b/pandas/tests/indexes/datetimes/test_scalar_compat.py @@ -7,6 +7,8 @@ import numpy as np import pytest +from pandas._libs.tslibs.np_datetime import OutOfBoundsDatetime + import pandas as pd from pandas import DatetimeIndex, Timestamp, date_range import pandas.util.testing as tm @@ -27,10 +29,14 @@ def test_dti_date(self): expected = [t.date() for t 
in rng] assert (result == expected).all() - def test_dti_date_out_of_range(self): + @pytest.mark.parametrize('data', [ + ['1400-01-01'], + [datetime(1400, 1, 1)]]) + def test_dti_date_out_of_range(self, data): # GH#1475 - pytest.raises(ValueError, DatetimeIndex, ['1400-01-01']) - pytest.raises(ValueError, DatetimeIndex, [datetime(1400, 1, 1)]) + msg = "Out of bounds nanosecond timestamp: 1400-01-01 00:00:00" + with pytest.raises(OutOfBoundsDatetime, match=msg): + DatetimeIndex(data) @pytest.mark.parametrize('field', [ 'dayofweek', 'dayofyear', 'week', 'weekofyear', 'quarter', @@ -74,9 +80,15 @@ def test_round_daily(self): result = dti.round('s') tm.assert_index_equal(result, dti) - # invalid - for freq in ['Y', 'M', 'foobar']: - pytest.raises(ValueError, lambda: dti.round(freq)) + @pytest.mark.parametrize('freq, error_msg', [ + ('Y', '<YearEnd: month=12> is a non-fixed frequency'), + ('M', '<MonthEnd> is a non-fixed frequency'), + ('foobar', 'Invalid frequency: foobar')]) + def test_round_invalid(self, freq, error_msg): + dti = date_range('20130101 09:10:11', periods=5) + dti = dti.tz_localize('UTC').tz_convert('US/Eastern') + with pytest.raises(ValueError, match=error_msg): + dti.round(freq) def test_round(self, tz_naive_fixture): tz = tz_naive_fixture diff --git a/pandas/tests/indexes/datetimes/test_setops.py b/pandas/tests/indexes/datetimes/test_setops.py index bd37cc815d0f7..cf1f75234ec62 100644 --- a/pandas/tests/indexes/datetimes/test_setops.py +++ b/pandas/tests/indexes/datetimes/test_setops.py @@ -21,83 +21,107 @@ class TestDatetimeIndexSetOps(object): 'dateutil/US/Pacific'] # TODO: moved from test_datetimelike; dedup with version below - def test_union2(self): + @pytest.mark.parametrize("sort", [None, False]) + def test_union2(self, sort): everything = tm.makeDateIndex(10) first = everything[:5] second = everything[5:] - union = first.union(second) - assert tm.equalContents(union, everything) + union = first.union(second, sort=sort) + 
tm.assert_index_equal(union, everything) # GH 10149 cases = [klass(second.values) for klass in [np.array, Series, list]] for case in cases: - result = first.union(case) - assert tm.equalContents(result, everything) + result = first.union(case, sort=sort) + tm.assert_index_equal(result, everything) @pytest.mark.parametrize("tz", tz) - def test_union(self, tz): + @pytest.mark.parametrize("sort", [None, False]) + def test_union(self, tz, sort): rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz) other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz) expected1 = pd.date_range('1/1/2000', freq='D', periods=10, tz=tz) + expected1_notsorted = pd.DatetimeIndex(list(other1) + list(rng1)) rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz) other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz) expected2 = pd.date_range('1/1/2000', freq='D', periods=8, tz=tz) + expected2_notsorted = pd.DatetimeIndex(list(other2) + list(rng2[:3])) rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz) other3 = pd.DatetimeIndex([], tz=tz) expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz) - - for rng, other, expected in [(rng1, other1, expected1), - (rng2, other2, expected2), - (rng3, other3, expected3)]: - - result_union = rng.union(other) - tm.assert_index_equal(result_union, expected) - - def test_union_coverage(self): + expected3_notsorted = rng3 + + for rng, other, exp, exp_notsorted in [(rng1, other1, expected1, + expected1_notsorted), + (rng2, other2, expected2, + expected2_notsorted), + (rng3, other3, expected3, + expected3_notsorted)]: + + result_union = rng.union(other, sort=sort) + tm.assert_index_equal(result_union, exp) + + result_union = other.union(rng, sort=sort) + if sort is None: + tm.assert_index_equal(result_union, exp) + else: + tm.assert_index_equal(result_union, exp_notsorted) + + @pytest.mark.parametrize("sort", [None, False]) + def test_union_coverage(self, sort): idx = DatetimeIndex(['2000-01-03', '2000-01-01', 
'2000-01-02']) ordered = DatetimeIndex(idx.sort_values(), freq='infer') - result = ordered.union(idx) + result = ordered.union(idx, sort=sort) tm.assert_index_equal(result, ordered) - result = ordered[:0].union(ordered) + result = ordered[:0].union(ordered, sort=sort) tm.assert_index_equal(result, ordered) assert result.freq == ordered.freq - def test_union_bug_1730(self): + @pytest.mark.parametrize("sort", [None, False]) + def test_union_bug_1730(self, sort): rng_a = date_range('1/1/2012', periods=4, freq='3H') rng_b = date_range('1/1/2012', periods=4, freq='4H') - result = rng_a.union(rng_b) + result = rng_a.union(rng_b, sort=sort) exp = DatetimeIndex(sorted(set(list(rng_a)) | set(list(rng_b)))) tm.assert_index_equal(result, exp) - def test_union_bug_1745(self): + @pytest.mark.parametrize("sort", [None, False]) + def test_union_bug_1745(self, sort): left = DatetimeIndex(['2012-05-11 15:19:49.695000']) right = DatetimeIndex(['2012-05-29 13:04:21.322000', '2012-05-11 15:27:24.873000', '2012-05-11 15:31:05.350000']) - result = left.union(right) - exp = DatetimeIndex(sorted(set(list(left)) | set(list(right)))) + result = left.union(right, sort=sort) + exp = DatetimeIndex(['2012-05-11 15:19:49.695000', + '2012-05-29 13:04:21.322000', + '2012-05-11 15:27:24.873000', + '2012-05-11 15:31:05.350000']) + if sort is None: + exp = exp.sort_values() tm.assert_index_equal(result, exp) - def test_union_bug_4564(self): + @pytest.mark.parametrize("sort", [None, False]) + def test_union_bug_4564(self, sort): from pandas import DateOffset left = date_range("2013-01-01", "2013-02-01") right = left + DateOffset(minutes=15) - result = left.union(right) + result = left.union(right, sort=sort) exp = DatetimeIndex(sorted(set(list(left)) | set(list(right)))) tm.assert_index_equal(result, exp) - def test_union_freq_both_none(self): + @pytest.mark.parametrize("sort", [None, False]) + def test_union_freq_both_none(self, sort): # GH11086 expected = bdate_range('20150101', periods=10) 
expected.freq = None - result = expected.union(expected) + result = expected.union(expected, sort=sort) tm.assert_index_equal(result, expected) assert result.freq is None @@ -112,11 +136,14 @@ def test_union_dataframe_index(self): exp = pd.date_range('1/1/1980', '1/1/2012', freq='MS') tm.assert_index_equal(df.index, exp) - def test_union_with_DatetimeIndex(self): + @pytest.mark.parametrize("sort", [None, False]) + def test_union_with_DatetimeIndex(self, sort): i1 = Int64Index(np.arange(0, 20, 2)) i2 = date_range(start='2012-01-03 00:00:00', periods=10, freq='D') - i1.union(i2) # Works - i2.union(i1) # Fails with "AttributeError: can't set attribute" + # Works + i1.union(i2, sort=sort) + # Fails with "AttributeError: can't set attribute" + i2.union(i1, sort=sort) # TODO: moved from test_datetimelike; de-duplicate with version below def test_intersection2(self): @@ -138,7 +165,7 @@ def test_intersection2(self): @pytest.mark.parametrize("tz", [None, 'Asia/Tokyo', 'US/Eastern', 'dateutil/US/Pacific']) - @pytest.mark.parametrize("sort", [True, False]) + @pytest.mark.parametrize("sort", [None, False]) def test_intersection(self, tz, sort): # GH 4690 (with tz) base = date_range('6/1/2000', '6/30/2000', freq='D', name='idx') @@ -187,7 +214,7 @@ def test_intersection(self, tz, sort): for (rng, expected) in [(rng2, expected2), (rng3, expected3), (rng4, expected4)]: result = base.intersection(rng, sort=sort) - if sort: + if sort is None: expected = expected.sort_values() tm.assert_index_equal(result, expected) assert result.name == expected.name @@ -212,7 +239,7 @@ def test_intersection_bug_1708(self): assert len(result) == 0 @pytest.mark.parametrize("tz", tz) - @pytest.mark.parametrize("sort", [True, False]) + @pytest.mark.parametrize("sort", [None, False]) def test_difference(self, tz, sort): rng_dates = ['1/2/2000', '1/3/2000', '1/1/2000', '1/4/2000', '1/5/2000'] @@ -233,11 +260,11 @@ def test_difference(self, tz, sort): (rng2, other2, expected2), (rng3, other3, 
expected3)]: result_diff = rng.difference(other, sort) - if sort: + if sort is None: expected = expected.sort_values() tm.assert_index_equal(result_diff, expected) - @pytest.mark.parametrize("sort", [True, False]) + @pytest.mark.parametrize("sort", [None, False]) def test_difference_freq(self, sort): # GH14323: difference of DatetimeIndex should not preserve frequency @@ -254,7 +281,7 @@ def test_difference_freq(self, sort): tm.assert_index_equal(idx_diff, expected) tm.assert_attr_equal('freq', idx_diff, expected) - @pytest.mark.parametrize("sort", [True, False]) + @pytest.mark.parametrize("sort", [None, False]) def test_datetimeindex_diff(self, sort): dti1 = date_range(freq='Q-JAN', start=datetime(1997, 12, 31), periods=100) @@ -262,11 +289,12 @@ def test_datetimeindex_diff(self, sort): periods=98) assert len(dti1.difference(dti2, sort)) == 2 - def test_datetimeindex_union_join_empty(self): + @pytest.mark.parametrize("sort", [None, False]) + def test_datetimeindex_union_join_empty(self, sort): dti = date_range(start='1/1/2001', end='2/1/2001', freq='D') empty = Index([]) - result = dti.union(empty) + result = dti.union(empty, sort=sort) assert isinstance(result, DatetimeIndex) assert result is result @@ -287,35 +315,40 @@ class TestBusinessDatetimeIndex(object): def setup_method(self, method): self.rng = bdate_range(START, END) - def test_union(self): + @pytest.mark.parametrize("sort", [None, False]) + def test_union(self, sort): # overlapping left = self.rng[:10] right = self.rng[5:10] - the_union = left.union(right) + the_union = left.union(right, sort=sort) assert isinstance(the_union, DatetimeIndex) # non-overlapping, gap in middle left = self.rng[:5] right = self.rng[10:] - the_union = left.union(right) + the_union = left.union(right, sort=sort) assert isinstance(the_union, Index) # non-overlapping, no gap left = self.rng[:5] right = self.rng[5:10] - the_union = left.union(right) + the_union = left.union(right, sort=sort) assert isinstance(the_union, 
DatetimeIndex) # order does not matter - tm.assert_index_equal(right.union(left), the_union) + if sort is None: + tm.assert_index_equal(right.union(left, sort=sort), the_union) + else: + expected = pd.DatetimeIndex(list(right) + list(left)) + tm.assert_index_equal(right.union(left, sort=sort), expected) # overlapping, but different offset rng = date_range(START, END, freq=BMonthEnd()) - the_union = self.rng.union(rng) + the_union = self.rng.union(rng, sort=sort) assert isinstance(the_union, DatetimeIndex) def test_outer_join(self): @@ -350,16 +383,21 @@ def test_outer_join(self): assert isinstance(the_join, DatetimeIndex) assert the_join.freq is None - def test_union_not_cacheable(self): + @pytest.mark.parametrize("sort", [None, False]) + def test_union_not_cacheable(self, sort): rng = date_range('1/1/2000', periods=50, freq=Minute()) rng1 = rng[10:] rng2 = rng[:25] - the_union = rng1.union(rng2) - tm.assert_index_equal(the_union, rng) + the_union = rng1.union(rng2, sort=sort) + if sort is None: + tm.assert_index_equal(the_union, rng) + else: + expected = pd.DatetimeIndex(list(rng[10:]) + list(rng[:10])) + tm.assert_index_equal(the_union, expected) rng1 = rng[10:] rng2 = rng[15:35] - the_union = rng1.union(rng2) + the_union = rng1.union(rng2, sort=sort) expected = rng[10:] tm.assert_index_equal(the_union, expected) @@ -388,7 +426,8 @@ def test_intersection_bug(self): result = a.intersection(b) tm.assert_index_equal(result, b) - def test_month_range_union_tz_pytz(self): + @pytest.mark.parametrize("sort", [None, False]) + def test_month_range_union_tz_pytz(self, sort): from pytz import timezone tz = timezone('US/Eastern') @@ -403,10 +442,11 @@ def test_month_range_union_tz_pytz(self): late_dr = date_range(start=late_start, end=late_end, tz=tz, freq=MonthEnd()) - early_dr.union(late_dr) + early_dr.union(late_dr, sort=sort) @td.skip_if_windows_python_3 - def test_month_range_union_tz_dateutil(self): + @pytest.mark.parametrize("sort", [None, False]) + def 
test_month_range_union_tz_dateutil(self, sort): from pandas._libs.tslibs.timezones import dateutil_gettz tz = dateutil_gettz('US/Eastern') @@ -421,7 +461,7 @@ def test_month_range_union_tz_dateutil(self): late_dr = date_range(start=late_start, end=late_end, tz=tz, freq=MonthEnd()) - early_dr.union(late_dr) + early_dr.union(late_dr, sort=sort) class TestCustomDatetimeIndex(object): @@ -429,35 +469,37 @@ class TestCustomDatetimeIndex(object): def setup_method(self, method): self.rng = bdate_range(START, END, freq='C') - def test_union(self): + @pytest.mark.parametrize("sort", [None, False]) + def test_union(self, sort): # overlapping left = self.rng[:10] right = self.rng[5:10] - the_union = left.union(right) + the_union = left.union(right, sort=sort) assert isinstance(the_union, DatetimeIndex) # non-overlapping, gap in middle left = self.rng[:5] right = self.rng[10:] - the_union = left.union(right) + the_union = left.union(right, sort) assert isinstance(the_union, Index) # non-overlapping, no gap left = self.rng[:5] right = self.rng[5:10] - the_union = left.union(right) + the_union = left.union(right, sort=sort) assert isinstance(the_union, DatetimeIndex) # order does not matter - tm.assert_index_equal(right.union(left), the_union) + if sort is None: + tm.assert_index_equal(right.union(left, sort=sort), the_union) # overlapping, but different offset rng = date_range(START, END, freq=BMonthEnd()) - the_union = self.rng.union(rng) + the_union = self.rng.union(rng, sort=sort) assert isinstance(the_union, DatetimeIndex) def test_outer_join(self): diff --git a/pandas/tests/indexes/datetimes/test_timezones.py b/pandas/tests/indexes/datetimes/test_timezones.py index 8bcc9296cb010..b25918417efcd 100644 --- a/pandas/tests/indexes/datetimes/test_timezones.py +++ b/pandas/tests/indexes/datetimes/test_timezones.py @@ -434,24 +434,19 @@ def test_dti_tz_localize_utc_conversion(self, tz): with pytest.raises(pytz.NonExistentTimeError): rng.tz_localize(tz) - 
@pytest.mark.parametrize('idx', [ - date_range(start='2014-01-01', end='2014-12-31', freq='M'), - date_range(start='2014-01-01', end='2014-12-31', freq='D'), - date_range(start='2014-01-01', end='2014-03-01', freq='H'), - date_range(start='2014-08-01', end='2014-10-31', freq='T') - ]) - def test_dti_tz_localize_roundtrip(self, tz_aware_fixture, idx): + def test_dti_tz_localize_roundtrip(self, tz_aware_fixture): + # note: this tz tests that a tz-naive index can be localized + # and de-localized successfully, when there are no DST transitions + # in the range. + idx = date_range(start='2014-06-01', end='2014-08-30', freq='15T') tz = tz_aware_fixture localized = idx.tz_localize(tz) - expected = date_range(start=idx[0], end=idx[-1], freq=idx.freq, - tz=tz) - tm.assert_index_equal(localized, expected) + # cant localize a tz-aware object with pytest.raises(TypeError): localized.tz_localize(tz) - reset = localized.tz_localize(None) - tm.assert_index_equal(reset, idx) assert reset.tzinfo is None + tm.assert_index_equal(reset, idx) def test_dti_tz_localize_naive(self): rng = date_range('1/1/2011', periods=100, freq='H') @@ -830,6 +825,13 @@ def test_dti_drop_dont_lose_tz(self): assert ind.tz is not None + def test_dti_tz_conversion_freq(self, tz_naive_fixture): + # GH25241 + t3 = DatetimeIndex(['2019-01-01 10:00'], freq='H') + assert t3.tz_localize(tz=tz_naive_fixture).freq == t3.freq + t4 = DatetimeIndex(['2019-01-02 12:00'], tz='UTC', freq='T') + assert t4.tz_convert(tz='UTC').freq == t4.freq + def test_drop_dst_boundary(self): # see gh-18031 tz = "Europe/Brussels" diff --git a/pandas/tests/indexes/datetimes/test_tools.py b/pandas/tests/indexes/datetimes/test_tools.py index bec2fa66c43cd..a72aacce2f86d 100644 --- a/pandas/tests/indexes/datetimes/test_tools.py +++ b/pandas/tests/indexes/datetimes/test_tools.py @@ -247,6 +247,55 @@ def test_to_datetime_parse_timezone_keeps_name(self): class TestToDatetime(object): + @pytest.mark.parametrize("s, _format, dt", [ + 
['2015-1-1', '%G-%V-%u', datetime(2014, 12, 29, 0, 0)], + ['2015-1-4', '%G-%V-%u', datetime(2015, 1, 1, 0, 0)], + ['2015-1-7', '%G-%V-%u', datetime(2015, 1, 4, 0, 0)] + ]) + def test_to_datetime_iso_week_year_format(self, s, _format, dt): + # See GH#16607 + assert to_datetime(s, format=_format) == dt + + @pytest.mark.parametrize("msg, s, _format", [ + ["ISO week directive '%V' must be used with the ISO year directive " + "'%G' and a weekday directive '%A', '%a', '%w', or '%u'.", "1999 50", + "%Y %V"], + ["ISO year directive '%G' must be used with the ISO week directive " + "'%V' and a weekday directive '%A', '%a', '%w', or '%u'.", "1999 51", + "%G %V"], + ["ISO year directive '%G' must be used with the ISO week directive " + "'%V' and a weekday directive '%A', '%a', '%w', or '%u'.", "1999 " + "Monday", "%G %A"], + ["ISO year directive '%G' must be used with the ISO week directive " + "'%V' and a weekday directive '%A', '%a', '%w', or '%u'.", "1999 Mon", + "%G %a"], + ["ISO year directive '%G' must be used with the ISO week directive " + "'%V' and a weekday directive '%A', '%a', '%w', or '%u'.", "1999 6", + "%G %w"], + ["ISO year directive '%G' must be used with the ISO week directive " + "'%V' and a weekday directive '%A', '%a', '%w', or '%u'.", "1999 6", + "%G %u"], + ["ISO year directive '%G' must be used with the ISO week directive " + "'%V' and a weekday directive '%A', '%a', '%w', or '%u'.", "2051", + "%G"], + ["Day of the year directive '%j' is not compatible with ISO year " + "directive '%G'. Use '%Y' instead.", "1999 51 6 256", "%G %V %u %j"], + ["ISO week directive '%V' is incompatible with the year directive " + "'%Y'. Use the ISO year '%G' instead.", "1999 51 Sunday", "%Y %V %A"], + ["ISO week directive '%V' is incompatible with the year directive " + "'%Y'. Use the ISO year '%G' instead.", "1999 51 Sun", "%Y %V %a"], + ["ISO week directive '%V' is incompatible with the year directive " + "'%Y'. 
Use the ISO year '%G' instead.", "1999 51 1", "%Y %V %w"], + ["ISO week directive '%V' is incompatible with the year directive " + "'%Y'. Use the ISO year '%G' instead.", "1999 51 1", "%Y %V %u"], + ["ISO week directive '%V' must be used with the ISO year directive " + "'%G' and a weekday directive '%A', '%a', '%w', or '%u'.", "20", "%V"] + ]) + def test_ValueError_iso_week_year(self, msg, s, _format): + # See GH#16607 + with pytest.raises(ValueError, match=msg): + to_datetime(s, format=_format) + @pytest.mark.parametrize('tz', [None, 'US/Central']) def test_to_datetime_dtarr(self, tz): # DatetimeArray @@ -346,12 +395,16 @@ def test_to_datetime_dt64s(self, cache): for dt in in_bound_dts: assert pd.to_datetime(dt, cache=cache) == Timestamp(dt) - oob_dts = [np.datetime64('1000-01-01'), np.datetime64('5000-01-02'), ] - - for dt in oob_dts: - pytest.raises(ValueError, pd.to_datetime, dt, errors='raise') - pytest.raises(ValueError, Timestamp, dt) - assert pd.to_datetime(dt, errors='coerce', cache=cache) is NaT + @pytest.mark.parametrize('dt', [np.datetime64('1000-01-01'), + np.datetime64('5000-01-02')]) + @pytest.mark.parametrize('cache', [True, False]) + def test_to_datetime_dt64s_out_of_bounds(self, cache, dt): + msg = "Out of bounds nanosecond timestamp: {}".format(dt) + with pytest.raises(OutOfBoundsDatetime, match=msg): + pd.to_datetime(dt, errors='raise') + with pytest.raises(OutOfBoundsDatetime, match=msg): + Timestamp(dt) + assert pd.to_datetime(dt, errors='coerce', cache=cache) is NaT @pytest.mark.parametrize('cache', [True, False]) def test_to_datetime_array_of_dt64s(self, cache): @@ -367,8 +420,9 @@ def test_to_datetime_array_of_dt64s(self, cache): # A list of datetimes where the last one is out of bounds dts_with_oob = dts + [np.datetime64('9999-01-01')] - pytest.raises(ValueError, pd.to_datetime, dts_with_oob, - errors='raise') + msg = "Out of bounds nanosecond timestamp: 9999-01-01 00:00:00" + with pytest.raises(OutOfBoundsDatetime, match=msg): + 
pd.to_datetime(dts_with_oob, errors='raise') tm.assert_numpy_array_equal( pd.to_datetime(dts_with_oob, box=False, errors='coerce', @@ -410,7 +464,10 @@ def test_to_datetime_tz(self, cache): # mixed tzs will raise arr = [pd.Timestamp('2013-01-01 13:00:00', tz='US/Pacific'), pd.Timestamp('2013-01-02 14:00:00', tz='US/Eastern')] - pytest.raises(ValueError, lambda: pd.to_datetime(arr, cache=cache)) + msg = ("Tz-aware datetime.datetime cannot be converted to datetime64" + " unless utc=True") + with pytest.raises(ValueError, match=msg): + pd.to_datetime(arr, cache=cache) @pytest.mark.parametrize('cache', [True, False]) def test_to_datetime_tz_pytz(self, cache): @@ -706,6 +763,29 @@ def test_iso_8601_strings_with_different_offsets(self): NaT], tz='UTC') tm.assert_index_equal(result, expected) + def test_iss8601_strings_mixed_offsets_with_naive(self): + # GH 24992 + result = pd.to_datetime([ + '2018-11-28T00:00:00', + '2018-11-28T00:00:00+12:00', + '2018-11-28T00:00:00', + '2018-11-28T00:00:00+06:00', + '2018-11-28T00:00:00' + ], utc=True) + expected = pd.to_datetime([ + '2018-11-28T00:00:00', + '2018-11-27T12:00:00', + '2018-11-28T00:00:00', + '2018-11-27T18:00:00', + '2018-11-28T00:00:00' + ], utc=True) + tm.assert_index_equal(result, expected) + + items = ['2018-11-28T00:00:00+12:00', '2018-11-28T00:00:00'] + result = pd.to_datetime(items, utc=True) + expected = pd.to_datetime(list(reversed(items)), utc=True)[::-1] + tm.assert_index_equal(result, expected) + def test_non_iso_strings_with_tz_offset(self): result = to_datetime(['March 1, 2018 12:00:00+0400'] * 2) expected = DatetimeIndex([datetime(2018, 3, 1, 12, @@ -1088,9 +1168,9 @@ def test_to_datetime_on_datetime64_series(self, cache): def test_to_datetime_with_space_in_series(self, cache): # GH 6428 s = Series(['10/18/2006', '10/18/2008', ' ']) - pytest.raises(ValueError, lambda: to_datetime(s, - errors='raise', - cache=cache)) + msg = r"(\(u?')?String does not contain a date(:', ' '\))?" 
+ with pytest.raises(ValueError, match=msg): + to_datetime(s, errors='raise', cache=cache) result_coerce = to_datetime(s, errors='coerce', cache=cache) expected_coerce = Series([datetime(2006, 10, 18), datetime(2008, 10, 18), @@ -1111,13 +1191,12 @@ def test_to_datetime_with_apply(self, cache): assert_series_equal(result, expected) td = pd.Series(['May 04', 'Jun 02', ''], index=[1, 2, 3]) - pytest.raises(ValueError, - lambda: pd.to_datetime(td, format='%b %y', - errors='raise', - cache=cache)) - pytest.raises(ValueError, - lambda: td.apply(pd.to_datetime, format='%b %y', - errors='raise', cache=cache)) + msg = r"time data '' does not match format '%b %y' \(match\)" + with pytest.raises(ValueError, match=msg): + pd.to_datetime(td, format='%b %y', errors='raise', cache=cache) + with pytest.raises(ValueError, match=msg): + td.apply(pd.to_datetime, format='%b %y', + errors='raise', cache=cache) expected = pd.to_datetime(td, format='%b %y', errors='coerce', cache=cache) @@ -1168,8 +1247,9 @@ def test_to_datetime_unprocessable_input(self, cache, box, klass): result = to_datetime([1, '1'], errors='ignore', cache=cache, box=box) expected = klass(np.array([1, '1'], dtype='O')) tm.assert_equal(result, expected) - pytest.raises(TypeError, to_datetime, [1, '1'], errors='raise', - cache=cache, box=box) + msg = "invalid string coercion to datetime" + with pytest.raises(TypeError, match=msg): + to_datetime([1, '1'], errors='raise', cache=cache, box=box) def test_to_datetime_other_datetime64_units(self): # 5/25/2012 @@ -1225,17 +1305,18 @@ def test_string_na_nat_conversion(self, cache): malformed = np.array(['1/100/2000', np.nan], dtype=object) # GH 10636, default is now 'raise' - pytest.raises(ValueError, - lambda: to_datetime(malformed, errors='raise', - cache=cache)) + msg = (r"\(u?'Unknown string format:', '1/100/2000'\)|" + "day is out of range for month") + with pytest.raises(ValueError, match=msg): + to_datetime(malformed, errors='raise', cache=cache) result = 
to_datetime(malformed, errors='ignore', cache=cache) # GH 21864 expected = Index(malformed) tm.assert_index_equal(result, expected) - pytest.raises(ValueError, to_datetime, malformed, errors='raise', - cache=cache) + with pytest.raises(ValueError, match=msg): + to_datetime(malformed, errors='raise', cache=cache) idx = ['a', 'b', 'c', 'd', 'e'] series = Series(['1/1/2000', np.nan, '1/3/2000', np.nan, @@ -1414,14 +1495,24 @@ def test_day_not_in_month_coerce(self, cache): @pytest.mark.parametrize('cache', [True, False]) def test_day_not_in_month_raise(self, cache): - pytest.raises(ValueError, to_datetime, '2015-02-29', - errors='raise', cache=cache) - pytest.raises(ValueError, to_datetime, '2015-02-29', - errors='raise', format="%Y-%m-%d", cache=cache) - pytest.raises(ValueError, to_datetime, '2015-02-32', - errors='raise', format="%Y-%m-%d", cache=cache) - pytest.raises(ValueError, to_datetime, '2015-04-31', - errors='raise', format="%Y-%m-%d", cache=cache) + msg = "day is out of range for month" + with pytest.raises(ValueError, match=msg): + to_datetime('2015-02-29', errors='raise', cache=cache) + + msg = "time data 2015-02-29 doesn't match format specified" + with pytest.raises(ValueError, match=msg): + to_datetime('2015-02-29', errors='raise', format="%Y-%m-%d", + cache=cache) + + msg = "time data 2015-02-32 doesn't match format specified" + with pytest.raises(ValueError, match=msg): + to_datetime('2015-02-32', errors='raise', format="%Y-%m-%d", + cache=cache) + + msg = "time data 2015-04-31 doesn't match format specified" + with pytest.raises(ValueError, match=msg): + to_datetime('2015-04-31', errors='raise', format="%Y-%m-%d", + cache=cache) @pytest.mark.parametrize('cache', [True, False]) def test_day_not_in_month_ignore(self, cache): @@ -1656,7 +1747,9 @@ def test_parsers_time(self): assert tools.to_time(time_string) == expected new_string = "14.15" - pytest.raises(ValueError, tools.to_time, new_string) + msg = r"Cannot convert arg \['14\.15'\] to a time" + 
with pytest.raises(ValueError, match=msg): + tools.to_time(new_string) assert tools.to_time(new_string, format="%H.%M") == expected arg = ["14:15", "20:20"] @@ -1824,6 +1917,15 @@ def test_invalid_origins_tzinfo(self): pd.to_datetime(1, unit='D', origin=datetime(2000, 1, 1, tzinfo=pytz.utc)) + @pytest.mark.parametrize("format", [ + None, "%Y-%m-%d %H:%M:%S" + ]) + def test_to_datetime_out_of_bounds_with_format_arg(self, format): + # see gh-23830 + msg = "Out of bounds nanosecond timestamp" + with pytest.raises(OutOfBoundsDatetime, match=msg): + to_datetime("2417-10-27 00:00:00", format=format) + def test_processing_order(self): # make sure we handle out-of-bounds *before* # constructing the dates diff --git a/pandas/tests/indexes/interval/test_interval.py b/pandas/tests/indexes/interval/test_interval.py index db69258c1d3d2..ba451da10573a 100644 --- a/pandas/tests/indexes/interval/test_interval.py +++ b/pandas/tests/indexes/interval/test_interval.py @@ -242,12 +242,10 @@ def test_take(self, closed): [0, 0, 1], [1, 1, 2], closed=closed) tm.assert_index_equal(result, expected) - def test_unique(self, closed): - # unique non-overlapping - idx = IntervalIndex.from_tuples( - [(0, 1), (2, 3), (4, 5)], closed=closed) - assert idx.is_unique is True - + def test_is_unique_interval(self, closed): + """ + Interval specific tests for is_unique in addition to base class tests + """ # unique overlapping - distinct endpoints idx = IntervalIndex.from_tuples([(0, 1), (0.5, 1.5)], closed=closed) assert idx.is_unique is True @@ -261,15 +259,6 @@ def test_unique(self, closed): idx = IntervalIndex.from_tuples([(-1, 1), (-2, 2)], closed=closed) assert idx.is_unique is True - # duplicate - idx = IntervalIndex.from_tuples( - [(0, 1), (0, 1), (2, 3)], closed=closed) - assert idx.is_unique is False - - # empty - idx = IntervalIndex([], closed=closed) - assert idx.is_unique is True - def test_monotonic(self, closed): # increasing non-overlapping idx = IntervalIndex.from_tuples( @@ -414,13 
+403,16 @@ def test_get_item(self, closed): # To be removed, replaced by test_interval_new.py (see #16316, #16386) def test_get_loc_value(self): - pytest.raises(KeyError, self.index.get_loc, 0) + with pytest.raises(KeyError, match="^0$"): + self.index.get_loc(0) assert self.index.get_loc(0.5) == 0 assert self.index.get_loc(1) == 0 assert self.index.get_loc(1.5) == 1 assert self.index.get_loc(2) == 1 - pytest.raises(KeyError, self.index.get_loc, -1) - pytest.raises(KeyError, self.index.get_loc, 3) + with pytest.raises(KeyError, match="^-1$"): + self.index.get_loc(-1) + with pytest.raises(KeyError, match="^3$"): + self.index.get_loc(3) idx = IntervalIndex.from_tuples([(0, 2), (1, 3)]) assert idx.get_loc(0.5) == 0 @@ -430,10 +422,12 @@ def test_get_loc_value(self): tm.assert_numpy_array_equal(np.sort(idx.get_loc(2)), np.array([0, 1], dtype='intp')) assert idx.get_loc(3) == 1 - pytest.raises(KeyError, idx.get_loc, 3.5) + with pytest.raises(KeyError, match=r"^3\.5$"): + idx.get_loc(3.5) idx = IntervalIndex.from_arrays([0, 2], [1, 3]) - pytest.raises(KeyError, idx.get_loc, 1.5) + with pytest.raises(KeyError, match=r"^1\.5$"): + idx.get_loc(1.5) # To be removed, replaced by test_interval_new.py (see #16316, #16386) def slice_locs_cases(self, breaks): @@ -497,7 +491,9 @@ def test_slice_locs_decreasing_float64(self): # To be removed, replaced by test_interval_new.py (see #16316, #16386) def test_slice_locs_fails(self): index = IntervalIndex.from_tuples([(1, 2), (0, 1), (2, 3)]) - with pytest.raises(KeyError): + msg = ("'can only get slices from an IntervalIndex if bounds are" + " non-overlapping and all monotonic increasing or decreasing'") + with pytest.raises(KeyError, match=msg): index.slice_locs(1, 2) # To be removed, replaced by test_interval_new.py (see #16316, #16386) @@ -505,9 +501,12 @@ def test_get_loc_interval(self): assert self.index.get_loc(Interval(0, 1)) == 0 assert self.index.get_loc(Interval(0, 0.5)) == 0 assert self.index.get_loc(Interval(0, 1, 'left')) == 
0 - pytest.raises(KeyError, self.index.get_loc, Interval(2, 3)) - pytest.raises(KeyError, self.index.get_loc, - Interval(-1, 0, 'left')) + msg = r"Interval\(2, 3, closed='right'\)" + with pytest.raises(KeyError, match=msg): + self.index.get_loc(Interval(2, 3)) + msg = r"Interval\(-1, 0, closed='left'\)" + with pytest.raises(KeyError, match=msg): + self.index.get_loc(Interval(-1, 0, 'left')) # Make consistent with test_interval_new.py (see #16316, #16386) @pytest.mark.parametrize('item', [3, Interval(1, 4)]) @@ -783,19 +782,19 @@ def test_non_contiguous(self, closed): assert 1.5 not in index - @pytest.mark.parametrize("sort", [True, False]) + @pytest.mark.parametrize("sort", [None, False]) def test_union(self, closed, sort): index = self.create_index(closed=closed) other = IntervalIndex.from_breaks(range(5, 13), closed=closed) expected = IntervalIndex.from_breaks(range(13), closed=closed) result = index[::-1].union(other, sort=sort) - if sort: + if sort is None: tm.assert_index_equal(result, expected) assert tm.equalContents(result, expected) result = other[::-1].union(index, sort=sort) - if sort: + if sort is None: tm.assert_index_equal(result, expected) assert tm.equalContents(result, expected) @@ -812,19 +811,19 @@ def test_union(self, closed, sort): result = index.union(other, sort=sort) tm.assert_index_equal(result, index) - @pytest.mark.parametrize("sort", [True, False]) + @pytest.mark.parametrize("sort", [None, False]) def test_intersection(self, closed, sort): index = self.create_index(closed=closed) other = IntervalIndex.from_breaks(range(5, 13), closed=closed) expected = IntervalIndex.from_breaks(range(5, 11), closed=closed) result = index[::-1].intersection(other, sort=sort) - if sort: + if sort is None: tm.assert_index_equal(result, expected) assert tm.equalContents(result, expected) result = other[::-1].intersection(index, sort=sort) - if sort: + if sort is None: tm.assert_index_equal(result, expected) assert tm.equalContents(result, expected) @@ 
-842,14 +841,14 @@ def test_intersection(self, closed, sort): result = index.intersection(other, sort=sort) tm.assert_index_equal(result, expected) - @pytest.mark.parametrize("sort", [True, False]) + @pytest.mark.parametrize("sort", [None, False]) def test_difference(self, closed, sort): index = IntervalIndex.from_arrays([1, 0, 3, 2], [1, 2, 3, 4], closed=closed) result = index.difference(index[:1], sort=sort) expected = index[1:] - if sort: + if sort is None: expected = expected.sort_values() tm.assert_index_equal(result, expected) @@ -864,19 +863,19 @@ def test_difference(self, closed, sort): result = index.difference(other, sort=sort) tm.assert_index_equal(result, expected) - @pytest.mark.parametrize("sort", [True, False]) + @pytest.mark.parametrize("sort", [None, False]) def test_symmetric_difference(self, closed, sort): index = self.create_index(closed=closed) result = index[1:].symmetric_difference(index[:-1], sort=sort) expected = IntervalIndex([index[0], index[-1]]) - if sort: + if sort is None: tm.assert_index_equal(result, expected) assert tm.equalContents(result, expected) # GH 19101: empty result, same dtype result = index.symmetric_difference(index, sort=sort) expected = IntervalIndex(np.array([], dtype='int64'), closed=closed) - if sort: + if sort is None: tm.assert_index_equal(result, expected) assert tm.equalContents(result, expected) @@ -888,7 +887,7 @@ def test_symmetric_difference(self, closed, sort): @pytest.mark.parametrize('op_name', [ 'union', 'intersection', 'difference', 'symmetric_difference']) - @pytest.mark.parametrize("sort", [True, False]) + @pytest.mark.parametrize("sort", [None, False]) def test_set_operation_errors(self, closed, op_name, sort): index = self.create_index(closed=closed) set_op = getattr(index, op_name) @@ -992,9 +991,11 @@ def test_comparison(self): self.index > 0 with pytest.raises(TypeError, match='unorderable types'): self.index <= 0 - with pytest.raises(TypeError): + msg = r"unorderable types: Interval\(\) > 
int\(\)" + with pytest.raises(TypeError, match=msg): self.index > np.arange(2) - with pytest.raises(ValueError): + msg = "Lengths must match to compare" + with pytest.raises(ValueError, match=msg): self.index > np.arange(3) def test_missing_values(self, closed): @@ -1004,7 +1005,9 @@ def test_missing_values(self, closed): [np.nan, 0, 1], [np.nan, 1, 2], closed=closed) assert idx.equals(idx2) - with pytest.raises(ValueError): + msg = ("missing values must be missing in the same location both left" + " and right sides") + with pytest.raises(ValueError, match=msg): IntervalIndex.from_arrays( [np.nan, 0, 1], np.array([0, 1, 2]), closed=closed) diff --git a/pandas/tests/indexes/interval/test_interval_tree.py b/pandas/tests/indexes/interval/test_interval_tree.py index 90722e66d8d8c..46b2d12015a22 100644 --- a/pandas/tests/indexes/interval/test_interval_tree.py +++ b/pandas/tests/indexes/interval/test_interval_tree.py @@ -171,3 +171,13 @@ def test_is_overlapping_trivial(self, closed, left, right): # GH 23309 tree = IntervalTree(left, right, closed=closed) assert tree.is_overlapping is False + + def test_construction_overflow(self): + # GH 25485 + left, right = np.arange(101), [np.iinfo(np.int64).max] * 101 + tree = IntervalTree(left, right) + + # pivot should be average of left/right medians + result = tree.root.pivot + expected = (50 + np.iinfo(np.int64).max) / 2 + assert result == expected diff --git a/pandas/tests/indexes/multi/test_analytics.py b/pandas/tests/indexes/multi/test_analytics.py index dca6180f39664..d5a6e9acaa5f3 100644 --- a/pandas/tests/indexes/multi/test_analytics.py +++ b/pandas/tests/indexes/multi/test_analytics.py @@ -3,7 +3,8 @@ import numpy as np import pytest -from pandas.compat import lrange +from pandas.compat import PY2, lrange +from pandas.compat.numpy import _np_version_under1p17 import pandas as pd from pandas import Index, MultiIndex, date_range, period_range @@ -13,8 +14,11 @@ def test_shift(idx): # GH8083 test the base class for shift - 
pytest.raises(NotImplementedError, idx.shift, 1) - pytest.raises(NotImplementedError, idx.shift, 1, 2) + msg = "Not supported for type MultiIndex" + with pytest.raises(NotImplementedError, match=msg): + idx.shift(1) + with pytest.raises(NotImplementedError, match=msg): + idx.shift(1, 2) def test_groupby(idx): @@ -50,25 +54,26 @@ def test_truncate(): result = index.truncate(before=1, after=2) assert len(result.levels[0]) == 2 - # after < before - pytest.raises(ValueError, index.truncate, 3, 1) + msg = "after < before" + with pytest.raises(ValueError, match=msg): + index.truncate(3, 1) def test_where(): i = MultiIndex.from_tuples([('A', 1), ('A', 2)]) - with pytest.raises(NotImplementedError): + msg = r"\.where is not supported for MultiIndex operations" + with pytest.raises(NotImplementedError, match=msg): i.where(True) -def test_where_array_like(): +@pytest.mark.parametrize('klass', [list, tuple, np.array, pd.Series]) +def test_where_array_like(klass): i = MultiIndex.from_tuples([('A', 1), ('A', 2)]) - klasses = [list, tuple, np.array, pd.Series] cond = [False, True] - - for klass in klasses: - with pytest.raises(NotImplementedError): - i.where(klass(cond)) + msg = r"\.where is not supported for MultiIndex operations" + with pytest.raises(NotImplementedError, match=msg): + i.where(klass(cond)) # TODO: reshape @@ -141,7 +146,8 @@ def test_take(idx): # if not isinstance(idx, # (DatetimeIndex, PeriodIndex, TimedeltaIndex)): # GH 10791 - with pytest.raises(AttributeError): + msg = "'MultiIndex' object has no attribute 'freq'" + with pytest.raises(AttributeError, match=msg): idx.freq @@ -199,7 +205,8 @@ def test_take_fill_value(): with pytest.raises(ValueError, match=msg): idx.take(np.array([1, 0, -5]), fill_value=True) - with pytest.raises(IndexError): + msg = "index -5 is out of bounds for size 4" + with pytest.raises(IndexError, match=msg): idx.take(np.array([1, -5])) @@ -215,13 +222,15 @@ def test_sub(idx): first = idx # - now raises (previously was set op 
difference) - with pytest.raises(TypeError): + msg = "cannot perform __sub__ with this index type: MultiIndex" + with pytest.raises(TypeError, match=msg): first - idx[-3:] - with pytest.raises(TypeError): + with pytest.raises(TypeError, match=msg): idx[-3:] - first - with pytest.raises(TypeError): + with pytest.raises(TypeError, match=msg): idx[-3:] - first.tolist() - with pytest.raises(TypeError): + msg = "cannot perform __rsub__ with this index type: MultiIndex" + with pytest.raises(TypeError, match=msg): first.tolist() - idx[-3:] @@ -266,56 +275,35 @@ def test_map_dictlike(idx, mapper): tm.assert_index_equal(result, expected) +@pytest.mark.skipif(PY2, reason="pytest.raises match regex fails") @pytest.mark.parametrize('func', [ np.exp, np.exp2, np.expm1, np.log, np.log2, np.log10, np.log1p, np.sqrt, np.sin, np.cos, np.tan, np.arcsin, np.arccos, np.arctan, np.sinh, np.cosh, np.tanh, np.arcsinh, np.arccosh, np.arctanh, np.deg2rad, np.rad2deg -]) -def test_numpy_ufuncs(func): +], ids=lambda func: func.__name__) +def test_numpy_ufuncs(idx, func): # test ufuncs of numpy. see: # http://docs.scipy.org/doc/numpy/reference/ufuncs.html - # copy and paste from idx fixture as pytest doesn't support - # parameters and fixtures at the same time. 
- major_axis = Index(['foo', 'bar', 'baz', 'qux']) - minor_axis = Index(['one', 'two']) - major_codes = np.array([0, 0, 1, 2, 3, 3]) - minor_codes = np.array([0, 1, 0, 1, 0, 1]) - index_names = ['first', 'second'] - - idx = MultiIndex( - levels=[major_axis, minor_axis], - codes=[major_codes, minor_codes], - names=index_names, - verify_integrity=False - ) - - with pytest.raises(Exception): - with np.errstate(all='ignore'): - func(idx) + if _np_version_under1p17: + expected_exception = AttributeError + msg = "'tuple' object has no attribute '{}'".format(func.__name__) + else: + expected_exception = TypeError + msg = ("loop of ufunc does not support argument 0 of type tuple which" + " has no callable {} method").format(func.__name__) + with pytest.raises(expected_exception, match=msg): + func(idx) @pytest.mark.parametrize('func', [ np.isfinite, np.isinf, np.isnan, np.signbit -]) -def test_numpy_type_funcs(func): - # for func in [np.isfinite, np.isinf, np.isnan, np.signbit]: - # copy and paste from idx fixture as pytest doesn't support - # parameters and fixtures at the same time. 
- major_axis = Index(['foo', 'bar', 'baz', 'qux']) - minor_axis = Index(['one', 'two']) - major_codes = np.array([0, 0, 1, 2, 3, 3]) - minor_codes = np.array([0, 1, 0, 1, 0, 1]) - index_names = ['first', 'second'] - - idx = MultiIndex( - levels=[major_axis, minor_axis], - codes=[major_codes, minor_codes], - names=index_names, - verify_integrity=False - ) - - with pytest.raises(Exception): +], ids=lambda func: func.__name__) +def test_numpy_type_funcs(idx, func): + msg = ("ufunc '{}' not supported for the input types, and the inputs" + " could not be safely coerced to any supported types according to" + " the casting rule ''safe''").format(func.__name__) + with pytest.raises(TypeError, match=msg): func(idx) diff --git a/pandas/tests/indexes/multi/test_compat.py b/pandas/tests/indexes/multi/test_compat.py index f405fc659c709..89685b9feec27 100644 --- a/pandas/tests/indexes/multi/test_compat.py +++ b/pandas/tests/indexes/multi/test_compat.py @@ -124,8 +124,6 @@ def test_compat(indices): def test_pickle_compat_construction(holder): # this is testing for pickle compat - if holder is None: - return - # need an object to create with - pytest.raises(TypeError, holder) + with pytest.raises(TypeError, match="Must pass both levels and codes"): + holder() diff --git a/pandas/tests/indexes/multi/test_constructor.py b/pandas/tests/indexes/multi/test_constructor.py index e6678baf8a996..fe90e85cf93c8 100644 --- a/pandas/tests/indexes/multi/test_constructor.py +++ b/pandas/tests/indexes/multi/test_constructor.py @@ -1,7 +1,6 @@ # -*- coding: utf-8 -*- from collections import OrderedDict -import re import numpy as np import pytest @@ -30,10 +29,10 @@ def test_constructor_no_levels(): with pytest.raises(ValueError, match=msg): MultiIndex(levels=[], codes=[]) - both_re = re.compile('Must pass both levels and codes') - with pytest.raises(TypeError, match=both_re): + msg = "Must pass both levels and codes" + with pytest.raises(TypeError, match=msg): MultiIndex(levels=[]) - with 
pytest.raises(TypeError, match=both_re): + with pytest.raises(TypeError, match=msg): MultiIndex(codes=[]) @@ -42,8 +41,8 @@ def test_constructor_nonhashable_names(): levels = [[1, 2], [u'one', u'two']] codes = [[0, 0, 1, 1], [0, 1, 0, 1]] names = (['foo'], ['bar']) - message = "MultiIndex.name must be a hashable type" - with pytest.raises(TypeError, match=message): + msg = r"MultiIndex\.name must be a hashable type" + with pytest.raises(TypeError, match=msg): MultiIndex(levels=levels, codes=codes, names=names) # With .rename() @@ -51,11 +50,11 @@ def test_constructor_nonhashable_names(): codes=[[0, 0, 1, 1], [0, 1, 0, 1]], names=('foo', 'bar')) renamed = [['foor'], ['barr']] - with pytest.raises(TypeError, match=message): + with pytest.raises(TypeError, match=msg): mi.rename(names=renamed) # With .set_names() - with pytest.raises(TypeError, match=message): + with pytest.raises(TypeError, match=msg): mi.set_names(names=renamed) @@ -67,8 +66,9 @@ def test_constructor_mismatched_codes_levels(idx): with pytest.raises(ValueError, match=msg): MultiIndex(levels=levels, codes=codes) - length_error = re.compile('>= length of level') - label_error = re.compile(r'Unequal code lengths: \[4, 2\]') + length_error = (r"On level 0, code max \(3\) >= length of level \(1\)\." + " NOTE: this index is in an inconsistent state") + label_error = r"Unequal code lengths: \[4, 2\]" # important to check that it's looking at the right thing. 
with pytest.raises(ValueError, match=length_error): @@ -142,6 +142,15 @@ def test_from_arrays_iterator(idx): MultiIndex.from_arrays(0) +def test_from_arrays_tuples(idx): + arrays = tuple(tuple(np.asarray(lev).take(level_codes)) + for lev, level_codes in zip(idx.levels, idx.codes)) + + # tuple of tuples as input + result = MultiIndex.from_arrays(arrays, names=idx.names) + tm.assert_index_equal(result, idx) + + def test_from_arrays_index_series_datetimetz(): idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3, tz='US/Eastern') @@ -253,21 +262,16 @@ def test_from_arrays_empty(): tm.assert_index_equal(result, expected) -@pytest.mark.parametrize('invalid_array', [ - (1), - ([1]), - ([1, 2]), - ([[1], 2]), - ('a'), - (['a']), - (['a', 'b']), - ([['a'], 'b']), +@pytest.mark.parametrize('invalid_sequence_of_arrays', [ + 1, [1], [1, 2], [[1], 2], [1, [2]], 'a', ['a'], ['a', 'b'], [['a'], 'b'], + (1,), (1, 2), ([1], 2), (1, [2]), 'a', ('a',), ('a', 'b'), (['a'], 'b'), + [(1,), 2], [1, (2,)], [('a',), 'b'], + ((1,), 2), (1, (2,)), (('a',), 'b') ]) -def test_from_arrays_invalid_input(invalid_array): - invalid_inputs = [1, [1], [1, 2], [[1], 2], - 'a', ['a'], ['a', 'b'], [['a'], 'b']] - for i in invalid_inputs: - pytest.raises(TypeError, MultiIndex.from_arrays, arrays=i) +def test_from_arrays_invalid_input(invalid_sequence_of_arrays): + msg = "Input must be a list / sequence of array-likes" + with pytest.raises(TypeError, match=msg): + MultiIndex.from_arrays(arrays=invalid_sequence_of_arrays) @pytest.mark.parametrize('idx1, idx2', [ @@ -332,9 +336,10 @@ def test_tuples_with_name_string(): # GH 15110 and GH 14848 li = [(0, 0, 1), (0, 1, 0), (1, 0, 0)] - with pytest.raises(ValueError): + msg = "Names should be list-like for a MultiIndex" + with pytest.raises(ValueError, match=msg): pd.Index(li, name='abc') - with pytest.raises(ValueError): + with pytest.raises(ValueError, match=msg): pd.Index(li, name='a') @@ -398,7 +403,10 @@ def test_from_product_empty_three_levels(N): 
[['a'], 'b'], ]) def test_from_product_invalid_input(invalid_input): - pytest.raises(TypeError, MultiIndex.from_product, iterables=invalid_input) + msg = (r"Input must be a list / sequence of iterables|" + "Input must be list-like") + with pytest.raises(TypeError, match=msg): + MultiIndex.from_product(iterables=invalid_input) def test_from_product_datetimeindex(): @@ -563,15 +571,15 @@ def test_from_frame_valid_names(names_in, names_out): assert mi.names == names_out -@pytest.mark.parametrize('names_in,names_out', [ - ('bad_input', ValueError("Names should be list-like for a MultiIndex")), - (['a', 'b', 'c'], ValueError("Length of names must match number of " - "levels in MultiIndex.")) +@pytest.mark.parametrize('names,expected_error_msg', [ + ('bad_input', "Names should be list-like for a MultiIndex"), + (['a', 'b', 'c'], + "Length of names must match number of levels in MultiIndex") ]) -def test_from_frame_invalid_names(names_in, names_out): +def test_from_frame_invalid_names(names, expected_error_msg): # GH 22420 df = pd.DataFrame([['a', 'a'], ['a', 'b'], ['b', 'a'], ['b', 'b']], columns=pd.MultiIndex.from_tuples([('L1', 'x'), ('L2', 'y')])) - with pytest.raises(type(names_out), match=names_out.args[0]): - pd.MultiIndex.from_frame(df, names=names_in) + with pytest.raises(ValueError, match=expected_error_msg): + pd.MultiIndex.from_frame(df, names=names) diff --git a/pandas/tests/indexes/multi/test_contains.py b/pandas/tests/indexes/multi/test_contains.py index b73ff11a4dd4e..56836b94a6b03 100644 --- a/pandas/tests/indexes/multi/test_contains.py +++ b/pandas/tests/indexes/multi/test_contains.py @@ -83,15 +83,24 @@ def test_isin_level_kwarg(): tm.assert_numpy_array_equal(expected, idx.isin(vals_1, level=1)) tm.assert_numpy_array_equal(expected, idx.isin(vals_1, level=-1)) - pytest.raises(IndexError, idx.isin, vals_0, level=5) - pytest.raises(IndexError, idx.isin, vals_0, level=-5) - - pytest.raises(KeyError, idx.isin, vals_0, level=1.0) - pytest.raises(KeyError, 
idx.isin, vals_1, level=-1.0) - pytest.raises(KeyError, idx.isin, vals_1, level='A') + msg = "Too many levels: Index has only 2 levels, not 6" + with pytest.raises(IndexError, match=msg): + idx.isin(vals_0, level=5) + msg = ("Too many levels: Index has only 2 levels, -5 is not a valid level" + " number") + with pytest.raises(IndexError, match=msg): + idx.isin(vals_0, level=-5) + + with pytest.raises(KeyError, match=r"'Level 1\.0 not found'"): + idx.isin(vals_0, level=1.0) + with pytest.raises(KeyError, match=r"'Level -1\.0 not found'"): + idx.isin(vals_1, level=-1.0) + with pytest.raises(KeyError, match="'Level A not found'"): + idx.isin(vals_1, level='A') idx.names = ['A', 'B'] tm.assert_numpy_array_equal(expected, idx.isin(vals_0, level='A')) tm.assert_numpy_array_equal(expected, idx.isin(vals_1, level='B')) - pytest.raises(KeyError, idx.isin, vals_1, level='C') + with pytest.raises(KeyError, match="'Level C not found'"): + idx.isin(vals_1, level='C') diff --git a/pandas/tests/indexes/multi/test_drop.py b/pandas/tests/indexes/multi/test_drop.py index 0cf73d3d752ad..ac167c126fd13 100644 --- a/pandas/tests/indexes/multi/test_drop.py +++ b/pandas/tests/indexes/multi/test_drop.py @@ -4,7 +4,7 @@ import numpy as np import pytest -from pandas.compat import lrange +from pandas.compat import PY2, lrange from pandas.errors import PerformanceWarning import pandas as pd @@ -12,6 +12,7 @@ import pandas.util.testing as tm +@pytest.mark.skipif(PY2, reason="pytest.raises match regex fails") def test_drop(idx): dropped = idx.drop([('foo', 'two'), ('qux', 'one')]) @@ -31,13 +32,17 @@ def test_drop(idx): tm.assert_index_equal(dropped, expected) index = MultiIndex.from_tuples([('bar', 'two')]) - pytest.raises(KeyError, idx.drop, [('bar', 'two')]) - pytest.raises(KeyError, idx.drop, index) - pytest.raises(KeyError, idx.drop, ['foo', 'two']) + with pytest.raises(KeyError, match=r"^10$"): + idx.drop([('bar', 'two')]) + with pytest.raises(KeyError, match=r"^10$"): + idx.drop(index) + 
with pytest.raises(KeyError, match=r"^'two'$"): + idx.drop(['foo', 'two']) # partially correct argument mixed_index = MultiIndex.from_tuples([('qux', 'one'), ('bar', 'two')]) - pytest.raises(KeyError, idx.drop, mixed_index) + with pytest.raises(KeyError, match=r"^10$"): + idx.drop(mixed_index) # error='ignore' dropped = idx.drop(index, errors='ignore') @@ -59,7 +64,8 @@ def test_drop(idx): # mixed partial / full drop / error='ignore' mixed_index = ['foo', ('qux', 'one'), 'two'] - pytest.raises(KeyError, idx.drop, mixed_index) + with pytest.raises(KeyError, match=r"^'two'$"): + idx.drop(mixed_index) dropped = idx.drop(mixed_index, errors='ignore') expected = idx[[2, 3, 5]] tm.assert_index_equal(dropped, expected) @@ -98,10 +104,12 @@ def test_droplevel_list(): expected = index[:2] assert dropped.equals(expected) - with pytest.raises(ValueError): + msg = ("Cannot remove 3 levels from an index with 3 levels: at least one" + " level must be left") + with pytest.raises(ValueError, match=msg): index[:2].droplevel(['one', 'two', 'three']) - with pytest.raises(KeyError): + with pytest.raises(KeyError, match="'Level four not found'"): index[:2].droplevel(['one', 'four']) diff --git a/pandas/tests/indexes/multi/test_duplicates.py b/pandas/tests/indexes/multi/test_duplicates.py index af15026de2b34..35034dc57b4b8 100644 --- a/pandas/tests/indexes/multi/test_duplicates.py +++ b/pandas/tests/indexes/multi/test_duplicates.py @@ -143,6 +143,18 @@ def test_has_duplicates(idx, idx_dup): assert mi.is_unique is False assert mi.has_duplicates is True + # single instance of NaN + mi_nan = MultiIndex(levels=[['a', 'b'], [0, 1]], + codes=[[-1, 0, 0, 1, 1], [-1, 0, 1, 0, 1]]) + assert mi_nan.is_unique is True + assert mi_nan.has_duplicates is False + + # multiple instances of NaN + mi_nan_dup = MultiIndex(levels=[['a', 'b'], [0, 1]], + codes=[[-1, -1, 0, 0, 1, 1], [-1, -1, 0, 1, 0, 1]]) + assert mi_nan_dup.is_unique is False + assert mi_nan_dup.has_duplicates is True + def 
test_has_duplicates_from_tuples(): # GH 9075 diff --git a/pandas/tests/indexes/multi/test_get_set.py b/pandas/tests/indexes/multi/test_get_set.py index d201cb2eb178b..62911c7032aca 100644 --- a/pandas/tests/indexes/multi/test_get_set.py +++ b/pandas/tests/indexes/multi/test_get_set.py @@ -25,7 +25,9 @@ def test_get_level_number_integer(idx): idx.names = [1, 0] assert idx._get_level_number(1) == 0 assert idx._get_level_number(0) == 1 - pytest.raises(IndexError, idx._get_level_number, 2) + msg = "Too many levels: Index has only 2 levels, not 3" + with pytest.raises(IndexError, match=msg): + idx._get_level_number(2) with pytest.raises(KeyError, match='Level fourth not found'): idx._get_level_number('fourth') @@ -62,7 +64,7 @@ def test_get_value_duplicates(): names=['tag', 'day']) assert index.get_loc('D') == slice(0, 3) - with pytest.raises(KeyError): + with pytest.raises(KeyError, match=r"^'D'$"): index._engine.get_value(np.array([]), 'D') @@ -125,7 +127,8 @@ def test_set_name_methods(idx, index_names): ind = idx.set_names(new_names) assert idx.names == index_names assert ind.names == new_names - with pytest.raises(ValueError, match="^Length"): + msg = "Length of names must match number of levels in MultiIndex" + with pytest.raises(ValueError, match=msg): ind.set_names(new_names + new_names) new_names2 = [name + "SUFFIX2" for name in new_names] res = ind.set_names(new_names2, inplace=True) @@ -163,10 +166,10 @@ def test_set_levels_codes_directly(idx): minor_codes = [(x + 1) % 1 for x in minor_codes] new_codes = [major_codes, minor_codes] - with pytest.raises(AttributeError): + msg = "can't set attribute" + with pytest.raises(AttributeError, match=msg): idx.levels = new_levels - - with pytest.raises(AttributeError): + with pytest.raises(AttributeError, match=msg): idx.codes = new_codes diff --git a/pandas/tests/indexes/multi/test_indexing.py b/pandas/tests/indexes/multi/test_indexing.py index c40ecd9e82a07..c2af3b2050d8d 100644 --- 
a/pandas/tests/indexes/multi/test_indexing.py +++ b/pandas/tests/indexes/multi/test_indexing.py @@ -6,7 +6,7 @@ import numpy as np import pytest -from pandas.compat import lrange +from pandas.compat import PY2, lrange import pandas as pd from pandas import ( @@ -112,13 +112,14 @@ def test_slice_locs_not_contained(): def test_putmask_with_wrong_mask(idx): # GH18368 - with pytest.raises(ValueError): + msg = "putmask: mask and data must be the same size" + with pytest.raises(ValueError, match=msg): idx.putmask(np.ones(len(idx) + 1, np.bool), 1) - with pytest.raises(ValueError): + with pytest.raises(ValueError, match=msg): idx.putmask(np.ones(len(idx) - 1, np.bool), 1) - with pytest.raises(ValueError): + with pytest.raises(ValueError, match=msg): idx.putmask('foo', 1) @@ -176,9 +177,12 @@ def test_get_indexer(): def test_get_indexer_nearest(): midx = MultiIndex.from_tuples([('a', 1), ('b', 2)]) - with pytest.raises(NotImplementedError): + msg = ("method='nearest' not implemented yet for MultiIndex; see GitHub" + " issue 9365") + with pytest.raises(NotImplementedError, match=msg): midx.get_indexer(['a'], method='nearest') - with pytest.raises(NotImplementedError): + msg = "tolerance not implemented yet for MultiIndex" + with pytest.raises(NotImplementedError, match=msg): midx.get_indexer(['a'], method='pad', tolerance=2) @@ -251,20 +255,26 @@ def test_getitem_bool_index_single(ind1, ind2): tm.assert_index_equal(idx[ind2], expected) +@pytest.mark.skipif(PY2, reason="pytest.raises match regex fails") def test_get_loc(idx): assert idx.get_loc(('foo', 'two')) == 1 assert idx.get_loc(('baz', 'two')) == 3 - pytest.raises(KeyError, idx.get_loc, ('bar', 'two')) - pytest.raises(KeyError, idx.get_loc, 'quux') + with pytest.raises(KeyError, match=r"^10$"): + idx.get_loc(('bar', 'two')) + with pytest.raises(KeyError, match=r"^'quux'$"): + idx.get_loc('quux') - pytest.raises(NotImplementedError, idx.get_loc, 'foo', - method='nearest') + msg = ("only the default get_loc method is 
currently supported for" + " MultiIndex") + with pytest.raises(NotImplementedError, match=msg): + idx.get_loc('foo', method='nearest') # 3 levels index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index( lrange(4))], codes=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array( [0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])]) - pytest.raises(KeyError, index.get_loc, (1, 1)) + with pytest.raises(KeyError, match=r"^\(1, 1\)$"): + index.get_loc((1, 1)) assert index.get_loc((2, 0)) == slice(3, 5) @@ -297,11 +307,14 @@ def test_get_loc_level(): assert loc == expected assert new_index is None - pytest.raises(KeyError, index.get_loc_level, (2, 2)) + with pytest.raises(KeyError, match=r"^\(2, 2\)$"): + index.get_loc_level((2, 2)) # GH 22221: unused label - pytest.raises(KeyError, index.drop(2).get_loc_level, 2) + with pytest.raises(KeyError, match=r"^2$"): + index.drop(2).get_loc_level(2) # Unused label on unsorted level: - pytest.raises(KeyError, index.drop(1, level=2).get_loc_level, 2, 2) + with pytest.raises(KeyError, match=r"^2$"): + index.drop(1, level=2).get_loc_level(2, level=2) index = MultiIndex(levels=[[2000], lrange(4)], codes=[np.array( [0, 0, 0, 0]), np.array([0, 1, 2, 3])]) @@ -342,8 +355,10 @@ def test_get_loc_cast_bool(): assert idx.get_loc((0, 1)) == 1 assert idx.get_loc((1, 0)) == 2 - pytest.raises(KeyError, idx.get_loc, (False, True)) - pytest.raises(KeyError, idx.get_loc, (True, False)) + with pytest.raises(KeyError, match=r"^\(False, True\)$"): + idx.get_loc((False, True)) + with pytest.raises(KeyError, match=r"^\(True, False\)$"): + idx.get_loc((True, False)) @pytest.mark.parametrize('level', [0, 1]) @@ -361,9 +376,12 @@ def test_get_loc_missing_nan(): # GH 8569 idx = MultiIndex.from_arrays([[1.0, 2.0], [3.0, 4.0]]) assert isinstance(idx.get_loc(1), slice) - pytest.raises(KeyError, idx.get_loc, 3) - pytest.raises(KeyError, idx.get_loc, np.nan) - pytest.raises(KeyError, idx.get_loc, [np.nan]) + with pytest.raises(KeyError, 
match=r"^3\.0$"): + idx.get_loc(3) + with pytest.raises(KeyError, match=r"^nan$"): + idx.get_loc(np.nan) + with pytest.raises(KeyError, match=r"^\[nan\]$"): + idx.get_loc([np.nan]) def test_get_indexer_categorical_time(): diff --git a/pandas/tests/indexes/multi/test_integrity.py b/pandas/tests/indexes/multi/test_integrity.py index c1638a9cde660..a7dc093147725 100644 --- a/pandas/tests/indexes/multi/test_integrity.py +++ b/pandas/tests/indexes/multi/test_integrity.py @@ -159,7 +159,8 @@ def test_isna_behavior(idx): # should not segfault GH5123 # NOTE: if MI representation changes, may make sense to allow # isna(MI) - with pytest.raises(NotImplementedError): + msg = "isna is not defined for MultiIndex" + with pytest.raises(NotImplementedError, match=msg): pd.isna(idx) @@ -168,16 +169,16 @@ def test_large_multiindex_error(): df_below_1000000 = pd.DataFrame( 1, index=pd.MultiIndex.from_product([[1, 2], range(499999)]), columns=['dest']) - with pytest.raises(KeyError): + with pytest.raises(KeyError, match=r"^\(-1, 0\)$"): df_below_1000000.loc[(-1, 0), 'dest'] - with pytest.raises(KeyError): + with pytest.raises(KeyError, match=r"^\(3, 0\)$"): df_below_1000000.loc[(3, 0), 'dest'] df_above_1000000 = pd.DataFrame( 1, index=pd.MultiIndex.from_product([[1, 2], range(500001)]), columns=['dest']) - with pytest.raises(KeyError): + with pytest.raises(KeyError, match=r"^\(-1, 0\)$"): df_above_1000000.loc[(-1, 0), 'dest'] - with pytest.raises(KeyError): + with pytest.raises(KeyError, match=r"^\(3, 0\)$"): df_above_1000000.loc[(3, 0), 'dest'] @@ -260,7 +261,9 @@ def test_hash_error(indices): def test_mutability(indices): if not len(indices): return - pytest.raises(TypeError, indices.__setitem__, 0, indices[0]) + msg = "Index does not support mutable operations" + with pytest.raises(TypeError, match=msg): + indices[0] = indices[0] def test_wrong_number_names(indices): diff --git a/pandas/tests/indexes/multi/test_set_ops.py b/pandas/tests/indexes/multi/test_set_ops.py index 
208d6cf1c639f..41a0e1e59e8a5 100644 --- a/pandas/tests/indexes/multi/test_set_ops.py +++ b/pandas/tests/indexes/multi/test_set_ops.py @@ -9,7 +9,7 @@ @pytest.mark.parametrize("case", [0.5, "xxx"]) -@pytest.mark.parametrize("sort", [True, False]) +@pytest.mark.parametrize("sort", [None, False]) @pytest.mark.parametrize("method", ["intersection", "union", "difference", "symmetric_difference"]) def test_set_ops_error_cases(idx, case, sort, method): @@ -19,13 +19,13 @@ def test_set_ops_error_cases(idx, case, sort, method): getattr(idx, method)(case, sort=sort) -@pytest.mark.parametrize("sort", [True, False]) +@pytest.mark.parametrize("sort", [None, False]) def test_intersection_base(idx, sort): first = idx[:5] second = idx[:3] intersect = first.intersection(second, sort=sort) - if sort: + if sort is None: tm.assert_index_equal(intersect, second.sort_values()) assert tm.equalContents(intersect, second) @@ -34,7 +34,7 @@ def test_intersection_base(idx, sort): for klass in [np.array, Series, list]] for case in cases: result = first.intersection(case, sort=sort) - if sort: + if sort is None: tm.assert_index_equal(result, second.sort_values()) assert tm.equalContents(result, second) @@ -43,13 +43,13 @@ def test_intersection_base(idx, sort): first.intersection([1, 2, 3], sort=sort) -@pytest.mark.parametrize("sort", [True, False]) +@pytest.mark.parametrize("sort", [None, False]) def test_union_base(idx, sort): first = idx[3:] second = idx[:5] everything = idx union = first.union(second, sort=sort) - if sort: + if sort is None: tm.assert_index_equal(union, everything.sort_values()) assert tm.equalContents(union, everything) @@ -58,7 +58,7 @@ def test_union_base(idx, sort): for klass in [np.array, Series, list]] for case in cases: result = first.union(case, sort=sort) - if sort: + if sort is None: tm.assert_index_equal(result, everything.sort_values()) assert tm.equalContents(result, everything) @@ -67,13 +67,13 @@ def test_union_base(idx, sort): first.union([1, 2, 3], 
sort=sort) -@pytest.mark.parametrize("sort", [True, False]) +@pytest.mark.parametrize("sort", [None, False]) def test_difference_base(idx, sort): second = idx[4:] answer = idx[:4] result = idx.difference(second, sort=sort) - if sort: + if sort is None: answer = answer.sort_values() assert result.equals(answer) @@ -91,14 +91,14 @@ def test_difference_base(idx, sort): idx.difference([1, 2, 3], sort=sort) -@pytest.mark.parametrize("sort", [True, False]) +@pytest.mark.parametrize("sort", [None, False]) def test_symmetric_difference(idx, sort): first = idx[1:] second = idx[:-1] answer = idx[[-1, 0]] result = first.symmetric_difference(second, sort=sort) - if sort: + if sort is None: answer = answer.sort_values() tm.assert_index_equal(result, answer) @@ -121,14 +121,14 @@ def test_empty(idx): assert idx[:0].empty -@pytest.mark.parametrize("sort", [True, False]) +@pytest.mark.parametrize("sort", [None, False]) def test_difference(idx, sort): first = idx result = first.difference(idx[-3:], sort=sort) vals = idx[:-3].values - if sort: + if sort is None: vals = sorted(vals) expected = MultiIndex.from_tuples(vals, @@ -189,14 +189,62 @@ def test_difference(idx, sort): first.difference([1, 2, 3, 4, 5], sort=sort) -@pytest.mark.parametrize("sort", [True, False]) +def test_difference_sort_special(): + # GH-24959 + idx = pd.MultiIndex.from_product([[1, 0], ['a', 'b']]) + # sort=None, the default + result = idx.difference([]) + tm.assert_index_equal(result, idx) + + +@pytest.mark.xfail(reason="Not implemented.") +def test_difference_sort_special_true(): + # TODO decide on True behaviour + idx = pd.MultiIndex.from_product([[1, 0], ['a', 'b']]) + result = idx.difference([], sort=True) + expected = pd.MultiIndex.from_product([[0, 1], ['a', 'b']]) + tm.assert_index_equal(result, expected) + + +def test_difference_sort_incomparable(): + # GH-24959 + idx = pd.MultiIndex.from_product([[1, pd.Timestamp('2000'), 2], + ['a', 'b']]) + + other = pd.MultiIndex.from_product([[3, 
pd.Timestamp('2000'), 4], + ['c', 'd']]) + # sort=None, the default + # MultiIndex.difference deviates here from other difference + # implementations in not catching the TypeError + with pytest.raises(TypeError): + result = idx.difference(other) + + # sort=False + result = idx.difference(other, sort=False) + tm.assert_index_equal(result, idx) + + +@pytest.mark.xfail(reason="Not implemented.") +def test_difference_sort_incomparable_true(): + # TODO decide on True behaviour + # # sort=True, raises + idx = pd.MultiIndex.from_product([[1, pd.Timestamp('2000'), 2], + ['a', 'b']]) + other = pd.MultiIndex.from_product([[3, pd.Timestamp('2000'), 4], + ['c', 'd']]) + + with pytest.raises(TypeError): + idx.difference(other, sort=True) + + +@pytest.mark.parametrize("sort", [None, False]) def test_union(idx, sort): piece1 = idx[:5][::-1] piece2 = idx[3:] the_union = piece1.union(piece2, sort=sort) - if sort: + if sort is None: tm.assert_index_equal(the_union, idx.sort_values()) assert tm.equalContents(the_union, idx) @@ -225,14 +273,14 @@ def test_union(idx, sort): # assert result.equals(result2) -@pytest.mark.parametrize("sort", [True, False]) +@pytest.mark.parametrize("sort", [None, False]) def test_intersection(idx, sort): piece1 = idx[:5][::-1] piece2 = idx[3:] the_int = piece1.intersection(piece2, sort=sort) - if sort: + if sort is None: tm.assert_index_equal(the_int, idx[3:5]) assert tm.equalContents(the_int, idx[3:5]) @@ -249,3 +297,76 @@ def test_intersection(idx, sort): # tuples = _index.values # result = _index & tuples # assert result.equals(tuples) + + +def test_intersect_equal_sort(): + # GH-24959 + idx = pd.MultiIndex.from_product([[1, 0], ['a', 'b']]) + tm.assert_index_equal(idx.intersection(idx, sort=False), idx) + tm.assert_index_equal(idx.intersection(idx, sort=None), idx) + + +@pytest.mark.xfail(reason="Not implemented.") +def test_intersect_equal_sort_true(): + # TODO decide on True behaviour + idx = pd.MultiIndex.from_product([[1, 0], ['a', 'b']]) + 
sorted_ = pd.MultiIndex.from_product([[0, 1], ['a', 'b']]) + tm.assert_index_equal(idx.intersection(idx, sort=True), sorted_) + + +@pytest.mark.parametrize('slice_', [slice(None), slice(0)]) +def test_union_sort_other_empty(slice_): + # https://github.com/pandas-dev/pandas/issues/24959 + idx = pd.MultiIndex.from_product([[1, 0], ['a', 'b']]) + + # default, sort=None + other = idx[slice_] + tm.assert_index_equal(idx.union(other), idx) + # MultiIndex does not special case empty.union(idx) + # tm.assert_index_equal(other.union(idx), idx) + + # sort=False + tm.assert_index_equal(idx.union(other, sort=False), idx) + + +@pytest.mark.xfail(reason="Not implemented.") +def test_union_sort_other_empty_sort(slice_): + # TODO decide on True behaviour + # # sort=True + idx = pd.MultiIndex.from_product([[1, 0], ['a', 'b']]) + other = idx[:0] + result = idx.union(other, sort=True) + expected = pd.MultiIndex.from_product([[0, 1], ['a', 'b']]) + tm.assert_index_equal(result, expected) + + +def test_union_sort_other_incomparable(): + # https://github.com/pandas-dev/pandas/issues/24959 + idx = pd.MultiIndex.from_product([[1, pd.Timestamp('2000')], ['a', 'b']]) + + # default, sort=None + result = idx.union(idx[:1]) + tm.assert_index_equal(result, idx) + + # sort=False + result = idx.union(idx[:1], sort=False) + tm.assert_index_equal(result, idx) + + +@pytest.mark.xfail(reason="Not implemented.") +def test_union_sort_other_incomparable_sort(): + # TODO decide on True behaviour + # # sort=True + idx = pd.MultiIndex.from_product([[1, pd.Timestamp('2000')], ['a', 'b']]) + with pytest.raises(TypeError, match='Cannot compare'): + idx.union(idx[:1], sort=True) + + +@pytest.mark.parametrize("method", ['union', 'intersection', 'difference', + 'symmetric_difference']) +def test_setops_disallow_true(method): + idx1 = pd.MultiIndex.from_product([['a', 'b'], [1, 2]]) + idx2 = pd.MultiIndex.from_product([['b', 'c'], [1, 2]]) + + with pytest.raises(ValueError, match="The 'sort' keyword only takes"): 
+ getattr(idx1, method)(idx2, sort=True) diff --git a/pandas/tests/indexes/period/test_asfreq.py b/pandas/tests/indexes/period/test_asfreq.py index 2dd49e7e0845e..30b416e3fe9dd 100644 --- a/pandas/tests/indexes/period/test_asfreq.py +++ b/pandas/tests/indexes/period/test_asfreq.py @@ -67,7 +67,9 @@ def test_asfreq(self): assert pi7.asfreq('H', 'S') == pi5 assert pi7.asfreq('Min', 'S') == pi6 - pytest.raises(ValueError, pi7.asfreq, 'T', 'foo') + msg = "How must be one of S or E" + with pytest.raises(ValueError, match=msg): + pi7.asfreq('T', 'foo') result1 = pi1.asfreq('3M') result2 = pi1.asfreq('M') expected = period_range(freq='M', start='2001-12', end='2001-12') diff --git a/pandas/tests/indexes/period/test_construction.py b/pandas/tests/indexes/period/test_construction.py index 916260c4cee7e..f1adeca7245f6 100644 --- a/pandas/tests/indexes/period/test_construction.py +++ b/pandas/tests/indexes/period/test_construction.py @@ -1,6 +1,7 @@ import numpy as np import pytest +from pandas._libs.tslibs.period import IncompatibleFrequency from pandas.compat import PY3, lmap, lrange, text_type from pandas.core.dtypes.dtypes import PeriodDtype @@ -66,12 +67,17 @@ def test_constructor_field_arrays(self): years = [2007, 2007, 2007] months = [1, 2] - pytest.raises(ValueError, PeriodIndex, year=years, month=months, - freq='M') - pytest.raises(ValueError, PeriodIndex, year=years, month=months, - freq='2M') - pytest.raises(ValueError, PeriodIndex, year=years, month=months, - freq='M', start=Period('2007-01', freq='M')) + + msg = "Mismatched Period array lengths" + with pytest.raises(ValueError, match=msg): + PeriodIndex(year=years, month=months, freq='M') + with pytest.raises(ValueError, match=msg): + PeriodIndex(year=years, month=months, freq='2M') + + msg = "Can either instantiate from fields or endpoints, but not both" + with pytest.raises(ValueError, match=msg): + PeriodIndex(year=years, month=months, freq='M', + start=Period('2007-01', freq='M')) years = [2007, 2007, 2007] 
months = [1, 2, 3] @@ -81,8 +87,8 @@ def test_constructor_field_arrays(self): def test_constructor_U(self): # U was used as undefined period - pytest.raises(ValueError, period_range, '2007-1-1', periods=500, - freq='X') + with pytest.raises(ValueError, match="Invalid frequency: X"): + period_range('2007-1-1', periods=500, freq='X') def test_constructor_nano(self): idx = period_range(start=Period(ordinal=1, freq='N'), @@ -103,17 +109,29 @@ def test_constructor_arrays_negative_year(self): tm.assert_index_equal(pindex.quarter, pd.Index(quarters)) def test_constructor_invalid_quarters(self): - pytest.raises(ValueError, PeriodIndex, year=lrange(2000, 2004), - quarter=lrange(4), freq='Q-DEC') + msg = "Quarter must be 1 <= q <= 4" + with pytest.raises(ValueError, match=msg): + PeriodIndex(year=lrange(2000, 2004), quarter=lrange(4), + freq='Q-DEC') def test_constructor_corner(self): - pytest.raises(ValueError, PeriodIndex, periods=10, freq='A') + msg = "Not enough parameters to construct Period range" + with pytest.raises(ValueError, match=msg): + PeriodIndex(periods=10, freq='A') start = Period('2007', freq='A-JUN') end = Period('2010', freq='A-DEC') - pytest.raises(ValueError, PeriodIndex, start=start, end=end) - pytest.raises(ValueError, PeriodIndex, start=start) - pytest.raises(ValueError, PeriodIndex, end=end) + + msg = "start and end must have same freq" + with pytest.raises(ValueError, match=msg): + PeriodIndex(start=start, end=end) + + msg = ("Of the three parameters: start, end, and periods, exactly two" + " must be specified") + with pytest.raises(ValueError, match=msg): + PeriodIndex(start=start) + with pytest.raises(ValueError, match=msg): + PeriodIndex(end=end) result = period_range('2007-01', periods=10.5, freq='M') exp = period_range('2007-01', periods=10, freq='M') @@ -126,10 +144,15 @@ def test_constructor_fromarraylike(self): tm.assert_index_equal(PeriodIndex(idx.values), idx) tm.assert_index_equal(PeriodIndex(list(idx.values)), idx) - 
pytest.raises(ValueError, PeriodIndex, idx._ndarray_values) - pytest.raises(ValueError, PeriodIndex, list(idx._ndarray_values)) - pytest.raises(TypeError, PeriodIndex, - data=Period('2007', freq='A')) + msg = "freq not specified and cannot be inferred" + with pytest.raises(ValueError, match=msg): + PeriodIndex(idx._ndarray_values) + with pytest.raises(ValueError, match=msg): + PeriodIndex(list(idx._ndarray_values)) + + msg = "'Period' object is not iterable" + with pytest.raises(TypeError, match=msg): + PeriodIndex(data=Period('2007', freq='A')) result = PeriodIndex(iter(idx)) tm.assert_index_equal(result, idx) @@ -160,7 +183,9 @@ def test_constructor_datetime64arr(self): vals = np.arange(100000, 100000 + 10000, 100, dtype=np.int64) vals = vals.view(np.dtype('M8[us]')) - pytest.raises(ValueError, PeriodIndex, vals, freq='D') + msg = r"Wrong dtype: datetime64\[us\]" + with pytest.raises(ValueError, match=msg): + PeriodIndex(vals, freq='D') @pytest.mark.parametrize('box', [None, 'series', 'index']) def test_constructor_datetime64arr_ok(self, box): @@ -300,17 +325,20 @@ def test_constructor_simple_new_empty(self): @pytest.mark.parametrize('floats', [[1.1, 2.1], np.array([1.1, 2.1])]) def test_constructor_floats(self, floats): - with pytest.raises(TypeError): + msg = r"PeriodIndex\._simple_new does not accept floats" + with pytest.raises(TypeError, match=msg): pd.PeriodIndex._simple_new(floats, freq='M') - with pytest.raises(TypeError): + msg = "PeriodIndex does not allow floating point in construction" + with pytest.raises(TypeError, match=msg): pd.PeriodIndex(floats, freq='M') def test_constructor_nat(self): - pytest.raises(ValueError, period_range, start='NaT', - end='2011-01-01', freq='M') - pytest.raises(ValueError, period_range, start='2011-01-01', - end='NaT', freq='M') + msg = "start and end must not be NaT" + with pytest.raises(ValueError, match=msg): + period_range(start='NaT', end='2011-01-01', freq='M') + with pytest.raises(ValueError, match=msg): + 
period_range(start='2011-01-01', end='NaT', freq='M') def test_constructor_year_and_quarter(self): year = pd.Series([2001, 2002, 2003]) @@ -455,9 +483,12 @@ def test_constructor(self): # Mixed freq should fail vals = [end_intv, Period('2006-12-31', 'w')] - pytest.raises(ValueError, PeriodIndex, vals) + msg = r"Input has different freq=W-SUN from PeriodIndex\(freq=B\)" + with pytest.raises(IncompatibleFrequency, match=msg): + PeriodIndex(vals) vals = np.array(vals) - pytest.raises(ValueError, PeriodIndex, vals) + with pytest.raises(IncompatibleFrequency, match=msg): + PeriodIndex(vals) def test_constructor_error(self): start = Period('02-Apr-2005', 'B') @@ -508,7 +539,8 @@ def setup_method(self, method): self.series = Series(period_range('2000-01-01', periods=10, freq='D')) def test_constructor_cant_cast_period(self): - with pytest.raises(TypeError): + msg = "Cannot cast PeriodArray to dtype float64" + with pytest.raises(TypeError, match=msg): Series(period_range('2000-01-01', periods=10, freq='D'), dtype=float) diff --git a/pandas/tests/indexes/period/test_indexing.py b/pandas/tests/indexes/period/test_indexing.py index 47c2edfd13395..fa8199b4e6163 100644 --- a/pandas/tests/indexes/period/test_indexing.py +++ b/pandas/tests/indexes/period/test_indexing.py @@ -84,7 +84,8 @@ def test_getitem_partial(self): rng = period_range('2007-01', periods=50, freq='M') ts = Series(np.random.randn(len(rng)), rng) - pytest.raises(KeyError, ts.__getitem__, '2006') + with pytest.raises(KeyError, match=r"^'2006'$"): + ts['2006'] result = ts['2008'] assert (result.index.year == 2008).all() @@ -326,7 +327,8 @@ def test_take_fill_value(self): with pytest.raises(ValueError, match=msg): idx.take(np.array([1, 0, -5]), fill_value=True) - with pytest.raises(IndexError): + msg = "index -5 is out of bounds for size 3" + with pytest.raises(IndexError, match=msg): idx.take(np.array([1, -5])) @@ -335,7 +337,8 @@ class TestIndexing(object): def test_get_loc_msg(self): idx = 
period_range('2000-1-1', freq='A', periods=10) bad_period = Period('2012', 'A') - pytest.raises(KeyError, idx.get_loc, bad_period) + with pytest.raises(KeyError, match=r"^Period\('2012', 'A-DEC'\)$"): + idx.get_loc(bad_period) try: idx.get_loc(bad_period) @@ -373,8 +376,13 @@ def test_get_loc(self): msg = "Cannot interpret 'foo' as period" with pytest.raises(KeyError, match=msg): idx0.get_loc('foo') - pytest.raises(KeyError, idx0.get_loc, 1.1) - pytest.raises(TypeError, idx0.get_loc, idx0) + with pytest.raises(KeyError, match=r"^1\.1$"): + idx0.get_loc(1.1) + + msg = (r"'PeriodIndex\(\['2017-09-01', '2017-09-02', '2017-09-03'\]," + r" dtype='period\[D\]', freq='D'\)' is an invalid key") + with pytest.raises(TypeError, match=msg): + idx0.get_loc(idx0) # get the location of p1/p2 from # monotonic increasing PeriodIndex with duplicate @@ -391,8 +399,13 @@ def test_get_loc(self): with pytest.raises(KeyError, match=msg): idx1.get_loc('foo') - pytest.raises(KeyError, idx1.get_loc, 1.1) - pytest.raises(TypeError, idx1.get_loc, idx1) + with pytest.raises(KeyError, match=r"^1\.1$"): + idx1.get_loc(1.1) + + msg = (r"'PeriodIndex\(\['2017-09-02', '2017-09-02', '2017-09-03'\]," + r" dtype='period\[D\]', freq='D'\)' is an invalid key") + with pytest.raises(TypeError, match=msg): + idx1.get_loc(idx1) # get the location of p1/p2 from # non-monotonic increasing/decreasing PeriodIndex with duplicate @@ -441,18 +454,6 @@ def test_is_monotonic_decreasing(self): assert idx_dec1.is_monotonic_decreasing is True assert idx.is_monotonic_decreasing is False - def test_is_unique(self): - # GH 17717 - p0 = pd.Period('2017-09-01') - p1 = pd.Period('2017-09-02') - p2 = pd.Period('2017-09-03') - - idx0 = pd.PeriodIndex([p0, p1, p2]) - assert idx0.is_unique is True - - idx1 = pd.PeriodIndex([p1, p1, p2]) - assert idx1.is_unique is False - def test_contains(self): # GH 17717 p0 = pd.Period('2017-09-01') @@ -581,7 +582,7 @@ def test_get_loc2(self): msg = 'Input has different freq=None from 
PeriodArray\\(freq=D\\)' with pytest.raises(ValueError, match=msg): idx.get_loc('2000-01-10', method='nearest', tolerance='1 hour') - with pytest.raises(KeyError): + with pytest.raises(KeyError, match=r"^Period\('2000-01-10', 'D'\)$"): idx.get_loc('2000-01-10', method='nearest', tolerance='1 day') with pytest.raises( ValueError, diff --git a/pandas/tests/indexes/period/test_period.py b/pandas/tests/indexes/period/test_period.py index 464ff7aa5d58d..89bcf56dbda71 100644 --- a/pandas/tests/indexes/period/test_period.py +++ b/pandas/tests/indexes/period/test_period.py @@ -71,13 +71,15 @@ def test_fillna_period(self): pd.Period('2011-01-01', freq='D')), exp) def test_no_millisecond_field(self): - with pytest.raises(AttributeError): + msg = "type object 'DatetimeIndex' has no attribute 'millisecond'" + with pytest.raises(AttributeError, match=msg): DatetimeIndex.millisecond - with pytest.raises(AttributeError): + msg = "'DatetimeIndex' object has no attribute 'millisecond'" + with pytest.raises(AttributeError, match=msg): DatetimeIndex([]).millisecond - @pytest.mark.parametrize("sort", [True, False]) + @pytest.mark.parametrize("sort", [None, False]) def test_difference_freq(self, sort): # GH14323: difference of Period MUST preserve frequency # but the ability to union results must be preserved @@ -98,8 +100,8 @@ def test_difference_freq(self, sort): def test_hash_error(self): index = period_range('20010101', periods=10) - with pytest.raises(TypeError, match=("unhashable type: %r" % - type(index).__name__)): + msg = "unhashable type: '{}'".format(type(index).__name__) + with pytest.raises(TypeError, match=msg): hash(index) def test_make_time_series(self): @@ -124,7 +126,8 @@ def test_shallow_copy_i8(self): def test_shallow_copy_changing_freq_raises(self): pi = period_range("2018-01-01", periods=3, freq="2D") - with pytest.raises(IncompatibleFrequency, match="are different"): + msg = "specified freq and dtype are different" + with pytest.raises(IncompatibleFrequency, 
match=msg): pi._shallow_copy(pi, freq="H") def test_dtype_str(self): @@ -214,21 +217,17 @@ def test_period_index_length(self): assert (i1 == i2).all() assert i1.freq == i2.freq - try: + msg = "start and end must have same freq" + with pytest.raises(ValueError, match=msg): period_range(start=start, end=end_intv) - raise AssertionError('Cannot allow mixed freq for start and end') - except ValueError: - pass end_intv = Period('2005-05-01', 'B') i1 = period_range(start=start, end=end_intv) - try: + msg = ("Of the three parameters: start, end, and periods, exactly two" + " must be specified") + with pytest.raises(ValueError, match=msg): period_range(start=start) - raise AssertionError( - 'Must specify periods if missing start or end') - except ValueError: - pass # infer freq from first element i2 = PeriodIndex([end_intv, Period('2005-05-05', 'B')]) @@ -241,9 +240,12 @@ def test_period_index_length(self): # Mixed freq should fail vals = [end_intv, Period('2006-12-31', 'w')] - pytest.raises(ValueError, PeriodIndex, vals) + msg = r"Input has different freq=W-SUN from PeriodIndex\(freq=B\)" + with pytest.raises(IncompatibleFrequency, match=msg): + PeriodIndex(vals) vals = np.array(vals) - pytest.raises(ValueError, PeriodIndex, vals) + with pytest.raises(ValueError, match=msg): + PeriodIndex(vals) def test_fields(self): # year, month, day, hour, minute @@ -381,7 +383,9 @@ def test_contains_nat(self): assert np.nan in idx def test_periods_number_check(self): - with pytest.raises(ValueError): + msg = ("Of the three parameters: start, end, and periods, exactly two" + " must be specified") + with pytest.raises(ValueError, match=msg): period_range('2011-1-1', '2012-1-1', 'B') def test_start_time(self): @@ -500,7 +504,8 @@ def test_is_full(self): assert index.is_full index = PeriodIndex([2006, 2005, 2005], freq='A') - pytest.raises(ValueError, getattr, index, 'is_full') + with pytest.raises(ValueError, match="Index is not monotonic"): + index.is_full assert index[:0].is_full @@ 
-574,5 +579,6 @@ def test_maybe_convert_timedelta(): assert pi._maybe_convert_timedelta(2) == 2 offset = offsets.BusinessDay() - with pytest.raises(ValueError, match='freq'): + msg = r"Input has different freq=B from PeriodIndex\(freq=D\)" + with pytest.raises(ValueError, match=msg): pi._maybe_convert_timedelta(offset) diff --git a/pandas/tests/indexes/period/test_setops.py b/pandas/tests/indexes/period/test_setops.py index a97ab47bcda16..bf29edad4841e 100644 --- a/pandas/tests/indexes/period/test_setops.py +++ b/pandas/tests/indexes/period/test_setops.py @@ -38,7 +38,7 @@ def test_join_does_not_recur(self): df.columns[0], df.columns[1]], object) tm.assert_index_equal(res, expected) - @pytest.mark.parametrize("sort", [True, False]) + @pytest.mark.parametrize("sort", [None, False]) def test_union(self, sort): # union other1 = pd.period_range('1/1/2000', freq='D', periods=5) @@ -97,11 +97,11 @@ def test_union(self, sort): (rng8, other8, expected8)]: result_union = rng.union(other, sort=sort) - if sort: + if sort is None: expected = expected.sort_values() tm.assert_index_equal(result_union, expected) - @pytest.mark.parametrize("sort", [True, False]) + @pytest.mark.parametrize("sort", [None, False]) def test_union_misc(self, sort): index = period_range('1/1/2000', '1/20/2000', freq='D') @@ -110,7 +110,7 @@ def test_union_misc(self, sort): # not in order result = _permute(index[:-5]).union(_permute(index[10:]), sort=sort) - if sort: + if sort is None: tm.assert_index_equal(result, index) assert tm.equalContents(result, index) @@ -139,7 +139,7 @@ def test_union_dataframe_index(self): exp = pd.period_range('1/1/1980', '1/1/2012', freq='M') tm.assert_index_equal(df.index, exp) - @pytest.mark.parametrize("sort", [True, False]) + @pytest.mark.parametrize("sort", [None, False]) def test_intersection(self, sort): index = period_range('1/1/2000', '1/20/2000', freq='D') @@ -150,7 +150,7 @@ def test_intersection(self, sort): left = _permute(index[:-5]) right = 
_permute(index[10:]) result = left.intersection(right, sort=sort) - if sort: + if sort is None: tm.assert_index_equal(result, index[10:-5]) assert tm.equalContents(result, index[10:-5]) @@ -164,7 +164,7 @@ def test_intersection(self, sort): with pytest.raises(period.IncompatibleFrequency): index.intersection(index3, sort=sort) - @pytest.mark.parametrize("sort", [True, False]) + @pytest.mark.parametrize("sort", [None, False]) def test_intersection_cases(self, sort): base = period_range('6/1/2000', '6/30/2000', freq='D', name='idx') @@ -210,7 +210,7 @@ def test_intersection_cases(self, sort): for (rng, expected) in [(rng2, expected2), (rng3, expected3), (rng4, expected4)]: result = base.intersection(rng, sort=sort) - if sort: + if sort is None: expected = expected.sort_values() tm.assert_index_equal(result, expected) assert result.name == expected.name @@ -224,7 +224,7 @@ def test_intersection_cases(self, sort): result = rng.intersection(rng[0:0]) assert len(result) == 0 - @pytest.mark.parametrize("sort", [True, False]) + @pytest.mark.parametrize("sort", [None, False]) def test_difference(self, sort): # diff period_rng = ['1/3/2000', '1/2/2000', '1/1/2000', '1/5/2000', @@ -276,6 +276,6 @@ def test_difference(self, sort): (rng6, other6, expected6), (rng7, other7, expected7), ]: result_difference = rng.difference(other, sort=sort) - if sort: + if sort is None: expected = expected.sort_values() tm.assert_index_equal(result_difference, expected) diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py index f3e9d835c7391..26dcf7d6bc234 100644 --- a/pandas/tests/indexes/test_base.py +++ b/pandas/tests/indexes/test_base.py @@ -3,6 +3,8 @@ from collections import defaultdict from datetime import datetime, timedelta import math +import operator +import re import sys import numpy as np @@ -106,7 +108,10 @@ def test_constructor_copy(self): def test_constructor_corner(self): # corner case - pytest.raises(TypeError, Index, 0) + msg = (r"Index\(\.\.\.\) 
must be called with a collection of some" + " kind, 0 was passed") + with pytest.raises(TypeError, match=msg): + Index(0) @pytest.mark.parametrize("index_vals", [ [('A', 1), 'B'], ['B', ('A', 1)]]) @@ -487,21 +492,22 @@ def test_constructor_cast(self): Index(["a", "b", "c"], dtype=float) def test_view_with_args(self): - restricted = ['unicodeIndex', 'strIndex', 'catIndex', 'boolIndex', 'empty'] - - for i in restricted: - ind = self.indices[i] - - # with arguments - pytest.raises(TypeError, lambda: ind.view('i8')) - - # these are ok for i in list(set(self.indices.keys()) - set(restricted)): ind = self.indices[i] + ind.view('i8') - # with arguments + @pytest.mark.parametrize('index_type', [ + 'unicodeIndex', + 'strIndex', + pytest.param('catIndex', marks=pytest.mark.xfail(reason="gh-25464")), + 'boolIndex', + 'empty']) + def test_view_with_args_object_array_raises(self, index_type): + ind = self.indices[index_type] + msg = "Cannot change data-type for object array" + with pytest.raises(TypeError, match=msg): ind.view('i8') def test_astype(self): @@ -564,8 +570,8 @@ def test_delete(self, pos, expected): def test_delete_raises(self): index = Index(['a', 'b', 'c', 'd'], name='index') - with pytest.raises((IndexError, ValueError)): - # either depending on numpy version + msg = "index 5 is out of bounds for axis 0 with size 4" + with pytest.raises(IndexError, match=msg): index.delete(5) def test_identical(self): @@ -682,14 +688,16 @@ def test_empty_fancy_raises(self, attr): assert index[[]].identical(empty_index) # np.ndarray only accepts ndarray of int & bool dtypes, so should Index - pytest.raises(IndexError, index.__getitem__, empty_farr) + msg = r"arrays used as indices must be of integer \(or boolean\) type" + with pytest.raises(IndexError, match=msg): + index[empty_farr] - @pytest.mark.parametrize("sort", [True, False]) + @pytest.mark.parametrize("sort", [None, False]) def test_intersection(self, sort): first = self.strIndex[:20] second = self.strIndex[:10] 
intersect = first.intersection(second, sort=sort) - if sort: + if sort is None: tm.assert_index_equal(intersect, second.sort_values()) assert tm.equalContents(intersect, second) @@ -701,7 +709,7 @@ def test_intersection(self, sort): (Index([3, 4, 5, 6, 7], name="index"), True), # preserve same name (Index([3, 4, 5, 6, 7], name="other"), False), # drop diff names (Index([3, 4, 5, 6, 7]), False)]) - @pytest.mark.parametrize("sort", [True, False]) + @pytest.mark.parametrize("sort", [None, False]) def test_intersection_name_preservation(self, index2, keeps_name, sort): index1 = Index([1, 2, 3, 4, 5], name='index') expected = Index([3, 4, 5]) @@ -715,7 +723,7 @@ def test_intersection_name_preservation(self, index2, keeps_name, sort): @pytest.mark.parametrize("first_name,second_name,expected_name", [ ('A', 'A', 'A'), ('A', 'B', None), (None, 'B', None)]) - @pytest.mark.parametrize("sort", [True, False]) + @pytest.mark.parametrize("sort", [None, False]) def test_intersection_name_preservation2(self, first_name, second_name, expected_name, sort): first = self.strIndex[5:20] @@ -728,7 +736,7 @@ def test_intersection_name_preservation2(self, first_name, second_name, @pytest.mark.parametrize("index2,keeps_name", [ (Index([4, 7, 6, 5, 3], name='index'), True), (Index([4, 7, 6, 5, 3], name='other'), False)]) - @pytest.mark.parametrize("sort", [True, False]) + @pytest.mark.parametrize("sort", [None, False]) def test_intersection_monotonic(self, index2, keeps_name, sort): index1 = Index([5, 3, 2, 4, 1], name='index') expected = Index([5, 3, 4]) @@ -737,25 +745,25 @@ def test_intersection_monotonic(self, index2, keeps_name, sort): expected.name = "index" result = index1.intersection(index2, sort=sort) - if sort: + if sort is None: expected = expected.sort_values() tm.assert_index_equal(result, expected) @pytest.mark.parametrize("index2,expected_arr", [ (Index(['B', 'D']), ['B']), (Index(['B', 'D', 'A']), ['A', 'B', 'A'])]) - @pytest.mark.parametrize("sort", [True, False]) + 
@pytest.mark.parametrize("sort", [None, False]) def test_intersection_non_monotonic_non_unique(self, index2, expected_arr, sort): # non-monotonic non-unique index1 = Index(['A', 'B', 'A', 'C']) expected = Index(expected_arr, dtype='object') result = index1.intersection(index2, sort=sort) - if sort: + if sort is None: expected = expected.sort_values() tm.assert_index_equal(result, expected) - @pytest.mark.parametrize("sort", [True, False]) + @pytest.mark.parametrize("sort", [None, False]) def test_intersect_str_dates(self, sort): dt_dates = [datetime(2012, 2, 9), datetime(2012, 2, 22)] @@ -765,7 +773,24 @@ def test_intersect_str_dates(self, sort): assert len(result) == 0 - @pytest.mark.parametrize("sort", [True, False]) + def test_intersect_nosort(self): + result = pd.Index(['c', 'b', 'a']).intersection(['b', 'a']) + expected = pd.Index(['b', 'a']) + tm.assert_index_equal(result, expected) + + def test_intersection_equal_sort(self): + idx = pd.Index(['c', 'a', 'b']) + tm.assert_index_equal(idx.intersection(idx, sort=False), idx) + tm.assert_index_equal(idx.intersection(idx, sort=None), idx) + + @pytest.mark.xfail(reason="Not implemented") + def test_intersection_equal_sort_true(self): + # TODO decide on True behaviour + idx = pd.Index(['c', 'a', 'b']) + sorted_ = pd.Index(['a', 'b', 'c']) + tm.assert_index_equal(idx.intersection(idx, sort=True), sorted_) + + @pytest.mark.parametrize("sort", [None, False]) def test_chained_union(self, sort): # Chained unions handles names correctly i1 = Index([1, 2], name='i1') @@ -782,7 +807,7 @@ def test_chained_union(self, sort): expected = j1.union(j2, sort=sort).union(j3, sort=sort) tm.assert_index_equal(union, expected) - @pytest.mark.parametrize("sort", [True, False]) + @pytest.mark.parametrize("sort", [None, False]) def test_union(self, sort): # TODO: Replace with fixturesult first = self.strIndex[5:20] @@ -790,13 +815,65 @@ def test_union(self, sort): everything = self.strIndex[:20] union = first.union(second, sort=sort) - 
if sort: + if sort is None: tm.assert_index_equal(union, everything.sort_values()) assert tm.equalContents(union, everything) + @pytest.mark.parametrize('slice_', [slice(None), slice(0)]) + def test_union_sort_other_special(self, slice_): + # https://github.com/pandas-dev/pandas/issues/24959 + + idx = pd.Index([1, 0, 2]) + # default, sort=None + other = idx[slice_] + tm.assert_index_equal(idx.union(other), idx) + tm.assert_index_equal(other.union(idx), idx) + + # sort=False + tm.assert_index_equal(idx.union(other, sort=False), idx) + + @pytest.mark.xfail(reason="Not implemented") + @pytest.mark.parametrize('slice_', [slice(None), slice(0)]) + def test_union_sort_special_true(self, slice_): + # TODO decide on True behaviour + # sort=True + idx = pd.Index([1, 0, 2]) + # default, sort=None + other = idx[slice_] + + result = idx.union(other, sort=True) + expected = pd.Index([0, 1, 2]) + tm.assert_index_equal(result, expected) + + def test_union_sort_other_incomparable(self): + # https://github.com/pandas-dev/pandas/issues/24959 + idx = pd.Index([1, pd.Timestamp('2000')]) + # default (sort=None) + with tm.assert_produces_warning(RuntimeWarning): + result = idx.union(idx[:1]) + + tm.assert_index_equal(result, idx) + + # sort=None + with tm.assert_produces_warning(RuntimeWarning): + result = idx.union(idx[:1], sort=None) + tm.assert_index_equal(result, idx) + + # sort=False + result = idx.union(idx[:1], sort=False) + tm.assert_index_equal(result, idx) + + @pytest.mark.xfail(reason="Not implemented") + def test_union_sort_other_incomparable_true(self): + # TODO decide on True behaviour + # sort=True + idx = pd.Index([1, pd.Timestamp('2000')]) + with pytest.raises(TypeError, match='.*'): + idx.union(idx[:1], sort=True) + @pytest.mark.parametrize("klass", [ np.array, Series, list]) - @pytest.mark.parametrize("sort", [True, False]) + @pytest.mark.parametrize("sort", [None, False]) def test_union_from_iterables(self, klass, sort): # GH 10149 # TODO: Replace with fixturesult @@ 
-806,29 +883,30 @@ def test_union_from_iterables(self, klass, sort): case = klass(second.values) result = first.union(case, sort=sort) - if sort: + if sort is None: tm.assert_index_equal(result, everything.sort_values()) assert tm.equalContents(result, everything) - @pytest.mark.parametrize("sort", [True, False]) + @pytest.mark.parametrize("sort", [None, False]) def test_union_identity(self, sort): # TODO: replace with fixturesult first = self.strIndex[5:20] union = first.union(first, sort=sort) - assert union is first + # i.e. identity is not preserved when sort is True + assert (union is first) is (not sort) union = first.union([], sort=sort) - assert union is first + assert (union is first) is (not sort) union = Index([]).union(first, sort=sort) - assert union is first + assert (union is first) is (not sort) @pytest.mark.parametrize("first_list", [list('ba'), list()]) @pytest.mark.parametrize("second_list", [list('ab'), list()]) @pytest.mark.parametrize("first_name, second_name, expected_name", [ ('A', 'B', None), (None, 'B', None), ('A', None, None)]) - @pytest.mark.parametrize("sort", [True, False]) + @pytest.mark.parametrize("sort", [None, False]) def test_union_name_preservation(self, first_list, second_list, first_name, second_name, expected_name, sort): first = Index(first_list, name=first_name) @@ -837,14 +915,14 @@ def test_union_name_preservation(self, first_list, second_list, first_name, vals = set(first_list).union(second_list) - if sort and len(first_list) > 0 and len(second_list) > 0: + if sort is None and len(first_list) > 0 and len(second_list) > 0: expected = Index(sorted(vals), name=expected_name) tm.assert_index_equal(union, expected) else: expected = Index(vals, name=expected_name) assert tm.equalContents(union, expected) - @pytest.mark.parametrize("sort", [True, False]) + @pytest.mark.parametrize("sort", [None, False]) def test_union_dt_as_obj(self, sort): # TODO: Replace with fixturesult firstCat = self.strIndex.union(self.dateIndex) @@ 
-861,6 +939,15 @@ def test_union_dt_as_obj(self, sort): tm.assert_contains_all(self.strIndex, secondCat) tm.assert_contains_all(self.dateIndex, firstCat) + @pytest.mark.parametrize("method", ['union', 'intersection', 'difference', + 'symmetric_difference']) + def test_setops_disallow_true(self, method): + idx1 = pd.Index(['a', 'b']) + idx2 = pd.Index(['b', 'c']) + + with pytest.raises(ValueError, match="The 'sort' keyword only takes"): + getattr(idx1, method)(idx2, sort=True) + def test_map_identity_mapping(self): # GH 12766 # TODO: replace with fixture @@ -982,7 +1069,7 @@ def test_append_empty_preserve_name(self, name, expected): @pytest.mark.parametrize("second_name,expected", [ (None, None), ('name', 'name')]) - @pytest.mark.parametrize("sort", [True, False]) + @pytest.mark.parametrize("sort", [None, False]) def test_difference_name_preservation(self, second_name, expected, sort): # TODO: replace with fixturesult first = self.strIndex[5:20] @@ -1000,7 +1087,7 @@ def test_difference_name_preservation(self, second_name, expected, sort): else: assert result.name == expected - @pytest.mark.parametrize("sort", [True, False]) + @pytest.mark.parametrize("sort", [None, False]) def test_difference_empty_arg(self, sort): first = self.strIndex[5:20] first.name == 'name' @@ -1009,7 +1096,7 @@ def test_difference_empty_arg(self, sort): assert tm.equalContents(result, first) assert result.name == first.name - @pytest.mark.parametrize("sort", [True, False]) + @pytest.mark.parametrize("sort", [None, False]) def test_difference_identity(self, sort): first = self.strIndex[5:20] first.name == 'name' @@ -1018,7 +1105,7 @@ def test_difference_identity(self, sort): assert len(result) == 0 assert result.name == first.name - @pytest.mark.parametrize("sort", [True, False]) + @pytest.mark.parametrize("sort", [None, False]) def test_difference_sort(self, sort): first = self.strIndex[5:20] second = self.strIndex[:10] @@ -1026,12 +1113,12 @@ def test_difference_sort(self, sort): result = 
first.difference(second, sort) expected = self.strIndex[10:20] - if sort: + if sort is None: expected = expected.sort_values() tm.assert_index_equal(result, expected) - @pytest.mark.parametrize("sort", [True, False]) + @pytest.mark.parametrize("sort", [None, False]) def test_symmetric_difference(self, sort): # smoke index1 = Index([5, 2, 3, 4], name='index1') @@ -1040,7 +1127,7 @@ def test_symmetric_difference(self, sort): expected = Index([5, 1]) assert tm.equalContents(result, expected) assert result.name is None - if sort: + if sort is None: expected = expected.sort_values() tm.assert_index_equal(result, expected) @@ -1049,13 +1136,43 @@ def test_symmetric_difference(self, sort): assert tm.equalContents(result, expected) assert result.name is None - @pytest.mark.parametrize("sort", [True, False]) + @pytest.mark.parametrize('opname', ['difference', 'symmetric_difference']) + def test_difference_incomparable(self, opname): + a = pd.Index([3, pd.Timestamp('2000'), 1]) + b = pd.Index([2, pd.Timestamp('1999'), 1]) + op = operator.methodcaller(opname, b) + + # sort=None, the default + result = op(a) + expected = pd.Index([3, pd.Timestamp('2000'), 2, pd.Timestamp('1999')]) + if opname == 'difference': + expected = expected[:2] + tm.assert_index_equal(result, expected) + + # sort=False + op = operator.methodcaller(opname, b, sort=False) + result = op(a) + tm.assert_index_equal(result, expected) + + @pytest.mark.xfail(reason="Not implemented") + @pytest.mark.parametrize('opname', ['difference', 'symmetric_difference']) + def test_difference_incomparable_true(self, opname): + # TODO decide on True behaviour + # # sort=True, raises + a = pd.Index([3, pd.Timestamp('2000'), 1]) + b = pd.Index([2, pd.Timestamp('1999'), 1]) + op = operator.methodcaller(opname, b, sort=True) + + with pytest.raises(TypeError, match='Cannot compare'): + op(a) + + @pytest.mark.parametrize("sort", [None, False]) def test_symmetric_difference_mi(self, sort): index1 = 
MultiIndex.from_tuples(self.tuples) index2 = MultiIndex.from_tuples([('foo', 1), ('bar', 3)]) result = index1.symmetric_difference(index2, sort=sort) expected = MultiIndex.from_tuples([('bar', 2), ('baz', 3), ('bar', 3)]) - if sort: + if sort is None: expected = expected.sort_values() tm.assert_index_equal(result, expected) assert tm.equalContents(result, expected) @@ -1063,18 +1180,18 @@ def test_symmetric_difference_mi(self, sort): @pytest.mark.parametrize("index2,expected", [ (Index([0, 1, np.nan]), Index([2.0, 3.0, 0.0])), (Index([0, 1]), Index([np.nan, 2.0, 3.0, 0.0]))]) - @pytest.mark.parametrize("sort", [True, False]) + @pytest.mark.parametrize("sort", [None, False]) def test_symmetric_difference_missing(self, index2, expected, sort): # GH 13514 change: {nan} - {nan} == {} # (GH 6444, sorting of nans, is no longer an issue) index1 = Index([1, np.nan, 2, 3]) result = index1.symmetric_difference(index2, sort=sort) - if sort: + if sort is None: expected = expected.sort_values() tm.assert_index_equal(result, expected) - @pytest.mark.parametrize("sort", [True, False]) + @pytest.mark.parametrize("sort", [None, False]) def test_symmetric_difference_non_index(self, sort): index1 = Index([1, 2, 3, 4], name='index1') index2 = np.array([2, 3, 4, 5]) @@ -1088,7 +1205,7 @@ def test_symmetric_difference_non_index(self, sort): assert tm.equalContents(result, expected) assert result.name == 'new_name' - @pytest.mark.parametrize("sort", [True, False]) + @pytest.mark.parametrize("sort", [None, False]) def test_difference_type(self, sort): # GH 20040 # If taking difference of a set and itself, it @@ -1099,7 +1216,7 @@ def test_difference_type(self, sort): expected = index.drop(index) tm.assert_index_equal(result, expected) - @pytest.mark.parametrize("sort", [True, False]) + @pytest.mark.parametrize("sort", [None, False]) def test_intersection_difference(self, sort): # GH 20040 # Test that the intersection of an index with an @@ -1316,13 +1433,14 @@ def 
test_get_indexer_strings(self, method, expected): def test_get_indexer_strings_raises(self): index = pd.Index(['b', 'c']) - with pytest.raises(TypeError): + msg = r"unsupported operand type\(s\) for -: 'str' and 'str'" + with pytest.raises(TypeError, match=msg): index.get_indexer(['a', 'b', 'c', 'd'], method='nearest') - with pytest.raises(TypeError): + with pytest.raises(TypeError, match=msg): index.get_indexer(['a', 'b', 'c', 'd'], method='pad', tolerance=2) - with pytest.raises(TypeError): + with pytest.raises(TypeError, match=msg): index.get_indexer(['a', 'b', 'c', 'd'], method='pad', tolerance=[2, 2, 2, 2]) @@ -1431,8 +1549,9 @@ def test_slice_locs(self, dtype): assert index2.slice_locs(8, 2) == (2, 6) assert index2.slice_locs(7, 3) == (2, 5) - def test_slice_float_locs(self): - index = Index(np.array([0, 1, 2, 5, 6, 7, 9, 10], dtype=float)) + @pytest.mark.parametrize("dtype", [int, float]) + def test_slice_float_locs(self, dtype): + index = Index(np.array([0, 1, 2, 5, 6, 7, 9, 10], dtype=dtype)) n = len(index) assert index.slice_locs(5.0, 10.0) == (3, n) assert index.slice_locs(4.5, 10.5) == (3, 8) @@ -1441,24 +1560,6 @@ def test_slice_float_locs(self): assert index2.slice_locs(8.5, 1.5) == (2, 6) assert index2.slice_locs(10.5, -1) == (0, n) - @pytest.mark.xfail(reason="Assertions were not correct - see GH#20915") - def test_slice_ints_with_floats_raises(self): - # int slicing with floats - # GH 4892, these are all TypeErrors - index = Index(np.array([0, 1, 2, 5, 6, 7, 9, 10], dtype=int)) - n = len(index) - - pytest.raises(TypeError, - lambda: index.slice_locs(5.0, 10.0)) - pytest.raises(TypeError, - lambda: index.slice_locs(4.5, 10.5)) - - index2 = index[::-1] - pytest.raises(TypeError, - lambda: index2.slice_locs(8.5, 1.5), (2, 6)) - pytest.raises(TypeError, - lambda: index2.slice_locs(10.5, -1), (0, n)) - def test_slice_locs_dup(self): index = Index(['a', 'a', 'b', 'c', 'd', 'd']) assert index.slice_locs('a', 'd') == (0, 6) @@ -1592,23 +1693,33 @@ def 
test_drop_tuple(self, values, to_drop): tm.assert_index_equal(result, expected) removed = index.drop(to_drop[1]) + msg = r"\"\[{}\] not found in axis\"".format( + re.escape(to_drop[1].__repr__())) for drop_me in to_drop[1], [to_drop[1]]: - pytest.raises(KeyError, removed.drop, drop_me) + with pytest.raises(KeyError, match=msg): + removed.drop(drop_me) + + @pytest.mark.parametrize("method,expected,sort", [ + ('intersection', np.array([(1, 'A'), (2, 'A'), (1, 'B'), (2, 'B')], + dtype=[('num', int), ('let', 'a1')]), + False), - @pytest.mark.parametrize("method,expected", [ ('intersection', np.array([(1, 'A'), (1, 'B'), (2, 'A'), (2, 'B')], - dtype=[('num', int), ('let', 'a1')])), + dtype=[('num', int), ('let', 'a1')]), + None), + ('union', np.array([(1, 'A'), (1, 'B'), (1, 'C'), (2, 'A'), (2, 'B'), - (2, 'C')], dtype=[('num', int), ('let', 'a1')])) + (2, 'C')], dtype=[('num', int), ('let', 'a1')]), + None) ]) - def test_tuple_union_bug(self, method, expected): + def test_tuple_union_bug(self, method, expected, sort): index1 = Index(np.array([(1, 'A'), (2, 'A'), (1, 'B'), (2, 'B')], dtype=[('num', int), ('let', 'a1')])) index2 = Index(np.array([(1, 'A'), (2, 'A'), (1, 'B'), (2, 'B'), (1, 'C'), (2, 'C')], dtype=[('num', int), ('let', 'a1')])) - result = getattr(index1, method)(index2) + result = getattr(index1, method)(index2, sort=sort) assert result.ndim == 1 expected = Index(expected) @@ -2247,20 +2358,20 @@ def test_unique_na(self): result = idx.unique() tm.assert_index_equal(result, expected) - @pytest.mark.parametrize("sort", [True, False]) + @pytest.mark.parametrize("sort", [None, False]) def test_intersection_base(self, sort): # (same results for py2 and py3 but sortedness not tested elsewhere) index = self.create_index() first = index[:5] second = index[:3] - expected = Index([0, 1, 'a']) if sort else Index([0, 'a', 1]) + expected = Index([0, 1, 'a']) if sort is None else Index([0, 'a', 1]) result = first.intersection(second, sort=sort) 
tm.assert_index_equal(result, expected) @pytest.mark.parametrize("klass", [ np.array, Series, list]) - @pytest.mark.parametrize("sort", [True, False]) + @pytest.mark.parametrize("sort", [None, False]) def test_intersection_different_type_base(self, klass, sort): # GH 10149 index = self.create_index() @@ -2270,7 +2381,7 @@ def test_intersection_different_type_base(self, klass, sort): result = first.intersection(klass(second.values), sort=sort) assert tm.equalContents(result, second) - @pytest.mark.parametrize("sort", [True, False]) + @pytest.mark.parametrize("sort", [None, False]) def test_difference_base(self, sort): # (same results for py2 and py3 but sortedness not tested elsewhere) index = self.create_index() @@ -2279,7 +2390,7 @@ def test_difference_base(self, sort): result = first.difference(second, sort) expected = Index([0, 'a', 1]) - if sort: + if sort is None: expected = Index(safe_sort(expected)) tm.assert_index_equal(result, expected) diff --git a/pandas/tests/indexes/test_category.py b/pandas/tests/indexes/test_category.py index 582d466c6178e..95fac2f6ae05b 100644 --- a/pandas/tests/indexes/test_category.py +++ b/pandas/tests/indexes/test_category.py @@ -181,18 +181,21 @@ def test_create_categorical(self): expected = Categorical(['a', 'b', 'c']) tm.assert_categorical_equal(result, expected) - def test_disallow_set_ops(self): - + @pytest.mark.parametrize('func,op_name', [ + (lambda idx: idx - idx, '__sub__'), + (lambda idx: idx + idx, '__add__'), + (lambda idx: idx - ['a', 'b'], '__sub__'), + (lambda idx: idx + ['a', 'b'], '__add__'), + (lambda idx: ['a', 'b'] - idx, '__rsub__'), + (lambda idx: ['a', 'b'] + idx, '__radd__'), + ]) + def test_disallow_set_ops(self, func, op_name): # GH 10039 # set ops (+/-) raise TypeError idx = pd.Index(pd.Categorical(['a', 'b'])) - - pytest.raises(TypeError, lambda: idx - idx) - pytest.raises(TypeError, lambda: idx + idx) - pytest.raises(TypeError, lambda: idx - ['a', 'b']) - pytest.raises(TypeError, lambda: idx + ['a', 
'b']) - pytest.raises(TypeError, lambda: ['a', 'b'] - idx) - pytest.raises(TypeError, lambda: ['a', 'b'] + idx) + msg = "cannot perform {} with this index type: CategoricalIndex" + with pytest.raises(TypeError, match=msg.format(op_name)): + func(idx) def test_method_delegation(self): @@ -231,8 +234,9 @@ def test_method_delegation(self): list('aabbca'), categories=list('cabdef'), ordered=True)) # invalid - pytest.raises(ValueError, lambda: ci.set_categories( - list('cab'), inplace=True)) + msg = "cannot use inplace with CategoricalIndex" + with pytest.raises(ValueError, match=msg): + ci.set_categories(list('cab'), inplace=True) def test_contains(self): @@ -357,12 +361,11 @@ def test_append(self): tm.assert_index_equal(result, ci, exact=True) # appending with different categories or reordered is not ok - pytest.raises( - TypeError, - lambda: ci.append(ci.values.set_categories(list('abcd')))) - pytest.raises( - TypeError, - lambda: ci.append(ci.values.reorder_categories(list('abc')))) + msg = "all inputs must be Index" + with pytest.raises(TypeError, match=msg): + ci.append(ci.values.set_categories(list('abcd'))) + with pytest.raises(TypeError, match=msg): + ci.append(ci.values.reorder_categories(list('abc'))) # with objects result = ci.append(Index(['c', 'a'])) @@ -370,7 +373,9 @@ def test_append(self): tm.assert_index_equal(result, expected, exact=True) # invalid objects - pytest.raises(TypeError, lambda: ci.append(Index(['a', 'd']))) + msg = "cannot append a non-category item to a CategoricalIndex" + with pytest.raises(TypeError, match=msg): + ci.append(Index(['a', 'd'])) # GH14298 - if base object is not categorical -> coerce to object result = Index(['c', 'a']).append(ci) @@ -406,7 +411,10 @@ def test_insert(self): tm.assert_index_equal(result, expected, exact=True) # invalid - pytest.raises(TypeError, lambda: ci.insert(0, 'd')) + msg = ("cannot insert an item into a CategoricalIndex that is not" + " already an existing category") + with pytest.raises(TypeError, 
match=msg): + ci.insert(0, 'd') # GH 18295 (test missing) expected = CategoricalIndex(['a', np.nan, 'a', 'b', 'c', 'b']) @@ -611,15 +619,6 @@ def test_is_monotonic(self, data, non_lexsorted_data): assert c.is_monotonic_increasing is True assert c.is_monotonic_decreasing is False - @pytest.mark.parametrize('values, expected', [ - ([1, 2, 3], True), - ([1, 3, 1], False), - (list('abc'), True), - (list('aba'), False)]) - def test_is_unique(self, values, expected): - ci = CategoricalIndex(values) - assert ci.is_unique is expected - def test_has_duplicates(self): idx = CategoricalIndex([0, 0, 0], name='foo') @@ -642,12 +641,16 @@ def test_get_indexer(self): r1 = idx1.get_indexer(idx2) assert_almost_equal(r1, np.array([0, 1, 2, -1], dtype=np.intp)) - pytest.raises(NotImplementedError, - lambda: idx2.get_indexer(idx1, method='pad')) - pytest.raises(NotImplementedError, - lambda: idx2.get_indexer(idx1, method='backfill')) - pytest.raises(NotImplementedError, - lambda: idx2.get_indexer(idx1, method='nearest')) + msg = ("method='pad' and method='backfill' not implemented yet for" + " CategoricalIndex") + with pytest.raises(NotImplementedError, match=msg): + idx2.get_indexer(idx1, method='pad') + with pytest.raises(NotImplementedError, match=msg): + idx2.get_indexer(idx1, method='backfill') + + msg = "method='nearest' not implemented yet for CategoricalIndex" + with pytest.raises(NotImplementedError, match=msg): + idx2.get_indexer(idx1, method='nearest') def test_get_loc(self): # GH 12531 @@ -785,12 +788,15 @@ def test_equals_categorical(self): # invalid comparisons with pytest.raises(ValueError, match="Lengths must match"): ci1 == Index(['a', 'b', 'c']) - pytest.raises(TypeError, lambda: ci1 == ci2) - pytest.raises( - TypeError, lambda: ci1 == Categorical(ci1.values, ordered=False)) - pytest.raises( - TypeError, - lambda: ci1 == Categorical(ci1.values, categories=list('abc'))) + + msg = ("categorical index comparisons must have the same categories" + " and ordered 
attributes") + with pytest.raises(TypeError, match=msg): + ci1 == ci2 + with pytest.raises(TypeError, match=msg): + ci1 == Categorical(ci1.values, ordered=False) + with pytest.raises(TypeError, match=msg): + ci1 == Categorical(ci1.values, categories=list('abc')) # tests # make sure that we are testing for category inclusion properly diff --git a/pandas/tests/indexes/test_common.py b/pandas/tests/indexes/test_common.py index fd356202a8ce5..03448129a48fc 100644 --- a/pandas/tests/indexes/test_common.py +++ b/pandas/tests/indexes/test_common.py @@ -3,6 +3,8 @@ any index subclass. Makes use of the `indices` fixture defined in pandas/tests/indexes/conftest.py. """ +import re + import numpy as np import pytest @@ -189,8 +191,14 @@ def test_unique(self, indices): result = indices.unique(level=level) tm.assert_index_equal(result, expected) - for level in 3, 'wrong': - pytest.raises((IndexError, KeyError), indices.unique, level=level) + msg = "Too many levels: Index has only 1 level, not 4" + with pytest.raises(IndexError, match=msg): + indices.unique(level=3) + + msg = r"Level wrong must be same as name \({}\)".format( + re.escape(indices.name.__repr__())) + with pytest.raises(KeyError, match=msg): + indices.unique(level='wrong') def test_get_unique_index(self, indices): # MultiIndex tested separately @@ -239,12 +247,16 @@ def test_get_unique_index(self, indices): tm.assert_index_equal(result, expected) def test_sort(self, indices): - pytest.raises(TypeError, indices.sort) + msg = "cannot sort an Index object in-place, use sort_values instead" + with pytest.raises(TypeError, match=msg): + indices.sort() def test_mutability(self, indices): if not len(indices): pytest.skip('Skip check for empty Index') - pytest.raises(TypeError, indices.__setitem__, 0, indices[0]) + msg = "Index does not support mutable operations" + with pytest.raises(TypeError, match=msg): + indices[0] = indices[0] def test_view(self, indices): assert indices.view().name == indices.name diff --git 
a/pandas/tests/indexes/test_numeric.py b/pandas/tests/indexes/test_numeric.py index a64340c02cd22..26413f4519eff 100644 --- a/pandas/tests/indexes/test_numeric.py +++ b/pandas/tests/indexes/test_numeric.py @@ -1,15 +1,17 @@ # -*- coding: utf-8 -*- from datetime import datetime +import re import numpy as np import pytest from pandas._libs.tslibs import Timestamp -from pandas.compat import range +from pandas.compat import PY2, range import pandas as pd from pandas import Float64Index, Index, Int64Index, Series, UInt64Index +from pandas.api.types import pandas_dtype from pandas.tests.indexes.common import Base import pandas.util.testing as tm @@ -153,12 +155,22 @@ def test_constructor(self): result = Index(np.array([np.nan])) assert pd.isna(result.values).all() + @pytest.mark.skipif(PY2, reason="pytest.raises match regex fails") def test_constructor_invalid(self): # invalid - pytest.raises(TypeError, Float64Index, 0.) - pytest.raises(TypeError, Float64Index, ['a', 'b', 0.]) - pytest.raises(TypeError, Float64Index, [Timestamp('20130101')]) + msg = (r"Float64Index\(\.\.\.\) must be called with a collection of" + r" some kind, 0\.0 was passed") + with pytest.raises(TypeError, match=msg): + Float64Index(0.) 
+ msg = ("String dtype not supported, you may need to explicitly cast to" + " a numeric type") + with pytest.raises(TypeError, match=msg): + Float64Index(['a', 'b', 0.]) + msg = (r"float\(\) argument must be a string or a number, not" + " 'Timestamp'") + with pytest.raises(TypeError, match=msg): + Float64Index([Timestamp('20130101')]) def test_constructor_coerce(self): @@ -216,12 +228,17 @@ def test_astype(self): # invalid for dtype in ['M8[ns]', 'm8[ns]']: - pytest.raises(TypeError, lambda: i.astype(dtype)) + msg = ("Cannot convert Float64Index to dtype {}; integer values" + " are required for conversion").format(pandas_dtype(dtype)) + with pytest.raises(TypeError, match=re.escape(msg)): + i.astype(dtype) # GH 13149 for dtype in ['int16', 'int32', 'int64']: i = Float64Index([0, 1.1, np.NAN]) - pytest.raises(ValueError, lambda: i.astype(dtype)) + msg = "Cannot convert NA to integer" + with pytest.raises(ValueError, match=msg): + i.astype(dtype) def test_type_coercion_fail(self, any_int_dtype): # see gh-15832 @@ -275,12 +292,16 @@ def test_get_loc(self): assert idx.get_loc(1.1, method) == loc assert idx.get_loc(1.1, method, tolerance=0.9) == loc - pytest.raises(KeyError, idx.get_loc, 'foo') - pytest.raises(KeyError, idx.get_loc, 1.5) - pytest.raises(KeyError, idx.get_loc, 1.5, method='pad', - tolerance=0.1) - pytest.raises(KeyError, idx.get_loc, True) - pytest.raises(KeyError, idx.get_loc, False) + with pytest.raises(KeyError, match="^'foo'$"): + idx.get_loc('foo') + with pytest.raises(KeyError, match=r"^1\.5$"): + idx.get_loc(1.5) + with pytest.raises(KeyError, match=r"^1\.5$"): + idx.get_loc(1.5, method='pad', tolerance=0.1) + with pytest.raises(KeyError, match="^True$"): + idx.get_loc(True) + with pytest.raises(KeyError, match="^False$"): + idx.get_loc(False) with pytest.raises(ValueError, match='must be numeric'): idx.get_loc(1.4, method='nearest', tolerance='foo') @@ -310,15 +331,20 @@ def test_get_loc_na(self): # not representable by slice idx = 
Float64Index([np.nan, 1, np.nan, np.nan]) assert idx.get_loc(1) == 1 - pytest.raises(KeyError, idx.slice_locs, np.nan) + msg = "'Cannot get left slice bound for non-unique label: nan" + with pytest.raises(KeyError, match=msg): + idx.slice_locs(np.nan) def test_get_loc_missing_nan(self): # GH 8569 idx = Float64Index([1, 2]) assert idx.get_loc(1) == 0 - pytest.raises(KeyError, idx.get_loc, 3) - pytest.raises(KeyError, idx.get_loc, np.nan) - pytest.raises(KeyError, idx.get_loc, [np.nan]) + with pytest.raises(KeyError, match=r"^3\.0$"): + idx.get_loc(3) + with pytest.raises(KeyError, match="^nan$"): + idx.get_loc(np.nan) + with pytest.raises(KeyError, match=r"^\[nan\]$"): + idx.get_loc([np.nan]) def test_contains_nans(self): i = Float64Index([1.0, 2.0, np.nan]) @@ -499,13 +525,17 @@ def test_union_noncomparable(self): tm.assert_index_equal(result, expected) def test_cant_or_shouldnt_cast(self): + msg = ("String dtype not supported, you may need to explicitly cast to" + " a numeric type") # can't data = ['foo', 'bar', 'baz'] - pytest.raises(TypeError, self._holder, data) + with pytest.raises(TypeError, match=msg): + self._holder(data) # shouldn't data = ['0', '1', '2'] - pytest.raises(TypeError, self._holder, data) + with pytest.raises(TypeError, match=msg): + self._holder(data) def test_view_index(self): self.index.view(Index) @@ -576,7 +606,10 @@ def test_constructor(self): tm.assert_index_equal(index, expected) # scalar raise Exception - pytest.raises(TypeError, Int64Index, 5) + msg = (r"Int64Index\(\.\.\.\) must be called with a collection of some" + " kind, 5 was passed") + with pytest.raises(TypeError, match=msg): + Int64Index(5) # copy arr = self.index.values diff --git a/pandas/tests/indexes/test_range.py b/pandas/tests/indexes/test_range.py index bbd1e0ccc19b1..96cf83d477376 100644 --- a/pandas/tests/indexes/test_range.py +++ b/pandas/tests/indexes/test_range.py @@ -503,7 +503,7 @@ def test_join_self(self): joined = self.index.join(self.index, how=kind) assert 
self.index is joined - @pytest.mark.parametrize("sort", [True, False]) + @pytest.mark.parametrize("sort", [None, False]) def test_intersection(self, sort): # intersect with Int64Index other = Index(np.arange(1, 6)) diff --git a/pandas/tests/indexes/timedeltas/test_arithmetic.py b/pandas/tests/indexes/timedeltas/test_arithmetic.py index 04977023d7c62..3173252e174ab 100644 --- a/pandas/tests/indexes/timedeltas/test_arithmetic.py +++ b/pandas/tests/indexes/timedeltas/test_arithmetic.py @@ -198,20 +198,34 @@ def test_ops_ndarray(self): expected = pd.to_timedelta(['2 days']).values tm.assert_numpy_array_equal(td + other, expected) tm.assert_numpy_array_equal(other + td, expected) - pytest.raises(TypeError, lambda: td + np.array([1])) - pytest.raises(TypeError, lambda: np.array([1]) + td) + msg = r"unsupported operand type\(s\) for \+: 'Timedelta' and 'int'" + with pytest.raises(TypeError, match=msg): + td + np.array([1]) + msg = (r"unsupported operand type\(s\) for \+: 'numpy.ndarray' and" + " 'Timedelta'") + with pytest.raises(TypeError, match=msg): + np.array([1]) + td expected = pd.to_timedelta(['0 days']).values tm.assert_numpy_array_equal(td - other, expected) tm.assert_numpy_array_equal(-other + td, expected) - pytest.raises(TypeError, lambda: td - np.array([1])) - pytest.raises(TypeError, lambda: np.array([1]) - td) + msg = r"unsupported operand type\(s\) for -: 'Timedelta' and 'int'" + with pytest.raises(TypeError, match=msg): + td - np.array([1]) + msg = (r"unsupported operand type\(s\) for -: 'numpy.ndarray' and" + " 'Timedelta'") + with pytest.raises(TypeError, match=msg): + np.array([1]) - td expected = pd.to_timedelta(['2 days']).values tm.assert_numpy_array_equal(td * np.array([2]), expected) tm.assert_numpy_array_equal(np.array([2]) * td, expected) - pytest.raises(TypeError, lambda: td * other) - pytest.raises(TypeError, lambda: other * td) + msg = ("ufunc multiply cannot use operands with types" + r" dtype\('<m8\[ns\]'\) and dtype\('<m8\[ns\]'\)") + with 
pytest.raises(TypeError, match=msg): + td * other + with pytest.raises(TypeError, match=msg): + other * td tm.assert_numpy_array_equal(td / other, np.array([1], dtype=np.float64)) diff --git a/pandas/tests/indexes/timedeltas/test_construction.py b/pandas/tests/indexes/timedeltas/test_construction.py index 3938d6acad2f0..0028f1e2edad5 100644 --- a/pandas/tests/indexes/timedeltas/test_construction.py +++ b/pandas/tests/indexes/timedeltas/test_construction.py @@ -168,10 +168,15 @@ def test_constructor_coverage(self): tm.assert_index_equal(from_ints, expected) # non-conforming freq - pytest.raises(ValueError, TimedeltaIndex, - ['1 days', '2 days', '4 days'], freq='D') + msg = ("Inferred frequency None from passed values does not conform to" + " passed frequency D") + with pytest.raises(ValueError, match=msg): + TimedeltaIndex(['1 days', '2 days', '4 days'], freq='D') - pytest.raises(ValueError, timedelta_range, periods=10, freq='D') + msg = ("Of the four parameters: start, end, periods, and freq, exactly" + " three must be specified") + with pytest.raises(ValueError, match=msg): + timedelta_range(periods=10, freq='D') def test_constructor_name(self): idx = timedelta_range(start='1 days', periods=1, freq='D', name='TEST') diff --git a/pandas/tests/indexes/timedeltas/test_ops.py b/pandas/tests/indexes/timedeltas/test_ops.py index 40377e4362b75..63210f67c2dbd 100644 --- a/pandas/tests/indexes/timedeltas/test_ops.py +++ b/pandas/tests/indexes/timedeltas/test_ops.py @@ -71,7 +71,9 @@ def test_unknown_attribute(self): tdi = pd.timedelta_range(start=0, periods=10, freq='1s') ts = pd.Series(np.random.normal(size=10), index=tdi) assert 'foo' not in ts.__dict__.keys() - pytest.raises(AttributeError, lambda: ts.foo) + msg = "'Series' object has no attribute 'foo'" + with pytest.raises(AttributeError, match=msg): + ts.foo def test_order(self): # GH 10295 diff --git a/pandas/tests/indexes/timedeltas/test_partial_slicing.py b/pandas/tests/indexes/timedeltas/test_partial_slicing.py 
index 62bf2a0b4a1cf..9fce1c9acd488 100644 --- a/pandas/tests/indexes/timedeltas/test_partial_slicing.py +++ b/pandas/tests/indexes/timedeltas/test_partial_slicing.py @@ -31,7 +31,9 @@ def test_partial_slice(self): result = s['6 days, 23:11:12'] assert result == s.iloc[133] - pytest.raises(KeyError, s.__getitem__, '50 days') + msg = r"^Timedelta\('50 days 00:00:00'\)$" + with pytest.raises(KeyError, match=msg): + s['50 days'] def test_partial_slice_high_reso(self): diff --git a/pandas/tests/indexes/timedeltas/test_timedelta.py b/pandas/tests/indexes/timedeltas/test_timedelta.py index 547366ec79094..062e1c1e9f46d 100644 --- a/pandas/tests/indexes/timedeltas/test_timedelta.py +++ b/pandas/tests/indexes/timedeltas/test_timedelta.py @@ -1,4 +1,5 @@ from datetime import timedelta +import re import numpy as np import pytest @@ -51,7 +52,7 @@ def test_fillna_timedelta(self): [pd.Timedelta('1 day'), 'x', pd.Timedelta('3 day')], dtype=object) tm.assert_index_equal(idx.fillna('x'), exp) - @pytest.mark.parametrize("sort", [True, False]) + @pytest.mark.parametrize("sort", [None, False]) def test_difference_freq(self, sort): # GH14323: Difference of TimedeltaIndex should not preserve frequency @@ -69,7 +70,7 @@ def test_difference_freq(self, sort): tm.assert_index_equal(idx_diff, expected) tm.assert_attr_equal('freq', idx_diff, expected) - @pytest.mark.parametrize("sort", [True, False]) + @pytest.mark.parametrize("sort", [None, False]) def test_difference_sort(self, sort): index = pd.TimedeltaIndex(["5 days", "3 days", "2 days", "4 days", @@ -80,7 +81,7 @@ def test_difference_sort(self, sort): expected = TimedeltaIndex(["5 days", "0 days"], freq=None) - if sort: + if sort is None: expected = expected.sort_values() tm.assert_index_equal(idx_diff, expected) @@ -90,7 +91,7 @@ def test_difference_sort(self, sort): idx_diff = index.difference(other, sort) expected = TimedeltaIndex(["1 days", "0 days"], freq=None) - if sort: + if sort is None: expected = expected.sort_values() 
tm.assert_index_equal(idx_diff, expected) @@ -263,9 +264,13 @@ def test_fields(self): tm.assert_index_equal(rng.nanoseconds, Index([456, 456], dtype='int64')) - pytest.raises(AttributeError, lambda: rng.hours) - pytest.raises(AttributeError, lambda: rng.minutes) - pytest.raises(AttributeError, lambda: rng.milliseconds) + msg = "'TimedeltaIndex' object has no attribute '{}'" + with pytest.raises(AttributeError, match=msg.format('hours')): + rng.hours + with pytest.raises(AttributeError, match=msg.format('minutes')): + rng.minutes + with pytest.raises(AttributeError, match=msg.format('milliseconds')): + rng.milliseconds # with nat s = Series(rng) @@ -325,6 +330,13 @@ def test_freq_conversion(self): result = td.astype('timedelta64[s]') assert_index_equal(result, expected) + @pytest.mark.parametrize('unit', ['Y', 'y', 'M']) + def test_unit_m_y_deprecated(self, unit): + with tm.assert_produces_warning(FutureWarning) as w: + TimedeltaIndex([1, 3, 7], unit) + msg = r'.* units are deprecated .*' + assert re.match(msg, str(w[0].message)) + class TestTimeSeries(object): diff --git a/pandas/tests/indexes/timedeltas/test_tools.py b/pandas/tests/indexes/timedeltas/test_tools.py index d211219159233..58482a174dfd1 100644 --- a/pandas/tests/indexes/timedeltas/test_tools.py +++ b/pandas/tests/indexes/timedeltas/test_tools.py @@ -115,14 +115,22 @@ def test_to_timedelta_invalid(self): to_timedelta(['foo'], errors='never') # these will error - pytest.raises(ValueError, lambda: to_timedelta([1, 2], unit='foo')) - pytest.raises(ValueError, lambda: to_timedelta(1, unit='foo')) + msg = "invalid unit abbreviation: foo" + with pytest.raises(ValueError, match=msg): + to_timedelta([1, 2], unit='foo') + with pytest.raises(ValueError, match=msg): + to_timedelta(1, unit='foo') # time not supported ATM - pytest.raises(ValueError, lambda: to_timedelta(time(second=1))) + msg = ("Value must be Timedelta, string, integer, float, timedelta or" + " convertible") + with pytest.raises(ValueError, 
match=msg): + to_timedelta(time(second=1)) assert to_timedelta(time(second=1), errors='coerce') is pd.NaT - pytest.raises(ValueError, lambda: to_timedelta(['foo', 'bar'])) + msg = "unit abbreviation w/o a number" + with pytest.raises(ValueError, match=msg): + to_timedelta(['foo', 'bar']) tm.assert_index_equal(TimedeltaIndex([pd.NaT, pd.NaT]), to_timedelta(['foo', 'bar'], errors='coerce')) diff --git a/pandas/tests/indexing/common.py b/pandas/tests/indexing/common.py index f4d6fe428515e..91ea38920c702 100644 --- a/pandas/tests/indexing/common.py +++ b/pandas/tests/indexing/common.py @@ -233,8 +233,6 @@ def _print(result, error=None): tm.assert_series_equal(rs, xp) elif xp.ndim == 2: tm.assert_frame_equal(rs, xp) - elif xp.ndim == 3: - tm.assert_panel_equal(rs, xp) result = 'ok' except AssertionError as e: detail = str(e) diff --git a/pandas/tests/indexing/multiindex/test_loc.py b/pandas/tests/indexing/multiindex/test_loc.py index ea451d40eb5d3..073d40001a16b 100644 --- a/pandas/tests/indexing/multiindex/test_loc.py +++ b/pandas/tests/indexing/multiindex/test_loc.py @@ -123,10 +123,12 @@ def test_loc_multiindex(self): tm.assert_frame_equal(rs, xp) # missing label - pytest.raises(KeyError, lambda: mi_int.loc[2]) + with pytest.raises(KeyError, match=r"^2L?$"): + mi_int.loc[2] with catch_warnings(record=True): # GH 21593 - pytest.raises(KeyError, lambda: mi_int.ix[2]) + with pytest.raises(KeyError, match=r"^2L?$"): + mi_int.ix[2] def test_loc_multiindex_indexer_none(self): diff --git a/pandas/tests/indexing/multiindex/test_multiindex.py b/pandas/tests/indexing/multiindex/test_multiindex.py index 4f5517f89e852..ccf017489e046 100644 --- a/pandas/tests/indexing/multiindex/test_multiindex.py +++ b/pandas/tests/indexing/multiindex/test_multiindex.py @@ -84,3 +84,11 @@ def test_multi_nan_indexing(self): name='a'), Index(['C1', 'C2', 'C3', 'C4'], name='b')]) tm.assert_frame_equal(result, expected) + + def test_contains(self): + # GH 24570 + tx = pd.timedelta_range('09:30:00', 
'16:00:00', freq='30 min') + idx = MultiIndex.from_arrays([tx, np.arange(len(tx))]) + assert tx[0] in idx + assert 'element_not_exit' not in idx + assert '0 day 09:30:00' in idx diff --git a/pandas/tests/indexing/multiindex/test_panel.py b/pandas/tests/indexing/multiindex/test_panel.py index 68c8fadd2f0dd..314009146911a 100644 --- a/pandas/tests/indexing/multiindex/test_panel.py +++ b/pandas/tests/indexing/multiindex/test_panel.py @@ -55,49 +55,3 @@ def test_iloc_getitem_panel_multiindex(self): result = p.loc[:, (1, 'y'), 'u'] tm.assert_series_equal(result, expected) - - def test_panel_setitem_with_multiindex(self): - - # 10360 - # failing with a multi-index - arr = np.array([[[1, 2, 3], [0, 0, 0]], - [[0, 0, 0], [0, 0, 0]]], - dtype=np.float64) - - # reg index - axes = dict(items=['A', 'B'], major_axis=[0, 1], - minor_axis=['X', 'Y', 'Z']) - p1 = Panel(0., **axes) - p1.iloc[0, 0, :] = [1, 2, 3] - expected = Panel(arr, **axes) - tm.assert_panel_equal(p1, expected) - - # multi-indexes - axes['items'] = MultiIndex.from_tuples( - [('A', 'a'), ('B', 'b')]) - p2 = Panel(0., **axes) - p2.iloc[0, 0, :] = [1, 2, 3] - expected = Panel(arr, **axes) - tm.assert_panel_equal(p2, expected) - - axes['major_axis'] = MultiIndex.from_tuples( - [('A', 1), ('A', 2)]) - p3 = Panel(0., **axes) - p3.iloc[0, 0, :] = [1, 2, 3] - expected = Panel(arr, **axes) - tm.assert_panel_equal(p3, expected) - - axes['minor_axis'] = MultiIndex.from_product( - [['X'], range(3)]) - p4 = Panel(0., **axes) - p4.iloc[0, 0, :] = [1, 2, 3] - expected = Panel(arr, **axes) - tm.assert_panel_equal(p4, expected) - - arr = np.array( - [[[1, 0, 0], [2, 0, 0]], [[0, 0, 0], [0, 0, 0]]], - dtype=np.float64) - p5 = Panel(0., **axes) - p5.iloc[0, :, 0] = [1, 2] - expected = Panel(arr, **axes) - tm.assert_panel_equal(p5, expected) diff --git a/pandas/tests/indexing/multiindex/test_partial.py b/pandas/tests/indexing/multiindex/test_partial.py index 2e37ebe4a0629..473463def2b87 100644 --- 
a/pandas/tests/indexing/multiindex/test_partial.py +++ b/pandas/tests/indexing/multiindex/test_partial.py @@ -104,8 +104,8 @@ def test_getitem_partial_column_select(self): result = df.ix[('a', 'y'), [1, 0]] tm.assert_frame_equal(result, expected) - pytest.raises(KeyError, df.loc.__getitem__, - (('a', 'foo'), slice(None, None))) + with pytest.raises(KeyError, match=r"\('a', 'foo'\)"): + df.loc[('a', 'foo'), :] def test_partial_set( self, multiindex_year_month_day_dataframe_random_data): diff --git a/pandas/tests/indexing/multiindex/test_slice.py b/pandas/tests/indexing/multiindex/test_slice.py index fcecb2b454eb6..db7d079186708 100644 --- a/pandas/tests/indexing/multiindex/test_slice.py +++ b/pandas/tests/indexing/multiindex/test_slice.py @@ -107,7 +107,8 @@ def test_per_axis_per_level_getitem(self): # ambiguous cases # these can be multiply interpreted (e.g. in this case # as df.loc[slice(None),[1]] as well - pytest.raises(KeyError, lambda: df.loc[slice(None), [1]]) + with pytest.raises(KeyError, match=r"'\[1\] not in index'"): + df.loc[slice(None), [1]] result = df.loc[(slice(None), [1]), :] expected = df.iloc[[0, 3]] diff --git a/pandas/tests/indexing/test_categorical.py b/pandas/tests/indexing/test_categorical.py index b7443e242137b..317aac1766cf8 100644 --- a/pandas/tests/indexing/test_categorical.py +++ b/pandas/tests/indexing/test_categorical.py @@ -53,23 +53,20 @@ def test_loc_scalar(self): assert_frame_equal(df, expected) # value not in the categories - pytest.raises(KeyError, lambda: df.loc['d']) + with pytest.raises(KeyError, match=r"^'d'$"): + df.loc['d'] - def f(): + msg = "cannot append a non-category item to a CategoricalIndex" + with pytest.raises(TypeError, match=msg): df.loc['d'] = 10 - pytest.raises(TypeError, f) - - def f(): + msg = ("cannot insert an item into a CategoricalIndex that is not" + " already an existing category") + with pytest.raises(TypeError, match=msg): df.loc['d', 'A'] = 10 - - pytest.raises(TypeError, f) - - def f(): + with 
pytest.raises(TypeError, match=msg): df.loc['d', 'C'] = 10 - pytest.raises(TypeError, f) - def test_getitem_scalar(self): cats = Categorical([Timestamp('12-31-1999'), @@ -318,7 +315,8 @@ def test_loc_listlike(self): assert_frame_equal(result, expected, check_index_type=True) # element in the categories but not in the values - pytest.raises(KeyError, lambda: self.df2.loc['e']) + with pytest.raises(KeyError, match=r"^'e'$"): + self.df2.loc['e'] # assign is ok df = self.df2.copy() @@ -616,22 +614,29 @@ def test_reindexing(self): assert_frame_equal(result, expected, check_index_type=True) # passed duplicate indexers are not allowed - pytest.raises(ValueError, lambda: self.df2.reindex(['a', 'a'])) + msg = "cannot reindex with a non-unique indexer" + with pytest.raises(ValueError, match=msg): + self.df2.reindex(['a', 'a']) # args NotImplemented ATM - pytest.raises(NotImplementedError, - lambda: self.df2.reindex(['a'], method='ffill')) - pytest.raises(NotImplementedError, - lambda: self.df2.reindex(['a'], level=1)) - pytest.raises(NotImplementedError, - lambda: self.df2.reindex(['a'], limit=2)) + msg = r"argument {} is not implemented for CategoricalIndex\.reindex" + with pytest.raises(NotImplementedError, match=msg.format('method')): + self.df2.reindex(['a'], method='ffill') + with pytest.raises(NotImplementedError, match=msg.format('level')): + self.df2.reindex(['a'], level=1) + with pytest.raises(NotImplementedError, match=msg.format('limit')): + self.df2.reindex(['a'], limit=2) def test_loc_slice(self): # slicing # not implemented ATM # GH9748 - pytest.raises(TypeError, lambda: self.df.loc[1:5]) + msg = ("cannot do slice indexing on {klass} with these " + r"indexers \[1\] of {kind}".format( + klass=str(CategoricalIndex), kind=str(int))) + with pytest.raises(TypeError, match=msg): + self.df.loc[1:5] # result = df.loc[1:5] # expected = df.iloc[[1,2,3,4]] @@ -679,8 +684,11 @@ def test_boolean_selection(self): # categories=[3, 2, 1], # ordered=False, # name=u'B') - 
pytest.raises(TypeError, lambda: df4[df4.index < 2]) - pytest.raises(TypeError, lambda: df4[df4.index > 1]) + msg = "Unordered Categoricals can only compare equality or not" + with pytest.raises(TypeError, match=msg): + df4[df4.index < 2] + with pytest.raises(TypeError, match=msg): + df4[df4.index > 1] def test_indexing_with_category(self): diff --git a/pandas/tests/indexing/test_chaining_and_caching.py b/pandas/tests/indexing/test_chaining_and_caching.py index e38c1b16b3b60..6070edca075c2 100644 --- a/pandas/tests/indexing/test_chaining_and_caching.py +++ b/pandas/tests/indexing/test_chaining_and_caching.py @@ -302,11 +302,11 @@ def test_setting_with_copy_bug(self): 'c': ['a', 'b', np.nan, 'd']}) mask = pd.isna(df.c) - def f(): + msg = ("A value is trying to be set on a copy of a slice from a" + " DataFrame") + with pytest.raises(com.SettingWithCopyError, match=msg): df[['c']][mask] = df[['b']][mask] - pytest.raises(com.SettingWithCopyError, f) - # invalid warning as we are returning a new object # GH 8730 df1 = DataFrame({'x': Series(['a', 'b', 'c']), @@ -357,7 +357,6 @@ def check(result, expected): check(result4, expected) @pytest.mark.filterwarnings("ignore::DeprecationWarning") - @pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning") def test_cache_updating(self): # GH 4939, make sure to update the cache on setitem @@ -367,12 +366,6 @@ def test_cache_updating(self): assert "Hello Friend" in df['A'].index assert "Hello Friend" in df['B'].index - panel = tm.makePanel() - panel.ix[0] # get first item into cache - panel.ix[:, :, 'A+1'] = panel.ix[:, :, 'A'] + 1 - assert "A+1" in panel.ix[0].columns - assert "A+1" in panel.ix[1].columns - # 10264 df = DataFrame(np.zeros((5, 5), dtype='int64'), columns=[ 'a', 'b', 'c', 'd', 'e'], index=range(5)) diff --git a/pandas/tests/indexing/test_floats.py b/pandas/tests/indexing/test_floats.py index de91b8f4a796c..b9b47338c9de2 100644 --- a/pandas/tests/indexing/test_floats.py +++ b/pandas/tests/indexing/test_floats.py 
@@ -6,7 +6,7 @@ import pytest from pandas import ( - DataFrame, Float64Index, Index, Int64Index, RangeIndex, Series) + DataFrame, Float64Index, Index, Int64Index, RangeIndex, Series, compat) import pandas.util.testing as tm from pandas.util.testing import assert_almost_equal, assert_series_equal @@ -54,9 +54,11 @@ def test_scalar_error(self): with pytest.raises(TypeError, match=msg): s.iloc[3.0] - def f(): + msg = ("cannot do positional indexing on {klass} with these " + r"indexers \[3\.0\] of {kind}".format( + klass=type(i), kind=str(float))) + with pytest.raises(TypeError, match=msg): s.iloc[3.0] = 0 - pytest.raises(TypeError, f) @ignore_ix def test_scalar_non_numeric(self): @@ -82,35 +84,46 @@ def test_scalar_non_numeric(self): (lambda x: x.iloc, False), (lambda x: x, True)]: - def f(): - with catch_warnings(record=True): - idxr(s)[3.0] - # gettitem on a DataFrame is a KeyError as it is indexing # via labels on the columns if getitem and isinstance(s, DataFrame): error = KeyError + msg = r"^3(\.0)?$" else: error = TypeError - pytest.raises(error, f) + msg = (r"cannot do (label|index|positional) indexing" + r" on {klass} with these indexers \[3\.0\] of" + r" {kind}|" + "Cannot index by location index with a" + " non-integer key" + .format(klass=type(i), kind=str(float))) + with catch_warnings(record=True): + with pytest.raises(error, match=msg): + idxr(s)[3.0] # label based can be a TypeError or KeyError - def f(): - s.loc[3.0] - if s.index.inferred_type in ['string', 'unicode', 'mixed']: error = KeyError + msg = r"^3$" else: error = TypeError - pytest.raises(error, f) + msg = (r"cannot do (label|index) indexing" + r" on {klass} with these indexers \[3\.0\] of" + r" {kind}" + .format(klass=type(i), kind=str(float))) + with pytest.raises(error, match=msg): + s.loc[3.0] # contains assert 3.0 not in s # setting with a float fails with iloc - def f(): + msg = (r"cannot do (label|index|positional) indexing" + r" on {klass} with these indexers \[3\.0\] of" + r" {kind}" 
+ .format(klass=type(i), kind=str(float))) + with pytest.raises(TypeError, match=msg): s.iloc[3.0] = 0 - pytest.raises(TypeError, f) # setting with an indexer if s.index.inferred_type in ['categorical']: @@ -145,7 +158,12 @@ def f(): # fallsback to position selection, series only s = Series(np.arange(len(i)), index=i) s[3] - pytest.raises(TypeError, lambda: s[3.0]) + msg = (r"cannot do (label|index) indexing" + r" on {klass} with these indexers \[3\.0\] of" + r" {kind}" + .format(klass=type(i), kind=str(float))) + with pytest.raises(TypeError, match=msg): + s[3.0] @ignore_ix def test_scalar_with_mixed(self): @@ -153,19 +171,23 @@ def test_scalar_with_mixed(self): s2 = Series([1, 2, 3], index=['a', 'b', 'c']) s3 = Series([1, 2, 3], index=['a', 'b', 1.5]) - # lookup in a pure string index + # lookup in a pure stringstr # with an invalid indexer for idxr in [lambda x: x.ix, lambda x: x, lambda x: x.iloc]: - def f(): - with catch_warnings(record=True): + msg = (r"cannot do label indexing" + r" on {klass} with these indexers \[1\.0\] of" + r" {kind}|" + "Cannot index by location index with a non-integer key" + .format(klass=str(Index), kind=str(float))) + with catch_warnings(record=True): + with pytest.raises(TypeError, match=msg): idxr(s2)[1.0] - pytest.raises(TypeError, f) - - pytest.raises(KeyError, lambda: s2.loc[1.0]) + with pytest.raises(KeyError, match=r"^1$"): + s2.loc[1.0] result = s2.loc['b'] expected = 2 @@ -175,11 +197,13 @@ def f(): # indexing for idxr in [lambda x: x]: - def f(): + msg = (r"cannot do label indexing" + r" on {klass} with these indexers \[1\.0\] of" + r" {kind}" + .format(klass=str(Index), kind=str(float))) + with pytest.raises(TypeError, match=msg): idxr(s3)[1.0] - pytest.raises(TypeError, f) - result = idxr(s3)[1] expected = 2 assert result == expected @@ -189,17 +213,22 @@ def f(): for idxr in [lambda x: x.ix]: with catch_warnings(record=True): - def f(): + msg = (r"cannot do label indexing" + r" on {klass} with these indexers \[1\.0\] 
of" + r" {kind}" + .format(klass=str(Index), kind=str(float))) + with pytest.raises(TypeError, match=msg): idxr(s3)[1.0] - pytest.raises(TypeError, f) - result = idxr(s3)[1] expected = 2 assert result == expected - pytest.raises(TypeError, lambda: s3.iloc[1.0]) - pytest.raises(KeyError, lambda: s3.loc[1.0]) + msg = "Cannot index by location index with a non-integer key" + with pytest.raises(TypeError, match=msg): + s3.iloc[1.0] + with pytest.raises(KeyError, match=r"^1$"): + s3.loc[1.0] result = s3.loc[1.5] expected = 3 @@ -280,16 +309,14 @@ def test_scalar_float(self): # setting s2 = s.copy() - def f(): - with catch_warnings(record=True): - idxr(s2)[indexer] = expected with catch_warnings(record=True): result = idxr(s2)[indexer] self.check(result, s, 3, getitem) # random integer is a KeyError with catch_warnings(record=True): - pytest.raises(KeyError, lambda: idxr(s)[3.5]) + with pytest.raises(KeyError, match=r"^3\.5$"): + idxr(s)[3.5] # contains assert 3.0 in s @@ -303,11 +330,16 @@ def f(): self.check(result, s, 3, False) # iloc raises with a float - pytest.raises(TypeError, lambda: s.iloc[3.0]) + msg = "Cannot index by location index with a non-integer key" + with pytest.raises(TypeError, match=msg): + s.iloc[3.0] - def g(): + msg = (r"cannot do positional indexing" + r" on {klass} with these indexers \[3\.0\] of" + r" {kind}" + .format(klass=str(Float64Index), kind=str(float))) + with pytest.raises(TypeError, match=msg): s2.iloc[3.0] = 0 - pytest.raises(TypeError, g) @ignore_ix def test_slice_non_numeric(self): @@ -329,37 +361,55 @@ def test_slice_non_numeric(self): slice(3, 4.0), slice(3.0, 4.0)]: - def f(): + msg = ("cannot do slice indexing" + r" on {klass} with these indexers \[(3|4)\.0\] of" + " {kind}" + .format(klass=type(index), kind=str(float))) + with pytest.raises(TypeError, match=msg): s.iloc[l] - pytest.raises(TypeError, f) for idxr in [lambda x: x.ix, lambda x: x.loc, lambda x: x.iloc, lambda x: x]: - def f(): - with catch_warnings(record=True): 
+ msg = ("cannot do slice indexing" + r" on {klass} with these indexers" + r" \[(3|4)(\.0)?\]" + r" of ({kind_float}|{kind_int})" + .format(klass=type(index), + kind_float=str(float), + kind_int=str(int))) + with catch_warnings(record=True): + with pytest.raises(TypeError, match=msg): idxr(s)[l] - pytest.raises(TypeError, f) # setitem for l in [slice(3.0, 4), slice(3, 4.0), slice(3.0, 4.0)]: - def f(): + msg = ("cannot do slice indexing" + r" on {klass} with these indexers \[(3|4)\.0\] of" + " {kind}" + .format(klass=type(index), kind=str(float))) + with pytest.raises(TypeError, match=msg): s.iloc[l] = 0 - pytest.raises(TypeError, f) for idxr in [lambda x: x.ix, lambda x: x.loc, lambda x: x.iloc, lambda x: x]: - def f(): - with catch_warnings(record=True): + msg = ("cannot do slice indexing" + r" on {klass} with these indexers" + r" \[(3|4)(\.0)?\]" + r" of ({kind_float}|{kind_int})" + .format(klass=type(index), + kind_float=str(float), + kind_int=str(int))) + with catch_warnings(record=True): + with pytest.raises(TypeError, match=msg): idxr(s)[l] = 0 - pytest.raises(TypeError, f) @ignore_ix def test_slice_integer(self): @@ -396,11 +446,13 @@ def test_slice_integer(self): self.check(result, s, indexer, False) # positional indexing - def f(): + msg = ("cannot do slice indexing" + r" on {klass} with these indexers \[(3|4)\.0\] of" + " {kind}" + .format(klass=type(index), kind=str(float))) + with pytest.raises(TypeError, match=msg): s[l] - pytest.raises(TypeError, f) - # getitem out-of-bounds for l in [slice(-6, 6), slice(-6.0, 6.0)]: @@ -420,11 +472,13 @@ def f(): self.check(result, s, indexer, False) # positional indexing - def f(): + msg = ("cannot do slice indexing" + r" on {klass} with these indexers \[-6\.0\] of" + " {kind}" + .format(klass=type(index), kind=str(float))) + with pytest.raises(TypeError, match=msg): s[slice(-6.0, 6.0)] - pytest.raises(TypeError, f) - # getitem odd floats for l, res1 in [(slice(2.5, 4), slice(3, 5)), (slice(2, 3.5), slice(2, 4)), 
@@ -443,11 +497,13 @@ def f(): self.check(result, s, res, False) # positional indexing - def f(): + msg = ("cannot do slice indexing" + r" on {klass} with these indexers \[(2|3)\.5\] of" + " {kind}" + .format(klass=type(index), kind=str(float))) + with pytest.raises(TypeError, match=msg): s[l] - pytest.raises(TypeError, f) - # setitem for l in [slice(3.0, 4), slice(3, 4.0), @@ -462,11 +518,13 @@ def f(): assert (result == 0).all() # positional indexing - def f(): + msg = ("cannot do slice indexing" + r" on {klass} with these indexers \[(3|4)\.0\] of" + " {kind}" + .format(klass=type(index), kind=str(float))) + with pytest.raises(TypeError, match=msg): s[l] = 0 - pytest.raises(TypeError, f) - def test_integer_positional_indexing(self): """ make sure that we are raising on positional indexing w.r.t. an integer index """ @@ -484,11 +542,17 @@ def test_integer_positional_indexing(self): slice(2.0, 4), slice(2.0, 4.0)]: - def f(): + if compat.PY2: + klass = Int64Index + else: + klass = RangeIndex + msg = ("cannot do slice indexing" + r" on {klass} with these indexers \[(2|4)\.0\] of" + " {kind}" + .format(klass=str(klass), kind=str(float))) + with pytest.raises(TypeError, match=msg): idxr(s)[l] - pytest.raises(TypeError, f) - @ignore_ix def test_slice_integer_frame_getitem(self): @@ -509,11 +573,13 @@ def f(idxr): self.check(result, s, indexer, False) # positional indexing - def f(): + msg = ("cannot do slice indexing" + r" on {klass} with these indexers \[(0|1)\.0\] of" + " {kind}" + .format(klass=type(index), kind=str(float))) + with pytest.raises(TypeError, match=msg): s[l] - pytest.raises(TypeError, f) - # getitem out-of-bounds for l in [slice(-10, 10), slice(-10.0, 10.0)]: @@ -522,11 +588,13 @@ def f(): self.check(result, s, slice(-10, 10), True) # positional indexing - def f(): + msg = ("cannot do slice indexing" + r" on {klass} with these indexers \[-10\.0\] of" + " {kind}" + .format(klass=type(index), kind=str(float))) + with pytest.raises(TypeError, match=msg): 
s[slice(-10.0, 10.0)] - pytest.raises(TypeError, f) - # getitem odd floats for l, res in [(slice(0.5, 1), slice(1, 2)), (slice(0, 0.5), slice(0, 1)), @@ -536,11 +604,13 @@ def f(): self.check(result, s, res, False) # positional indexing - def f(): + msg = ("cannot do slice indexing" + r" on {klass} with these indexers \[0\.5\] of" + " {kind}" + .format(klass=type(index), kind=str(float))) + with pytest.raises(TypeError, match=msg): s[l] - pytest.raises(TypeError, f) - # setitem for l in [slice(3.0, 4), slice(3, 4.0), @@ -552,11 +622,13 @@ def f(): assert (result == 0).all() # positional indexing - def f(): + msg = ("cannot do slice indexing" + r" on {klass} with these indexers \[(3|4)\.0\] of" + " {kind}" + .format(klass=type(index), kind=str(float))) + with pytest.raises(TypeError, match=msg): s[l] = 0 - pytest.raises(TypeError, f) - f(lambda x: x.loc) with catch_warnings(record=True): f(lambda x: x.ix) @@ -632,9 +704,12 @@ def test_floating_misc(self): # value not found (and no fallbacking at all) # scalar integers - pytest.raises(KeyError, lambda: s.loc[4]) - pytest.raises(KeyError, lambda: s.loc[4]) - pytest.raises(KeyError, lambda: s[4]) + with pytest.raises(KeyError, match=r"^4\.0$"): + s.loc[4] + with pytest.raises(KeyError, match=r"^4\.0$"): + s.loc[4] + with pytest.raises(KeyError, match=r"^4\.0$"): + s[4] # fancy floats/integers create the correct entry (as nan) # fancy tests diff --git a/pandas/tests/indexing/test_iloc.py b/pandas/tests/indexing/test_iloc.py index a867387db4b46..69ec6454e952a 100644 --- a/pandas/tests/indexing/test_iloc.py +++ b/pandas/tests/indexing/test_iloc.py @@ -26,26 +26,33 @@ def test_iloc_exceeds_bounds(self): msg = 'positional indexers are out-of-bounds' with pytest.raises(IndexError, match=msg): df.iloc[:, [0, 1, 2, 3, 4, 5]] - pytest.raises(IndexError, lambda: df.iloc[[1, 30]]) - pytest.raises(IndexError, lambda: df.iloc[[1, -30]]) - pytest.raises(IndexError, lambda: df.iloc[[100]]) + with pytest.raises(IndexError, match=msg): 
+ df.iloc[[1, 30]] + with pytest.raises(IndexError, match=msg): + df.iloc[[1, -30]] + with pytest.raises(IndexError, match=msg): + df.iloc[[100]] s = df['A'] - pytest.raises(IndexError, lambda: s.iloc[[100]]) - pytest.raises(IndexError, lambda: s.iloc[[-100]]) + with pytest.raises(IndexError, match=msg): + s.iloc[[100]] + with pytest.raises(IndexError, match=msg): + s.iloc[[-100]] # still raise on a single indexer msg = 'single positional indexer is out-of-bounds' with pytest.raises(IndexError, match=msg): df.iloc[30] - pytest.raises(IndexError, lambda: df.iloc[-30]) + with pytest.raises(IndexError, match=msg): + df.iloc[-30] # GH10779 # single positive/negative indexer exceeding Series bounds should raise # an IndexError with pytest.raises(IndexError, match=msg): s.iloc[30] - pytest.raises(IndexError, lambda: s.iloc[-30]) + with pytest.raises(IndexError, match=msg): + s.iloc[-30] # slices are ok result = df.iloc[:, 4:10] # 0 < start < len < stop @@ -104,8 +111,12 @@ def check(result, expected): check(dfl.iloc[:, 1:3], dfl.iloc[:, [1]]) check(dfl.iloc[4:6], dfl.iloc[[4]]) - pytest.raises(IndexError, lambda: dfl.iloc[[4, 5, 6]]) - pytest.raises(IndexError, lambda: dfl.iloc[:, 4]) + msg = "positional indexers are out-of-bounds" + with pytest.raises(IndexError, match=msg): + dfl.iloc[[4, 5, 6]] + msg = "single positional indexer is out-of-bounds" + with pytest.raises(IndexError, match=msg): + dfl.iloc[:, 4] def test_iloc_getitem_int(self): @@ -437,10 +448,16 @@ def test_iloc_getitem_labelled_frame(self): assert result == exp # out-of-bounds exception - pytest.raises(IndexError, df.iloc.__getitem__, tuple([10, 5])) + msg = "single positional indexer is out-of-bounds" + with pytest.raises(IndexError, match=msg): + df.iloc[10, 5] # trying to use a label - pytest.raises(ValueError, df.iloc.__getitem__, tuple(['j', 'D'])) + msg = (r"Location based indexing can only have \[integer, integer" + r" slice \(START point is INCLUDED, END point is EXCLUDED\)," + r" listlike of 
integers, boolean array\] types") + with pytest.raises(ValueError, match=msg): + df.iloc['j', 'D'] def test_iloc_getitem_doc_issue(self): @@ -555,10 +572,15 @@ def test_iloc_mask(self): # GH 3631, iloc with a mask (of a series) should raise df = DataFrame(lrange(5), list('ABCDE'), columns=['a']) mask = (df.a % 2 == 0) - pytest.raises(ValueError, df.iloc.__getitem__, tuple([mask])) + msg = ("iLocation based boolean indexing cannot use an indexable as" + " a mask") + with pytest.raises(ValueError, match=msg): + df.iloc[mask] mask.index = lrange(len(mask)) - pytest.raises(NotImplementedError, df.iloc.__getitem__, - tuple([mask])) + msg = ("iLocation based boolean indexing on an integer type is not" + " available") + with pytest.raises(NotImplementedError, match=msg): + df.iloc[mask] # ndarray ok result = df.iloc[np.array([True] * len(mask), dtype=bool)] @@ -675,3 +697,16 @@ def test_identity_slice_returns_new_object(self): # should also be a shallow copy original_series[:3] = [7, 8, 9] assert all(sliced_series[:3] == [7, 8, 9]) + + def test_indexing_zerodim_np_array(self): + # GH24919 + df = DataFrame([[1, 2], [3, 4]]) + result = df.iloc[np.array(0)] + s = pd.Series([1, 2], name=0) + tm.assert_series_equal(result, s) + + def test_series_indexing_zerodim_np_array(self): + # GH24919 + s = Series([1, 2]) + result = s.iloc[np.array(0)] + assert result == 1 diff --git a/pandas/tests/indexing/test_ix.py b/pandas/tests/indexing/test_ix.py index 35805bce07705..fb4dfbb39ce94 100644 --- a/pandas/tests/indexing/test_ix.py +++ b/pandas/tests/indexing/test_ix.py @@ -102,7 +102,12 @@ def compare(result, expected): with catch_warnings(record=True): df.ix[key] - pytest.raises(TypeError, lambda: df.loc[key]) + msg = (r"cannot do slice indexing" + r" on {klass} with these indexers \[(0|1)\] of" + r" {kind}" + .format(klass=type(df.index), kind=str(int))) + with pytest.raises(TypeError, match=msg): + df.loc[key] df = DataFrame(np.random.randn(5, 4), columns=list('ABCD'), 
index=pd.date_range('2012-01-01', periods=5)) @@ -122,7 +127,8 @@ def compare(result, expected): with catch_warnings(record=True): expected = df.ix[key] except KeyError: - pytest.raises(KeyError, lambda: df.loc[key]) + with pytest.raises(KeyError, match=r"^'2012-01-31'$"): + df.loc[key] continue result = df.loc[key] @@ -279,14 +285,18 @@ def test_ix_setitem_out_of_bounds_axis_0(self): np.random.randn(2, 5), index=["row%s" % i for i in range(2)], columns=["col%s" % i for i in range(5)]) with catch_warnings(record=True): - pytest.raises(ValueError, df.ix.__setitem__, (2, 0), 100) + msg = "cannot set by positional indexing with enlargement" + with pytest.raises(ValueError, match=msg): + df.ix[2, 0] = 100 def test_ix_setitem_out_of_bounds_axis_1(self): df = DataFrame( np.random.randn(5, 2), index=["row%s" % i for i in range(5)], columns=["col%s" % i for i in range(2)]) with catch_warnings(record=True): - pytest.raises(ValueError, df.ix.__setitem__, (0, 2), 100) + msg = "cannot set by positional indexing with enlargement" + with pytest.raises(ValueError, match=msg): + df.ix[0, 2] = 100 def test_ix_empty_list_indexer_is_ok(self): with catch_warnings(record=True): diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py index 17e107c7a1130..29f70929624fc 100644 --- a/pandas/tests/indexing/test_loc.py +++ b/pandas/tests/indexing/test_loc.py @@ -233,8 +233,10 @@ def test_loc_to_fail(self): columns=['e', 'f', 'g']) # raise a KeyError? 
- pytest.raises(KeyError, df.loc.__getitem__, - tuple([[1, 2], [1, 2]])) + msg = (r"\"None of \[Int64Index\(\[1, 2\], dtype='int64'\)\] are" + r" in the \[index\]\"") + with pytest.raises(KeyError, match=msg): + df.loc[[1, 2], [1, 2]] # GH 7496 # loc should not fallback @@ -243,10 +245,18 @@ def test_loc_to_fail(self): s.loc[1] = 1 s.loc['a'] = 2 - pytest.raises(KeyError, lambda: s.loc[-1]) - pytest.raises(KeyError, lambda: s.loc[[-1, -2]]) + with pytest.raises(KeyError, match=r"^-1$"): + s.loc[-1] - pytest.raises(KeyError, lambda: s.loc[['4']]) + msg = (r"\"None of \[Int64Index\(\[-1, -2\], dtype='int64'\)\] are" + r" in the \[index\]\"") + with pytest.raises(KeyError, match=msg): + s.loc[[-1, -2]] + + msg = (r"\"None of \[Index\(\[u?'4'\], dtype='object'\)\] are" + r" in the \[index\]\"") + with pytest.raises(KeyError, match=msg): + s.loc[['4']] s.loc[-1] = 3 with tm.assert_produces_warning(FutureWarning, @@ -256,29 +266,28 @@ def test_loc_to_fail(self): tm.assert_series_equal(result, expected) s['a'] = 2 - pytest.raises(KeyError, lambda: s.loc[[-2]]) + msg = (r"\"None of \[Int64Index\(\[-2\], dtype='int64'\)\] are" + r" in the \[index\]\"") + with pytest.raises(KeyError, match=msg): + s.loc[[-2]] del s['a'] - def f(): + with pytest.raises(KeyError, match=msg): s.loc[[-2]] = 0 - pytest.raises(KeyError, f) - # inconsistency between .loc[values] and .loc[values,:] # GH 7999 df = DataFrame([['a'], ['b']], index=[1, 2], columns=['value']) - def f(): + msg = (r"\"None of \[Int64Index\(\[3\], dtype='int64'\)\] are" + r" in the \[index\]\"") + with pytest.raises(KeyError, match=msg): df.loc[[3], :] - pytest.raises(KeyError, f) - - def f(): + with pytest.raises(KeyError, match=msg): df.loc[[3]] - pytest.raises(KeyError, f) - def test_loc_getitem_list_with_fail(self): # 15747 # should KeyError if *any* missing labels @@ -600,11 +609,15 @@ def test_loc_non_unique(self): # these are going to raise because the we are non monotonic df = DataFrame({'A': [1, 2, 3, 4, 5, 6], 
'B': [3, 4, 5, 6, 7, 8]}, index=[0, 1, 0, 1, 2, 3]) - pytest.raises(KeyError, df.loc.__getitem__, - tuple([slice(1, None)])) - pytest.raises(KeyError, df.loc.__getitem__, - tuple([slice(0, None)])) - pytest.raises(KeyError, df.loc.__getitem__, tuple([slice(1, 2)])) + msg = "'Cannot get left slice bound for non-unique label: 1'" + with pytest.raises(KeyError, match=msg): + df.loc[1:] + msg = "'Cannot get left slice bound for non-unique label: 0'" + with pytest.raises(KeyError, match=msg): + df.loc[0:] + msg = "'Cannot get left slice bound for non-unique label: 1'" + with pytest.raises(KeyError, match=msg): + df.loc[1:2] # monotonic are ok df = DataFrame({'A': [1, 2, 3, 4, 5, 6], @@ -765,3 +778,16 @@ def test_loc_setitem_empty_append_raises(self): msg = "cannot copy sequence with size 2 to array axis with dimension 0" with pytest.raises(ValueError, match=msg): df.loc[0:2, 'x'] = data + + def test_indexing_zerodim_np_array(self): + # GH24924 + df = DataFrame([[1, 2], [3, 4]]) + result = df.loc[np.array(0)] + s = pd.Series([1, 2], name=0) + tm.assert_series_equal(result, s) + + def test_series_indexing_zerodim_np_array(self): + # GH24924 + s = Series([1, 2]) + result = s.loc[np.array(0)] + assert result == 1 diff --git a/pandas/tests/indexing/test_panel.py b/pandas/tests/indexing/test_panel.py index 34708e1148c90..8033d19f330b3 100644 --- a/pandas/tests/indexing/test_panel.py +++ b/pandas/tests/indexing/test_panel.py @@ -3,7 +3,7 @@ import numpy as np import pytest -from pandas import DataFrame, Panel, date_range +from pandas import Panel, date_range from pandas.util import testing as tm @@ -31,30 +31,6 @@ def test_iloc_getitem_panel(self): expected = p.loc['B', 'b', 'two'] assert result == expected - # slice - result = p.iloc[1:3] - expected = p.loc[['B', 'C']] - tm.assert_panel_equal(result, expected) - - result = p.iloc[:, 0:2] - expected = p.loc[:, ['a', 'b']] - tm.assert_panel_equal(result, expected) - - # list of integers - result = p.iloc[[0, 2]] - expected = 
p.loc[['A', 'C']] - tm.assert_panel_equal(result, expected) - - # neg indices - result = p.iloc[[-1, 1], [-1, 1]] - expected = p.loc[['D', 'B'], ['c', 'b']] - tm.assert_panel_equal(result, expected) - - # dups indices - result = p.iloc[[-1, -1, 1], [-1, 1]] - expected = p.loc[['D', 'D', 'B'], ['c', 'b']] - tm.assert_panel_equal(result, expected) - # combined result = p.iloc[0, [True, True], [0, 1]] expected = p.loc['A', ['a', 'b'], ['one', 'two']] @@ -110,40 +86,6 @@ def test_iloc_panel_issue(self): def test_panel_getitem(self): with catch_warnings(record=True): - # GH4016, date selection returns a frame when a partial string - # selection - ind = date_range(start="2000", freq="D", periods=1000) - df = DataFrame( - np.random.randn( - len(ind), 5), index=ind, columns=list('ABCDE')) - panel = Panel({'frame_' + c: df for c in list('ABC')}) - - test2 = panel.loc[:, "2002":"2002-12-31"] - test1 = panel.loc[:, "2002"] - tm.assert_panel_equal(test1, test2) - - # GH8710 - # multi-element getting with a list - panel = tm.makePanel() - - expected = panel.iloc[[0, 1]] - - result = panel.loc[['ItemA', 'ItemB']] - tm.assert_panel_equal(result, expected) - - result = panel.loc[['ItemA', 'ItemB'], :, :] - tm.assert_panel_equal(result, expected) - - result = panel[['ItemA', 'ItemB']] - tm.assert_panel_equal(result, expected) - - result = panel.loc['ItemA':'ItemB'] - tm.assert_panel_equal(result, expected) - - with catch_warnings(record=True): - result = panel.ix[['ItemA', 'ItemB']] - tm.assert_panel_equal(result, expected) - # with an object-like # GH 9140 class TestObject(object): @@ -160,55 +102,3 @@ def __str__(self): expected = p.iloc[0] result = p[obj] tm.assert_frame_equal(result, expected) - - def test_panel_setitem(self): - - with catch_warnings(record=True): - # GH 7763 - # loc and setitem have setting differences - np.random.seed(0) - index = range(3) - columns = list('abc') - - panel = Panel({'A': DataFrame(np.random.randn(3, 3), - index=index, columns=columns), - 'B': 
DataFrame(np.random.randn(3, 3), - index=index, columns=columns), - 'C': DataFrame(np.random.randn(3, 3), - index=index, columns=columns)}) - - replace = DataFrame(np.eye(3, 3), index=range(3), columns=columns) - expected = Panel({'A': replace, 'B': replace, 'C': replace}) - - p = panel.copy() - for idx in list('ABC'): - p[idx] = replace - tm.assert_panel_equal(p, expected) - - p = panel.copy() - for idx in list('ABC'): - p.loc[idx, :, :] = replace - tm.assert_panel_equal(p, expected) - - def test_panel_assignment(self): - - with catch_warnings(record=True): - # GH3777 - wp = Panel(np.random.randn(2, 5, 4), items=['Item1', 'Item2'], - major_axis=date_range('1/1/2000', periods=5), - minor_axis=['A', 'B', 'C', 'D']) - wp2 = Panel(np.random.randn(2, 5, 4), items=['Item1', 'Item2'], - major_axis=date_range('1/1/2000', periods=5), - minor_axis=['A', 'B', 'C', 'D']) - - # TODO: unused? - # expected = wp.loc[['Item1', 'Item2'], :, ['A', 'B']] - - with pytest.raises(NotImplementedError): - wp.loc[['Item1', 'Item2'], :, ['A', 'B']] = wp2.loc[ - ['Item1', 'Item2'], :, ['A', 'B']] - - # to_assign = wp2.loc[['Item1', 'Item2'], :, ['A', 'B']] - # wp.loc[['Item1', 'Item2'], :, ['A', 'B']] = to_assign - # result = wp.loc[['Item1', 'Item2'], :, ['A', 'B']] - # tm.assert_panel_equal(result,expected) diff --git a/pandas/tests/indexing/test_partial.py b/pandas/tests/indexing/test_partial.py index b863afe02c2e8..e8ce5bc4c36ef 100644 --- a/pandas/tests/indexing/test_partial.py +++ b/pandas/tests/indexing/test_partial.py @@ -10,13 +10,12 @@ import pytest import pandas as pd -from pandas import DataFrame, Index, Panel, Series, date_range +from pandas import DataFrame, Index, Series, date_range from pandas.util import testing as tm class TestPartialSetting(object): - @pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning") @pytest.mark.filterwarnings("ignore:\\n.ix:DeprecationWarning") def test_partial_setting(self): @@ -116,35 +115,6 @@ def test_partial_setting(self): df.ix[:, 'C'] = 
df.ix[:, 'A'] tm.assert_frame_equal(df, expected) - with catch_warnings(record=True): - # ## panel ## - p_orig = Panel(np.arange(16).reshape(2, 4, 2), - items=['Item1', 'Item2'], - major_axis=pd.date_range('2001/1/12', periods=4), - minor_axis=['A', 'B'], dtype='float64') - - # panel setting via item - p_orig = Panel(np.arange(16).reshape(2, 4, 2), - items=['Item1', 'Item2'], - major_axis=pd.date_range('2001/1/12', periods=4), - minor_axis=['A', 'B'], dtype='float64') - expected = p_orig.copy() - expected['Item3'] = expected['Item1'] - p = p_orig.copy() - p.loc['Item3'] = p['Item1'] - tm.assert_panel_equal(p, expected) - - # panel with aligned series - expected = p_orig.copy() - expected = expected.transpose(2, 1, 0) - expected['C'] = DataFrame({'Item1': [30, 30, 30, 30], - 'Item2': [32, 32, 32, 32]}, - index=p_orig.major_axis) - expected = expected.transpose(2, 1, 0) - p = p_orig.copy() - p.loc[:, :, 'C'] = Series([30, 32], index=p_orig.items) - tm.assert_panel_equal(p, expected) - # GH 8473 dates = date_range('1/1/2000', periods=8) df_orig = DataFrame(np.random.randn(8, 4), index=dates, @@ -246,7 +216,10 @@ def test_series_partial_set(self): tm.assert_series_equal(result, expected, check_index_type=True) # raises as nothing in in the index - pytest.raises(KeyError, lambda: ser.loc[[3, 3, 3]]) + msg = (r"\"None of \[Int64Index\(\[3, 3, 3\], dtype='int64'\)\] are" + r" in the \[index\]\"") + with pytest.raises(KeyError, match=msg): + ser.loc[[3, 3, 3]] expected = Series([0.2, 0.2, np.nan], index=[2, 2, 3]) with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): @@ -342,7 +315,10 @@ def test_series_partial_set_with_name(self): tm.assert_series_equal(result, expected, check_index_type=True) # raises as nothing in in the index - pytest.raises(KeyError, lambda: ser.loc[[3, 3, 3]]) + msg = (r"\"None of \[Int64Index\(\[3, 3, 3\], dtype='int64'," + r" name=u?'idx'\)\] are in the \[index\]\"") + with pytest.raises(KeyError, match=msg): + ser.loc[[3, 3, 3]] 
exp_idx = Index([2, 2, 3], dtype='int64', name='idx') expected = Series([0.2, 0.2, np.nan], index=exp_idx, name='s') diff --git a/pandas/tests/indexing/test_scalar.py b/pandas/tests/indexing/test_scalar.py index e4b8181a67514..0cd41562541d1 100644 --- a/pandas/tests/indexing/test_scalar.py +++ b/pandas/tests/indexing/test_scalar.py @@ -30,7 +30,9 @@ def _check(f, func, values=False): for f in [d['labels'], d['ts'], d['floats']]: if f is not None: - pytest.raises(ValueError, self.check_values, f, 'iat') + msg = "iAt based indexing can only have integer indexers" + with pytest.raises(ValueError, match=msg): + self.check_values(f, 'iat') # at for f in [d['ints'], d['uints'], d['labels'], @@ -57,7 +59,9 @@ def _check(f, func, values=False): for f in [d['labels'], d['ts'], d['floats']]: if f is not None: - pytest.raises(ValueError, _check, f, 'iat') + msg = "iAt based indexing can only have integer indexers" + with pytest.raises(ValueError, match=msg): + _check(f, 'iat') # at for f in [d['ints'], d['uints'], d['labels'], @@ -107,8 +111,12 @@ def test_imethods_with_dups(self): result = s.iat[2] assert result == 2 - pytest.raises(IndexError, lambda: s.iat[10]) - pytest.raises(IndexError, lambda: s.iat[-10]) + msg = "index 10 is out of bounds for axis 0 with size 5" + with pytest.raises(IndexError, match=msg): + s.iat[10] + msg = "index -10 is out of bounds for axis 0 with size 5" + with pytest.raises(IndexError, match=msg): + s.iat[-10] result = s.iloc[[2, 3]] expected = Series([2, 3], [2, 2], dtype='int64') @@ -128,22 +136,30 @@ def test_at_to_fail(self): s = Series([1, 2, 3], index=list('abc')) result = s.at['a'] assert result == 1 - pytest.raises(ValueError, lambda: s.at[0]) + msg = ("At based indexing on an non-integer index can only have" + " non-integer indexers") + with pytest.raises(ValueError, match=msg): + s.at[0] df = DataFrame({'A': [1, 2, 3]}, index=list('abc')) result = df.at['a', 'A'] assert result == 1 - pytest.raises(ValueError, lambda: df.at['a', 0]) + 
with pytest.raises(ValueError, match=msg): + df.at['a', 0] s = Series([1, 2, 3], index=[3, 2, 1]) result = s.at[1] assert result == 3 - pytest.raises(ValueError, lambda: s.at['a']) + msg = ("At based indexing on an integer index can only have integer" + " indexers") + with pytest.raises(ValueError, match=msg): + s.at['a'] df = DataFrame({0: [1, 2, 3]}, index=[3, 2, 1]) result = df.at[1, 0] assert result == 3 - pytest.raises(ValueError, lambda: df.at['a', 0]) + with pytest.raises(ValueError, match=msg): + df.at['a', 0] # GH 13822, incorrect error string with non-unique columns when missing # column is accessed @@ -205,3 +221,16 @@ def test_iat_setter_incompatible_assignment(self): result.iat[0, 0] = None expected = DataFrame({"a": [None, 1], "b": [4, 5]}) tm.assert_frame_equal(result, expected) + + def test_getitem_zerodim_np_array(self): + # GH24924 + # dataframe __getitem__ + df = DataFrame([[1, 2], [3, 4]]) + result = df[np.array(0)] + expected = Series([1, 3], name=0) + tm.assert_series_equal(result, expected) + + # series __getitem__ + s = Series([1, 2]) + result = s[np.array(0)] + assert result == 1 diff --git a/pandas/tests/internals/test_internals.py b/pandas/tests/internals/test_internals.py index fe0706efdc4f8..bda486411e01e 100644 --- a/pandas/tests/internals/test_internals.py +++ b/pandas/tests/internals/test_internals.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # pylint: disable=W0102 - +from collections import OrderedDict from datetime import date, datetime from distutils.version import LooseVersion import itertools @@ -12,7 +12,7 @@ import pytest from pandas._libs.internals import BlockPlacement -from pandas.compat import OrderedDict, lrange, u, zip +from pandas.compat import lrange, u, zip import pandas as pd from pandas import ( diff --git a/pandas/tests/io/data/legacy_hdf/legacy_table.h5 b/pandas/tests/io/data/legacy_hdf/legacy_table.h5 deleted file mode 100644 index 1c90382d9125c..0000000000000 Binary files 
a/pandas/tests/io/data/legacy_hdf/legacy_table.h5 and /dev/null differ diff --git a/pandas/tests/io/data/legacy_hdf/legacy_table_py2.h5 b/pandas/tests/io/data/legacy_hdf/legacy_table_py2.h5 new file mode 100644 index 0000000000000..3863d714a315b Binary files /dev/null and b/pandas/tests/io/data/legacy_hdf/legacy_table_py2.h5 differ diff --git a/pandas/tests/io/formats/test_console.py b/pandas/tests/io/formats/test_console.py index 055763bf62d6e..a3e0e195f4864 100644 --- a/pandas/tests/io/formats/test_console.py +++ b/pandas/tests/io/formats/test_console.py @@ -1,6 +1,9 @@ +import subprocess # noqa: F401 + import pytest from pandas.io.formats.console import detect_console_encoding +from pandas.io.formats.terminal import _get_terminal_size_tput class MockEncoding(object): # TODO(py27): replace with mock @@ -72,3 +75,18 @@ def test_detect_console_encoding_fallback_to_default(monkeypatch, std, locale): context.setattr('sys.stdout', MockEncoding(std)) context.setattr('sys.getdefaultencoding', lambda: 'sysDefaultEncoding') assert detect_console_encoding() == 'sysDefaultEncoding' + + +@pytest.mark.parametrize("size", ['', ['']]) +def test_terminal_unknown_dimensions(monkeypatch, size, mocker): + + def communicate(*args, **kwargs): + return size + + monkeypatch.setattr('subprocess.Popen', mocker.Mock()) + monkeypatch.setattr('subprocess.Popen.return_value.returncode', None) + monkeypatch.setattr( + 'subprocess.Popen.return_value.communicate', communicate) + result = _get_terminal_size_tput() + + assert result is None diff --git a/pandas/tests/io/formats/test_format.py b/pandas/tests/io/formats/test_format.py index 5d922ccaf1fd5..b0cf5a2f17609 100644 --- a/pandas/tests/io/formats/test_format.py +++ b/pandas/tests/io/formats/test_format.py @@ -12,6 +12,7 @@ import os import re import sys +import textwrap import warnings import dateutil @@ -2777,3 +2778,17 @@ def test_format_percentiles(): fmt.format_percentiles([2, 0.1, 0.5]) with pytest.raises(ValueError, match=msg): 
fmt.format_percentiles([0.1, 0.5, 'a']) + + +def test_repr_html_ipython_config(ip): + code = textwrap.dedent("""\ + import pandas as pd + df = pd.DataFrame({"A": [1, 2]}) + df._repr_html_() + + cfg = get_ipython().config + cfg['IPKernelApp']['parent_appname'] + df._repr_html_() + """) + result = ip.run_cell(code) + assert not result.error_in_exec diff --git a/pandas/tests/io/formats/test_to_html.py b/pandas/tests/io/formats/test_to_html.py index 554cfd306e2a7..428f1411a10a6 100644 --- a/pandas/tests/io/formats/test_to_html.py +++ b/pandas/tests/io/formats/test_to_html.py @@ -15,6 +15,15 @@ import pandas.io.formats.format as fmt +lorem_ipsum = ( + "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod" + " tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim" + " veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex" + " ea commodo consequat. Duis aute irure dolor in reprehenderit in" + " voluptate velit esse cillum dolore eu fugiat nulla pariatur. 
Excepteur" + " sint occaecat cupidatat non proident, sunt in culpa qui officia" + " deserunt mollit anim id est laborum.") + def expected_html(datapath, name): """ @@ -600,3 +609,17 @@ def test_to_html_render_links(render_links, expected, datapath): result = df.to_html(render_links=render_links) expected = expected_html(datapath, expected) assert result == expected + + +@pytest.mark.parametrize('method,expected', [ + ('to_html', lambda x:lorem_ipsum), + ('_repr_html_', lambda x:lorem_ipsum[:x - 4] + '...') # regression case +]) +@pytest.mark.parametrize('max_colwidth', [10, 20, 50, 100]) +def test_ignore_display_max_colwidth(method, expected, max_colwidth): + # see gh-17004 + df = DataFrame([lorem_ipsum]) + with pd.option_context('display.max_colwidth', max_colwidth): + result = getattr(df, method)() + expected = expected(max_colwidth) + assert expected in result diff --git a/pandas/tests/io/generate_legacy_storage_files.py b/pandas/tests/io/generate_legacy_storage_files.py index 6774eac6d6c1a..6c6e28cb1c090 100755 --- a/pandas/tests/io/generate_legacy_storage_files.py +++ b/pandas/tests/io/generate_legacy_storage_files.py @@ -41,7 +41,6 @@ import os import platform as pl import sys -from warnings import catch_warnings, filterwarnings import numpy as np @@ -49,7 +48,7 @@ import pandas from pandas import ( - Categorical, DataFrame, Index, MultiIndex, NaT, Panel, Period, Series, + Categorical, DataFrame, Index, MultiIndex, NaT, Period, Series, SparseDataFrame, SparseSeries, Timestamp, bdate_range, date_range, period_range, timedelta_range, to_msgpack) @@ -187,18 +186,6 @@ def create_data(): u'C': Timestamp('20130603', tz='UTC')}, index=range(5)) ) - with catch_warnings(record=True): - filterwarnings("ignore", "\\nPanel", FutureWarning) - mixed_dup_panel = Panel({u'ItemA': frame[u'float'], - u'ItemB': frame[u'int']}) - mixed_dup_panel.items = [u'ItemA', u'ItemA'] - panel = dict(float=Panel({u'ItemA': frame[u'float'], - u'ItemB': frame[u'float'] + 1}), - dup=Panel( - 
np.arange(30).reshape(3, 5, 2).astype(np.float64), - items=[u'A', u'B', u'A']), - mixed_dup=mixed_dup_panel) - cat = dict(int8=Categorical(list('abcdefg')), int16=Categorical(np.arange(1000)), int32=Categorical(np.arange(10000))) @@ -241,7 +228,6 @@ def create_data(): return dict(series=series, frame=frame, - panel=panel, index=index, scalars=scalars, mi=mi, diff --git a/pandas/tests/io/json/test_json_table_schema.py b/pandas/tests/io/json/test_json_table_schema.py index 6fa3b5b3b2ed4..351b495e5d8fc 100644 --- a/pandas/tests/io/json/test_json_table_schema.py +++ b/pandas/tests/io/json/test_json_table_schema.py @@ -502,12 +502,12 @@ class TestTableOrientReader(object): @pytest.mark.parametrize("vals", [ {'ints': [1, 2, 3, 4]}, {'objects': ['a', 'b', 'c', 'd']}, + {'objects': ['1', '2', '3', '4']}, {'date_ranges': pd.date_range('2016-01-01', freq='d', periods=4)}, {'categoricals': pd.Series(pd.Categorical(['a', 'b', 'c', 'c']))}, {'ordered_cats': pd.Series(pd.Categorical(['a', 'b', 'c', 'c'], ordered=True))}, - pytest.param({'floats': [1., 2., 3., 4.]}, - marks=pytest.mark.xfail), + {'floats': [1., 2., 3., 4.]}, {'floats': [1.1, 2.2, 3.3, 4.4]}, {'bools': [True, False, False, True]}]) def test_read_json_table_orient(self, index_nm, vals, recwarn): @@ -564,17 +564,10 @@ def test_multiindex(self, index_names): result = pd.read_json(out, orient="table") tm.assert_frame_equal(df, result) - @pytest.mark.parametrize("strict_check", [ - pytest.param(True, marks=pytest.mark.xfail), - False - ]) - def test_empty_frame_roundtrip(self, strict_check): + def test_empty_frame_roundtrip(self): # GH 21287 df = pd.DataFrame([], columns=['a', 'b', 'c']) expected = df.copy() out = df.to_json(orient='table') result = pd.read_json(out, orient='table') - # TODO: When DF coercion issue (#21345) is resolved tighten type checks - tm.assert_frame_equal(expected, result, - check_dtype=strict_check, - check_index_type=strict_check) + tm.assert_frame_equal(expected, result) diff --git 
a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py index 23c40276072d6..ed598b730d960 100644 --- a/pandas/tests/io/json/test_pandas.py +++ b/pandas/tests/io/json/test_pandas.py @@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- # pylint: disable-msg=W0612,E1101 +from collections import OrderedDict from datetime import timedelta import json import os @@ -7,8 +8,7 @@ import numpy as np import pytest -from pandas.compat import ( - OrderedDict, StringIO, is_platform_32bit, lrange, range) +from pandas.compat import StringIO, is_platform_32bit, lrange, range import pandas.util._test_decorators as td import pandas as pd @@ -194,7 +194,7 @@ def _check_orient(df, orient, dtype=None, numpy=False, else: unser = unser.sort_index() - if dtype is False: + if not dtype: check_dtype = False if not convert_axes and df.index.dtype.type == np.datetime64: @@ -1202,6 +1202,40 @@ def test_data_frame_size_after_to_json(self): assert size_before == size_after + @pytest.mark.parametrize('index', [None, [1, 2], [1., 2.], ['a', 'b'], + ['1', '2'], ['1.', '2.']]) + @pytest.mark.parametrize('columns', [['a', 'b'], ['1', '2'], ['1.', '2.']]) + def test_from_json_to_json_table_index_and_columns(self, index, columns): + # GH25433 GH25435 + expected = DataFrame([[1, 2], [3, 4]], index=index, columns=columns) + dfjson = expected.to_json(orient='table') + result = pd.read_json(dfjson, orient='table') + assert_frame_equal(result, expected) + + def test_from_json_to_json_table_dtypes(self): + # GH21345 + expected = pd.DataFrame({'a': [1, 2], 'b': [3., 4.], 'c': ['5', '6']}) + dfjson = expected.to_json(orient='table') + result = pd.read_json(dfjson, orient='table') + assert_frame_equal(result, expected) + + @pytest.mark.parametrize('dtype', [True, {'b': int, 'c': int}]) + def test_read_json_table_dtype_raises(self, dtype): + # GH21345 + df = pd.DataFrame({'a': [1, 2], 'b': [3., 4.], 'c': ['5', '6']}) + dfjson = df.to_json(orient='table') + msg = "cannot pass both dtype and orient='table'" 
+ with pytest.raises(ValueError, match=msg): + pd.read_json(dfjson, orient='table', dtype=dtype) + + def test_read_json_table_convert_axes_raises(self): + # GH25433 GH25435 + df = DataFrame([[1, 2], [3, 4]], index=[1., 2.], columns=['1.', '2.']) + dfjson = df.to_json(orient='table') + msg = "cannot pass both convert_axes and orient='table'" + with pytest.raises(ValueError, match=msg): + pd.read_json(dfjson, orient='table', convert_axes=True) + @pytest.mark.parametrize('data, expected', [ (DataFrame([[1, 2], [4, 5]], columns=['a', 'b']), {'columns': ['a', 'b'], 'data': [[1, 2], [4, 5]]}), @@ -1262,3 +1296,13 @@ def test_index_false_error_to_json(self, orient): "'orient' is 'split' or 'table'") with pytest.raises(ValueError, match=msg): df.to_json(orient=orient, index=False) + + @pytest.mark.parametrize('orient', ['split', 'table']) + @pytest.mark.parametrize('index', [True, False]) + def test_index_false_from_json_to_json(self, orient, index): + # GH25170 + # Test index=False in from_json to_json + expected = DataFrame({'a': [1, 2], 'b': [3, 4]}) + dfjson = expected.to_json(orient=orient, index=index) + result = read_json(dfjson, orient=orient) + assert_frame_equal(result, expected) diff --git a/pandas/tests/io/msgpack/test_pack.py b/pandas/tests/io/msgpack/test_pack.py index 8c82d0d2cf870..078d9f4ceb649 100644 --- a/pandas/tests/io/msgpack/test_pack.py +++ b/pandas/tests/io/msgpack/test_pack.py @@ -1,10 +1,10 @@ # coding: utf-8 - +from collections import OrderedDict import struct import pytest -from pandas.compat import OrderedDict, u +from pandas.compat import u from pandas import compat diff --git a/pandas/tests/io/test_clipboard.py b/pandas/tests/io/test_clipboard.py index 8eb26d9f3dec5..565db92210b0a 100644 --- a/pandas/tests/io/test_clipboard.py +++ b/pandas/tests/io/test_clipboard.py @@ -12,6 +12,7 @@ from pandas.util import testing as tm from pandas.util.testing import makeCustomDataframe as mkdf +from pandas.io.clipboard import clipboard_get, clipboard_set 
from pandas.io.clipboard.exceptions import PyperclipException try: @@ -30,8 +31,8 @@ def build_kwargs(sep, excel): return kwargs -@pytest.fixture(params=['delims', 'utf8', 'string', 'long', 'nonascii', - 'colwidth', 'mixed', 'float', 'int']) +@pytest.fixture(params=['delims', 'utf8', 'utf16', 'string', 'long', + 'nonascii', 'colwidth', 'mixed', 'float', 'int']) def df(request): data_type = request.param @@ -41,6 +42,10 @@ def df(request): elif data_type == 'utf8': return pd.DataFrame({'a': ['µasd', 'Ωœ∑´'], 'b': ['øπ∆˚¬', 'œ∑´®']}) + elif data_type == 'utf16': + return pd.DataFrame({'a': ['\U0001f44d\U0001f44d', + '\U0001f44d\U0001f44d'], + 'b': ['abc', 'def']}) elif data_type == 'string': return mkdf(5, 3, c_idx_type='s', r_idx_type='i', c_idx_names=[None], r_idx_names=[None]) @@ -225,3 +230,14 @@ def test_invalid_encoding(self, df): @pytest.mark.parametrize('enc', ['UTF-8', 'utf-8', 'utf8']) def test_round_trip_valid_encodings(self, enc, df): self.check_round_trip_frame(df, encoding=enc) + + +@pytest.mark.single +@pytest.mark.clipboard +@pytest.mark.skipif(not _DEPS_INSTALLED, + reason="clipboard primitives not installed") +@pytest.mark.parametrize('data', [u'\U0001f44d...', u'Ωœ∑´...', 'abcd...']) +def test_raw_roundtrip(data): + # PR #25040 wide unicode wasn't copied correctly on PY3 on windows + clipboard_set(data) + assert data == clipboard_get() diff --git a/pandas/tests/io/test_excel.py b/pandas/tests/io/test_excel.py index 717e9bc23c6b1..04c9c58a326a4 100644 --- a/pandas/tests/io/test_excel.py +++ b/pandas/tests/io/test_excel.py @@ -5,7 +5,6 @@ from functools import partial import os import warnings -from warnings import catch_warnings import numpy as np from numpy import nan @@ -2360,7 +2359,7 @@ def test_register_writer(self): class DummyClass(ExcelWriter): called_save = False called_write_cells = False - supported_extensions = ['test', 'xlsx', 'xls'] + supported_extensions = ['xlsx', 'xls'] engine = 'dummy' def save(self): @@ -2378,19 +2377,13 @@ def 
check_called(func): with pd.option_context('io.excel.xlsx.writer', 'dummy'): register_writer(DummyClass) - writer = ExcelWriter('something.test') + writer = ExcelWriter('something.xlsx') assert isinstance(writer, DummyClass) df = tm.makeCustomDataframe(1, 1) - - with catch_warnings(record=True): - panel = tm.makePanel() - func = lambda: df.to_excel('something.test') - check_called(func) - check_called(lambda: panel.to_excel('something.test')) - check_called(lambda: df.to_excel('something.xlsx')) - check_called( - lambda: df.to_excel( - 'something.xls', engine='dummy')) + check_called(lambda: df.to_excel('something.xlsx')) + check_called( + lambda: df.to_excel( + 'something.xls', engine='dummy')) @pytest.mark.parametrize('engine', [ @@ -2417,7 +2410,10 @@ def style(df): ['', '', '']], index=df.index, columns=df.columns) - def assert_equal_style(cell1, cell2): + def assert_equal_style(cell1, cell2, engine): + if engine in ['xlsxwriter', 'openpyxl']: + pytest.xfail(reason=("GH25351: failing on some attribute " + "comparisons in {}".format(engine))) # XXX: should find a better way to check equality assert cell1.alignment.__dict__ == cell2.alignment.__dict__ assert cell1.border.__dict__ == cell2.border.__dict__ @@ -2461,7 +2457,7 @@ def custom_converter(css): assert len(col1) == len(col2) for cell1, cell2 in zip(col1, col2): assert cell1.value == cell2.value - assert_equal_style(cell1, cell2) + assert_equal_style(cell1, cell2, engine) n_cells += 1 # ensure iteration actually happened: @@ -2519,7 +2515,7 @@ def custom_converter(css): assert cell1.number_format == 'General' assert cell2.number_format == '0%' else: - assert_equal_style(cell1, cell2) + assert_equal_style(cell1, cell2, engine) assert cell1.value == cell2.value n_cells += 1 @@ -2537,7 +2533,7 @@ def custom_converter(css): assert not cell1.font.bold assert cell2.font.bold else: - assert_equal_style(cell1, cell2) + assert_equal_style(cell1, cell2, engine) assert cell1.value == cell2.value n_cells += 1 diff 
--git a/pandas/tests/io/test_packers.py b/pandas/tests/io/test_packers.py index 9eb6d327be025..375557c43a3ae 100644 --- a/pandas/tests/io/test_packers.py +++ b/pandas/tests/io/test_packers.py @@ -13,9 +13,8 @@ import pandas from pandas import ( - Categorical, DataFrame, Index, Interval, MultiIndex, NaT, Panel, Period, - Series, Timestamp, bdate_range, compat, date_range, period_range) -from pandas.tests.test_panel import assert_panel_equal + Categorical, DataFrame, Index, Interval, MultiIndex, NaT, Period, Series, + Timestamp, bdate_range, compat, date_range, period_range) import pandas.util.testing as tm from pandas.util.testing import ( assert_categorical_equal, assert_frame_equal, assert_index_equal, @@ -62,8 +61,6 @@ def check_arbitrary(a, b): assert(len(a) == len(b)) for a_, b_ in zip(a, b): check_arbitrary(a_, b_) - elif isinstance(a, Panel): - assert_panel_equal(a, b) elif isinstance(a, DataFrame): assert_frame_equal(a, b) elif isinstance(a, Series): @@ -490,23 +487,12 @@ def setup_method(self, method): 'int': DataFrame(dict(A=data['B'], B=Series(data['B']) + 1)), 'mixed': DataFrame(data)} - self.panel = { - 'float': Panel(dict(ItemA=self.frame['float'], - ItemB=self.frame['float'] + 1))} - def test_basic_frame(self): for s, i in self.frame.items(): i_rec = self.encode_decode(i) assert_frame_equal(i, i_rec) - def test_basic_panel(self): - - with catch_warnings(record=True): - for s, i in self.panel.items(): - i_rec = self.encode_decode(i) - assert_panel_equal(i, i_rec) - def test_multi(self): i_rec = self.encode_decode(self.frame) @@ -876,6 +862,10 @@ class TestMsgpack(object): def check_min_structure(self, data, version): for typ, v in self.minimum_structure.items(): + if typ == "panel": + # FIXME: kludge; get this key out of the legacy file + continue + assert typ in data, '"{0}" not found in unpacked data'.format(typ) for kind in v: msg = '"{0}" not found in data["{1}"]'.format(kind, typ) @@ -887,6 +877,11 @@ def compare(self, current_data, all_data, vf, 
version): data = read_msgpack(vf, encoding='latin-1') else: data = read_msgpack(vf) + + if "panel" in data: + # FIXME: kludge; get the key out of the stored file + del data["panel"] + self.check_min_structure(data, version) for typ, dv in data.items(): assert typ in all_data, ('unpacked data contains ' diff --git a/pandas/tests/io/test_pickle.py b/pandas/tests/io/test_pickle.py index 7f3fe1aa401ea..b4befadaddc42 100644 --- a/pandas/tests/io/test_pickle.py +++ b/pandas/tests/io/test_pickle.py @@ -75,6 +75,10 @@ def compare(data, vf, version): m = globals() for typ, dv in data.items(): + if typ == "panel": + # FIXME: kludge; get this key out of the legacy file + continue + for dt, result in dv.items(): try: expected = data[typ][dt] diff --git a/pandas/tests/io/test_pytables.py b/pandas/tests/io/test_pytables.py index 517a3e059469c..69ff32d1b728b 100644 --- a/pandas/tests/io/test_pytables.py +++ b/pandas/tests/io/test_pytables.py @@ -19,11 +19,11 @@ import pandas as pd from pandas import ( Categorical, DataFrame, DatetimeIndex, Index, Int64Index, MultiIndex, - Panel, RangeIndex, Series, Timestamp, bdate_range, compat, concat, - date_range, isna, timedelta_range) + RangeIndex, Series, Timestamp, bdate_range, compat, concat, date_range, + isna, timedelta_range) import pandas.util.testing as tm from pandas.util.testing import ( - assert_frame_equal, assert_panel_equal, assert_series_equal, set_timezone) + assert_frame_equal, assert_series_equal, set_timezone) from pandas.io import pytables as pytables # noqa:E402 from pandas.io.formats.printing import pprint_thing @@ -34,6 +34,15 @@ tables = pytest.importorskip('tables') +# TODO: +# remove when gh-24839 is fixed; this affects numpy 1.16 +# and pytables 3.4.4 +xfail_non_writeable = pytest.mark.xfail( + LooseVersion(np.__version__) >= LooseVersion('1.16'), + reason=('gh-25511, gh-24839. 
pytables needs a ' + 'release beyong 3.4.4 to support numpy 1.16x')) + + _default_compressor = ('blosc' if LooseVersion(tables.__version__) >= LooseVersion('2.2') else 'zlib') @@ -141,7 +150,6 @@ def teardown_method(self, method): @pytest.mark.single -@pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning") class TestHDFStore(Base): def test_format_kwarg_in_constructor(self): @@ -185,11 +193,6 @@ def roundtrip(key, obj, **kwargs): o = tm.makeDataFrame() assert_frame_equal(o, roundtrip('frame', o)) - with catch_warnings(record=True): - - o = tm.makePanel() - assert_panel_equal(o, roundtrip('panel', o)) - # table df = DataFrame(dict(A=lrange(5), B=lrange(5))) df.to_hdf(path, 'table', append=True) @@ -348,11 +351,9 @@ def test_keys(self): store['a'] = tm.makeTimeSeries() store['b'] = tm.makeStringSeries() store['c'] = tm.makeDataFrame() - with catch_warnings(record=True): - store['d'] = tm.makePanel() - store['foo/bar'] = tm.makePanel() - assert len(store) == 5 - expected = {'/a', '/b', '/c', '/d', '/foo/bar'} + + assert len(store) == 3 + expected = {'/a', '/b', '/c'} assert set(store.keys()) == expected assert set(store) == expected @@ -388,11 +389,6 @@ def test_repr(self): store['b'] = tm.makeStringSeries() store['c'] = tm.makeDataFrame() - with catch_warnings(record=True): - store['d'] = tm.makePanel() - store['foo/bar'] = tm.makePanel() - store.append('e', tm.makePanel()) - df = tm.makeDataFrame() df['obj1'] = 'foo' df['obj2'] = 'bar' @@ -875,6 +871,7 @@ def test_put_integer(self): df = DataFrame(np.random.randn(50, 100)) self._check_roundtrip(df, tm.assert_frame_equal) + @xfail_non_writeable def test_put_mixed_type(self): df = tm.makeTimeDataFrame() df['obj1'] = 'foo' @@ -936,21 +933,6 @@ def test_append(self): store.append('/df3 foo', df[10:]) tm.assert_frame_equal(store['df3 foo'], df) - # panel - wp = tm.makePanel() - _maybe_remove(store, 'wp1') - store.append('wp1', wp.iloc[:, :10, :]) - store.append('wp1', wp.iloc[:, 10:, :]) - 
assert_panel_equal(store['wp1'], wp) - - # test using differt order of items on the non-index axes - _maybe_remove(store, 'wp1') - wp_append1 = wp.iloc[:, :10, :] - store.append('wp1', wp_append1) - wp_append2 = wp.iloc[:, 10:, :].reindex(items=wp.items[::-1]) - store.append('wp1', wp_append2) - assert_panel_equal(store['wp1'], wp) - # dtype issues - mizxed type in a single object column df = DataFrame(data=[[1, 2], [0, 1], [1, 2], [0, 0]]) df['mixed_column'] = 'testing' @@ -1254,22 +1236,6 @@ def test_append_all_nans(self): reloaded = read_hdf(path, 'df_with_missing') tm.assert_frame_equal(df_with_missing, reloaded) - matrix = [[[np.nan, np.nan, np.nan], [1, np.nan, np.nan]], - [[np.nan, np.nan, np.nan], [np.nan, 5, 6]], - [[np.nan, np.nan, np.nan], [np.nan, 3, np.nan]]] - - with catch_warnings(record=True): - panel_with_missing = Panel(matrix, - items=['Item1', 'Item2', 'Item3'], - major_axis=[1, 2], - minor_axis=['A', 'B', 'C']) - - with ensure_clean_path(self.path) as path: - panel_with_missing.to_hdf( - path, 'panel_with_missing', format='table') - reloaded_panel = read_hdf(path, 'panel_with_missing') - tm.assert_panel_equal(panel_with_missing, reloaded_panel) - def test_append_frame_column_oriented(self): with ensure_clean_store(self.path) as store: @@ -1342,40 +1308,11 @@ def test_append_with_strings(self): with ensure_clean_store(self.path) as store: with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - wp = tm.makePanel() - wp2 = wp.rename( - minor_axis={x: "%s_extra" % x for x in wp.minor_axis}) def check_col(key, name, size): assert getattr(store.get_storer(key) .table.description, name).itemsize == size - store.append('s1', wp, min_itemsize=20) - store.append('s1', wp2) - expected = concat([wp, wp2], axis=2) - expected = expected.reindex( - minor_axis=sorted(expected.minor_axis)) - assert_panel_equal(store['s1'], expected) - check_col('s1', 'minor_axis', 20) - - # test dict format - store.append('s2', wp, 
min_itemsize={'minor_axis': 20}) - store.append('s2', wp2) - expected = concat([wp, wp2], axis=2) - expected = expected.reindex( - minor_axis=sorted(expected.minor_axis)) - assert_panel_equal(store['s2'], expected) - check_col('s2', 'minor_axis', 20) - - # apply the wrong field (similar to #1) - store.append('s3', wp, min_itemsize={'major_axis': 20}) - pytest.raises(ValueError, store.append, 's3', wp2) - - # test truncation of bigger strings - store.append('s4', wp) - pytest.raises(ValueError, store.append, 's4', wp2) - # avoid truncation on elements df = DataFrame([[123, 'asdqwerty'], [345, 'dggnhebbsdfbdfb']]) store.append('df_big', df) @@ -1511,7 +1448,10 @@ def test_to_hdf_with_min_itemsize(self): tm.assert_series_equal(pd.read_hdf(path, 'ss4'), pd.concat([df['B'], df2['B']])) - @pytest.mark.parametrize("format", ['fixed', 'table']) + @pytest.mark.parametrize( + "format", + [pytest.param('fixed', marks=xfail_non_writeable), + 'table']) def test_to_hdf_errors(self, format): data = ['\ud800foo'] @@ -1674,32 +1614,6 @@ def check_col(key, name, size): (df_dc.string == 'foo')] tm.assert_frame_equal(result, expected) - with ensure_clean_store(self.path) as store: - with catch_warnings(record=True): - # panel - # GH5717 not handling data_columns - np.random.seed(1234) - p = tm.makePanel() - - store.append('p1', p) - tm.assert_panel_equal(store.select('p1'), p) - - store.append('p2', p, data_columns=True) - tm.assert_panel_equal(store.select('p2'), p) - - result = store.select('p2', where='ItemA>0') - expected = p.to_frame() - expected = expected[expected['ItemA'] > 0] - tm.assert_frame_equal(result.to_frame(), expected) - - result = store.select( - 'p2', where='ItemA>0 & minor_axis=["A","B"]') - expected = p.to_frame() - expected = expected[expected['ItemA'] > 0] - expected = expected[expected.reset_index( - level=['major']).index.isin(['A', 'B'])] - tm.assert_frame_equal(result.to_frame(), expected) - def test_create_table_index(self): with 
ensure_clean_store(self.path) as store: @@ -1708,37 +1622,6 @@ def test_create_table_index(self): def col(t, column): return getattr(store.get_storer(t).table.cols, column) - # index=False - wp = tm.makePanel() - store.append('p5', wp, index=False) - store.create_table_index('p5', columns=['major_axis']) - assert(col('p5', 'major_axis').is_indexed is True) - assert(col('p5', 'minor_axis').is_indexed is False) - - # index=True - store.append('p5i', wp, index=True) - assert(col('p5i', 'major_axis').is_indexed is True) - assert(col('p5i', 'minor_axis').is_indexed is True) - - # default optlevels - store.get_storer('p5').create_index() - assert(col('p5', 'major_axis').index.optlevel == 6) - assert(col('p5', 'minor_axis').index.kind == 'medium') - - # let's change the indexing scheme - store.create_table_index('p5') - assert(col('p5', 'major_axis').index.optlevel == 6) - assert(col('p5', 'minor_axis').index.kind == 'medium') - store.create_table_index('p5', optlevel=9) - assert(col('p5', 'major_axis').index.optlevel == 9) - assert(col('p5', 'minor_axis').index.kind == 'medium') - store.create_table_index('p5', kind='full') - assert(col('p5', 'major_axis').index.optlevel == 9) - assert(col('p5', 'minor_axis').index.kind == 'full') - store.create_table_index('p5', optlevel=1, kind='light') - assert(col('p5', 'major_axis').index.optlevel == 1) - assert(col('p5', 'minor_axis').index.kind == 'light') - # data columns df = tm.makeTimeDataFrame() df['string'] = 'foo' @@ -1761,19 +1644,6 @@ def col(t, column): store.put('f2', df) pytest.raises(TypeError, store.create_table_index, 'f2') - def test_append_diff_item_order(self): - - with catch_warnings(record=True): - wp = tm.makePanel() - wp1 = wp.iloc[:, :10, :] - wp2 = wp.iloc[wp.items.get_indexer(['ItemC', 'ItemB', 'ItemA']), - 10:, :] - - with ensure_clean_store(self.path) as store: - store.put('panel', wp1, format='table') - pytest.raises(ValueError, store.put, 'panel', wp2, - append=True) - def 
test_append_hierarchical(self): index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'], ['one', 'two', 'three']], @@ -1958,6 +1828,7 @@ def test_pass_spec_to_storer(self): pytest.raises(TypeError, store.select, 'df', where=[('columns=A')]) + @xfail_non_writeable def test_append_misc(self): with ensure_clean_store(self.path) as store: @@ -1987,10 +1858,6 @@ def check(obj, comparator): df['time2'] = Timestamp('20130102') check(df, tm.assert_frame_equal) - with catch_warnings(record=True): - p = tm.makePanel() - check(p, assert_panel_equal) - # empty frame, GH4273 with ensure_clean_store(self.path) as store: @@ -2011,24 +1878,6 @@ def check(obj, comparator): store.put('df2', df) assert_frame_equal(store.select('df2'), df) - with catch_warnings(record=True): - - # 0 len - p_empty = Panel(items=list('ABC')) - store.append('p', p_empty) - pytest.raises(KeyError, store.select, 'p') - - # repeated append of 0/non-zero frames - p = Panel(np.random.randn(3, 4, 5), items=list('ABC')) - store.append('p', p) - assert_panel_equal(store.select('p'), p) - store.append('p', p_empty) - assert_panel_equal(store.select('p'), p) - - # store - store.put('p2', p_empty) - assert_panel_equal(store.select('p2'), p_empty) - def test_append_raise(self): with ensure_clean_store(self.path) as store: @@ -2143,24 +1992,6 @@ def test_table_mixed_dtypes(self): store.append('df1_mixed', df) tm.assert_frame_equal(store.select('df1_mixed'), df) - with catch_warnings(record=True): - - # panel - wp = tm.makePanel() - wp['obj1'] = 'foo' - wp['obj2'] = 'bar' - wp['bool1'] = wp['ItemA'] > 0 - wp['bool2'] = wp['ItemB'] > 0 - wp['int1'] = 1 - wp['int2'] = 2 - wp = wp._consolidate() - - with catch_warnings(record=True): - - with ensure_clean_store(self.path) as store: - store.append('p1_mixed', wp) - assert_panel_equal(store.select('p1_mixed'), wp) - def test_unimplemented_dtypes_table_columns(self): with ensure_clean_store(self.path) as store: @@ -2189,6 +2020,7 @@ def 
test_unimplemented_dtypes_table_columns(self): # this fails because we have a date in the object block...... pytest.raises(TypeError, store.append, 'df_unimplemented', df) + @xfail_non_writeable @pytest.mark.skipif( LooseVersion(np.__version__) == LooseVersion('1.15.0'), reason=("Skipping pytables test when numpy version is " @@ -2308,193 +2140,6 @@ def test_remove(self): del store['b'] assert len(store) == 0 - def test_remove_where(self): - - with ensure_clean_store(self.path) as store: - - with catch_warnings(record=True): - - # non-existance - crit1 = 'index>foo' - pytest.raises(KeyError, store.remove, 'a', [crit1]) - - # try to remove non-table (with crit) - # non-table ok (where = None) - wp = tm.makePanel(30) - store.put('wp', wp, format='table') - store.remove('wp', ["minor_axis=['A', 'D']"]) - rs = store.select('wp') - expected = wp.reindex(minor_axis=['B', 'C']) - assert_panel_equal(rs, expected) - - # empty where - _maybe_remove(store, 'wp') - store.put('wp', wp, format='table') - - # deleted number (entire table) - n = store.remove('wp', []) - assert n == 120 - - # non - empty where - _maybe_remove(store, 'wp') - store.put('wp', wp, format='table') - pytest.raises(ValueError, store.remove, - 'wp', ['foo']) - - def test_remove_startstop(self): - # GH #4835 and #6177 - - with ensure_clean_store(self.path) as store: - - with catch_warnings(record=True): - wp = tm.makePanel(30) - - # start - _maybe_remove(store, 'wp1') - store.put('wp1', wp, format='t') - n = store.remove('wp1', start=32) - assert n == 120 - 32 - result = store.select('wp1') - expected = wp.reindex(major_axis=wp.major_axis[:32 // 4]) - assert_panel_equal(result, expected) - - _maybe_remove(store, 'wp2') - store.put('wp2', wp, format='t') - n = store.remove('wp2', start=-32) - assert n == 32 - result = store.select('wp2') - expected = wp.reindex(major_axis=wp.major_axis[:-32 // 4]) - assert_panel_equal(result, expected) - - # stop - _maybe_remove(store, 'wp3') - store.put('wp3', wp, 
format='t') - n = store.remove('wp3', stop=32) - assert n == 32 - result = store.select('wp3') - expected = wp.reindex(major_axis=wp.major_axis[32 // 4:]) - assert_panel_equal(result, expected) - - _maybe_remove(store, 'wp4') - store.put('wp4', wp, format='t') - n = store.remove('wp4', stop=-32) - assert n == 120 - 32 - result = store.select('wp4') - expected = wp.reindex(major_axis=wp.major_axis[-32 // 4:]) - assert_panel_equal(result, expected) - - # start n stop - _maybe_remove(store, 'wp5') - store.put('wp5', wp, format='t') - n = store.remove('wp5', start=16, stop=-16) - assert n == 120 - 32 - result = store.select('wp5') - expected = wp.reindex( - major_axis=(wp.major_axis[:16 // 4] - .union(wp.major_axis[-16 // 4:]))) - assert_panel_equal(result, expected) - - _maybe_remove(store, 'wp6') - store.put('wp6', wp, format='t') - n = store.remove('wp6', start=16, stop=16) - assert n == 0 - result = store.select('wp6') - expected = wp.reindex(major_axis=wp.major_axis) - assert_panel_equal(result, expected) - - # with where - _maybe_remove(store, 'wp7') - - # TODO: unused? 
- date = wp.major_axis.take(np.arange(0, 30, 3)) # noqa - - crit = 'major_axis=date' - store.put('wp7', wp, format='t') - n = store.remove('wp7', where=[crit], stop=80) - assert n == 28 - result = store.select('wp7') - expected = wp.reindex(major_axis=wp.major_axis.difference( - wp.major_axis[np.arange(0, 20, 3)])) - assert_panel_equal(result, expected) - - def test_remove_crit(self): - - with ensure_clean_store(self.path) as store: - - with catch_warnings(record=True): - wp = tm.makePanel(30) - - # group row removal - _maybe_remove(store, 'wp3') - date4 = wp.major_axis.take([0, 1, 2, 4, 5, 6, 8, 9, 10]) - crit4 = 'major_axis=date4' - store.put('wp3', wp, format='t') - n = store.remove('wp3', where=[crit4]) - assert n == 36 - - result = store.select('wp3') - expected = wp.reindex( - major_axis=wp.major_axis.difference(date4)) - assert_panel_equal(result, expected) - - # upper half - _maybe_remove(store, 'wp') - store.put('wp', wp, format='table') - date = wp.major_axis[len(wp.major_axis) // 2] - - crit1 = 'major_axis>date' - crit2 = "minor_axis=['A', 'D']" - n = store.remove('wp', where=[crit1]) - assert n == 56 - - n = store.remove('wp', where=[crit2]) - assert n == 32 - - result = store['wp'] - expected = wp.truncate(after=date).reindex(minor=['B', 'C']) - assert_panel_equal(result, expected) - - # individual row elements - _maybe_remove(store, 'wp2') - store.put('wp2', wp, format='table') - - date1 = wp.major_axis[1:3] - crit1 = 'major_axis=date1' - store.remove('wp2', where=[crit1]) - result = store.select('wp2') - expected = wp.reindex( - major_axis=wp.major_axis.difference(date1)) - assert_panel_equal(result, expected) - - date2 = wp.major_axis[5] - crit2 = 'major_axis=date2' - store.remove('wp2', where=[crit2]) - result = store['wp2'] - expected = wp.reindex( - major_axis=(wp.major_axis - .difference(date1) - .difference(Index([date2])) - )) - assert_panel_equal(result, expected) - - date3 = [wp.major_axis[7], wp.major_axis[9]] - crit3 = 'major_axis=date3' - 
store.remove('wp2', where=[crit3]) - result = store['wp2'] - expected = wp.reindex(major_axis=wp.major_axis - .difference(date1) - .difference(Index([date2])) - .difference(Index(date3))) - assert_panel_equal(result, expected) - - # corners - _maybe_remove(store, 'wp4') - store.put('wp4', wp, format='table') - n = store.remove( - 'wp4', where="major_axis>wp.major_axis[-1]") - result = store.select('wp4') - assert_panel_equal(result, wp) - def test_invalid_terms(self): with ensure_clean_store(self.path) as store: @@ -2504,27 +2149,16 @@ def test_invalid_terms(self): df = tm.makeTimeDataFrame() df['string'] = 'foo' df.loc[0:4, 'string'] = 'bar' - wp = tm.makePanel() store.put('df', df, format='table') - store.put('wp', wp, format='table') # some invalid terms - pytest.raises(ValueError, store.select, - 'wp', "minor=['A', 'B']") - pytest.raises(ValueError, store.select, - 'wp', ["index=['20121114']"]) - pytest.raises(ValueError, store.select, 'wp', [ - "index=['20121114', '20121114']"]) pytest.raises(TypeError, Term) # more invalid pytest.raises( ValueError, store.select, 'df', 'df.index[3]') pytest.raises(SyntaxError, store.select, 'df', 'index>') - pytest.raises( - ValueError, store.select, 'wp', - "major_axis<'20000108' & minor_axis['A', 'B']") # from the docs with ensure_clean_path(self.path) as path: @@ -2546,127 +2180,6 @@ def test_invalid_terms(self): pytest.raises(ValueError, read_hdf, path, 'dfq', where="A>0 or C>0") - def test_terms(self): - - with ensure_clean_store(self.path) as store: - - with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - - wp = tm.makePanel() - wpneg = Panel.fromDict({-1: tm.makeDataFrame(), - 0: tm.makeDataFrame(), - 1: tm.makeDataFrame()}) - - store.put('wp', wp, format='table') - store.put('wpneg', wpneg, format='table') - - # panel - result = store.select( - 'wp', - "major_axis<'20000108' and minor_axis=['A', 'B']") - expected = wp.truncate( - after='20000108').reindex(minor=['A', 'B']) - 
assert_panel_equal(result, expected) - - # with deprecation - result = store.select( - 'wp', where=("major_axis<'20000108' " - "and minor_axis=['A', 'B']")) - expected = wp.truncate( - after='20000108').reindex(minor=['A', 'B']) - tm.assert_panel_equal(result, expected) - - with catch_warnings(record=True): - - # valid terms - terms = [('major_axis=20121114'), - ('major_axis>20121114'), - (("major_axis=['20121114', '20121114']"),), - ('major_axis=datetime.datetime(2012, 11, 14)'), - 'major_axis> 20121114', - 'major_axis >20121114', - 'major_axis > 20121114', - (("minor_axis=['A', 'B']"),), - (("minor_axis=['A', 'B']"),), - ((("minor_axis==['A', 'B']"),),), - (("items=['ItemA', 'ItemB']"),), - ('items=ItemA'), - ] - - for t in terms: - store.select('wp', t) - - with pytest.raises(TypeError, - match='Only named functions are supported'): - store.select( - 'wp', - 'major_axis == (lambda x: x)("20130101")') - - with catch_warnings(record=True): - # check USub node parsing - res = store.select('wpneg', 'items == -1') - expected = Panel({-1: wpneg[-1]}) - tm.assert_panel_equal(res, expected) - - msg = 'Unary addition not supported' - with pytest.raises(NotImplementedError, match=msg): - store.select('wpneg', 'items == +1') - - def test_term_compat(self): - with ensure_clean_store(self.path) as store: - - with catch_warnings(record=True): - wp = Panel(np.random.randn(2, 5, 4), items=['Item1', 'Item2'], - major_axis=date_range('1/1/2000', periods=5), - minor_axis=['A', 'B', 'C', 'D']) - store.append('wp', wp) - - result = store.select( - 'wp', where=("major_axis>20000102 " - "and minor_axis=['A', 'B']")) - expected = wp.loc[:, wp.major_axis > - Timestamp('20000102'), ['A', 'B']] - assert_panel_equal(result, expected) - - store.remove('wp', 'major_axis>20000103') - result = store.select('wp') - expected = wp.loc[:, wp.major_axis <= Timestamp('20000103'), :] - assert_panel_equal(result, expected) - - with ensure_clean_store(self.path) as store: - - with 
catch_warnings(record=True): - wp = Panel(np.random.randn(2, 5, 4), - items=['Item1', 'Item2'], - major_axis=date_range('1/1/2000', periods=5), - minor_axis=['A', 'B', 'C', 'D']) - store.append('wp', wp) - - # stringified datetimes - result = store.select( - 'wp', 'major_axis>datetime.datetime(2000, 1, 2)') - expected = wp.loc[:, wp.major_axis > Timestamp('20000102')] - assert_panel_equal(result, expected) - - result = store.select( - 'wp', 'major_axis>datetime.datetime(2000, 1, 2)') - expected = wp.loc[:, wp.major_axis > Timestamp('20000102')] - assert_panel_equal(result, expected) - - result = store.select( - 'wp', - "major_axis=[datetime.datetime(2000, 1, 2, 0, 0), " - "datetime.datetime(2000, 1, 3, 0, 0)]") - expected = wp.loc[:, [Timestamp('20000102'), - Timestamp('20000103')]] - assert_panel_equal(result, expected) - - result = store.select( - 'wp', "minor_axis=['A', 'B']") - expected = wp.loc[:, :, ['A', 'B']] - assert_panel_equal(result, expected) - def test_same_name_scoping(self): with ensure_clean_store(self.path) as store: @@ -2747,6 +2260,7 @@ def test_float_index(self): s = Series(np.random.randn(10), index=index) self._check_roundtrip(s, tm.assert_series_equal) + @xfail_non_writeable def test_tuple_index(self): # GH #492 @@ -2759,6 +2273,7 @@ def test_tuple_index(self): simplefilter("ignore", pd.errors.PerformanceWarning) self._check_roundtrip(DF, tm.assert_frame_equal) + @xfail_non_writeable @pytest.mark.filterwarnings("ignore::pandas.errors.PerformanceWarning") def test_index_types(self): @@ -2822,6 +2337,7 @@ def test_timeseries_preepoch(self): except OverflowError: pytest.skip('known failer on some windows platforms') + @xfail_non_writeable @pytest.mark.parametrize("compression", [ False, pytest.param(True, marks=td.skip_if_windows_python_3) ]) @@ -2852,6 +2368,7 @@ def test_frame(self, compression): # empty self._check_roundtrip(df[:0], tm.assert_frame_equal) + @xfail_non_writeable def test_empty_series_frame(self): s0 = Series() s1 = 
Series(name='myseries') @@ -2865,8 +2382,10 @@ def test_empty_series_frame(self): self._check_roundtrip(df1, tm.assert_frame_equal) self._check_roundtrip(df2, tm.assert_frame_equal) - def test_empty_series(self): - for dtype in [np.int64, np.float64, np.object, 'm8[ns]', 'M8[ns]']: + @xfail_non_writeable + @pytest.mark.parametrize( + 'dtype', [np.int64, np.float64, np.object, 'm8[ns]', 'M8[ns]']) + def test_empty_series(self, dtype): s = Series(dtype=dtype) self._check_roundtrip(s, tm.assert_series_equal) @@ -2947,6 +2466,7 @@ def test_store_series_name(self): recons = store['series'] tm.assert_series_equal(recons, series) + @xfail_non_writeable @pytest.mark.parametrize("compression", [ False, pytest.param(True, marks=td.skip_if_windows_python_3) ]) @@ -2982,12 +2502,6 @@ def _make_one(): self._check_roundtrip(df1['int1'], tm.assert_series_equal, compression=compression) - def test_wide(self): - - with catch_warnings(record=True): - wp = tm.makePanel() - self._check_roundtrip(wp, assert_panel_equal) - @pytest.mark.filterwarnings( "ignore:\\nduplicate:pandas.io.pytables.DuplicateWarning" ) @@ -3050,29 +2564,6 @@ def test_select_with_dups(self): result = store.select('df', columns=['B', 'A']) assert_frame_equal(result, expected, by_blocks=True) - @pytest.mark.filterwarnings( - "ignore:\\nduplicate:pandas.io.pytables.DuplicateWarning" - ) - def test_wide_table_dups(self): - with ensure_clean_store(self.path) as store: - with catch_warnings(record=True): - - wp = tm.makePanel() - store.put('panel', wp, format='table') - store.put('panel', wp, format='table', append=True) - - recons = store['panel'] - - assert_panel_equal(recons, wp) - - def test_long(self): - def _check(left, right): - assert_panel_equal(left.to_panel(), right.to_panel()) - - with catch_warnings(record=True): - wp = tm.makePanel() - self._check_roundtrip(wp.to_frame(), _check) - def test_overwrite_node(self): with ensure_clean_store(self.path) as store: @@ -3119,34 +2610,6 @@ def test_select(self): 
with ensure_clean_store(self.path) as store: with catch_warnings(record=True): - wp = tm.makePanel() - - # put/select ok - _maybe_remove(store, 'wp') - store.put('wp', wp, format='table') - store.select('wp') - - # non-table ok (where = None) - _maybe_remove(store, 'wp') - store.put('wp2', wp) - store.select('wp2') - - # selection on the non-indexable with a large number of columns - wp = Panel(np.random.randn(100, 100, 100), - items=['Item%03d' % i for i in range(100)], - major_axis=date_range('1/1/2000', periods=100), - minor_axis=['E%03d' % i for i in range(100)]) - - _maybe_remove(store, 'wp') - store.append('wp', wp) - items = ['Item%03d' % i for i in range(80)] - result = store.select('wp', 'items=items') - expected = wp.reindex(items=items) - assert_panel_equal(expected, result) - - # selectin non-table with a where - # pytest.raises(ValueError, store.select, - # 'wp2', ('column', ['A', 'D'])) # select with columns= df = tm.makeTimeDataFrame() @@ -3675,31 +3138,6 @@ def test_retain_index_attributes2(self): assert read_hdf(path, 'data').index.name is None - def test_panel_select(self): - - with ensure_clean_store(self.path) as store: - - with catch_warnings(record=True): - - wp = tm.makePanel() - - store.put('wp', wp, format='table') - date = wp.major_axis[len(wp.major_axis) // 2] - - crit1 = ('major_axis>=date') - crit2 = ("minor_axis=['A', 'D']") - - result = store.select('wp', [crit1, crit2]) - expected = wp.truncate(before=date).reindex(minor=['A', 'D']) - assert_panel_equal(result, expected) - - result = store.select( - 'wp', ['major_axis>="20000124"', - ("minor_axis=['A', 'B']")]) - expected = wp.truncate( - before='20000124').reindex(minor=['A', 'B']) - assert_panel_equal(result, expected) - def test_frame_select(self): df = tm.makeTimeDataFrame() @@ -4538,9 +3976,10 @@ def test_pytables_native2_read(self, datapath): d1 = store['detector'] assert isinstance(d1, DataFrame) + @xfail_non_writeable def test_legacy_table_fixed_format_read_py2(self, 
datapath): # GH 24510 - # legacy table with fixed format written en Python 2 + # legacy table with fixed format written in Python 2 with ensure_clean_store( datapath('io', 'data', 'legacy_hdf', 'legacy_table_fixed_py2.h5'), @@ -4552,29 +3991,20 @@ def test_legacy_table_fixed_format_read_py2(self, datapath): name='INDEX_NAME')) assert_frame_equal(expected, result) - def test_legacy_table_read(self, datapath): - # legacy table types + def test_legacy_table_read_py2(self, datapath): + # issue: 24925 + # legacy table written in Python 2 with ensure_clean_store( - datapath('io', 'data', 'legacy_hdf', 'legacy_table.h5'), + datapath('io', 'data', 'legacy_hdf', + 'legacy_table_py2.h5'), mode='r') as store: + result = store.select('table') - with catch_warnings(): - simplefilter("ignore", pd.io.pytables.IncompatibilityWarning) - store.select('df1') - store.select('df2') - store.select('wp1') - - # force the frame - store.select('df2', typ='legacy_frame') - - # old version warning - pytest.raises( - Exception, store.select, 'wp1', 'minor_axis=B') - - df2 = store.select('df2') - result = store.select('df2', 'index>df2.index[2]') - expected = df2[df2.index > df2.index[2]] - assert_frame_equal(expected, result) + expected = pd.DataFrame({ + "a": ["a", "b"], + "b": [2, 3] + }) + assert_frame_equal(expected, result) def test_copy(self): @@ -4710,6 +4140,7 @@ def test_unicode_longer_encoded(self): result = store.get('df') tm.assert_frame_equal(result, df) + @xfail_non_writeable def test_store_datetime_mixed(self): df = DataFrame( @@ -5270,6 +4701,7 @@ def test_complex_table(self): reread = read_hdf(path, 'df') assert_frame_equal(df, reread) + @xfail_non_writeable def test_complex_mixed_fixed(self): complex64 = np.array([1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j], dtype=np.complex64) @@ -5308,35 +4740,30 @@ def test_complex_mixed_table(self): reread = read_hdf(path, 'df') assert_frame_equal(df, reread) - @pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning") def 
test_complex_across_dimensions_fixed(self): with catch_warnings(record=True): complex128 = np.array( [1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j]) s = Series(complex128, index=list('abcd')) df = DataFrame({'A': s, 'B': s}) - p = Panel({'One': df, 'Two': df}) - objs = [s, df, p] - comps = [tm.assert_series_equal, tm.assert_frame_equal, - tm.assert_panel_equal] + objs = [s, df] + comps = [tm.assert_series_equal, tm.assert_frame_equal] for obj, comp in zip(objs, comps): with ensure_clean_path(self.path) as path: obj.to_hdf(path, 'obj', format='fixed') reread = read_hdf(path, 'obj') comp(obj, reread) - @pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning") def test_complex_across_dimensions(self): complex128 = np.array([1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j]) s = Series(complex128, index=list('abcd')) df = DataFrame({'A': s, 'B': s}) with catch_warnings(record=True): - p = Panel({'One': df, 'Two': df}) - objs = [df, p] - comps = [tm.assert_frame_equal, tm.assert_panel_equal] + objs = [df] + comps = [tm.assert_frame_equal] for obj, comp in zip(objs, comps): with ensure_clean_path(self.path) as path: obj.to_hdf(path, 'obj', format='table') diff --git a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py index 75a6d8d009083..9d0bce3b342b4 100644 --- a/pandas/tests/io/test_sql.py +++ b/pandas/tests/io/test_sql.py @@ -605,12 +605,6 @@ def test_to_sql_series(self): s2 = sql.read_sql_query("SELECT * FROM test_series", self.conn) tm.assert_frame_equal(s.to_frame(), s2) - @pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning") - def test_to_sql_panel(self): - panel = tm.makePanel() - pytest.raises(NotImplementedError, sql.to_sql, panel, - 'test_panel', self.conn) - def test_roundtrip(self): sql.to_sql(self.test_frame1, 'test_frame_roundtrip', con=self.conn) diff --git a/pandas/tests/plotting/test_boxplot_method.py b/pandas/tests/plotting/test_boxplot_method.py index 7d721c7de3398..e6b9795aebe7c 100644 --- 
a/pandas/tests/plotting/test_boxplot_method.py +++ b/pandas/tests/plotting/test_boxplot_method.py @@ -267,13 +267,20 @@ def test_grouped_box_return_type(self): def test_grouped_box_layout(self): df = self.hist_df - pytest.raises(ValueError, df.boxplot, column=['weight', 'height'], - by=df.gender, layout=(1, 1)) - pytest.raises(ValueError, df.boxplot, - column=['height', 'weight', 'category'], - layout=(2, 1), return_type='dict') - pytest.raises(ValueError, df.boxplot, column=['weight', 'height'], - by=df.gender, layout=(-1, -1)) + msg = "Layout of 1x1 must be larger than required size 2" + with pytest.raises(ValueError, match=msg): + df.boxplot(column=['weight', 'height'], by=df.gender, + layout=(1, 1)) + + msg = "The 'layout' keyword is not supported when 'by' is None" + with pytest.raises(ValueError, match=msg): + df.boxplot(column=['height', 'weight', 'category'], + layout=(2, 1), return_type='dict') + + msg = "At least one dimension of layout must be positive" + with pytest.raises(ValueError, match=msg): + df.boxplot(column=['weight', 'height'], by=df.gender, + layout=(-1, -1)) # _check_plot_works adds an ax so catch warning. 
see GH #13188 with tm.assert_produces_warning(UserWarning): diff --git a/pandas/tests/plotting/test_datetimelike.py b/pandas/tests/plotting/test_datetimelike.py index ad79cc97f8b77..6702ad6cfb761 100644 --- a/pandas/tests/plotting/test_datetimelike.py +++ b/pandas/tests/plotting/test_datetimelike.py @@ -97,7 +97,9 @@ def test_nonnumeric_exclude(self): assert len(ax.get_lines()) == 1 # B was plotted self.plt.close(fig) - pytest.raises(TypeError, df['A'].plot) + msg = "Empty 'DataFrame': no numeric data to plot" + with pytest.raises(TypeError, match=msg): + df['A'].plot() def test_tsplot_deprecated(self): from pandas.tseries.plotting import tsplot @@ -140,10 +142,15 @@ def f(*args, **kwds): def test_both_style_and_color(self): ts = tm.makeTimeSeries() - pytest.raises(ValueError, ts.plot, style='b-', color='#000099') + msg = ("Cannot pass 'style' string with a color symbol and 'color' " + "keyword argument. Please use one or the other or pass 'style'" + " without a color symbol") + with pytest.raises(ValueError, match=msg): + ts.plot(style='b-', color='#000099') s = ts.reset_index(drop=True) - pytest.raises(ValueError, s.plot, style='b-', color='#000099') + with pytest.raises(ValueError, match=msg): + s.plot(style='b-', color='#000099') @pytest.mark.slow def test_high_freq(self): diff --git a/pandas/tests/plotting/test_hist_method.py b/pandas/tests/plotting/test_hist_method.py index 7bdbdac54f7a6..4f0bef52b5e15 100644 --- a/pandas/tests/plotting/test_hist_method.py +++ b/pandas/tests/plotting/test_hist_method.py @@ -332,12 +332,17 @@ def test_grouped_hist_legacy2(self): @pytest.mark.slow def test_grouped_hist_layout(self): df = self.hist_df - pytest.raises(ValueError, df.hist, column='weight', by=df.gender, - layout=(1, 1)) - pytest.raises(ValueError, df.hist, column='height', by=df.category, - layout=(1, 3)) - pytest.raises(ValueError, df.hist, column='height', by=df.category, - layout=(-1, -1)) + msg = "Layout of 1x1 must be larger than required size 2" + with 
pytest.raises(ValueError, match=msg): + df.hist(column='weight', by=df.gender, layout=(1, 1)) + + msg = "Layout of 1x3 must be larger than required size 4" + with pytest.raises(ValueError, match=msg): + df.hist(column='height', by=df.category, layout=(1, 3)) + + msg = "At least one dimension of layout must be positive" + with pytest.raises(ValueError, match=msg): + df.hist(column='height', by=df.category, layout=(-1, -1)) with tm.assert_produces_warning(UserWarning): axes = _check_plot_works(df.hist, column='height', by=df.gender, diff --git a/pandas/tests/plotting/test_misc.py b/pandas/tests/plotting/test_misc.py index 44b95f7d1b00b..98248586f3d27 100644 --- a/pandas/tests/plotting/test_misc.py +++ b/pandas/tests/plotting/test_misc.py @@ -278,14 +278,20 @@ def test_subplot_titles(self, iris): assert [p.get_title() for p in plot] == title # Case len(title) > len(df) - pytest.raises(ValueError, df.plot, subplots=True, - title=title + ["kittens > puppies"]) + msg = ("The length of `title` must equal the number of columns if" + " using `title` of type `list` and `subplots=True`") + with pytest.raises(ValueError, match=msg): + df.plot(subplots=True, title=title + ["kittens > puppies"]) # Case len(title) < len(df) - pytest.raises(ValueError, df.plot, subplots=True, title=title[:2]) + with pytest.raises(ValueError, match=msg): + df.plot(subplots=True, title=title[:2]) # Case subplots=False and title is of type list - pytest.raises(ValueError, df.plot, subplots=False, title=title) + msg = ("Using `title` of type `list` is not supported unless" + " `subplots=True` is passed") + with pytest.raises(ValueError, match=msg): + df.plot(subplots=False, title=title) # Case df with 3 numeric columns but layout of (2,2) plot = df.drop('SepalWidth', axis=1).plot(subplots=True, layout=(2, 2), diff --git a/pandas/tests/reductions/test_reductions.py b/pandas/tests/reductions/test_reductions.py index 173f719edd465..fbf7f610688ba 100644 --- a/pandas/tests/reductions/test_reductions.py +++ 
b/pandas/tests/reductions/test_reductions.py @@ -276,7 +276,9 @@ def test_timedelta_ops(self): # invalid ops for op in ['skew', 'kurt', 'sem', 'prod']: - pytest.raises(TypeError, getattr(td, op)) + msg = "reduction operation '{}' not allowed for this dtype" + with pytest.raises(TypeError, match=msg.format(op)): + getattr(td, op)() # GH#10040 # make sure NaT is properly handled by median() @@ -960,6 +962,27 @@ def test_min_max(self): assert np.isnan(_min) assert _max == 1 + def test_min_max_numeric_only(self): + # TODO deprecate numeric_only argument for Categorical and use + # skipna as well, see GH25303 + cat = Series(Categorical( + ["a", "b", np.nan, "a"], categories=['b', 'a'], ordered=True)) + + _min = cat.min() + _max = cat.max() + assert np.isnan(_min) + assert _max == "a" + + _min = cat.min(numeric_only=True) + _max = cat.max(numeric_only=True) + assert _min == "b" + assert _max == "a" + + _min = cat.min(numeric_only=False) + _max = cat.max(numeric_only=False) + assert np.isnan(_min) + assert _max == "a" + class TestSeriesMode(object): # Note: the name TestSeriesMode indicates these tests diff --git a/pandas/tests/resample/test_base.py b/pandas/tests/resample/test_base.py index 911cd990ab881..8f912ea5c524a 100644 --- a/pandas/tests/resample/test_base.py +++ b/pandas/tests/resample/test_base.py @@ -28,15 +28,10 @@ period_range, 'pi', datetime(2005, 1, 1), datetime(2005, 1, 10)) TIMEDELTA_RANGE = (timedelta_range, 'tdi', '1 day', '10 day') -ALL_TIMESERIES_INDEXES = [DATE_RANGE, PERIOD_RANGE, TIMEDELTA_RANGE] - - -def pytest_generate_tests(metafunc): - # called once per each test function - if metafunc.function.__name__.endswith('_all_ts'): - metafunc.parametrize( - '_index_factory,_series_name,_index_start,_index_end', - ALL_TIMESERIES_INDEXES) +all_ts = pytest.mark.parametrize( + '_index_factory,_series_name,_index_start,_index_end', + [DATE_RANGE, PERIOD_RANGE, TIMEDELTA_RANGE] +) @pytest.fixture @@ -84,7 +79,8 @@ def test_asfreq_fill_value(series, 
create_index): assert_frame_equal(result, expected) -def test_resample_interpolate_all_ts(frame): +@all_ts +def test_resample_interpolate(frame): # # 12925 df = frame assert_frame_equal( @@ -95,11 +91,15 @@ def test_resample_interpolate_all_ts(frame): def test_raises_on_non_datetimelike_index(): # this is a non datetimelike index xp = DataFrame() - pytest.raises(TypeError, lambda: xp.resample('A').mean()) + msg = ("Only valid with DatetimeIndex, TimedeltaIndex or PeriodIndex," + " but got an instance of 'Index'") + with pytest.raises(TypeError, match=msg): + xp.resample('A').mean() +@all_ts @pytest.mark.parametrize('freq', ['M', 'D', 'H']) -def test_resample_empty_series_all_ts(freq, empty_series, resample_method): +def test_resample_empty_series(freq, empty_series, resample_method): # GH12771 & GH12868 if resample_method == 'ohlc': @@ -118,8 +118,9 @@ def test_resample_empty_series_all_ts(freq, empty_series, resample_method): assert_series_equal(result, expected, check_dtype=False) +@all_ts @pytest.mark.parametrize('freq', ['M', 'D', 'H']) -def test_resample_empty_dataframe_all_ts(empty_frame, freq, resample_method): +def test_resample_empty_dataframe(empty_frame, freq, resample_method): # GH13212 df = empty_frame # count retains dimensions too @@ -159,7 +160,8 @@ def test_resample_empty_dtypes(index, dtype, resample_method): pass -def test_resample_loffset_arg_type_all_ts(frame, create_index): +@all_ts +def test_resample_loffset_arg_type(frame, create_index): # GH 13218, 15002 df = frame expected_means = [df.values[i:i + 2].mean() @@ -189,15 +191,18 @@ def test_resample_loffset_arg_type_all_ts(frame, create_index): # GH 13022, 7687 - TODO: fix resample w/ TimedeltaIndex if isinstance(expected.index, TimedeltaIndex): - with pytest.raises(AssertionError): + msg = "DataFrame are different" + with pytest.raises(AssertionError, match=msg): assert_frame_equal(result_agg, expected) + with pytest.raises(AssertionError, match=msg): assert_frame_equal(result_how, expected) 
else: assert_frame_equal(result_agg, expected) assert_frame_equal(result_how, expected) -def test_apply_to_empty_series_all_ts(empty_series): +@all_ts +def test_apply_to_empty_series(empty_series): # GH 14313 s = empty_series for freq in ['M', 'D', 'H']: @@ -207,7 +212,8 @@ def test_apply_to_empty_series_all_ts(empty_series): assert_series_equal(result, expected, check_dtype=False) -def test_resampler_is_iterable_all_ts(series): +@all_ts +def test_resampler_is_iterable(series): # GH 15314 freq = 'H' tg = TimeGrouper(freq, convention='start') @@ -218,7 +224,8 @@ def test_resampler_is_iterable_all_ts(series): assert_series_equal(rv, gv) -def test_resample_quantile_all_ts(series): +@all_ts +def test_resample_quantile(series): # GH 15023 s = series q = 0.75 diff --git a/pandas/tests/resample/test_datetime_index.py b/pandas/tests/resample/test_datetime_index.py index 73995cbe79ecd..ce675893d9907 100644 --- a/pandas/tests/resample/test_datetime_index.py +++ b/pandas/tests/resample/test_datetime_index.py @@ -1,6 +1,5 @@ from datetime import datetime, timedelta from functools import partial -from warnings import catch_warnings, simplefilter import numpy as np import pytest @@ -10,7 +9,7 @@ from pandas.errors import UnsupportedFunctionCall import pandas as pd -from pandas import DataFrame, Panel, Series, Timedelta, Timestamp, isna, notna +from pandas import DataFrame, Series, Timedelta, Timestamp, isna, notna from pandas.core.indexes.datetimes import date_range from pandas.core.indexes.period import Period, period_range from pandas.core.resample import ( @@ -113,16 +112,18 @@ def test_resample_basic_grouper(series): @pytest.mark.parametrize( '_index_start,_index_end,_index_name', [('1/1/2000 00:00:00', '1/1/2000 00:13:00', 'index')]) -@pytest.mark.parametrize('kwargs', [ - dict(label='righttt'), - dict(closed='righttt'), - dict(convention='starttt') +@pytest.mark.parametrize('keyword,value', [ + ('label', 'righttt'), + ('closed', 'righttt'), + ('convention', 'starttt') ]) 
-def test_resample_string_kwargs(series, kwargs): +def test_resample_string_kwargs(series, keyword, value): # see gh-19303 # Check that wrong keyword argument strings raise an error - with pytest.raises(ValueError, match='Unsupported value'): - series.resample('5min', **kwargs) + msg = "Unsupported value {value} for `{keyword}`".format( + value=value, keyword=keyword) + with pytest.raises(ValueError, match=msg): + series.resample('5min', **({keyword: value})) @pytest.mark.parametrize( @@ -676,7 +677,7 @@ def test_asfreq_non_unique(): ts = Series(np.random.randn(len(rng2)), index=rng2) msg = 'cannot reindex from a duplicate axis' - with pytest.raises(Exception, match=msg): + with pytest.raises(ValueError, match=msg): ts.asfreq('B') @@ -690,56 +691,6 @@ def test_resample_axis1(): tm.assert_frame_equal(result, expected) -def test_resample_panel(): - rng = date_range('1/1/2000', '6/30/2000') - n = len(rng) - - with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - panel = Panel(np.random.randn(3, n, 5), - items=['one', 'two', 'three'], - major_axis=rng, - minor_axis=['a', 'b', 'c', 'd', 'e']) - - result = panel.resample('M', axis=1).mean() - - def p_apply(panel, f): - result = {} - for item in panel.items: - result[item] = f(panel[item]) - return Panel(result, items=panel.items) - - expected = p_apply(panel, lambda x: x.resample('M').mean()) - tm.assert_panel_equal(result, expected) - - panel2 = panel.swapaxes(1, 2) - result = panel2.resample('M', axis=2).mean() - expected = p_apply(panel2, - lambda x: x.resample('M', axis=1).mean()) - tm.assert_panel_equal(result, expected) - - -@pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning") -def test_resample_panel_numpy(): - rng = date_range('1/1/2000', '6/30/2000') - n = len(rng) - - with catch_warnings(record=True): - panel = Panel(np.random.randn(3, n, 5), - items=['one', 'two', 'three'], - major_axis=rng, - minor_axis=['a', 'b', 'c', 'd', 'e']) - - result = panel.resample('M', 
axis=1).apply(lambda x: x.mean(1)) - expected = panel.resample('M', axis=1).mean() - tm.assert_panel_equal(result, expected) - - panel = panel.swapaxes(1, 2) - result = panel.resample('M', axis=2).apply(lambda x: x.mean(2)) - expected = panel.resample('M', axis=2).mean() - tm.assert_panel_equal(result, expected) - - def test_resample_anchored_ticks(): # If a fixed delta (5 minute, 4 hour) evenly divides a day, we should # "anchor" the origin at midnight so we get regular intervals rather @@ -1184,6 +1135,15 @@ def test_resample_nunique(): assert_series_equal(result, expected) +def test_resample_nunique_preserves_column_level_names(): + # see gh-23222 + df = tm.makeTimeDataFrame(freq="1D").abs() + df.columns = pd.MultiIndex.from_arrays([df.columns.tolist()] * 2, + names=["lev0", "lev1"]) + result = df.resample("1h").nunique() + tm.assert_index_equal(df.columns, result.columns) + + def test_resample_nunique_with_date_gap(): # GH 13453 index = pd.date_range('1-1-2000', '2-15-2000', freq='h') @@ -1209,9 +1169,13 @@ def test_resample_nunique_with_date_gap(): @pytest.mark.parametrize('k', [10, 100, 1000]) def test_resample_group_info(n, k): # GH10914 + + # use a fixed seed to always have the same uniques + prng = np.random.RandomState(1234) + dr = date_range(start='2015-08-27', periods=n // 10, freq='T') - ts = Series(np.random.randint(0, n // k, n).astype('int64'), - index=np.random.choice(dr, n)) + ts = Series(prng.randint(0, n // k, n).astype('int64'), + index=prng.choice(dr, n)) left = ts.resample('30T').nunique() ix = date_range(start=ts.index.min(), end=ts.index.max(), @@ -1276,6 +1240,21 @@ def test_resample_across_dst(): assert_frame_equal(result, expected) +def test_groupby_with_dst_time_change(): + # GH 24972 + index = pd.DatetimeIndex([1478064900001000000, 1480037118776792000], + tz='UTC').tz_convert('America/Chicago') + + df = pd.DataFrame([1, 2], index=index) + result = df.groupby(pd.Grouper(freq='1d')).last() + expected_index_values = 
pd.date_range('2016-11-02', '2016-11-24', + freq='d', tz='America/Chicago') + + index = pd.DatetimeIndex(expected_index_values) + expected = pd.DataFrame([1.0] + ([np.nan] * 21) + [2.0], index=index) + assert_frame_equal(result, expected) + + def test_resample_dst_anchor(): # 5172 dti = DatetimeIndex([datetime(2012, 11, 4, 23)], tz='US/Eastern') diff --git a/pandas/tests/resample/test_period_index.py b/pandas/tests/resample/test_period_index.py index c2fbb5bbb088c..8abdf9034527b 100644 --- a/pandas/tests/resample/test_period_index.py +++ b/pandas/tests/resample/test_period_index.py @@ -11,6 +11,7 @@ import pandas as pd from pandas import DataFrame, Series, Timestamp +from pandas.core.indexes.base import InvalidIndexError from pandas.core.indexes.datetimes import date_range from pandas.core.indexes.period import Period, PeriodIndex, period_range from pandas.core.resample import _get_period_range_edges @@ -72,17 +73,19 @@ def test_asfreq_fill_value(self, series): @pytest.mark.parametrize('freq', ['H', '12H', '2D', 'W']) @pytest.mark.parametrize('kind', [None, 'period', 'timestamp']) - def test_selection(self, index, freq, kind): + @pytest.mark.parametrize('kwargs', [dict(on='date'), dict(level='d')]) + def test_selection(self, index, freq, kind, kwargs): # This is a bug, these should be implemented # GH 14008 rng = np.arange(len(index), dtype=np.int64) df = DataFrame({'date': index, 'a': rng}, index=pd.MultiIndex.from_arrays([rng, index], names=['v', 'd'])) - with pytest.raises(NotImplementedError): - df.resample(freq, on='date', kind=kind) - with pytest.raises(NotImplementedError): - df.resample(freq, level='d', kind=kind) + msg = ("Resampling from level= or on= selection with a PeriodIndex is" + r" not currently supported, use \.set_index\(\.\.\.\) to" + " explicitly set index") + with pytest.raises(NotImplementedError, match=msg): + df.resample(freq, kind=kind, **kwargs) @pytest.mark.parametrize('month', MONTHS) @pytest.mark.parametrize('meth', ['ffill', 'bfill']) 
@@ -110,13 +113,20 @@ def test_basic_downsample(self, simple_period_range_series): assert_series_equal(ts.resample('a-dec').mean(), result) assert_series_equal(ts.resample('a').mean(), result) - def test_not_subperiod(self, simple_period_range_series): + @pytest.mark.parametrize('rule,expected_error_msg', [ + ('a-dec', '<YearEnd: month=12>'), + ('q-mar', '<QuarterEnd: startingMonth=3>'), + ('M', '<MonthEnd>'), + ('w-thu', '<Week: weekday=3>') + ]) + def test_not_subperiod( + self, simple_period_range_series, rule, expected_error_msg): # These are incompatible period rules for resampling ts = simple_period_range_series('1/1/1990', '6/30/1995', freq='w-wed') - pytest.raises(ValueError, lambda: ts.resample('a-dec').mean()) - pytest.raises(ValueError, lambda: ts.resample('q-mar').mean()) - pytest.raises(ValueError, lambda: ts.resample('M').mean()) - pytest.raises(ValueError, lambda: ts.resample('w-thu').mean()) + msg = ("Frequency <Week: weekday=2> cannot be resampled to {}, as they" + " are not sub or super periods").format(expected_error_msg) + with pytest.raises(IncompatibleFrequency, match=msg): + ts.resample(rule).mean() @pytest.mark.parametrize('freq', ['D', '2D']) def test_basic_upsample(self, freq, simple_period_range_series): @@ -212,8 +222,9 @@ def test_resample_same_freq(self, resample_method): assert_series_equal(result, expected) def test_resample_incompat_freq(self): - - with pytest.raises(IncompatibleFrequency): + msg = ("Frequency <MonthEnd> cannot be resampled to <Week: weekday=6>," + " as they are not sub or super periods") + with pytest.raises(IncompatibleFrequency, match=msg): Series(range(3), index=pd.period_range( start='2000', periods=3, freq='M')).resample('W').mean() @@ -373,7 +384,9 @@ def test_resample_fill_missing(self): def test_cant_fill_missing_dups(self): rng = PeriodIndex([2000, 2005, 2005, 2007, 2007], freq='A') s = Series(np.random.randn(5), index=rng) - pytest.raises(Exception, lambda: s.resample('A').ffill()) + msg = "Reindexing 
only valid with uniquely valued Index objects" + with pytest.raises(InvalidIndexError, match=msg): + s.resample('A').ffill() @pytest.mark.parametrize('freq', ['5min']) @pytest.mark.parametrize('kind', ['period', None, 'timestamp']) diff --git a/pandas/tests/resample/test_resample_api.py b/pandas/tests/resample/test_resample_api.py index 69684daf05f3d..97f1e07380ef9 100644 --- a/pandas/tests/resample/test_resample_api.py +++ b/pandas/tests/resample/test_resample_api.py @@ -1,11 +1,12 @@ # pylint: disable=E1101 +from collections import OrderedDict from datetime import datetime import numpy as np import pytest -from pandas.compat import OrderedDict, range +from pandas.compat import range import pandas as pd from pandas import DataFrame, Series @@ -113,16 +114,14 @@ def test_getitem(): test_frame.columns[[0, 1]]) -def test_select_bad_cols(): - +@pytest.mark.parametrize('key', [['D'], ['A', 'D']]) +def test_select_bad_cols(key): g = test_frame.resample('H') - pytest.raises(KeyError, g.__getitem__, ['D']) - - pytest.raises(KeyError, g.__getitem__, ['A', 'D']) - with pytest.raises(KeyError, match='^[^A]+$'): - # A should not be referenced as a bad column... - # will have to rethink regex if you change message! - g[['A', 'D']] + # 'A' should not be referenced as a bad column... + # will have to rethink regex if you change message! + msg = r"^\"Columns not found: 'D'\"$" + with pytest.raises(KeyError, match=msg): + g[key] def test_attribute_access(): @@ -216,7 +215,9 @@ def test_fillna(): result = r.fillna(method='bfill') assert_series_equal(result, expected) - with pytest.raises(ValueError): + msg = (r"Invalid fill method\. Expecting pad \(ffill\), backfill" + r" \(bfill\) or nearest\. 
Got 0") + with pytest.raises(ValueError, match=msg): r.fillna(0) @@ -437,12 +438,11 @@ def test_agg_misc(): # errors # invalid names in the agg specification + msg = "\"Column 'B' does not exist!\"" for t in cases: - with pytest.raises(KeyError): - with tm.assert_produces_warning(FutureWarning, - check_stacklevel=False): - t[['A']].agg({'A': ['sum', 'std'], - 'B': ['mean', 'std']}) + with pytest.raises(KeyError, match=msg): + t[['A']].agg({'A': ['sum', 'std'], + 'B': ['mean', 'std']}) def test_agg_nested_dicts(): @@ -464,11 +464,11 @@ def test_agg_nested_dicts(): df.groupby(pd.Grouper(freq='2D')) ] + msg = r"cannot perform renaming for r(1|2) with a nested dictionary" for t in cases: - def f(): + with pytest.raises(pd.core.base.SpecificationError, match=msg): t.aggregate({'r1': {'A': ['mean', 'sum']}, 'r2': {'B': ['mean', 'sum']}}) - pytest.raises(ValueError, f) for t in cases: expected = pd.concat([t['A'].mean(), t['A'].std(), t['B'].mean(), @@ -499,7 +499,8 @@ def test_try_aggregate_non_existing_column(): df = DataFrame(data).set_index('dt') # Error as we don't have 'z' column - with pytest.raises(KeyError): + msg = "\"Column 'z' does not exist!\"" + with pytest.raises(KeyError, match=msg): df.resample('30T').agg({'x': ['mean'], 'y': ['median'], 'z': ['sum']}) @@ -517,23 +518,29 @@ def test_selection_api_validation(): df_exp = DataFrame({'a': rng}, index=index) # non DatetimeIndex - with pytest.raises(TypeError): + msg = ("Only valid with DatetimeIndex, TimedeltaIndex or PeriodIndex," + " but got an instance of 'Int64Index'") + with pytest.raises(TypeError, match=msg): df.resample('2D', level='v') - with pytest.raises(ValueError): + msg = "The Grouper cannot specify both a key and a level!" 
+ with pytest.raises(ValueError, match=msg): df.resample('2D', on='date', level='d') - with pytest.raises(TypeError): + msg = "unhashable type: 'list'" + with pytest.raises(TypeError, match=msg): df.resample('2D', on=['a', 'date']) - with pytest.raises(KeyError): + msg = r"\"Level \['a', 'date'\] not found\"" + with pytest.raises(KeyError, match=msg): df.resample('2D', level=['a', 'date']) # upsampling not allowed - with pytest.raises(ValueError): + msg = ("Upsampling from level= or on= selection is not supported, use" + r" \.set_index\(\.\.\.\) to explicitly set index to datetime-like") + with pytest.raises(ValueError, match=msg): df.resample('2D', level='d').asfreq() - - with pytest.raises(ValueError): + with pytest.raises(ValueError, match=msg): df.resample('2D', on='date').asfreq() exp = df_exp.resample('2D').sum() @@ -542,3 +549,25 @@ def test_selection_api_validation(): exp.index.name = 'd' assert_frame_equal(exp, df.resample('2D', level='d').sum()) + + +@pytest.mark.parametrize('col_name', ['t2', 't2x', 't2q', 'T_2M', + 't2p', 't2m', 't2m1', 'T2M']) +def test_agg_with_datetime_index_list_agg_func(col_name): + # GH 22660 + # The parametrized column names would get converted to dates by our + # date parser. Some would result in OutOfBoundsError (ValueError) while + # others would result in OverflowError when passed into Timestamp. + # We catch these errors and move on to the correct branch. 
+ df = pd.DataFrame(list(range(200)), + index=pd.date_range(start='2017-01-01', freq='15min', + periods=200, tz='Europe/Berlin'), + columns=[col_name]) + result = df.resample('1d').aggregate(['mean']) + expected = pd.DataFrame([47.5, 143.5, 195.5], + index=pd.date_range(start='2017-01-01', freq='D', + periods=3, tz='Europe/Berlin'), + columns=pd.MultiIndex(levels=[[col_name], + ['mean']], + codes=[[0], [0]])) + assert_frame_equal(result, expected) diff --git a/pandas/tests/resample/test_time_grouper.py b/pandas/tests/resample/test_time_grouper.py index ec29b55ac9d67..2f330d1f2484b 100644 --- a/pandas/tests/resample/test_time_grouper.py +++ b/pandas/tests/resample/test_time_grouper.py @@ -5,7 +5,7 @@ import pytest import pandas as pd -from pandas import DataFrame, Panel, Series +from pandas import DataFrame, Series from pandas.core.indexes.datetimes import date_range from pandas.core.resample import TimeGrouper import pandas.util.testing as tm @@ -79,27 +79,6 @@ def f(df): tm.assert_index_equal(result.index, df.index) -@pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning") -def test_panel_aggregation(): - ind = pd.date_range('1/1/2000', periods=100) - data = np.random.randn(2, len(ind), 4) - - wp = Panel(data, items=['Item1', 'Item2'], major_axis=ind, - minor_axis=['A', 'B', 'C', 'D']) - - tg = TimeGrouper('M', axis=1) - _, grouper, _ = tg._get_grouper(wp) - bingrouped = wp.groupby(grouper) - binagg = bingrouped.mean() - - def f(x): - assert (isinstance(x, Panel)) - return x.mean(1) - - result = bingrouped.agg(f) - tm.assert_panel_equal(result, binagg) - - @pytest.mark.parametrize('name, func', [ ('Int64Index', tm.makeIntIndex), ('Index', tm.makeUnicodeIndex), @@ -112,7 +91,7 @@ def test_fails_on_no_datetime_index(name, func): df = DataFrame({'a': np.random.randn(n)}, index=index) msg = ("Only valid with DatetimeIndex, TimedeltaIndex " - "or PeriodIndex, but got an instance of %r" % name) + "or PeriodIndex, but got an instance of '{}'".format(name)) with 
pytest.raises(TypeError, match=msg): df.groupby(TimeGrouper('D')) diff --git a/pandas/tests/reshape/merge/test_join.py b/pandas/tests/reshape/merge/test_join.py index e21f9d0291afa..62c9047b17f3d 100644 --- a/pandas/tests/reshape/merge/test_join.py +++ b/pandas/tests/reshape/merge/test_join.py @@ -1,7 +1,5 @@ # pylint: disable=E1103 -from warnings import catch_warnings - import numpy as np from numpy.random import randn import pytest @@ -657,95 +655,6 @@ def test_join_dups(self): 'y_y', 'x_x', 'y_x', 'x_y', 'y_y'] assert_frame_equal(dta, expected) - def test_panel_join(self): - with catch_warnings(record=True): - panel = tm.makePanel() - tm.add_nans(panel) - - p1 = panel.iloc[:2, :10, :3] - p2 = panel.iloc[2:, 5:, 2:] - - # left join - result = p1.join(p2) - expected = p1.copy() - expected['ItemC'] = p2['ItemC'] - tm.assert_panel_equal(result, expected) - - # right join - result = p1.join(p2, how='right') - expected = p2.copy() - expected['ItemA'] = p1['ItemA'] - expected['ItemB'] = p1['ItemB'] - expected = expected.reindex(items=['ItemA', 'ItemB', 'ItemC']) - tm.assert_panel_equal(result, expected) - - # inner join - result = p1.join(p2, how='inner') - expected = panel.iloc[:, 5:10, 2:3] - tm.assert_panel_equal(result, expected) - - # outer join - result = p1.join(p2, how='outer') - expected = p1.reindex(major=panel.major_axis, - minor=panel.minor_axis) - expected = expected.join(p2.reindex(major=panel.major_axis, - minor=panel.minor_axis)) - tm.assert_panel_equal(result, expected) - - def test_panel_join_overlap(self): - with catch_warnings(record=True): - panel = tm.makePanel() - tm.add_nans(panel) - - p1 = panel.loc[['ItemA', 'ItemB', 'ItemC']] - p2 = panel.loc[['ItemB', 'ItemC']] - - # Expected index is - # - # ItemA, ItemB_p1, ItemC_p1, ItemB_p2, ItemC_p2 - joined = p1.join(p2, lsuffix='_p1', rsuffix='_p2') - p1_suf = p1.loc[['ItemB', 'ItemC']].add_suffix('_p1') - p2_suf = p2.loc[['ItemB', 'ItemC']].add_suffix('_p2') - no_overlap = panel.loc[['ItemA']] - 
expected = no_overlap.join(p1_suf.join(p2_suf)) - tm.assert_panel_equal(joined, expected) - - def test_panel_join_many(self): - with catch_warnings(record=True): - tm.K = 10 - panel = tm.makePanel() - tm.K = 4 - - panels = [panel.iloc[:2], panel.iloc[2:6], panel.iloc[6:]] - - joined = panels[0].join(panels[1:]) - tm.assert_panel_equal(joined, panel) - - panels = [panel.iloc[:2, :-5], - panel.iloc[2:6, 2:], - panel.iloc[6:, 5:-7]] - - data_dict = {} - for p in panels: - data_dict.update(p.iteritems()) - - joined = panels[0].join(panels[1:], how='inner') - expected = pd.Panel.from_dict(data_dict, intersect=True) - tm.assert_panel_equal(joined, expected) - - joined = panels[0].join(panels[1:], how='outer') - expected = pd.Panel.from_dict(data_dict, intersect=False) - tm.assert_panel_equal(joined, expected) - - # edge cases - msg = "Suffixes not supported when passing multiple panels" - with pytest.raises(ValueError, match=msg): - panels[0].join(panels[1:], how='outer', lsuffix='foo', - rsuffix='bar') - msg = "Right join not supported with multiple panels" - with pytest.raises(ValueError, match=msg): - panels[0].join(panels[1:], how='right') - def test_join_multi_to_multi(self, join_type): # GH 20475 leftindex = MultiIndex.from_product([list('abc'), list('xy'), [1, 2]], @@ -773,6 +682,28 @@ def test_join_multi_to_multi(self, join_type): with pytest.raises(ValueError, match=msg): right.join(left, on=['abc', 'xy'], how=join_type) + def test_join_on_tz_aware_datetimeindex(self): + # GH 23931 + df1 = pd.DataFrame( + { + 'date': pd.date_range(start='2018-01-01', periods=5, + tz='America/Chicago'), + 'vals': list('abcde') + } + ) + + df2 = pd.DataFrame( + { + 'date': pd.date_range(start='2018-01-03', periods=5, + tz='America/Chicago'), + 'vals_2': list('tuvwx') + } + ) + result = df1.join(df2.set_index('date'), on='date') + expected = df1.copy() + expected['vals_2'] = pd.Series([np.nan] * len(expected), dtype=object) + assert_frame_equal(result, expected) + def 
_check_join(left, right, result, join_col, how='left', lsuffix='_x', rsuffix='_y'): diff --git a/pandas/tests/reshape/merge/test_merge.py b/pandas/tests/reshape/merge/test_merge.py index c17c301968269..7a97368504fd6 100644 --- a/pandas/tests/reshape/merge/test_merge.py +++ b/pandas/tests/reshape/merge/test_merge.py @@ -39,6 +39,54 @@ def get_test_data(ngroups=NGROUPS, n=N): return arr +def get_series(): + return [ + pd.Series([1], dtype='int64'), + pd.Series([1], dtype='Int64'), + pd.Series([1.23]), + pd.Series(['foo']), + pd.Series([True]), + pd.Series([pd.Timestamp('2018-01-01')]), + pd.Series([pd.Timestamp('2018-01-01', tz='US/Eastern')]), + ] + + +def get_series_na(): + return [ + pd.Series([np.nan], dtype='Int64'), + pd.Series([np.nan], dtype='float'), + pd.Series([np.nan], dtype='object'), + pd.Series([pd.NaT]), + ] + + +@pytest.fixture(params=get_series(), ids=lambda x: x.dtype.name) +def series_of_dtype(request): + """ + A parametrized fixture returning a variety of Series of different + dtypes + """ + return request.param + + +@pytest.fixture(params=get_series(), ids=lambda x: x.dtype.name) +def series_of_dtype2(request): + """ + A duplicate of the series_of_dtype fixture, so that it can be used + twice by a single function + """ + return request.param + + +@pytest.fixture(params=get_series_na(), ids=lambda x: x.dtype.name) +def series_of_dtype_all_na(request): + """ + A parametrized fixture returning a variety of Series with all NA + values + """ + return request.param + + class TestMerge(object): def setup_method(self, method): @@ -428,6 +476,36 @@ def check2(exp, kwarg): check1(exp_in, kwarg) check2(exp_out, kwarg) + def test_merge_empty_frame(self, series_of_dtype, series_of_dtype2): + # GH 25183 + df = pd.DataFrame({'key': series_of_dtype, 'value': series_of_dtype2}, + columns=['key', 'value']) + df_empty = df[:0] + expected = pd.DataFrame({ + 'value_x': pd.Series(dtype=df.dtypes['value']), + 'key': pd.Series(dtype=df.dtypes['key']), + 'value_y': 
pd.Series(dtype=df.dtypes['value']), + }, columns=['value_x', 'key', 'value_y']) + actual = df_empty.merge(df, on='key') + assert_frame_equal(actual, expected) + + def test_merge_all_na_column(self, series_of_dtype, + series_of_dtype_all_na): + # GH 25183 + df_left = pd.DataFrame( + {'key': series_of_dtype, 'value': series_of_dtype_all_na}, + columns=['key', 'value']) + df_right = pd.DataFrame( + {'key': series_of_dtype, 'value': series_of_dtype_all_na}, + columns=['key', 'value']) + expected = pd.DataFrame({ + 'key': series_of_dtype, + 'value_x': series_of_dtype_all_na, + 'value_y': series_of_dtype_all_na, + }, columns=['key', 'value_x', 'value_y']) + actual = df_left.merge(df_right, on='key') + assert_frame_equal(actual, expected) + def test_merge_nosort(self): # #2098, anything to do? @@ -616,6 +694,24 @@ def test_merge_on_datetime64tz(self): assert result['value_x'].dtype == 'datetime64[ns, US/Eastern]' assert result['value_y'].dtype == 'datetime64[ns, US/Eastern]' + def test_merge_on_datetime64tz_empty(self): + # https://github.com/pandas-dev/pandas/issues/25014 + dtz = pd.DatetimeTZDtype(tz='UTC') + right = pd.DataFrame({'date': [pd.Timestamp('2018', tz=dtz.tz)], + 'value': [4.0], + 'date2': [pd.Timestamp('2019', tz=dtz.tz)]}, + columns=['date', 'value', 'date2']) + left = right[:0] + result = left.merge(right, on='date') + expected = pd.DataFrame({ + 'value_x': pd.Series(dtype=float), + 'date2_x': pd.Series(dtype=dtz), + 'date': pd.Series(dtype=dtz), + 'value_y': pd.Series(dtype=float), + 'date2_y': pd.Series(dtype=dtz), + }, columns=['value_x', 'date2_x', 'date', 'value_y', 'date2_y']) + tm.assert_frame_equal(result, expected) + def test_merge_datetime64tz_with_dst_transition(self): # GH 18885 df1 = pd.DataFrame(pd.date_range( @@ -1508,3 +1604,65 @@ def test_merge_series(on, left_on, right_on, left_index, right_index, nm): with pytest.raises(ValueError, match=msg): result = pd.merge(a, b, on=on, left_on=left_on, right_on=right_on, left_index=left_index, 
right_index=right_index) + + +@pytest.mark.parametrize("col1, col2, kwargs, expected_cols", [ + (0, 0, dict(suffixes=("", "_dup")), ["0", "0_dup"]), + (0, 0, dict(suffixes=(None, "_dup")), [0, "0_dup"]), + (0, 0, dict(suffixes=("_x", "_y")), ["0_x", "0_y"]), + ("a", 0, dict(suffixes=(None, "_y")), ["a", 0]), + (0.0, 0.0, dict(suffixes=("_x", None)), ["0.0_x", 0.0]), + ("b", "b", dict(suffixes=(None, "_y")), ["b", "b_y"]), + ("a", "a", dict(suffixes=("_x", None)), ["a_x", "a"]), + ("a", "b", dict(suffixes=("_x", None)), ["a", "b"]), + ("a", "a", dict(suffixes=[None, "_x"]), ["a", "a_x"]), + (0, 0, dict(suffixes=["_a", None]), ["0_a", 0]), + ("a", "a", dict(), ["a_x", "a_y"]), + (0, 0, dict(), ["0_x", "0_y"]) +]) +def test_merge_suffix(col1, col2, kwargs, expected_cols): + # issue: 24782 + a = pd.DataFrame({col1: [1, 2, 3]}) + b = pd.DataFrame({col2: [4, 5, 6]}) + + expected = pd.DataFrame([[1, 4], [2, 5], [3, 6]], + columns=expected_cols) + + result = a.merge(b, left_index=True, right_index=True, **kwargs) + tm.assert_frame_equal(result, expected) + + result = pd.merge(a, b, left_index=True, right_index=True, **kwargs) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("col1, col2, suffixes", [ + ("a", "a", [None, None]), + ("a", "a", (None, None)), + ("a", "a", ("", None)), + (0, 0, [None, None]), + (0, 0, (None, "")) +]) +def test_merge_suffix_error(col1, col2, suffixes): + # issue: 24782 + a = pd.DataFrame({col1: [1, 2, 3]}) + b = pd.DataFrame({col2: [3, 4, 5]}) + + # TODO: might reconsider current raise behaviour, see issue 24782 + msg = "columns overlap but no suffix specified" + with pytest.raises(ValueError, match=msg): + pd.merge(a, b, left_index=True, right_index=True, suffixes=suffixes) + + +@pytest.mark.parametrize("col1, col2, suffixes", [ + ("a", "a", None), + (0, 0, None) +]) +def test_merge_suffix_none_error(col1, col2, suffixes): + # issue: 24782 + a = pd.DataFrame({col1: [1, 2, 3]}) + b = pd.DataFrame({col2: [3, 4, 5]}) + + # 
TODO: might reconsider current raise behaviour, see GH24782 + msg = "iterable" + with pytest.raises(TypeError, match=msg): + pd.merge(a, b, left_index=True, right_index=True, suffixes=suffixes) diff --git a/pandas/tests/reshape/test_concat.py b/pandas/tests/reshape/test_concat.py index ec6123bae327e..a186d32ed8800 100644 --- a/pandas/tests/reshape/test_concat.py +++ b/pandas/tests/reshape/test_concat.py @@ -3,7 +3,7 @@ from datetime import datetime from decimal import Decimal from itertools import combinations -from warnings import catch_warnings, simplefilter +from warnings import catch_warnings import dateutil import numpy as np @@ -1499,15 +1499,6 @@ def test_concat_mixed_objs(self): result = concat([s1, df, s2], ignore_index=True) assert_frame_equal(result, expected) - # invalid concatente of mixed dims - with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - panel = tm.makePanel() - msg = ("cannot concatenate unaligned mixed dimensional NDFrame" - " objects") - with pytest.raises(ValueError, match=msg): - concat([panel, s1], axis=1) - def test_empty_dtype_coerce(self): # xref to #12411 @@ -1543,34 +1534,6 @@ def test_dtype_coerceion(self): result = concat([df.iloc[[0]], df.iloc[[1]]]) tm.assert_series_equal(result.dtypes, df.dtypes) - @pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning") - def test_panel_concat_other_axes(self): - panel = tm.makePanel() - - p1 = panel.iloc[:, :5, :] - p2 = panel.iloc[:, 5:, :] - - result = concat([p1, p2], axis=1) - tm.assert_panel_equal(result, panel) - - p1 = panel.iloc[:, :, :2] - p2 = panel.iloc[:, :, 2:] - - result = concat([p1, p2], axis=2) - tm.assert_panel_equal(result, panel) - - # if things are a bit misbehaved - p1 = panel.iloc[:2, :, :2] - p2 = panel.iloc[:, :, 2:] - p1['ItemC'] = 'baz' - - result = concat([p1, p2], axis=2) - - expected = panel.copy() - expected['ItemC'] = expected['ItemC'].astype('O') - expected.loc['ItemC', :, :2] = 'baz' - tm.assert_panel_equal(result, expected) - 
@pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning") # Panel.rename warning we don't care about @pytest.mark.filterwarnings("ignore:Using:FutureWarning") diff --git a/pandas/tests/reshape/test_reshape.py b/pandas/tests/reshape/test_reshape.py index 7b544b7981c1f..a5b6cffd1d86c 100644 --- a/pandas/tests/reshape/test_reshape.py +++ b/pandas/tests/reshape/test_reshape.py @@ -580,23 +580,28 @@ def test_get_dummies_duplicate_columns(self, df): class TestCategoricalReshape(object): - @pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning") - def test_reshaping_panel_categorical(self): + def test_reshaping_multi_index_categorical(self): - p = tm.makePanel() - p['str'] = 'foo' - df = p.to_frame() + # construct a MultiIndexed DataFrame formerly created + # via `tm.makePanel().to_frame()` + cols = ['ItemA', 'ItemB', 'ItemC'] + data = {c: tm.makeTimeDataFrame() for c in cols} + df = pd.concat({c: data[c].stack() for c in data}, axis='columns') + df.index.names = ['major', 'minor'] + df['str'] = 'foo' + + dti = df.index.levels[0] df['category'] = df['str'].astype('category') result = df['category'].unstack() - c = Categorical(['foo'] * len(p.major_axis)) + c = Categorical(['foo'] * len(dti)) expected = DataFrame({'A': c.copy(), 'B': c.copy(), 'C': c.copy(), 'D': c.copy()}, columns=Index(list('ABCD'), name='minor'), - index=p.major_axis.set_names('major')) + index=dti) tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/scalar/period/test_period.py b/pandas/tests/scalar/period/test_period.py index d0f87618ad3af..8ca19745055a3 100644 --- a/pandas/tests/scalar/period/test_period.py +++ b/pandas/tests/scalar/period/test_period.py @@ -8,6 +8,7 @@ from pandas._libs.tslibs.ccalendar import DAYS, MONTHS from pandas._libs.tslibs.frequencies import INVALID_FREQ_ERR_MSG from pandas._libs.tslibs.parsing import DateParseError +from pandas._libs.tslibs.period import IncompatibleFrequency from pandas._libs.tslibs.timezones import dateutil_gettz, maybe_get_tz from 
pandas.compat import iteritems, text_type from pandas.compat.numpy import np_datetime64_compat @@ -35,7 +36,9 @@ def test_construction(self): i4 = Period('2005', freq='M') i5 = Period('2005', freq='m') - pytest.raises(ValueError, i1.__ne__, i4) + msg = r"Input has different freq=M from Period\(freq=A-DEC\)" + with pytest.raises(IncompatibleFrequency, match=msg): + i1 != i4 assert i4 == i5 i1 = Period.now('Q') @@ -74,9 +77,12 @@ def test_construction(self): freq='U') assert i1 == expected - pytest.raises(ValueError, Period, ordinal=200701) + msg = "Must supply freq for ordinal value" + with pytest.raises(ValueError, match=msg): + Period(ordinal=200701) - pytest.raises(ValueError, Period, '2007-1-1', freq='X') + with pytest.raises(ValueError, match="Invalid frequency: X"): + Period('2007-1-1', freq='X') def test_construction_bday(self): @@ -233,10 +239,6 @@ def test_period_constructor_offsets(self): freq='U') assert i1 == expected - pytest.raises(ValueError, Period, ordinal=200701) - - pytest.raises(ValueError, Period, '2007-1-1', freq='X') - def test_invalid_arguments(self): with pytest.raises(ValueError): Period(datetime.now()) @@ -925,8 +927,9 @@ def test_properties_secondly(self): class TestPeriodField(object): def test_get_period_field_array_raises_on_out_of_range(self): - pytest.raises(ValueError, libperiod.get_period_field_arr, -1, - np.empty(1), 0) + msg = "Buffer dtype mismatch, expected 'int64_t' but got 'double'" + with pytest.raises(ValueError, match=msg): + libperiod.get_period_field_arr(-1, np.empty(1), 0) class TestComparisons(object): diff --git a/pandas/tests/scalar/test_nat.py b/pandas/tests/scalar/test_nat.py index abf95b276cda1..43747ea8621d9 100644 --- a/pandas/tests/scalar/test_nat.py +++ b/pandas/tests/scalar/test_nat.py @@ -9,7 +9,7 @@ from pandas import ( DatetimeIndex, Index, NaT, Period, Series, Timedelta, TimedeltaIndex, - Timestamp) + Timestamp, isna) from pandas.core.arrays import PeriodArray from pandas.util import testing as tm @@ 
-201,9 +201,10 @@ def _get_overlap_public_nat_methods(klass, as_tuple=False): "fromtimestamp", "isocalendar", "isoformat", "isoweekday", "month_name", "now", "replace", "round", "strftime", "strptime", "time", "timestamp", "timetuple", "timetz", - "to_datetime64", "to_pydatetime", "today", "toordinal", - "tz_convert", "tz_localize", "tzname", "utcfromtimestamp", - "utcnow", "utcoffset", "utctimetuple", "weekday"]), + "to_datetime64", "to_numpy", "to_pydatetime", "today", + "toordinal", "tz_convert", "tz_localize", "tzname", + "utcfromtimestamp", "utcnow", "utcoffset", "utctimetuple", + "weekday"]), (Timedelta, ["total_seconds"]) ]) def test_overlap_public_nat_methods(klass, expected): @@ -339,3 +340,11 @@ def test_nat_arithmetic_td64_vector(op_name, box): def test_nat_pinned_docstrings(): # see gh-17327 assert NaT.ctime.__doc__ == datetime.ctime.__doc__ + + +def test_to_numpy_alias(): + # GH 24653: alias .to_numpy() for scalars + expected = NaT.to_datetime64() + result = NaT.to_numpy() + + assert isna(expected) and isna(result) diff --git a/pandas/tests/scalar/timedelta/test_timedelta.py b/pandas/tests/scalar/timedelta/test_timedelta.py index 9b5fdfb06a9fa..ee2c2e9e1959c 100644 --- a/pandas/tests/scalar/timedelta/test_timedelta.py +++ b/pandas/tests/scalar/timedelta/test_timedelta.py @@ -1,5 +1,6 @@ """ test the scalar Timedelta """ from datetime import timedelta +import re import numpy as np import pytest @@ -249,9 +250,13 @@ def check(value): assert rng.microseconds == 0 assert rng.nanoseconds == 0 - pytest.raises(AttributeError, lambda: rng.hours) - pytest.raises(AttributeError, lambda: rng.minutes) - pytest.raises(AttributeError, lambda: rng.milliseconds) + msg = "'Timedelta' object has no attribute '{}'" + with pytest.raises(AttributeError, match=msg.format('hours')): + rng.hours + with pytest.raises(AttributeError, match=msg.format('minutes')): + rng.minutes + with pytest.raises(AttributeError, match=msg.format('milliseconds')): + rng.milliseconds # GH 10050 
check(rng.days) @@ -271,9 +276,13 @@ def check(value): assert rng.seconds == 10 * 3600 + 11 * 60 + 12 assert rng.microseconds == 100 * 1000 + 123 assert rng.nanoseconds == 456 - pytest.raises(AttributeError, lambda: rng.hours) - pytest.raises(AttributeError, lambda: rng.minutes) - pytest.raises(AttributeError, lambda: rng.milliseconds) + msg = "'Timedelta' object has no attribute '{}'" + with pytest.raises(AttributeError, match=msg.format('hours')): + rng.hours + with pytest.raises(AttributeError, match=msg.format('minutes')): + rng.minutes + with pytest.raises(AttributeError, match=msg.format('milliseconds')): + rng.milliseconds # components tup = pd.to_timedelta(-1, 'us').components @@ -309,9 +318,15 @@ def test_iso_conversion(self): assert to_timedelta('P0DT0H0M1S') == expected def test_nat_converters(self): - assert to_timedelta('nat', box=False).astype('int64') == iNaT - assert to_timedelta('nan', box=False).astype('int64') == iNaT + result = to_timedelta('nat', box=False) + assert result.dtype.kind == 'm' + assert result.astype('int64') == iNaT + result = to_timedelta('nan', box=False) + assert result.dtype.kind == 'm' + assert result.astype('int64') == iNaT + + @pytest.mark.filterwarnings("ignore:M and Y units are deprecated") @pytest.mark.parametrize('units, np_unit', [(['Y', 'y'], 'Y'), (['M'], 'M'), @@ -371,6 +386,24 @@ def test_unit_parser(self, units, np_unit, wrapper): result = Timedelta('2{}'.format(unit)) assert result == expected + @pytest.mark.skipif(compat.PY2, reason="requires python3.5 or higher") + @pytest.mark.parametrize('unit', ['Y', 'y', 'M']) + def test_unit_m_y_deprecated(self, unit): + with tm.assert_produces_warning(FutureWarning) as w1: + Timedelta(10, unit) + msg = r'.* units are deprecated .*' + assert re.match(msg, str(w1[0].message)) + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False) as w2: + to_timedelta(10, unit) + msg = r'.* units are deprecated .*' + assert re.match(msg, str(w2[0].message)) + with 
tm.assert_produces_warning(FutureWarning, + check_stacklevel=False) as w3: + to_timedelta([1, 2], unit) + msg = r'.* units are deprecated .*' + assert re.match(msg, str(w3[0].message)) + def test_numeric_conversions(self): assert Timedelta(0) == np.timedelta64(0, 'ns') assert Timedelta(10) == np.timedelta64(10, 'ns') @@ -389,6 +422,11 @@ def test_timedelta_conversions(self): assert (Timedelta(timedelta(days=1)) == np.timedelta64(1, 'D').astype('m8[ns]')) + def test_to_numpy_alias(self): + # GH 24653: alias .to_numpy() for scalars + td = Timedelta('10m7s') + assert td.to_timedelta64() == td.to_numpy() + def test_round(self): t1 = Timedelta('1 days 02:34:56.789123456') @@ -419,8 +457,12 @@ def test_round(self): assert r2 == s2 # invalid - for freq in ['Y', 'M', 'foobar']: - pytest.raises(ValueError, lambda: t1.round(freq)) + for freq, msg in [ + ('Y', '<YearEnd: month=12> is a non-fixed frequency'), + ('M', '<MonthEnd> is a non-fixed frequency'), + ('foobar', 'Invalid frequency: foobar')]: + with pytest.raises(ValueError, match=msg): + t1.round(freq) t1 = timedelta_range('1 days', periods=3, freq='1 min 2 s 3 us') t2 = -1 * t1 @@ -465,11 +507,15 @@ def test_round(self): r1 = t1.round(freq) tm.assert_index_equal(r1, s1) r2 = t2.round(freq) - tm.assert_index_equal(r2, s2) + tm.assert_index_equal(r2, s2) # invalid - for freq in ['Y', 'M', 'foobar']: - pytest.raises(ValueError, lambda: t1.round(freq)) + for freq, msg in [ + ('Y', '<YearEnd: month=12> is a non-fixed frequency'), + ('M', '<MonthEnd> is a non-fixed frequency'), + ('foobar', 'Invalid frequency: foobar')]: + with pytest.raises(ValueError, match=msg): + t1.round(freq) def test_contains(self): # Checking for any NaT-like objects @@ -579,9 +625,12 @@ def test_overflow(self): assert np.allclose(result.value / 1000, expected.value / 1000) # sum - pytest.raises(ValueError, lambda: (s - s.min()).sum()) + msg = "overflow in timedelta operation" + with pytest.raises(ValueError, match=msg): + (s - s.min()).sum() s1 = 
s[0:10000] - pytest.raises(ValueError, lambda: (s1 - s1.min()).sum()) + with pytest.raises(ValueError, match=msg): + (s1 - s1.min()).sum() s2 = s[0:1000] result = (s2 - s2.min()).sum() diff --git a/pandas/tests/scalar/timestamp/test_timestamp.py b/pandas/tests/scalar/timestamp/test_timestamp.py index b2c05d1564a48..b55d00b44fd67 100644 --- a/pandas/tests/scalar/timestamp/test_timestamp.py +++ b/pandas/tests/scalar/timestamp/test_timestamp.py @@ -60,7 +60,9 @@ def check(value, equal): check(ts.hour, 9) check(ts.minute, 6) check(ts.second, 3) - pytest.raises(AttributeError, lambda: ts.millisecond) + msg = "'Timestamp' object has no attribute 'millisecond'" + with pytest.raises(AttributeError, match=msg): + ts.millisecond check(ts.microsecond, 100) check(ts.nanosecond, 1) check(ts.dayofweek, 6) @@ -78,7 +80,9 @@ def check(value, equal): check(ts.hour, 23) check(ts.minute, 59) check(ts.second, 0) - pytest.raises(AttributeError, lambda: ts.millisecond) + msg = "'Timestamp' object has no attribute 'millisecond'" + with pytest.raises(AttributeError, match=msg): + ts.millisecond check(ts.microsecond, 0) check(ts.nanosecond, 0) check(ts.dayofweek, 2) @@ -355,6 +359,14 @@ def test_constructor_invalid_tz(self): # interpreted as a `freq` Timestamp('2012-01-01', 'US/Pacific') + def test_constructor_strptime(self): + # GH25016 + # Test support for Timestamp.strptime + fmt = '%Y%m%d-%H%M%S-%f%z' + ts = '20190129-235348-000001+0000' + with pytest.raises(NotImplementedError): + Timestamp.strptime(ts, fmt) + def test_constructor_tz_or_tzinfo(self): # GH#17943, GH#17690, GH#5168 stamps = [Timestamp(year=2017, month=10, day=22, tz='UTC'), @@ -780,6 +792,13 @@ def test_hash_equivalent(self): stamp = Timestamp(datetime(2011, 1, 1)) assert d[stamp] == 5 + def test_tz_conversion_freq(self, tz_naive_fixture): + # GH25241 + t1 = Timestamp('2019-01-01 10:00', freq='H') + assert t1.tz_localize(tz=tz_naive_fixture).freq == t1.freq + t2 = Timestamp('2019-01-02 12:00', tz='UTC', freq='T') + 
assert t2.tz_convert(tz='UTC').freq == t2.freq + class TestTimestampNsOperations(object): @@ -962,3 +981,8 @@ def test_to_period_tz_warning(self): with tm.assert_produces_warning(UserWarning): # warning that timezone info will be lost ts.to_period('D') + + def test_to_numpy_alias(self): + # GH 24653: alias .to_numpy() for scalars + ts = Timestamp(datetime.now()) + assert ts.to_datetime64() == ts.to_numpy() diff --git a/pandas/tests/scalar/timestamp/test_unary_ops.py b/pandas/tests/scalar/timestamp/test_unary_ops.py index 3f9a30d254126..adcf66200a672 100644 --- a/pandas/tests/scalar/timestamp/test_unary_ops.py +++ b/pandas/tests/scalar/timestamp/test_unary_ops.py @@ -8,7 +8,7 @@ from pandas._libs.tslibs import conversion from pandas._libs.tslibs.frequencies import INVALID_FREQ_ERR_MSG -from pandas.compat import PY3 +from pandas.compat import PY3, PY36 import pandas.util._test_decorators as td from pandas import NaT, Timestamp @@ -329,6 +329,19 @@ def test_replace_dst_border(self): expected = Timestamp('2013-11-3 03:00:00', tz='America/Chicago') assert result == expected + @pytest.mark.skipif(not PY36, reason='Fold not available until PY3.6') + @pytest.mark.parametrize('fold', [0, 1]) + @pytest.mark.parametrize('tz', ['dateutil/Europe/London', 'Europe/London']) + def test_replace_dst_fold(self, fold, tz): + # GH 25017 + d = datetime(2019, 10, 27, 2, 30) + ts = Timestamp(d, tz=tz) + result = ts.replace(hour=1, fold=fold) + expected = Timestamp(datetime(2019, 10, 27, 1, 30)).tz_localize( + tz, ambiguous=not fold + ) + assert result == expected + # -------------------------------------------------------------- # Timestamp.normalize diff --git a/pandas/tests/series/conftest.py b/pandas/tests/series/conftest.py index 431aacb1c8d56..367e7a1baa7f3 100644 --- a/pandas/tests/series/conftest.py +++ b/pandas/tests/series/conftest.py @@ -1,6 +1,5 @@ import pytest -from pandas import Series import pandas.util.testing as tm @@ -32,11 +31,3 @@ def object_series(): s = 
tm.makeObjectSeries() s.name = 'objects' return s - - -@pytest.fixture -def empty_series(): - """ - Fixture for empty Series - """ - return Series([], index=[]) diff --git a/pandas/tests/series/indexing/test_indexing.py b/pandas/tests/series/indexing/test_indexing.py index a5855f68127f4..dbe667a166d0a 100644 --- a/pandas/tests/series/indexing/test_indexing.py +++ b/pandas/tests/series/indexing/test_indexing.py @@ -114,7 +114,8 @@ def test_getitem_get(test_data): # missing d = test_data.ts.index[0] - BDay() - with pytest.raises(KeyError, match=r"Timestamp\('1999-12-31 00:00:00'\)"): + msg = r"Timestamp\('1999-12-31 00:00:00', freq='B'\)" + with pytest.raises(KeyError, match=msg): test_data.ts[d] # None diff --git a/pandas/tests/series/test_alter_axes.py b/pandas/tests/series/test_alter_axes.py index 04c54bcf8c22c..73adc7d4bf82f 100644 --- a/pandas/tests/series/test_alter_axes.py +++ b/pandas/tests/series/test_alter_axes.py @@ -258,6 +258,17 @@ def test_rename_axis_inplace(self, datetime_series): assert no_return is None tm.assert_series_equal(result, expected) + @pytest.mark.parametrize('kwargs', [{'mapper': None}, {'index': None}, {}]) + def test_rename_axis_none(self, kwargs): + # GH 25034 + index = Index(list('abc'), name='foo') + df = Series([1, 2, 3], index=index) + + result = df.rename_axis(**kwargs) + expected_index = index.rename(None) if kwargs else index + expected = Series([1, 2, 3], index=expected_index) + tm.assert_series_equal(result, expected) + def test_set_axis_inplace_axes(self, axis_series): # GH14636 ser = Series(np.arange(4), index=[1, 3, 5, 7], dtype='int64') diff --git a/pandas/tests/series/test_analytics.py b/pandas/tests/series/test_analytics.py index 6811e370726b2..1f265d574da15 100644 --- a/pandas/tests/series/test_analytics.py +++ b/pandas/tests/series/test_analytics.py @@ -9,7 +9,7 @@ from numpy import nan import pytest -from pandas.compat import PY35, lrange, range +from pandas.compat import PY2, PY35, is_platform_windows, lrange, range 
import pandas.util._test_decorators as td import pandas as pd @@ -285,6 +285,17 @@ def test_numpy_round(self): with pytest.raises(ValueError, match=msg): np.round(s, decimals=0, out=s) + @pytest.mark.xfail( + PY2 and is_platform_windows(), reason="numpy/numpy#7882", + raises=AssertionError, strict=True) + def test_numpy_round_nan(self): + # See gh-14197 + s = Series([1.53, np.nan, 0.06]) + with tm.assert_produces_warning(None): + result = s.round() + expected = Series([2., np.nan, 0.]) + assert_series_equal(result, expected) + def test_built_in_round(self): if not compat.PY3: pytest.skip( diff --git a/pandas/tests/series/test_apply.py b/pandas/tests/series/test_apply.py index 90cf6916df0d1..162a27db34cb1 100644 --- a/pandas/tests/series/test_apply.py +++ b/pandas/tests/series/test_apply.py @@ -163,6 +163,18 @@ def test_apply_dict_depr(self): with tm.assert_produces_warning(FutureWarning): tsdf.A.agg({'foo': ['sum', 'mean']}) + @pytest.mark.parametrize('series', [ + ['1-1', '1-1', np.NaN], + ['1-1', '1-2', np.NaN]]) + def test_apply_categorical_with_nan_values(self, series): + # GH 20714 bug fixed in: GH 24275 + s = pd.Series(series, dtype='category') + result = s.apply(lambda x: x.split('-')[0]) + result = result.astype(object) + expected = pd.Series(['1', '1', np.NaN], dtype='category') + expected = expected.astype(object) + tm.assert_series_equal(result, expected) + class TestSeriesAggregate(): diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py index d92ca48751d0a..8525b877618c9 100644 --- a/pandas/tests/series/test_constructors.py +++ b/pandas/tests/series/test_constructors.py @@ -47,7 +47,9 @@ def test_scalar_conversion(self): assert int(Series([1.])) == 1 assert long(Series([1.])) == 1 - def test_constructor(self, datetime_series, empty_series): + def test_constructor(self, datetime_series): + empty_series = Series() + assert datetime_series.index.is_all_dates # Pass in Series diff --git 
a/pandas/tests/series/test_dtypes.py b/pandas/tests/series/test_dtypes.py index e29974f56967f..d8046c4944afc 100644 --- a/pandas/tests/series/test_dtypes.py +++ b/pandas/tests/series/test_dtypes.py @@ -291,8 +291,8 @@ def test_astype_categorical_to_other(self): expected = s tm.assert_series_equal(s.astype('category'), expected) tm.assert_series_equal(s.astype(CategoricalDtype()), expected) - msg = (r"could not convert string to float: '(0 - 499|9500 - 9999)'|" - r"invalid literal for float\(\): (0 - 499|9500 - 9999)") + msg = (r"could not convert string to float|" + r"invalid literal for float\(\)") with pytest.raises(ValueError, match=msg): s.astype('float64') diff --git a/pandas/tests/series/test_duplicates.py b/pandas/tests/series/test_duplicates.py index fe47975711a17..a975edacc19c7 100644 --- a/pandas/tests/series/test_duplicates.py +++ b/pandas/tests/series/test_duplicates.py @@ -59,12 +59,18 @@ def test_unique_data_ownership(): Series(Series(["a", "c", "b"]).unique()).sort_values() -def test_is_unique(): - # GH11946 - s = Series(np.random.randint(0, 10, size=1000)) - assert s.is_unique is False - s = Series(np.arange(1000)) - assert s.is_unique is True +@pytest.mark.parametrize('data, expected', [ + (np.random.randint(0, 10, size=1000), False), + (np.arange(1000), True), + ([], True), + ([np.nan], True), + (['foo', 'bar', np.nan], True), + (['foo', 'foo', np.nan], False), + (['foo', 'bar', np.nan, np.nan], False)]) +def test_is_unique(data, expected): + # GH11946 / GH25180 + s = Series(data) + assert s.is_unique is expected def test_is_unique_class_ne(capsys): diff --git a/pandas/tests/series/test_missing.py b/pandas/tests/series/test_missing.py index 985288c439917..f07dd1dfb5fda 100644 --- a/pandas/tests/series/test_missing.py +++ b/pandas/tests/series/test_missing.py @@ -854,8 +854,23 @@ def test_series_pad_backfill_limit(self): assert_series_equal(result, expected) -class TestSeriesInterpolateData(): +@pytest.fixture(params=['linear', 'index', 'values', 
'nearest', 'slinear', + 'zero', 'quadratic', 'cubic', 'barycentric', 'krogh', + 'polynomial', 'spline', 'piecewise_polynomial', + 'from_derivatives', 'pchip', 'akima', ]) +def nontemporal_method(request): + """ Fixture that returns an (method name, required kwargs) pair. + + This fixture does not include method 'time' as a parameterization; that + method requires a Series with a DatetimeIndex, and is generally tested + separately from these non-temporal methods. + """ + method = request.param + kwargs = dict(order=1) if method in ('spline', 'polynomial') else dict() + return method, kwargs + +class TestSeriesInterpolateData(): def test_interpolate(self, datetime_series, string_series): ts = Series(np.arange(len(datetime_series), dtype=float), datetime_series.index) @@ -875,12 +890,12 @@ def test_interpolate(self, datetime_series, string_series): time_interp = ord_ts_copy.interpolate(method='time') tm.assert_series_equal(time_interp, ord_ts) - # try time interpolation on a non-TimeSeries - # Only raises ValueError if there are NaNs. - non_ts = string_series.copy() - non_ts[0] = np.NaN - msg = ("time-weighted interpolation only works on Series or DataFrames" - " with a DatetimeIndex") + def test_interpolate_time_raises_for_non_timeseries(self): + # When method='time' is used on a non-TimeSeries that contains a null + # value, a ValueError should be raised. 
+ non_ts = Series([0, 1, 2, np.NaN]) + msg = ("time-weighted interpolation only works on Series.* " + "with a DatetimeIndex") with pytest.raises(ValueError, match=msg): non_ts.interpolate(method='time') @@ -1061,21 +1076,35 @@ def test_interp_limit(self): result = s.interpolate(method='linear', limit=2) assert_series_equal(result, expected) - # GH 9217, make sure limit is an int and greater than 0 - methods = ['linear', 'time', 'index', 'values', 'nearest', 'zero', - 'slinear', 'quadratic', 'cubic', 'barycentric', 'krogh', - 'polynomial', 'spline', 'piecewise_polynomial', None, - 'from_derivatives', 'pchip', 'akima'] - s = pd.Series([1, 2, np.nan, np.nan, 5]) - msg = (r"Limit must be greater than 0|" - "time-weighted interpolation only works on Series or" - r" DataFrames with a DatetimeIndex|" - r"invalid method '(polynomial|spline|None)' to interpolate|" - "Limit must be an integer") - for limit in [-1, 0, 1., 2.]: - for method in methods: - with pytest.raises(ValueError, match=msg): - s.interpolate(limit=limit, method=method) + @pytest.mark.parametrize("limit", [-1, 0]) + def test_interpolate_invalid_nonpositive_limit(self, nontemporal_method, + limit): + # GH 9217: make sure limit is greater than zero. + s = pd.Series([1, 2, np.nan, 4]) + method, kwargs = nontemporal_method + with pytest.raises(ValueError, match="Limit must be greater than 0"): + s.interpolate(limit=limit, method=method, **kwargs) + + def test_interpolate_invalid_float_limit(self, nontemporal_method): + # GH 9217: make sure limit is an integer. 
+ s = pd.Series([1, 2, np.nan, 4]) + method, kwargs = nontemporal_method + limit = 2.0 + with pytest.raises(ValueError, match="Limit must be an integer"): + s.interpolate(limit=limit, method=method, **kwargs) + + @pytest.mark.parametrize("invalid_method", [None, 'nonexistent_method']) + def test_interp_invalid_method(self, invalid_method): + s = Series([1, 3, np.nan, 12, np.nan, 25]) + + msg = "method must be one of.* Got '{}' instead".format(invalid_method) + with pytest.raises(ValueError, match=msg): + s.interpolate(method=invalid_method) + + # When an invalid method and invalid limit (such as -1) are + # provided, the error message reflects the invalid method. + with pytest.raises(ValueError, match=msg): + s.interpolate(method=invalid_method, limit=-1) def test_interp_limit_forward(self): s = Series([1, 3, np.nan, np.nan, np.nan, 11]) @@ -1276,11 +1305,20 @@ def test_interp_limit_no_nans(self): @td.skip_if_no_scipy @pytest.mark.parametrize("method", ['polynomial', 'spline']) def test_no_order(self, method): + # see GH-10633, GH-24014 s = Series([0, 1, np.nan, 3]) - msg = "invalid method '{}' to interpolate".format(method) + msg = "You must specify the order of the spline or polynomial" with pytest.raises(ValueError, match=msg): s.interpolate(method=method) + @td.skip_if_no_scipy + @pytest.mark.parametrize('order', [-1, -1.0, 0, 0.0, np.nan]) + def test_interpolate_spline_invalid_order(self, order): + s = Series([0, 1, np.nan, 3]) + msg = "order needs to be specified and greater than 0" + with pytest.raises(ValueError, match=msg): + s.interpolate(method='spline', order=order) + @td.skip_if_no_scipy def test_spline(self): s = Series([1, 2, np.nan, 4, 5, np.nan, 7]) @@ -1313,19 +1351,6 @@ def test_spline_interpolation(self): expected1 = s.interpolate(method='spline', order=1) assert_series_equal(result1, expected1) - @td.skip_if_no_scipy - def test_spline_error(self): - # see gh-10633 - s = pd.Series(np.arange(10) ** 2) - s[np.random.randint(0, 9, 3)] = np.nan - 
msg = "invalid method 'spline' to interpolate" - with pytest.raises(ValueError, match=msg): - s.interpolate(method='spline') - - msg = "order needs to be specified and greater than 0" - with pytest.raises(ValueError, match=msg): - s.interpolate(method='spline', order=0) - def test_interp_timedelta64(self): # GH 6424 df = Series([1, np.nan, 3], diff --git a/pandas/tests/series/test_operators.py b/pandas/tests/series/test_operators.py index 4d3c9926fc5ae..b2aac441db195 100644 --- a/pandas/tests/series/test_operators.py +++ b/pandas/tests/series/test_operators.py @@ -563,6 +563,13 @@ def test_comp_ops_df_compat(self): with pytest.raises(ValueError, match=msg): left.to_frame() < right.to_frame() + def test_compare_series_interval_keyword(self): + # GH 25338 + s = Series(['IntervalA', 'IntervalB', 'IntervalC']) + result = s == 'IntervalA' + expected = Series([True, False, False]) + assert_series_equal(result, expected) + class TestSeriesFlexComparisonOps(object): diff --git a/pandas/tests/series/test_rank.py b/pandas/tests/series/test_rank.py index 510a51e002918..dfcda889269ee 100644 --- a/pandas/tests/series/test_rank.py +++ b/pandas/tests/series/test_rank.py @@ -499,6 +499,7 @@ def test_rank_first_pct(dtype, ser, exp): @pytest.mark.single +@pytest.mark.high_memory def test_pct_max_many_rows(): # GH 18271 s = Series(np.arange(2**24 + 1)) diff --git a/pandas/tests/series/test_repr.py b/pandas/tests/series/test_repr.py index b4e7708e2456e..842207f2a572f 100644 --- a/pandas/tests/series/test_repr.py +++ b/pandas/tests/series/test_repr.py @@ -198,6 +198,14 @@ def test_latex_repr(self): assert s._repr_latex_() is None + def test_index_repr_in_frame_with_nan(self): + # see gh-25061 + i = Index([1, np.nan]) + s = Series([1, 2], index=i) + exp = """1.0 1\nNaN 2\ndtype: int64""" + + assert repr(s) == exp + class TestCategoricalRepr(object): diff --git a/pandas/tests/sparse/frame/test_frame.py b/pandas/tests/sparse/frame/test_frame.py index bfb5103c97adc..888d1fa1bfe45 100644 
--- a/pandas/tests/sparse/frame/test_frame.py +++ b/pandas/tests/sparse/frame/test_frame.py @@ -7,7 +7,7 @@ import pytest from pandas._libs.sparse import BlockIndex, IntIndex -from pandas.compat import lrange +from pandas.compat import PY2, lrange from pandas.errors import PerformanceWarning import pandas as pd @@ -145,8 +145,9 @@ def test_constructor_ndarray(self, float_frame): tm.assert_sp_frame_equal(sp, float_frame.reindex(columns=['A'])) # raise on level argument - pytest.raises(TypeError, float_frame.reindex, columns=['A'], - level=1) + msg = "Reindex by level not supported for sparse" + with pytest.raises(TypeError, match=msg): + float_frame.reindex(columns=['A'], level=1) # wrong length index / columns with pytest.raises(ValueError, match="^Index length"): @@ -269,6 +270,19 @@ def test_type_coercion_at_construction(self): default_fill_value=0) tm.assert_sp_frame_equal(result, expected) + def test_default_dtype(self): + result = pd.SparseDataFrame(columns=list('ab'), index=range(2)) + expected = pd.SparseDataFrame([[np.nan, np.nan], [np.nan, np.nan]], + columns=list('ab'), index=range(2)) + tm.assert_sp_frame_equal(result, expected) + + def test_nan_data_with_int_dtype_raises_error(self): + sdf = pd.SparseDataFrame([[np.nan, np.nan], [np.nan, np.nan]], + columns=list('ab'), index=range(2)) + msg = "Cannot convert non-finite values" + with pytest.raises(ValueError, match=msg): + pd.SparseDataFrame(sdf, dtype=np.int64) + def test_dtypes(self): df = DataFrame(np.random.randn(10000, 4)) df.loc[:9998] = np.nan @@ -433,7 +447,8 @@ def test_getitem(self): exp = sdf.reindex(columns=['a', 'b']) tm.assert_sp_frame_equal(result, exp) - pytest.raises(Exception, sdf.__getitem__, ['a', 'd']) + with pytest.raises(KeyError, match=r"\['d'\] not in index"): + sdf[['a', 'd']] def test_iloc(self, float_frame): @@ -504,7 +519,9 @@ def test_getitem_overload(self, float_frame): subframe = float_frame[indexer] tm.assert_index_equal(subindex, subframe.index) - 
pytest.raises(Exception, float_frame.__getitem__, indexer[:-1]) + msg = "Item wrong length 9 instead of 10" + with pytest.raises(ValueError, match=msg): + float_frame[indexer[:-1]] def test_setitem(self, float_frame, float_frame_int_kind, float_frame_dense, @@ -551,8 +568,9 @@ def _check_frame(frame, orig): assert len(frame['I'].sp_values) == N // 2 # insert ndarray wrong size - pytest.raises(Exception, frame.__setitem__, 'foo', - np.random.randn(N - 1)) + msg = "Length of values does not match length of index" + with pytest.raises(AssertionError, match=msg): + frame['foo'] = np.random.randn(N - 1) # scalar value frame['J'] = 5 @@ -625,17 +643,22 @@ def test_delitem(self, float_frame): def test_set_columns(self, float_frame): float_frame.columns = float_frame.columns - pytest.raises(Exception, setattr, float_frame, 'columns', - float_frame.columns[:-1]) + msg = ("Length mismatch: Expected axis has 4 elements, new values have" + " 3 elements") + with pytest.raises(ValueError, match=msg): + float_frame.columns = float_frame.columns[:-1] def test_set_index(self, float_frame): float_frame.index = float_frame.index - pytest.raises(Exception, setattr, float_frame, 'index', - float_frame.index[:-1]) + msg = ("Length mismatch: Expected axis has 10 elements, new values" + " have 9 elements") + with pytest.raises(ValueError, match=msg): + float_frame.index = float_frame.index[:-1] def test_ctor_reindex(self): idx = pd.Index([0, 1, 2, 3]) - with pytest.raises(ValueError, match=''): + msg = "Length of passed values is 2, index implies 4" + with pytest.raises(ValueError, match=msg): pd.SparseDataFrame({"A": [1, 2]}, index=idx) def test_append(self, float_frame): @@ -858,6 +881,7 @@ def test_describe(self, float_frame): str(float_frame) desc = float_frame.describe() # noqa + @pytest.mark.skipif(PY2, reason="pytest.raises match regex fails") def test_join(self, float_frame): left = float_frame.loc[:, ['A', 'B']] right = float_frame.loc[:, ['C', 'D']] @@ -865,7 +889,10 @@ def 
test_join(self, float_frame): tm.assert_sp_frame_equal(joined, float_frame, exact_indices=False) right = float_frame.loc[:, ['B', 'D']] - pytest.raises(Exception, left.join, right) + msg = (r"columns overlap but no suffix specified: Index\(\['B'\]," + r" dtype='object'\)") + with pytest.raises(ValueError, match=msg): + left.join(right) with pytest.raises(ValueError, match='Other Series must have a name'): float_frame.join(Series( @@ -1046,8 +1073,11 @@ def _check(frame): _check(float_frame_int_kind) # for now - pytest.raises(Exception, _check, float_frame_fill0) - pytest.raises(Exception, _check, float_frame_fill2) + msg = "This routine assumes NaN fill value" + with pytest.raises(TypeError, match=msg): + _check(float_frame_fill0) + with pytest.raises(TypeError, match=msg): + _check(float_frame_fill2) def test_transpose(self, float_frame, float_frame_int_kind, float_frame_dense, @@ -1246,6 +1276,14 @@ def test_notna(self): 'B': [True, False, True, True, False]}) tm.assert_frame_equal(res.to_dense(), exp) + def test_default_fill_value_with_no_data(self): + # GH 16807 + expected = pd.SparseDataFrame([[1.0, 1.0], [1.0, 1.0]], + columns=list('ab'), index=range(2)) + result = pd.SparseDataFrame(columns=list('ab'), index=range(2), + default_fill_value=1.0) + tm.assert_frame_equal(expected, result) + class TestSparseDataFrameArithmetic(object): diff --git a/pandas/tests/sparse/series/test_series.py b/pandas/tests/sparse/series/test_series.py index 7eed47d0de888..93cf629f20957 100644 --- a/pandas/tests/sparse/series/test_series.py +++ b/pandas/tests/sparse/series/test_series.py @@ -452,12 +452,13 @@ def _check_getitem(sp, dense): _check_getitem(self.ziseries, self.ziseries.to_dense()) # exception handling - pytest.raises(Exception, self.bseries.__getitem__, - len(self.bseries) + 1) + with pytest.raises(IndexError, match="Out of bounds access"): + self.bseries[len(self.bseries) + 1] # index not contained - pytest.raises(Exception, self.btseries.__getitem__, - 
self.btseries.index[-1] + BDay()) + msg = r"Timestamp\('2011-01-31 00:00:00', freq='B'\)" + with pytest.raises(KeyError, match=msg): + self.btseries[self.btseries.index[-1] + BDay()] def test_get_get_value(self): tm.assert_almost_equal(self.bseries.get(10), self.bseries[10]) @@ -523,8 +524,9 @@ def _compare(idx): self._check_all(_compare_with_dense) - pytest.raises(Exception, self.bseries.take, - [0, len(self.bseries) + 1]) + msg = "index 21 is out of bounds for size 20" + with pytest.raises(IndexError, match=msg): + self.bseries.take([0, len(self.bseries) + 1]) # Corner case # XXX: changed test. Why wsa this considered a corner case? @@ -1138,25 +1140,35 @@ def test_to_coo_text_names_text_row_levels_nosort(self): def test_to_coo_bad_partition_nonnull_intersection(self): ss = self.sparse_series[0] - pytest.raises(ValueError, ss.to_coo, ['A', 'B', 'C'], ['C', 'D']) + msg = "Is not a partition because intersection is not null" + with pytest.raises(ValueError, match=msg): + ss.to_coo(['A', 'B', 'C'], ['C', 'D']) def test_to_coo_bad_partition_small_union(self): ss = self.sparse_series[0] - pytest.raises(ValueError, ss.to_coo, ['A'], ['C', 'D']) + msg = "Is not a partition because union is not the whole" + with pytest.raises(ValueError, match=msg): + ss.to_coo(['A'], ['C', 'D']) def test_to_coo_nlevels_less_than_two(self): ss = self.sparse_series[0] ss.index = np.arange(len(ss.index)) - pytest.raises(ValueError, ss.to_coo) + msg = "to_coo requires MultiIndex with nlevels > 2" + with pytest.raises(ValueError, match=msg): + ss.to_coo() def test_to_coo_bad_ilevel(self): ss = self.sparse_series[0] - pytest.raises(KeyError, ss.to_coo, ['A', 'B'], ['C', 'D', 'E']) + with pytest.raises(KeyError, match="Level E not found"): + ss.to_coo(['A', 'B'], ['C', 'D', 'E']) def test_to_coo_duplicate_index_entries(self): ss = pd.concat([self.sparse_series[0], self.sparse_series[0]]).to_sparse() - pytest.raises(ValueError, ss.to_coo, ['A', 'B'], ['C', 'D']) + msg = ("Duplicate index 
entries are not allowed in to_coo" + " transformation") + with pytest.raises(ValueError, match=msg): + ss.to_coo(['A', 'B'], ['C', 'D']) def test_from_coo_dense_index(self): ss = SparseSeries.from_coo(self.coo_matrices[0], dense_index=True) diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py index 3d28b17750540..3f75c508d22f9 100644 --- a/pandas/tests/test_algos.py +++ b/pandas/tests/test_algos.py @@ -11,7 +11,7 @@ from pandas._libs import ( algos as libalgos, groupby as libgroupby, hashtable as ht) -from pandas.compat import lrange, range +from pandas.compat import PY2, lrange, range from pandas.compat.numpy import np_array_datetime64_compat import pandas.util._test_decorators as td @@ -224,11 +224,16 @@ def test_factorize_tuple_list(self, data, expected_label, expected_level): dtype=object) tm.assert_numpy_array_equal(result[1], expected_level_array) + @pytest.mark.skipif(PY2, reason="pytest.raises match regex fails") def test_complex_sorting(self): # gh 12666 - check no segfault x17 = np.array([complex(i) for i in range(17)], dtype=object) - pytest.raises(TypeError, algos.factorize, x17[::-1], sort=True) + msg = (r"'(<|>)' not supported between instances of 'complex' and" + r" 'complex'|" + r"unorderable types: complex\(\) > complex\(\)") + with pytest.raises(TypeError, match=msg): + algos.factorize(x17[::-1], sort=True) def test_float64_factorize(self, writable): data = np.array([1.0, 1e8, 1.0, 1e-8, 1e8, 1.0], dtype=np.float64) @@ -589,9 +594,14 @@ class TestIsin(object): def test_invalid(self): - pytest.raises(TypeError, lambda: algos.isin(1, 1)) - pytest.raises(TypeError, lambda: algos.isin(1, [1])) - pytest.raises(TypeError, lambda: algos.isin([1], 1)) + msg = (r"only list-like objects are allowed to be passed to isin\(\)," + r" you passed a \[int\]") + with pytest.raises(TypeError, match=msg): + algos.isin(1, 1) + with pytest.raises(TypeError, match=msg): + algos.isin(1, [1]) + with pytest.raises(TypeError, match=msg): + algos.isin([1], 1) 
def test_basic(self): @@ -819,8 +829,9 @@ def test_value_counts_dtypes(self): result = algos.value_counts(Series([1, 1., '1'])) # object assert len(result) == 2 - pytest.raises(TypeError, lambda s: algos.value_counts(s, bins=1), - ['1', 1]) + msg = "bins argument only works with numeric data" + with pytest.raises(TypeError, match=msg): + algos.value_counts(['1', 1], bins=1) def test_value_counts_nat(self): td = Series([np.timedelta64(10000), pd.NaT], dtype='timedelta64[ns]') @@ -1222,7 +1233,7 @@ def test_group_var_constant(self): class TestGroupVarFloat64(GroupVarTestMixin): __test__ = True - algo = libgroupby.group_var_float64 + algo = staticmethod(libgroupby.group_var_float64) dtype = np.float64 rtol = 1e-5 @@ -1245,7 +1256,7 @@ def test_group_var_large_inputs(self): class TestGroupVarFloat32(GroupVarTestMixin): __test__ = True - algo = libgroupby.group_var_float32 + algo = staticmethod(libgroupby.group_var_float32) dtype = np.float32 rtol = 1e-2 @@ -1484,6 +1495,7 @@ def test_too_many_ndims(self): algos.rank(arr) @pytest.mark.single + @pytest.mark.high_memory @pytest.mark.parametrize('values', [ np.arange(2**24 + 1), np.arange(2**25 + 2).reshape(2**24 + 1, 2)], diff --git a/pandas/tests/test_config.py b/pandas/tests/test_config.py index 54db3887850ea..baca66e0361ad 100644 --- a/pandas/tests/test_config.py +++ b/pandas/tests/test_config.py @@ -3,7 +3,10 @@ import pytest +from pandas.compat import PY2 + import pandas as pd +from pandas.core.config import OptionError class TestConfig(object): @@ -48,26 +51,35 @@ def test_is_one_of_factory(self): v(12) v(None) - pytest.raises(ValueError, v, 1.1) + msg = r"Value must be one of None\|12" + with pytest.raises(ValueError, match=msg): + v(1.1) def test_register_option(self): self.cf.register_option('a', 1, 'doc') # can't register an already registered option - pytest.raises(KeyError, self.cf.register_option, 'a', 1, 'doc') + msg = "Option 'a' has already been registered" + with pytest.raises(OptionError, match=msg): + 
self.cf.register_option('a', 1, 'doc') # can't register an already registered option - pytest.raises(KeyError, self.cf.register_option, 'a.b.c.d1', 1, - 'doc') - pytest.raises(KeyError, self.cf.register_option, 'a.b.c.d2', 1, - 'doc') + msg = "Path prefix to option 'a' is already an option" + with pytest.raises(OptionError, match=msg): + self.cf.register_option('a.b.c.d1', 1, 'doc') + with pytest.raises(OptionError, match=msg): + self.cf.register_option('a.b.c.d2', 1, 'doc') # no python keywords - pytest.raises(ValueError, self.cf.register_option, 'for', 0) - pytest.raises(ValueError, self.cf.register_option, 'a.for.b', 0) + msg = "for is a python keyword" + with pytest.raises(ValueError, match=msg): + self.cf.register_option('for', 0) + with pytest.raises(ValueError, match=msg): + self.cf.register_option('a.for.b', 0) # must be valid identifier (ensure attribute access works) - pytest.raises(ValueError, self.cf.register_option, - 'Oh my Goddess!', 0) + msg = "oh my goddess! is not a valid identifier" + with pytest.raises(ValueError, match=msg): + self.cf.register_option('Oh my Goddess!', 0) # we can register options several levels deep # without predefining the intermediate steps @@ -90,7 +102,9 @@ def test_describe_option(self): self.cf.register_option('l', "foo") # non-existent keys raise KeyError - pytest.raises(KeyError, self.cf.describe_option, 'no.such.key') + msg = r"No such keys\(s\)" + with pytest.raises(OptionError, match=msg): + self.cf.describe_option('no.such.key') # we can get the description for any key we registered assert 'doc' in self.cf.describe_option('a', _print_desc=False) @@ -122,7 +136,9 @@ def test_case_insensitive(self): assert self.cf.get_option('kAnBaN') == 2 # gets of non-existent keys fail - pytest.raises(KeyError, self.cf.get_option, 'no_such_option') + msg = r"No such keys\(s\): 'no_such_option'" + with pytest.raises(OptionError, match=msg): + self.cf.get_option('no_such_option') self.cf.deprecate_option('KanBan') assert 
self.cf._is_deprecated('kAnBaN') @@ -138,7 +154,9 @@ def test_get_option(self): assert self.cf.get_option('b.b') is None # gets of non-existent keys fail - pytest.raises(KeyError, self.cf.get_option, 'no_such_option') + msg = r"No such keys\(s\): 'no_such_option'" + with pytest.raises(OptionError, match=msg): + self.cf.get_option('no_such_option') def test_set_option(self): self.cf.register_option('a', 1, 'doc') @@ -157,16 +175,24 @@ def test_set_option(self): assert self.cf.get_option('b.c') == 'wurld' assert self.cf.get_option('b.b') == 1.1 - pytest.raises(KeyError, self.cf.set_option, 'no.such.key', None) + msg = r"No such keys\(s\): 'no.such.key'" + with pytest.raises(OptionError, match=msg): + self.cf.set_option('no.such.key', None) def test_set_option_empty_args(self): - pytest.raises(ValueError, self.cf.set_option) + msg = "Must provide an even number of non-keyword arguments" + with pytest.raises(ValueError, match=msg): + self.cf.set_option() def test_set_option_uneven_args(self): - pytest.raises(ValueError, self.cf.set_option, 'a.b', 2, 'b.c') + msg = "Must provide an even number of non-keyword arguments" + with pytest.raises(ValueError, match=msg): + self.cf.set_option('a.b', 2, 'b.c') def test_set_option_invalid_single_argument_type(self): - pytest.raises(ValueError, self.cf.set_option, 2) + msg = "Must provide an even number of non-keyword arguments" + with pytest.raises(ValueError, match=msg): + self.cf.set_option(2) def test_set_option_multiple(self): self.cf.register_option('a', 1, 'doc') @@ -183,27 +209,36 @@ def test_set_option_multiple(self): assert self.cf.get_option('b.c') is None assert self.cf.get_option('b.b') == 10.0 + @pytest.mark.skipif(PY2, reason="pytest.raises match regex fails") def test_validation(self): self.cf.register_option('a', 1, 'doc', validator=self.cf.is_int) self.cf.register_option('b.c', 'hullo', 'doc2', validator=self.cf.is_text) - pytest.raises(ValueError, self.cf.register_option, 'a.b.c.d2', - 'NO', 'doc', 
validator=self.cf.is_int) + msg = "Value must have type '<class 'int'>'" + with pytest.raises(ValueError, match=msg): + self.cf.register_option( + 'a.b.c.d2', 'NO', 'doc', validator=self.cf.is_int) self.cf.set_option('a', 2) # int is_int self.cf.set_option('b.c', 'wurld') # str is_str - pytest.raises( - ValueError, self.cf.set_option, 'a', None) # None not is_int - pytest.raises(ValueError, self.cf.set_option, 'a', 'ab') - pytest.raises(ValueError, self.cf.set_option, 'b.c', 1) + # None not is_int + with pytest.raises(ValueError, match=msg): + self.cf.set_option('a', None) + with pytest.raises(ValueError, match=msg): + self.cf.set_option('a', 'ab') + + msg = r"Value must be an instance of <class 'str'>\|<class 'bytes'>" + with pytest.raises(ValueError, match=msg): + self.cf.set_option('b.c', 1) validator = self.cf.is_one_of_factory([None, self.cf.is_callable]) self.cf.register_option('b', lambda: None, 'doc', validator=validator) self.cf.set_option('b', '%.1f'.format) # Formatter is callable self.cf.set_option('b', None) # Formatter is none (default) - pytest.raises(ValueError, self.cf.set_option, 'b', '%.1f') + with pytest.raises(ValueError, match="Value must be a callable"): + self.cf.set_option('b', '%.1f') def test_reset_option(self): self.cf.register_option('a', 1, 'doc', validator=self.cf.is_int) @@ -267,8 +302,9 @@ def test_deprecate_option(self): assert 'eprecated' in str(w[-1]) # we get the default message assert 'nifty_ver' in str(w[-1]) # with the removal_ver quoted - pytest.raises( - KeyError, self.cf.deprecate_option, 'a') # can't depr. 
twice + msg = "Option 'a' has already been defined as deprecated" + with pytest.raises(OptionError, match=msg): + self.cf.deprecate_option('a') self.cf.deprecate_option('b.c', 'zounds!') with warnings.catch_warnings(record=True) as w: @@ -374,12 +410,6 @@ def eq(val): def test_attribute_access(self): holder = [] - def f(): - options.b = 1 - - def f2(): - options.display = 1 - def f3(key): holder.append(True) @@ -397,8 +427,11 @@ def f3(key): self.cf.reset_option("a") assert options.a == self.cf.get_option("a", 0) - pytest.raises(KeyError, f) - pytest.raises(KeyError, f2) + msg = "You can only set the value of existing options" + with pytest.raises(OptionError, match=msg): + options.b = 1 + with pytest.raises(OptionError, match=msg): + options.display = 1 # make sure callback kicks when using this form of setting options.c = 1 @@ -429,5 +462,6 @@ def test_option_context_scope(self): def test_dictwrapper_getattr(self): options = self.cf.options # GH 19789 - pytest.raises(self.cf.OptionError, getattr, options, 'bananas') + with pytest.raises(OptionError, match="No such option"): + options.bananas assert not hasattr(options, 'bananas') diff --git a/pandas/tests/test_downstream.py b/pandas/tests/test_downstream.py index e22b9a0ef25e3..92b4e5a99041a 100644 --- a/pandas/tests/test_downstream.py +++ b/pandas/tests/test_downstream.py @@ -9,7 +9,7 @@ import numpy as np # noqa import pytest -from pandas.compat import PY36 +from pandas.compat import PY2, PY36, is_platform_windows from pandas import DataFrame from pandas.util import testing as tm @@ -58,6 +58,8 @@ def test_xarray(df): assert df.to_xarray() is not None +@pytest.mark.skipif(is_platform_windows() and PY2, + reason="Broken on Windows / Py2") def test_oo_optimizable(): # GH 21071 subprocess.check_call([sys.executable, "-OO", "-c", "import pandas"]) diff --git a/pandas/tests/test_expressions.py b/pandas/tests/test_expressions.py index f5aa0b0b3c9c8..7a2680135ea80 100644 --- a/pandas/tests/test_expressions.py +++ 
b/pandas/tests/test_expressions.py @@ -3,19 +3,17 @@ import operator import re -from warnings import catch_warnings, simplefilter import numpy as np from numpy.random import randn import pytest from pandas import _np_version_under1p13, compat -from pandas.core.api import DataFrame, Panel +from pandas.core.api import DataFrame from pandas.core.computation import expressions as expr import pandas.util.testing as tm from pandas.util.testing import ( - assert_almost_equal, assert_frame_equal, assert_panel_equal, - assert_series_equal) + assert_almost_equal, assert_frame_equal, assert_series_equal) from pandas.io.formats.printing import pprint_thing @@ -39,23 +37,6 @@ _integer2 = DataFrame(np.random.randint(1, 100, size=(101, 4)), columns=list('ABCD'), dtype='int64') -with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - _frame_panel = Panel(dict(ItemA=_frame.copy(), - ItemB=(_frame.copy() + 3), - ItemC=_frame.copy(), - ItemD=_frame.copy())) - _frame2_panel = Panel(dict(ItemA=_frame2.copy(), - ItemB=(_frame2.copy() + 3), - ItemC=_frame2.copy(), - ItemD=_frame2.copy())) - _integer_panel = Panel(dict(ItemA=_integer, - ItemB=(_integer + 34).astype('int64'))) - _integer2_panel = Panel(dict(ItemA=_integer2, - ItemB=(_integer2 + 34).astype('int64'))) - _mixed_panel = Panel(dict(ItemA=_mixed, ItemB=(_mixed + 3))) - _mixed2_panel = Panel(dict(ItemA=_mixed2, ItemB=(_mixed2 + 3))) - @pytest.mark.skipif(not expr._USE_NUMEXPR, reason='not using numexpr') class TestExpressions(object): @@ -173,42 +154,18 @@ def run_series(self, ser, other, binary_comp=None, **kwargs): # self.run_binary(ser, binary_comp, assert_frame_equal, # test_flex=True, **kwargs) - def run_panel(self, panel, other, binary_comp=None, run_binary=True, - assert_func=assert_panel_equal, **kwargs): - self.run_arithmetic(panel, other, assert_func, test_flex=False, - **kwargs) - self.run_arithmetic(panel, other, assert_func, test_flex=True, - **kwargs) - if run_binary: - if binary_comp is None: - 
binary_comp = other + 1 - self.run_binary(panel, binary_comp, assert_func, - test_flex=False, **kwargs) - self.run_binary(panel, binary_comp, assert_func, - test_flex=True, **kwargs) - def test_integer_arithmetic_frame(self): self.run_frame(self.integer, self.integer) def test_integer_arithmetic_series(self): self.run_series(self.integer.iloc[:, 0], self.integer.iloc[:, 0]) - @pytest.mark.slow - @pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning") - def test_integer_panel(self): - self.run_panel(_integer2_panel, np.random.randint(1, 100)) - def test_float_arithemtic_frame(self): self.run_frame(self.frame2, self.frame2) def test_float_arithmetic_series(self): self.run_series(self.frame2.iloc[:, 0], self.frame2.iloc[:, 0]) - @pytest.mark.slow - @pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning") - def test_float_panel(self): - self.run_panel(_frame2_panel, np.random.randn() + 0.1, binary_comp=0.8) - def test_mixed_arithmetic_frame(self): # TODO: FIGURE OUT HOW TO GET IT TO WORK... 
# can't do arithmetic because comparison methods try to do *entire* @@ -219,12 +176,6 @@ def test_mixed_arithmetic_series(self): for col in self.mixed2.columns: self.run_series(self.mixed2[col], self.mixed2[col], binary_comp=4) - @pytest.mark.slow - @pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning") - def test_mixed_panel(self): - self.run_panel(_mixed2_panel, np.random.randint(1, 100), - binary_comp=-2) - def test_float_arithemtic(self): self.run_arithmetic(self.frame, self.frame, assert_frame_equal) self.run_arithmetic(self.frame.iloc[:, 0], self.frame.iloc[:, 0], diff --git a/pandas/tests/test_multilevel.py b/pandas/tests/test_multilevel.py index a7bbbbb5033ac..a9a59c6d95373 100644 --- a/pandas/tests/test_multilevel.py +++ b/pandas/tests/test_multilevel.py @@ -15,7 +15,7 @@ from pandas.core.dtypes.common import is_float_dtype, is_integer_dtype import pandas as pd -from pandas import DataFrame, Panel, Series, Timestamp, isna +from pandas import DataFrame, Series, Timestamp, isna from pandas.core.index import Index, MultiIndex import pandas.util.testing as tm @@ -818,18 +818,6 @@ def test_swaplevel(self): exp = self.frame.swaplevel('first', 'second').T tm.assert_frame_equal(swapped, exp) - def test_swaplevel_panel(self): - with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - panel = Panel({'ItemA': self.frame, 'ItemB': self.frame * 2}) - expected = panel.copy() - expected.major_axis = expected.major_axis.swaplevel(0, 1) - - for result in (panel.swaplevel(axis='major'), - panel.swaplevel(0, axis='major'), - panel.swaplevel(0, 1, axis='major')): - tm.assert_panel_equal(result, expected) - def test_reorder_levels(self): result = self.ymd.reorder_levels(['month', 'day', 'year']) expected = self.ymd.swaplevel(0, 1).swaplevel(1, 2) @@ -898,8 +886,11 @@ def test_count(self): tm.assert_series_equal(result, expect, check_names=False) assert result.index.name == 'a' - pytest.raises(KeyError, series.count, 'x') - pytest.raises(KeyError, 
frame.count, level='x') + msg = "Level x not found" + with pytest.raises(KeyError, match=msg): + series.count('x') + with pytest.raises(KeyError, match=msg): + frame.count(level='x') @pytest.mark.parametrize('op', AGG_FUNCTIONS) @pytest.mark.parametrize('level', [0, 1]) @@ -1131,7 +1122,8 @@ def test_level_with_tuples(self): tm.assert_series_equal(result, expected) tm.assert_series_equal(result2, expected) - pytest.raises(KeyError, series.__getitem__, (('foo', 'bar', 0), 2)) + with pytest.raises(KeyError, match=r"^\(\('foo', 'bar', 0\), 2\)$"): + series[('foo', 'bar', 0), 2] result = frame.loc[('foo', 'bar', 0)] result2 = frame.xs(('foo', 'bar', 0)) diff --git a/pandas/tests/test_nanops.py b/pandas/tests/test_nanops.py index cf5ef6cf15eca..d1893b7efbc41 100644 --- a/pandas/tests/test_nanops.py +++ b/pandas/tests/test_nanops.py @@ -7,6 +7,7 @@ import numpy as np import pytest +from pandas.compat import PY2 from pandas.compat.numpy import _np_version_under1p13 import pandas.util._test_decorators as td @@ -728,6 +729,7 @@ def test_numeric_values(self): # Test complex assert nanops._ensure_numeric(1 + 2j) == 1 + 2j + @pytest.mark.skipif(PY2, reason="pytest.raises match regex fails") def test_ndarray(self): # Test numeric ndarray values = np.array([1, 2, 3]) @@ -743,7 +745,9 @@ def test_ndarray(self): # Test non-convertible string ndarray s_values = np.array(['foo', 'bar', 'baz'], dtype=object) - pytest.raises(ValueError, lambda: nanops._ensure_numeric(s_values)) + msg = r"could not convert string to float: '(foo|baz)'" + with pytest.raises(ValueError, match=msg): + nanops._ensure_numeric(s_values) def test_convertable_values(self): assert np.allclose(nanops._ensure_numeric('1'), 1.0) @@ -751,9 +755,15 @@ def test_convertable_values(self): assert np.allclose(nanops._ensure_numeric('1+1j'), 1 + 1j) def test_non_convertable_values(self): - pytest.raises(TypeError, lambda: nanops._ensure_numeric('foo')) - pytest.raises(TypeError, lambda: nanops._ensure_numeric({})) - 
pytest.raises(TypeError, lambda: nanops._ensure_numeric([])) + msg = "Could not convert foo to numeric" + with pytest.raises(TypeError, match=msg): + nanops._ensure_numeric('foo') + msg = "Could not convert {} to numeric" + with pytest.raises(TypeError, match=msg): + nanops._ensure_numeric({}) + msg = r"Could not convert \[\] to numeric" + with pytest.raises(TypeError, match=msg): + nanops._ensure_numeric([]) class TestNanvarFixedValues(object): diff --git a/pandas/tests/test_panel.py b/pandas/tests/test_panel.py index ba0ad72e624f7..b418091de8d7f 100644 --- a/pandas/tests/test_panel.py +++ b/pandas/tests/test_panel.py @@ -1,57 +1,28 @@ # -*- coding: utf-8 -*- # pylint: disable=W0612,E1101 - +from collections import OrderedDict from datetime import datetime -import operator -from warnings import catch_warnings, simplefilter import numpy as np import pytest -from pandas.compat import OrderedDict, StringIO, lrange, range, signature -import pandas.util._test_decorators as td - -from pandas.core.dtypes.common import is_float_dtype +from pandas.compat import lrange -from pandas import ( - DataFrame, Index, MultiIndex, Series, compat, date_range, isna, notna) -from pandas.core.nanops import nanall, nanany +from pandas import DataFrame, MultiIndex, Series, date_range, notna import pandas.core.panel as panelm from pandas.core.panel import Panel import pandas.util.testing as tm from pandas.util.testing import ( - assert_almost_equal, assert_frame_equal, assert_panel_equal, - assert_series_equal, ensure_clean, makeCustomDataframe as mkdf, - makeMixedDataFrame) - -from pandas.io.formats.printing import pprint_thing -from pandas.tseries.offsets import BDay, MonthEnd + assert_almost_equal, assert_frame_equal, assert_series_equal, + makeCustomDataframe as mkdf, makeMixedDataFrame) - -def make_test_panel(): - with catch_warnings(record=True): - simplefilter("ignore", FutureWarning) - _panel = tm.makePanel() - tm.add_nans(_panel) - _panel = _panel.copy() - return _panel +from 
pandas.tseries.offsets import MonthEnd @pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning") class PanelTests(object): panel = None - def test_pickle(self): - unpickled = tm.round_trip_pickle(self.panel) - assert_frame_equal(unpickled['ItemA'], self.panel['ItemA']) - - def test_rank(self): - pytest.raises(NotImplementedError, lambda: self.panel.rank()) - - def test_cumsum(self): - cumsum = self.panel.cumsum() - assert_frame_equal(cumsum['ItemA'], self.panel['ItemA'].cumsum()) - def not_hashable(self): c_empty = Panel() c = Panel(Panel([[[1]]])) @@ -59,298 +30,9 @@ def not_hashable(self): pytest.raises(TypeError, hash, c) -@pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning") -class SafeForLongAndSparse(object): - - def test_repr(self): - repr(self.panel) - - def test_copy_names(self): - for attr in ('major_axis', 'minor_axis'): - getattr(self.panel, attr).name = None - cp = self.panel.copy() - getattr(cp, attr).name = 'foo' - assert getattr(self.panel, attr).name is None - - def test_iter(self): - tm.equalContents(list(self.panel), self.panel.items) - - def test_count(self): - f = lambda s: notna(s).sum() - self._check_stat_op('count', f, obj=self.panel, has_skipna=False) - - def test_sum(self): - self._check_stat_op('sum', np.sum, skipna_alternative=np.nansum) - - def test_mean(self): - self._check_stat_op('mean', np.mean) - - def test_prod(self): - self._check_stat_op('prod', np.prod, skipna_alternative=np.nanprod) - - @pytest.mark.filterwarnings("ignore:Invalid value:RuntimeWarning") - @pytest.mark.filterwarnings("ignore:All-NaN:RuntimeWarning") - def test_median(self): - def wrapper(x): - if isna(x).any(): - return np.nan - return np.median(x) - - self._check_stat_op('median', wrapper) - - @pytest.mark.filterwarnings("ignore:Invalid value:RuntimeWarning") - def test_min(self): - self._check_stat_op('min', np.min) - - @pytest.mark.filterwarnings("ignore:Invalid value:RuntimeWarning") - def test_max(self): - self._check_stat_op('max', np.max) - - 
@td.skip_if_no_scipy - def test_skew(self): - from scipy.stats import skew - - def this_skew(x): - if len(x) < 3: - return np.nan - return skew(x, bias=False) - - self._check_stat_op('skew', this_skew) - - def test_var(self): - def alt(x): - if len(x) < 2: - return np.nan - return np.var(x, ddof=1) - - self._check_stat_op('var', alt) - - def test_std(self): - def alt(x): - if len(x) < 2: - return np.nan - return np.std(x, ddof=1) - - self._check_stat_op('std', alt) - - def test_sem(self): - def alt(x): - if len(x) < 2: - return np.nan - return np.std(x, ddof=1) / np.sqrt(len(x)) - - self._check_stat_op('sem', alt) - - def _check_stat_op(self, name, alternative, obj=None, has_skipna=True, - skipna_alternative=None): - if obj is None: - obj = self.panel - - # # set some NAs - # obj.loc[5:10] = np.nan - # obj.loc[15:20, -2:] = np.nan - - f = getattr(obj, name) - - if has_skipna: - - skipna_wrapper = tm._make_skipna_wrapper(alternative, - skipna_alternative) - - def wrapper(x): - return alternative(np.asarray(x)) - - for i in range(obj.ndim): - result = f(axis=i, skipna=False) - assert_frame_equal(result, obj.apply(wrapper, axis=i)) - else: - skipna_wrapper = alternative - wrapper = alternative - - for i in range(obj.ndim): - result = f(axis=i) - if name in ['sum', 'prod']: - assert_frame_equal(result, obj.apply(skipna_wrapper, axis=i)) - - pytest.raises(Exception, f, axis=obj.ndim) - - # Unimplemented numeric_only parameter. 
- if 'numeric_only' in signature(f).args: - with pytest.raises(NotImplementedError, match=name): - f(numeric_only=True) - - @pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning") class SafeForSparse(object): - def test_get_axis(self): - assert (self.panel._get_axis(0) is self.panel.items) - assert (self.panel._get_axis(1) is self.panel.major_axis) - assert (self.panel._get_axis(2) is self.panel.minor_axis) - - def test_set_axis(self): - new_items = Index(np.arange(len(self.panel.items))) - new_major = Index(np.arange(len(self.panel.major_axis))) - new_minor = Index(np.arange(len(self.panel.minor_axis))) - - # ensure propagate to potentially prior-cached items too - item = self.panel['ItemA'] - self.panel.items = new_items - - if hasattr(self.panel, '_item_cache'): - assert 'ItemA' not in self.panel._item_cache - assert self.panel.items is new_items - - # TODO: unused? - item = self.panel[0] # noqa - - self.panel.major_axis = new_major - assert self.panel[0].index is new_major - assert self.panel.major_axis is new_major - - # TODO: unused? - item = self.panel[0] # noqa - - self.panel.minor_axis = new_minor - assert self.panel[0].columns is new_minor - assert self.panel.minor_axis is new_minor - - def test_get_axis_number(self): - assert self.panel._get_axis_number('items') == 0 - assert self.panel._get_axis_number('major') == 1 - assert self.panel._get_axis_number('minor') == 2 - - with pytest.raises(ValueError, match="No axis named foo"): - self.panel._get_axis_number('foo') - - with pytest.raises(ValueError, match="No axis named foo"): - self.panel.__ge__(self.panel, axis='foo') - - def test_get_axis_name(self): - assert self.panel._get_axis_name(0) == 'items' - assert self.panel._get_axis_name(1) == 'major_axis' - assert self.panel._get_axis_name(2) == 'minor_axis' - - def test_get_plane_axes(self): - # what to do here? 
- - index, columns = self.panel._get_plane_axes('items') - index, columns = self.panel._get_plane_axes('major_axis') - index, columns = self.panel._get_plane_axes('minor_axis') - index, columns = self.panel._get_plane_axes(0) - - def test_truncate(self): - dates = self.panel.major_axis - start, end = dates[1], dates[5] - - trunced = self.panel.truncate(start, end, axis='major') - expected = self.panel['ItemA'].truncate(start, end) - - assert_frame_equal(trunced['ItemA'], expected) - - trunced = self.panel.truncate(before=start, axis='major') - expected = self.panel['ItemA'].truncate(before=start) - - assert_frame_equal(trunced['ItemA'], expected) - - trunced = self.panel.truncate(after=end, axis='major') - expected = self.panel['ItemA'].truncate(after=end) - - assert_frame_equal(trunced['ItemA'], expected) - - def test_arith(self): - self._test_op(self.panel, operator.add) - self._test_op(self.panel, operator.sub) - self._test_op(self.panel, operator.mul) - self._test_op(self.panel, operator.truediv) - self._test_op(self.panel, operator.floordiv) - self._test_op(self.panel, operator.pow) - - self._test_op(self.panel, lambda x, y: y + x) - self._test_op(self.panel, lambda x, y: y - x) - self._test_op(self.panel, lambda x, y: y * x) - self._test_op(self.panel, lambda x, y: y / x) - self._test_op(self.panel, lambda x, y: y ** x) - - self._test_op(self.panel, lambda x, y: x + y) # panel + 1 - self._test_op(self.panel, lambda x, y: x - y) # panel - 1 - self._test_op(self.panel, lambda x, y: x * y) # panel * 1 - self._test_op(self.panel, lambda x, y: x / y) # panel / 1 - self._test_op(self.panel, lambda x, y: x ** y) # panel ** 1 - - pytest.raises(Exception, self.panel.__add__, - self.panel['ItemA']) - - @staticmethod - def _test_op(panel, op): - result = op(panel, 1) - assert_frame_equal(result['ItemA'], op(panel['ItemA'], 1)) - - def test_keys(self): - tm.equalContents(list(self.panel.keys()), self.panel.items) - - def test_iteritems(self): - # Test panel.iteritems(), 
aka panel.iteritems() - # just test that it works - for k, v in self.panel.iteritems(): - pass - - assert len(list(self.panel.iteritems())) == len(self.panel.items) - - def test_combineFrame(self): - def check_op(op, name): - # items - df = self.panel['ItemA'] - - func = getattr(self.panel, name) - - result = func(df, axis='items') - - assert_frame_equal( - result['ItemB'], op(self.panel['ItemB'], df)) - - # major - xs = self.panel.major_xs(self.panel.major_axis[0]) - result = func(xs, axis='major') - - idx = self.panel.major_axis[1] - - assert_frame_equal(result.major_xs(idx), - op(self.panel.major_xs(idx), xs)) - - # minor - xs = self.panel.minor_xs(self.panel.minor_axis[0]) - result = func(xs, axis='minor') - - idx = self.panel.minor_axis[1] - - assert_frame_equal(result.minor_xs(idx), - op(self.panel.minor_xs(idx), xs)) - - ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'pow', 'mod'] - if not compat.PY3: - ops.append('div') - - for op in ops: - try: - check_op(getattr(operator, op), op) - except AttributeError: - pprint_thing("Failing operation: %r" % op) - raise - if compat.PY3: - try: - check_op(operator.truediv, 'div') - except AttributeError: - pprint_thing("Failing operation: %r" % 'div') - raise - - def test_combinePanel(self): - result = self.panel.add(self.panel) - assert_panel_equal(result, self.panel * 2) - - def test_neg(self): - assert_panel_equal(-self.panel, self.panel * -1) - # issue 7692 def test_raise_when_not_implemented(self): p = Panel(np.arange(3 * 4 * 5).reshape(3, 4, 5), @@ -364,84 +46,11 @@ def test_raise_when_not_implemented(self): with pytest.raises(NotImplementedError): getattr(p, op)(d, axis=0) - def test_select(self): - p = self.panel - - # select items - with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): - result = p.select(lambda x: x in ('ItemA', 'ItemC'), axis='items') - expected = p.reindex(items=['ItemA', 'ItemC']) - assert_panel_equal(result, expected) - - # select major_axis - with 
tm.assert_produces_warning(FutureWarning, check_stacklevel=False): - result = p.select(lambda x: x >= datetime( - 2000, 1, 15), axis='major') - new_major = p.major_axis[p.major_axis >= datetime(2000, 1, 15)] - expected = p.reindex(major=new_major) - assert_panel_equal(result, expected) - - # select minor_axis - with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): - result = p.select(lambda x: x in ('D', 'A'), axis=2) - expected = p.reindex(minor=['A', 'D']) - assert_panel_equal(result, expected) - - # corner case, empty thing - with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): - result = p.select(lambda x: x in ('foo', ), axis='items') - assert_panel_equal(result, p.reindex(items=[])) - - def test_get_value(self): - for item in self.panel.items: - for mjr in self.panel.major_axis[::2]: - for mnr in self.panel.minor_axis: - with tm.assert_produces_warning(FutureWarning, - check_stacklevel=False): - result = self.panel.get_value(item, mjr, mnr) - expected = self.panel[item][mnr][mjr] - assert_almost_equal(result, expected) - - def test_abs(self): - - result = self.panel.abs() - result2 = abs(self.panel) - expected = np.abs(self.panel) - assert_panel_equal(result, expected) - assert_panel_equal(result2, expected) - - df = self.panel['ItemA'] - result = df.abs() - result2 = abs(df) - expected = np.abs(df) - assert_frame_equal(result, expected) - assert_frame_equal(result2, expected) - - s = df['A'] - result = s.abs() - result2 = abs(s) - expected = np.abs(s) - assert_series_equal(result, expected) - assert_series_equal(result2, expected) - assert result.name == 'A' - assert result2.name == 'A' - @pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning") class CheckIndexing(object): - def test_getitem(self): - pytest.raises(Exception, self.panel.__getitem__, 'ItemQ') - def test_delitem_and_pop(self): - expected = self.panel['ItemA'] - result = self.panel.pop('ItemA') - assert_frame_equal(expected, result) - assert 'ItemA' not 
in self.panel.items - - del self.panel['ItemB'] - assert 'ItemB' not in self.panel.items - pytest.raises(Exception, self.panel.__delitem__, 'ItemB') values = np.empty((3, 3, 3)) values[0] = 0 @@ -468,38 +77,6 @@ def test_delitem_and_pop(self): tm.assert_frame_equal(panelc[0], panel[0]) def test_setitem(self): - lp = self.panel.filter(['ItemA', 'ItemB']).to_frame() - - with pytest.raises(TypeError): - self.panel['ItemE'] = lp - - # DataFrame - df = self.panel['ItemA'][2:].filter(items=['A', 'B']) - self.panel['ItemF'] = df - self.panel['ItemE'] = df - - df2 = self.panel['ItemF'] - - assert_frame_equal(df, df2.reindex( - index=df.index, columns=df.columns)) - - # scalar - self.panel['ItemG'] = 1 - self.panel['ItemE'] = True - assert self.panel['ItemG'].values.dtype == np.int64 - assert self.panel['ItemE'].values.dtype == np.bool_ - - # object dtype - self.panel['ItemQ'] = 'foo' - assert self.panel['ItemQ'].values.dtype == np.object_ - - # boolean dtype - self.panel['ItemP'] = self.panel['ItemA'] > 0 - assert self.panel['ItemP'].values.dtype == np.bool_ - - pytest.raises(TypeError, self.panel.__setitem__, 'foo', - self.panel.loc[['ItemP']]) - # bad shape p = Panel(np.random.randn(4, 3, 2)) msg = (r"shape of value must be \(3, 2\), " @@ -537,159 +114,9 @@ def test_set_minor_major(self): assert_frame_equal(panel.loc[:, 'NewMajor', :], newmajor.astype(object)) - def test_major_xs(self): - ref = self.panel['ItemA'] - - idx = self.panel.major_axis[5] - xs = self.panel.major_xs(idx) - - result = xs['ItemA'] - assert_series_equal(result, ref.xs(idx), check_names=False) - assert result.name == 'ItemA' - - # not contained - idx = self.panel.major_axis[0] - BDay() - pytest.raises(Exception, self.panel.major_xs, idx) - - def test_major_xs_mixed(self): - self.panel['ItemD'] = 'foo' - xs = self.panel.major_xs(self.panel.major_axis[0]) - assert xs['ItemA'].dtype == np.float64 - assert xs['ItemD'].dtype == np.object_ - - def test_minor_xs(self): - ref = self.panel['ItemA'] - - idx = 
self.panel.minor_axis[1] - xs = self.panel.minor_xs(idx) - - assert_series_equal(xs['ItemA'], ref[idx], check_names=False) - - # not contained - pytest.raises(Exception, self.panel.minor_xs, 'E') - - def test_minor_xs_mixed(self): - self.panel['ItemD'] = 'foo' - - xs = self.panel.minor_xs('D') - assert xs['ItemA'].dtype == np.float64 - assert xs['ItemD'].dtype == np.object_ - - def test_xs(self): - itemA = self.panel.xs('ItemA', axis=0) - expected = self.panel['ItemA'] - tm.assert_frame_equal(itemA, expected) - - # Get a view by default. - itemA_view = self.panel.xs('ItemA', axis=0) - itemA_view.values[:] = np.nan - - assert np.isnan(self.panel['ItemA'].values).all() - - # Mixed-type yields a copy. - self.panel['strings'] = 'foo' - result = self.panel.xs('D', axis=2) - assert result._is_copy is not None - - def test_getitem_fancy_labels(self): - p = self.panel - - items = p.items[[1, 0]] - dates = p.major_axis[::2] - cols = ['D', 'C', 'F'] - - # all 3 specified - with catch_warnings(): - simplefilter("ignore", FutureWarning) - # XXX: warning in _validate_read_indexer - assert_panel_equal(p.loc[items, dates, cols], - p.reindex(items=items, major=dates, minor=cols)) - - # 2 specified - assert_panel_equal(p.loc[:, dates, cols], - p.reindex(major=dates, minor=cols)) - - assert_panel_equal(p.loc[items, :, cols], - p.reindex(items=items, minor=cols)) - - assert_panel_equal(p.loc[items, dates, :], - p.reindex(items=items, major=dates)) - - # only 1 - assert_panel_equal(p.loc[items, :, :], p.reindex(items=items)) - - assert_panel_equal(p.loc[:, dates, :], p.reindex(major=dates)) - - assert_panel_equal(p.loc[:, :, cols], p.reindex(minor=cols)) - def test_getitem_fancy_slice(self): pass - def test_getitem_fancy_ints(self): - p = self.panel - - # #1603 - result = p.iloc[:, -1, :] - expected = p.loc[:, p.major_axis[-1], :] - assert_frame_equal(result, expected) - - def test_getitem_fancy_xs(self): - p = self.panel - item = 'ItemB' - - date = p.major_axis[5] - col = 'C' - - # 
get DataFrame - # item - assert_frame_equal(p.loc[item], p[item]) - assert_frame_equal(p.loc[item, :], p[item]) - assert_frame_equal(p.loc[item, :, :], p[item]) - - # major axis, axis=1 - assert_frame_equal(p.loc[:, date], p.major_xs(date)) - assert_frame_equal(p.loc[:, date, :], p.major_xs(date)) - - # minor axis, axis=2 - assert_frame_equal(p.loc[:, :, 'C'], p.minor_xs('C')) - - # get Series - assert_series_equal(p.loc[item, date], p[item].loc[date]) - assert_series_equal(p.loc[item, date, :], p[item].loc[date]) - assert_series_equal(p.loc[item, :, col], p[item][col]) - assert_series_equal(p.loc[:, date, col], p.major_xs(date).loc[col]) - - def test_getitem_fancy_xs_check_view(self): - item = 'ItemB' - date = self.panel.major_axis[5] - - # make sure it's always a view - NS = slice(None, None) - - # DataFrames - comp = assert_frame_equal - self._check_view(item, comp) - self._check_view((item, NS), comp) - self._check_view((item, NS, NS), comp) - self._check_view((NS, date), comp) - self._check_view((NS, date, NS), comp) - self._check_view((NS, NS, 'C'), comp) - - # Series - comp = assert_series_equal - self._check_view((item, date), comp) - self._check_view((item, date, NS), comp) - self._check_view((item, NS, 'C'), comp) - self._check_view((NS, date, 'C'), comp) - - def test_getitem_callable(self): - p = self.panel - # GH 12533 - - assert_frame_equal(p[lambda x: 'ItemB'], p.loc['ItemB']) - assert_panel_equal(p[lambda x: ['ItemB', 'ItemC']], - p.loc[['ItemB', 'ItemC']]) - def test_ix_setitem_slice_dataframe(self): a = Panel(items=[1, 2, 3], major_axis=[11, 22, 33], minor_axis=[111, 222, 333]) @@ -719,43 +146,6 @@ def test_ix_align(self): assert_series_equal(df.loc[0, 0, :].reindex(b.index), b) def test_ix_frame_align(self): - p_orig = tm.makePanel() - df = p_orig.iloc[0].copy() - assert_frame_equal(p_orig['ItemA'], df) - - p = p_orig.copy() - p.iloc[0, :, :] = df - assert_panel_equal(p, p_orig) - - p = p_orig.copy() - p.iloc[0] = df - assert_panel_equal(p, 
p_orig) - - p = p_orig.copy() - p.iloc[0, :, :] = df - assert_panel_equal(p, p_orig) - - p = p_orig.copy() - p.iloc[0] = df - assert_panel_equal(p, p_orig) - - p = p_orig.copy() - p.loc['ItemA'] = df - assert_panel_equal(p, p_orig) - - p = p_orig.copy() - p.loc['ItemA', :, :] = df - assert_panel_equal(p, p_orig) - - p = p_orig.copy() - p['ItemA'] = df - assert_panel_equal(p, p_orig) - - p = p_orig.copy() - p.iloc[0, [0, 1, 3, 5], -2:] = df - out = p.iloc[0, [0, 1, 3, 5], -2:] - assert_frame_equal(out, df.iloc[[0, 1, 3, 5], [2, 3]]) - # GH3830, panel assignent by values/frame for dtype in ['float64', 'int64']: @@ -782,13 +172,6 @@ def test_ix_frame_align(self): tm.assert_frame_equal(panel.loc['a1'], df1) tm.assert_frame_equal(panel.loc['a2'], df2) - def _check_view(self, indexer, comp): - cp = self.panel.copy() - obj = cp.loc[indexer] - obj.values[:] = 0 - assert (obj.values == 0).all() - comp(cp.loc[indexer].reindex_like(obj), obj) - def test_logical_with_nas(self): d = Panel({'ItemA': {'a': [np.nan, False]}, 'ItemB': {'a': [True, True]}}) @@ -802,157 +185,11 @@ def test_logical_with_nas(self): expected = DataFrame({'a': [True, True]}) assert_frame_equal(result, expected) - def test_neg(self): - assert_panel_equal(-self.panel, -1 * self.panel) - - def test_invert(self): - assert_panel_equal(-(self.panel < 0), ~(self.panel < 0)) - - def test_comparisons(self): - p1 = tm.makePanel() - p2 = tm.makePanel() - - tp = p1.reindex(items=p1.items + ['foo']) - df = p1[p1.items[0]] - - def test_comp(func): - - # versus same index - result = func(p1, p2) - tm.assert_numpy_array_equal(result.values, - func(p1.values, p2.values)) - - # versus non-indexed same objs - pytest.raises(Exception, func, p1, tp) - - # versus different objs - pytest.raises(Exception, func, p1, df) - - # versus scalar - result3 = func(self.panel, 0) - tm.assert_numpy_array_equal(result3.values, - func(self.panel.values, 0)) - - with np.errstate(invalid='ignore'): - test_comp(operator.eq) - 
test_comp(operator.ne) - test_comp(operator.lt) - test_comp(operator.gt) - test_comp(operator.ge) - test_comp(operator.le) - - def test_get_value(self): - for item in self.panel.items: - for mjr in self.panel.major_axis[::2]: - for mnr in self.panel.minor_axis: - with tm.assert_produces_warning(FutureWarning, - check_stacklevel=False): - result = self.panel.get_value(item, mjr, mnr) - expected = self.panel[item][mnr][mjr] - assert_almost_equal(result, expected) - with catch_warnings(): - simplefilter("ignore", FutureWarning) - msg = "There must be an argument for each axis" - with pytest.raises(TypeError, match=msg): - self.panel.get_value('a') - - def test_set_value(self): - for item in self.panel.items: - for mjr in self.panel.major_axis[::2]: - for mnr in self.panel.minor_axis: - with tm.assert_produces_warning(FutureWarning, - check_stacklevel=False): - self.panel.set_value(item, mjr, mnr, 1.) - tm.assert_almost_equal(self.panel[item][mnr][mjr], 1.) - - # resize - with catch_warnings(): - simplefilter("ignore", FutureWarning) - res = self.panel.set_value('ItemE', 'foo', 'bar', 1.5) - assert isinstance(res, Panel) - assert res is not self.panel - assert res.get_value('ItemE', 'foo', 'bar') == 1.5 - - res3 = self.panel.set_value('ItemE', 'foobar', 'baz', 5) - assert is_float_dtype(res3['ItemE'].values) - - msg = ("There must be an argument for each " - "axis plus the value provided") - with pytest.raises(TypeError, match=msg): - self.panel.set_value('a') - @pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning") -class TestPanel(PanelTests, CheckIndexing, SafeForLongAndSparse, - SafeForSparse): - - def setup_method(self, method): - self.panel = make_test_panel() - self.panel.major_axis.name = None - self.panel.minor_axis.name = None - self.panel.items.name = None - - def test_constructor(self): - # with BlockManager - wp = Panel(self.panel._data) - assert wp._data is self.panel._data - - wp = Panel(self.panel._data, copy=True) - assert wp._data is not 
self.panel._data - tm.assert_panel_equal(wp, self.panel) - - # strings handled prop - wp = Panel([[['foo', 'foo', 'foo', ], ['foo', 'foo', 'foo']]]) - assert wp.values.dtype == np.object_ - - vals = self.panel.values - - # no copy - wp = Panel(vals) - assert wp.values is vals - - # copy - wp = Panel(vals, copy=True) - assert wp.values is not vals - - # GH #8285, test when scalar data is used to construct a Panel - # if dtype is not passed, it should be inferred - value_and_dtype = [(1, 'int64'), (3.14, 'float64'), - ('foo', np.object_)] - for (val, dtype) in value_and_dtype: - wp = Panel(val, items=range(2), major_axis=range(3), - minor_axis=range(4)) - vals = np.empty((2, 3, 4), dtype=dtype) - vals.fill(val) - - tm.assert_panel_equal(wp, Panel(vals, dtype=dtype)) - - # test the case when dtype is passed - wp = Panel(1, items=range(2), major_axis=range(3), - minor_axis=range(4), - dtype='float32') - vals = np.empty((2, 3, 4), dtype='float32') - vals.fill(1) - - tm.assert_panel_equal(wp, Panel(vals, dtype='float32')) +class TestPanel(PanelTests, CheckIndexing, SafeForSparse): def test_constructor_cast(self): - zero_filled = self.panel.fillna(0) - - casted = Panel(zero_filled._data, dtype=int) - casted2 = Panel(zero_filled.values, dtype=int) - - exp_values = zero_filled.values.astype(int) - assert_almost_equal(casted.values, exp_values) - assert_almost_equal(casted2.values, exp_values) - - casted = Panel(zero_filled._data, dtype=np.int32) - casted2 = Panel(zero_filled.values, dtype=np.int32) - - exp_values = zero_filled.values.astype(np.int32) - assert_almost_equal(casted.values, exp_values) - assert_almost_equal(casted2.values, exp_values) - # can't cast data = [[['foo', 'bar', 'baz']]] pytest.raises(ValueError, Panel, data, dtype=float) @@ -1017,86 +254,6 @@ def test_constructor_fails_with_not_3d_input(self): with pytest.raises(ValueError, match=msg): Panel(np.random.randn(10, 2)) - def test_consolidate(self): - assert self.panel._data.is_consolidated() - - 
self.panel['foo'] = 1. - assert not self.panel._data.is_consolidated() - - panel = self.panel._consolidate() - assert panel._data.is_consolidated() - - def test_ctor_dict(self): - itema = self.panel['ItemA'] - itemb = self.panel['ItemB'] - - d = {'A': itema, 'B': itemb[5:]} - d2 = {'A': itema._series, 'B': itemb[5:]._series} - d3 = {'A': None, - 'B': DataFrame(itemb[5:]._series), - 'C': DataFrame(itema._series)} - - wp = Panel.from_dict(d) - wp2 = Panel.from_dict(d2) # nested Dict - - # TODO: unused? - wp3 = Panel.from_dict(d3) # noqa - - tm.assert_index_equal(wp.major_axis, self.panel.major_axis) - assert_panel_equal(wp, wp2) - - # intersect - wp = Panel.from_dict(d, intersect=True) - tm.assert_index_equal(wp.major_axis, itemb.index[5:]) - - # use constructor - assert_panel_equal(Panel(d), Panel.from_dict(d)) - assert_panel_equal(Panel(d2), Panel.from_dict(d2)) - assert_panel_equal(Panel(d3), Panel.from_dict(d3)) - - # a pathological case - d4 = {'A': None, 'B': None} - - # TODO: unused? 
- wp4 = Panel.from_dict(d4) # noqa - - assert_panel_equal(Panel(d4), Panel(items=['A', 'B'])) - - # cast - dcasted = {k: v.reindex(wp.major_axis).fillna(0) - for k, v in compat.iteritems(d)} - result = Panel(dcasted, dtype=int) - expected = Panel({k: v.astype(int) - for k, v in compat.iteritems(dcasted)}) - assert_panel_equal(result, expected) - - result = Panel(dcasted, dtype=np.int32) - expected = Panel({k: v.astype(np.int32) - for k, v in compat.iteritems(dcasted)}) - assert_panel_equal(result, expected) - - def test_constructor_dict_mixed(self): - data = {k: v.values for k, v in self.panel.iteritems()} - result = Panel(data) - exp_major = Index(np.arange(len(self.panel.major_axis))) - tm.assert_index_equal(result.major_axis, exp_major) - - result = Panel(data, items=self.panel.items, - major_axis=self.panel.major_axis, - minor_axis=self.panel.minor_axis) - assert_panel_equal(result, self.panel) - - data['ItemC'] = self.panel['ItemC'] - result = Panel(data) - assert_panel_equal(result, self.panel) - - # corner, blow up - data['ItemB'] = data['ItemB'][:-1] - pytest.raises(Exception, Panel, data) - - data['ItemB'] = self.panel['ItemB'].values[:, :-1] - pytest.raises(Exception, Panel, data) - def test_ctor_orderedDict(self): keys = list(set(np.random.randint(0, 5000, 100)))[ :50] # unique random int keys @@ -1107,30 +264,6 @@ def test_ctor_orderedDict(self): p = Panel.from_dict(d) assert list(p.items) == keys - def test_constructor_resize(self): - data = self.panel._data - items = self.panel.items[:-1] - major = self.panel.major_axis[:-1] - minor = self.panel.minor_axis[:-1] - - result = Panel(data, items=items, - major_axis=major, minor_axis=minor) - expected = self.panel.reindex( - items=items, major=major, minor=minor) - assert_panel_equal(result, expected) - - result = Panel(data, items=items, major_axis=major) - expected = self.panel.reindex(items=items, major=major) - assert_panel_equal(result, expected) - - result = Panel(data, items=items) - expected = 
self.panel.reindex(items=items) - assert_panel_equal(result, expected) - - result = Panel(data, minor_axis=minor) - expected = self.panel.reindex(minor=minor) - assert_panel_equal(result, expected) - def test_from_dict_mixed_orient(self): df = tm.makeDataFrame() df['foo'] = 'bar' @@ -1161,153 +294,7 @@ def test_constructor_error_msgs(self): Panel(np.random.randn(3, 4, 5), lrange(5), lrange(5), lrange(4)) - def test_conform(self): - df = self.panel['ItemA'][:-5].filter(items=['A', 'B']) - conformed = self.panel.conform(df) - - tm.assert_index_equal(conformed.index, self.panel.major_axis) - tm.assert_index_equal(conformed.columns, self.panel.minor_axis) - - def test_convert_objects(self): - # GH 4937 - p = Panel(dict(A=dict(a=['1', '1.0']))) - expected = Panel(dict(A=dict(a=[1, 1.0]))) - result = p._convert(numeric=True, coerce=True) - assert_panel_equal(result, expected) - - def test_dtypes(self): - - result = self.panel.dtypes - expected = Series(np.dtype('float64'), index=self.panel.items) - assert_series_equal(result, expected) - - def test_astype(self): - # GH7271 - data = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]) - panel = Panel(data, ['a', 'b'], ['c', 'd'], ['e', 'f']) - - str_data = np.array([[['1', '2'], ['3', '4']], - [['5', '6'], ['7', '8']]]) - expected = Panel(str_data, ['a', 'b'], ['c', 'd'], ['e', 'f']) - assert_panel_equal(panel.astype(str), expected) - - pytest.raises(NotImplementedError, panel.astype, {0: str}) - - def test_apply(self): - # GH1148 - - # ufunc - applied = self.panel.apply(np.sqrt) - with np.errstate(invalid='ignore'): - expected = np.sqrt(self.panel.values) - assert_almost_equal(applied.values, expected) - - # ufunc same shape - result = self.panel.apply(lambda x: x * 2, axis='items') - expected = self.panel * 2 - assert_panel_equal(result, expected) - result = self.panel.apply(lambda x: x * 2, axis='major_axis') - expected = self.panel * 2 - assert_panel_equal(result, expected) - result = self.panel.apply(lambda x: x * 2, 
axis='minor_axis') - expected = self.panel * 2 - assert_panel_equal(result, expected) - - # reduction to DataFrame - result = self.panel.apply(lambda x: x.dtype, axis='items') - expected = DataFrame(np.dtype('float64'), - index=self.panel.major_axis, - columns=self.panel.minor_axis) - assert_frame_equal(result, expected) - result = self.panel.apply(lambda x: x.dtype, axis='major_axis') - expected = DataFrame(np.dtype('float64'), - index=self.panel.minor_axis, - columns=self.panel.items) - assert_frame_equal(result, expected) - result = self.panel.apply(lambda x: x.dtype, axis='minor_axis') - expected = DataFrame(np.dtype('float64'), - index=self.panel.major_axis, - columns=self.panel.items) - assert_frame_equal(result, expected) - - # reductions via other dims - expected = self.panel.sum(0) - result = self.panel.apply(lambda x: x.sum(), axis='items') - assert_frame_equal(result, expected) - expected = self.panel.sum(1) - result = self.panel.apply(lambda x: x.sum(), axis='major_axis') - assert_frame_equal(result, expected) - expected = self.panel.sum(2) - result = self.panel.apply(lambda x: x.sum(), axis='minor_axis') - assert_frame_equal(result, expected) - - # pass kwargs - result = self.panel.apply( - lambda x, y: x.sum() + y, axis='items', y=5) - expected = self.panel.sum(0) + 5 - assert_frame_equal(result, expected) - def test_apply_slabs(self): - - # same shape as original - result = self.panel.apply(lambda x: x * 2, - axis=['items', 'major_axis']) - expected = (self.panel * 2).transpose('minor_axis', 'major_axis', - 'items') - assert_panel_equal(result, expected) - result = self.panel.apply(lambda x: x * 2, - axis=['major_axis', 'items']) - assert_panel_equal(result, expected) - - result = self.panel.apply(lambda x: x * 2, - axis=['items', 'minor_axis']) - expected = (self.panel * 2).transpose('major_axis', 'minor_axis', - 'items') - assert_panel_equal(result, expected) - result = self.panel.apply(lambda x: x * 2, - axis=['minor_axis', 'items']) - 
assert_panel_equal(result, expected) - - result = self.panel.apply(lambda x: x * 2, - axis=['major_axis', 'minor_axis']) - expected = self.panel * 2 - assert_panel_equal(result, expected) - result = self.panel.apply(lambda x: x * 2, - axis=['minor_axis', 'major_axis']) - assert_panel_equal(result, expected) - - # reductions - result = self.panel.apply(lambda x: x.sum(0), axis=[ - 'items', 'major_axis' - ]) - expected = self.panel.sum(1).T - assert_frame_equal(result, expected) - - result = self.panel.apply(lambda x: x.sum(1), axis=[ - 'items', 'major_axis' - ]) - expected = self.panel.sum(0) - assert_frame_equal(result, expected) - - # transforms - f = lambda x: ((x.T - x.mean(1)) / x.std(1)).T - - # make sure that we don't trigger any warnings - result = self.panel.apply(f, axis=['items', 'major_axis']) - expected = Panel({ax: f(self.panel.loc[:, :, ax]) - for ax in self.panel.minor_axis}) - assert_panel_equal(result, expected) - - result = self.panel.apply(f, axis=['major_axis', 'minor_axis']) - expected = Panel({ax: f(self.panel.loc[ax]) - for ax in self.panel.items}) - assert_panel_equal(result, expected) - - result = self.panel.apply(f, axis=['minor_axis', 'items']) - expected = Panel({ax: f(self.panel.loc[:, ax]) - for ax in self.panel.major_axis}) - assert_panel_equal(result, expected) - # with multi-indexes # GH7469 index = MultiIndex.from_tuples([('one', 'a'), ('one', 'b'), ( @@ -1343,371 +330,13 @@ def test_apply_no_or_zero_ndim(self): assert_series_equal(result_float, expected_float) assert_series_equal(result_float64, expected_float64) - def test_reindex(self): - ref = self.panel['ItemB'] - - # items - result = self.panel.reindex(items=['ItemA', 'ItemB']) - assert_frame_equal(result['ItemB'], ref) - - # major - new_major = list(self.panel.major_axis[:10]) - result = self.panel.reindex(major=new_major) - assert_frame_equal(result['ItemB'], ref.reindex(index=new_major)) - - # raise exception put both major and major_axis - pytest.raises(Exception, 
self.panel.reindex, - major_axis=new_major, - major=new_major) - - # minor - new_minor = list(self.panel.minor_axis[:2]) - result = self.panel.reindex(minor=new_minor) - assert_frame_equal(result['ItemB'], ref.reindex(columns=new_minor)) - - # raise exception put both major and major_axis - pytest.raises(Exception, self.panel.reindex, - minor_axis=new_minor, - minor=new_minor) - - # this ok - result = self.panel.reindex() - assert_panel_equal(result, self.panel) - assert result is not self.panel - - # with filling - smaller_major = self.panel.major_axis[::5] - smaller = self.panel.reindex(major=smaller_major) - - larger = smaller.reindex(major=self.panel.major_axis, method='pad') - - assert_frame_equal(larger.major_xs(self.panel.major_axis[1]), - smaller.major_xs(smaller_major[0])) - - # don't necessarily copy - result = self.panel.reindex( - major=self.panel.major_axis, copy=False) - assert_panel_equal(result, self.panel) - assert result is self.panel - - def test_reindex_axis_style(self): - panel = Panel(np.random.rand(5, 5, 5)) - expected0 = Panel(panel.values).iloc[[0, 1]] - expected1 = Panel(panel.values).iloc[:, [0, 1]] - expected2 = Panel(panel.values).iloc[:, :, [0, 1]] - - result = panel.reindex([0, 1], axis=0) - assert_panel_equal(result, expected0) - - result = panel.reindex([0, 1], axis=1) - assert_panel_equal(result, expected1) - - result = panel.reindex([0, 1], axis=2) - assert_panel_equal(result, expected2) - - result = panel.reindex([0, 1], axis=2) - assert_panel_equal(result, expected2) - - def test_reindex_multi(self): - - # with and without copy full reindexing - result = self.panel.reindex( - items=self.panel.items, - major=self.panel.major_axis, - minor=self.panel.minor_axis, copy=False) - - assert result.items is self.panel.items - assert result.major_axis is self.panel.major_axis - assert result.minor_axis is self.panel.minor_axis - - result = self.panel.reindex( - items=self.panel.items, - major=self.panel.major_axis, - 
minor=self.panel.minor_axis, copy=False) - assert_panel_equal(result, self.panel) - - # multi-axis indexing consistency - # GH 5900 - df = DataFrame(np.random.randn(4, 3)) - p = Panel({'Item1': df}) - expected = Panel({'Item1': df}) - expected['Item2'] = np.nan - - items = ['Item1', 'Item2'] - major_axis = np.arange(4) - minor_axis = np.arange(3) - - results = [] - results.append(p.reindex(items=items, major_axis=major_axis, - copy=True)) - results.append(p.reindex(items=items, major_axis=major_axis, - copy=False)) - results.append(p.reindex(items=items, minor_axis=minor_axis, - copy=True)) - results.append(p.reindex(items=items, minor_axis=minor_axis, - copy=False)) - results.append(p.reindex(items=items, major_axis=major_axis, - minor_axis=minor_axis, copy=True)) - results.append(p.reindex(items=items, major_axis=major_axis, - minor_axis=minor_axis, copy=False)) - - for i, r in enumerate(results): - assert_panel_equal(expected, r) - - def test_reindex_like(self): - # reindex_like - smaller = self.panel.reindex(items=self.panel.items[:-1], - major=self.panel.major_axis[:-1], - minor=self.panel.minor_axis[:-1]) - smaller_like = self.panel.reindex_like(smaller) - assert_panel_equal(smaller, smaller_like) - - def test_take(self): - # axis == 0 - result = self.panel.take([2, 0, 1], axis=0) - expected = self.panel.reindex(items=['ItemC', 'ItemA', 'ItemB']) - assert_panel_equal(result, expected) - - # axis >= 1 - result = self.panel.take([3, 0, 1, 2], axis=2) - expected = self.panel.reindex(minor=['D', 'A', 'B', 'C']) - assert_panel_equal(result, expected) - - # neg indices ok - expected = self.panel.reindex(minor=['D', 'D', 'B', 'C']) - result = self.panel.take([3, -1, 1, 2], axis=2) - assert_panel_equal(result, expected) - - pytest.raises(Exception, self.panel.take, [4, 0, 1, 2], axis=2) - - def test_sort_index(self): - import random - - ritems = list(self.panel.items) - rmajor = list(self.panel.major_axis) - rminor = list(self.panel.minor_axis) - 
random.shuffle(ritems) - random.shuffle(rmajor) - random.shuffle(rminor) - - random_order = self.panel.reindex(items=ritems) - sorted_panel = random_order.sort_index(axis=0) - assert_panel_equal(sorted_panel, self.panel) - - # descending - random_order = self.panel.reindex(items=ritems) - sorted_panel = random_order.sort_index(axis=0, ascending=False) - assert_panel_equal( - sorted_panel, - self.panel.reindex(items=self.panel.items[::-1])) - - random_order = self.panel.reindex(major=rmajor) - sorted_panel = random_order.sort_index(axis=1) - assert_panel_equal(sorted_panel, self.panel) - - random_order = self.panel.reindex(minor=rminor) - sorted_panel = random_order.sort_index(axis=2) - assert_panel_equal(sorted_panel, self.panel) - def test_fillna(self): - filled = self.panel.fillna(0) - assert np.isfinite(filled.values).all() - - filled = self.panel.fillna(method='backfill') - assert_frame_equal(filled['ItemA'], - self.panel['ItemA'].fillna(method='backfill')) - - panel = self.panel.copy() - panel['str'] = 'foo' - - filled = panel.fillna(method='backfill') - assert_frame_equal(filled['ItemA'], - panel['ItemA'].fillna(method='backfill')) - - empty = self.panel.reindex(items=[]) - filled = empty.fillna(0) - assert_panel_equal(filled, empty) - - pytest.raises(ValueError, self.panel.fillna) - pytest.raises(ValueError, self.panel.fillna, 5, method='ffill') - - pytest.raises(TypeError, self.panel.fillna, [1, 2]) - pytest.raises(TypeError, self.panel.fillna, (1, 2)) - # limit not implemented when only value is specified p = Panel(np.random.randn(3, 4, 5)) p.iloc[0:2, 0:2, 0:2] = np.nan pytest.raises(NotImplementedError, lambda: p.fillna(999, limit=1)) - # Test in place fillNA - # Expected result - expected = Panel([[[0, 1], [2, 1]], [[10, 11], [12, 11]]], - items=['a', 'b'], minor_axis=['x', 'y'], - dtype=np.float64) - # method='ffill' - p1 = Panel([[[0, 1], [2, np.nan]], [[10, 11], [12, np.nan]]], - items=['a', 'b'], minor_axis=['x', 'y'], - dtype=np.float64) - 
p1.fillna(method='ffill', inplace=True) - assert_panel_equal(p1, expected) - - # method='bfill' - p2 = Panel([[[0, np.nan], [2, 1]], [[10, np.nan], [12, 11]]], - items=['a', 'b'], minor_axis=['x', 'y'], - dtype=np.float64) - p2.fillna(method='bfill', inplace=True) - assert_panel_equal(p2, expected) - - def test_ffill_bfill(self): - assert_panel_equal(self.panel.ffill(), - self.panel.fillna(method='ffill')) - assert_panel_equal(self.panel.bfill(), - self.panel.fillna(method='bfill')) - - def test_truncate_fillna_bug(self): - # #1823 - result = self.panel.truncate(before=None, after=None, axis='items') - - # it works! - result.fillna(value=0.0) - - def test_swapaxes(self): - result = self.panel.swapaxes('items', 'minor') - assert result.items is self.panel.minor_axis - - result = self.panel.swapaxes('items', 'major') - assert result.items is self.panel.major_axis - - result = self.panel.swapaxes('major', 'minor') - assert result.major_axis is self.panel.minor_axis - - panel = self.panel.copy() - result = panel.swapaxes('major', 'minor') - panel.values[0, 0, 1] = np.nan - expected = panel.swapaxes('major', 'minor') - assert_panel_equal(result, expected) - - # this should also work - result = self.panel.swapaxes(0, 1) - assert result.items is self.panel.major_axis - - # this works, but return a copy - result = self.panel.swapaxes('items', 'items') - assert_panel_equal(self.panel, result) - assert id(self.panel) != id(result) - - def test_transpose(self): - result = self.panel.transpose('minor', 'major', 'items') - expected = self.panel.swapaxes('items', 'minor') - assert_panel_equal(result, expected) - - # test kwargs - result = self.panel.transpose(items='minor', major='major', - minor='items') - expected = self.panel.swapaxes('items', 'minor') - assert_panel_equal(result, expected) - - # text mixture of args - result = self.panel.transpose( - 'minor', major='major', minor='items') - expected = self.panel.swapaxes('items', 'minor') - assert_panel_equal(result, 
expected) - - result = self.panel.transpose('minor', - 'major', - minor='items') - expected = self.panel.swapaxes('items', 'minor') - assert_panel_equal(result, expected) - - # duplicate axes - with pytest.raises(TypeError, - match='not enough/duplicate arguments'): - self.panel.transpose('minor', maj='major', minor='items') - - with pytest.raises(ValueError, - match='repeated axis in transpose'): - self.panel.transpose('minor', 'major', major='minor', - minor='items') - - result = self.panel.transpose(2, 1, 0) - assert_panel_equal(result, expected) - - result = self.panel.transpose('minor', 'items', 'major') - expected = self.panel.swapaxes('items', 'minor') - expected = expected.swapaxes('major', 'minor') - assert_panel_equal(result, expected) - - result = self.panel.transpose(2, 0, 1) - assert_panel_equal(result, expected) - - pytest.raises(ValueError, self.panel.transpose, 0, 0, 1) - - def test_transpose_copy(self): - panel = self.panel.copy() - result = panel.transpose(2, 0, 1, copy=True) - expected = panel.swapaxes('items', 'minor') - expected = expected.swapaxes('major', 'minor') - assert_panel_equal(result, expected) - - panel.values[0, 1, 1] = np.nan - assert notna(result.values[1, 0, 1]) - - def test_to_frame(self): - # filtered - filtered = self.panel.to_frame() - expected = self.panel.to_frame().dropna(how='any') - assert_frame_equal(filtered, expected) - - # unfiltered - unfiltered = self.panel.to_frame(filter_observations=False) - assert_panel_equal(unfiltered.to_panel(), self.panel) - - # names - assert unfiltered.index.names == ('major', 'minor') - - # unsorted, round trip - df = self.panel.to_frame(filter_observations=False) - unsorted = df.take(np.random.permutation(len(df))) - pan = unsorted.to_panel() - assert_panel_equal(pan, self.panel) - - # preserve original index names - df = DataFrame(np.random.randn(6, 2), - index=[['a', 'a', 'b', 'b', 'c', 'c'], - [0, 1, 0, 1, 0, 1]], - columns=['one', 'two']) - df.index.names = ['foo', 'bar'] - 
df.columns.name = 'baz' - - rdf = df.to_panel().to_frame() - assert rdf.index.names == df.index.names - assert rdf.columns.names == df.columns.names - - def test_to_frame_mixed(self): - panel = self.panel.fillna(0) - panel['str'] = 'foo' - panel['bool'] = panel['ItemA'] > 0 - - lp = panel.to_frame() - wp = lp.to_panel() - assert wp['bool'].values.dtype == np.bool_ - # Previously, this was mutating the underlying - # index and changing its name - assert_frame_equal(wp['bool'], panel['bool'], check_names=False) - - # GH 8704 - # with categorical - df = panel.to_frame() - df['category'] = df['str'].astype('category') - - # to_panel - # TODO: this converts back to object - p = df.to_panel() - expected = panel.copy() - expected['category'] = 'foo' - assert_panel_equal(p, expected) - def test_to_frame_multi_major(self): idx = MultiIndex.from_tuples( [(1, 'one'), (1, 'two'), (2, 'one'), (2, 'two')]) @@ -1808,22 +437,6 @@ def test_to_frame_multi_drop_level(self): expected = DataFrame({'i1': [1., 2], 'i2': [1., 2]}, index=exp_idx) assert_frame_equal(result, expected) - def test_to_panel_na_handling(self): - df = DataFrame(np.random.randint(0, 10, size=20).reshape((10, 2)), - index=[[0, 0, 0, 0, 0, 0, 1, 1, 1, 1], - [0, 1, 2, 3, 4, 5, 2, 3, 4, 5]]) - - panel = df.to_panel() - assert isna(panel[0].loc[1, [0, 1]]).all() - - def test_to_panel_duplicates(self): - # #2441 - df = DataFrame({'a': [0, 0, 1], 'b': [1, 1, 1], 'c': [1, 2, 3]}) - idf = df.set_index(['a', 'b']) - - with pytest.raises(ValueError, match='non-uniquely indexed'): - idf.to_panel() - def test_panel_dups(self): # GH 4960 @@ -1842,11 +455,6 @@ def test_panel_dups(self): result = panel.loc['E'] assert_frame_equal(result, expected) - expected = no_dup_panel.loc[['A', 'B']] - expected.items = ['A', 'A'] - result = panel.loc['A'] - assert_panel_equal(result, expected) - # major data = np.random.randn(5, 5, 5) no_dup_panel = Panel(data, major_axis=list("ABCDE")) @@ -1860,11 +468,6 @@ def test_panel_dups(self): result 
= panel.loc[:, 'E'] assert_frame_equal(result, expected) - expected = no_dup_panel.loc[:, ['A', 'B']] - expected.major_axis = ['A', 'A'] - result = panel.loc[:, 'A'] - assert_panel_equal(result, expected) - # minor data = np.random.randn(5, 100, 5) no_dup_panel = Panel(data, minor_axis=list("ABCDE")) @@ -1878,48 +481,10 @@ def test_panel_dups(self): result = panel.loc[:, :, 'E'] assert_frame_equal(result, expected) - expected = no_dup_panel.loc[:, :, ['A', 'B']] - expected.minor_axis = ['A', 'A'] - result = panel.loc[:, :, 'A'] - assert_panel_equal(result, expected) - def test_filter(self): pass - def test_compound(self): - compounded = self.panel.compound() - - assert_series_equal(compounded['ItemA'], - (1 + self.panel['ItemA']).product(0) - 1, - check_names=False) - def test_shift(self): - # major - idx = self.panel.major_axis[0] - idx_lag = self.panel.major_axis[1] - shifted = self.panel.shift(1) - assert_frame_equal(self.panel.major_xs(idx), - shifted.major_xs(idx_lag)) - - # minor - idx = self.panel.minor_axis[0] - idx_lag = self.panel.minor_axis[1] - shifted = self.panel.shift(1, axis='minor') - assert_frame_equal(self.panel.minor_xs(idx), - shifted.minor_xs(idx_lag)) - - # items - idx = self.panel.items[0] - idx_lag = self.panel.items[1] - shifted = self.panel.shift(1, axis='items') - assert_frame_equal(self.panel[idx], shifted[idx_lag]) - - # negative numbers, #2164 - result = self.panel.shift(-1) - expected = Panel({i: f.shift(-1)[:-1] - for i, f in self.panel.iteritems()}) - assert_panel_equal(result, expected) - # mixed dtypes #6959 data = [('item ' + ch, makeMixedDataFrame()) for ch in list('abcde')] @@ -1928,131 +493,14 @@ def test_shift(self): shifted = mixed_panel.shift(1) assert_series_equal(mixed_panel.dtypes, shifted.dtypes) - def test_tshift(self): - # PeriodIndex - ps = tm.makePeriodPanel() - shifted = ps.tshift(1) - unshifted = shifted.tshift(-1) - - assert_panel_equal(unshifted, ps) - - shifted2 = ps.tshift(freq='B') - 
assert_panel_equal(shifted, shifted2) - - shifted3 = ps.tshift(freq=BDay()) - assert_panel_equal(shifted, shifted3) - - with pytest.raises(ValueError, match='does not match'): - ps.tshift(freq='M') - - # DatetimeIndex - panel = make_test_panel() - shifted = panel.tshift(1) - unshifted = shifted.tshift(-1) - - assert_panel_equal(panel, unshifted) - - shifted2 = panel.tshift(freq=panel.major_axis.freq) - assert_panel_equal(shifted, shifted2) - - inferred_ts = Panel(panel.values, items=panel.items, - major_axis=Index(np.asarray(panel.major_axis)), - minor_axis=panel.minor_axis) - shifted = inferred_ts.tshift(1) - unshifted = shifted.tshift(-1) - assert_panel_equal(shifted, panel.tshift(1)) - assert_panel_equal(unshifted, inferred_ts) - - no_freq = panel.iloc[:, [0, 5, 7], :] - pytest.raises(ValueError, no_freq.tshift) - - def test_pct_change(self): - df1 = DataFrame({'c1': [1, 2, 5], 'c2': [3, 4, 6]}) - df2 = df1 + 1 - df3 = DataFrame({'c1': [3, 4, 7], 'c2': [5, 6, 8]}) - wp = Panel({'i1': df1, 'i2': df2, 'i3': df3}) - # major, 1 - result = wp.pct_change() # axis='major' - expected = Panel({'i1': df1.pct_change(), - 'i2': df2.pct_change(), - 'i3': df3.pct_change()}) - assert_panel_equal(result, expected) - result = wp.pct_change(axis=1) - assert_panel_equal(result, expected) - # major, 2 - result = wp.pct_change(periods=2) - expected = Panel({'i1': df1.pct_change(2), - 'i2': df2.pct_change(2), - 'i3': df3.pct_change(2)}) - assert_panel_equal(result, expected) - # minor, 1 - result = wp.pct_change(axis='minor') - expected = Panel({'i1': df1.pct_change(axis=1), - 'i2': df2.pct_change(axis=1), - 'i3': df3.pct_change(axis=1)}) - assert_panel_equal(result, expected) - result = wp.pct_change(axis=2) - assert_panel_equal(result, expected) - # minor, 2 - result = wp.pct_change(periods=2, axis='minor') - expected = Panel({'i1': df1.pct_change(periods=2, axis=1), - 'i2': df2.pct_change(periods=2, axis=1), - 'i3': df3.pct_change(periods=2, axis=1)}) - assert_panel_equal(result, 
expected) - # items, 1 - result = wp.pct_change(axis='items') - expected = Panel( - {'i1': DataFrame({'c1': [np.nan, np.nan, np.nan], - 'c2': [np.nan, np.nan, np.nan]}), - 'i2': DataFrame({'c1': [1, 0.5, .2], - 'c2': [1. / 3, 0.25, 1. / 6]}), - 'i3': DataFrame({'c1': [.5, 1. / 3, 1. / 6], - 'c2': [.25, .2, 1. / 7]})}) - assert_panel_equal(result, expected) - result = wp.pct_change(axis=0) - assert_panel_equal(result, expected) - # items, 2 - result = wp.pct_change(periods=2, axis='items') - expected = Panel( - {'i1': DataFrame({'c1': [np.nan, np.nan, np.nan], - 'c2': [np.nan, np.nan, np.nan]}), - 'i2': DataFrame({'c1': [np.nan, np.nan, np.nan], - 'c2': [np.nan, np.nan, np.nan]}), - 'i3': DataFrame({'c1': [2, 1, .4], - 'c2': [2. / 3, .5, 1. / 3]})}) - assert_panel_equal(result, expected) - - def test_round(self): - values = [[[-3.2, 2.2], [0, -4.8213], [3.123, 123.12], - [-1566.213, 88.88], [-12, 94.5]], - [[-5.82, 3.5], [6.21, -73.272], [-9.087, 23.12], - [272.212, -99.99], [23, -76.5]]] - evalues = [[[float(np.around(i)) for i in j] for j in k] - for k in values] - p = Panel(values, items=['Item1', 'Item2'], - major_axis=date_range('1/1/2000', periods=5), - minor_axis=['A', 'B']) - expected = Panel(evalues, items=['Item1', 'Item2'], - major_axis=date_range('1/1/2000', periods=5), - minor_axis=['A', 'B']) - result = p.round() - assert_panel_equal(expected, result) - def test_numpy_round(self): values = [[[-3.2, 2.2], [0, -4.8213], [3.123, 123.12], [-1566.213, 88.88], [-12, 94.5]], [[-5.82, 3.5], [6.21, -73.272], [-9.087, 23.12], [272.212, -99.99], [23, -76.5]]] - evalues = [[[float(np.around(i)) for i in j] for j in k] - for k in values] p = Panel(values, items=['Item1', 'Item2'], major_axis=date_range('1/1/2000', periods=5), minor_axis=['A', 'B']) - expected = Panel(evalues, items=['Item1', 'Item2'], - major_axis=date_range('1/1/2000', periods=5), - minor_axis=['A', 'B']) - result = np.round(p) - assert_panel_equal(expected, result) msg = "the 'out' parameter is 
not supported" with pytest.raises(ValueError, match=msg): @@ -2070,7 +518,6 @@ def test_multiindex_get(self): minor_axis=np.arange(5)) f1 = wp['a'] f2 = wp.loc['a'] - assert_panel_equal(f1, f2) assert (f1.items == [1, 2]).all() assert (f2.items == [1, 2]).all() @@ -2078,269 +525,10 @@ def test_multiindex_get(self): MultiIndex.from_tuples([('a', 1), ('a', 2), ('b', 1)], names=['first', 'second']) - @pytest.mark.filterwarnings("ignore:Using a non-tuple:FutureWarning") - def test_multiindex_blocks(self): - ind = MultiIndex.from_tuples([('a', 1), ('a', 2), ('b', 1)], - names=['first', 'second']) - wp = Panel(self.panel._data) - wp.items = ind - f1 = wp['a'] - assert (f1.items == [1, 2]).all() - - f1 = wp[('b', 1)] - assert (f1.columns == ['A', 'B', 'C', 'D']).all() - def test_repr_empty(self): empty = Panel() repr(empty) - # ignore warning from us, because removing panel - @pytest.mark.filterwarnings("ignore:Using:FutureWarning") - def test_rename(self): - mapper = {'ItemA': 'foo', 'ItemB': 'bar', 'ItemC': 'baz'} - - renamed = self.panel.rename(items=mapper) - exp = Index(['foo', 'bar', 'baz']) - tm.assert_index_equal(renamed.items, exp) - - renamed = self.panel.rename(minor_axis=str.lower) - exp = Index(['a', 'b', 'c', 'd']) - tm.assert_index_equal(renamed.minor_axis, exp) - - # don't copy - renamed_nocopy = self.panel.rename(items=mapper, copy=False) - renamed_nocopy['foo'] = 3. 
- assert (self.panel['ItemA'].values == 3).all() - - def test_get_attr(self): - assert_frame_equal(self.panel['ItemA'], self.panel.ItemA) - - # specific cases from #3440 - self.panel['a'] = self.panel['ItemA'] - assert_frame_equal(self.panel['a'], self.panel.a) - self.panel['i'] = self.panel['ItemA'] - assert_frame_equal(self.panel['i'], self.panel.i) - - def test_from_frame_level1_unsorted(self): - tuples = [('MSFT', 3), ('MSFT', 2), ('AAPL', 2), ('AAPL', 1), - ('MSFT', 1)] - midx = MultiIndex.from_tuples(tuples) - df = DataFrame(np.random.rand(5, 4), index=midx) - p = df.to_panel() - assert_frame_equal(p.minor_xs(2), df.xs(2, level=1).sort_index()) - - def test_to_excel(self): - try: - import xlwt # noqa - import xlrd # noqa - import openpyxl # noqa - from pandas.io.excel import ExcelFile - except ImportError: - pytest.skip("need xlwt xlrd openpyxl") - - for ext in ['xls', 'xlsx']: - with ensure_clean('__tmp__.' + ext) as path: - self.panel.to_excel(path) - try: - reader = ExcelFile(path) - except ImportError: - pytest.skip("need xlwt xlrd openpyxl") - - for item, df in self.panel.iteritems(): - recdf = reader.parse(str(item), index_col=0) - assert_frame_equal(df, recdf) - - def test_to_excel_xlsxwriter(self): - try: - import xlrd # noqa - import xlsxwriter # noqa - from pandas.io.excel import ExcelFile - except ImportError: - pytest.skip("Requires xlrd and xlsxwriter. 
Skipping test.") - - with ensure_clean('__tmp__.xlsx') as path: - self.panel.to_excel(path, engine='xlsxwriter') - try: - reader = ExcelFile(path) - except ImportError as e: - pytest.skip("cannot write excel file: %s" % e) - - for item, df in self.panel.iteritems(): - recdf = reader.parse(str(item), index_col=0) - assert_frame_equal(df, recdf) - - @pytest.mark.filterwarnings("ignore:'.reindex:FutureWarning") - def test_dropna(self): - p = Panel(np.random.randn(4, 5, 6), major_axis=list('abcde')) - p.loc[:, ['b', 'd'], 0] = np.nan - - result = p.dropna(axis=1) - exp = p.loc[:, ['a', 'c', 'e'], :] - assert_panel_equal(result, exp) - inp = p.copy() - inp.dropna(axis=1, inplace=True) - assert_panel_equal(inp, exp) - - result = p.dropna(axis=1, how='all') - assert_panel_equal(result, p) - - p.loc[:, ['b', 'd'], :] = np.nan - result = p.dropna(axis=1, how='all') - exp = p.loc[:, ['a', 'c', 'e'], :] - assert_panel_equal(result, exp) - - p = Panel(np.random.randn(4, 5, 6), items=list('abcd')) - p.loc[['b'], :, 0] = np.nan - - result = p.dropna() - exp = p.loc[['a', 'c', 'd']] - assert_panel_equal(result, exp) - - result = p.dropna(how='all') - assert_panel_equal(result, p) - - p.loc['b'] = np.nan - result = p.dropna(how='all') - exp = p.loc[['a', 'c', 'd']] - assert_panel_equal(result, exp) - - def test_drop(self): - df = DataFrame({"A": [1, 2], "B": [3, 4]}) - panel = Panel({"One": df, "Two": df}) - - def check_drop(drop_val, axis_number, aliases, expected): - try: - actual = panel.drop(drop_val, axis=axis_number) - assert_panel_equal(actual, expected) - for alias in aliases: - actual = panel.drop(drop_val, axis=alias) - assert_panel_equal(actual, expected) - except AssertionError: - pprint_thing("Failed with axis_number %d and aliases: %s" % - (axis_number, aliases)) - raise - # Items - expected = Panel({"One": df}) - check_drop('Two', 0, ['items'], expected) - - pytest.raises(KeyError, panel.drop, 'Three') - - # errors = 'ignore' - dropped = panel.drop('Three', 
errors='ignore') - assert_panel_equal(dropped, panel) - dropped = panel.drop(['Two', 'Three'], errors='ignore') - expected = Panel({"One": df}) - assert_panel_equal(dropped, expected) - - # Major - exp_df = DataFrame({"A": [2], "B": [4]}, index=[1]) - expected = Panel({"One": exp_df, "Two": exp_df}) - check_drop(0, 1, ['major_axis', 'major'], expected) - - exp_df = DataFrame({"A": [1], "B": [3]}, index=[0]) - expected = Panel({"One": exp_df, "Two": exp_df}) - check_drop([1], 1, ['major_axis', 'major'], expected) - - # Minor - exp_df = df[['B']] - expected = Panel({"One": exp_df, "Two": exp_df}) - check_drop(["A"], 2, ['minor_axis', 'minor'], expected) - - exp_df = df[['A']] - expected = Panel({"One": exp_df, "Two": exp_df}) - check_drop("B", 2, ['minor_axis', 'minor'], expected) - - def test_update(self): - pan = Panel([[[1.5, np.nan, 3.], [1.5, np.nan, 3.], - [1.5, np.nan, 3.], - [1.5, np.nan, 3.]], - [[1.5, np.nan, 3.], [1.5, np.nan, 3.], - [1.5, np.nan, 3.], - [1.5, np.nan, 3.]]]) - - other = Panel( - [[[3.6, 2., np.nan], [np.nan, np.nan, 7]]], items=[1]) - - pan.update(other) - - expected = Panel([[[1.5, np.nan, 3.], [1.5, np.nan, 3.], - [1.5, np.nan, 3.], [1.5, np.nan, 3.]], - [[3.6, 2., 3], [1.5, np.nan, 7], - [1.5, np.nan, 3.], - [1.5, np.nan, 3.]]]) - - assert_panel_equal(pan, expected) - - def test_update_from_dict(self): - pan = Panel({'one': DataFrame([[1.5, np.nan, 3], - [1.5, np.nan, 3], - [1.5, np.nan, 3.], - [1.5, np.nan, 3.]]), - 'two': DataFrame([[1.5, np.nan, 3.], - [1.5, np.nan, 3.], - [1.5, np.nan, 3.], - [1.5, np.nan, 3.]])}) - - other = {'two': DataFrame( - [[3.6, 2., np.nan], [np.nan, np.nan, 7]])} - - pan.update(other) - - expected = Panel( - {'one': DataFrame([[1.5, np.nan, 3.], - [1.5, np.nan, 3.], - [1.5, np.nan, 3.], - [1.5, np.nan, 3.]]), - 'two': DataFrame([[3.6, 2., 3], - [1.5, np.nan, 7], - [1.5, np.nan, 3.], - [1.5, np.nan, 3.]]) - } - ) - - assert_panel_equal(pan, expected) - - def test_update_nooverwrite(self): - pan = 
Panel([[[1.5, np.nan, 3.], [1.5, np.nan, 3.], - [1.5, np.nan, 3.], - [1.5, np.nan, 3.]], - [[1.5, np.nan, 3.], [1.5, np.nan, 3.], - [1.5, np.nan, 3.], - [1.5, np.nan, 3.]]]) - - other = Panel( - [[[3.6, 2., np.nan], [np.nan, np.nan, 7]]], items=[1]) - - pan.update(other, overwrite=False) - - expected = Panel([[[1.5, np.nan, 3], [1.5, np.nan, 3], - [1.5, np.nan, 3.], [1.5, np.nan, 3.]], - [[1.5, 2., 3.], [1.5, np.nan, 3.], - [1.5, np.nan, 3.], - [1.5, np.nan, 3.]]]) - - assert_panel_equal(pan, expected) - - def test_update_filtered(self): - pan = Panel([[[1.5, np.nan, 3.], [1.5, np.nan, 3.], - [1.5, np.nan, 3.], - [1.5, np.nan, 3.]], - [[1.5, np.nan, 3.], [1.5, np.nan, 3.], - [1.5, np.nan, 3.], - [1.5, np.nan, 3.]]]) - - other = Panel( - [[[3.6, 2., np.nan], [np.nan, np.nan, 7]]], items=[1]) - - pan.update(other, filter_func=lambda x: x > 2) - - expected = Panel([[[1.5, np.nan, 3.], [1.5, np.nan, 3.], - [1.5, np.nan, 3.], [1.5, np.nan, 3.]], - [[1.5, np.nan, 3], [1.5, np.nan, 7], - [1.5, np.nan, 3.], [1.5, np.nan, 3.]]]) - - assert_panel_equal(pan, expected) - @pytest.mark.parametrize('bad_kwarg, exception, msg', [ # errors must be 'ignore' or 'raise' ({'errors': 'something'}, ValueError, 'The parameter errors must.*'), @@ -2369,242 +557,6 @@ def test_update_deprecation(self, raise_conflict): with tm.assert_produces_warning(FutureWarning): pan.update(other, raise_conflict=raise_conflict) - def test_all_any(self): - assert (self.panel.all(axis=0).values == nanall( - self.panel, axis=0)).all() - assert (self.panel.all(axis=1).values == nanall( - self.panel, axis=1).T).all() - assert (self.panel.all(axis=2).values == nanall( - self.panel, axis=2).T).all() - assert (self.panel.any(axis=0).values == nanany( - self.panel, axis=0)).all() - assert (self.panel.any(axis=1).values == nanany( - self.panel, axis=1).T).all() - assert (self.panel.any(axis=2).values == nanany( - self.panel, axis=2).T).all() - - def test_all_any_unhandled(self): - pytest.raises(NotImplementedError, 
self.panel.all, bool_only=True) - pytest.raises(NotImplementedError, self.panel.any, bool_only=True) - - # GH issue 15960 - def test_sort_values(self): - pytest.raises(NotImplementedError, self.panel.sort_values) - pytest.raises(NotImplementedError, self.panel.sort_values, 'ItemA') - - -@pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning") -class TestPanelFrame(object): - """ - Check that conversions to and from Panel to DataFrame work. - """ - - def setup_method(self, method): - panel = make_test_panel() - self.panel = panel.to_frame() - self.unfiltered_panel = panel.to_frame(filter_observations=False) - - def test_ops_differently_indexed(self): - # trying to set non-identically indexed panel - wp = self.panel.to_panel() - wp2 = wp.reindex(major=wp.major_axis[:-1]) - lp2 = wp2.to_frame() - - result = self.panel + lp2 - assert_frame_equal(result.reindex(lp2.index), lp2 * 2) - - # careful, mutation - self.panel['foo'] = lp2['ItemA'] - assert_series_equal(self.panel['foo'].reindex(lp2.index), - lp2['ItemA'], - check_names=False) - - def test_ops_scalar(self): - result = self.panel.mul(2) - expected = DataFrame.__mul__(self.panel, 2) - assert_frame_equal(result, expected) - - def test_combineFrame(self): - wp = self.panel.to_panel() - result = self.panel.add(wp['ItemA'].stack(), axis=0) - assert_frame_equal(result.to_panel()['ItemA'], wp['ItemA'] * 2) - - def test_combinePanel(self): - wp = self.panel.to_panel() - result = self.panel.add(self.panel) - wide_result = result.to_panel() - assert_frame_equal(wp['ItemA'] * 2, wide_result['ItemA']) - - # one item - result = self.panel.add(self.panel.filter(['ItemA'])) - - def test_combine_scalar(self): - result = self.panel.mul(2) - expected = DataFrame(self.panel._data) * 2 - assert_frame_equal(result, expected) - - def test_combine_series(self): - s = self.panel['ItemA'][:10] - result = self.panel.add(s, axis=0) - expected = DataFrame.add(self.panel, s, axis=0) - assert_frame_equal(result, expected) - - s = 
self.panel.iloc[5] - result = self.panel + s - expected = DataFrame.add(self.panel, s, axis=1) - assert_frame_equal(result, expected) - - def test_operators(self): - wp = self.panel.to_panel() - result = (self.panel + 1).to_panel() - assert_frame_equal(wp['ItemA'] + 1, result['ItemA']) - - def test_arith_flex_panel(self): - ops = ['add', 'sub', 'mul', 'div', - 'truediv', 'pow', 'floordiv', 'mod'] - if not compat.PY3: - aliases = {} - else: - aliases = {'div': 'truediv'} - self.panel = self.panel.to_panel() - - for n in [np.random.randint(-50, -1), np.random.randint(1, 50), 0]: - for op in ops: - alias = aliases.get(op, op) - f = getattr(operator, alias) - exp = f(self.panel, n) - result = getattr(self.panel, op)(n) - assert_panel_equal(result, exp, check_panel_type=True) - - # rops - r_f = lambda x, y: f(y, x) - exp = r_f(self.panel, n) - result = getattr(self.panel, 'r' + op)(n) - assert_panel_equal(result, exp) - - def test_sort(self): - def is_sorted(arr): - return (arr[1:] > arr[:-1]).any() - - sorted_minor = self.panel.sort_index(level=1) - assert is_sorted(sorted_minor.index.codes[1]) - - sorted_major = sorted_minor.sort_index(level=0) - assert is_sorted(sorted_major.index.codes[0]) - - def test_to_string(self): - buf = StringIO() - self.panel.to_string(buf) - - def test_to_sparse(self): - if isinstance(self.panel, Panel): - msg = 'sparsifying is not supported' - with pytest.raises(NotImplementedError, match=msg): - self.panel.to_sparse - - def test_truncate(self): - dates = self.panel.index.levels[0] - start, end = dates[1], dates[5] - - trunced = self.panel.truncate(start, end).to_panel() - expected = self.panel.to_panel()['ItemA'].truncate(start, end) - - # TODO truncate drops index.names - assert_frame_equal(trunced['ItemA'], expected, check_names=False) - - trunced = self.panel.truncate(before=start).to_panel() - expected = self.panel.to_panel()['ItemA'].truncate(before=start) - - # TODO truncate drops index.names - assert_frame_equal(trunced['ItemA'], 
expected, check_names=False) - - trunced = self.panel.truncate(after=end).to_panel() - expected = self.panel.to_panel()['ItemA'].truncate(after=end) - - # TODO truncate drops index.names - assert_frame_equal(trunced['ItemA'], expected, check_names=False) - - # truncate on dates that aren't in there - wp = self.panel.to_panel() - new_index = wp.major_axis[::5] - - wp2 = wp.reindex(major=new_index) - - lp2 = wp2.to_frame() - lp_trunc = lp2.truncate(wp.major_axis[2], wp.major_axis[-2]) - - wp_trunc = wp2.truncate(wp.major_axis[2], wp.major_axis[-2]) - - assert_panel_equal(wp_trunc, lp_trunc.to_panel()) - - # throw proper exception - pytest.raises(Exception, lp2.truncate, wp.major_axis[-2], - wp.major_axis[2]) - - def test_axis_dummies(self): - from pandas.core.reshape.reshape import make_axis_dummies - - minor_dummies = make_axis_dummies(self.panel, 'minor').astype(np.uint8) - assert len(minor_dummies.columns) == len(self.panel.index.levels[1]) - - major_dummies = make_axis_dummies(self.panel, 'major').astype(np.uint8) - assert len(major_dummies.columns) == len(self.panel.index.levels[0]) - - mapping = {'A': 'one', 'B': 'one', 'C': 'two', 'D': 'two'} - - transformed = make_axis_dummies(self.panel, 'minor', - transform=mapping.get).astype(np.uint8) - assert len(transformed.columns) == 2 - tm.assert_index_equal(transformed.columns, Index(['one', 'two'])) - - # TODO: test correctness - - def test_get_dummies(self): - from pandas.core.reshape.reshape import get_dummies, make_axis_dummies - - self.panel['Label'] = self.panel.index.codes[1] - minor_dummies = make_axis_dummies(self.panel, 'minor').astype(np.uint8) - dummies = get_dummies(self.panel['Label']) - tm.assert_numpy_array_equal(dummies.values, minor_dummies.values) - - def test_mean(self): - means = self.panel.mean(level='minor') - - # test versus Panel version - wide_means = self.panel.to_panel().mean('major') - assert_frame_equal(means, wide_means) - - def test_sum(self): - sums = self.panel.sum(level='minor') - 
- # test versus Panel version - wide_sums = self.panel.to_panel().sum('major') - assert_frame_equal(sums, wide_sums) - - def test_count(self): - index = self.panel.index - - major_count = self.panel.count(level=0)['ItemA'] - level_codes = index.codes[0] - for i, idx in enumerate(index.levels[0]): - assert major_count[i] == (level_codes == i).sum() - - minor_count = self.panel.count(level=1)['ItemA'] - level_codes = index.codes[1] - for i, idx in enumerate(index.levels[1]): - assert minor_count[i] == (level_codes == i).sum() - - def test_join(self): - lp1 = self.panel.filter(['ItemA', 'ItemB']) - lp2 = self.panel.filter(['ItemC']) - - joined = lp1.join(lp2) - - assert len(joined.columns) == 3 - - pytest.raises(Exception, lp1.join, - self.panel.filter(['ItemB', 'ItemC'])) - def test_panel_index(): index = panelm.panel_index([1, 2, 3, 4], [1, 2, 3]) diff --git a/pandas/tests/test_sorting.py b/pandas/tests/test_sorting.py index 7500cbb3cfc3a..2a64947042979 100644 --- a/pandas/tests/test_sorting.py +++ b/pandas/tests/test_sorting.py @@ -7,6 +7,8 @@ from numpy import nan import pytest +from pandas.compat import PY2 + from pandas import DataFrame, MultiIndex, Series, compat, concat, merge from pandas.core import common as com from pandas.core.sorting import ( @@ -403,15 +405,21 @@ def test_mixed_integer_from_list(self): expected = np.array([0, 0, 1, 'a', 'b', 'b'], dtype=object) tm.assert_numpy_array_equal(result, expected) + @pytest.mark.skipif(PY2, reason="pytest.raises match regex fails") def test_unsortable(self): # GH 13714 arr = np.array([1, 2, datetime.now(), 0, 3], dtype=object) + msg = (r"'(<|>)' not supported between instances of" + r" 'datetime\.datetime' and 'int'|" + r"unorderable types: int\(\) > datetime\.datetime\(\)") if compat.PY2: # RuntimeWarning: tp_compare didn't return -1 or -2 for exception with warnings.catch_warnings(): - pytest.raises(TypeError, safe_sort, arr) + with pytest.raises(TypeError, match=msg): + safe_sort(arr) else: - 
pytest.raises(TypeError, safe_sort, arr) + with pytest.raises(TypeError, match=msg): + safe_sort(arr) def test_exceptions(self): with pytest.raises(TypeError, diff --git a/pandas/tests/test_strings.py b/pandas/tests/test_strings.py index 7cea3be03d1a7..40a83f90c8dfd 100644 --- a/pandas/tests/test_strings.py +++ b/pandas/tests/test_strings.py @@ -10,7 +10,7 @@ import pytest import pandas.compat as compat -from pandas.compat import PY3, range, u +from pandas.compat import PY2, PY3, range, u from pandas import DataFrame, Index, MultiIndex, Series, concat, isna, notna import pandas.core.strings as strings @@ -76,7 +76,7 @@ def assert_series_or_index_equal(left, right): 'len', 'lower', 'lstrip', 'partition', 'rpartition', 'rsplit', 'rstrip', 'slice', 'slice_replace', 'split', - 'strip', 'swapcase', 'title', 'upper' + 'strip', 'swapcase', 'title', 'upper', 'casefold' ], [()] * 100, [{}] * 100)) ids, _, _ = zip(*_any_string_method) # use method name as fixture-id @@ -1002,11 +1002,13 @@ def test_replace(self): tm.assert_series_equal(result, exp) # GH 13438 + msg = "repl must be a string or callable" for klass in (Series, Index): for repl in (None, 3, {'a': 'b'}): for data in (['a', 'b', None], ['a', 'b', 'c', 'ad']): values = klass(data) - pytest.raises(TypeError, values.str.replace, 'a', repl) + with pytest.raises(TypeError, match=msg): + values.str.replace('a', repl) def test_replace_callable(self): # GH 15055 @@ -1123,10 +1125,14 @@ def test_replace_literal(self): callable_repl = lambda m: m.group(0).swapcase() compiled_pat = re.compile('[a-z][A-Z]{2}') - pytest.raises(ValueError, values.str.replace, 'abc', callable_repl, - regex=False) - pytest.raises(ValueError, values.str.replace, compiled_pat, '', - regex=False) + msg = "Cannot use a callable replacement when regex=False" + with pytest.raises(ValueError, match=msg): + values.str.replace('abc', callable_repl, regex=False) + + msg = ("Cannot use a compiled regex as replacement pattern with" + " regex=False") + with 
pytest.raises(ValueError, match=msg): + values.str.replace(compiled_pat, '', regex=False) def test_repeat(self): values = Series(['a', 'b', NA, 'c', NA, 'd']) @@ -1242,12 +1248,13 @@ def test_extract_expand_False(self): for klass in [Series, Index]: # no groups s_or_idx = klass(['A1', 'B2', 'C3']) - f = lambda: s_or_idx.str.extract('[ABC][123]', expand=False) - pytest.raises(ValueError, f) + msg = "pattern contains no capture groups" + with pytest.raises(ValueError, match=msg): + s_or_idx.str.extract('[ABC][123]', expand=False) # only non-capturing groups - f = lambda: s_or_idx.str.extract('(?:[AB]).*', expand=False) - pytest.raises(ValueError, f) + with pytest.raises(ValueError, match=msg): + s_or_idx.str.extract('(?:[AB]).*', expand=False) # single group renames series/index properly s_or_idx = klass(['A1', 'A2']) @@ -1387,12 +1394,13 @@ def test_extract_expand_True(self): for klass in [Series, Index]: # no groups s_or_idx = klass(['A1', 'B2', 'C3']) - f = lambda: s_or_idx.str.extract('[ABC][123]', expand=True) - pytest.raises(ValueError, f) + msg = "pattern contains no capture groups" + with pytest.raises(ValueError, match=msg): + s_or_idx.str.extract('[ABC][123]', expand=True) # only non-capturing groups - f = lambda: s_or_idx.str.extract('(?:[AB]).*', expand=True) - pytest.raises(ValueError, f) + with pytest.raises(ValueError, match=msg): + s_or_idx.str.extract('(?:[AB]).*', expand=True) # single group renames series/index properly s_or_idx = klass(['A1', 'A2']) @@ -3315,10 +3323,14 @@ def test_encode_decode(self): tm.assert_series_equal(result, exp) + @pytest.mark.skipif(PY2, reason="pytest.raises match regex fails") def test_encode_decode_errors(self): encodeBase = Series([u('a'), u('b'), u('a\x9d')]) - pytest.raises(UnicodeEncodeError, encodeBase.str.encode, 'cp1252') + msg = (r"'charmap' codec can't encode character '\\x9d' in position 1:" + " character maps to <undefined>") + with pytest.raises(UnicodeEncodeError, match=msg): + 
encodeBase.str.encode('cp1252') f = lambda x: x.encode('cp1252', 'ignore') result = encodeBase.str.encode('cp1252', 'ignore') @@ -3327,7 +3339,10 @@ def test_encode_decode_errors(self): decodeBase = Series([b'a', b'b', b'a\x9d']) - pytest.raises(UnicodeDecodeError, decodeBase.str.decode, 'cp1252') + msg = ("'charmap' codec can't decode byte 0x9d in position 1:" + " character maps to <undefined>") + with pytest.raises(UnicodeDecodeError, match=msg): + decodeBase.str.decode('cp1252') f = lambda x: x.decode('cp1252', 'ignore') result = decodeBase.str.decode('cp1252', 'ignore') @@ -3418,9 +3433,19 @@ def test_method_on_bytes(self): lhs = Series(np.array(list('abc'), 'S1').astype(object)) rhs = Series(np.array(list('def'), 'S1').astype(object)) if compat.PY3: - pytest.raises(TypeError, lhs.str.cat, rhs) + with pytest.raises(TypeError, match="can't concat str to bytes"): + lhs.str.cat(rhs) else: result = lhs.str.cat(rhs) expected = Series(np.array( ['ad', 'be', 'cf'], 'S2').astype(object)) tm.assert_series_equal(result, expected) + + @pytest.mark.skipif(compat.PY2, reason='not in python2') + def test_casefold(self): + # GH25405 + expected = Series(['ss', NA, 'case', 'ssd']) + s = Series(['ß', NA, 'case', 'ßd']) + result = s.str.casefold() + + tm.assert_series_equal(result, expected) diff --git a/pandas/tests/test_window.py b/pandas/tests/test_window.py index e816d4c04344a..ce9d1888b8e96 100644 --- a/pandas/tests/test_window.py +++ b/pandas/tests/test_window.py @@ -89,9 +89,8 @@ def test_getitem(self): def test_select_bad_cols(self): df = DataFrame([[1, 2]], columns=['A', 'B']) g = df.rolling(window=5) - pytest.raises(KeyError, g.__getitem__, ['C']) # g[['C']] - - pytest.raises(KeyError, g.__getitem__, ['A', 'C']) # g[['A', 'C']] + with pytest.raises(KeyError, match="Columns not found: 'C'"): + g[['C']] with pytest.raises(KeyError, match='^[^A]+$'): # A should not be referenced as a bad column... # will have to rethink regex if you change message! 
@@ -102,7 +101,9 @@ def test_attribute_access(self): df = DataFrame([[1, 2]], columns=['A', 'B']) r = df.rolling(window=5) tm.assert_series_equal(r.A.sum(), r['A'].sum()) - pytest.raises(AttributeError, lambda: r.F) + msg = "'Rolling' object has no attribute 'F'" + with pytest.raises(AttributeError, match=msg): + r.F def tests_skip_nuisance(self): @@ -217,12 +218,11 @@ def test_agg_nested_dicts(self): df = DataFrame({'A': range(5), 'B': range(0, 10, 2)}) r = df.rolling(window=3) - def f(): + msg = r"cannot perform renaming for (r1|r2) with a nested dictionary" + with pytest.raises(SpecificationError, match=msg): r.aggregate({'r1': {'A': ['mean', 'sum']}, 'r2': {'B': ['mean', 'sum']}}) - pytest.raises(SpecificationError, f) - expected = concat([r['A'].mean(), r['A'].std(), r['B'].mean(), r['B'].std()], axis=1) expected.columns = pd.MultiIndex.from_tuples([('ra', 'mean'), ( @@ -1806,26 +1806,38 @@ def test_ewm_alpha_arg(self): def test_ewm_domain_checks(self): # GH 12492 s = Series(self.arr) - # com must satisfy: com >= 0 - pytest.raises(ValueError, s.ewm, com=-0.1) + msg = "comass must satisfy: comass >= 0" + with pytest.raises(ValueError, match=msg): + s.ewm(com=-0.1) s.ewm(com=0.0) s.ewm(com=0.1) - # span must satisfy: span >= 1 - pytest.raises(ValueError, s.ewm, span=-0.1) - pytest.raises(ValueError, s.ewm, span=0.0) - pytest.raises(ValueError, s.ewm, span=0.9) + + msg = "span must satisfy: span >= 1" + with pytest.raises(ValueError, match=msg): + s.ewm(span=-0.1) + with pytest.raises(ValueError, match=msg): + s.ewm(span=0.0) + with pytest.raises(ValueError, match=msg): + s.ewm(span=0.9) s.ewm(span=1.0) s.ewm(span=1.1) - # halflife must satisfy: halflife > 0 - pytest.raises(ValueError, s.ewm, halflife=-0.1) - pytest.raises(ValueError, s.ewm, halflife=0.0) + + msg = "halflife must satisfy: halflife > 0" + with pytest.raises(ValueError, match=msg): + s.ewm(halflife=-0.1) + with pytest.raises(ValueError, match=msg): + s.ewm(halflife=0.0) s.ewm(halflife=0.1) - # 
alpha must satisfy: 0 < alpha <= 1 - pytest.raises(ValueError, s.ewm, alpha=-0.1) - pytest.raises(ValueError, s.ewm, alpha=0.0) + + msg = "alpha must satisfy: 0 < alpha <= 1" + with pytest.raises(ValueError, match=msg): + s.ewm(alpha=-0.1) + with pytest.raises(ValueError, match=msg): + s.ewm(alpha=0.0) s.ewm(alpha=0.1) s.ewm(alpha=1.0) - pytest.raises(ValueError, s.ewm, alpha=1.1) + with pytest.raises(ValueError, match=msg): + s.ewm(alpha=1.1) @pytest.mark.parametrize('method', ['mean', 'vol', 'var']) def test_ew_empty_series(self, method): @@ -2598,7 +2610,10 @@ def get_result(obj, obj2=None): def test_flex_binary_moment(self): # GH3155 # don't blow the stack - pytest.raises(TypeError, rwindow._flex_binary_moment, 5, 6, None) + msg = ("arguments to moment function must be of type" + " np.ndarray/Series/DataFrame") + with pytest.raises(TypeError, match=msg): + rwindow._flex_binary_moment(5, 6, None) def test_corr_sanity(self): # GH 3155 @@ -2682,7 +2697,10 @@ def func(A, B, com, **kwargs): Series([1.]), Series([1.]), 50, min_periods=min_periods) tm.assert_series_equal(result, Series([np.NaN])) - pytest.raises(Exception, func, A, randn(50), 20, min_periods=5) + msg = "Input arrays must be of the same type!" 
+ # exception raised is Exception + with pytest.raises(Exception, match=msg): + func(A, randn(50), 20, min_periods=5) def test_expanding_apply_args_kwargs(self, raw): @@ -3266,9 +3284,9 @@ def setup_method(self, method): def test_mutated(self): - def f(): + msg = r"group\(\) got an unexpected keyword argument 'foo'" + with pytest.raises(TypeError, match=msg): self.frame.groupby('A', foo=1) - pytest.raises(TypeError, f) g = self.frame.groupby('A') assert not g.mutated diff --git a/pandas/tests/tools/test_numeric.py b/pandas/tests/tools/test_numeric.py index 3822170d884aa..97e1dc2f6aefc 100644 --- a/pandas/tests/tools/test_numeric.py +++ b/pandas/tests/tools/test_numeric.py @@ -4,11 +4,50 @@ from numpy import iinfo import pytest +import pandas.compat as compat + import pandas as pd from pandas import DataFrame, Index, Series, to_numeric from pandas.util import testing as tm +@pytest.fixture(params=[None, "ignore", "raise", "coerce"]) +def errors(request): + return request.param + + +@pytest.fixture(params=[True, False]) +def signed(request): + return request.param + + +@pytest.fixture(params=[lambda x: x, str], ids=["identity", "str"]) +def transform(request): + return request.param + + +@pytest.fixture(params=[ + 47393996303418497800, + 100000000000000000000 +]) +def large_val(request): + return request.param + + +@pytest.fixture(params=[True, False]) +def multiple_elts(request): + return request.param + + +@pytest.fixture(params=[ + (lambda x: Index(x, name="idx"), tm.assert_index_equal), + (lambda x: Series(x, name="ser"), tm.assert_series_equal), + (lambda x: np.array(Index(x).values), tm.assert_numpy_array_equal) +]) +def transform_assert_equal(request): + return request.param + + @pytest.mark.parametrize("input_kwargs,result_kwargs", [ (dict(), dict(dtype=np.int64)), (dict(errors="coerce", downcast="integer"), dict(dtype=np.int8)) @@ -172,7 +211,6 @@ def test_all_nan(): tm.assert_series_equal(result, expected) -@pytest.mark.parametrize("errors", [None, 
"ignore", "raise", "coerce"]) def test_type_check(errors): # see gh-11776 df = DataFrame({"a": [1, -3.14, 7], "b": ["4", "5", "6"]}) @@ -183,11 +221,100 @@ def test_type_check(errors): to_numeric(df, **kwargs) -@pytest.mark.parametrize("val", [ - 1, 1.1, "1", "1.1", -1.5, "-1.5" -]) -def test_scalar(val): - assert to_numeric(val) == float(val) +@pytest.mark.parametrize("val", [1, 1.1, 20001]) +def test_scalar(val, signed, transform): + val = -val if signed else val + assert to_numeric(transform(val)) == float(val) + + +def test_really_large_scalar(large_val, signed, transform, errors): + # see gh-24910 + kwargs = dict(errors=errors) if errors is not None else dict() + val = -large_val if signed else large_val + + val = transform(val) + val_is_string = isinstance(val, str) + + if val_is_string and errors in (None, "raise"): + msg = "Integer out of range. at position 0" + with pytest.raises(ValueError, match=msg): + to_numeric(val, **kwargs) + else: + expected = float(val) if (errors == "coerce" and + val_is_string) else val + assert tm.assert_almost_equal(to_numeric(val, **kwargs), expected) + + +def test_really_large_in_arr(large_val, signed, transform, + multiple_elts, errors): + # see gh-24910 + kwargs = dict(errors=errors) if errors is not None else dict() + val = -large_val if signed else large_val + val = transform(val) + + extra_elt = "string" + arr = [val] + multiple_elts * [extra_elt] + + val_is_string = isinstance(val, str) + coercing = errors == "coerce" + + if errors in (None, "raise") and (val_is_string or multiple_elts): + if val_is_string: + msg = "Integer out of range. 
at position 0" + else: + msg = 'Unable to parse string "string" at position 1' + + with pytest.raises(ValueError, match=msg): + to_numeric(arr, **kwargs) + else: + result = to_numeric(arr, **kwargs) + + exp_val = float(val) if (coercing and val_is_string) else val + expected = [exp_val] + + if multiple_elts: + if coercing: + expected.append(np.nan) + exp_dtype = float + else: + expected.append(extra_elt) + exp_dtype = object + else: + exp_dtype = float if isinstance(exp_val, ( + int, compat.long, float)) else object + + tm.assert_almost_equal(result, np.array(expected, dtype=exp_dtype)) + + +def test_really_large_in_arr_consistent(large_val, signed, + multiple_elts, errors): + # see gh-24910 + # + # Even if we discover that we have to hold float, does not mean + # we should be lenient on subsequent elements that fail to be integer. + kwargs = dict(errors=errors) if errors is not None else dict() + arr = [str(-large_val if signed else large_val)] + + if multiple_elts: + arr.insert(0, large_val) + + if errors in (None, "raise"): + index = int(multiple_elts) + msg = "Integer out of range. 
at position {index}".format(index=index) + + with pytest.raises(ValueError, match=msg): + to_numeric(arr, **kwargs) + else: + result = to_numeric(arr, **kwargs) + + if errors == "coerce": + expected = [float(i) for i in arr] + exp_dtype = float + else: + expected = arr + exp_dtype = object + + tm.assert_almost_equal(result, np.array(expected, dtype=exp_dtype)) @pytest.mark.parametrize("errors,checker", [ @@ -205,15 +332,6 @@ def test_scalar_fail(errors, checker): assert checker(to_numeric(scalar, errors=errors)) -@pytest.fixture(params=[ - (lambda x: Index(x, name="idx"), tm.assert_index_equal), - (lambda x: Series(x, name="ser"), tm.assert_series_equal), - (lambda x: np.array(Index(x).values), tm.assert_numpy_array_equal) -]) -def transform_assert_equal(request): - return request.param - - @pytest.mark.parametrize("data", [ [1, 2, 3], [1., np.nan, 3, np.nan] diff --git a/pandas/tests/tseries/frequencies/__init__.py b/pandas/tests/tseries/frequencies/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/pandas/tests/tseries/frequencies/test_freq_code.py b/pandas/tests/tseries/frequencies/test_freq_code.py new file mode 100644 index 0000000000000..0aa29e451b1ba --- /dev/null +++ b/pandas/tests/tseries/frequencies/test_freq_code.py @@ -0,0 +1,149 @@ +import pytest + +from pandas._libs.tslibs import frequencies as libfrequencies, resolution +from pandas._libs.tslibs.frequencies import ( + FreqGroup, _period_code_map, get_freq, get_freq_code) +import pandas.compat as compat + +import pandas.tseries.offsets as offsets + + +@pytest.fixture(params=list(compat.iteritems(_period_code_map))) +def period_code_item(request): + return request.param + + +@pytest.mark.parametrize("freqstr,expected", [ + ("A", 1000), ("3A", 1000), ("-1A", 1000), + ("Y", 1000), ("3Y", 1000), ("-1Y", 1000), + ("W", 4000), ("W-MON", 4001), ("W-FRI", 4005) +]) +def test_freq_code(freqstr, expected): + assert get_freq(freqstr) == expected + + +def 
test_freq_code_match(period_code_item): + freqstr, code = period_code_item + assert get_freq(freqstr) == code + + +@pytest.mark.parametrize("freqstr,expected", [ + ("A", 1000), ("3A", 1000), ("-1A", 1000), ("A-JAN", 1000), + ("A-MAY", 1000), ("Y", 1000), ("3Y", 1000), ("-1Y", 1000), + ("Y-JAN", 1000), ("Y-MAY", 1000), (offsets.YearEnd(), 1000), + (offsets.YearEnd(month=1), 1000), (offsets.YearEnd(month=5), 1000), + ("W", 4000), ("W-MON", 4000), ("W-FRI", 4000), (offsets.Week(), 4000), + (offsets.Week(weekday=1), 4000), (offsets.Week(weekday=5), 4000), + ("T", FreqGroup.FR_MIN), +]) +def test_freq_group(freqstr, expected): + assert resolution.get_freq_group(freqstr) == expected + + +def test_freq_group_match(period_code_item): + freqstr, code = period_code_item + + str_group = resolution.get_freq_group(freqstr) + code_group = resolution.get_freq_group(code) + + assert str_group == code_group == code // 1000 * 1000 + + +@pytest.mark.parametrize("freqstr,exp_freqstr", [ + ("D", "D"), ("W", "D"), ("M", "D"), + ("S", "S"), ("T", "S"), ("H", "S") +]) +def test_get_to_timestamp_base(freqstr, exp_freqstr): + tsb = libfrequencies.get_to_timestamp_base + + assert tsb(get_freq_code(freqstr)[0]) == get_freq_code(exp_freqstr)[0] + + +_reso = resolution.Resolution + + +@pytest.mark.parametrize("freqstr,expected", [ + ("A", "year"), ("Q", "quarter"), ("M", "month"), + ("D", "day"), ("H", "hour"), ("T", "minute"), + ("S", "second"), ("L", "millisecond"), + ("U", "microsecond"), ("N", "nanosecond") +]) +def test_get_str_from_freq(freqstr, expected): + assert _reso.get_str_from_freq(freqstr) == expected + + +@pytest.mark.parametrize("freq", ["A", "Q", "M", "D", "H", + "T", "S", "L", "U", "N"]) +def test_get_freq_roundtrip(freq): + result = _reso.get_freq(_reso.get_str_from_freq(freq)) + assert freq == result + + +@pytest.mark.parametrize("freq", ["D", "H", "T", "S", "L", "U"]) +def test_get_freq_roundtrip2(freq): + result = 
_reso.get_freq(_reso.get_str(_reso.get_reso_from_freq(freq))) + assert freq == result + + +@pytest.mark.parametrize("args,expected", [ + ((1.5, "T"), (90, "S")), ((62.4, "T"), (3744, "S")), + ((1.04, "H"), (3744, "S")), ((1, "D"), (1, "D")), + ((0.342931, "H"), (1234551600, "U")), ((1.2345, "D"), (106660800, "L")) +]) +def test_resolution_bumping(args, expected): + # see gh-14378 + assert _reso.get_stride_from_decimal(*args) == expected + + +@pytest.mark.parametrize("args", [ + (0.5, "N"), + + # Too much precision in the input can prevent. + (0.3429324798798269273987982, "H") +]) +def test_cat(args): + msg = "Could not convert to integer offset at any resolution" + + with pytest.raises(ValueError, match=msg): + _reso.get_stride_from_decimal(*args) + + +@pytest.mark.parametrize("freq_input,expected", [ + # Frequency string. + ("A", (get_freq("A"), 1)), + ("3D", (get_freq("D"), 3)), + ("-2M", (get_freq("M"), -2)), + + # Tuple. + (("D", 1), (get_freq("D"), 1)), + (("A", 3), (get_freq("A"), 3)), + (("M", -2), (get_freq("M"), -2)), + ((5, "T"), (FreqGroup.FR_MIN, 5)), + + # Numeric Tuple. + ((1000, 1), (1000, 1)), + + # Offsets. + (offsets.Day(), (get_freq("D"), 1)), + (offsets.Day(3), (get_freq("D"), 3)), + (offsets.Day(-2), (get_freq("D"), -2)), + (offsets.MonthEnd(), (get_freq("M"), 1)), + (offsets.MonthEnd(3), (get_freq("M"), 3)), + (offsets.MonthEnd(-2), (get_freq("M"), -2)), + (offsets.Week(), (get_freq("W"), 1)), + (offsets.Week(3), (get_freq("W"), 3)), + (offsets.Week(-2), (get_freq("W"), -2)), + (offsets.Hour(), (FreqGroup.FR_HR, 1)), + + # Monday is weekday=0. 
+ (offsets.Week(weekday=1), (get_freq("W-TUE"), 1)), + (offsets.Week(3, weekday=0), (get_freq("W-MON"), 3)), + (offsets.Week(-2, weekday=4), (get_freq("W-FRI"), -2)), +]) +def test_get_freq_code(freq_input, expected): + assert get_freq_code(freq_input) == expected + + +def test_get_code_invalid(): + with pytest.raises(ValueError, match="Invalid frequency"): + get_freq_code((5, "baz")) diff --git a/pandas/tests/tseries/frequencies/test_inference.py b/pandas/tests/tseries/frequencies/test_inference.py new file mode 100644 index 0000000000000..9e7ddbc45bba8 --- /dev/null +++ b/pandas/tests/tseries/frequencies/test_inference.py @@ -0,0 +1,406 @@ +from datetime import datetime, timedelta + +import numpy as np +import pytest + +from pandas._libs.tslibs.ccalendar import DAYS, MONTHS +from pandas._libs.tslibs.frequencies import INVALID_FREQ_ERR_MSG +import pandas.compat as compat +from pandas.compat import is_platform_windows, range + +from pandas import ( + DatetimeIndex, Index, Series, Timestamp, date_range, period_range) +from pandas.core.tools.datetimes import to_datetime +import pandas.util.testing as tm + +import pandas.tseries.frequencies as frequencies +import pandas.tseries.offsets as offsets + + +def _check_generated_range(start, periods, freq): + """ + Check the range generated from a given start, frequency, and period count. + + Parameters + ---------- + start : str + The start date. + periods : int + The number of periods. + freq : str + The frequency of the range. 
+ """ + freq = freq.upper() + + gen = date_range(start, periods=periods, freq=freq) + index = DatetimeIndex(gen.values) + + if not freq.startswith("Q-"): + assert frequencies.infer_freq(index) == gen.freqstr + else: + inf_freq = frequencies.infer_freq(index) + is_dec_range = inf_freq == "Q-DEC" and gen.freqstr in ( + "Q", "Q-DEC", "Q-SEP", "Q-JUN", "Q-MAR") + is_nov_range = inf_freq == "Q-NOV" and gen.freqstr in ( + "Q-NOV", "Q-AUG", "Q-MAY", "Q-FEB") + is_oct_range = inf_freq == "Q-OCT" and gen.freqstr in ( + "Q-OCT", "Q-JUL", "Q-APR", "Q-JAN") + assert is_dec_range or is_nov_range or is_oct_range + + +@pytest.fixture(params=[(timedelta(1), "D"), + (timedelta(hours=1), "H"), + (timedelta(minutes=1), "T"), + (timedelta(seconds=1), "S"), + (np.timedelta64(1, "ns"), "N"), + (timedelta(microseconds=1), "U"), + (timedelta(microseconds=1000), "L")]) +def base_delta_code_pair(request): + return request.param + + +@pytest.fixture(params=[1, 2, 3, 4]) +def count(request): + return request.param + + +@pytest.fixture(params=DAYS) +def day(request): + return request.param + + +@pytest.fixture(params=MONTHS) +def month(request): + return request.param + + +@pytest.fixture(params=[5, 7]) +def periods(request): + return request.param + + +def test_raise_if_period_index(): + index = period_range(start="1/1/1990", periods=20, freq="M") + msg = "Check the `freq` attribute instead of using infer_freq" + + with pytest.raises(TypeError, match=msg): + frequencies.infer_freq(index) + + +def test_raise_if_too_few(): + index = DatetimeIndex(["12/31/1998", "1/3/1999"]) + msg = "Need at least 3 dates to infer frequency" + + with pytest.raises(ValueError, match=msg): + frequencies.infer_freq(index) + + +def test_business_daily(): + index = DatetimeIndex(["01/01/1999", "1/4/1999", "1/5/1999"]) + assert frequencies.infer_freq(index) == "B" + + +def test_business_daily_look_alike(): + # see gh-16624 + # + # Do not infer "B when "weekend" (2-day gap) in wrong place. 
+ index = DatetimeIndex(["12/31/1998", "1/3/1999", "1/4/1999"]) + assert frequencies.infer_freq(index) is None + + +def test_day_corner(): + index = DatetimeIndex(["1/1/2000", "1/2/2000", "1/3/2000"]) + assert frequencies.infer_freq(index) == "D" + + +def test_non_datetime_index(): + dates = to_datetime(["1/1/2000", "1/2/2000", "1/3/2000"]) + assert frequencies.infer_freq(dates) == "D" + + +def test_fifth_week_of_month_infer(): + # see gh-9425 + # + # Only attempt to infer up to WOM-4. + index = DatetimeIndex(["2014-03-31", "2014-06-30", "2015-03-30"]) + assert frequencies.infer_freq(index) is None + + +def test_week_of_month_fake(): + # All of these dates are on same day + # of week and are 4 or 5 weeks apart. + index = DatetimeIndex(["2013-08-27", "2013-10-01", + "2013-10-29", "2013-11-26"]) + assert frequencies.infer_freq(index) != "WOM-4TUE" + + +def test_fifth_week_of_month(): + # see gh-9425 + # + # Only supports freq up to WOM-4. + msg = ("Of the four parameters: start, end, periods, " + "and freq, exactly three must be specified") + + with pytest.raises(ValueError, match=msg): + date_range("2014-01-01", freq="WOM-5MON") + + +def test_monthly_ambiguous(): + rng = DatetimeIndex(["1/31/2000", "2/29/2000", "3/31/2000"]) + assert rng.inferred_freq == "M" + + +def test_annual_ambiguous(): + rng = DatetimeIndex(["1/31/2000", "1/31/2001", "1/31/2002"]) + assert rng.inferred_freq == "A-JAN" + + +def test_infer_freq_delta(base_delta_code_pair, count): + b = Timestamp(datetime.now()) + base_delta, code = base_delta_code_pair + + inc = base_delta * count + index = DatetimeIndex([b + inc * j for j in range(3)]) + + exp_freq = "%d%s" % (count, code) if count > 1 else code + assert frequencies.infer_freq(index) == exp_freq + + +@pytest.mark.parametrize("constructor", [ + lambda now, delta: DatetimeIndex([now + delta * 7] + + [now + delta * j for j in range(3)]), + lambda now, delta: DatetimeIndex([now + delta * j for j in range(3)] + + [now + delta * 7]) +]) +def 
test_infer_freq_custom(base_delta_code_pair, constructor): + b = Timestamp(datetime.now()) + base_delta, _ = base_delta_code_pair + + index = constructor(b, base_delta) + assert frequencies.infer_freq(index) is None + + +def test_weekly_infer(periods, day): + _check_generated_range("1/1/2000", periods, "W-{day}".format(day=day)) + + +def test_week_of_month_infer(periods, day, count): + _check_generated_range("1/1/2000", periods, + "WOM-{count}{day}".format(count=count, day=day)) + + +@pytest.mark.parametrize("freq", ["M", "BM", "BMS"]) +def test_monthly_infer(periods, freq): + _check_generated_range("1/1/2000", periods, "M") + + +def test_quarterly_infer(month, periods): + _check_generated_range("1/1/2000", periods, + "Q-{month}".format(month=month)) + + +@pytest.mark.parametrize("annual", ["A", "BA"]) +def test_annually_infer(month, periods, annual): + _check_generated_range("1/1/2000", periods, + "{annual}-{month}".format(annual=annual, + month=month)) + + +@pytest.mark.parametrize("freq,expected", [ + ("Q", "Q-DEC"), ("Q-NOV", "Q-NOV"), ("Q-OCT", "Q-OCT") +]) +def test_infer_freq_index(freq, expected): + rng = period_range("1959Q2", "2009Q3", freq=freq) + rng = Index(rng.to_timestamp("D", how="e").astype(object)) + + assert rng.inferred_freq == expected + + +@pytest.mark.parametrize( + "expected,dates", + list(compat.iteritems( + {"AS-JAN": ["2009-01-01", "2010-01-01", "2011-01-01", "2012-01-01"], + "Q-OCT": ["2009-01-31", "2009-04-30", "2009-07-31", "2009-10-31"], + "M": ["2010-11-30", "2010-12-31", "2011-01-31", "2011-02-28"], + "W-SAT": ["2010-12-25", "2011-01-01", "2011-01-08", "2011-01-15"], + "D": ["2011-01-01", "2011-01-02", "2011-01-03", "2011-01-04"], + "H": ["2011-12-31 22:00", "2011-12-31 23:00", + "2012-01-01 00:00", "2012-01-01 01:00"]})) +) +def test_infer_freq_tz(tz_naive_fixture, expected, dates): + # see gh-7310 + tz = tz_naive_fixture + idx = DatetimeIndex(dates, tz=tz) + assert idx.inferred_freq == expected + + 
+@pytest.mark.parametrize("date_pair", [ + ["2013-11-02", "2013-11-5"], # Fall DST + ["2014-03-08", "2014-03-11"], # Spring DST + ["2014-01-01", "2014-01-03"] # Regular Time +]) +@pytest.mark.parametrize("freq", [ + "3H", "10T", "3601S", "3600001L", "3600000001U", "3600000000001N" +]) +def test_infer_freq_tz_transition(tz_naive_fixture, date_pair, freq): + # see gh-8772 + tz = tz_naive_fixture + idx = date_range(date_pair[0], date_pair[1], freq=freq, tz=tz) + assert idx.inferred_freq == freq + + +def test_infer_freq_tz_transition_custom(): + index = date_range("2013-11-03", periods=5, + freq="3H").tz_localize("America/Chicago") + assert index.inferred_freq is None + + +@pytest.mark.parametrize("data,expected", [ + # Hourly freq in a day must result in "H" + (["2014-07-01 09:00", "2014-07-01 10:00", "2014-07-01 11:00", + "2014-07-01 12:00", "2014-07-01 13:00", "2014-07-01 14:00"], "H"), + + (["2014-07-01 09:00", "2014-07-01 10:00", "2014-07-01 11:00", + "2014-07-01 12:00", "2014-07-01 13:00", "2014-07-01 14:00", + "2014-07-01 15:00", "2014-07-01 16:00", "2014-07-02 09:00", + "2014-07-02 10:00", "2014-07-02 11:00"], "BH"), + (["2014-07-04 09:00", "2014-07-04 10:00", "2014-07-04 11:00", + "2014-07-04 12:00", "2014-07-04 13:00", "2014-07-04 14:00", + "2014-07-04 15:00", "2014-07-04 16:00", "2014-07-07 09:00", + "2014-07-07 10:00", "2014-07-07 11:00"], "BH"), + (["2014-07-04 09:00", "2014-07-04 10:00", "2014-07-04 11:00", + "2014-07-04 12:00", "2014-07-04 13:00", "2014-07-04 14:00", + "2014-07-04 15:00", "2014-07-04 16:00", "2014-07-07 09:00", + "2014-07-07 10:00", "2014-07-07 11:00", "2014-07-07 12:00", + "2014-07-07 13:00", "2014-07-07 14:00", "2014-07-07 15:00", + "2014-07-07 16:00", "2014-07-08 09:00", "2014-07-08 10:00", + "2014-07-08 11:00", "2014-07-08 12:00", "2014-07-08 13:00", + "2014-07-08 14:00", "2014-07-08 15:00", "2014-07-08 16:00"], "BH"), +]) +def test_infer_freq_business_hour(data, expected): + # see gh-7905 + idx = DatetimeIndex(data) + assert 
idx.inferred_freq == expected + + +def test_not_monotonic(): + rng = DatetimeIndex(["1/31/2000", "1/31/2001", "1/31/2002"]) + rng = rng[::-1] + + assert rng.inferred_freq == "-1A-JAN" + + +def test_non_datetime_index2(): + rng = DatetimeIndex(["1/31/2000", "1/31/2001", "1/31/2002"]) + vals = rng.to_pydatetime() + + result = frequencies.infer_freq(vals) + assert result == rng.inferred_freq + + +@pytest.mark.parametrize("idx", [ + tm.makeIntIndex(10), tm.makeFloatIndex(10), tm.makePeriodIndex(10) +]) +def test_invalid_index_types(idx): + msg = ("(cannot infer freq from a non-convertible)|" + "(Check the `freq` attribute instead of using infer_freq)") + + with pytest.raises(TypeError, match=msg): + frequencies.infer_freq(idx) + + +@pytest.mark.skipif(is_platform_windows(), + reason="see gh-10822: Windows issue") +@pytest.mark.parametrize("idx", [tm.makeStringIndex(10), + tm.makeUnicodeIndex(10)]) +def test_invalid_index_types_unicode(idx): + # see gh-10822 + # + # Odd error message on conversions to datetime for unicode. 
+ msg = "Unknown string format" + + with pytest.raises(ValueError, match=msg): + frequencies.infer_freq(idx) + + +def test_string_datetime_like_compat(): + # see gh-6463 + data = ["2004-01", "2004-02", "2004-03", "2004-04"] + + expected = frequencies.infer_freq(data) + result = frequencies.infer_freq(Index(data)) + + assert result == expected + + +def test_series(): + # see gh-6407 + s = Series(date_range("20130101", "20130110")) + inferred = frequencies.infer_freq(s) + assert inferred == "D" + + +@pytest.mark.parametrize("end", [10, 10.]) +def test_series_invalid_type(end): + # see gh-6407 + msg = "cannot infer freq from a non-convertible dtype on a Series" + s = Series(np.arange(end)) + + with pytest.raises(TypeError, match=msg): + frequencies.infer_freq(s) + + +def test_series_inconvertible_string(): + # see gh-6407 + msg = "Unknown string format" + + with pytest.raises(ValueError, match=msg): + frequencies.infer_freq(Series(["foo", "bar"])) + + +@pytest.mark.parametrize("freq", [None, "L"]) +def test_series_period_index(freq): + # see gh-6407 + # + # Cannot infer on PeriodIndex + msg = "cannot infer freq from a non-convertible dtype on a Series" + s = Series(period_range("2013", periods=10, freq=freq)) + + with pytest.raises(TypeError, match=msg): + frequencies.infer_freq(s) + + +@pytest.mark.parametrize("freq", ["M", "L", "S"]) +def test_series_datetime_index(freq): + s = Series(date_range("20130101", periods=10, freq=freq)) + inferred = frequencies.infer_freq(s) + assert inferred == freq + + +@pytest.mark.parametrize("offset_func", [ + frequencies.get_offset, + lambda freq: date_range("2011-01-01", periods=5, freq=freq) +]) +@pytest.mark.parametrize("freq", [ + "WEEKDAY", "EOM", "W@MON", "W@TUE", "W@WED", "W@THU", + "W@FRI", "W@SAT", "W@SUN", "Q@JAN", "Q@FEB", "Q@MAR", + "A@JAN", "A@FEB", "A@MAR", "A@APR", "A@MAY", "A@JUN", + "A@JUL", "A@AUG", "A@SEP", "A@OCT", "A@NOV", "A@DEC", + "Y@JAN", "WOM@1MON", "WOM@2MON", "WOM@3MON", + "WOM@4MON", "WOM@1TUE", 
"WOM@2TUE", "WOM@3TUE", + "WOM@4TUE", "WOM@1WED", "WOM@2WED", "WOM@3WED", + "WOM@4WED", "WOM@1THU", "WOM@2THU", "WOM@3THU", + "WOM@4THU", "WOM@1FRI", "WOM@2FRI", "WOM@3FRI", + "WOM@4FRI" +]) +def test_legacy_offset_warnings(offset_func, freq): + with pytest.raises(ValueError, match=INVALID_FREQ_ERR_MSG): + offset_func(freq) + + +def test_ms_vs_capital_ms(): + left = frequencies.get_offset("ms") + right = frequencies.get_offset("MS") + + assert left == offsets.Milli() + assert right == offsets.MonthBegin() diff --git a/pandas/tests/tseries/frequencies/test_to_offset.py b/pandas/tests/tseries/frequencies/test_to_offset.py new file mode 100644 index 0000000000000..c9c35b47f3475 --- /dev/null +++ b/pandas/tests/tseries/frequencies/test_to_offset.py @@ -0,0 +1,146 @@ +import re + +import pytest + +from pandas import Timedelta + +import pandas.tseries.frequencies as frequencies +import pandas.tseries.offsets as offsets + + +@pytest.mark.parametrize("freq_input,expected", [ + (frequencies.to_offset("10us"), offsets.Micro(10)), + (offsets.Hour(), offsets.Hour()), + ((5, "T"), offsets.Minute(5)), + ("2h30min", offsets.Minute(150)), + ("2h 30min", offsets.Minute(150)), + ("2h30min15s", offsets.Second(150 * 60 + 15)), + ("2h 60min", offsets.Hour(3)), + ("2h 20.5min", offsets.Second(8430)), + ("1.5min", offsets.Second(90)), + ("0.5S", offsets.Milli(500)), + ("15l500u", offsets.Micro(15500)), + ("10s75L", offsets.Milli(10075)), + ("1s0.25ms", offsets.Micro(1000250)), + ("1s0.25L", offsets.Micro(1000250)), + ("2800N", offsets.Nano(2800)), + ("2SM", offsets.SemiMonthEnd(2)), + ("2SM-16", offsets.SemiMonthEnd(2, day_of_month=16)), + ("2SMS-14", offsets.SemiMonthBegin(2, day_of_month=14)), + ("2SMS-15", offsets.SemiMonthBegin(2)), +]) +def test_to_offset(freq_input, expected): + result = frequencies.to_offset(freq_input) + assert result == expected + + +@pytest.mark.parametrize("freqstr,expected", [ + ("-1S", -1), + ("-2SM", -2), + ("-1SMS", -1), + ("-5min10s", -310), +]) +def 
test_to_offset_negative(freqstr, expected): + result = frequencies.to_offset(freqstr) + assert result.n == expected + + +@pytest.mark.parametrize("freqstr", [ + "2h20m", "U1", "-U", "3U1", "-2-3U", "-2D:3H", + "1.5.0S", "2SMS-15-15", "2SMS-15D", "100foo", + + # Invalid leading +/- signs. + "+-1d", "-+1h", "+1", "-7", "+d", "-m", + + # Invalid shortcut anchors. + "SM-0", "SM-28", "SM-29", "SM-FOO", "BSM", "SM--1", "SMS-1", + "SMS-28", "SMS-30", "SMS-BAR", "SMS-BYR", "BSMS", "SMS--2" +]) +def test_to_offset_invalid(freqstr): + # see gh-13930 + + # We escape string because some of our + # inputs contain regex special characters. + msg = re.escape("Invalid frequency: {freqstr}".format(freqstr=freqstr)) + with pytest.raises(ValueError, match=msg): + frequencies.to_offset(freqstr) + + +def test_to_offset_no_evaluate(): + with pytest.raises(ValueError, match="Could not evaluate"): + frequencies.to_offset(("", "")) + + +@pytest.mark.parametrize("freqstr,expected", [ + ("2D 3H", offsets.Hour(51)), + ("2 D3 H", offsets.Hour(51)), + ("2 D 3 H", offsets.Hour(51)), + (" 2 D 3 H ", offsets.Hour(51)), + (" H ", offsets.Hour()), + (" 3 H ", offsets.Hour(3)), +]) +def test_to_offset_whitespace(freqstr, expected): + result = frequencies.to_offset(freqstr) + assert result == expected + + +@pytest.mark.parametrize("freqstr,expected", [ + ("00H 00T 01S", 1), + ("-00H 03T 14S", -194), +]) +def test_to_offset_leading_zero(freqstr, expected): + result = frequencies.to_offset(freqstr) + assert result.n == expected + + +@pytest.mark.parametrize("freqstr,expected", [ + ("+1d", 1), + ("+2h30min", 150), +]) +def test_to_offset_leading_plus(freqstr, expected): + result = frequencies.to_offset(freqstr) + assert result.n == expected + + +@pytest.mark.parametrize("kwargs,expected", [ + (dict(days=1, seconds=1), offsets.Second(86401)), + (dict(days=-1, seconds=1), offsets.Second(-86399)), + (dict(hours=1, minutes=10), offsets.Minute(70)), + (dict(hours=1, minutes=-10), offsets.Minute(50)), + 
(dict(weeks=1), offsets.Day(7)), + (dict(hours=1), offsets.Hour(1)), + (dict(hours=1), frequencies.to_offset("60min")), + (dict(microseconds=1), offsets.Micro(1)) +]) +def test_to_offset_pd_timedelta(kwargs, expected): + # see gh-9064 + td = Timedelta(**kwargs) + result = frequencies.to_offset(td) + assert result == expected + + +def test_to_offset_pd_timedelta_invalid(): + # see gh-9064 + msg = "Invalid frequency: 0 days 00:00:00" + td = Timedelta(microseconds=0) + + with pytest.raises(ValueError, match=msg): + frequencies.to_offset(td) + + +@pytest.mark.parametrize("shortcut,expected", [ + ("W", offsets.Week(weekday=6)), + ("W-SUN", offsets.Week(weekday=6)), + ("Q", offsets.QuarterEnd(startingMonth=12)), + ("Q-DEC", offsets.QuarterEnd(startingMonth=12)), + ("Q-MAY", offsets.QuarterEnd(startingMonth=5)), + ("SM", offsets.SemiMonthEnd(day_of_month=15)), + ("SM-15", offsets.SemiMonthEnd(day_of_month=15)), + ("SM-1", offsets.SemiMonthEnd(day_of_month=1)), + ("SM-27", offsets.SemiMonthEnd(day_of_month=27)), + ("SMS-2", offsets.SemiMonthBegin(day_of_month=2)), + ("SMS-27", offsets.SemiMonthBegin(day_of_month=27)), +]) +def test_anchored_shortcuts(shortcut, expected): + result = frequencies.to_offset(shortcut) + assert result == expected diff --git a/pandas/tests/tseries/holiday/__init__.py b/pandas/tests/tseries/holiday/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/pandas/tests/tseries/holiday/test_calendar.py b/pandas/tests/tseries/holiday/test_calendar.py new file mode 100644 index 0000000000000..a5cc4095ce583 --- /dev/null +++ b/pandas/tests/tseries/holiday/test_calendar.py @@ -0,0 +1,77 @@ +from datetime import datetime + +import pytest + +from pandas import DatetimeIndex +import pandas.util.testing as tm + +from pandas.tseries.holiday import ( + AbstractHolidayCalendar, Holiday, Timestamp, USFederalHolidayCalendar, + USThanksgivingDay, get_calendar) + + +@pytest.mark.parametrize("transform", [ + lambda x: x, + lambda x: 
x.strftime("%Y-%m-%d"), + lambda x: Timestamp(x) +]) +def test_calendar(transform): + start_date = datetime(2012, 1, 1) + end_date = datetime(2012, 12, 31) + + calendar = USFederalHolidayCalendar() + holidays = calendar.holidays(transform(start_date), transform(end_date)) + + expected = [ + datetime(2012, 1, 2), + datetime(2012, 1, 16), + datetime(2012, 2, 20), + datetime(2012, 5, 28), + datetime(2012, 7, 4), + datetime(2012, 9, 3), + datetime(2012, 10, 8), + datetime(2012, 11, 12), + datetime(2012, 11, 22), + datetime(2012, 12, 25) + ] + + assert list(holidays.to_pydatetime()) == expected + + +def test_calendar_caching(): + # see gh-9552. + + class TestCalendar(AbstractHolidayCalendar): + def __init__(self, name=None, rules=None): + super(TestCalendar, self).__init__(name=name, rules=rules) + + jan1 = TestCalendar(rules=[Holiday("jan1", year=2015, month=1, day=1)]) + jan2 = TestCalendar(rules=[Holiday("jan2", year=2015, month=1, day=2)]) + + # Getting holidays for Jan 1 should not alter results for Jan 2. + tm.assert_index_equal(jan1.holidays(), DatetimeIndex(["01-Jan-2015"])) + tm.assert_index_equal(jan2.holidays(), DatetimeIndex(["02-Jan-2015"])) + + +def test_calendar_observance_dates(): + # see gh-11477 + us_fed_cal = get_calendar("USFederalHolidayCalendar") + holidays0 = us_fed_cal.holidays(datetime(2015, 7, 3), datetime( + 2015, 7, 3)) # <-- same start and end dates + holidays1 = us_fed_cal.holidays(datetime(2015, 7, 3), datetime( + 2015, 7, 6)) # <-- different start and end dates + holidays2 = us_fed_cal.holidays(datetime(2015, 7, 3), datetime( + 2015, 7, 3)) # <-- same start and end dates + + # These should all produce the same result. + # + # In addition, calling with different start and end + # dates should not alter the output if we call the + # function again with the same start and end date. 
+ tm.assert_index_equal(holidays0, holidays1) + tm.assert_index_equal(holidays0, holidays2) + + +def test_rule_from_name(): + us_fed_cal = get_calendar("USFederalHolidayCalendar") + assert us_fed_cal.rule_from_name("Thanksgiving") == USThanksgivingDay diff --git a/pandas/tests/tseries/holiday/test_federal.py b/pandas/tests/tseries/holiday/test_federal.py new file mode 100644 index 0000000000000..62b5ab2b849ae --- /dev/null +++ b/pandas/tests/tseries/holiday/test_federal.py @@ -0,0 +1,36 @@ +from datetime import datetime + +from pandas.tseries.holiday import ( + AbstractHolidayCalendar, USMartinLutherKingJr, USMemorialDay) + + +def test_no_mlk_before_1986(): + # see gh-10278 + class MLKCalendar(AbstractHolidayCalendar): + rules = [USMartinLutherKingJr] + + holidays = MLKCalendar().holidays(start="1984", + end="1988").to_pydatetime().tolist() + + # Testing to make sure holiday is not incorrectly observed before 1986. + assert holidays == [datetime(1986, 1, 20, 0, 0), + datetime(1987, 1, 19, 0, 0)] + + +def test_memorial_day(): + class MemorialDay(AbstractHolidayCalendar): + rules = [USMemorialDay] + + holidays = MemorialDay().holidays(start="1971", + end="1980").to_pydatetime().tolist() + + # Fixes 5/31 error and checked manually against Wikipedia. 
+ assert holidays == [datetime(1971, 5, 31, 0, 0), + datetime(1972, 5, 29, 0, 0), + datetime(1973, 5, 28, 0, 0), + datetime(1974, 5, 27, 0, 0), + datetime(1975, 5, 26, 0, 0), + datetime(1976, 5, 31, 0, 0), + datetime(1977, 5, 30, 0, 0), + datetime(1978, 5, 29, 0, 0), + datetime(1979, 5, 28, 0, 0)] diff --git a/pandas/tests/tseries/holiday/test_holiday.py b/pandas/tests/tseries/holiday/test_holiday.py new file mode 100644 index 0000000000000..27bba1cc89dee --- /dev/null +++ b/pandas/tests/tseries/holiday/test_holiday.py @@ -0,0 +1,193 @@ +from datetime import datetime + +import pytest +from pytz import utc + +import pandas.util.testing as tm + +from pandas.tseries.holiday import ( + MO, SA, AbstractHolidayCalendar, DateOffset, EasterMonday, GoodFriday, + Holiday, HolidayCalendarFactory, Timestamp, USColumbusDay, USLaborDay, + USMartinLutherKingJr, USMemorialDay, USPresidentsDay, USThanksgivingDay, + get_calendar, next_monday) + + +def _check_holiday_results(holiday, start, end, expected): + """ + Check that the dates for a given holiday match in date and timezone. + + Parameters + ---------- + holiday : Holiday + The holiday to check. + start : datetime-like + The start date of range in which to collect dates for a given holiday. + end : datetime-like + The end date of range in which to collect dates for a given holiday. + expected : list + The list of dates we expect to get. + """ + assert list(holiday.dates(start, end)) == expected + + # Verify that timezone info is preserved. 
+ assert (list(holiday.dates(utc.localize(Timestamp(start)), + utc.localize(Timestamp(end)))) == + [utc.localize(dt) for dt in expected]) + + +@pytest.mark.parametrize("holiday,start_date,end_date,expected", [ + (USMemorialDay, datetime(2011, 1, 1), datetime(2020, 12, 31), + [datetime(2011, 5, 30), datetime(2012, 5, 28), datetime(2013, 5, 27), + datetime(2014, 5, 26), datetime(2015, 5, 25), datetime(2016, 5, 30), + datetime(2017, 5, 29), datetime(2018, 5, 28), datetime(2019, 5, 27), + datetime(2020, 5, 25)]), + + (Holiday("July 4th Eve", month=7, day=3), "2001-01-01", "2003-03-03", + [Timestamp("2001-07-03 00:00:00"), Timestamp("2002-07-03 00:00:00")]), + (Holiday("July 4th Eve", month=7, day=3, days_of_week=(0, 1, 2, 3)), + "2001-01-01", "2008-03-03", [ + Timestamp("2001-07-03 00:00:00"), Timestamp("2002-07-03 00:00:00"), + Timestamp("2003-07-03 00:00:00"), Timestamp("2006-07-03 00:00:00"), + Timestamp("2007-07-03 00:00:00")]), + + (EasterMonday, datetime(2011, 1, 1), datetime(2020, 12, 31), + [Timestamp("2011-04-25 00:00:00"), Timestamp("2012-04-09 00:00:00"), + Timestamp("2013-04-01 00:00:00"), Timestamp("2014-04-21 00:00:00"), + Timestamp("2015-04-06 00:00:00"), Timestamp("2016-03-28 00:00:00"), + Timestamp("2017-04-17 00:00:00"), Timestamp("2018-04-02 00:00:00"), + Timestamp("2019-04-22 00:00:00"), Timestamp("2020-04-13 00:00:00")]), + (GoodFriday, datetime(2011, 1, 1), datetime(2020, 12, 31), + [Timestamp("2011-04-22 00:00:00"), Timestamp("2012-04-06 00:00:00"), + Timestamp("2013-03-29 00:00:00"), Timestamp("2014-04-18 00:00:00"), + Timestamp("2015-04-03 00:00:00"), Timestamp("2016-03-25 00:00:00"), + Timestamp("2017-04-14 00:00:00"), Timestamp("2018-03-30 00:00:00"), + Timestamp("2019-04-19 00:00:00"), Timestamp("2020-04-10 00:00:00")]), + + (USThanksgivingDay, datetime(2011, 1, 1), datetime(2020, 12, 31), + [datetime(2011, 11, 24), datetime(2012, 11, 22), datetime(2013, 11, 28), + datetime(2014, 11, 27), datetime(2015, 11, 26), datetime(2016, 11, 24), + 
datetime(2017, 11, 23), datetime(2018, 11, 22), datetime(2019, 11, 28), + datetime(2020, 11, 26)]) +]) +def test_holiday_dates(holiday, start_date, end_date, expected): + _check_holiday_results(holiday, start_date, end_date, expected) + + +@pytest.mark.parametrize("holiday,start,expected", [ + (USMemorialDay, datetime(2015, 7, 1), []), + (USMemorialDay, "2015-05-25", "2015-05-25"), + + (USLaborDay, datetime(2015, 7, 1), []), + (USLaborDay, "2015-09-07", "2015-09-07"), + + (USColumbusDay, datetime(2015, 7, 1), []), + (USColumbusDay, "2015-10-12", "2015-10-12"), + + (USThanksgivingDay, datetime(2015, 7, 1), []), + (USThanksgivingDay, "2015-11-26", "2015-11-26"), + + (USMartinLutherKingJr, datetime(2015, 7, 1), []), + (USMartinLutherKingJr, "2015-01-19", "2015-01-19"), + + (USPresidentsDay, datetime(2015, 7, 1), []), + (USPresidentsDay, "2015-02-16", "2015-02-16"), + + (GoodFriday, datetime(2015, 7, 1), []), + (GoodFriday, "2015-04-03", "2015-04-03"), + + (EasterMonday, "2015-04-06", "2015-04-06"), + (EasterMonday, datetime(2015, 7, 1), []), + (EasterMonday, "2015-04-05", []), + + ("New Years Day", "2015-01-01", "2015-01-01"), + ("New Years Day", "2010-12-31", "2010-12-31"), + ("New Years Day", datetime(2015, 7, 1), []), + ("New Years Day", "2011-01-01", []), + + ("July 4th", "2015-07-03", "2015-07-03"), + ("July 4th", datetime(2015, 7, 1), []), + ("July 4th", "2015-07-04", []), + + ("Veterans Day", "2012-11-12", "2012-11-12"), + ("Veterans Day", datetime(2015, 7, 1), []), + ("Veterans Day", "2012-11-11", []), + + ("Christmas", "2011-12-26", "2011-12-26"), + ("Christmas", datetime(2015, 7, 1), []), + ("Christmas", "2011-12-25", []), +]) +def test_holidays_within_dates(holiday, start, expected): + # see gh-11477 + # + # Fix holiday behavior where holiday.dates returned dates outside + # start/end date, or observed rules could not be applied because the + # holiday was not in the original date range (e.g., 7/4/2015 -> 7/3/2015). 
+ if isinstance(holiday, str): + calendar = get_calendar("USFederalHolidayCalendar") + holiday = calendar.rule_from_name(holiday) + + if isinstance(expected, str): + expected = [Timestamp(expected)] + + _check_holiday_results(holiday, start, start, expected) + + +@pytest.mark.parametrize("transform", [ + lambda x: x.strftime("%Y-%m-%d"), + lambda x: Timestamp(x) +]) +def test_argument_types(transform): + start_date = datetime(2011, 1, 1) + end_date = datetime(2020, 12, 31) + + holidays = USThanksgivingDay.dates(start_date, end_date) + holidays2 = USThanksgivingDay.dates( + transform(start_date), transform(end_date)) + tm.assert_index_equal(holidays, holidays2) + + +@pytest.mark.parametrize("name,kwargs", [ + ("One-Time", dict(year=2012, month=5, day=28)), + ("Range", dict(month=5, day=28, start_date=datetime(2012, 1, 1), + end_date=datetime(2012, 12, 31), + offset=DateOffset(weekday=MO(1)))) +]) +def test_special_holidays(name, kwargs): + base_date = [datetime(2012, 5, 28)] + holiday = Holiday(name, **kwargs) + + start_date = datetime(2011, 1, 1) + end_date = datetime(2020, 12, 31) + + assert base_date == holiday.dates(start_date, end_date) + + +def test_get_calendar(): + class TestCalendar(AbstractHolidayCalendar): + rules = [] + + calendar = get_calendar("TestCalendar") + assert TestCalendar == calendar.__class__ + + +def test_factory(): + class_1 = HolidayCalendarFactory("MemorialDay", + AbstractHolidayCalendar, + USMemorialDay) + class_2 = HolidayCalendarFactory("Thanksgiving", + AbstractHolidayCalendar, + USThanksgivingDay) + class_3 = HolidayCalendarFactory("Combined", class_1, class_2) + + assert len(class_1.rules) == 1 + assert len(class_2.rules) == 1 + assert len(class_3.rules) == 2 + + +def test_both_offset_observance_raises(): + # see gh-10217 + msg = "Cannot use both offset and observance" + with pytest.raises(NotImplementedError, match=msg): + Holiday("Cyber Monday", month=11, day=1, + offset=[DateOffset(weekday=SA(4))], + observance=next_monday) diff 
--git a/pandas/tests/tseries/holiday/test_observance.py b/pandas/tests/tseries/holiday/test_observance.py new file mode 100644 index 0000000000000..1c22918b2efd8 --- /dev/null +++ b/pandas/tests/tseries/holiday/test_observance.py @@ -0,0 +1,93 @@ +from datetime import datetime + +import pytest + +from pandas.tseries.holiday import ( + after_nearest_workday, before_nearest_workday, nearest_workday, + next_monday, next_monday_or_tuesday, next_workday, previous_friday, + previous_workday, sunday_to_monday, weekend_to_monday) + +_WEDNESDAY = datetime(2014, 4, 9) +_THURSDAY = datetime(2014, 4, 10) +_FRIDAY = datetime(2014, 4, 11) +_SATURDAY = datetime(2014, 4, 12) +_SUNDAY = datetime(2014, 4, 13) +_MONDAY = datetime(2014, 4, 14) +_TUESDAY = datetime(2014, 4, 15) + + +@pytest.mark.parametrize("day", [_SATURDAY, _SUNDAY]) +def test_next_monday(day): + assert next_monday(day) == _MONDAY + + +@pytest.mark.parametrize("day,expected", [ + (_SATURDAY, _MONDAY), + (_SUNDAY, _TUESDAY), + (_MONDAY, _TUESDAY) +]) +def test_next_monday_or_tuesday(day, expected): + assert next_monday_or_tuesday(day) == expected + + +@pytest.mark.parametrize("day", [_SATURDAY, _SUNDAY]) +def test_previous_friday(day): + assert previous_friday(day) == _FRIDAY + + +def test_sunday_to_monday(): + assert sunday_to_monday(_SUNDAY) == _MONDAY + + +@pytest.mark.parametrize("day,expected", [ + (_SATURDAY, _FRIDAY), + (_SUNDAY, _MONDAY), + (_MONDAY, _MONDAY) +]) +def test_nearest_workday(day, expected): + assert nearest_workday(day) == expected + + +@pytest.mark.parametrize("day,expected", [ + (_SATURDAY, _MONDAY), + (_SUNDAY, _MONDAY), + (_MONDAY, _MONDAY) +]) +def test_weekend_to_monday(day, expected): + assert weekend_to_monday(day) == expected + + +@pytest.mark.parametrize("day,expected", [ + (_SATURDAY, _MONDAY), + (_SUNDAY, _MONDAY), + (_MONDAY, _TUESDAY) +]) +def test_next_workday(day, expected): + assert next_workday(day) == expected + + +@pytest.mark.parametrize("day,expected", [ + (_SATURDAY, 
_FRIDAY), + (_SUNDAY, _FRIDAY), + (_TUESDAY, _MONDAY) +]) +def test_previous_workday(day, expected): + assert previous_workday(day) == expected + + +@pytest.mark.parametrize("day,expected", [ + (_SATURDAY, _THURSDAY), + (_SUNDAY, _FRIDAY), + (_TUESDAY, _MONDAY) +]) +def test_before_nearest_workday(day, expected): + assert before_nearest_workday(day) == expected + + +@pytest.mark.parametrize("day,expected", [ + (_SATURDAY, _MONDAY), + (_SUNDAY, _TUESDAY), + (_FRIDAY, _MONDAY) +]) +def test_after_nearest_workday(day, expected): + assert after_nearest_workday(day) == expected diff --git a/pandas/tests/tseries/offsets/test_offsets.py b/pandas/tests/tseries/offsets/test_offsets.py index ac3955970587f..e6f21a7b47c3b 100644 --- a/pandas/tests/tseries/offsets/test_offsets.py +++ b/pandas/tests/tseries/offsets/test_offsets.py @@ -9,6 +9,7 @@ from pandas._libs.tslibs.frequencies import ( INVALID_FREQ_ERR_MSG, get_freq_code, get_freq_str) import pandas._libs.tslibs.offsets as liboffsets +from pandas._libs.tslibs.offsets import ApplyTypeError import pandas.compat as compat from pandas.compat import range from pandas.compat.numpy import np_datetime64_compat @@ -150,7 +151,8 @@ def test_sub(self): # offset2 attr return off = self.offset2 - with pytest.raises(Exception): + msg = "Cannot subtract datetime from offset" + with pytest.raises(TypeError, match=msg): off - self.d assert 2 * off - off == off @@ -257,6 +259,26 @@ def test_offset_n(self, offset_types): mul_offset = offset * 3 assert mul_offset.n == 3 + def test_offset_timedelta64_arg(self, offset_types): + # check that offset._validate_n raises TypeError on a timedelt64 + # object + off = self._get_offset(offset_types) + + td64 = np.timedelta64(4567, 's') + with pytest.raises(TypeError, match="argument must be an integer"): + type(off)(n=td64, **off.kwds) + + def test_offset_mul_ndarray(self, offset_types): + off = self._get_offset(offset_types) + + expected = np.array([[off, off * 2], [off * 3, off * 4]]) + + result = 
np.array([[1, 2], [3, 4]]) * off + tm.assert_numpy_array_equal(result, expected) + + result = off * np.array([[1, 2], [3, 4]]) + tm.assert_numpy_array_equal(result, expected) + def test_offset_freqstr(self, offset_types): offset = self._get_offset(offset_types) @@ -716,7 +738,10 @@ def test_apply_large_n(self): assert rs == xp def test_apply_corner(self): - pytest.raises(TypeError, BDay().apply, BMonthEnd()) + msg = ("Only know how to combine business day with datetime or" + " timedelta") + with pytest.raises(ApplyTypeError, match=msg): + BDay().apply(BMonthEnd()) class TestBusinessHour(Base): @@ -792,7 +817,8 @@ def test_sub(self): # we have to override test_sub here becasue self.offset2 is not # defined as self._offset(2) off = self.offset2 - with pytest.raises(Exception): + msg = "Cannot subtract datetime from offset" + with pytest.raises(TypeError, match=msg): off - self.d assert 2 * off - off == off @@ -1776,7 +1802,10 @@ def test_apply_large_n(self): assert rs == xp def test_apply_corner(self): - pytest.raises(Exception, CDay().apply, BMonthEnd()) + msg = ("Only know how to combine trading day with datetime, datetime64" + " or timedelta") + with pytest.raises(ApplyTypeError, match=msg): + CDay().apply(BMonthEnd()) def test_holidays(self): # Define a TradingDay offset diff --git a/pandas/tests/tseries/offsets/test_ticks.py b/pandas/tests/tseries/offsets/test_ticks.py index f4b012ec1897f..9a8251201f75f 100644 --- a/pandas/tests/tseries/offsets/test_ticks.py +++ b/pandas/tests/tseries/offsets/test_ticks.py @@ -11,6 +11,7 @@ import pytest from pandas import Timedelta, Timestamp +import pandas.util.testing as tm from pandas.tseries import offsets from pandas.tseries.offsets import Hour, Micro, Milli, Minute, Nano, Second @@ -262,6 +263,28 @@ def test_tick_division(cls): assert result.delta == off.delta / .001 +@pytest.mark.parametrize('cls', tick_classes) +def test_tick_rdiv(cls): + off = cls(10) + delta = off.delta + td64 = delta.to_timedelta64() + + with 
pytest.raises(TypeError): + 2 / off + with pytest.raises(TypeError): + 2.0 / off + + assert (td64 * 2.5) / off == 2.5 + + if cls is not Nano: + # skip pytimedelta for Nano since it gets dropped + assert (delta.to_pytimedelta() * 2) / off == 2 + + result = np.array([2 * td64, td64]) / off + expected = np.array([2., 1.]) + tm.assert_numpy_array_equal(result, expected) + + @pytest.mark.parametrize('cls1', tick_classes) @pytest.mark.parametrize('cls2', tick_classes) def test_tick_zero(cls1, cls2): diff --git a/pandas/tests/tseries/offsets/test_yqm_offsets.py b/pandas/tests/tseries/offsets/test_yqm_offsets.py index 8023ee3139dd5..9ee03d2e886f3 100644 --- a/pandas/tests/tseries/offsets/test_yqm_offsets.py +++ b/pandas/tests/tseries/offsets/test_yqm_offsets.py @@ -713,7 +713,8 @@ class TestYearBegin(Base): _offset = YearBegin def test_misspecified(self): - pytest.raises(ValueError, YearBegin, month=13) + with pytest.raises(ValueError, match="Month must go from 1 to 12"): + YearBegin(month=13) offset_cases = [] offset_cases.append((YearBegin(), { @@ -804,7 +805,8 @@ class TestYearEnd(Base): _offset = YearEnd def test_misspecified(self): - pytest.raises(ValueError, YearEnd, month=13) + with pytest.raises(ValueError, match="Month must go from 1 to 12"): + YearEnd(month=13) offset_cases = [] offset_cases.append((YearEnd(), { @@ -900,8 +902,11 @@ class TestBYearBegin(Base): _offset = BYearBegin def test_misspecified(self): - pytest.raises(ValueError, BYearBegin, month=13) - pytest.raises(ValueError, BYearEnd, month=13) + msg = "Month must go from 1 to 12" + with pytest.raises(ValueError, match=msg): + BYearBegin(month=13) + with pytest.raises(ValueError, match=msg): + BYearEnd(month=13) offset_cases = [] offset_cases.append((BYearBegin(), { @@ -993,8 +998,11 @@ class TestBYearEndLagged(Base): _offset = BYearEnd def test_bad_month_fail(self): - pytest.raises(Exception, BYearEnd, month=13) - pytest.raises(Exception, BYearEnd, month=0) + msg = "Month must go from 1 to 12" + with 
pytest.raises(ValueError, match=msg): + BYearEnd(month=13) + with pytest.raises(ValueError, match=msg): + BYearEnd(month=0) offset_cases = [] offset_cases.append((BYearEnd(month=6), { diff --git a/pandas/tests/tseries/test_frequencies.py b/pandas/tests/tseries/test_frequencies.py deleted file mode 100644 index eb4e63654b47b..0000000000000 --- a/pandas/tests/tseries/test_frequencies.py +++ /dev/null @@ -1,793 +0,0 @@ -from datetime import datetime, timedelta - -import numpy as np -import pytest - -from pandas._libs.tslibs import frequencies as libfrequencies, resolution -from pandas._libs.tslibs.ccalendar import MONTHS -from pandas._libs.tslibs.frequencies import ( - INVALID_FREQ_ERR_MSG, FreqGroup, _period_code_map, get_freq, get_freq_code) -import pandas.compat as compat -from pandas.compat import is_platform_windows, range - -from pandas import ( - DatetimeIndex, Index, Series, Timedelta, Timestamp, date_range, - period_range) -from pandas.core.tools.datetimes import to_datetime -import pandas.util.testing as tm - -import pandas.tseries.frequencies as frequencies -import pandas.tseries.offsets as offsets - - -class TestToOffset(object): - - def test_to_offset_multiple(self): - freqstr = '2h30min' - freqstr2 = '2h 30min' - - result = frequencies.to_offset(freqstr) - assert (result == frequencies.to_offset(freqstr2)) - expected = offsets.Minute(150) - assert (result == expected) - - freqstr = '2h30min15s' - result = frequencies.to_offset(freqstr) - expected = offsets.Second(150 * 60 + 15) - assert (result == expected) - - freqstr = '2h 60min' - result = frequencies.to_offset(freqstr) - expected = offsets.Hour(3) - assert (result == expected) - - freqstr = '2h 20.5min' - result = frequencies.to_offset(freqstr) - expected = offsets.Second(8430) - assert (result == expected) - - freqstr = '1.5min' - result = frequencies.to_offset(freqstr) - expected = offsets.Second(90) - assert (result == expected) - - freqstr = '0.5S' - result = frequencies.to_offset(freqstr) - 
expected = offsets.Milli(500) - assert (result == expected) - - freqstr = '15l500u' - result = frequencies.to_offset(freqstr) - expected = offsets.Micro(15500) - assert (result == expected) - - freqstr = '10s75L' - result = frequencies.to_offset(freqstr) - expected = offsets.Milli(10075) - assert (result == expected) - - freqstr = '1s0.25ms' - result = frequencies.to_offset(freqstr) - expected = offsets.Micro(1000250) - assert (result == expected) - - freqstr = '1s0.25L' - result = frequencies.to_offset(freqstr) - expected = offsets.Micro(1000250) - assert (result == expected) - - freqstr = '2800N' - result = frequencies.to_offset(freqstr) - expected = offsets.Nano(2800) - assert (result == expected) - - freqstr = '2SM' - result = frequencies.to_offset(freqstr) - expected = offsets.SemiMonthEnd(2) - assert (result == expected) - - freqstr = '2SM-16' - result = frequencies.to_offset(freqstr) - expected = offsets.SemiMonthEnd(2, day_of_month=16) - assert (result == expected) - - freqstr = '2SMS-14' - result = frequencies.to_offset(freqstr) - expected = offsets.SemiMonthBegin(2, day_of_month=14) - assert (result == expected) - - freqstr = '2SMS-15' - result = frequencies.to_offset(freqstr) - expected = offsets.SemiMonthBegin(2) - assert (result == expected) - - # malformed - with pytest.raises(ValueError, match='Invalid frequency: 2h20m'): - frequencies.to_offset('2h20m') - - def test_to_offset_negative(self): - freqstr = '-1S' - result = frequencies.to_offset(freqstr) - assert (result.n == -1) - - freqstr = '-5min10s' - result = frequencies.to_offset(freqstr) - assert (result.n == -310) - - freqstr = '-2SM' - result = frequencies.to_offset(freqstr) - assert (result.n == -2) - - freqstr = '-1SMS' - result = frequencies.to_offset(freqstr) - assert (result.n == -1) - - def test_to_offset_invalid(self): - # GH 13930 - with pytest.raises(ValueError, match='Invalid frequency: U1'): - frequencies.to_offset('U1') - with pytest.raises(ValueError, match='Invalid frequency: 
-U'): - frequencies.to_offset('-U') - with pytest.raises(ValueError, match='Invalid frequency: 3U1'): - frequencies.to_offset('3U1') - with pytest.raises(ValueError, match='Invalid frequency: -2-3U'): - frequencies.to_offset('-2-3U') - with pytest.raises(ValueError, match='Invalid frequency: -2D:3H'): - frequencies.to_offset('-2D:3H') - with pytest.raises(ValueError, match='Invalid frequency: 1.5.0S'): - frequencies.to_offset('1.5.0S') - - # split offsets with spaces are valid - assert frequencies.to_offset('2D 3H') == offsets.Hour(51) - assert frequencies.to_offset('2 D3 H') == offsets.Hour(51) - assert frequencies.to_offset('2 D 3 H') == offsets.Hour(51) - assert frequencies.to_offset(' 2 D 3 H ') == offsets.Hour(51) - assert frequencies.to_offset(' H ') == offsets.Hour() - assert frequencies.to_offset(' 3 H ') == offsets.Hour(3) - - # special cases - assert frequencies.to_offset('2SMS-15') == offsets.SemiMonthBegin(2) - with pytest.raises(ValueError, match='Invalid frequency: 2SMS-15-15'): - frequencies.to_offset('2SMS-15-15') - with pytest.raises(ValueError, match='Invalid frequency: 2SMS-15D'): - frequencies.to_offset('2SMS-15D') - - def test_to_offset_leading_zero(self): - freqstr = '00H 00T 01S' - result = frequencies.to_offset(freqstr) - assert (result.n == 1) - - freqstr = '-00H 03T 14S' - result = frequencies.to_offset(freqstr) - assert (result.n == -194) - - def test_to_offset_leading_plus(self): - freqstr = '+1d' - result = frequencies.to_offset(freqstr) - assert (result.n == 1) - - freqstr = '+2h30min' - result = frequencies.to_offset(freqstr) - assert (result.n == 150) - - for bad_freq in ['+-1d', '-+1h', '+1', '-7', '+d', '-m']: - with pytest.raises(ValueError, match='Invalid frequency:'): - frequencies.to_offset(bad_freq) - - def test_to_offset_pd_timedelta(self): - # Tests for #9064 - td = Timedelta(days=1, seconds=1) - result = frequencies.to_offset(td) - expected = offsets.Second(86401) - assert (expected == result) - - td = Timedelta(days=-1, 
seconds=1) - result = frequencies.to_offset(td) - expected = offsets.Second(-86399) - assert (expected == result) - - td = Timedelta(hours=1, minutes=10) - result = frequencies.to_offset(td) - expected = offsets.Minute(70) - assert (expected == result) - - td = Timedelta(hours=1, minutes=-10) - result = frequencies.to_offset(td) - expected = offsets.Minute(50) - assert (expected == result) - - td = Timedelta(weeks=1) - result = frequencies.to_offset(td) - expected = offsets.Day(7) - assert (expected == result) - - td1 = Timedelta(hours=1) - result1 = frequencies.to_offset(td1) - result2 = frequencies.to_offset('60min') - assert (result1 == result2) - - td = Timedelta(microseconds=1) - result = frequencies.to_offset(td) - expected = offsets.Micro(1) - assert (expected == result) - - td = Timedelta(microseconds=0) - pytest.raises(ValueError, lambda: frequencies.to_offset(td)) - - def test_anchored_shortcuts(self): - result = frequencies.to_offset('W') - expected = frequencies.to_offset('W-SUN') - assert (result == expected) - - result1 = frequencies.to_offset('Q') - result2 = frequencies.to_offset('Q-DEC') - expected = offsets.QuarterEnd(startingMonth=12) - assert (result1 == expected) - assert (result2 == expected) - - result1 = frequencies.to_offset('Q-MAY') - expected = offsets.QuarterEnd(startingMonth=5) - assert (result1 == expected) - - result1 = frequencies.to_offset('SM') - result2 = frequencies.to_offset('SM-15') - expected = offsets.SemiMonthEnd(day_of_month=15) - assert (result1 == expected) - assert (result2 == expected) - - result = frequencies.to_offset('SM-1') - expected = offsets.SemiMonthEnd(day_of_month=1) - assert (result == expected) - - result = frequencies.to_offset('SM-27') - expected = offsets.SemiMonthEnd(day_of_month=27) - assert (result == expected) - - result = frequencies.to_offset('SMS-2') - expected = offsets.SemiMonthBegin(day_of_month=2) - assert (result == expected) - - result = frequencies.to_offset('SMS-27') - expected = 
offsets.SemiMonthBegin(day_of_month=27) - assert (result == expected) - - # ensure invalid cases fail as expected - invalid_anchors = ['SM-0', 'SM-28', 'SM-29', - 'SM-FOO', 'BSM', 'SM--1', - 'SMS-1', 'SMS-28', 'SMS-30', - 'SMS-BAR', 'SMS-BYR' 'BSMS', - 'SMS--2'] - for invalid_anchor in invalid_anchors: - with pytest.raises(ValueError, match='Invalid frequency: '): - frequencies.to_offset(invalid_anchor) - - -def test_ms_vs_MS(): - left = frequencies.get_offset('ms') - right = frequencies.get_offset('MS') - assert left == offsets.Milli() - assert right == offsets.MonthBegin() - - -def test_rule_aliases(): - rule = frequencies.to_offset('10us') - assert rule == offsets.Micro(10) - - -class TestFrequencyCode(object): - - def test_freq_code(self): - assert get_freq('A') == 1000 - assert get_freq('3A') == 1000 - assert get_freq('-1A') == 1000 - - assert get_freq('Y') == 1000 - assert get_freq('3Y') == 1000 - assert get_freq('-1Y') == 1000 - - assert get_freq('W') == 4000 - assert get_freq('W-MON') == 4001 - assert get_freq('W-FRI') == 4005 - - for freqstr, code in compat.iteritems(_period_code_map): - result = get_freq(freqstr) - assert result == code - - result = resolution.get_freq_group(freqstr) - assert result == code // 1000 * 1000 - - result = resolution.get_freq_group(code) - assert result == code // 1000 * 1000 - - def test_freq_group(self): - assert resolution.get_freq_group('A') == 1000 - assert resolution.get_freq_group('3A') == 1000 - assert resolution.get_freq_group('-1A') == 1000 - assert resolution.get_freq_group('A-JAN') == 1000 - assert resolution.get_freq_group('A-MAY') == 1000 - - assert resolution.get_freq_group('Y') == 1000 - assert resolution.get_freq_group('3Y') == 1000 - assert resolution.get_freq_group('-1Y') == 1000 - assert resolution.get_freq_group('Y-JAN') == 1000 - assert resolution.get_freq_group('Y-MAY') == 1000 - - assert resolution.get_freq_group(offsets.YearEnd()) == 1000 - assert resolution.get_freq_group(offsets.YearEnd(month=1)) == 
1000 - assert resolution.get_freq_group(offsets.YearEnd(month=5)) == 1000 - - assert resolution.get_freq_group('W') == 4000 - assert resolution.get_freq_group('W-MON') == 4000 - assert resolution.get_freq_group('W-FRI') == 4000 - assert resolution.get_freq_group(offsets.Week()) == 4000 - assert resolution.get_freq_group(offsets.Week(weekday=1)) == 4000 - assert resolution.get_freq_group(offsets.Week(weekday=5)) == 4000 - - def test_get_to_timestamp_base(self): - tsb = libfrequencies.get_to_timestamp_base - - assert (tsb(get_freq_code('D')[0]) == - get_freq_code('D')[0]) - assert (tsb(get_freq_code('W')[0]) == - get_freq_code('D')[0]) - assert (tsb(get_freq_code('M')[0]) == - get_freq_code('D')[0]) - - assert (tsb(get_freq_code('S')[0]) == - get_freq_code('S')[0]) - assert (tsb(get_freq_code('T')[0]) == - get_freq_code('S')[0]) - assert (tsb(get_freq_code('H')[0]) == - get_freq_code('S')[0]) - - def test_freq_to_reso(self): - Reso = resolution.Resolution - - assert Reso.get_str_from_freq('A') == 'year' - assert Reso.get_str_from_freq('Q') == 'quarter' - assert Reso.get_str_from_freq('M') == 'month' - assert Reso.get_str_from_freq('D') == 'day' - assert Reso.get_str_from_freq('H') == 'hour' - assert Reso.get_str_from_freq('T') == 'minute' - assert Reso.get_str_from_freq('S') == 'second' - assert Reso.get_str_from_freq('L') == 'millisecond' - assert Reso.get_str_from_freq('U') == 'microsecond' - assert Reso.get_str_from_freq('N') == 'nanosecond' - - for freq in ['A', 'Q', 'M', 'D', 'H', 'T', 'S', 'L', 'U', 'N']: - # check roundtrip - result = Reso.get_freq(Reso.get_str_from_freq(freq)) - assert freq == result - - for freq in ['D', 'H', 'T', 'S', 'L', 'U']: - result = Reso.get_freq(Reso.get_str(Reso.get_reso_from_freq(freq))) - assert freq == result - - def test_resolution_bumping(self): - # see gh-14378 - Reso = resolution.Resolution - - assert Reso.get_stride_from_decimal(1.5, 'T') == (90, 'S') - assert Reso.get_stride_from_decimal(62.4, 'T') == (3744, 'S') - assert 
Reso.get_stride_from_decimal(1.04, 'H') == (3744, 'S') - assert Reso.get_stride_from_decimal(1, 'D') == (1, 'D') - assert (Reso.get_stride_from_decimal(0.342931, 'H') == - (1234551600, 'U')) - assert Reso.get_stride_from_decimal(1.2345, 'D') == (106660800, 'L') - - with pytest.raises(ValueError): - Reso.get_stride_from_decimal(0.5, 'N') - - # too much precision in the input can prevent - with pytest.raises(ValueError): - Reso.get_stride_from_decimal(0.3429324798798269273987982, 'H') - - def test_get_freq_code(self): - # frequency str - assert (get_freq_code('A') == - (get_freq('A'), 1)) - assert (get_freq_code('3D') == - (get_freq('D'), 3)) - assert (get_freq_code('-2M') == - (get_freq('M'), -2)) - - # tuple - assert (get_freq_code(('D', 1)) == - (get_freq('D'), 1)) - assert (get_freq_code(('A', 3)) == - (get_freq('A'), 3)) - assert (get_freq_code(('M', -2)) == - (get_freq('M'), -2)) - - # numeric tuple - assert get_freq_code((1000, 1)) == (1000, 1) - - # offsets - assert (get_freq_code(offsets.Day()) == - (get_freq('D'), 1)) - assert (get_freq_code(offsets.Day(3)) == - (get_freq('D'), 3)) - assert (get_freq_code(offsets.Day(-2)) == - (get_freq('D'), -2)) - - assert (get_freq_code(offsets.MonthEnd()) == - (get_freq('M'), 1)) - assert (get_freq_code(offsets.MonthEnd(3)) == - (get_freq('M'), 3)) - assert (get_freq_code(offsets.MonthEnd(-2)) == - (get_freq('M'), -2)) - - assert (get_freq_code(offsets.Week()) == - (get_freq('W'), 1)) - assert (get_freq_code(offsets.Week(3)) == - (get_freq('W'), 3)) - assert (get_freq_code(offsets.Week(-2)) == - (get_freq('W'), -2)) - - # Monday is weekday=0 - assert (get_freq_code(offsets.Week(weekday=1)) == - (get_freq('W-TUE'), 1)) - assert (get_freq_code(offsets.Week(3, weekday=0)) == - (get_freq('W-MON'), 3)) - assert (get_freq_code(offsets.Week(-2, weekday=4)) == - (get_freq('W-FRI'), -2)) - - def test_frequency_misc(self): - assert (resolution.get_freq_group('T') == - FreqGroup.FR_MIN) - - code, stride = 
get_freq_code(offsets.Hour()) - assert code == FreqGroup.FR_HR - - code, stride = get_freq_code((5, 'T')) - assert code == FreqGroup.FR_MIN - assert stride == 5 - - offset = offsets.Hour() - result = frequencies.to_offset(offset) - assert result == offset - - result = frequencies.to_offset((5, 'T')) - expected = offsets.Minute(5) - assert result == expected - - with pytest.raises(ValueError, match='Invalid frequency'): - get_freq_code((5, 'baz')) - - with pytest.raises(ValueError, match='Invalid frequency'): - frequencies.to_offset('100foo') - - with pytest.raises(ValueError, match='Could not evaluate'): - frequencies.to_offset(('', '')) - - -_dti = DatetimeIndex - - -class TestFrequencyInference(object): - - def test_raise_if_period_index(self): - index = period_range(start="1/1/1990", periods=20, freq="M") - pytest.raises(TypeError, frequencies.infer_freq, index) - - def test_raise_if_too_few(self): - index = _dti(['12/31/1998', '1/3/1999']) - pytest.raises(ValueError, frequencies.infer_freq, index) - - def test_business_daily(self): - index = _dti(['01/01/1999', '1/4/1999', '1/5/1999']) - assert frequencies.infer_freq(index) == 'B' - - def test_business_daily_look_alike(self): - # GH 16624, do not infer 'B' when 'weekend' (2-day gap) in wrong place - index = _dti(['12/31/1998', '1/3/1999', '1/4/1999']) - assert frequencies.infer_freq(index) is None - - def test_day(self): - self._check_tick(timedelta(1), 'D') - - def test_day_corner(self): - index = _dti(['1/1/2000', '1/2/2000', '1/3/2000']) - assert frequencies.infer_freq(index) == 'D' - - def test_non_datetimeindex(self): - dates = to_datetime(['1/1/2000', '1/2/2000', '1/3/2000']) - assert frequencies.infer_freq(dates) == 'D' - - def test_hour(self): - self._check_tick(timedelta(hours=1), 'H') - - def test_minute(self): - self._check_tick(timedelta(minutes=1), 'T') - - def test_second(self): - self._check_tick(timedelta(seconds=1), 'S') - - def test_millisecond(self): - 
self._check_tick(timedelta(microseconds=1000), 'L') - - def test_microsecond(self): - self._check_tick(timedelta(microseconds=1), 'U') - - def test_nanosecond(self): - self._check_tick(np.timedelta64(1, 'ns'), 'N') - - def _check_tick(self, base_delta, code): - b = Timestamp(datetime.now()) - for i in range(1, 5): - inc = base_delta * i - index = _dti([b + inc * j for j in range(3)]) - if i > 1: - exp_freq = '%d%s' % (i, code) - else: - exp_freq = code - assert frequencies.infer_freq(index) == exp_freq - - index = _dti([b + base_delta * 7] + [b + base_delta * j for j in range( - 3)]) - assert frequencies.infer_freq(index) is None - - index = _dti([b + base_delta * j for j in range(3)] + [b + base_delta * - 7]) - - assert frequencies.infer_freq(index) is None - - def test_weekly(self): - days = ['MON', 'TUE', 'WED', 'THU', 'FRI', 'SAT', 'SUN'] - - for day in days: - self._check_generated_range('1/1/2000', 'W-%s' % day) - - def test_week_of_month(self): - days = ['MON', 'TUE', 'WED', 'THU', 'FRI', 'SAT', 'SUN'] - - for day in days: - for i in range(1, 5): - self._check_generated_range('1/1/2000', 'WOM-%d%s' % (i, day)) - - def test_fifth_week_of_month(self): - # Only supports freq up to WOM-4. See #9425 - func = lambda: date_range('2014-01-01', freq='WOM-5MON') - pytest.raises(ValueError, func) - - def test_fifth_week_of_month_infer(self): - # Only attempts to infer up to WOM-4. 
See #9425 - index = DatetimeIndex(["2014-03-31", "2014-06-30", "2015-03-30"]) - assert frequencies.infer_freq(index) is None - - def test_week_of_month_fake(self): - # All of these dates are on same day of week and are 4 or 5 weeks apart - index = DatetimeIndex(["2013-08-27", "2013-10-01", "2013-10-29", - "2013-11-26"]) - assert frequencies.infer_freq(index) != 'WOM-4TUE' - - def test_monthly(self): - self._check_generated_range('1/1/2000', 'M') - - def test_monthly_ambiguous(self): - rng = _dti(['1/31/2000', '2/29/2000', '3/31/2000']) - assert rng.inferred_freq == 'M' - - def test_business_monthly(self): - self._check_generated_range('1/1/2000', 'BM') - - def test_business_start_monthly(self): - self._check_generated_range('1/1/2000', 'BMS') - - def test_quarterly(self): - for month in ['JAN', 'FEB', 'MAR']: - self._check_generated_range('1/1/2000', 'Q-%s' % month) - - def test_annual(self): - for month in MONTHS: - self._check_generated_range('1/1/2000', 'A-%s' % month) - - def test_business_annual(self): - for month in MONTHS: - self._check_generated_range('1/1/2000', 'BA-%s' % month) - - def test_annual_ambiguous(self): - rng = _dti(['1/31/2000', '1/31/2001', '1/31/2002']) - assert rng.inferred_freq == 'A-JAN' - - def _check_generated_range(self, start, freq): - freq = freq.upper() - - gen = date_range(start, periods=7, freq=freq) - index = _dti(gen.values) - if not freq.startswith('Q-'): - assert frequencies.infer_freq(index) == gen.freqstr - else: - inf_freq = frequencies.infer_freq(index) - is_dec_range = inf_freq == 'Q-DEC' and gen.freqstr in ( - 'Q', 'Q-DEC', 'Q-SEP', 'Q-JUN', 'Q-MAR') - is_nov_range = inf_freq == 'Q-NOV' and gen.freqstr in ( - 'Q-NOV', 'Q-AUG', 'Q-MAY', 'Q-FEB') - is_oct_range = inf_freq == 'Q-OCT' and gen.freqstr in ( - 'Q-OCT', 'Q-JUL', 'Q-APR', 'Q-JAN') - assert is_dec_range or is_nov_range or is_oct_range - - gen = date_range(start, periods=5, freq=freq) - index = _dti(gen.values) - - if not freq.startswith('Q-'): - assert 
frequencies.infer_freq(index) == gen.freqstr - else: - inf_freq = frequencies.infer_freq(index) - is_dec_range = inf_freq == 'Q-DEC' and gen.freqstr in ( - 'Q', 'Q-DEC', 'Q-SEP', 'Q-JUN', 'Q-MAR') - is_nov_range = inf_freq == 'Q-NOV' and gen.freqstr in ( - 'Q-NOV', 'Q-AUG', 'Q-MAY', 'Q-FEB') - is_oct_range = inf_freq == 'Q-OCT' and gen.freqstr in ( - 'Q-OCT', 'Q-JUL', 'Q-APR', 'Q-JAN') - - assert is_dec_range or is_nov_range or is_oct_range - - def test_infer_freq(self): - rng = period_range('1959Q2', '2009Q3', freq='Q') - rng = Index(rng.to_timestamp('D', how='e').astype(object)) - assert rng.inferred_freq == 'Q-DEC' - - rng = period_range('1959Q2', '2009Q3', freq='Q-NOV') - rng = Index(rng.to_timestamp('D', how='e').astype(object)) - assert rng.inferred_freq == 'Q-NOV' - - rng = period_range('1959Q2', '2009Q3', freq='Q-OCT') - rng = Index(rng.to_timestamp('D', how='e').astype(object)) - assert rng.inferred_freq == 'Q-OCT' - - def test_infer_freq_tz(self): - - freqs = {'AS-JAN': - ['2009-01-01', '2010-01-01', '2011-01-01', '2012-01-01'], - 'Q-OCT': - ['2009-01-31', '2009-04-30', '2009-07-31', '2009-10-31'], - 'M': ['2010-11-30', '2010-12-31', '2011-01-31', '2011-02-28'], - 'W-SAT': - ['2010-12-25', '2011-01-01', '2011-01-08', '2011-01-15'], - 'D': ['2011-01-01', '2011-01-02', '2011-01-03', '2011-01-04'], - 'H': ['2011-12-31 22:00', '2011-12-31 23:00', - '2012-01-01 00:00', '2012-01-01 01:00']} - - # GH 7310 - for tz in [None, 'Australia/Sydney', 'Asia/Tokyo', 'Europe/Paris', - 'US/Pacific', 'US/Eastern']: - for expected, dates in compat.iteritems(freqs): - idx = DatetimeIndex(dates, tz=tz) - assert idx.inferred_freq == expected - - def test_infer_freq_tz_transition(self): - # Tests for #8772 - date_pairs = [['2013-11-02', '2013-11-5'], # Fall DST - ['2014-03-08', '2014-03-11'], # Spring DST - ['2014-01-01', '2014-01-03']] # Regular Time - freqs = ['3H', '10T', '3601S', '3600001L', '3600000001U', - '3600000000001N'] - - for tz in [None, 'Australia/Sydney', 
'Asia/Tokyo', 'Europe/Paris', - 'US/Pacific', 'US/Eastern']: - for date_pair in date_pairs: - for freq in freqs: - idx = date_range(date_pair[0], date_pair[ - 1], freq=freq, tz=tz) - assert idx.inferred_freq == freq - - index = date_range("2013-11-03", periods=5, - freq="3H").tz_localize("America/Chicago") - assert index.inferred_freq is None - - def test_infer_freq_businesshour(self): - # GH 7905 - idx = DatetimeIndex( - ['2014-07-01 09:00', '2014-07-01 10:00', '2014-07-01 11:00', - '2014-07-01 12:00', '2014-07-01 13:00', '2014-07-01 14:00']) - # hourly freq in a day must result in 'H' - assert idx.inferred_freq == 'H' - - idx = DatetimeIndex( - ['2014-07-01 09:00', '2014-07-01 10:00', '2014-07-01 11:00', - '2014-07-01 12:00', '2014-07-01 13:00', '2014-07-01 14:00', - '2014-07-01 15:00', '2014-07-01 16:00', '2014-07-02 09:00', - '2014-07-02 10:00', '2014-07-02 11:00']) - assert idx.inferred_freq == 'BH' - - idx = DatetimeIndex( - ['2014-07-04 09:00', '2014-07-04 10:00', '2014-07-04 11:00', - '2014-07-04 12:00', '2014-07-04 13:00', '2014-07-04 14:00', - '2014-07-04 15:00', '2014-07-04 16:00', '2014-07-07 09:00', - '2014-07-07 10:00', '2014-07-07 11:00']) - assert idx.inferred_freq == 'BH' - - idx = DatetimeIndex( - ['2014-07-04 09:00', '2014-07-04 10:00', '2014-07-04 11:00', - '2014-07-04 12:00', '2014-07-04 13:00', '2014-07-04 14:00', - '2014-07-04 15:00', '2014-07-04 16:00', '2014-07-07 09:00', - '2014-07-07 10:00', '2014-07-07 11:00', '2014-07-07 12:00', - '2014-07-07 13:00', '2014-07-07 14:00', '2014-07-07 15:00', - '2014-07-07 16:00', '2014-07-08 09:00', '2014-07-08 10:00', - '2014-07-08 11:00', '2014-07-08 12:00', '2014-07-08 13:00', - '2014-07-08 14:00', '2014-07-08 15:00', '2014-07-08 16:00']) - assert idx.inferred_freq == 'BH' - - def test_not_monotonic(self): - rng = _dti(['1/31/2000', '1/31/2001', '1/31/2002']) - rng = rng[::-1] - assert rng.inferred_freq == '-1A-JAN' - - def test_non_datetimeindex2(self): - rng = _dti(['1/31/2000', '1/31/2001', 
'1/31/2002']) - - vals = rng.to_pydatetime() - - result = frequencies.infer_freq(vals) - assert result == rng.inferred_freq - - def test_invalid_index_types(self): - - # test all index types - for i in [tm.makeIntIndex(10), tm.makeFloatIndex(10), - tm.makePeriodIndex(10)]: - pytest.raises(TypeError, lambda: frequencies.infer_freq(i)) - - # GH 10822 - # odd error message on conversions to datetime for unicode - if not is_platform_windows(): - for i in [tm.makeStringIndex(10), tm.makeUnicodeIndex(10)]: - pytest.raises(ValueError, lambda: frequencies.infer_freq(i)) - - def test_string_datetimelike_compat(self): - - # GH 6463 - expected = frequencies.infer_freq(['2004-01', '2004-02', '2004-03', - '2004-04']) - result = frequencies.infer_freq(Index(['2004-01', '2004-02', '2004-03', - '2004-04'])) - assert result == expected - - def test_series(self): - - # GH6407 - # inferring series - - # invalid type of Series - for s in [Series(np.arange(10)), Series(np.arange(10.))]: - pytest.raises(TypeError, lambda: frequencies.infer_freq(s)) - - # a non-convertible string - pytest.raises(ValueError, lambda: frequencies.infer_freq( - Series(['foo', 'bar']))) - - # cannot infer on PeriodIndex - for freq in [None, 'L']: - s = Series(period_range('2013', periods=10, freq=freq)) - pytest.raises(TypeError, lambda: frequencies.infer_freq(s)) - - # DateTimeIndex - for freq in ['M', 'L', 'S']: - s = Series(date_range('20130101', periods=10, freq=freq)) - inferred = frequencies.infer_freq(s) - assert inferred == freq - - s = Series(date_range('20130101', '20130110')) - inferred = frequencies.infer_freq(s) - assert inferred == 'D' - - def test_legacy_offset_warnings(self): - freqs = ['WEEKDAY', 'EOM', 'W@MON', 'W@TUE', 'W@WED', 'W@THU', - 'W@FRI', 'W@SAT', 'W@SUN', 'Q@JAN', 'Q@FEB', 'Q@MAR', - 'A@JAN', 'A@FEB', 'A@MAR', 'A@APR', 'A@MAY', 'A@JUN', - 'A@JUL', 'A@AUG', 'A@SEP', 'A@OCT', 'A@NOV', 'A@DEC', - 'Y@JAN', 'WOM@1MON', 'WOM@2MON', 'WOM@3MON', - 'WOM@4MON', 'WOM@1TUE', 'WOM@2TUE', 
'WOM@3TUE', - 'WOM@4TUE', 'WOM@1WED', 'WOM@2WED', 'WOM@3WED', - 'WOM@4WED', 'WOM@1THU', 'WOM@2THU', 'WOM@3THU', - 'WOM@4THU', 'WOM@1FRI', 'WOM@2FRI', 'WOM@3FRI', - 'WOM@4FRI'] - - msg = INVALID_FREQ_ERR_MSG - for freq in freqs: - with pytest.raises(ValueError, match=msg): - frequencies.get_offset(freq) - - with pytest.raises(ValueError, match=msg): - date_range('2011-01-01', periods=5, freq=freq) diff --git a/pandas/tests/tseries/test_holiday.py b/pandas/tests/tseries/test_holiday.py deleted file mode 100644 index 86f154ed1acc2..0000000000000 --- a/pandas/tests/tseries/test_holiday.py +++ /dev/null @@ -1,382 +0,0 @@ -from datetime import datetime - -import pytest -from pytz import utc - -from pandas import DatetimeIndex, compat -import pandas.util.testing as tm - -from pandas.tseries.holiday import ( - MO, SA, AbstractHolidayCalendar, DateOffset, EasterMonday, GoodFriday, - Holiday, HolidayCalendarFactory, Timestamp, USColumbusDay, - USFederalHolidayCalendar, USLaborDay, USMartinLutherKingJr, USMemorialDay, - USPresidentsDay, USThanksgivingDay, after_nearest_workday, - before_nearest_workday, get_calendar, nearest_workday, next_monday, - next_monday_or_tuesday, next_workday, previous_friday, previous_workday, - sunday_to_monday, weekend_to_monday) - - -class TestCalendar(object): - - def setup_method(self, method): - self.holiday_list = [ - datetime(2012, 1, 2), - datetime(2012, 1, 16), - datetime(2012, 2, 20), - datetime(2012, 5, 28), - datetime(2012, 7, 4), - datetime(2012, 9, 3), - datetime(2012, 10, 8), - datetime(2012, 11, 12), - datetime(2012, 11, 22), - datetime(2012, 12, 25)] - - self.start_date = datetime(2012, 1, 1) - self.end_date = datetime(2012, 12, 31) - - def test_calendar(self): - - calendar = USFederalHolidayCalendar() - holidays = calendar.holidays(self.start_date, self.end_date) - - holidays_1 = calendar.holidays( - self.start_date.strftime('%Y-%m-%d'), - self.end_date.strftime('%Y-%m-%d')) - holidays_2 = calendar.holidays( - 
Timestamp(self.start_date), - Timestamp(self.end_date)) - - assert list(holidays.to_pydatetime()) == self.holiday_list - assert list(holidays_1.to_pydatetime()) == self.holiday_list - assert list(holidays_2.to_pydatetime()) == self.holiday_list - - def test_calendar_caching(self): - # Test for issue #9552 - - class TestCalendar(AbstractHolidayCalendar): - - def __init__(self, name=None, rules=None): - super(TestCalendar, self).__init__(name=name, rules=rules) - - jan1 = TestCalendar(rules=[Holiday('jan1', year=2015, month=1, day=1)]) - jan2 = TestCalendar(rules=[Holiday('jan2', year=2015, month=1, day=2)]) - - tm.assert_index_equal(jan1.holidays(), DatetimeIndex(['01-Jan-2015'])) - tm.assert_index_equal(jan2.holidays(), DatetimeIndex(['02-Jan-2015'])) - - def test_calendar_observance_dates(self): - # Test for issue 11477 - USFedCal = get_calendar('USFederalHolidayCalendar') - holidays0 = USFedCal.holidays(datetime(2015, 7, 3), datetime( - 2015, 7, 3)) # <-- same start and end dates - holidays1 = USFedCal.holidays(datetime(2015, 7, 3), datetime( - 2015, 7, 6)) # <-- different start and end dates - holidays2 = USFedCal.holidays(datetime(2015, 7, 3), datetime( - 2015, 7, 3)) # <-- same start and end dates - - tm.assert_index_equal(holidays0, holidays1) - tm.assert_index_equal(holidays0, holidays2) - - def test_rule_from_name(self): - USFedCal = get_calendar('USFederalHolidayCalendar') - assert USFedCal.rule_from_name('Thanksgiving') == USThanksgivingDay - - -class TestHoliday(object): - - def setup_method(self, method): - self.start_date = datetime(2011, 1, 1) - self.end_date = datetime(2020, 12, 31) - - def check_results(self, holiday, start, end, expected): - assert list(holiday.dates(start, end)) == expected - - # Verify that timezone info is preserved. 
- assert (list(holiday.dates(utc.localize(Timestamp(start)), - utc.localize(Timestamp(end)))) == - [utc.localize(dt) for dt in expected]) - - def test_usmemorialday(self): - self.check_results(holiday=USMemorialDay, - start=self.start_date, - end=self.end_date, - expected=[ - datetime(2011, 5, 30), - datetime(2012, 5, 28), - datetime(2013, 5, 27), - datetime(2014, 5, 26), - datetime(2015, 5, 25), - datetime(2016, 5, 30), - datetime(2017, 5, 29), - datetime(2018, 5, 28), - datetime(2019, 5, 27), - datetime(2020, 5, 25), - ], ) - - def test_non_observed_holiday(self): - - self.check_results( - Holiday('July 4th Eve', month=7, day=3), - start="2001-01-01", - end="2003-03-03", - expected=[ - Timestamp('2001-07-03 00:00:00'), - Timestamp('2002-07-03 00:00:00') - ] - ) - - self.check_results( - Holiday('July 4th Eve', month=7, day=3, days_of_week=(0, 1, 2, 3)), - start="2001-01-01", - end="2008-03-03", - expected=[ - Timestamp('2001-07-03 00:00:00'), - Timestamp('2002-07-03 00:00:00'), - Timestamp('2003-07-03 00:00:00'), - Timestamp('2006-07-03 00:00:00'), - Timestamp('2007-07-03 00:00:00'), - ] - ) - - def test_easter(self): - - self.check_results(EasterMonday, - start=self.start_date, - end=self.end_date, - expected=[ - Timestamp('2011-04-25 00:00:00'), - Timestamp('2012-04-09 00:00:00'), - Timestamp('2013-04-01 00:00:00'), - Timestamp('2014-04-21 00:00:00'), - Timestamp('2015-04-06 00:00:00'), - Timestamp('2016-03-28 00:00:00'), - Timestamp('2017-04-17 00:00:00'), - Timestamp('2018-04-02 00:00:00'), - Timestamp('2019-04-22 00:00:00'), - Timestamp('2020-04-13 00:00:00'), - ], ) - self.check_results(GoodFriday, - start=self.start_date, - end=self.end_date, - expected=[ - Timestamp('2011-04-22 00:00:00'), - Timestamp('2012-04-06 00:00:00'), - Timestamp('2013-03-29 00:00:00'), - Timestamp('2014-04-18 00:00:00'), - Timestamp('2015-04-03 00:00:00'), - Timestamp('2016-03-25 00:00:00'), - Timestamp('2017-04-14 00:00:00'), - Timestamp('2018-03-30 00:00:00'), - 
Timestamp('2019-04-19 00:00:00'), - Timestamp('2020-04-10 00:00:00'), - ], ) - - def test_usthanksgivingday(self): - - self.check_results(USThanksgivingDay, - start=self.start_date, - end=self.end_date, - expected=[ - datetime(2011, 11, 24), - datetime(2012, 11, 22), - datetime(2013, 11, 28), - datetime(2014, 11, 27), - datetime(2015, 11, 26), - datetime(2016, 11, 24), - datetime(2017, 11, 23), - datetime(2018, 11, 22), - datetime(2019, 11, 28), - datetime(2020, 11, 26), - ], ) - - def test_holidays_within_dates(self): - # Fix holiday behavior found in #11477 - # where holiday.dates returned dates outside start/end date - # or observed rules could not be applied as the holiday - # was not in the original date range (e.g., 7/4/2015 -> 7/3/2015) - start_date = datetime(2015, 7, 1) - end_date = datetime(2015, 7, 1) - - calendar = get_calendar('USFederalHolidayCalendar') - new_years = calendar.rule_from_name('New Years Day') - july_4th = calendar.rule_from_name('July 4th') - veterans_day = calendar.rule_from_name('Veterans Day') - christmas = calendar.rule_from_name('Christmas') - - # Holiday: (start/end date, holiday) - holidays = {USMemorialDay: ("2015-05-25", "2015-05-25"), - USLaborDay: ("2015-09-07", "2015-09-07"), - USColumbusDay: ("2015-10-12", "2015-10-12"), - USThanksgivingDay: ("2015-11-26", "2015-11-26"), - USMartinLutherKingJr: ("2015-01-19", "2015-01-19"), - USPresidentsDay: ("2015-02-16", "2015-02-16"), - GoodFriday: ("2015-04-03", "2015-04-03"), - EasterMonday: [("2015-04-06", "2015-04-06"), - ("2015-04-05", [])], - new_years: [("2015-01-01", "2015-01-01"), - ("2011-01-01", []), - ("2010-12-31", "2010-12-31")], - july_4th: [("2015-07-03", "2015-07-03"), - ("2015-07-04", [])], - veterans_day: [("2012-11-11", []), - ("2012-11-12", "2012-11-12")], - christmas: [("2011-12-25", []), - ("2011-12-26", "2011-12-26")]} - - for rule, dates in compat.iteritems(holidays): - empty_dates = rule.dates(start_date, end_date) - assert empty_dates.tolist() == [] - - if 
isinstance(dates, tuple): - dates = [dates] - - for start, expected in dates: - if len(expected): - expected = [Timestamp(expected)] - self.check_results(rule, start, start, expected) - - def test_argument_types(self): - holidays = USThanksgivingDay.dates(self.start_date, self.end_date) - - holidays_1 = USThanksgivingDay.dates( - self.start_date.strftime('%Y-%m-%d'), - self.end_date.strftime('%Y-%m-%d')) - - holidays_2 = USThanksgivingDay.dates( - Timestamp(self.start_date), - Timestamp(self.end_date)) - - tm.assert_index_equal(holidays, holidays_1) - tm.assert_index_equal(holidays, holidays_2) - - def test_special_holidays(self): - base_date = [datetime(2012, 5, 28)] - holiday_1 = Holiday('One-Time', year=2012, month=5, day=28) - holiday_2 = Holiday('Range', month=5, day=28, - start_date=datetime(2012, 1, 1), - end_date=datetime(2012, 12, 31), - offset=DateOffset(weekday=MO(1))) - - assert base_date == holiday_1.dates(self.start_date, self.end_date) - assert base_date == holiday_2.dates(self.start_date, self.end_date) - - def test_get_calendar(self): - class TestCalendar(AbstractHolidayCalendar): - rules = [] - - calendar = get_calendar('TestCalendar') - assert TestCalendar == calendar.__class__ - - def test_factory(self): - class_1 = HolidayCalendarFactory('MemorialDay', - AbstractHolidayCalendar, - USMemorialDay) - class_2 = HolidayCalendarFactory('Thansksgiving', - AbstractHolidayCalendar, - USThanksgivingDay) - class_3 = HolidayCalendarFactory('Combined', class_1, class_2) - - assert len(class_1.rules) == 1 - assert len(class_2.rules) == 1 - assert len(class_3.rules) == 2 - - -class TestObservanceRules(object): - - def setup_method(self, method): - self.we = datetime(2014, 4, 9) - self.th = datetime(2014, 4, 10) - self.fr = datetime(2014, 4, 11) - self.sa = datetime(2014, 4, 12) - self.su = datetime(2014, 4, 13) - self.mo = datetime(2014, 4, 14) - self.tu = datetime(2014, 4, 15) - - def test_next_monday(self): - assert next_monday(self.sa) == self.mo - assert 
next_monday(self.su) == self.mo - - def test_next_monday_or_tuesday(self): - assert next_monday_or_tuesday(self.sa) == self.mo - assert next_monday_or_tuesday(self.su) == self.tu - assert next_monday_or_tuesday(self.mo) == self.tu - - def test_previous_friday(self): - assert previous_friday(self.sa) == self.fr - assert previous_friday(self.su) == self.fr - - def test_sunday_to_monday(self): - assert sunday_to_monday(self.su) == self.mo - - def test_nearest_workday(self): - assert nearest_workday(self.sa) == self.fr - assert nearest_workday(self.su) == self.mo - assert nearest_workday(self.mo) == self.mo - - def test_weekend_to_monday(self): - assert weekend_to_monday(self.sa) == self.mo - assert weekend_to_monday(self.su) == self.mo - assert weekend_to_monday(self.mo) == self.mo - - def test_next_workday(self): - assert next_workday(self.sa) == self.mo - assert next_workday(self.su) == self.mo - assert next_workday(self.mo) == self.tu - - def test_previous_workday(self): - assert previous_workday(self.sa) == self.fr - assert previous_workday(self.su) == self.fr - assert previous_workday(self.tu) == self.mo - - def test_before_nearest_workday(self): - assert before_nearest_workday(self.sa) == self.th - assert before_nearest_workday(self.su) == self.fr - assert before_nearest_workday(self.tu) == self.mo - - def test_after_nearest_workday(self): - assert after_nearest_workday(self.sa) == self.mo - assert after_nearest_workday(self.su) == self.tu - assert after_nearest_workday(self.fr) == self.mo - - -class TestFederalHolidayCalendar(object): - - def test_no_mlk_before_1986(self): - # see gh-10278 - class MLKCalendar(AbstractHolidayCalendar): - rules = [USMartinLutherKingJr] - - holidays = MLKCalendar().holidays(start='1984', - end='1988').to_pydatetime().tolist() - - # Testing to make sure holiday is not incorrectly observed before 1986 - assert holidays == [datetime(1986, 1, 20, 0, 0), - datetime(1987, 1, 19, 0, 0)] - - def test_memorial_day(self): - class 
MemorialDay(AbstractHolidayCalendar): - rules = [USMemorialDay] - - holidays = MemorialDay().holidays(start='1971', - end='1980').to_pydatetime().tolist() - - # Fixes 5/31 error and checked manually against Wikipedia - assert holidays == [datetime(1971, 5, 31, 0, 0), - datetime(1972, 5, 29, 0, 0), - datetime(1973, 5, 28, 0, 0), - datetime(1974, 5, 27, 0, 0), - datetime(1975, 5, 26, 0, 0), - datetime(1976, 5, 31, 0, 0), - datetime(1977, 5, 30, 0, 0), - datetime(1978, 5, 29, 0, 0), - datetime(1979, 5, 28, 0, 0)] - - -class TestHolidayConflictingArguments(object): - - def test_both_offset_observance_raises(self): - # see gh-10217 - with pytest.raises(NotImplementedError): - Holiday("Cyber Monday", month=11, day=1, - offset=[DateOffset(weekday=SA(4))], - observance=next_monday) diff --git a/pandas/tests/util/test_hashing.py b/pandas/tests/util/test_hashing.py index d36de931e2610..c80b4483c0482 100644 --- a/pandas/tests/util/test_hashing.py +++ b/pandas/tests/util/test_hashing.py @@ -257,8 +257,7 @@ def test_categorical_with_nan_consistency(): assert result[1] in expected -@pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning") -@pytest.mark.parametrize("obj", [pd.Timestamp("20130101"), tm.makePanel()]) +@pytest.mark.parametrize("obj", [pd.Timestamp("20130101")]) def test_pandas_errors(obj): msg = "Unexpected type for hashing" with pytest.raises(TypeError, match=msg): diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py index c454db3bbdffc..1b782b430a1a7 100644 --- a/pandas/tseries/frequencies.py +++ b/pandas/tseries/frequencies.py @@ -67,8 +67,8 @@ def to_offset(freq): Returns ------- - delta : DateOffset - None if freq is None + DateOffset + None if freq is None. 
Raises ------ @@ -77,7 +77,7 @@ def to_offset(freq): See Also -------- - pandas.DateOffset + DateOffset Examples -------- @@ -214,7 +214,7 @@ def infer_freq(index, warn=True): Returns ------- - freq : string or None + str or None None if no discernible frequency TypeError if the index is not datetime-like ValueError if there are less than three values. @@ -300,7 +300,7 @@ def get_freq(self): Returns ------- - freqstr : str or None + str or None """ if not self.is_monotonic or not self.index._is_unique: return None diff --git a/pandas/util/_decorators.py b/pandas/util/_decorators.py index 86cd8b1e698c6..e92051ebbea9a 100644 --- a/pandas/util/_decorators.py +++ b/pandas/util/_decorators.py @@ -4,12 +4,13 @@ import warnings from pandas._libs.properties import cache_readonly # noqa -from pandas.compat import PY2, callable, signature +from pandas.compat import PY2, signature def deprecate(name, alternative, version, alt_name=None, klass=None, stacklevel=2, msg=None): - """Return a new function that emits a deprecation warning on use. + """ + Return a new function that emits a deprecation warning on use. To use this method for a deprecated function, another function `alternative` with the same signature must exist. The deprecated diff --git a/pandas/util/_tester.py b/pandas/util/_tester.py index 18e8d415459fd..19b1cc700261c 100644 --- a/pandas/util/_tester.py +++ b/pandas/util/_tester.py @@ -11,7 +11,7 @@ def test(extra_args=None): try: import pytest except ImportError: - raise ImportError("Need pytest>=3.0 to run tests") + raise ImportError("Need pytest>=4.0.2 to run tests") try: import hypothesis # noqa except ImportError: diff --git a/pandas/util/move.c b/pandas/util/move.c index 62860adb1c1f6..188d7b79b35d2 100644 --- a/pandas/util/move.c +++ b/pandas/util/move.c @@ -1,3 +1,12 @@ +/* +Copyright (c) 2019, PyData Development Team +All rights reserved. + +Distributed under the terms of the BSD Simplified License. 
+ +The full license is in the LICENSE file, distributed with this software. +*/ + #include <Python.h> #define COMPILING_IN_PY2 (PY_VERSION_HEX <= 0x03000000) @@ -10,15 +19,15 @@ /* in python 3, we cannot intern bytes objects so this is always false */ #define PyString_CHECK_INTERNED(cs) 0 -#endif /* !COMPILING_IN_PY2 */ +#endif // !COMPILING_IN_PY2 #ifndef Py_TPFLAGS_HAVE_GETCHARBUFFER #define Py_TPFLAGS_HAVE_GETCHARBUFFER 0 -#endif +#endif // Py_TPFLAGS_HAVE_GETCHARBUFFER #ifndef Py_TPFLAGS_HAVE_NEWBUFFER #define Py_TPFLAGS_HAVE_NEWBUFFER 0 -#endif +#endif // Py_TPFLAGS_HAVE_NEWBUFFER static PyObject *badmove; /* bad move exception class */ @@ -31,15 +40,13 @@ typedef struct { static PyTypeObject stolenbuf_type; /* forward declare type */ static void -stolenbuf_dealloc(stolenbufobject *self) -{ +stolenbuf_dealloc(stolenbufobject *self) { Py_DECREF(self->invalid_bytes); PyObject_Del(self); } static int -stolenbuf_getbuffer(stolenbufobject *self, Py_buffer *view, int flags) -{ +stolenbuf_getbuffer(stolenbufobject *self, Py_buffer *view, int flags) { return PyBuffer_FillInfo(view, (PyObject*) self, (void*) PyString_AS_STRING(self->invalid_bytes), @@ -51,8 +58,8 @@ stolenbuf_getbuffer(stolenbufobject *self, Py_buffer *view, int flags) #if COMPILING_IN_PY2 static Py_ssize_t -stolenbuf_getreadwritebuf(stolenbufobject *self, Py_ssize_t segment, void **out) -{ +stolenbuf_getreadwritebuf(stolenbufobject *self, + Py_ssize_t segment, void **out) { if (segment != 0) { PyErr_SetString(PyExc_SystemError, "accessing non-existent string segment"); @@ -63,8 +70,7 @@ stolenbuf_getreadwritebuf(stolenbufobject *self, Py_ssize_t segment, void **out) } static Py_ssize_t -stolenbuf_getsegcount(stolenbufobject *self, Py_ssize_t *len) -{ +stolenbuf_getsegcount(stolenbufobject *self, Py_ssize_t *len) { if (len) { *len = PyString_GET_SIZE(self->invalid_bytes); } @@ -79,14 +85,14 @@ static PyBufferProcs stolenbuf_as_buffer = { (getbufferproc) stolenbuf_getbuffer, }; -#else /* Python 3 */ 
+#else // Python 3 static PyBufferProcs stolenbuf_as_buffer = { (getbufferproc) stolenbuf_getbuffer, NULL, }; -#endif /* COMPILING_IN_PY2 */ +#endif // COMPILING_IN_PY2 PyDoc_STRVAR(stolenbuf_doc, "A buffer that is wrapping a stolen bytes object's buffer."); @@ -157,8 +163,7 @@ PyDoc_STRVAR( however, if called through *unpacking like ``stolenbuf(*(a,))`` it would only have the one reference (the tuple). */ static PyObject* -move_into_mutable_buffer(PyObject *self, PyObject *bytes_rvalue) -{ +move_into_mutable_buffer(PyObject *self, PyObject *bytes_rvalue) { stolenbufobject *ret; if (!PyString_CheckExact(bytes_rvalue)) { @@ -203,7 +208,7 @@ static PyModuleDef move_module = { -1, methods, }; -#endif /* !COMPILING_IN_PY2 */ +#endif // !COMPILING_IN_PY2 PyDoc_STRVAR( badmove_doc, @@ -226,7 +231,7 @@ PyInit__move(void) #else #define ERROR_RETURN init_move(void) -#endif /* !COMPILING_IN_PY2 */ +#endif // !COMPILING_IN_PY2 { PyObject *m; @@ -245,7 +250,7 @@ init_move(void) if (!(m = PyModule_Create(&move_module))) #else if (!(m = Py_InitModule(MODULE_NAME, methods))) -#endif /* !COMPILING_IN_PY2 */ +#endif // !COMPILING_IN_PY2 { return ERROR_RETURN; } @@ -264,5 +269,5 @@ init_move(void) #if !COMPILING_IN_PY2 return m; -#endif /* !COMPILING_IN_PY2 */ +#endif // !COMPILING_IN_PY2 } diff --git a/pandas/util/testing.py b/pandas/util/testing.py index f441dd20f3982..a5ae1f6a4d960 100644 --- a/pandas/util/testing.py +++ b/pandas/util/testing.py @@ -1,5 +1,6 @@ from __future__ import division +from collections import Counter from contextlib import contextmanager from datetime import datetime from functools import wraps @@ -20,8 +21,8 @@ from pandas._libs import testing as _testing import pandas.compat as compat from pandas.compat import ( - PY2, PY3, Counter, callable, filter, httplib, lmap, lrange, lzip, map, - raise_with_traceback, range, string_types, u, unichr, zip) + PY2, PY3, filter, httplib, lmap, lrange, lzip, map, raise_with_traceback, + range, string_types, u, unichr, 
zip) from pandas.core.dtypes.common import ( is_bool, is_categorical_dtype, is_datetime64_dtype, is_datetime64tz_dtype, @@ -33,7 +34,7 @@ import pandas as pd from pandas import ( Categorical, CategoricalIndex, DataFrame, DatetimeIndex, Index, - IntervalIndex, MultiIndex, Panel, RangeIndex, Series, bdate_range) + IntervalIndex, MultiIndex, RangeIndex, Series, bdate_range) from pandas.core.algorithms import take_1d from pandas.core.arrays import ( DatetimeArray, ExtensionArray, IntervalArray, PeriodArray, TimedeltaArray, @@ -1502,69 +1503,6 @@ def assert_frame_equal(left, right, check_dtype=True, obj='DataFrame.iloc[:, {idx}]'.format(idx=i)) -def assert_panel_equal(left, right, - check_dtype=True, - check_panel_type=False, - check_less_precise=False, - check_names=False, - by_blocks=False, - obj='Panel'): - """Check that left and right Panels are equal. - - Parameters - ---------- - left : Panel (or nd) - right : Panel (or nd) - check_dtype : bool, default True - Whether to check the Panel dtype is identical. - check_panel_type : bool, default False - Whether to check the Panel class is identical. - check_less_precise : bool or int, default False - Specify comparison precision. Only used when check_exact is False. - 5 digits (False) or 3 digits (True) after decimal points are compared. - If int, then specify the digits to compare - check_names : bool, default True - Whether to check the Index names attribute. - by_blocks : bool, default False - Specify how to compare internal data. If False, compare by columns. - If True, compare by blocks. - obj : str, default 'Panel' - Specify the object name being compared, internally used to show - the appropriate assertion message. 
- """ - - if check_panel_type: - assert_class_equal(left, right, obj=obj) - - for axis in left._AXIS_ORDERS: - left_ind = getattr(left, axis) - right_ind = getattr(right, axis) - assert_index_equal(left_ind, right_ind, check_names=check_names) - - if by_blocks: - rblocks = right._to_dict_of_blocks() - lblocks = left._to_dict_of_blocks() - for dtype in list(set(list(lblocks.keys()) + list(rblocks.keys()))): - assert dtype in lblocks - assert dtype in rblocks - array_equivalent(lblocks[dtype].values, rblocks[dtype].values) - else: - - # can potentially be slow - for i, item in enumerate(left._get_axis(0)): - msg = "non-matching item (right) '{item}'".format(item=item) - assert item in right, msg - litem = left.iloc[i] - ritem = right.iloc[i] - assert_frame_equal(litem, ritem, - check_less_precise=check_less_precise, - check_names=check_names) - - for i, item in enumerate(right._get_axis(0)): - msg = "non-matching item (left) '{item}'".format(item=item) - assert item in left, msg - - def assert_equal(left, right, **kwargs): """ Wrapper for tm.assert_*_equal to dispatch to the appropriate test function. 
@@ -2051,22 +1989,6 @@ def makePeriodFrame(nper=None): return DataFrame(data) -def makePanel(nper=None): - with warnings.catch_warnings(record=True): - warnings.filterwarnings("ignore", "\\nPanel", FutureWarning) - cols = ['Item' + c for c in string.ascii_uppercase[:K - 1]] - data = {c: makeTimeDataFrame(nper) for c in cols} - return Panel.fromDict(data) - - -def makePeriodPanel(nper=None): - with warnings.catch_warnings(record=True): - warnings.filterwarnings("ignore", "\\nPanel", FutureWarning) - cols = ['Item' + c for c in string.ascii_uppercase[:K - 1]] - data = {c: makePeriodFrame(nper) for c in cols} - return Panel.fromDict(data) - - def makeCustomIndex(nentries, nlevels, prefix='#', names=False, ndupe_l=None, idx_type=None): """Create an index/multindex with given dimensions, levels, names, etc' @@ -2314,15 +2236,6 @@ def makeMissingDataframe(density=.9, random_state=None): return df -def add_nans(panel): - I, J, N = panel.shape - for i, item in enumerate(panel.items): - dm = panel[item] - for j, col in enumerate(dm.columns): - dm[col][:i + j] = np.NaN - return panel - - class TestSubDict(dict): def __init__(self, *args, **kwargs): diff --git a/requirements-dev.txt b/requirements-dev.txt index 76aaeefa648f4..be84c6f29fdeb 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -10,7 +10,8 @@ gitpython hypothesis>=3.82 isort moto -pytest>=4.0 +pytest>=4.0.2 +pytest-mock sphinx numpydoc beautifulsoup4>=4.2.1 diff --git a/scripts/tests/test_validate_docstrings.py b/scripts/tests/test_validate_docstrings.py index bb58449843096..09fb5a30cbc3b 100644 --- a/scripts/tests/test_validate_docstrings.py +++ b/scripts/tests/test_validate_docstrings.py @@ -4,6 +4,8 @@ import textwrap import pytest import numpy as np +import pandas as pd + import validate_docstrings validate_one = validate_docstrings.validate_one @@ -1004,6 +1006,32 @@ def test_item_subsection(self, idx, subsection): assert result[idx][3] == subsection +class TestDocstringClass(object): + 
@pytest.mark.parametrize('name, expected_obj', + [('pandas.isnull', pd.isnull), + ('pandas.DataFrame', pd.DataFrame), + ('pandas.Series.sum', pd.Series.sum)]) + def test_resolves_class_name(self, name, expected_obj): + d = validate_docstrings.Docstring(name) + assert d.obj is expected_obj + + @pytest.mark.parametrize('invalid_name', ['panda', 'panda.DataFrame']) + def test_raises_for_invalid_module_name(self, invalid_name): + msg = 'No module can be imported from "{}"'.format(invalid_name) + with pytest.raises(ImportError, match=msg): + validate_docstrings.Docstring(invalid_name) + + @pytest.mark.parametrize('invalid_name', + ['pandas.BadClassName', + 'pandas.Series.bad_method_name']) + def test_raises_for_invalid_attribute_name(self, invalid_name): + name_components = invalid_name.split('.') + obj_name, invalid_attr_name = name_components[-2], name_components[-1] + msg = "'{}' has no attribute '{}'".format(obj_name, invalid_attr_name) + with pytest.raises(AttributeError, match=msg): + validate_docstrings.Docstring(invalid_name) + + class TestMainFunction(object): def test_exit_status_for_validate_one(self, monkeypatch): monkeypatch.setattr( diff --git a/scripts/validate_docstrings.py b/scripts/validate_docstrings.py index bce33f7e78daa..20f32124a2532 100755 --- a/scripts/validate_docstrings.py +++ b/scripts/validate_docstrings.py @@ -267,7 +267,7 @@ def _load_obj(name): else: continue - if 'module' not in locals(): + if 'obj' not in locals(): raise ImportError('No module can be imported ' 'from "{}"'.format(name)) diff --git a/setup.cfg b/setup.cfg index 7155cc1013544..84b8f69a83f16 100644 --- a/setup.cfg +++ b/setup.cfg @@ -57,6 +57,7 @@ split_penalty_after_opening_bracket = 1000000 split_penalty_logical_operator = 30 [tool:pytest] +minversion = 4.0.2 testpaths = pandas markers = single: mark a test as single cpu only @@ -114,7 +115,6 @@ force_sort_within_sections=True skip= pandas/core/api.py, pandas/core/frame.py, - asv_bench/benchmarks/algorithms.py, 
asv_bench/benchmarks/attrs_caching.py, asv_bench/benchmarks/binary_ops.py, asv_bench/benchmarks/categoricals.py, @@ -153,3 +153,23 @@ skip= asv_bench/benchmarks/dtypes.py asv_bench/benchmarks/strings.py asv_bench/benchmarks/period.py + pandas/__init__.py + pandas/plotting/__init__.py + pandas/tests/extension/decimal/__init__.py + pandas/tests/extension/base/__init__.py + pandas/io/msgpack/__init__.py + pandas/io/json/__init__.py + pandas/io/clipboard/__init__.py + pandas/io/excel/__init__.py + pandas/compat/__init__.py + pandas/compat/numpy/__init__.py + pandas/core/arrays/__init__.py + pandas/core/groupby/__init__.py + pandas/core/internals/__init__.py + pandas/api/__init__.py + pandas/api/extensions/__init__.py + pandas/api/types/__init__.py + pandas/_libs/__init__.py + pandas/_libs/tslibs/__init__.py + pandas/util/__init__.py + pandas/arrays/__init__.py diff --git a/setup.py b/setup.py index 4bf040b8c8e20..c8d29a2e4be5a 100755 --- a/setup.py +++ b/setup.py @@ -450,7 +450,8 @@ def run(self): # Note: if not using `cythonize`, coverage can be enabled by # pinning `ext.cython_directives = directives` to each ext in extensions. # github.com/cython/cython/wiki/enhancements-compilerdirectives#in-setuppy -directives = {'linetrace': False} +directives = {'linetrace': False, + 'language_level': 2} macros = [] if linetrace: # https://pypkg.com/pypi/pytest-cython/f/tests/example-project/setup.py
- [x] closes #16607 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry I found the issue stagnant, but since the fix was already in place, I manually took the code @rosygupta made and applied it onto latest master. Rebase wasn't an option that I found would work since the file in question has been split into multiple. Let me know what else needs to be updated. Question: Given the code was created from a reference of cpython https://github.com/python/cpython/blob/6f0eb93183519024cb360162bdd81b9faec97ba6/Lib/_strptime.py#L321 I'm wondering why we can't use cpython's strptime directly and have to make our own implementation?
https://api.github.com/repos/pandas-dev/pandas/pulls/24844
2019-01-20T04:58:43Z
2019-03-05T07:50:36Z
null
2019-03-05T08:16:52Z
DOC: Clean sort_values and sort_index docstrings
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index a0ee9cb253fef..63d16f7bcb78d 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -530,7 +530,7 @@ def set_axis(self, labels, axis=0, inplace=None): The axis to update. The value 0 identifies the rows, and 1 identifies the columns. - inplace : boolean, default None + inplace : bool, default None Whether to return a new %(klass)s instance. .. warning:: @@ -3966,35 +3966,37 @@ def add_suffix(self, suffix): def sort_values(self, by=None, axis=0, ascending=True, inplace=False, kind='quicksort', na_position='last'): """ - Sort by the values along either axis + Sort by the values along either axis. Parameters ----------%(optional_by)s axis : %(axes_single_arg)s, default 0 - Axis to be sorted + Axis to be sorted. ascending : bool or list of bool, default True Sort ascending vs. descending. Specify list for multiple sort orders. If this is a list of bools, must match the length of the by. inplace : bool, default False - if True, perform operation in-place + If True, perform operation in-place. kind : {'quicksort', 'mergesort', 'heapsort'}, default 'quicksort' Choice of sorting algorithm. See also ndarray.np.sort for more information. `mergesort` is the only stable algorithm. For DataFrames, this option is only applied when sorting on a single column or label. na_position : {'first', 'last'}, default 'last' - `first` puts NaNs at the beginning, `last` puts NaNs at the end + Puts NaNs at the beginning if `first`; `last` puts NaNs at the + end. Returns ------- - sorted_obj : %(klass)s + sorted_obj : DataFrame or None + DataFrame with sorted values if inplace=False, None otherwise. Examples -------- >>> df = pd.DataFrame({ - ... 'col1' : ['A', 'A', 'B', np.nan, 'D', 'C'], - ... 'col2' : [2, 1, 9, 8, 7, 4], + ... 'col1': ['A', 'A', 'B', np.nan, 'D', 'C'], + ... 'col2': [2, 1, 9, 8, 7, 4], ... 'col3': [0, 1, 9, 4, 2, 3], ... 
}) >>> df @@ -4056,32 +4058,35 @@ def sort_values(self, by=None, axis=0, ascending=True, inplace=False, def sort_index(self, axis=0, level=None, ascending=True, inplace=False, kind='quicksort', na_position='last', sort_remaining=True): """ - Sort object by labels (along an axis) + Sort object by labels (along an axis). Parameters ---------- - axis : %(axes)s to direct sorting + axis : {0 or 'index', 1 or 'columns'}, default 0 + The axis along which to sort. The value 0 identifies the rows, + and 1 identifies the columns. level : int or level name or list of ints or list of level names - if not None, sort on values in specified index level(s) - ascending : boolean, default True - Sort ascending vs. descending + If not None, sort on values in specified index level(s). + ascending : bool, default True + Sort ascending vs. descending. inplace : bool, default False - if True, perform operation in-place + If True, perform operation in-place. kind : {'quicksort', 'mergesort', 'heapsort'}, default 'quicksort' - Choice of sorting algorithm. See also ndarray.np.sort for more - information. `mergesort` is the only stable algorithm. For - DataFrames, this option is only applied when sorting on a single - column or label. + Choice of sorting algorithm. See also ndarray.np.sort for more + information. `mergesort` is the only stable algorithm. For + DataFrames, this option is only applied when sorting on a single + column or label. na_position : {'first', 'last'}, default 'last' - `first` puts NaNs at the beginning, `last` puts NaNs at the end. - Not implemented for MultiIndex. + Puts NaNs at the beginning if `first`; `last` puts NaNs at the end. + Not implemented for MultiIndex. sort_remaining : bool, default True - if true and sorting by level and index is multilevel, sort by other - levels too (in order) after sorting by specified level + If True and sorting by level and index is multilevel, sort by other + levels too (in order) after sorting by specified level. 
Returns ------- - sorted_obj : %(klass)s + sorted_obj : DataFrame or None + DataFrame with sorted index if inplace=False, None otherwise. """ inplace = validate_bool_kwarg(inplace, 'inplace') axis = self._get_axis_number(axis) diff --git a/pandas/core/series.py b/pandas/core/series.py index 0c8e697c572e8..a25aa86a47927 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -2857,13 +2857,13 @@ def sort_index(self, axis=0, level=None, ascending=True, inplace=False, If 'first' puts NaNs at the beginning, 'last' puts NaNs at the end. Not implemented for MultiIndex. sort_remaining : bool, default True - If true and sorting by level and index is multilevel, sort by other + If True and sorting by level and index is multilevel, sort by other levels too (in order) after sorting by specified level. Returns ------- pandas.Series - The original Series sorted by the labels + The original Series sorted by the labels. See Also --------
Fixes some capitalization
https://api.github.com/repos/pandas-dev/pandas/pulls/24843
2019-01-20T00:57:42Z
2019-01-27T03:05:47Z
2019-01-27T03:05:47Z
2019-01-27T03:13:54Z
BUG-16807-1 SparseFrame fills with default_fill_value if data is None
diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst index 170e7f14da397..b1df419d15c1b 100644 --- a/doc/source/whatsnew/v0.25.0.rst +++ b/doc/source/whatsnew/v0.25.0.rst @@ -226,7 +226,7 @@ Sparse ^^^^^^ - Significant speedup in `SparseArray` initialization that benefits most operations, fixing performance regression introduced in v0.20.0 (:issue:`24985`) -- +- Bug in :class:`SparseFrame` constructor where passing ``None`` as the data would cause ``default_fill_value`` to be ignored (:issue:`16807`) - diff --git a/pandas/core/sparse/frame.py b/pandas/core/sparse/frame.py index e0af11d13774c..2d54b82a3c844 100644 --- a/pandas/core/sparse/frame.py +++ b/pandas/core/sparse/frame.py @@ -124,8 +124,8 @@ def __init__(self, data=None, index=None, columns=None, default_kind=None, columns = Index([]) else: for c in columns: - data[c] = SparseArray(np.nan, index=index, - kind=self._default_kind, + data[c] = SparseArray(self._default_fill_value, + index=index, kind=self._default_kind, fill_value=self._default_fill_value) mgr = to_manager(data, columns, index) if dtype is not None: diff --git a/pandas/tests/sparse/frame/test_frame.py b/pandas/tests/sparse/frame/test_frame.py index bfb5103c97adc..d917c94e813cd 100644 --- a/pandas/tests/sparse/frame/test_frame.py +++ b/pandas/tests/sparse/frame/test_frame.py @@ -269,6 +269,19 @@ def test_type_coercion_at_construction(self): default_fill_value=0) tm.assert_sp_frame_equal(result, expected) + def test_default_dtype(self): + result = pd.SparseDataFrame(columns=list('ab'), index=range(2)) + expected = pd.SparseDataFrame([[np.nan, np.nan], [np.nan, np.nan]], + columns=list('ab'), index=range(2)) + tm.assert_sp_frame_equal(result, expected) + + def test_nan_data_with_int_dtype_raises_error(self): + sdf = pd.SparseDataFrame([[np.nan, np.nan], [np.nan, np.nan]], + columns=list('ab'), index=range(2)) + msg = "Cannot convert non-finite values" + with pytest.raises(ValueError, match=msg): + pd.SparseDataFrame(sdf, 
dtype=np.int64) + def test_dtypes(self): df = DataFrame(np.random.randn(10000, 4)) df.loc[:9998] = np.nan @@ -1246,6 +1259,14 @@ def test_notna(self): 'B': [True, False, True, True, False]}) tm.assert_frame_equal(res.to_dense(), exp) + def test_default_fill_value_with_no_data(self): + # GH 16807 + expected = pd.SparseDataFrame([[1.0, 1.0], [1.0, 1.0]], + columns=list('ab'), index=range(2)) + result = pd.SparseDataFrame(columns=list('ab'), index=range(2), + default_fill_value=1.0) + tm.assert_frame_equal(expected, result) + class TestSparseDataFrameArithmetic(object):
- [ ] closes #16807 - [X] tests added / passed - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [X] whatsnew entry Previously, the constructor for SparseFrame would default to `np.nan` if the data parameter was `None`, even if a `default_fill_value` is specified. This PR fixes this.
https://api.github.com/repos/pandas-dev/pandas/pulls/24842
2019-01-19T22:30:15Z
2019-03-03T04:46:45Z
2019-03-03T04:46:44Z
2019-03-03T04:46:51Z
Make DataFrame.to_html output full content
diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst index 4032dc20b2e19..617d65dbbde1d 100644 --- a/doc/source/whatsnew/v0.25.0.rst +++ b/doc/source/whatsnew/v0.25.0.rst @@ -154,6 +154,7 @@ MultiIndex I/O ^^^ +- Bug in :func:`DataFrame.to_html()` where values were truncated using display options instead of outputting the full content (:issue:`17004`) - Fixed bug in missing text when using :meth:`to_clipboard` if copying utf-16 characters in Python 3 on Windows (:issue:`25040`) - - diff --git a/pandas/io/formats/html.py b/pandas/io/formats/html.py index 456583509565e..66d13bf2668f9 100644 --- a/pandas/io/formats/html.py +++ b/pandas/io/formats/html.py @@ -12,7 +12,7 @@ from pandas.core.dtypes.generic import ABCMultiIndex -from pandas import compat +from pandas import compat, option_context from pandas.core.config import get_option from pandas.io.common import _is_url @@ -320,9 +320,15 @@ def _write_header(self, indent): self.write('</thead>', indent) + def _get_formatted_values(self): + with option_context('display.max_colwidth', 999999): + fmt_values = {i: self.fmt._format_col(i) + for i in range(self.ncols)} + return fmt_values + def _write_body(self, indent): self.write('<tbody>', indent) - fmt_values = {i: self.fmt._format_col(i) for i in range(self.ncols)} + fmt_values = self._get_formatted_values() # write values if self.fmt.index and isinstance(self.frame.index, ABCMultiIndex): @@ -486,6 +492,9 @@ class NotebookFormatter(HTMLFormatter): DataFrame._repr_html_() and DataFrame.to_html(notebook=True) """ + def _get_formatted_values(self): + return {i: self.fmt._format_col(i) for i in range(self.ncols)} + def write_style(self): # We use the "scoped" attribute here so that the desired # style properties for the data frame are not then applied diff --git a/pandas/tests/io/formats/test_to_html.py b/pandas/tests/io/formats/test_to_html.py index 554cfd306e2a7..428f1411a10a6 100644 --- a/pandas/tests/io/formats/test_to_html.py +++ 
b/pandas/tests/io/formats/test_to_html.py @@ -15,6 +15,15 @@ import pandas.io.formats.format as fmt +lorem_ipsum = ( + "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod" + " tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim" + " veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex" + " ea commodo consequat. Duis aute irure dolor in reprehenderit in" + " voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur" + " sint occaecat cupidatat non proident, sunt in culpa qui officia" + " deserunt mollit anim id est laborum.") + def expected_html(datapath, name): """ @@ -600,3 +609,17 @@ def test_to_html_render_links(render_links, expected, datapath): result = df.to_html(render_links=render_links) expected = expected_html(datapath, expected) assert result == expected + + +@pytest.mark.parametrize('method,expected', [ + ('to_html', lambda x:lorem_ipsum), + ('_repr_html_', lambda x:lorem_ipsum[:x - 4] + '...') # regression case +]) +@pytest.mark.parametrize('max_colwidth', [10, 20, 50, 100]) +def test_ignore_display_max_colwidth(method, expected, max_colwidth): + # see gh-17004 + df = DataFrame([lorem_ipsum]) + with pd.option_context('display.max_colwidth', max_colwidth): + result = getattr(df, method)() + expected = expected(max_colwidth) + assert expected in result
- [ ] closes #17004 - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/24841
2019-01-19T21:20:01Z
2019-03-03T02:59:52Z
2019-03-03T02:59:52Z
2019-03-05T12:25:34Z
BUG: Index Name is not displayed with header=False in to_csv
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 1717b00664f92..efc21c2d91f84 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -2956,12 +2956,17 @@ def to_csv(self, path_or_buf=None, sep=",", na_rep='', float_format=None, index : bool, default True Write row names (index). - index_label : str or sequence, or False, default None - Column label for index column(s) if desired. If None is given, and - `header` and `index` are True, then the index names are used. A - sequence should be given if the object uses MultiIndex. If - False do not print fields for index names. Use index_label=False - for easier importing in R. + index_label : bool or str or sequence, default None + If index_label is not explicitly called, False if either header + or index is set to False; otherwise, True. If index_label is + explicitly called by allowed types of input, then input will be + given to index_label. If False, do not print fields + for index names. Use index_label=False for easier importing in R. + + .. versionchanged:: 0.25.0 + + Previously True option is not allowed. + mode : str Python write mode, default 'w'. encoding : str, optional diff --git a/pandas/io/formats/csvs.py b/pandas/io/formats/csvs.py index 71b7ef32bebc3..7b7e43c814a36 100644 --- a/pandas/io/formats/csvs.py +++ b/pandas/io/formats/csvs.py @@ -50,7 +50,18 @@ def __init__(self, obj, path_or_buf=None, sep=",", na_rep='', self.header = header self.index = index - self.index_label = index_label + + # if index label is not explicitly called, index label is True if + # header or index is not False; otherwise, index label is set to False + if index_label is None: + if self.header is False or self.header is None or not self.index: + self.index_label = False + else: + self.index_label = True + else: + # if index label is explicitly called, then use the caller. 
+ self.index_label = index_label + self.mode = mode if encoding is None: encoding = 'ascii' if compat.PY2 else 'utf-8' @@ -188,6 +199,40 @@ def save(self): for _fh in handles: _fh.close() + def _index_label_encoder(self): + """ + Encode index label if it is not False. + + Returns + ------- + index_label: list + New index_label given index types + encode_labels: list + List of index labels + """ + index_label = self.index_label + obj = self.obj + + if index_label is True: + index_label = [] + # append index label based on index type + if isinstance(obj.index, ABCMultiIndex): + # add empty string is name is None + index_label = list(map(lambda name: name or '', + obj.index.names)) + else: + # if no name, use empty string + if obj.index.name is None: + index_label.append('') + else: + index_label.append(obj.index.name) + elif not isinstance(index_label, + (list, tuple, np.ndarray, ABCIndexClass)): + index_label = [index_label] + + encoded_labels = list(index_label) + return index_label, encoded_labels + def _save_header(self): writer = self.writer @@ -200,8 +245,16 @@ def _save_header(self): has_aliases = isinstance(header, (tuple, list, np.ndarray, ABCIndexClass)) - if not (has_aliases or self.header): - return + if not (has_aliases or header): + # if index_label is False, nothing will display. 
+ if index_label is False: + return + else: + # based on index_label value, encoded labels are given + index_label, encoded_labels = self._index_label_encoder() + encoded_labels.extend([''] * len(obj.columns)) + writer.writerow(encoded_labels) + return if has_aliases: if len(header) != len(cols): raise ValueError(('Writing {ncols} cols but got {nalias} ' @@ -215,27 +268,16 @@ def _save_header(self): if self.index: # should write something for index label if index_label is not False: - if index_label is None: - if isinstance(obj.index, ABCMultiIndex): - index_label = [] - for i, name in enumerate(obj.index.names): - if name is None: - name = '' - index_label.append(name) - else: - index_label = obj.index.name - if index_label is None: - index_label = [''] - else: - index_label = [index_label] - elif not isinstance(index_label, - (list, tuple, np.ndarray, ABCIndexClass)): - # given a string for a DF with Index - index_label = [index_label] - - encoded_labels = list(index_label) + index_label, encoded_labels = self._index_label_encoder() else: - encoded_labels = [] + # if index is multiindex, multiple empty labels are provided + if isinstance(obj.index, ABCMultiIndex): + index_label = [] + index_label.extend([''] * len(obj.index.names)) + # if index is single index, list of empty string is provided + else: + index_label = [''] + encoded_labels = list(index_label) if not has_mi_columns or has_aliases: encoded_labels += list(write_cols) diff --git a/pandas/tests/frame/test_to_csv.py b/pandas/tests/frame/test_to_csv.py index 917cd9a04efaf..bfc5ca0b9640b 100644 --- a/pandas/tests/frame/test_to_csv.py +++ b/pandas/tests/frame/test_to_csv.py @@ -862,14 +862,14 @@ def test_to_csv_quote_none(self): expected = tm.convert_rows_list_to_csv_str(expected_rows) assert result == expected - def test_to_csv_index_no_leading_comma(self): + def test_to_csv_index_leading_comma(self): df = DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]}, index=['one', 'two', 'three']) buf = StringIO() - 
df.to_csv(buf, index_label=False) + df.to_csv(buf) - expected_rows = ['A,B', + expected_rows = [',A,B', 'one,1,4', 'two,2,5', 'three,3,6'] diff --git a/pandas/tests/io/formats/test_to_csv.py b/pandas/tests/io/formats/test_to_csv.py index 1a28cafa2b520..f89dafeef65b5 100644 --- a/pandas/tests/io/formats/test_to_csv.py +++ b/pandas/tests/io/formats/test_to_csv.py @@ -537,3 +537,47 @@ def test_to_csv_compression(self, compression_only, result = pd.read_csv(path, index_col=0, compression=read_compression) tm.assert_frame_equal(result, df) + + @pytest.mark.parametrize("header, index_label, expected_rows", [ + (False, True, ['index.name,,', '0,0,0', '1,0,0']), + (True, True, ['index.name,0,1', '0,0,0', '1,0,0']), + (False, False, ['0,0,0', '1,0,0']), + (True, False, [',0,1', '0,0,0', '1,0,0']), + (False, None, ['0,0,0', '1,0,0']), + (True, None, ['index.name,0,1', '0,0,0', '1,0,0']), + (True, "new_index", ['new_index,0,1', '0,0,0', '1,0,0']), + (True, ["new_index"], ['new_index,0,1', '0,0,0', '1,0,0']) + ]) + def test_to_csv_header_single_index(self, header, index_label, + expected_rows): + # GH 24546 + df = pd.DataFrame(np.zeros((2, 2), dtype=int)) + df.index.name = 'index.name' + df.columns.name = 'columns.name' + + result = df.to_csv(header=header, index_label=index_label) + expected = tm.convert_rows_list_to_csv_str(expected_rows) + assert result == expected + + @pytest.mark.parametrize("header, index_label, expected_rows", [ + (False, True, ['index.name.0,index.name.1,,', 'a,b,0,0', 'a,c,0,0']), + (True, True, ['index.name.0,index.name.1,0,1', 'a,b,0,0', 'a,c,0,0']), + (False, False, ['a,b,0,0', 'a,c,0,0']), + (True, False, [',,0,1', 'a,b,0,0', 'a,c,0,0']), + (False, None, ['a,b,0,0', 'a,c,0,0']), + (True, None, ['index.name.0,index.name.1,0,1', 'a,b,0,0', 'a,c,0,0']), + (True, ("index1", "index2"), + ['index1,index2,0,1', 'a,b,0,0', 'a,c,0,0']), + (True, ["index1", "index2"], + ['index1,index2,0,1', 'a,b,0,0', 'a,c,0,0']) + ]) + def 
test_to_csv_header_multi_index(self, header, index_label, + expected_rows): + # GH 24546 + df = pd.DataFrame(np.zeros((2, 2), dtype=int)) + df.index = pd.MultiIndex.from_product([['a'], ['b', 'c']], names=[ + 'index.name.0', 'index.name.1']) + + result = df.to_csv(header=header, index_label=index_label) + expected = tm.convert_rows_list_to_csv_str(expected_rows) + assert result == expected
- [ ] xref #24546 - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/24840
2019-01-19T20:37:33Z
2019-06-27T03:14:56Z
null
2019-08-23T13:27:59Z
STY: use pytest.raises context syntax (reshape)
diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index 7861a122afdb6..e11847d2b8ce2 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -1087,7 +1087,7 @@ def _validate(self, validate): elif validate in ["one_to_many", "1:m"]: if not left_unique: raise MergeError("Merge keys are not unique in left dataset;" - "not a one-to-many merge") + " not a one-to-many merge") elif validate in ["many_to_one", "m:1"]: if not right_unique: diff --git a/pandas/tests/reshape/merge/test_join.py b/pandas/tests/reshape/merge/test_join.py index 8ee1e49f01ac1..e21f9d0291afa 100644 --- a/pandas/tests/reshape/merge/test_join.py +++ b/pandas/tests/reshape/merge/test_join.py @@ -195,38 +195,47 @@ def test_join_on(self): assert np.isnan(joined['three']['c']) # merge column not p resent - pytest.raises(KeyError, target.join, source, on='E') + with pytest.raises(KeyError, match="^'E'$"): + target.join(source, on='E') # overlap source_copy = source.copy() source_copy['A'] = 0 - pytest.raises(ValueError, target.join, source_copy, on='A') + msg = ("You are trying to merge on float64 and object columns. 
If" + " you wish to proceed you should use pd.concat") + with pytest.raises(ValueError, match=msg): + target.join(source_copy, on='A') def test_join_on_fails_with_different_right_index(self): - with pytest.raises(ValueError): - df = DataFrame({'a': np.random.choice(['m', 'f'], size=3), - 'b': np.random.randn(3)}) - df2 = DataFrame({'a': np.random.choice(['m', 'f'], size=10), - 'b': np.random.randn(10)}, - index=tm.makeCustomIndex(10, 2)) + df = DataFrame({'a': np.random.choice(['m', 'f'], size=3), + 'b': np.random.randn(3)}) + df2 = DataFrame({'a': np.random.choice(['m', 'f'], size=10), + 'b': np.random.randn(10)}, + index=tm.makeCustomIndex(10, 2)) + msg = (r'len\(left_on\) must equal the number of levels in the index' + ' of "right"') + with pytest.raises(ValueError, match=msg): merge(df, df2, left_on='a', right_index=True) def test_join_on_fails_with_different_left_index(self): - with pytest.raises(ValueError): - df = DataFrame({'a': np.random.choice(['m', 'f'], size=3), - 'b': np.random.randn(3)}, - index=tm.makeCustomIndex(10, 2)) - df2 = DataFrame({'a': np.random.choice(['m', 'f'], size=10), - 'b': np.random.randn(10)}) + df = DataFrame({'a': np.random.choice(['m', 'f'], size=3), + 'b': np.random.randn(3)}, + index=tm.makeCustomIndex(3, 2)) + df2 = DataFrame({'a': np.random.choice(['m', 'f'], size=10), + 'b': np.random.randn(10)}) + msg = (r'len\(right_on\) must equal the number of levels in the index' + ' of "left"') + with pytest.raises(ValueError, match=msg): merge(df, df2, right_on='b', left_index=True) def test_join_on_fails_with_different_column_counts(self): - with pytest.raises(ValueError): - df = DataFrame({'a': np.random.choice(['m', 'f'], size=3), - 'b': np.random.randn(3)}) - df2 = DataFrame({'a': np.random.choice(['m', 'f'], size=10), - 'b': np.random.randn(10)}, - index=tm.makeCustomIndex(10, 2)) + df = DataFrame({'a': np.random.choice(['m', 'f'], size=3), + 'b': np.random.randn(3)}) + df2 = DataFrame({'a': np.random.choice(['m', 'f'], size=10), 
+ 'b': np.random.randn(10)}, + index=tm.makeCustomIndex(10, 2)) + msg = r"len\(right_on\) must equal len\(left_on\)" + with pytest.raises(ValueError, match=msg): merge(df, df2, right_on='a', left_on=['a', 'b']) @pytest.mark.parametrize("wrong_type", [2, 'str', None, np.array([0, 1])]) @@ -237,9 +246,11 @@ def test_join_on_fails_with_wrong_object_type(self, wrong_type): # Edited test to remove the Series object from test parameters df = DataFrame({'a': [1, 1]}) - with pytest.raises(TypeError, match=str(type(wrong_type))): + msg = ("Can only merge Series or DataFrame objects, a {} was passed" + .format(str(type(wrong_type)))) + with pytest.raises(TypeError, match=msg): merge(wrong_type, df, left_on='a', right_on='a') - with pytest.raises(TypeError, match=str(type(wrong_type))): + with pytest.raises(TypeError, match=msg): merge(df, wrong_type, left_on='a', right_on='a') def test_join_on_pass_vector(self): @@ -603,7 +614,9 @@ def _check_diff_index(df_list, result, exp_index): joined = df_list[0].join(df_list[1:], how='inner') _check_diff_index(df_list, joined, df.index[2:8]) - pytest.raises(ValueError, df_list[0].join, df_list[1:], on='a') + msg = "Joining multiple DataFrames only supported for joining on index" + with pytest.raises(ValueError, match=msg): + df_list[0].join(df_list[1:], on='a') def test_join_many_mixed(self): df = DataFrame(np.random.randn(8, 4), columns=['A', 'B', 'C', 'D']) @@ -725,10 +738,13 @@ def test_panel_join_many(self): tm.assert_panel_equal(joined, expected) # edge cases - pytest.raises(ValueError, panels[0].join, panels[1:], - how='outer', lsuffix='foo', rsuffix='bar') - pytest.raises(ValueError, panels[0].join, panels[1:], - how='right') + msg = "Suffixes not supported when passing multiple panels" + with pytest.raises(ValueError, match=msg): + panels[0].join(panels[1:], how='outer', lsuffix='foo', + rsuffix='bar') + msg = "Right join not supported with multiple panels" + with pytest.raises(ValueError, match=msg): + 
panels[0].join(panels[1:], how='right') def test_join_multi_to_multi(self, join_type): # GH 20475 @@ -749,10 +765,12 @@ def test_join_multi_to_multi(self, join_type): ) assert_frame_equal(expected, result) - with pytest.raises(ValueError): + msg = (r'len\(left_on\) must equal the number of levels in the index' + ' of "right"') + with pytest.raises(ValueError, match=msg): left.join(right, on='xy', how=join_type) - with pytest.raises(ValueError): + with pytest.raises(ValueError, match=msg): right.join(left, on=['abc', 'xy'], how=join_type) diff --git a/pandas/tests/reshape/merge/test_merge.py b/pandas/tests/reshape/merge/test_merge.py index 2080fc542bc61..1d7c42b7328d0 100644 --- a/pandas/tests/reshape/merge/test_merge.py +++ b/pandas/tests/reshape/merge/test_merge.py @@ -119,25 +119,37 @@ def test_merge_index_singlekey_inner(self): assert_frame_equal(result, expected.loc[:, result.columns]) def test_merge_misspecified(self): - pytest.raises(ValueError, merge, self.left, self.right, - left_index=True) - pytest.raises(ValueError, merge, self.left, self.right, - right_index=True) - - pytest.raises(ValueError, merge, self.left, self.left, - left_on='key', on='key') - - pytest.raises(ValueError, merge, self.df, self.df2, - left_on=['key1'], right_on=['key1', 'key2']) + msg = "Must pass right_on or right_index=True" + with pytest.raises(pd.errors.MergeError, match=msg): + merge(self.left, self.right, left_index=True) + msg = "Must pass left_on or left_index=True" + with pytest.raises(pd.errors.MergeError, match=msg): + merge(self.left, self.right, right_index=True) + + msg = ('Can only pass argument "on" OR "left_on" and "right_on", not' + ' a combination of both') + with pytest.raises(pd.errors.MergeError, match=msg): + merge(self.left, self.left, left_on='key', on='key') + + msg = r"len\(right_on\) must equal len\(left_on\)" + with pytest.raises(ValueError, match=msg): + merge(self.df, self.df2, left_on=['key1'], + right_on=['key1', 'key2']) def 
test_index_and_on_parameters_confusion(self): - pytest.raises(ValueError, merge, self.df, self.df2, how='left', - left_index=False, right_index=['key1', 'key2']) - pytest.raises(ValueError, merge, self.df, self.df2, how='left', - left_index=['key1', 'key2'], right_index=False) - pytest.raises(ValueError, merge, self.df, self.df2, how='left', - left_index=['key1', 'key2'], - right_index=['key1', 'key2']) + msg = ("right_index parameter must be of type bool, not" + r" <(class|type) 'list'>") + with pytest.raises(ValueError, match=msg): + merge(self.df, self.df2, how='left', + left_index=False, right_index=['key1', 'key2']) + msg = ("left_index parameter must be of type bool, not " + r"<(class|type) 'list'>") + with pytest.raises(ValueError, match=msg): + merge(self.df, self.df2, how='left', + left_index=['key1', 'key2'], right_index=False) + with pytest.raises(ValueError, match=msg): + merge(self.df, self.df2, how='left', + left_index=['key1', 'key2'], right_index=['key1', 'key2']) def test_merge_overlap(self): merged = merge(self.left, self.left, on='key') @@ -269,7 +281,6 @@ def test_no_overlap_more_informative_error(self): df1 = DataFrame({'x': ['a']}, index=[dt]) df2 = DataFrame({'y': ['b', 'c']}, index=[dt, dt]) - pytest.raises(MergeError, merge, df1, df2) msg = ('No common columns to perform merge on. 
' 'Merge options: left_on={lon}, right_on={ron}, ' @@ -566,7 +577,10 @@ def test_overlapping_columns_error_message(self): # #2649, #10639 df2.columns = ['key1', 'foo', 'foo'] - pytest.raises(ValueError, merge, df, df2) + msg = (r"Data columns not unique: Index\(\[u?'foo', u?'foo'\]," + r" dtype='object'\)") + with pytest.raises(MergeError, match=msg): + merge(df, df2) def test_merge_on_datetime64tz(self): @@ -717,9 +731,10 @@ def test_indicator(self): assert_frame_equal(test_custom_name, df_result_custom_name) # Check only accepts strings and booleans - with pytest.raises(ValueError): + msg = "indicator option can only accept boolean or string arguments" + with pytest.raises(ValueError, match=msg): merge(df1, df2, on='col1', how='outer', indicator=5) - with pytest.raises(ValueError): + with pytest.raises(ValueError, match=msg): df1.merge(df2, on='col1', how='outer', indicator=5) # Check result integrity @@ -743,20 +758,25 @@ def test_indicator(self): for i in ['_right_indicator', '_left_indicator', '_merge']: df_badcolumn = DataFrame({'col1': [1, 2], i: [2, 2]}) - with pytest.raises(ValueError): + msg = ("Cannot use `indicator=True` option when data contains a" + " column named {}|" + "Cannot use name of an existing column for indicator" + " column").format(i) + with pytest.raises(ValueError, match=msg): merge(df1, df_badcolumn, on='col1', how='outer', indicator=True) - with pytest.raises(ValueError): + with pytest.raises(ValueError, match=msg): df1.merge(df_badcolumn, on='col1', how='outer', indicator=True) # Check for name conflict with custom name df_badcolumn = DataFrame( {'col1': [1, 2], 'custom_column_name': [2, 2]}) - with pytest.raises(ValueError): + msg = "Cannot use name of an existing column for indicator column" + with pytest.raises(ValueError, match=msg): merge(df1, df_badcolumn, on='col1', how='outer', indicator='custom_column_name') - with pytest.raises(ValueError): + with pytest.raises(ValueError, match=msg): df1.merge(df_badcolumn, on='col1', 
how='outer', indicator='custom_column_name') @@ -843,11 +863,13 @@ def test_validation(self): merge(left, right_w_dups, left_index=True, right_index=True, validate='one_to_many') - with pytest.raises(MergeError): + msg = ("Merge keys are not unique in right dataset; not a one-to-one" + " merge") + with pytest.raises(MergeError, match=msg): merge(left, right_w_dups, left_index=True, right_index=True, validate='one_to_one') - with pytest.raises(MergeError): + with pytest.raises(MergeError, match=msg): merge(left, right_w_dups, on='a', validate='one_to_one') # Dups on left @@ -856,26 +878,33 @@ def test_validation(self): merge(left_w_dups, right, left_index=True, right_index=True, validate='many_to_one') - with pytest.raises(MergeError): + msg = ("Merge keys are not unique in left dataset; not a one-to-one" + " merge") + with pytest.raises(MergeError, match=msg): merge(left_w_dups, right, left_index=True, right_index=True, validate='one_to_one') - with pytest.raises(MergeError): + with pytest.raises(MergeError, match=msg): merge(left_w_dups, right, on='a', validate='one_to_one') # Dups on both merge(left_w_dups, right_w_dups, on='a', validate='many_to_many') - with pytest.raises(MergeError): + msg = ("Merge keys are not unique in right dataset; not a many-to-one" + " merge") + with pytest.raises(MergeError, match=msg): merge(left_w_dups, right_w_dups, left_index=True, right_index=True, validate='many_to_one') - with pytest.raises(MergeError): + msg = ("Merge keys are not unique in left dataset; not a one-to-many" + " merge") + with pytest.raises(MergeError, match=msg): merge(left_w_dups, right_w_dups, on='a', validate='one_to_many') # Check invalid arguments - with pytest.raises(ValueError): + msg = "Not a valid argument for validate" + with pytest.raises(ValueError, match=msg): merge(left, right, on='a', validate='jibberish') # Two column merge, dups in both, but jointly no dups. @@ -896,7 +925,9 @@ def test_validation(self): 'um... 
weasel noise?']}, index=range(3)) - with pytest.raises(MergeError): + msg = ("Merge keys are not unique in either left or right dataset;" + " not a one-to-one merge") + with pytest.raises(MergeError, match=msg): merge(left, right, on='a', validate='1:1') result = merge(left, right, on=['a', 'b'], validate='1:1') @@ -1439,6 +1470,7 @@ def test_merge_series(on, left_on, right_on, left_index, right_index, nm): left_index=left_index, right_index=right_index) tm.assert_frame_equal(result, expected) else: - with pytest.raises(ValueError, match='a Series without a name'): + msg = "Cannot merge a Series without a name" + with pytest.raises(ValueError, match=msg): result = pd.merge(a, b, on=on, left_on=left_on, right_on=right_on, left_index=left_index, right_index=right_index) diff --git a/pandas/tests/reshape/test_concat.py b/pandas/tests/reshape/test_concat.py index 7814cbaba4a50..899daf488638a 100644 --- a/pandas/tests/reshape/test_concat.py +++ b/pandas/tests/reshape/test_concat.py @@ -777,7 +777,8 @@ def test_append(self, sort): assert appended is not self.frame # Overlap - with pytest.raises(ValueError): + msg = "Indexes have overlapping values" + with pytest.raises(ValueError, match=msg): self.frame.append(self.frame, verify_integrity=True) # see gh-6129: new columns @@ -960,13 +961,22 @@ def test_append_different_columns_types_raises( df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=index_can_append) ser = pd.Series([7, 8, 9], index=index_cannot_append_with_other, name=2) - with pytest.raises(TypeError): + msg = ("the other index needs to be an IntervalIndex too, but was" + r" type {}|" + r"object of type '(int|long|float|Timestamp)' has no len\(\)|" + "Expected tuple, got str") + with pytest.raises(TypeError, match=msg.format( + index_can_append.__class__.__name__)): df.append(ser) df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=index_cannot_append_with_other) ser = pd.Series([7, 8, 9], index=index_can_append, name=2) - with pytest.raises(TypeError): + msg = 
(r"unorderable types: (Interval|int)\(\) > " + r"(int|long|float|str)\(\)|" + r"Expected tuple, got (int|long|float|str)|" + r"Cannot compare type 'Timestamp' with type '(int|long)'") + with pytest.raises(TypeError, match=msg): df.append(ser) def test_append_dtype_coerce(self, sort): @@ -1291,11 +1301,15 @@ def test_concat_keys_levels_no_overlap(self): df = DataFrame(np.random.randn(1, 3), index=['a']) df2 = DataFrame(np.random.randn(1, 4), index=['b']) - pytest.raises(ValueError, concat, [df, df], - keys=['one', 'two'], levels=[['foo', 'bar', 'baz']]) + msg = "Values not found in passed level" + with pytest.raises(ValueError, match=msg): + concat([df, df], + keys=['one', 'two'], levels=[['foo', 'bar', 'baz']]) - pytest.raises(ValueError, concat, [df, df2], - keys=['one', 'two'], levels=[['foo', 'bar', 'baz']]) + msg = "Key one not in level" + with pytest.raises(ValueError, match=msg): + concat([df, df2], + keys=['one', 'two'], levels=[['foo', 'bar', 'baz']]) def test_concat_rename_index(self): a = DataFrame(np.random.rand(3, 3), @@ -1488,7 +1502,10 @@ def test_concat_mixed_objs(self): with catch_warnings(record=True): simplefilter("ignore", FutureWarning) panel = tm.makePanel() - pytest.raises(ValueError, lambda: concat([panel, s1], axis=1)) + msg = ("cannot concatenate unaligned mixed dimensional NDFrame" + " objects") + with pytest.raises(ValueError, match=msg): + concat([panel, s1], axis=1) def test_empty_dtype_coerce(self): @@ -1666,7 +1683,8 @@ def test_concat_exclude_none(self): pieces = [df[:5], None, None, df[5:]] result = concat(pieces) tm.assert_frame_equal(result, df) - pytest.raises(ValueError, concat, [None, None]) + with pytest.raises(ValueError, match="All objects passed were None"): + concat([None, None]) def test_concat_datetime64_block(self): from pandas.core.indexes.datetimes import date_range @@ -1799,13 +1817,20 @@ def test_concat_invalid(self): # trying to concat a ndframe with a non-ndframe df1 = mkdf(10, 2) + msg = ('cannot concatenate 
object of type "{}";' + ' only pd.Series, pd.DataFrame, and pd.Panel' + r' \(deprecated\) objs are valid') for obj in [1, dict(), [1, 2], (1, 2)]: - pytest.raises(TypeError, lambda x: concat([df1, obj])) + with pytest.raises(TypeError, match=msg.format(type(obj))): + concat([df1, obj]) def test_concat_invalid_first_argument(self): df1 = mkdf(10, 2) df2 = mkdf(10, 2) - pytest.raises(TypeError, concat, df1, df2) + msg = ('first argument must be an iterable of pandas ' + 'objects, you passed an object of type "DataFrame"') + with pytest.raises(TypeError, match=msg): + concat(df1, df2) # generator ok though concat(DataFrame(np.random.rand(5, 5)) for _ in range(3)) @@ -2310,7 +2335,9 @@ def test_categorical_index_preserver(self): # wrong catgories df3 = DataFrame({'A': a, 'B': Categorical(b, categories=list('abe')) }).set_index('B') - pytest.raises(TypeError, lambda: pd.concat([df2, df3])) + msg = "categories must match existing categories when appending" + with pytest.raises(TypeError, match=msg): + pd.concat([df2, df3]) def test_concat_categoricalindex(self): # GH 16111, categories that aren't lexsorted diff --git a/pandas/tests/reshape/test_melt.py b/pandas/tests/reshape/test_melt.py index 6b633d7e77f52..6bd1958633e25 100644 --- a/pandas/tests/reshape/test_melt.py +++ b/pandas/tests/reshape/test_melt.py @@ -116,9 +116,11 @@ def test_tuple_vars_fail_with_multiindex(self): tuple_b = ('B', 'b') list_b = [tuple_b] + msg = (r"(id|value)_vars must be a list of tuples when columns are" + " a MultiIndex") for id_vars, value_vars in ((tuple_a, list_b), (list_a, tuple_b), (tuple_a, tuple_b)): - with pytest.raises(ValueError, match=r'MultiIndex'): + with pytest.raises(ValueError, match=msg): self.df1.melt(id_vars=id_vars, value_vars=value_vars) def test_custom_var_name(self): @@ -352,7 +354,9 @@ def test_pairs(self): spec = {'visitdt': ['visitdt%d' % i for i in range(1, 3)], 'wt': ['wt%d' % i for i in range(1, 4)]} - pytest.raises(ValueError, lreshape, df, spec) + msg = "All 
column lists must be same length" + with pytest.raises(ValueError, match=msg): + lreshape(df, spec) class TestWideToLong(object): @@ -603,7 +607,8 @@ def test_non_unique_idvars(self): 'B_B1': [1, 2, 3, 4, 5], 'x': [1, 1, 1, 1, 1] }) - with pytest.raises(ValueError): + msg = "the id variables need to uniquely identify each row" + with pytest.raises(ValueError, match=msg): wide_to_long(df, ['A_A', 'B_B'], i='x', j='colname') def test_cast_j_int(self): @@ -639,7 +644,8 @@ def test_identical_stubnames(self): 'A2011': [3.0, 4.0], 'B2010': [5.0, 6.0], 'A': ['X1', 'X2']}) - with pytest.raises(ValueError): + msg = "stubname can't be identical to a column name" + with pytest.raises(ValueError, match=msg): wide_to_long(df, ['A', 'B'], i='A', j='colname') def test_nonnumeric_suffix(self): diff --git a/pandas/tests/reshape/test_pivot.py b/pandas/tests/reshape/test_pivot.py index 7c70f8177d846..e4fbb204af533 100644 --- a/pandas/tests/reshape/test_pivot.py +++ b/pandas/tests/reshape/test_pivot.py @@ -526,7 +526,7 @@ def test_pivot_with_tuple_of_values(self, method): 'bar': ['A', 'B', 'C', 'A', 'B', 'C'], 'baz': [1, 2, 3, 4, 5, 6], 'zoo': ['x', 'y', 'z', 'q', 'w', 't']}) - with pytest.raises(KeyError): + with pytest.raises(KeyError, match=r"^\('bar', 'baz'\)$"): # tuple is seen as a single column name if method: df.pivot(index='zoo', columns='foo', values=('bar', 'baz')) @@ -742,24 +742,27 @@ def test_margins_no_values_two_row_two_cols(self): index=['A', 'B'], columns=['C', 'D'], aggfunc=len, margins=True) assert result.All.tolist() == [3.0, 1.0, 4.0, 3.0, 11.0] - def test_pivot_table_with_margins_set_margin_name(self): + @pytest.mark.parametrize( + 'margin_name', ['foo', 'one', 666, None, ['a', 'b']]) + def test_pivot_table_with_margins_set_margin_name(self, margin_name): # see gh-3335 - for margin_name in ['foo', 'one', 666, None, ['a', 'b']]: - with pytest.raises(ValueError): - # multi-index index - pivot_table(self.data, values='D', index=['A', 'B'], - columns=['C'], 
margins=True, - margins_name=margin_name) - with pytest.raises(ValueError): - # multi-index column - pivot_table(self.data, values='D', index=['C'], - columns=['A', 'B'], margins=True, - margins_name=margin_name) - with pytest.raises(ValueError): - # non-multi-index index/column - pivot_table(self.data, values='D', index=['A'], - columns=['B'], margins=True, - margins_name=margin_name) + msg = (r'Conflicting name "{}" in margins|' + "margins_name argument must be a string").format(margin_name) + with pytest.raises(ValueError, match=msg): + # multi-index index + pivot_table(self.data, values='D', index=['A', 'B'], + columns=['C'], margins=True, + margins_name=margin_name) + with pytest.raises(ValueError, match=msg): + # multi-index column + pivot_table(self.data, values='D', index=['C'], + columns=['A', 'B'], margins=True, + margins_name=margin_name) + with pytest.raises(ValueError, match=msg): + # non-multi-index index/column + pivot_table(self.data, values='D', index=['A'], + columns=['B'], margins=True, + margins_name=margin_name) def test_pivot_timegrouper(self): df = DataFrame({ @@ -818,13 +821,14 @@ def test_pivot_timegrouper(self): values='Quantity', aggfunc=np.sum) tm.assert_frame_equal(result, expected.T) - pytest.raises(KeyError, lambda: pivot_table( - df, index=Grouper(freq='6MS', key='foo'), - columns='Buyer', values='Quantity', aggfunc=np.sum)) - pytest.raises(KeyError, lambda: pivot_table( - df, index='Buyer', - columns=Grouper(freq='6MS', key='foo'), - values='Quantity', aggfunc=np.sum)) + msg = "'The grouper name foo is not found'" + with pytest.raises(KeyError, match=msg): + pivot_table(df, index=Grouper(freq='6MS', key='foo'), + columns='Buyer', values='Quantity', aggfunc=np.sum) + with pytest.raises(KeyError, match=msg): + pivot_table(df, index='Buyer', + columns=Grouper(freq='6MS', key='foo'), + values='Quantity', aggfunc=np.sum) # passing the level df = df.set_index('Date') @@ -838,13 +842,14 @@ def test_pivot_timegrouper(self): 
values='Quantity', aggfunc=np.sum) tm.assert_frame_equal(result, expected.T) - pytest.raises(ValueError, lambda: pivot_table( - df, index=Grouper(freq='6MS', level='foo'), - columns='Buyer', values='Quantity', aggfunc=np.sum)) - pytest.raises(ValueError, lambda: pivot_table( - df, index='Buyer', - columns=Grouper(freq='6MS', level='foo'), - values='Quantity', aggfunc=np.sum)) + msg = "The level foo is not valid" + with pytest.raises(ValueError, match=msg): + pivot_table(df, index=Grouper(freq='6MS', level='foo'), + columns='Buyer', values='Quantity', aggfunc=np.sum) + with pytest.raises(ValueError, match=msg): + pivot_table(df, index='Buyer', + columns=Grouper(freq='6MS', level='foo'), + values='Quantity', aggfunc=np.sum) # double grouper df = DataFrame({ @@ -1279,7 +1284,8 @@ def test_pivot_number_of_levels_larger_than_int32(self): 'ind2': np.arange(2 ** 16), 'count': 0}) - with pytest.raises(ValueError, match='int32 overflow'): + msg = "Unstacked DataFrame is too big, causing int32 overflow" + with pytest.raises(ValueError, match=msg): df.pivot_table(index='ind1', columns='ind2', values='count', aggfunc='count') @@ -1421,8 +1427,9 @@ def test_crosstab_margins_set_margin_name(self): exp_rows = exp_rows.fillna(0).astype(np.int64) tm.assert_series_equal(all_rows, exp_rows) + msg = "margins_name argument must be a string" for margins_name in [666, None, ['a', 'b']]: - with pytest.raises(ValueError): + with pytest.raises(ValueError, match=msg): crosstab(a, [b, c], rownames=['a'], colnames=('b', 'c'), margins=True, margins_name=margins_name)
xref #24332
https://api.github.com/repos/pandas-dev/pandas/pulls/24838
2019-01-19T15:48:27Z
2019-01-19T21:29:36Z
2019-01-19T21:29:35Z
2019-01-20T22:31:57Z
Fix memory growth bug in read_csv
diff --git a/asv_bench/benchmarks/io/csv.py b/asv_bench/benchmarks/io/csv.py index 771f2795334e1..d42a15d61fb0d 100644 --- a/asv_bench/benchmarks/io/csv.py +++ b/asv_bench/benchmarks/io/csv.py @@ -214,4 +214,23 @@ def time_baseline(self): names=list(string.digits[:9])) +class ReadCSVMemoryGrowth(BaseIO): + + chunksize = 20 + num_rows = 1000 + fname = "__test__.csv" + + def setup(self): + with open(self.fname, "w") as f: + for i in range(self.num_rows): + f.write("{i}\n".format(i=i)) + + def mem_parser_chunks(self): + # see gh-24805. + result = read_csv(self.fname, chunksize=self.chunksize) + + for _ in result: + pass + + from ..pandas_vb_common import setup # noqa: F401 diff --git a/pandas/_libs/src/parser/tokenizer.c b/pandas/_libs/src/parser/tokenizer.c index 3a4058f37efc7..a86af7c5416de 100644 --- a/pandas/_libs/src/parser/tokenizer.c +++ b/pandas/_libs/src/parser/tokenizer.c @@ -300,7 +300,7 @@ static int make_stream_space(parser_t *self, size_t nbytes) { * just because a recent chunk did not have as many words. */ if (self->words_len + nbytes < self->max_words_cap) { - length = self->max_words_cap - nbytes; + length = self->max_words_cap - nbytes - 1; } else { length = self->words_len; } diff --git a/pandas/tests/io/parser/test_common.py b/pandas/tests/io/parser/test_common.py index d87ef7cd15a64..05da171d7dc31 100644 --- a/pandas/tests/io/parser/test_common.py +++ b/pandas/tests/io/parser/test_common.py @@ -1916,6 +1916,24 @@ def test_filename_with_special_chars(all_parsers): tm.assert_frame_equal(result, df) +def test_read_csv_memory_growth_chunksize(all_parsers): + # see gh-24805 + # + # Let's just make sure that we don't crash + # as we iteratively process all chunks. 
+ parser = all_parsers + + with tm.ensure_clean() as path: + with open(path, "w") as f: + for i in range(1000): + f.write(str(i) + "\n") + + result = parser.read_csv(path, chunksize=20) + + for _ in result: + pass + + def test_read_table_deprecated(all_parsers): # see gh-21948 parser = all_parsers
The edge case where we hit powers of 2 every time during allocation can be painful. Closes #24805. xref #23527.
https://api.github.com/repos/pandas-dev/pandas/pulls/24837
2019-01-19T11:40:03Z
2019-01-20T16:01:34Z
2019-01-20T16:01:33Z
2019-01-20T19:51:41Z
fixed regression commit
diff --git a/asv_bench/asv.conf.json b/asv_bench/asv.conf.json index 9c333f62810f4..fa098e2455683 100644 --- a/asv_bench/asv.conf.json +++ b/asv_bench/asv.conf.json @@ -118,9 +118,8 @@ // skipped for the matching benchmark. // "regressions_first_commits": { - ".*": "v0.20.0" + ".*": "0409521665" }, "regression_thresholds": { - ".*": 0.05 } }
Fixes an issue with the ASV config. It seems that ASV may not understand tags in the regressions_first_commit. I've replaced that tag with the commit for 0.23.4 I removed the regression threashold, as it's already the default.
https://api.github.com/repos/pandas-dev/pandas/pulls/24835
2019-01-19T04:57:35Z
2019-01-19T13:41:44Z
2019-01-19T13:41:44Z
2019-01-19T13:41:47Z
REF remove unused imports from tseries.frequencies; update imports elsewhere
diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py index 700e9edd89bd0..e0c71b5609096 100644 --- a/pandas/core/arrays/period.py +++ b/pandas/core/arrays/period.py @@ -4,7 +4,8 @@ import numpy as np -from pandas._libs.tslibs import NaT, iNaT, period as libperiod +from pandas._libs.tslibs import ( + NaT, frequencies as libfrequencies, iNaT, period as libperiod) from pandas._libs.tslibs.fields import isleapyear_arr from pandas._libs.tslibs.period import ( DIFFERENT_FREQ, IncompatibleFrequency, Period, get_period_field_arr, @@ -31,7 +32,7 @@ def _field_accessor(name, alias, docstring=None): def f(self): - base, mult = frequencies.get_freq_code(self.freq) + base, mult = libfrequencies.get_freq_code(self.freq) result = get_period_field_arr(alias, self.asi8, base) return result @@ -348,12 +349,12 @@ def to_timestamp(self, freq=None, how='start'): return (self + self.freq).to_timestamp(how='start') - adjust if freq is None: - base, mult = frequencies.get_freq_code(self.freq) - freq = frequencies.get_to_timestamp_base(base) + base, mult = libfrequencies.get_freq_code(self.freq) + freq = libfrequencies.get_to_timestamp_base(base) else: freq = Period._maybe_convert_freq(freq) - base, mult = frequencies.get_freq_code(freq) + base, mult = libfrequencies.get_freq_code(freq) new_data = self.asfreq(freq, how=how) new_data = libperiod.periodarr_to_dt64arr(new_data.asi8, base) @@ -450,8 +451,8 @@ def asfreq(self, freq=None, how='E'): freq = Period._maybe_convert_freq(freq) - base1, mult1 = frequencies.get_freq_code(self.freq) - base2, mult2 = frequencies.get_freq_code(freq) + base1, mult1 = libfrequencies.get_freq_code(self.freq) + base2, mult2 = libfrequencies.get_freq_code(freq) asi8 = self.asi8 # mult1 can't be negative or 0 @@ -551,7 +552,7 @@ def _addsub_int_array( def _add_offset(self, other): assert not isinstance(other, Tick) - base = frequencies.get_base_alias(other.rule_code) + base = libfrequencies.get_base_alias(other.rule_code) if base != 
self.freq.rule_code: _raise_on_incompatible(self, other) @@ -855,7 +856,7 @@ def dt64arr_to_periodarr(data, freq, tz=None): if isinstance(data, (ABCIndexClass, ABCSeries)): data = data._values - base, mult = frequencies.get_freq_code(freq) + base, mult = libfrequencies.get_freq_code(freq) return libperiod.dt64arr_to_periodarr(data.view('i8'), base, tz), freq @@ -865,7 +866,7 @@ def _get_ordinal_range(start, end, periods, freq, mult=1): 'exactly two must be specified') if freq is not None: - _, mult = frequencies.get_freq_code(freq) + _, mult = libfrequencies.get_freq_code(freq) if start is not None: start = Period(start, freq) @@ -919,10 +920,10 @@ def _range_from_fields(year=None, month=None, quarter=None, day=None, if quarter is not None: if freq is None: freq = 'Q' - base = frequencies.FreqGroup.FR_QTR + base = libfrequencies.FreqGroup.FR_QTR else: - base, mult = frequencies.get_freq_code(freq) - if base != frequencies.FreqGroup.FR_QTR: + base, mult = libfrequencies.get_freq_code(freq) + if base != libfrequencies.FreqGroup.FR_QTR: raise AssertionError("base must equal FR_QTR") year, quarter = _make_field_arrays(year, quarter) @@ -931,7 +932,7 @@ def _range_from_fields(year=None, month=None, quarter=None, day=None, val = libperiod.period_ordinal(y, m, 1, 1, 1, 1, 0, 0, base) ordinals.append(val) else: - base, mult = frequencies.get_freq_code(freq) + base, mult = libfrequencies.get_freq_code(freq) arrays = _make_field_arrays(year, month, day, hour, minute, second) for y, mth, d, h, mn, s in compat.zip(*arrays): ordinals.append(libperiod.period_ordinal( diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index 5e4dd2998a3be..a4bd7f9017eb4 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -5,7 +5,8 @@ import numpy as np from pandas._libs import index as libindex -from pandas._libs.tslibs import NaT, iNaT, resolution +from pandas._libs.tslibs import ( + NaT, frequencies as libfrequencies, iNaT, resolution) from 
pandas._libs.tslibs.period import ( DIFFERENT_FREQ, IncompatibleFrequency, Period) from pandas.util._decorators import Appender, Substitution, cache_readonly @@ -376,7 +377,7 @@ def _maybe_convert_timedelta(self, other): return delta elif isinstance(other, DateOffset): freqstr = other.rule_code - base = frequencies.get_base_alias(freqstr) + base = libfrequencies.get_base_alias(freqstr) if base == self.freq.rule_code: return other.n diff --git a/pandas/core/resample.py b/pandas/core/resample.py index 25604b29f22f6..6822225273906 100644 --- a/pandas/core/resample.py +++ b/pandas/core/resample.py @@ -7,6 +7,7 @@ from pandas._libs import lib from pandas._libs.tslibs import NaT, Timestamp +from pandas._libs.tslibs.frequencies import is_subperiod, is_superperiod from pandas._libs.tslibs.period import IncompatibleFrequency import pandas.compat as compat from pandas.compat.numpy import function as nv @@ -28,7 +29,7 @@ from pandas.core.indexes.period import PeriodIndex from pandas.core.indexes.timedeltas import TimedeltaIndex, timedelta_range -from pandas.tseries.frequencies import is_subperiod, is_superperiod, to_offset +from pandas.tseries.frequencies import to_offset from pandas.tseries.offsets import ( DateOffset, Day, Nano, Tick, delta_to_nanoseconds) diff --git a/pandas/plotting/_converter.py b/pandas/plotting/_converter.py index 4c6b3b5132fec..aaa7aa04acf48 100644 --- a/pandas/plotting/_converter.py +++ b/pandas/plotting/_converter.py @@ -11,6 +11,7 @@ from pandas._libs import lib, tslibs from pandas._libs.tslibs import resolution +from pandas._libs.tslibs.frequencies import FreqGroup, get_freq import pandas.compat as compat from pandas.compat import lrange @@ -25,9 +26,6 @@ from pandas.core.indexes.period import Period, PeriodIndex, period_range import pandas.core.tools.datetimes as tools -import pandas.tseries.frequencies as frequencies -from pandas.tseries.frequencies import FreqGroup - # constants HOURS_PER_DAY = 24. MIN_PER_HOUR = 60. 
@@ -955,7 +953,7 @@ def _annual_finder(vmin, vmax, freq): def get_finder(freq): if isinstance(freq, compat.string_types): - freq = frequencies.get_freq(freq) + freq = get_freq(freq) fgroup = resolution.get_freq_group(freq) if fgroup == FreqGroup.FR_ANN: @@ -992,7 +990,7 @@ class TimeSeries_DateLocator(Locator): def __init__(self, freq, minor_locator=False, dynamic_mode=True, base=1, quarter=1, month=1, day=1, plot_obj=None): if isinstance(freq, compat.string_types): - freq = frequencies.get_freq(freq) + freq = get_freq(freq) self.freq = freq self.base = base (self.quarter, self.month, self.day) = (quarter, month, day) @@ -1073,7 +1071,7 @@ class TimeSeries_DateFormatter(Formatter): def __init__(self, freq, minor_locator=False, dynamic_mode=True, plot_obj=None): if isinstance(freq, compat.string_types): - freq = frequencies.get_freq(freq) + freq = get_freq(freq) self.format = None self.freq = freq self.locs = [] diff --git a/pandas/plotting/_timeseries.py b/pandas/plotting/_timeseries.py index 49249ae446747..51b0629005942 100644 --- a/pandas/plotting/_timeseries.py +++ b/pandas/plotting/_timeseries.py @@ -5,6 +5,8 @@ from matplotlib import pylab import numpy as np +from pandas._libs.tslibs.frequencies import ( + FreqGroup, get_base_alias, get_freq, is_subperiod, is_superperiod) from pandas._libs.tslibs.period import Period import pandas.compat as compat @@ -73,7 +75,7 @@ def _maybe_resample(series, ax, kwargs): series = series.to_period(freq=freq) if ax_freq is not None and freq != ax_freq: - if frequencies.is_superperiod(freq, ax_freq): # upsample input + if is_superperiod(freq, ax_freq): # upsample input series = series.copy() series.index = series.index.asfreq(ax_freq, how='s') freq = ax_freq @@ -82,7 +84,7 @@ def _maybe_resample(series, ax, kwargs): series = getattr(series.resample('D'), how)().dropna() series = getattr(series.resample(ax_freq), how)().dropna() freq = ax_freq - elif frequencies.is_subperiod(freq, ax_freq) or _is_sub(freq, ax_freq): + elif 
is_subperiod(freq, ax_freq) or _is_sub(freq, ax_freq): _upsample_others(ax, freq, kwargs) else: # pragma: no cover raise ValueError('Incompatible frequency conversion') @@ -90,13 +92,13 @@ def _maybe_resample(series, ax, kwargs): def _is_sub(f1, f2): - return ((f1.startswith('W') and frequencies.is_subperiod('D', f2)) or - (f2.startswith('W') and frequencies.is_subperiod(f1, 'D'))) + return ((f1.startswith('W') and is_subperiod('D', f2)) or + (f2.startswith('W') and is_subperiod(f1, 'D'))) def _is_sup(f1, f2): - return ((f1.startswith('W') and frequencies.is_superperiod('D', f2)) or - (f2.startswith('W') and frequencies.is_superperiod(f1, 'D'))) + return ((f1.startswith('W') and is_superperiod('D', f2)) or + (f2.startswith('W') and is_superperiod(f1, 'D'))) def _upsample_others(ax, freq, kwargs): @@ -209,7 +211,7 @@ def _get_freq(ax, series): if isinstance(freq, DateOffset): freq = freq.rule_code else: - freq = frequencies.get_base_alias(freq) + freq = get_base_alias(freq) freq = frequencies.get_period_alias(freq) return freq, ax_freq @@ -231,7 +233,7 @@ def _use_dynamic_x(ax, data): if isinstance(freq, DateOffset): freq = freq.rule_code else: - freq = frequencies.get_base_alias(freq) + freq = get_base_alias(freq) freq = frequencies.get_period_alias(freq) if freq is None: @@ -239,9 +241,9 @@ def _use_dynamic_x(ax, data): # hack this for 0.10.1, creating more technical debt...sigh if isinstance(data.index, ABCDatetimeIndex): - base = frequencies.get_freq(freq) + base = get_freq(freq) x = data.index - if (base <= frequencies.FreqGroup.FR_DAY): + if (base <= FreqGroup.FR_DAY): return x[:1].is_normalized return Period(x[0], freq).to_timestamp(tz=x.tz) == x[0] return True @@ -275,7 +277,7 @@ def _maybe_convert_index(ax, data): if freq is None: raise ValueError('Could not get frequency alias for plotting') - freq = frequencies.get_base_alias(freq) + freq = get_base_alias(freq) freq = frequencies.get_period_alias(freq) data = data.to_period(freq=freq) diff --git 
a/pandas/tests/indexes/datetimes/test_construction.py b/pandas/tests/indexes/datetimes/test_construction.py index 2768da0316aad..7c9ca9da89d53 100644 --- a/pandas/tests/indexes/datetimes/test_construction.py +++ b/pandas/tests/indexes/datetimes/test_construction.py @@ -366,8 +366,8 @@ def test_construction_with_ndarray(self): dates = [datetime(2013, 10, 7), datetime(2013, 10, 8), datetime(2013, 10, 9)] - data = DatetimeIndex(dates, freq=pd.tseries.frequencies.BDay()).values - result = DatetimeIndex(data, freq=pd.tseries.frequencies.BDay()) + data = DatetimeIndex(dates, freq=pd.offsets.BDay()).values + result = DatetimeIndex(data, freq=pd.offsets.BDay()) expected = DatetimeIndex(['2013-10-07', '2013-10-08', '2013-10-09'], diff --git a/pandas/tests/series/test_dtypes.py b/pandas/tests/series/test_dtypes.py index 5ec3f69e55fde..e29974f56967f 100644 --- a/pandas/tests/series/test_dtypes.py +++ b/pandas/tests/series/test_dtypes.py @@ -292,7 +292,7 @@ def test_astype_categorical_to_other(self): tm.assert_series_equal(s.astype('category'), expected) tm.assert_series_equal(s.astype(CategoricalDtype()), expected) msg = (r"could not convert string to float: '(0 - 499|9500 - 9999)'|" - r"invalid literal for float\(\): 9500 - 9999") + r"invalid literal for float\(\): (0 - 499|9500 - 9999)") with pytest.raises(ValueError, match=msg): s.astype('float64') diff --git a/pandas/tests/tseries/test_frequencies.py b/pandas/tests/tseries/test_frequencies.py index c0a3e998dc2e0..eb4e63654b47b 100644 --- a/pandas/tests/tseries/test_frequencies.py +++ b/pandas/tests/tseries/test_frequencies.py @@ -3,10 +3,10 @@ import numpy as np import pytest -from pandas._libs.tslibs import resolution +from pandas._libs.tslibs import frequencies as libfrequencies, resolution from pandas._libs.tslibs.ccalendar import MONTHS from pandas._libs.tslibs.frequencies import ( - INVALID_FREQ_ERR_MSG, _period_code_map) + INVALID_FREQ_ERR_MSG, FreqGroup, _period_code_map, get_freq, get_freq_code) import 
pandas.compat as compat from pandas.compat import is_platform_windows, range @@ -277,20 +277,20 @@ def test_rule_aliases(): class TestFrequencyCode(object): def test_freq_code(self): - assert frequencies.get_freq('A') == 1000 - assert frequencies.get_freq('3A') == 1000 - assert frequencies.get_freq('-1A') == 1000 + assert get_freq('A') == 1000 + assert get_freq('3A') == 1000 + assert get_freq('-1A') == 1000 - assert frequencies.get_freq('Y') == 1000 - assert frequencies.get_freq('3Y') == 1000 - assert frequencies.get_freq('-1Y') == 1000 + assert get_freq('Y') == 1000 + assert get_freq('3Y') == 1000 + assert get_freq('-1Y') == 1000 - assert frequencies.get_freq('W') == 4000 - assert frequencies.get_freq('W-MON') == 4001 - assert frequencies.get_freq('W-FRI') == 4005 + assert get_freq('W') == 4000 + assert get_freq('W-MON') == 4001 + assert get_freq('W-FRI') == 4005 for freqstr, code in compat.iteritems(_period_code_map): - result = frequencies.get_freq(freqstr) + result = get_freq(freqstr) assert result == code result = resolution.get_freq_group(freqstr) @@ -324,24 +324,24 @@ def test_freq_group(self): assert resolution.get_freq_group(offsets.Week(weekday=5)) == 4000 def test_get_to_timestamp_base(self): - tsb = frequencies.get_to_timestamp_base - - assert (tsb(frequencies.get_freq_code('D')[0]) == - frequencies.get_freq_code('D')[0]) - assert (tsb(frequencies.get_freq_code('W')[0]) == - frequencies.get_freq_code('D')[0]) - assert (tsb(frequencies.get_freq_code('M')[0]) == - frequencies.get_freq_code('D')[0]) - - assert (tsb(frequencies.get_freq_code('S')[0]) == - frequencies.get_freq_code('S')[0]) - assert (tsb(frequencies.get_freq_code('T')[0]) == - frequencies.get_freq_code('S')[0]) - assert (tsb(frequencies.get_freq_code('H')[0]) == - frequencies.get_freq_code('S')[0]) + tsb = libfrequencies.get_to_timestamp_base + + assert (tsb(get_freq_code('D')[0]) == + get_freq_code('D')[0]) + assert (tsb(get_freq_code('W')[0]) == + get_freq_code('D')[0]) + assert 
(tsb(get_freq_code('M')[0]) == + get_freq_code('D')[0]) + + assert (tsb(get_freq_code('S')[0]) == + get_freq_code('S')[0]) + assert (tsb(get_freq_code('T')[0]) == + get_freq_code('S')[0]) + assert (tsb(get_freq_code('H')[0]) == + get_freq_code('S')[0]) def test_freq_to_reso(self): - Reso = frequencies.Resolution + Reso = resolution.Resolution assert Reso.get_str_from_freq('A') == 'year' assert Reso.get_str_from_freq('Q') == 'quarter' @@ -365,7 +365,7 @@ def test_freq_to_reso(self): def test_resolution_bumping(self): # see gh-14378 - Reso = frequencies.Resolution + Reso = resolution.Resolution assert Reso.get_stride_from_decimal(1.5, 'T') == (90, 'S') assert Reso.get_stride_from_decimal(62.4, 'T') == (3744, 'S') @@ -384,63 +384,63 @@ def test_resolution_bumping(self): def test_get_freq_code(self): # frequency str - assert (frequencies.get_freq_code('A') == - (frequencies.get_freq('A'), 1)) - assert (frequencies.get_freq_code('3D') == - (frequencies.get_freq('D'), 3)) - assert (frequencies.get_freq_code('-2M') == - (frequencies.get_freq('M'), -2)) + assert (get_freq_code('A') == + (get_freq('A'), 1)) + assert (get_freq_code('3D') == + (get_freq('D'), 3)) + assert (get_freq_code('-2M') == + (get_freq('M'), -2)) # tuple - assert (frequencies.get_freq_code(('D', 1)) == - (frequencies.get_freq('D'), 1)) - assert (frequencies.get_freq_code(('A', 3)) == - (frequencies.get_freq('A'), 3)) - assert (frequencies.get_freq_code(('M', -2)) == - (frequencies.get_freq('M'), -2)) + assert (get_freq_code(('D', 1)) == + (get_freq('D'), 1)) + assert (get_freq_code(('A', 3)) == + (get_freq('A'), 3)) + assert (get_freq_code(('M', -2)) == + (get_freq('M'), -2)) # numeric tuple - assert frequencies.get_freq_code((1000, 1)) == (1000, 1) + assert get_freq_code((1000, 1)) == (1000, 1) # offsets - assert (frequencies.get_freq_code(offsets.Day()) == - (frequencies.get_freq('D'), 1)) - assert (frequencies.get_freq_code(offsets.Day(3)) == - (frequencies.get_freq('D'), 3)) - assert 
(frequencies.get_freq_code(offsets.Day(-2)) == - (frequencies.get_freq('D'), -2)) - - assert (frequencies.get_freq_code(offsets.MonthEnd()) == - (frequencies.get_freq('M'), 1)) - assert (frequencies.get_freq_code(offsets.MonthEnd(3)) == - (frequencies.get_freq('M'), 3)) - assert (frequencies.get_freq_code(offsets.MonthEnd(-2)) == - (frequencies.get_freq('M'), -2)) - - assert (frequencies.get_freq_code(offsets.Week()) == - (frequencies.get_freq('W'), 1)) - assert (frequencies.get_freq_code(offsets.Week(3)) == - (frequencies.get_freq('W'), 3)) - assert (frequencies.get_freq_code(offsets.Week(-2)) == - (frequencies.get_freq('W'), -2)) + assert (get_freq_code(offsets.Day()) == + (get_freq('D'), 1)) + assert (get_freq_code(offsets.Day(3)) == + (get_freq('D'), 3)) + assert (get_freq_code(offsets.Day(-2)) == + (get_freq('D'), -2)) + + assert (get_freq_code(offsets.MonthEnd()) == + (get_freq('M'), 1)) + assert (get_freq_code(offsets.MonthEnd(3)) == + (get_freq('M'), 3)) + assert (get_freq_code(offsets.MonthEnd(-2)) == + (get_freq('M'), -2)) + + assert (get_freq_code(offsets.Week()) == + (get_freq('W'), 1)) + assert (get_freq_code(offsets.Week(3)) == + (get_freq('W'), 3)) + assert (get_freq_code(offsets.Week(-2)) == + (get_freq('W'), -2)) # Monday is weekday=0 - assert (frequencies.get_freq_code(offsets.Week(weekday=1)) == - (frequencies.get_freq('W-TUE'), 1)) - assert (frequencies.get_freq_code(offsets.Week(3, weekday=0)) == - (frequencies.get_freq('W-MON'), 3)) - assert (frequencies.get_freq_code(offsets.Week(-2, weekday=4)) == - (frequencies.get_freq('W-FRI'), -2)) + assert (get_freq_code(offsets.Week(weekday=1)) == + (get_freq('W-TUE'), 1)) + assert (get_freq_code(offsets.Week(3, weekday=0)) == + (get_freq('W-MON'), 3)) + assert (get_freq_code(offsets.Week(-2, weekday=4)) == + (get_freq('W-FRI'), -2)) def test_frequency_misc(self): assert (resolution.get_freq_group('T') == - frequencies.FreqGroup.FR_MIN) + FreqGroup.FR_MIN) - code, stride = 
frequencies.get_freq_code(offsets.Hour()) - assert code == frequencies.FreqGroup.FR_HR + code, stride = get_freq_code(offsets.Hour()) + assert code == FreqGroup.FR_HR - code, stride = frequencies.get_freq_code((5, 'T')) - assert code == frequencies.FreqGroup.FR_MIN + code, stride = get_freq_code((5, 'T')) + assert code == FreqGroup.FR_MIN assert stride == 5 offset = offsets.Hour() @@ -452,7 +452,7 @@ def test_frequency_misc(self): assert result == expected with pytest.raises(ValueError, match='Invalid frequency'): - frequencies.get_freq_code((5, 'baz')) + get_freq_code((5, 'baz')) with pytest.raises(ValueError, match='Invalid frequency'): frequencies.to_offset('100foo') diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py index 8cdec31d7ce8a..c454db3bbdffc 100644 --- a/pandas/tseries/frequencies.py +++ b/pandas/tseries/frequencies.py @@ -11,10 +11,7 @@ from pandas._libs.tslibs.conversion import tz_convert from pandas._libs.tslibs.fields import build_field_sarray import pandas._libs.tslibs.frequencies as libfreqs -from pandas._libs.tslibs.frequencies import ( # noqa, semi-public API - FreqGroup, get_base_alias, get_freq, get_freq_code, get_to_timestamp_base, - is_subperiod, is_superperiod) -from pandas._libs.tslibs.offsets import _offset_to_period_map # noqa:E402 +from pandas._libs.tslibs.offsets import _offset_to_period_map import pandas._libs.tslibs.resolution as libresolution from pandas._libs.tslibs.resolution import Resolution from pandas._libs.tslibs.timezones import UTC @@ -28,19 +25,8 @@ from pandas.core.algorithms import unique -from pandas.tseries.offsets import ( # noqa - BDay, BMonthBegin, BMonthEnd, BQuarterBegin, BQuarterEnd, BYearBegin, - BYearEnd, CDay, DateOffset, Day, Hour, Micro, Milli, Minute, MonthBegin, - MonthEnd, Nano, QuarterBegin, QuarterEnd, Second, Week, YearBegin, YearEnd, - prefix_mapping) - -RESO_NS = 0 -RESO_US = 1 -RESO_MS = 2 -RESO_SEC = 3 -RESO_MIN = 4 -RESO_HR = 5 -RESO_DAY = 6 +from pandas.tseries.offsets 
import ( + DateOffset, Day, Hour, Micro, Milli, Minute, Nano, Second, prefix_mapping) _ONE_MICRO = 1000 _ONE_MILLI = (_ONE_MICRO * 1000) @@ -52,11 +38,6 @@ # --------------------------------------------------------------------- # Offset names ("time rules") and related functions -try: - cday = CDay() -except NotImplementedError: - cday = None - #: cache of previously seen offsets _offset_map = {} @@ -216,8 +197,6 @@ def get_offset(name): return _offset_map[name] -getOffset = get_offset - # --------------------------------------------------------------------- # Period codes @@ -314,7 +293,7 @@ def is_unique(self): def is_unique_asi8(self): return len(self.deltas_asi8) == 1 - def get_freq(self): # noqa:F811 + def get_freq(self): """ Find the appropriate frequency string to describe the inferred frequency of self.values
- [ ] closes #xxxx - [ ] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/24834
2019-01-19T00:04:49Z
2019-01-19T21:34:05Z
2019-01-19T21:34:05Z
2019-01-19T22:29:23Z
POC: move to_offset to libfrequencies
diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx index 798e338d5581b..c6715bb924552 100644 --- a/pandas/_libs/tslib.pyx +++ b/pandas/_libs/tslib.pyx @@ -39,7 +39,7 @@ from pandas._libs.tslibs.nattype import nat_strings, iNaT # noqa:F821 from pandas._libs.tslibs.nattype cimport ( checknull_with_nat, NPY_NAT, c_NaT as NaT) -from pandas._libs.tslibs.offsets cimport to_offset +from pandas._libs.tslibs.frequencies cimport to_offset from pandas._libs.tslibs.timestamps cimport create_timestamp_from_ts from pandas._libs.tslibs.timestamps import Timestamp diff --git a/pandas/_libs/tslibs/frequencies.pxd b/pandas/_libs/tslibs/frequencies.pxd index 4e7949e55c836..275de2759c97a 100644 --- a/pandas/_libs/tslibs/frequencies.pxd +++ b/pandas/_libs/tslibs/frequencies.pxd @@ -7,3 +7,5 @@ cpdef object get_freq(object freq) cpdef str get_base_alias(freqstr) cpdef int get_to_timestamp_base(int base) cpdef str get_freq_str(base, mult=*) + +cpdef object to_offset(object freq) diff --git a/pandas/_libs/tslibs/frequencies.pyx b/pandas/_libs/tslibs/frequencies.pyx index bd9e68e134407..2eaf45a83ae81 100644 --- a/pandas/_libs/tslibs/frequencies.pyx +++ b/pandas/_libs/tslibs/frequencies.pyx @@ -1,9 +1,14 @@ # -*- coding: utf-8 -*- import re +from cpython.datetime cimport PyDateTime_IMPORT, PyDelta_Check +PyDateTime_IMPORT + +import numpy as np cimport numpy as cnp cnp.import_array() +from pandas._libs.tslibs cimport util from pandas._libs.tslibs.util cimport is_integer_object, is_string_object from pandas._libs.tslibs.ccalendar import MONTH_NUMBERS @@ -125,8 +130,168 @@ _lite_rule_alias = { _dont_uppercase = {'MS', 'ms'} +# cache of previously seen offsets +cdef dict _c_offset_map = {} +_offset_map = _c_offset_map # visible from python modules + +cdef dict _c_prefix_mapping = {} +prefix_mapping = _c_prefix_mapping # visible from python modules + +cdef dict _c_name_to_offset_map = {} +_name_to_offset_map = _c_name_to_offset_map # visible from python modules + + # 
---------------------------------------------------------------------- +cpdef object to_offset(object freq): + """ + Return DateOffset object from string or tuple representation + or datetime.timedelta object + + Parameters + ---------- + freq : str, tuple, datetime.timedelta, DateOffset or None + + Returns + ------- + delta : DateOffset + None if freq is None + + Raises + ------ + ValueError + If freq is an invalid frequency + + See Also + -------- + pandas.DateOffset + + Examples + -------- + >>> to_offset('5min') + <5 * Minutes> + + >>> to_offset('1D1H') + <25 * Hours> + + >>> to_offset(('W', 2)) + <2 * Weeks: weekday=6> + + >>> to_offset((2, 'B')) + <2 * BusinessDays> + + >>> to_offset(datetime.timedelta(days=1)) + <Day> + + >>> to_offset(Hour()) + <Hour> + """ + if freq is None: + return None + + if util.is_offset_object(freq): + return freq + + if isinstance(freq, tuple): + name = freq[0] + stride = freq[1] + if isinstance(stride, (str, unicode)): + name, stride = stride, name + name, _ = _base_and_stride(name) + delta = get_offset(name) * stride + + elif PyDelta_Check(freq): + delta = None + from .timedeltas import Timedelta + freq = Timedelta(freq) + try: + for name in freq.components._fields: + offset = _name_to_offset_map[name] + stride = getattr(freq.components, name) + if stride != 0: + offset = stride * offset + if delta is None: + delta = offset + else: + delta = delta + offset + except Exception: + raise ValueError(INVALID_FREQ_ERR_MSG.format(freq)) + + else: + # TODO: Avoid runtime/circular import + from .resolution import Resolution + + delta = None + stride_sign = None + try: + splitted = re.split(opattern, freq) + if splitted[-1] != '' and not splitted[-1].isspace(): + # the last element must be blank + raise ValueError('last element must be blank') + + for sep, stride, name in zip(splitted[0::4], splitted[1::4], + splitted[2::4]): + if sep != '' and not sep.isspace(): + raise ValueError('separator must be spaces') + + prefix = 
_lite_rule_alias.get(name) or name + if stride_sign is None: + stride_sign = -1 if stride.startswith('-') else 1 + if not stride: + stride = 1 + if prefix in Resolution._reso_str_bump_map: + stride, name = Resolution.get_stride_from_decimal( + float(stride), prefix + ) + stride = int(stride) + offset = get_offset(name) + offset = offset * int(np.fabs(stride) * stride_sign) + if delta is None: + delta = offset + else: + delta = delta + offset + except Exception: + raise ValueError(INVALID_FREQ_ERR_MSG.format(freq)) + + if delta is None: + raise ValueError(INVALID_FREQ_ERR_MSG.format(freq)) + + return delta + + +# TODO: If we can get rid of getOffset alias in frequencies.py, we get make +# this just cdef +cpdef get_offset(name): + """ + Return DateOffset object associated with rule name + + Examples + -------- + get_offset('EOM') --> BMonthEnd(1) + """ + if name not in _dont_uppercase: + name = name.upper() + name = _lite_rule_alias.get(name, name) + name = _lite_rule_alias.get(name.lower(), name) + else: + name = _lite_rule_alias.get(name, name) + + if name not in _c_offset_map: + try: + split = name.split('-') + klass = _c_prefix_mapping[split[0]] + # handles case where there's no suffix (and will TypeError if too + # many '-') + offset = klass._from_name(*split[1:]) + except (ValueError, TypeError, KeyError): + # bad prefix or suffix + raise ValueError(INVALID_FREQ_ERR_MSG.format(name)) + # cache + _c_offset_map[name] = offset + + return _c_offset_map[name] + + cpdef get_freq_code(freqstr): """ Return freq str or tuple to freq code and stride (mult) diff --git a/pandas/_libs/tslibs/offsets.pxd b/pandas/_libs/tslibs/offsets.pxd deleted file mode 100644 index 2829a27b9905c..0000000000000 --- a/pandas/_libs/tslibs/offsets.pxd +++ /dev/null @@ -1,3 +0,0 @@ -# -*- coding: utf-8 -*- - -cdef to_offset(object obj) diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx index 856aa52f82cf5..780ed97e082e0 100644 --- a/pandas/_libs/tslibs/offsets.pyx 
+++ b/pandas/_libs/tslibs/offsets.pyx @@ -84,17 +84,6 @@ for _d in DAYS: # --------------------------------------------------------------------- # Misc Helpers -cdef to_offset(object obj): - """ - Wrap pandas.tseries.frequencies.to_offset to keep centralize runtime - imports - """ - if isinstance(obj, _BaseOffset): - return obj - from pandas.tseries.frequencies import to_offset - return to_offset(obj) - - def as_datetime(obj): f = getattr(obj, 'to_pydatetime', None) if f is not None: @@ -333,6 +322,7 @@ class _BaseOffset(object): def __eq__(self, other): if is_string_object(other): + from .frequencies import to_offset try: # GH#23524 if to_offset fails, we are dealing with an # incomparable type so == is False and != is True diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx index 2f4edb7de8f95..c5a274da524ca 100644 --- a/pandas/_libs/tslibs/period.pyx +++ b/pandas/_libs/tslibs/period.pyx @@ -43,13 +43,12 @@ from pandas._libs.tslibs.ccalendar import MONTH_NUMBERS from pandas._libs.tslibs.conversion cimport tz_convert_utc_to_tzlocal from pandas._libs.tslibs.frequencies cimport ( get_freq_code, get_base_alias, get_to_timestamp_base, get_freq_str, - get_rule_month) + get_rule_month, to_offset) from pandas._libs.tslibs.parsing import parse_time_string from pandas._libs.tslibs.resolution import Resolution from pandas._libs.tslibs.nattype import nat_strings from pandas._libs.tslibs.nattype cimport ( _nat_scalar_rules, NPY_NAT, is_null_datetimelike, c_NaT as NaT) -from pandas._libs.tslibs.offsets cimport to_offset from pandas._libs.tslibs.offsets import _Tick cdef bint PY2 = str == bytes diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx index 0a19d8749fc7c..88e578b26aab4 100644 --- a/pandas/_libs/tslibs/timedeltas.pyx +++ b/pandas/_libs/tslibs/timedeltas.pyx @@ -28,6 +28,7 @@ from pandas._libs.tslibs.util cimport ( is_float_object, is_string_object) from pandas._libs.tslibs.ccalendar import DAY_SECONDS +from 
pandas._libs.tslibs.frequencies cimport to_offset from pandas._libs.tslibs.np_datetime cimport ( cmp_scalar, reverse_ops, td64_to_tdstruct, pandas_timedeltastruct) @@ -35,7 +36,6 @@ from pandas._libs.tslibs.np_datetime cimport ( from pandas._libs.tslibs.nattype import nat_strings from pandas._libs.tslibs.nattype cimport ( checknull_with_nat, NPY_NAT, c_NaT as NaT) -from pandas._libs.tslibs.offsets cimport to_offset from pandas._libs.tslibs.offsets import _Tick as Tick # ---------------------------------------------------------------------- diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx index fe0564cb62c30..f1f6b15afeed8 100644 --- a/pandas/_libs/tslibs/timestamps.pyx +++ b/pandas/_libs/tslibs/timestamps.pyx @@ -27,12 +27,12 @@ from pandas._libs.tslibs.conversion cimport ( tz_convert_single, _TSObject, convert_to_tsobject, convert_datetime_to_tsobject) from pandas._libs.tslibs.fields import get_start_end_field, get_date_name_field +from pandas._libs.tslibs.frequencies cimport to_offset from pandas._libs.tslibs.nattype cimport NPY_NAT, c_NaT as NaT from pandas._libs.tslibs.np_datetime import OutOfBoundsDatetime from pandas._libs.tslibs.np_datetime cimport ( reverse_ops, cmp_scalar, check_dts_bounds, npy_datetimestruct, dt64_to_dtstruct) -from pandas._libs.tslibs.offsets cimport to_offset from pandas._libs.tslibs.timedeltas import Timedelta from pandas._libs.tslibs.timedeltas cimport delta_to_nanoseconds from pandas._libs.tslibs.timezones cimport ( diff --git a/pandas/tests/tseries/offsets/test_offsets.py b/pandas/tests/tseries/offsets/test_offsets.py index ac3955970587f..51724892e132a 100644 --- a/pandas/tests/tseries/offsets/test_offsets.py +++ b/pandas/tests/tseries/offsets/test_offsets.py @@ -7,7 +7,7 @@ from pandas._libs.tslibs import ( NaT, OutOfBoundsDatetime, Timestamp, conversion, timezones) from pandas._libs.tslibs.frequencies import ( - INVALID_FREQ_ERR_MSG, get_freq_code, get_freq_str) + INVALID_FREQ_ERR_MSG, 
_offset_map, get_freq_code, get_freq_str, get_offset) import pandas._libs.tslibs.offsets as liboffsets import pandas.compat as compat from pandas.compat import range @@ -18,7 +18,6 @@ import pandas.util.testing as tm from pandas.io.pickle import read_pickle -from pandas.tseries.frequencies import _offset_map, get_offset from pandas.tseries.holiday import USFederalHolidayCalendar import pandas.tseries.offsets as offsets from pandas.tseries.offsets import ( diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py index c454db3bbdffc..a4a751c214e69 100644 --- a/pandas/tseries/frequencies.py +++ b/pandas/tseries/frequencies.py @@ -1,22 +1,19 @@ # -*- coding: utf-8 -*- -from datetime import timedelta -import re import numpy as np from pytz import AmbiguousTimeError from pandas._libs.algos import unique_deltas -from pandas._libs.tslibs import Timedelta, Timestamp +from pandas._libs.tslibs import Timestamp from pandas._libs.tslibs.ccalendar import MONTH_ALIASES, int_to_weekday from pandas._libs.tslibs.conversion import tz_convert from pandas._libs.tslibs.fields import build_field_sarray -import pandas._libs.tslibs.frequencies as libfreqs +from pandas._libs.tslibs.frequencies import ( # noqa:F401 + _name_to_offset_map, get_offset, to_offset) from pandas._libs.tslibs.offsets import _offset_to_period_map import pandas._libs.tslibs.resolution as libresolution -from pandas._libs.tslibs.resolution import Resolution +from pandas._libs.tslibs.resolution import Resolution # noqa:F401 from pandas._libs.tslibs.timezones import UTC -import pandas.compat as compat -from pandas.compat import zip from pandas.util._decorators import cache_readonly from pandas.core.dtypes.common import ( @@ -26,7 +23,7 @@ from pandas.core.algorithms import unique from pandas.tseries.offsets import ( - DateOffset, Day, Hour, Micro, Milli, Minute, Nano, Second, prefix_mapping) + Day, Hour, Micro, Milli, Minute, Nano, Second) _ONE_MICRO = 1000 _ONE_MILLI = (_ONE_MICRO * 1000) @@ -38,163 
+35,21 @@ # --------------------------------------------------------------------- # Offset names ("time rules") and related functions -#: cache of previously seen offsets -_offset_map = {} - def get_period_alias(offset_str): """ alias to closest period strings BQ->Q etc""" return _offset_to_period_map.get(offset_str, None) -_name_to_offset_map = {'days': Day(1), - 'hours': Hour(1), - 'minutes': Minute(1), - 'seconds': Second(1), - 'milliseconds': Milli(1), - 'microseconds': Micro(1), - 'nanoseconds': Nano(1)} - - -def to_offset(freq): - """ - Return DateOffset object from string or tuple representation - or datetime.timedelta object - - Parameters - ---------- - freq : str, tuple, datetime.timedelta, DateOffset or None - - Returns - ------- - delta : DateOffset - None if freq is None - - Raises - ------ - ValueError - If freq is an invalid frequency - - See Also - -------- - pandas.DateOffset - - Examples - -------- - >>> to_offset('5min') - <5 * Minutes> - - >>> to_offset('1D1H') - <25 * Hours> - - >>> to_offset(('W', 2)) - <2 * Weeks: weekday=6> - - >>> to_offset((2, 'B')) - <2 * BusinessDays> - - >>> to_offset(datetime.timedelta(days=1)) - <Day> - - >>> to_offset(Hour()) - <Hour> - """ - if freq is None: - return None - - if isinstance(freq, DateOffset): - return freq - - if isinstance(freq, tuple): - name = freq[0] - stride = freq[1] - if isinstance(stride, compat.string_types): - name, stride = stride, name - name, _ = libfreqs._base_and_stride(name) - delta = get_offset(name) * stride - - elif isinstance(freq, timedelta): - delta = None - freq = Timedelta(freq) - try: - for name in freq.components._fields: - offset = _name_to_offset_map[name] - stride = getattr(freq.components, name) - if stride != 0: - offset = stride * offset - if delta is None: - delta = offset - else: - delta = delta + offset - except Exception: - raise ValueError(libfreqs.INVALID_FREQ_ERR_MSG.format(freq)) - - else: - delta = None - stride_sign = None - try: - splitted = 
re.split(libfreqs.opattern, freq) - if splitted[-1] != '' and not splitted[-1].isspace(): - # the last element must be blank - raise ValueError('last element must be blank') - for sep, stride, name in zip(splitted[0::4], splitted[1::4], - splitted[2::4]): - if sep != '' and not sep.isspace(): - raise ValueError('separator must be spaces') - prefix = libfreqs._lite_rule_alias.get(name) or name - if stride_sign is None: - stride_sign = -1 if stride.startswith('-') else 1 - if not stride: - stride = 1 - if prefix in Resolution._reso_str_bump_map.keys(): - stride, name = Resolution.get_stride_from_decimal( - float(stride), prefix - ) - stride = int(stride) - offset = get_offset(name) - offset = offset * int(np.fabs(stride) * stride_sign) - if delta is None: - delta = offset - else: - delta = delta + offset - except Exception: - raise ValueError(libfreqs.INVALID_FREQ_ERR_MSG.format(freq)) - - if delta is None: - raise ValueError(libfreqs.INVALID_FREQ_ERR_MSG.format(freq)) - - return delta - - -def get_offset(name): - """ - Return DateOffset object associated with rule name - - Examples - -------- - get_offset('EOM') --> BMonthEnd(1) - """ - if name not in libfreqs._dont_uppercase: - name = name.upper() - name = libfreqs._lite_rule_alias.get(name, name) - name = libfreqs._lite_rule_alias.get(name.lower(), name) - else: - name = libfreqs._lite_rule_alias.get(name, name) - - if name not in _offset_map: - try: - split = name.split('-') - klass = prefix_mapping[split[0]] - # handles case where there's no suffix (and will TypeError if too - # many '-') - offset = klass._from_name(*split[1:]) - except (ValueError, TypeError, KeyError): - # bad prefix or suffix - raise ValueError(libfreqs.INVALID_FREQ_ERR_MSG.format(name)) - # cache - _offset_map[name] = offset - - return _offset_map[name] +_name_to_offset_map.update({ + 'days': Day(1), + 'hours': Hour(1), + 'minutes': Minute(1), + 'seconds': Second(1), + 'milliseconds': Milli(1), + 'microseconds': Micro(1), + 'nanoseconds': 
Nano(1) +}) # --------------------------------------------------------------------- diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py index f208ce37a3b14..186657e3c6af0 100644 --- a/pandas/tseries/offsets.py +++ b/pandas/tseries/offsets.py @@ -10,6 +10,7 @@ NaT, OutOfBoundsDatetime, Timedelta, Timestamp, ccalendar, conversion, delta_to_nanoseconds, frequencies as libfrequencies, normalize_date, offsets as liboffsets, timezones) +from pandas._libs.tslibs.frequencies import prefix_mapping from pandas._libs.tslibs.offsets import ( ApplyTypeError, BaseOffset, _get_calendar, _is_normalized, _to_dt64, apply_index_wraps, as_datetime, roll_yearday, shift_month) @@ -2479,7 +2480,9 @@ def generate_range(start=None, end=None, periods=None, offset=BDay()): cur = next_date -prefix_mapping = {offset._prefix: offset for offset in [ +# We use a dict defined in libfrequencies and update it here to avoid +# circular import. +prefix_mapping.update({offset._prefix: offset for offset in [ YearBegin, # 'AS' YearEnd, # 'A' BYearBegin, # 'BAS' @@ -2511,4 +2514,4 @@ def generate_range(start=None, end=None, periods=None, offset=BDay()): WeekOfMonth, # 'WOM' FY5253, FY5253Quarter -]} +]})
The runtime non-cython import of to_offset is a sticking point in a bunch of tslibs code. The reason we haven't moved to_offset up into cython is because it requires all of tseries.offsets, and we haven't wanted to move all of that up. This moves to_offset up by defining appropriate dictionaries in the cython module and then filling them in the python modules. It's a bit roundabout, but may be worthwhile (about to run asvs) The refactor that was left out of this PR is moving tslibs.resolution.Resolution into tslibs.frequencies to avoid a circular/runtime import.
https://api.github.com/repos/pandas-dev/pandas/pulls/24833
2019-01-18T23:04:42Z
2019-02-14T18:22:56Z
null
2020-04-05T17:37:28Z
Implement+Test Tick.__rtruediv__
diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx index 856aa52f82cf5..e28462f7103b9 100644 --- a/pandas/_libs/tslibs/offsets.pyx +++ b/pandas/_libs/tslibs/offsets.pyx @@ -18,6 +18,7 @@ from numpy cimport int64_t cnp.import_array() +from pandas._libs.tslibs cimport util from pandas._libs.tslibs.util cimport is_string_object, is_integer_object from pandas._libs.tslibs.ccalendar import MONTHS, DAYS @@ -408,6 +409,10 @@ class _BaseOffset(object): return self.apply(other) def __mul__(self, other): + if hasattr(other, "_typ"): + return NotImplemented + if util.is_array(other): + return np.array([self * x for x in other]) return type(self)(n=other * self.n, normalize=self.normalize, **self.kwds) @@ -458,6 +463,9 @@ class _BaseOffset(object): TypeError if `int(n)` raises ValueError if n != int(n) """ + if util.is_timedelta64_object(n): + raise TypeError('`n` argument must be an integer, ' + 'got {ntype}'.format(ntype=type(n))) try: nint = int(n) except (ValueError, TypeError): @@ -533,12 +541,20 @@ class _Tick(object): can do isinstance checks on _Tick and avoid importing tseries.offsets """ + # ensure that reversed-ops with numpy scalars return NotImplemented + __array_priority__ = 1000 + def __truediv__(self, other): result = self.delta.__truediv__(other) return _wrap_timedelta_result(result) + def __rtruediv__(self, other): + result = self.delta.__rtruediv__(other) + return _wrap_timedelta_result(result) + if PY2: __div__ = __truediv__ + __rdiv__ = __rtruediv__ # ---------------------------------------------------------------------- diff --git a/pandas/tests/tseries/offsets/test_offsets.py b/pandas/tests/tseries/offsets/test_offsets.py index ac3955970587f..621572da57541 100644 --- a/pandas/tests/tseries/offsets/test_offsets.py +++ b/pandas/tests/tseries/offsets/test_offsets.py @@ -257,6 +257,26 @@ def test_offset_n(self, offset_types): mul_offset = offset * 3 assert mul_offset.n == 3 + def test_offset_timedelta64_arg(self, offset_types): + 
# check that offset._validate_n raises TypeError on a timedelt64 + # object + off = self._get_offset(offset_types) + + td64 = np.timedelta64(4567, 's') + with pytest.raises(TypeError, match="argument must be an integer"): + type(off)(n=td64, **off.kwds) + + def test_offset_mul_ndarray(self, offset_types): + off = self._get_offset(offset_types) + + expected = np.array([[off, off * 2], [off * 3, off * 4]]) + + result = np.array([[1, 2], [3, 4]]) * off + tm.assert_numpy_array_equal(result, expected) + + result = off * np.array([[1, 2], [3, 4]]) + tm.assert_numpy_array_equal(result, expected) + def test_offset_freqstr(self, offset_types): offset = self._get_offset(offset_types) diff --git a/pandas/tests/tseries/offsets/test_ticks.py b/pandas/tests/tseries/offsets/test_ticks.py index f4b012ec1897f..9a8251201f75f 100644 --- a/pandas/tests/tseries/offsets/test_ticks.py +++ b/pandas/tests/tseries/offsets/test_ticks.py @@ -11,6 +11,7 @@ import pytest from pandas import Timedelta, Timestamp +import pandas.util.testing as tm from pandas.tseries import offsets from pandas.tseries.offsets import Hour, Micro, Milli, Minute, Nano, Second @@ -262,6 +263,28 @@ def test_tick_division(cls): assert result.delta == off.delta / .001 +@pytest.mark.parametrize('cls', tick_classes) +def test_tick_rdiv(cls): + off = cls(10) + delta = off.delta + td64 = delta.to_timedelta64() + + with pytest.raises(TypeError): + 2 / off + with pytest.raises(TypeError): + 2.0 / off + + assert (td64 * 2.5) / off == 2.5 + + if cls is not Nano: + # skip pytimedelta for Nano since it gets dropped + assert (delta.to_pytimedelta() * 2) / off == 2 + + result = np.array([2 * td64, td64]) / off + expected = np.array([2., 1.]) + tm.assert_numpy_array_equal(result, expected) + + @pytest.mark.parametrize('cls1', tick_classes) @pytest.mark.parametrize('cls2', tick_classes) def test_tick_zero(cls1, cls2):
- [ ] closes #xxxx - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/24832
2019-01-18T21:43:30Z
2019-02-01T20:03:22Z
2019-02-01T20:03:22Z
2019-02-01T20:59:36Z
Question regarding Series.argsort documentation #24816
diff --git a/pandas/core/series.py b/pandas/core/series.py index eb412add7bbbb..0c8e697c572e8 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -2992,11 +2992,13 @@ def argsort(self, axis=0, kind='quicksort', order=None): Parameters ---------- - axis : int (can only be zero) + axis : int + Has no effect but is accepted for compatibility with numpy. kind : {'mergesort', 'quicksort', 'heapsort'}, default 'quicksort' Choice of sorting algorithm. See np.sort for more information. 'mergesort' is the only stable algorithm - order : ignored + order : None + Has no effect but is accepted for compatibility with numpy. Returns -------
- [ ] closes #24816 - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/24830
2019-01-18T19:38:07Z
2019-01-19T01:35:19Z
2019-01-19T01:35:18Z
2019-01-19T01:35:42Z
Excel Reader Refactor - Base Class Introduction
diff --git a/pandas/io/excel.py b/pandas/io/excel.py index 3a7c39ec65309..3d85ae7fd1f46 100644 --- a/pandas/io/excel.py +++ b/pandas/io/excel.py @@ -375,60 +375,25 @@ def read_excel(io, **kwds) -class _XlrdReader(object): - - def __init__(self, filepath_or_buffer): - """Reader using xlrd engine. - - Parameters - ---------- - filepath_or_buffer : string, path object or Workbook - Object to be parsed. - """ - err_msg = "Install xlrd >= 1.0.0 for Excel support" - - try: - import xlrd - except ImportError: - raise ImportError(err_msg) - else: - if xlrd.__VERSION__ < LooseVersion("1.0.0"): - raise ImportError(err_msg + - ". Current version " + xlrd.__VERSION__) +@add_metaclass(abc.ABCMeta) +class _BaseExcelReader(object): - # If filepath_or_buffer is a url, want to keep the data as bytes so - # can't pass to get_filepath_or_buffer() - if _is_url(filepath_or_buffer): - filepath_or_buffer = _urlopen(filepath_or_buffer) - elif not isinstance(filepath_or_buffer, (ExcelFile, xlrd.Book)): - filepath_or_buffer, _, _, _ = get_filepath_or_buffer( - filepath_or_buffer) + @property + @abc.abstractmethod + def sheet_names(self): + pass - if isinstance(filepath_or_buffer, xlrd.Book): - self.book = filepath_or_buffer - elif not isinstance(filepath_or_buffer, xlrd.Book) and hasattr( - filepath_or_buffer, "read"): - # N.B. 
xlrd.Book has a read attribute too - if hasattr(filepath_or_buffer, 'seek'): - try: - # GH 19779 - filepath_or_buffer.seek(0) - except UnsupportedOperation: - # HTTPResponse does not support seek() - # GH 20434 - pass + @abc.abstractmethod + def get_sheet_by_name(self, name): + pass - data = filepath_or_buffer.read() - self.book = xlrd.open_workbook(file_contents=data) - elif isinstance(filepath_or_buffer, compat.string_types): - self.book = xlrd.open_workbook(filepath_or_buffer) - else: - raise ValueError('Must explicitly set engine if not passing in' - ' buffer or path for io.') + @abc.abstractmethod + def get_sheet_by_index(self, index): + pass - @property - def sheet_names(self): - return self.book.sheet_names() + @abc.abstractmethod + def get_sheet_data(self, sheet, convert_float): + pass def parse(self, sheet_name=0, @@ -455,48 +420,6 @@ def parse(self, _validate_header_arg(header) - from xlrd import (xldate, XL_CELL_DATE, - XL_CELL_ERROR, XL_CELL_BOOLEAN, - XL_CELL_NUMBER) - - epoch1904 = self.book.datemode - - def _parse_cell(cell_contents, cell_typ): - """converts the contents of the cell into a pandas - appropriate object""" - - if cell_typ == XL_CELL_DATE: - - # Use the newer xlrd datetime handling. - try: - cell_contents = xldate.xldate_as_datetime( - cell_contents, epoch1904) - except OverflowError: - return cell_contents - - # Excel doesn't distinguish between dates and time, - # so we treat dates on the epoch as times only. - # Also, Excel supports 1900 and 1904 epochs. 
- year = (cell_contents.timetuple())[0:3] - if ((not epoch1904 and year == (1899, 12, 31)) or - (epoch1904 and year == (1904, 1, 1))): - cell_contents = time(cell_contents.hour, - cell_contents.minute, - cell_contents.second, - cell_contents.microsecond) - - elif cell_typ == XL_CELL_ERROR: - cell_contents = np.nan - elif cell_typ == XL_CELL_BOOLEAN: - cell_contents = bool(cell_contents) - elif convert_float and cell_typ == XL_CELL_NUMBER: - # GH5394 - Excel 'numbers' are always floats - # it's a minimal perf hit and less surprising - val = int(cell_contents) - if val == cell_contents: - cell_contents = val - return cell_contents - ret_dict = False # Keep sheetname to maintain backwards compatibility. @@ -504,7 +427,7 @@ def _parse_cell(cell_contents, cell_typ): sheets = sheet_name ret_dict = True elif sheet_name is None: - sheets = self.book.sheet_names() + sheets = self.sheet_names ret_dict = True else: sheets = [sheet_name] @@ -519,19 +442,13 @@ def _parse_cell(cell_contents, cell_typ): print("Reading sheet {sheet}".format(sheet=asheetname)) if isinstance(asheetname, compat.string_types): - sheet = self.book.sheet_by_name(asheetname) + sheet = self.get_sheet_by_name(asheetname) else: # assume an integer if not a string - sheet = self.book.sheet_by_index(asheetname) + sheet = self.get_sheet_by_index(asheetname) - data = [] + data = self.get_sheet_data(sheet, convert_float) usecols = _maybe_convert_usecols(usecols) - for i in range(sheet.nrows): - row = [_parse_cell(value, typ) - for value, typ in zip(sheet.row_values(i), - sheet.row_types(i))] - data.append(row) - if sheet.nrows == 0: output[asheetname] = DataFrame() continue @@ -620,6 +537,120 @@ def _parse_cell(cell_contents, cell_typ): return output[asheetname] +class _XlrdReader(_BaseExcelReader): + + def __init__(self, filepath_or_buffer): + """Reader using xlrd engine. + + Parameters + ---------- + filepath_or_buffer : string, path object or Workbook + Object to be parsed. 
+ """ + err_msg = "Install xlrd >= 1.0.0 for Excel support" + + try: + import xlrd + except ImportError: + raise ImportError(err_msg) + else: + if xlrd.__VERSION__ < LooseVersion("1.0.0"): + raise ImportError(err_msg + + ". Current version " + xlrd.__VERSION__) + + # If filepath_or_buffer is a url, want to keep the data as bytes so + # can't pass to get_filepath_or_buffer() + if _is_url(filepath_or_buffer): + filepath_or_buffer = _urlopen(filepath_or_buffer) + elif not isinstance(filepath_or_buffer, (ExcelFile, xlrd.Book)): + filepath_or_buffer, _, _, _ = get_filepath_or_buffer( + filepath_or_buffer) + + if isinstance(filepath_or_buffer, xlrd.Book): + self.book = filepath_or_buffer + elif hasattr(filepath_or_buffer, "read"): + # N.B. xlrd.Book has a read attribute too + if hasattr(filepath_or_buffer, 'seek'): + try: + # GH 19779 + filepath_or_buffer.seek(0) + except UnsupportedOperation: + # HTTPResponse does not support seek() + # GH 20434 + pass + + data = filepath_or_buffer.read() + self.book = xlrd.open_workbook(file_contents=data) + elif isinstance(filepath_or_buffer, compat.string_types): + self.book = xlrd.open_workbook(filepath_or_buffer) + else: + raise ValueError('Must explicitly set engine if not passing in' + ' buffer or path for io.') + + @property + def sheet_names(self): + return self.book.sheet_names() + + def get_sheet_by_name(self, name): + return self.book.sheet_by_name(name) + + def get_sheet_by_index(self, index): + return self.book.sheet_by_index(index) + + def get_sheet_data(self, sheet, convert_float): + from xlrd import (xldate, XL_CELL_DATE, + XL_CELL_ERROR, XL_CELL_BOOLEAN, + XL_CELL_NUMBER) + + epoch1904 = self.book.datemode + + def _parse_cell(cell_contents, cell_typ): + """converts the contents of the cell into a pandas + appropriate object""" + + if cell_typ == XL_CELL_DATE: + + # Use the newer xlrd datetime handling. 
+ try: + cell_contents = xldate.xldate_as_datetime( + cell_contents, epoch1904) + except OverflowError: + return cell_contents + + # Excel doesn't distinguish between dates and time, + # so we treat dates on the epoch as times only. + # Also, Excel supports 1900 and 1904 epochs. + year = (cell_contents.timetuple())[0:3] + if ((not epoch1904 and year == (1899, 12, 31)) or + (epoch1904 and year == (1904, 1, 1))): + cell_contents = time(cell_contents.hour, + cell_contents.minute, + cell_contents.second, + cell_contents.microsecond) + + elif cell_typ == XL_CELL_ERROR: + cell_contents = np.nan + elif cell_typ == XL_CELL_BOOLEAN: + cell_contents = bool(cell_contents) + elif convert_float and cell_typ == XL_CELL_NUMBER: + # GH5394 - Excel 'numbers' are always floats + # it's a minimal perf hit and less surprising + val = int(cell_contents) + if val == cell_contents: + cell_contents = val + return cell_contents + + data = [] + + for i in range(sheet.nrows): + row = [_parse_cell(value, typ) + for value, typ in zip(sheet.row_values(i), + sheet.row_types(i))] + data.append(row) + + return data + + class ExcelFile(object): """ Class for parsing tabular excel sheets into DataFrame objects.
This is to provide further enablement of new Excel readers like openpyxl and potentially one for reading binary files. The main goal here was the introduction of a base reader class which defines the properties / methods that any subclass needs to define to plug in to. This ultimately could be refactored into a sub-directory which splits readers into it's own module
https://api.github.com/repos/pandas-dev/pandas/pulls/24829
2019-01-18T18:04:38Z
2019-01-26T15:34:47Z
2019-01-26T15:34:47Z
2019-01-26T15:39:36Z
STY: use pytest.raises context syntax (groupby)
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index e52ab66ef9cb4..8766fdbc29755 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -438,7 +438,7 @@ def get_converter(s): return [self.indices[name] for name in names] except KeyError: # turns out it wasn't a tuple - msg = ("must supply a a same-length tuple to get_group" + msg = ("must supply a same-length tuple to get_group" " with multiple grouping keys") raise ValueError(msg) diff --git a/pandas/core/groupby/grouper.py b/pandas/core/groupby/grouper.py index d8df227d4911a..633a1643f6cdd 100644 --- a/pandas/core/groupby/grouper.py +++ b/pandas/core/groupby/grouper.py @@ -464,8 +464,8 @@ def _get_grouper(obj, key=None, axis=0, level=None, sort=True, raise ValueError('level name {} is not the name of the ' 'index'.format(level)) elif level > 0 or level < -1: - raise ValueError('level > 0 or level < -1 only valid with ' - ' MultiIndex') + raise ValueError( + 'level > 0 or level < -1 only valid with MultiIndex') # NOTE: `group_axis` and `group_axis.get_level_values(level)` # are same in this section. 
diff --git a/pandas/tests/groupby/test_bin_groupby.py b/pandas/tests/groupby/test_bin_groupby.py index f33df5fb0eb98..d7ea9bdf9209b 100644 --- a/pandas/tests/groupby/test_bin_groupby.py +++ b/pandas/tests/groupby/test_bin_groupby.py @@ -73,15 +73,18 @@ def test_generate_bins(self): bins = func(values, binner, closed='right') assert ((bins == np.array([3, 6])).all()) - pytest.raises(ValueError, generate_bins_generic, values, [], - 'right') - pytest.raises(ValueError, generate_bins_generic, values[:0], - binner, 'right') - - pytest.raises(ValueError, generate_bins_generic, values, [4], - 'right') - pytest.raises(ValueError, generate_bins_generic, values, [-3, -1], - 'right') + msg = "Invalid length for values or for binner" + with pytest.raises(ValueError, match=msg): + generate_bins_generic(values, [], 'right') + with pytest.raises(ValueError, match=msg): + generate_bins_generic(values[:0], binner, 'right') + + msg = "Values falls before first bin" + with pytest.raises(ValueError, match=msg): + generate_bins_generic(values, [4], 'right') + msg = "Values falls after last bin" + with pytest.raises(ValueError, match=msg): + generate_bins_generic(values, [-3, -1], 'right') def test_group_ohlc(): diff --git a/pandas/tests/groupby/test_filters.py b/pandas/tests/groupby/test_filters.py index 8195d36b7bfe9..4d386db735b57 100644 --- a/pandas/tests/groupby/test_filters.py +++ b/pandas/tests/groupby/test_filters.py @@ -116,8 +116,9 @@ def raise_if_sum_is_zero(x): s = pd.Series([-1, 0, 1, 2]) grouper = s.apply(lambda x: x % 2) grouped = s.groupby(grouper) - pytest.raises(TypeError, - lambda: grouped.filter(raise_if_sum_is_zero)) + msg = "the filter must return a boolean result" + with pytest.raises(TypeError, match=msg): + grouped.filter(raise_if_sum_is_zero) def test_filter_with_axis_in_groupby(): @@ -140,16 +141,28 @@ def test_filter_bad_shapes(): g_s = s.groupby(s) f = lambda x: x - pytest.raises(TypeError, lambda: g_df.filter(f)) - pytest.raises(TypeError, lambda: 
g_s.filter(f)) + msg = "filter function returned a DataFrame, but expected a scalar bool" + with pytest.raises(TypeError, match=msg): + g_df.filter(f) + msg = "the filter must return a boolean result" + with pytest.raises(TypeError, match=msg): + g_s.filter(f) f = lambda x: x == 1 - pytest.raises(TypeError, lambda: g_df.filter(f)) - pytest.raises(TypeError, lambda: g_s.filter(f)) + msg = "filter function returned a DataFrame, but expected a scalar bool" + with pytest.raises(TypeError, match=msg): + g_df.filter(f) + msg = "the filter must return a boolean result" + with pytest.raises(TypeError, match=msg): + g_s.filter(f) f = lambda x: np.outer(x, x) - pytest.raises(TypeError, lambda: g_df.filter(f)) - pytest.raises(TypeError, lambda: g_s.filter(f)) + msg = "can't multiply sequence by non-int of type 'str'" + with pytest.raises(TypeError, match=msg): + g_df.filter(f) + msg = "the filter must return a boolean result" + with pytest.raises(TypeError, match=msg): + g_s.filter(f) def test_filter_nan_is_false(): diff --git a/pandas/tests/groupby/test_function.py b/pandas/tests/groupby/test_function.py index 00714c3333bde..a884a37840f8a 100644 --- a/pandas/tests/groupby/test_function.py +++ b/pandas/tests/groupby/test_function.py @@ -761,8 +761,11 @@ def test_frame_describe_tupleindex(): 'z': [100, 200, 300, 400, 500] * 3}) df1['k'] = [(0, 0, 1), (0, 1, 0), (1, 0, 0)] * 5 df2 = df1.rename(columns={'k': 'key'}) - pytest.raises(ValueError, lambda: df1.groupby('k').describe()) - pytest.raises(ValueError, lambda: df2.groupby('key').describe()) + msg = "Names should be list-like for a MultiIndex" + with pytest.raises(ValueError, match=msg): + df1.groupby('k').describe() + with pytest.raises(ValueError, match=msg): + df2.groupby('key').describe() def test_frame_describe_unstacked_format(): diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index 33cfb9a06a805..98c917a6eca3c 100644 --- a/pandas/tests/groupby/test_groupby.py +++ 
b/pandas/tests/groupby/test_groupby.py @@ -71,7 +71,10 @@ def test_basic(dtype): assert agged[1] == 21 # corner cases - pytest.raises(Exception, grouped.aggregate, lambda x: x * 2) + msg = "Must produce aggregated value" + # exception raised is type Exception + with pytest.raises(Exception, match=msg): + grouped.aggregate(lambda x: x * 2) def test_groupby_nonobject_dtype(mframe, df_mixed_floats): @@ -330,12 +333,17 @@ def f3(x): assert_frame_equal(result1, result2) # should fail (not the same number of levels) - pytest.raises(AssertionError, df.groupby('a').apply, f2) - pytest.raises(AssertionError, df2.groupby('a').apply, f2) + msg = "Cannot concat indices that do not have the same number of levels" + with pytest.raises(AssertionError, match=msg): + df.groupby('a').apply(f2) + with pytest.raises(AssertionError, match=msg): + df2.groupby('a').apply(f2) # should fail (incorrect shape) - pytest.raises(AssertionError, df.groupby('a').apply, f3) - pytest.raises(AssertionError, df2.groupby('a').apply, f3) + with pytest.raises(AssertionError, match=msg): + df.groupby('a').apply(f3) + with pytest.raises(AssertionError, match=msg): + df2.groupby('a').apply(f3) def test_attr_wrapper(ts): @@ -356,7 +364,9 @@ def test_attr_wrapper(ts): expected = grouped.agg(lambda x: x.dtype) # make sure raises error - pytest.raises(AttributeError, getattr, grouped, 'foo') + msg = "'SeriesGroupBy' object has no attribute 'foo'" + with pytest.raises(AttributeError, match=msg): + getattr(grouped, 'foo') def test_frame_groupby(tsframe): @@ -664,11 +674,13 @@ def test_groupby_as_index_series_scalar(df): def test_groupby_as_index_corner(df, ts): - pytest.raises(TypeError, ts.groupby, lambda x: x.weekday(), - as_index=False) + msg = "as_index=False only valid with DataFrame" + with pytest.raises(TypeError, match=msg): + ts.groupby(lambda x: x.weekday(), as_index=False) - pytest.raises(ValueError, df.groupby, lambda x: x.lower(), - as_index=False, axis=1) + msg = "as_index=False only valid for 
axis=0" + with pytest.raises(ValueError, match=msg): + df.groupby(lambda x: x.lower(), as_index=False, axis=1) def test_groupby_multiple_key(df): @@ -722,8 +734,11 @@ def test_omit_nuisance(df): # won't work with axis = 1 grouped = df.groupby({'A': 0, 'C': 0, 'D': 1, 'E': 1}, axis=1) - result = pytest.raises(TypeError, grouped.agg, - lambda x: x.sum(0, numeric_only=False)) + msg = (r'\("unsupported operand type\(s\) for \+: ' + "'Timestamp' and 'float'\"" + r", u?'occurred at index 0'\)") + with pytest.raises(TypeError, match=msg): + grouped.agg(lambda x: x.sum(0, numeric_only=False)) def test_omit_nuisance_python_multiple(three_group): @@ -756,7 +771,9 @@ def test_empty_groups_corner(mframe): def test_nonsense_func(): df = DataFrame([0]) - pytest.raises(Exception, df.groupby, lambda x: x + 'foo') + msg = r"unsupported operand type\(s\) for \+: '(int|long)' and 'str'" + with pytest.raises(TypeError, match=msg): + df.groupby(lambda x: x + 'foo') def test_wrap_aggregated_output_multindex(mframe): @@ -823,12 +840,22 @@ def test_groupby_level_nonmulti(): result = s.groupby(level=[-1]).sum() tm.assert_series_equal(result, expected) - pytest.raises(ValueError, s.groupby, level=1) - pytest.raises(ValueError, s.groupby, level=-2) - pytest.raises(ValueError, s.groupby, level=[]) - pytest.raises(ValueError, s.groupby, level=[0, 0]) - pytest.raises(ValueError, s.groupby, level=[0, 1]) - pytest.raises(ValueError, s.groupby, level=[1]) + msg = "level > 0 or level < -1 only valid with MultiIndex" + with pytest.raises(ValueError, match=msg): + s.groupby(level=1) + with pytest.raises(ValueError, match=msg): + s.groupby(level=-2) + msg = "No group keys passed!" 
+ with pytest.raises(ValueError, match=msg): + s.groupby(level=[]) + msg = "multiple levels only valid with MultiIndex" + with pytest.raises(ValueError, match=msg): + s.groupby(level=[0, 0]) + with pytest.raises(ValueError, match=msg): + s.groupby(level=[0, 1]) + msg = "level > 0 or level < -1 only valid with MultiIndex" + with pytest.raises(ValueError, match=msg): + s.groupby(level=[1]) def test_groupby_complex(): @@ -1101,7 +1128,8 @@ def test_groupby_list_infer_array_like(df): expected = df.groupby(df['A']).mean() assert_frame_equal(result, expected, check_names=False) - pytest.raises(Exception, df.groupby, list(df['A'][:-1])) + with pytest.raises(KeyError, match=r"^'foo'$"): + df.groupby(list(df['A'][:-1])) # pathological case of ambiguity df = DataFrame({'foo': [0, 1], @@ -1128,10 +1156,13 @@ def test_groupby_keys_same_size_as_index(): def test_groupby_one_row(): # GH 11741 + msg = r"^'Z'$" df1 = pd.DataFrame(np.random.randn(1, 4), columns=list('ABCD')) - pytest.raises(KeyError, df1.groupby, 'Z') + with pytest.raises(KeyError, match=msg): + df1.groupby('Z') df2 = pd.DataFrame(np.random.randn(2, 4), columns=list('ABCD')) - pytest.raises(KeyError, df2.groupby, 'Z') + with pytest.raises(KeyError, match=msg): + df2.groupby('Z') def test_groupby_nat_exclude(): @@ -1169,7 +1200,8 @@ def test_groupby_nat_exclude(): tm.assert_frame_equal( grouped.get_group(Timestamp('2013-02-01')), df.iloc[[3, 5]]) - pytest.raises(KeyError, grouped.get_group, pd.NaT) + with pytest.raises(KeyError, match=r"^NaT$"): + grouped.get_group(pd.NaT) nan_df = DataFrame({'nan': [np.nan, np.nan, np.nan], 'nat': [pd.NaT, pd.NaT, pd.NaT]}) @@ -1181,8 +1213,10 @@ def test_groupby_nat_exclude(): assert grouped.groups == {} assert grouped.ngroups == 0 assert grouped.indices == {} - pytest.raises(KeyError, grouped.get_group, np.nan) - pytest.raises(KeyError, grouped.get_group, pd.NaT) + with pytest.raises(KeyError, match=r"^nan$"): + grouped.get_group(np.nan) + with pytest.raises(KeyError, 
match=r"^NaT$"): + grouped.get_group(pd.NaT) @pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning") @@ -1643,7 +1677,7 @@ def test_pivot_table_values_key_error(): df['year'] = df.set_index('eventDate').index.year df['month'] = df.set_index('eventDate').index.month - with pytest.raises(KeyError): + with pytest.raises(KeyError, match="'badname'"): df.reset_index().pivot_table(index='year', columns='month', values='badname', aggfunc='count') @@ -1689,7 +1723,7 @@ def test_tuple_correct_keyerror(): df = pd.DataFrame(1, index=range(3), columns=pd.MultiIndex.from_product([[1, 2], [3, 4]])) - with pytest.raises(KeyError, match="(7, 8)"): + with pytest.raises(KeyError, match=r"^\(7, 8\)$"): df.groupby((7, 8)).mean() diff --git a/pandas/tests/groupby/test_grouping.py b/pandas/tests/groupby/test_grouping.py index 55d9cee0376f1..a509a7cb57c97 100644 --- a/pandas/tests/groupby/test_grouping.py +++ b/pandas/tests/groupby/test_grouping.py @@ -26,9 +26,9 @@ class TestSelection(object): def test_select_bad_cols(self): df = DataFrame([[1, 2]], columns=['A', 'B']) g = df.groupby('A') - pytest.raises(KeyError, g.__getitem__, ['C']) # g[['C']] + with pytest.raises(KeyError, match='"Columns not found: \'C\'"'): + g[['C']] - pytest.raises(KeyError, g.__getitem__, ['A', 'C']) # g[['A', 'C']] with pytest.raises(KeyError, match='^[^A]+$'): # A should not be referenced as a bad column... # will have to rethink regex if you change message! 
@@ -39,8 +39,11 @@ def test_groupby_duplicated_column_errormsg(self): df = DataFrame(columns=['A', 'B', 'A', 'C'], data=[range(4), range(2, 6), range(0, 8, 2)]) - pytest.raises(ValueError, df.groupby, 'A') - pytest.raises(ValueError, df.groupby, ['A', 'B']) + msg = "Grouper for 'A' not 1-dimensional" + with pytest.raises(ValueError, match=msg): + df.groupby('A') + with pytest.raises(ValueError, match=msg): + df.groupby(['A', 'B']) grouped = df.groupby('B') c = grouped.count() @@ -304,7 +307,8 @@ def test_grouper_iter(self, df): def test_empty_groups(self, df): # see gh-1048 - pytest.raises(ValueError, df.groupby, []) + with pytest.raises(ValueError, match="No group keys passed!"): + df.groupby([]) def test_groupby_grouper(self, df): grouped = df.groupby('A') @@ -345,11 +349,15 @@ def test_groupby_grouper_f_sanity_checked(self): # when the elements are Timestamp. # the result is Index[0:6], very confusing. - pytest.raises(AssertionError, ts.groupby, lambda key: key[0:6]) + msg = r"Grouper result violates len\(labels\) == len\(data\)" + with pytest.raises(AssertionError, match=msg): + ts.groupby(lambda key: key[0:6]) def test_grouping_error_on_multidim_input(self, df): - pytest.raises(ValueError, - Grouping, df.index, df[['A', 'A']]) + msg = ("Grouper for '<class 'pandas.core.frame.DataFrame'>'" + " not 1-dimensional") + with pytest.raises(ValueError, match=msg): + Grouping(df.index, df[['A', 'A']]) def test_multiindex_passthru(self): @@ -470,14 +478,18 @@ def test_groupby_level(self, sort, mframe, df): assert_frame_equal(result1, expected1.T) # raise exception for non-MultiIndex - pytest.raises(ValueError, df.groupby, level=1) + msg = "level > 0 or level < -1 only valid with MultiIndex" + with pytest.raises(ValueError, match=msg): + df.groupby(level=1) def test_groupby_level_index_names(self): # GH4014 this used to raise ValueError since 'exp'>1 (in py2) df = DataFrame({'exp': ['A'] * 3 + ['B'] * 3, 'var1': lrange(6), }).set_index('exp') df.groupby(level='exp') - 
pytest.raises(ValueError, df.groupby, level='foo') + msg = "level name foo is not the name of the index" + with pytest.raises(ValueError, match=msg): + df.groupby(level='foo') @pytest.mark.parametrize('sort', [True, False]) def test_groupby_level_with_nas(self, sort): @@ -588,10 +600,15 @@ def test_get_group(self): assert_frame_equal(result1, result3) # must pass a same-length tuple with multiple keys - pytest.raises(ValueError, lambda: g.get_group('foo')) - pytest.raises(ValueError, lambda: g.get_group(('foo'))) - pytest.raises(ValueError, - lambda: g.get_group(('foo', 'bar', 'baz'))) + msg = "must supply a tuple to get_group with multiple grouping keys" + with pytest.raises(ValueError, match=msg): + g.get_group('foo') + with pytest.raises(ValueError, match=msg): + g.get_group(('foo')) + msg = ("must supply a same-length tuple to get_group with multiple" + " grouping keys") + with pytest.raises(ValueError, match=msg): + g.get_group(('foo', 'bar', 'baz')) def test_get_group_empty_bins(self, observed): @@ -605,7 +622,9 @@ def test_get_group_empty_bins(self, observed): expected = DataFrame([3, 1], index=[0, 1]) assert_frame_equal(result, expected) - pytest.raises(KeyError, lambda: g.get_group(pd.Interval(10, 15))) + msg = r"Interval\(10, 15, closed='right'\)" + with pytest.raises(KeyError, match=msg): + g.get_group(pd.Interval(10, 15)) def test_get_group_grouped_by_tuple(self): # GH 8121 diff --git a/pandas/tests/groupby/test_transform.py b/pandas/tests/groupby/test_transform.py index 465ae67fd7318..f120402e6e8ca 100644 --- a/pandas/tests/groupby/test_transform.py +++ b/pandas/tests/groupby/test_transform.py @@ -637,8 +637,11 @@ def test_cython_transform_frame(op, args, targop): for c in df: if c not in ['float', 'int', 'float_missing' ] and op != 'shift': - pytest.raises(DataError, gb[c].transform, op) - pytest.raises(DataError, getattr(gb[c], op)) + msg = "No numeric types to aggregate" + with pytest.raises(DataError, match=msg): + gb[c].transform(op) + with 
pytest.raises(DataError, match=msg): + getattr(gb[c], op)() else: expected = gb[c].apply(targop) expected.name = c
xref #24332
https://api.github.com/repos/pandas-dev/pandas/pulls/24828
2019-01-18T17:37:27Z
2019-01-20T18:44:06Z
2019-01-20T18:44:06Z
2019-01-20T22:33:50Z
TST: remove patches to pandas.util.testing.N
diff --git a/pandas/tests/indexing/multiindex/conftest.py b/pandas/tests/indexing/multiindex/conftest.py index 046fc19c0d9c8..545e092d9ce65 100644 --- a/pandas/tests/indexing/multiindex/conftest.py +++ b/pandas/tests/indexing/multiindex/conftest.py @@ -21,8 +21,7 @@ def multiindex_dataframe_random_data(): def multiindex_year_month_day_dataframe_random_data(): """DataFrame with 3 level MultiIndex (year, month, day) covering first 100 business days from 2000-01-01 with random data""" - tm.N = 100 - tdf = tm.makeTimeDataFrame() + tdf = tm.makeTimeDataFrame(100) ymd = tdf.groupby([lambda x: x.year, lambda x: x.month, lambda x: x.day]).sum() # use Int64Index, to make sure things work diff --git a/pandas/tests/plotting/test_datetimelike.py b/pandas/tests/plotting/test_datetimelike.py index c78ab41d2fae4..ad79cc97f8b77 100644 --- a/pandas/tests/plotting/test_datetimelike.py +++ b/pandas/tests/plotting/test_datetimelike.py @@ -313,10 +313,7 @@ def test_business_freq(self): @pytest.mark.slow def test_business_freq_convert(self): - n = tm.N - tm.N = 300 - bts = tm.makeTimeSeries().asfreq('BM') - tm.N = n + bts = tm.makeTimeSeries(300).asfreq('BM') ts = bts.to_period('M') _, ax = self.plt.subplots() bts.plot(ax=ax) diff --git a/pandas/tests/series/test_alter_axes.py b/pandas/tests/series/test_alter_axes.py index cd4c0a7924d39..04c54bcf8c22c 100644 --- a/pandas/tests/series/test_alter_axes.py +++ b/pandas/tests/series/test_alter_axes.py @@ -22,8 +22,8 @@ def test_setindex(self, string_series): string_series.index = None # wrong length - msg = (r"Length mismatch: Expected axis has (30|100) elements, new" - r" values have (29|99) elements") + msg = ("Length mismatch: Expected axis has 30 elements, new" + " values have 29 elements") with pytest.raises(ValueError, match=msg): string_series.index = np.arange(len(string_series) - 1) diff --git a/pandas/tests/test_multilevel.py b/pandas/tests/test_multilevel.py index b5023c376dedd..a7bbbbb5033ac 100644 --- 
a/pandas/tests/test_multilevel.py +++ b/pandas/tests/test_multilevel.py @@ -47,8 +47,7 @@ def setup_method(self, method): s[3] = np.NaN self.series = s - tm.N = 100 - self.tdf = tm.makeTimeDataFrame() + self.tdf = tm.makeTimeDataFrame(100) self.ymd = self.tdf.groupby([lambda x: x.year, lambda x: x.month, lambda x: x.day]).sum()
xref https://github.com/pandas-dev/pandas/pull/24769#issuecomment-455326819
https://api.github.com/repos/pandas-dev/pandas/pulls/24826
2019-01-18T09:23:19Z
2019-01-18T19:01:45Z
2019-01-18T19:01:45Z
2019-01-18T22:16:46Z
DOC/CLN: Timezone section in timeseries.rst
diff --git a/doc/source/user_guide/timeseries.rst b/doc/source/user_guide/timeseries.rst index f56ad710973dd..5841125817d03 100644 --- a/doc/source/user_guide/timeseries.rst +++ b/doc/source/user_guide/timeseries.rst @@ -2129,11 +2129,13 @@ These can easily be converted to a ``PeriodIndex``: Time Zone Handling ------------------ -Pandas provides rich support for working with timestamps in different time -zones using ``pytz`` and ``dateutil`` libraries. ``dateutil`` currently is only -supported for fixed offset and tzfile zones. The default library is ``pytz``. -Support for ``dateutil`` is provided for compatibility with other -applications e.g. if you use ``dateutil`` in other Python packages. +pandas provides rich support for working with timestamps in different time +zones using the ``pytz`` and ``dateutil`` libraries. + +.. note:: + + pandas does not yet support ``datetime.timezone`` objects from the standard + library. Working with Time Zones ~~~~~~~~~~~~~~~~~~~~~~~ @@ -2145,13 +2147,16 @@ By default, pandas objects are time zone unaware: rng = pd.date_range('3/6/2012 00:00', periods=15, freq='D') rng.tz is None -To supply the time zone, you can use the ``tz`` keyword to ``date_range`` and -other functions. Dateutil time zone strings are distinguished from ``pytz`` -time zones by starting with ``dateutil/``. +To localize these dates to a time zone (assign a particular time zone to a naive date), +you can use the ``tz_localize`` method or the ``tz`` keyword argument in +:func:`date_range`, :class:`Timestamp`, or :class:`DatetimeIndex`. +You can either pass ``pytz`` or ``dateutil`` time zone objects or Olson time zone database strings. +Olson time zone strings will return ``pytz`` time zone objects by default. +To return ``dateutil`` time zone objects, append ``dateutil/`` before the string. * In ``pytz`` you can find a list of common (and less common) time zones using ``from pytz import common_timezones, all_timezones``. 
-* ``dateutil`` uses the OS timezones so there isn't a fixed list available. For +* ``dateutil`` uses the OS time zones so there isn't a fixed list available. For common zones, the names are the same as ``pytz``. .. ipython:: python @@ -2159,23 +2164,23 @@ time zones by starting with ``dateutil/``. import dateutil # pytz - rng_pytz = pd.date_range('3/6/2012 00:00', periods=10, freq='D', + rng_pytz = pd.date_range('3/6/2012 00:00', periods=3, freq='D', tz='Europe/London') rng_pytz.tz # dateutil - rng_dateutil = pd.date_range('3/6/2012 00:00', periods=10, freq='D', - tz='dateutil/Europe/London') + rng_dateutil = pd.date_range('3/6/2012 00:00', periods=3, freq='D') + rng_dateutil = rng_dateutil.tz_localize('dateutil/Europe/London') rng_dateutil.tz # dateutil - utc special case - rng_utc = pd.date_range('3/6/2012 00:00', periods=10, freq='D', + rng_utc = pd.date_range('3/6/2012 00:00', periods=3, freq='D', tz=dateutil.tz.tzutc()) rng_utc.tz -Note that the ``UTC`` timezone is a special case in ``dateutil`` and should be constructed explicitly -as an instance of ``dateutil.tz.tzutc``. You can also construct other timezones explicitly first, -which gives you more control over which time zone is used: +Note that the ``UTC`` time zone is a special case in ``dateutil`` and should be constructed explicitly +as an instance of ``dateutil.tz.tzutc``. You can also construct other time +zones objects explicitly first. .. 
ipython:: python @@ -2183,56 +2188,46 @@ which gives you more control over which time zone is used: # pytz tz_pytz = pytz.timezone('Europe/London') - rng_pytz = pd.date_range('3/6/2012 00:00', periods=10, freq='D', - tz=tz_pytz) + rng_pytz = pd.date_range('3/6/2012 00:00', periods=3, freq='D') + rng_pytz = rng_pytz.tz_localize(tz_pytz) rng_pytz.tz == tz_pytz # dateutil tz_dateutil = dateutil.tz.gettz('Europe/London') - rng_dateutil = pd.date_range('3/6/2012 00:00', periods=10, freq='D', + rng_dateutil = pd.date_range('3/6/2012 00:00', periods=3, freq='D', tz=tz_dateutil) rng_dateutil.tz == tz_dateutil -Timestamps, like Python's ``datetime.datetime`` object can be either time zone -naive or time zone aware. Naive time series and ``DatetimeIndex`` objects can be -*localized* using ``tz_localize``: - -.. ipython:: python - - ts = pd.Series(np.random.randn(len(rng)), rng) - - ts_utc = ts.tz_localize('UTC') - ts_utc - -Again, you can explicitly construct the timezone object first. -You can use the ``tz_convert`` method to convert pandas objects to convert -tz-aware data to another time zone: +To convert a time zone aware pandas object from one time zone to another, +you can use the ``tz_convert`` method. .. ipython:: python - ts_utc.tz_convert('US/Eastern') + rng_pytz.tz_convert('US/Eastern') .. warning:: - Be wary of conversions between libraries. For some zones ``pytz`` and ``dateutil`` have different - definitions of the zone. This is more of a problem for unusual timezones than for + Be wary of conversions between libraries. For some time zones, ``pytz`` and ``dateutil`` have different + definitions of the zone. This is more of a problem for unusual time zones than for 'standard' zones like ``US/Eastern``. .. warning:: - Be aware that a timezone definition across versions of timezone libraries may not - be considered equal. This may cause problems when working with stored data that - is localized using one version and operated on with a different version. 
- See :ref:`here<io.hdf5-notes>` for how to handle such a situation. + Be aware that a time zone definition across versions of time zone libraries may not + be considered equal. This may cause problems when working with stored data that + is localized using one version and operated on with a different version. + See :ref:`here<io.hdf5-notes>` for how to handle such a situation. .. warning:: - It is incorrect to pass a timezone directly into the ``datetime.datetime`` constructor (e.g., - ``datetime.datetime(2011, 1, 1, tz=timezone('US/Eastern'))``. Instead, the datetime - needs to be localized using the localize method on the timezone. + For ``pytz`` time zones, it is incorrect to pass a time zone object directly into + the ``datetime.datetime`` constructor + (e.g., ``datetime.datetime(2011, 1, 1, tz=pytz.timezone('US/Eastern'))``. + Instead, the datetime needs to be localized using the ``localize`` method + on the ``pytz`` time zone object. -Under the hood, all timestamps are stored in UTC. Scalar values from a -``DatetimeIndex`` with a time zone will have their fields (day, hour, minute) +Under the hood, all timestamps are stored in UTC. Values from a time zone aware +:class:`DatetimeIndex` or :class:`Timestamp` will have their fields (day, hour, minute, etc.) localized to the time zone. However, timestamps with the same UTC value are still considered to be equal even if they are in different time zones: @@ -2241,51 +2236,35 @@ still considered to be equal even if they are in different time zones: rng_eastern = rng_utc.tz_convert('US/Eastern') rng_berlin = rng_utc.tz_convert('Europe/Berlin') - rng_eastern[5] - rng_berlin[5] - rng_eastern[5] == rng_berlin[5] - -Like ``Series``, ``DataFrame``, and ``DatetimeIndex``; ``Timestamp`` objects -can be converted to other time zones using ``tz_convert``: - -.. 
ipython:: python - - rng_eastern[5] - rng_berlin[5] - rng_eastern[5].tz_convert('Europe/Berlin') - -Localization of ``Timestamp`` functions just like ``DatetimeIndex`` and ``Series``: - -.. ipython:: python - - rng[5] - rng[5].tz_localize('Asia/Shanghai') - + rng_eastern[2] + rng_berlin[2] + rng_eastern[2] == rng_berlin[2] -Operations between ``Series`` in different time zones will yield UTC -``Series``, aligning the data on the UTC timestamps: +Operations between :class:`Series` in different time zones will yield UTC +:class:`Series`, aligning the data on the UTC timestamps: .. ipython:: python + ts_utc = pd.Series(range(3), pd.date_range('20130101', periods=3, tz='UTC')) eastern = ts_utc.tz_convert('US/Eastern') berlin = ts_utc.tz_convert('Europe/Berlin') result = eastern + berlin result result.index -To remove timezone from tz-aware ``DatetimeIndex``, use ``tz_localize(None)`` or ``tz_convert(None)``. -``tz_localize(None)`` will remove timezone holding local time representations. -``tz_convert(None)`` will remove timezone after converting to UTC time. +To remove time zone information, use ``tz_localize(None)`` or ``tz_convert(None)``. +``tz_localize(None)`` will remove the time zone yielding the local time representation. +``tz_convert(None)`` will remove the time zone after converting to UTC time. .. ipython:: python didx = pd.date_range(start='2014-08-01 09:00', freq='H', - periods=10, tz='US/Eastern') + periods=3, tz='US/Eastern') didx didx.tz_localize(None) didx.tz_convert(None) - # tz_convert(None) is identical with tz_convert('UTC').tz_localize(None) + # tz_convert(None) is identical to tz_convert('UTC').tz_localize(None) didx.tz_convert('UTC').tz_localize(None) .. _timeseries.timezone_ambiguous: @@ -2293,54 +2272,34 @@ To remove timezone from tz-aware ``DatetimeIndex``, use ``tz_localize(None)`` or Ambiguous Times when Localizing ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -In some cases, localize cannot determine the DST and non-DST hours when there are -duplicates. 
This often happens when reading files or database records that simply -duplicate the hours. Passing ``ambiguous='infer'`` into ``tz_localize`` will -attempt to determine the right offset. Below the top example will fail as it -contains ambiguous times and the bottom will infer the right offset. +``tz_localize`` may not be able to determine the UTC offset of a timestamp +because daylight savings time (DST) in a local time zone causes some times to occur +twice within one day ("clocks fall back"). The following options are available: + +* ``'raise'``: Raises a ``pytz.AmbiguousTimeError`` (the default behavior) +* ``'infer'``: Attempt to determine the correct offset base on the monotonicity of the timestamps +* ``'NaT'``: Replaces ambiguous times with ``NaT`` +* ``bool``: ``True`` represents a DST time, ``False`` represents non-DST time. An array-like of ``bool`` values is supported for a sequence of times. .. ipython:: python rng_hourly = pd.DatetimeIndex(['11/06/2011 00:00', '11/06/2011 01:00', - '11/06/2011 01:00', '11/06/2011 02:00', - '11/06/2011 03:00']) + '11/06/2011 01:00', '11/06/2011 02:00']) -This will fail as there are ambiguous times +This will fail as there are ambiguous times (``'11/06/2011 01:00'``) .. code-block:: ipython In [2]: rng_hourly.tz_localize('US/Eastern') AmbiguousTimeError: Cannot infer dst time from Timestamp('2011-11-06 01:00:00'), try using the 'ambiguous' argument -Infer the ambiguous times - -.. ipython:: python - - rng_hourly_eastern = rng_hourly.tz_localize('US/Eastern', ambiguous='infer') - rng_hourly_eastern.to_list() - -In addition to 'infer', there are several other arguments supported. Passing -an array-like of bools or 0s/1s where True represents a DST hour and False a -non-DST hour, allows for distinguishing more than one DST -transition (e.g., if you have multiple records in a database each with their -own DST transition). Or passing 'NaT' will fill in transition times -with not-a-time values. 
These methods are available in the ``DatetimeIndex`` -constructor as well as ``tz_localize``. +Handle these ambiguous times by specifying the following. .. ipython:: python - rng_hourly_dst = np.array([1, 1, 0, 0, 0]) - rng_hourly.tz_localize('US/Eastern', ambiguous=rng_hourly_dst).to_list() - rng_hourly.tz_localize('US/Eastern', ambiguous='NaT').to_list() - - didx = pd.date_range(start='2014-08-01 09:00', freq='H', - periods=10, tz='US/Eastern') - didx - didx.tz_localize(None) - didx.tz_convert(None) - - # tz_convert(None) is identical with tz_convert('UTC').tz_localize(None) - didx.tz_convert('UCT').tz_localize(None) + rng_hourly.tz_localize('US/Eastern', ambiguous='infer') + rng_hourly.tz_localize('US/Eastern', ambiguous='NaT') + rng_hourly.tz_localize('US/Eastern', ambiguous=[True, True, False, False]) .. _timeseries.timezone_nonexistent: @@ -2348,7 +2307,7 @@ Nonexistent Times when Localizing ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ A DST transition may also shift the local time ahead by 1 hour creating nonexistent -local times. The behavior of localizing a timeseries with nonexistent times +local times ("clocks spring forward"). The behavior of localizing a timeseries with nonexistent times can be controlled by the ``nonexistent`` argument. The following options are available: * ``'raise'``: Raises a ``pytz.NonExistentTimeError`` (the default behavior) @@ -2382,58 +2341,61 @@ Transform nonexistent times to ``NaT`` or shift the times. .. _timeseries.timezone_series: -TZ Aware Dtypes -~~~~~~~~~~~~~~~ +Time Zone Series Operations +~~~~~~~~~~~~~~~~~~~~~~~~~~~ -``Series/DatetimeIndex`` with a timezone **naive** value are represented with a dtype of ``datetime64[ns]``. +A :class:`Series` with time zone **naive** values is +represented with a dtype of ``datetime64[ns]``. .. ipython:: python s_naive = pd.Series(pd.date_range('20130101', periods=3)) s_naive -``Series/DatetimeIndex`` with a timezone **aware** value are represented with a dtype of ``datetime64[ns, tz]``. 
+A :class:`Series` with a time zone **aware** values is +represented with a dtype of ``datetime64[ns, tz]`` where ``tz`` is the time zone .. ipython:: python s_aware = pd.Series(pd.date_range('20130101', periods=3, tz='US/Eastern')) s_aware -Both of these ``Series`` can be manipulated via the ``.dt`` accessor, see :ref:`here <basics.dt_accessors>`. +Both of these :class:`Series` time zone information +can be manipulated via the ``.dt`` accessor, see :ref:`the dt accessor section <basics.dt_accessors>`. -For example, to localize and convert a naive stamp to timezone aware. +For example, to localize and convert a naive stamp to time zone aware. .. ipython:: python s_naive.dt.tz_localize('UTC').dt.tz_convert('US/Eastern') - -Further more you can ``.astype(...)`` timezone aware (and naive). This operation is effectively a localize AND convert on a naive stamp, and -a convert on an aware stamp. +Time zone information can also be manipulated using the ``astype`` method. +This method can localize and convert time zone naive timestamps or +convert time zone aware timestamps. .. ipython:: python - # localize and convert a naive timezone + # localize and convert a naive time zone s_naive.astype('datetime64[ns, US/Eastern]') # make an aware tz naive s_aware.astype('datetime64[ns]') - # convert to a new timezone + # convert to a new time zone s_aware.astype('datetime64[ns, CET]') .. note:: Using :meth:`Series.to_numpy` on a ``Series``, returns a NumPy array of the data. - NumPy does not currently support timezones (even though it is *printing* in the local timezone!), - therefore an object array of Timestamps is returned for timezone aware data: + NumPy does not currently support time zones (even though it is *printing* in the local time zone!), + therefore an object array of Timestamps is returned for time zone aware data: .. 
ipython:: python s_naive.to_numpy() s_aware.to_numpy() - By converting to an object array of Timestamps, it preserves the timezone + By converting to an object array of Timestamps, it preserves the time zone information. For example, when converting back to a Series: .. ipython:: python
- Shortened some of the examples to show the minimum, necessary functionality - Removed some redundant examples - Refactored the ambiguous times handling section - Showcase `tz_localize` more in the beginning of the section & noted that pandas doesn't support `datetime.timezone` objects yet
https://api.github.com/repos/pandas-dev/pandas/pulls/24825
2019-01-18T06:22:09Z
2019-02-03T20:34:54Z
2019-02-03T20:34:54Z
2019-02-03T21:36:41Z
avoid non-standard imports
diff --git a/ci/code_checks.sh b/ci/code_checks.sh index 3e62a08975dad..c8bfc564e7573 100755 --- a/ci/code_checks.sh +++ b/ci/code_checks.sh @@ -112,6 +112,7 @@ if [[ -z "$CHECK" || "$CHECK" == "patterns" ]]; then # Check for imports from pandas.core.common instead of `import pandas.core.common as com` MSG='Check for non-standard imports' ; echo $MSG invgrep -R --include="*.py*" -E "from pandas.core.common import " pandas + # invgrep -R --include="*.py*" -E "from numpy import nan " pandas # GH#24822 not yet implemented since the offending imports have not all been removed RET=$(($RET + $?)) ; echo $MSG "DONE" MSG='Check for pytest warns' ; echo $MSG diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_analytics.py index 244e8f83bea37..f2c3f50c291c3 100644 --- a/pandas/tests/frame/test_analytics.py +++ b/pandas/tests/frame/test_analytics.py @@ -6,8 +6,6 @@ import warnings import numpy as np -from numpy import nan -from numpy.random import randn import pytest from pandas.compat import PY35, lrange @@ -240,22 +238,22 @@ class TestDataFrameAnalytics(): @td.skip_if_no_scipy def test_corr_pearson(self, float_frame): - float_frame['A'][:5] = nan - float_frame['B'][5:10] = nan + float_frame['A'][:5] = np.nan + float_frame['B'][5:10] = np.nan self._check_method(float_frame, 'pearson') @td.skip_if_no_scipy def test_corr_kendall(self, float_frame): - float_frame['A'][:5] = nan - float_frame['B'][5:10] = nan + float_frame['A'][:5] = np.nan + float_frame['B'][5:10] = np.nan self._check_method(float_frame, 'kendall') @td.skip_if_no_scipy def test_corr_spearman(self, float_frame): - float_frame['A'][:5] = nan - float_frame['B'][5:10] = nan + float_frame['A'][:5] = np.nan + float_frame['B'][5:10] = np.nan self._check_method(float_frame, 'spearman') @@ -266,8 +264,8 @@ def _check_method(self, frame, method='pearson'): @td.skip_if_no_scipy def test_corr_non_numeric(self, float_frame, float_string_frame): - float_frame['A'][:5] = nan - float_frame['B'][5:10] = 
nan + float_frame['A'][:5] = np.nan + float_frame['B'][5:10] = np.nan # exclude non-numeric types result = float_string_frame.corr() @@ -351,16 +349,16 @@ def test_cov(self, float_frame, float_string_frame): # with NAs frame = float_frame.copy() - frame['A'][:5] = nan - frame['B'][5:10] = nan + frame['A'][:5] = np.nan + frame['B'][5:10] = np.nan result = float_frame.cov(min_periods=len(float_frame) - 8) expected = float_frame.cov() expected.loc['A', 'B'] = np.nan expected.loc['B', 'A'] = np.nan # regular - float_frame['A'][:5] = nan - float_frame['B'][:10] = nan + float_frame['A'][:5] = np.nan + float_frame['B'][:10] = np.nan cov = float_frame.cov() tm.assert_almost_equal(cov['A']['C'], @@ -385,7 +383,7 @@ def test_cov(self, float_frame, float_string_frame): def test_corrwith(self, datetime_frame): a = datetime_frame - noise = Series(randn(len(a)), index=a.index) + noise = Series(np.random.randn(len(a)), index=a.index) b = datetime_frame.add(noise, axis=0) @@ -409,8 +407,9 @@ def test_corrwith(self, datetime_frame): # non time-series data index = ['a', 'b', 'c', 'd', 'e'] columns = ['one', 'two', 'three', 'four'] - df1 = DataFrame(randn(5, 4), index=index, columns=columns) - df2 = DataFrame(randn(4, 4), index=index[:4], columns=columns) + df1 = DataFrame(np.random.randn(5, 4), index=index, columns=columns) + df2 = DataFrame(np.random.randn(4, 4), + index=index[:4], columns=columns) correls = df1.corrwith(df2, axis=1) for row in index[:4]: tm.assert_almost_equal(correls[row], @@ -823,9 +822,9 @@ def test_min(self, float_frame_with_na, int_frame, assert_stat_op_api('min', float_frame, float_string_frame) def test_cummin(self, datetime_frame): - datetime_frame.loc[5:10, 0] = nan - datetime_frame.loc[10:15, 1] = nan - datetime_frame.loc[15:, 2] = nan + datetime_frame.loc[5:10, 0] = np.nan + datetime_frame.loc[10:15, 1] = np.nan + datetime_frame.loc[15:, 2] = np.nan # axis = 0 cummin = datetime_frame.cummin() @@ -846,9 +845,9 @@ def test_cummin(self, datetime_frame): 
assert np.shape(cummin_xs) == np.shape(datetime_frame) def test_cummax(self, datetime_frame): - datetime_frame.loc[5:10, 0] = nan - datetime_frame.loc[10:15, 1] = nan - datetime_frame.loc[15:, 2] = nan + datetime_frame.loc[5:10, 0] = np.nan + datetime_frame.loc[10:15, 1] = np.nan + datetime_frame.loc[15:, 2] = np.nan # axis = 0 cummax = datetime_frame.cummax() @@ -950,9 +949,9 @@ def test_mixed_ops(self, op): assert len(result) == 2 def test_cumsum(self, datetime_frame): - datetime_frame.loc[5:10, 0] = nan - datetime_frame.loc[10:15, 1] = nan - datetime_frame.loc[15:, 2] = nan + datetime_frame.loc[5:10, 0] = np.nan + datetime_frame.loc[10:15, 1] = np.nan + datetime_frame.loc[15:, 2] = np.nan # axis = 0 cumsum = datetime_frame.cumsum() @@ -973,9 +972,9 @@ def test_cumsum(self, datetime_frame): assert np.shape(cumsum_xs) == np.shape(datetime_frame) def test_cumprod(self, datetime_frame): - datetime_frame.loc[5:10, 0] = nan - datetime_frame.loc[10:15, 1] = nan - datetime_frame.loc[15:, 2] = nan + datetime_frame.loc[5:10, 0] = np.nan + datetime_frame.loc[10:15, 1] = np.nan + datetime_frame.loc[15:, 2] = np.nan # axis = 0 cumprod = datetime_frame.cumprod() @@ -1753,7 +1752,7 @@ def test_round(self): expected_neg_rounded) # nan in Series round - nan_round_Series = Series({'col1': nan, 'col2': 1}) + nan_round_Series = Series({'col1': np.nan, 'col2': 1}) # TODO(wesm): unused? 
expected_nan_round = DataFrame({ # noqa @@ -2084,8 +2083,10 @@ def test_dot(self): result = A.dot(b) # unaligned - df = DataFrame(randn(3, 4), index=[1, 2, 3], columns=lrange(4)) - df2 = DataFrame(randn(5, 3), index=lrange(5), columns=[1, 2, 3]) + df = DataFrame(np.random.randn(3, 4), + index=[1, 2, 3], columns=lrange(4)) + df2 = DataFrame(np.random.randn(5, 3), + index=lrange(5), columns=[1, 2, 3]) with pytest.raises(ValueError, match='aligned'): df.dot(df2) @@ -2144,8 +2145,10 @@ def test_matmul(self): tm.assert_frame_equal(result, expected) # unaligned - df = DataFrame(randn(3, 4), index=[1, 2, 3], columns=lrange(4)) - df2 = DataFrame(randn(5, 3), index=lrange(5), columns=[1, 2, 3]) + df = DataFrame(np.random.randn(3, 4), + index=[1, 2, 3], columns=lrange(4)) + df2 = DataFrame(np.random.randn(5, 3), + index=lrange(5), columns=[1, 2, 3]) with pytest.raises(ValueError, match='aligned'): operator.matmul(df, df2) diff --git a/pandas/tests/frame/test_api.py b/pandas/tests/frame/test_api.py index c1be64829c303..0934dd20638e4 100644 --- a/pandas/tests/frame/test_api.py +++ b/pandas/tests/frame/test_api.py @@ -7,7 +7,6 @@ import pydoc import numpy as np -from numpy.random import randn import pytest from pandas.compat import long, lrange, range @@ -149,8 +148,8 @@ def test_not_hashable(self, empty_frame): pytest.raises(TypeError, hash, empty_frame) def test_new_empty_index(self): - df1 = self.klass(randn(0, 3)) - df2 = self.klass(randn(0, 3)) + df1 = self.klass(np.random.randn(0, 3)) + df2 = self.klass(np.random.randn(0, 3)) df1.index.name = 'foo' assert df2.index.name is None diff --git a/pandas/tests/frame/test_axis_select_reindex.py b/pandas/tests/frame/test_axis_select_reindex.py index 508a68d44bb04..dea925dcde676 100644 --- a/pandas/tests/frame/test_axis_select_reindex.py +++ b/pandas/tests/frame/test_axis_select_reindex.py @@ -5,7 +5,6 @@ from datetime import datetime import numpy as np -from numpy import random import pytest from pandas.compat import lrange, lzip, 
u @@ -289,7 +288,7 @@ def test_reindex_nan(self): assert_frame_equal(left, right) def test_reindex_name_remains(self): - s = Series(random.rand(10)) + s = Series(np.random.rand(10)) df = DataFrame(s, index=np.arange(len(s))) i = Series(np.arange(10), name='iname') @@ -299,7 +298,7 @@ def test_reindex_name_remains(self): df = df.reindex(Index(np.arange(10), name='tmpname')) assert df.index.name == 'tmpname' - s = Series(random.rand(10)) + s = Series(np.random.rand(10)) df = DataFrame(s.T, index=np.arange(len(s))) i = Series(np.arange(10), name='iname') df = df.reindex(columns=i) diff --git a/pandas/tests/frame/test_block_internals.py b/pandas/tests/frame/test_block_internals.py index 67f27948343f7..5419f4d5127f6 100644 --- a/pandas/tests/frame/test_block_internals.py +++ b/pandas/tests/frame/test_block_internals.py @@ -6,7 +6,6 @@ import itertools import numpy as np -from numpy import nan import pytest from pandas.compat import StringIO @@ -216,7 +215,7 @@ def test_construction_with_mixed(self, float_string_frame): # test construction edge cases with mixed types # f7u12, this does not work without extensive workaround - data = [[datetime(2001, 1, 5), nan, datetime(2001, 1, 2)], + data = [[datetime(2001, 1, 5), np.nan, datetime(2001, 1, 2)], [datetime(2000, 1, 2), datetime(2000, 1, 3), datetime(2000, 1, 1)]] df = DataFrame(data) @@ -558,18 +557,18 @@ def test_get_X_columns(self): def test_strange_column_corruption_issue(self): # (wesm) Unclear how exactly this is related to internal matters df = DataFrame(index=[0, 1]) - df[0] = nan + df[0] = np.nan wasCol = {} # uncommenting these makes the results match # for col in xrange(100, 200): # wasCol[col] = 1 - # df[col] = nan + # df[col] = np.nan for i, dt in enumerate(df.index): for col in range(100, 200): if col not in wasCol: wasCol[col] = 1 - df[col] = nan + df[col] = np.nan df[col][dt] = i myid = 100 diff --git a/pandas/tests/frame/test_combine_concat.py b/pandas/tests/frame/test_combine_concat.py index 
b38acbf5dd72f..59497153c8524 100644 --- a/pandas/tests/frame/test_combine_concat.py +++ b/pandas/tests/frame/test_combine_concat.py @@ -5,7 +5,6 @@ from datetime import datetime import numpy as np -from numpy import nan import pytest from pandas.compat import lrange @@ -247,20 +246,20 @@ def test_append_dtypes(self): assert_frame_equal(result, expected) def test_update(self): - df = DataFrame([[1.5, nan, 3.], - [1.5, nan, 3.], - [1.5, nan, 3], - [1.5, nan, 3]]) + df = DataFrame([[1.5, np.nan, 3.], + [1.5, np.nan, 3.], + [1.5, np.nan, 3], + [1.5, np.nan, 3]]) other = DataFrame([[3.6, 2., np.nan], [np.nan, np.nan, 7]], index=[1, 3]) df.update(other) - expected = DataFrame([[1.5, nan, 3], + expected = DataFrame([[1.5, np.nan, 3], [3.6, 2, 3], - [1.5, nan, 3], - [1.5, nan, 7.]]) + [1.5, np.nan, 3], + [1.5, np.nan, 7.]]) assert_frame_equal(df, expected) def test_update_dtypes(self): @@ -277,37 +276,37 @@ def test_update_dtypes(self): assert_frame_equal(df, expected) def test_update_nooverwrite(self): - df = DataFrame([[1.5, nan, 3.], - [1.5, nan, 3.], - [1.5, nan, 3], - [1.5, nan, 3]]) + df = DataFrame([[1.5, np.nan, 3.], + [1.5, np.nan, 3.], + [1.5, np.nan, 3], + [1.5, np.nan, 3]]) other = DataFrame([[3.6, 2., np.nan], [np.nan, np.nan, 7]], index=[1, 3]) df.update(other, overwrite=False) - expected = DataFrame([[1.5, nan, 3], + expected = DataFrame([[1.5, np.nan, 3], [1.5, 2, 3], - [1.5, nan, 3], - [1.5, nan, 3.]]) + [1.5, np.nan, 3], + [1.5, np.nan, 3.]]) assert_frame_equal(df, expected) def test_update_filtered(self): - df = DataFrame([[1.5, nan, 3.], - [1.5, nan, 3.], - [1.5, nan, 3], - [1.5, nan, 3]]) + df = DataFrame([[1.5, np.nan, 3.], + [1.5, np.nan, 3.], + [1.5, np.nan, 3], + [1.5, np.nan, 3]]) other = DataFrame([[3.6, 2., np.nan], [np.nan, np.nan, 7]], index=[1, 3]) df.update(other, filter_func=lambda x: x > 2) - expected = DataFrame([[1.5, nan, 3], - [1.5, nan, 3], - [1.5, nan, 3], - [1.5, nan, 7.]]) + expected = DataFrame([[1.5, np.nan, 3], + [1.5, np.nan, 
3], + [1.5, np.nan, 3], + [1.5, np.nan, 7.]]) assert_frame_equal(df, expected) @pytest.mark.parametrize('bad_kwarg, exception, msg', [ @@ -322,12 +321,12 @@ def test_update_raise_bad_parameter(self, bad_kwarg, exception, msg): def test_update_raise_on_overlap(self): df = DataFrame([[1.5, 1, 3.], - [1.5, nan, 3.], - [1.5, nan, 3], - [1.5, nan, 3]]) + [1.5, np.nan, 3.], + [1.5, np.nan, 3], + [1.5, np.nan, 3]]) - other = DataFrame([[2., nan], - [nan, 7]], index=[1, 3], columns=[1, 2]) + other = DataFrame([[2., np.nan], + [np.nan, 7]], index=[1, 3], columns=[1, 2]) with pytest.raises(ValueError, match="Data overlaps"): df.update(other, errors='raise') diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py index 4e0143c368e10..4f6a2e2bfbebf 100644 --- a/pandas/tests/frame/test_constructors.py +++ b/pandas/tests/frame/test_constructors.py @@ -8,8 +8,6 @@ import numpy as np import numpy.ma as ma -import numpy.ma.mrecords as mrecords -from numpy.random import randn import pytest from pandas.compat import ( @@ -489,7 +487,7 @@ def test_constructor_dict_cast(self): # can't cast to float test_data = { 'A': dict(zip(range(20), tm.makeStringIndex(20))), - 'B': dict(zip(range(15), randn(15))) + 'B': dict(zip(range(15), np.random.randn(15))) } frame = DataFrame(test_data, dtype=float) assert len(frame) == 20 @@ -605,7 +603,7 @@ def test_constructor_period(self): def test_nested_dict_frame_constructor(self): rng = pd.period_range('1/1/2000', periods=5) - df = DataFrame(randn(10, 5), columns=rng) + df = DataFrame(np.random.randn(10, 5), columns=rng) data = {} for col in df.columns: @@ -812,7 +810,7 @@ def test_constructor_mrecarray(self): # call assert_frame_equal for all selections of 3 arrays for comb in itertools.combinations(arrays, 3): names, data = zip(*comb) - mrecs = mrecords.fromarrays(data, names=names) + mrecs = ma.mrecords.fromarrays(data, names=names) # fill the comb comb = {k: (v.filled() if hasattr(v, 'filled') else v) @@ 
-859,7 +857,7 @@ def test_constructor_scalar_inference(self): assert df['object'].dtype == np.object_ def test_constructor_arrays_and_scalars(self): - df = DataFrame({'a': randn(10), 'b': True}) + df = DataFrame({'a': np.random.randn(10), 'b': True}) exp = DataFrame({'a': df['a'].values, 'b': [True] * 10}) tm.assert_frame_equal(df, exp) @@ -875,11 +873,11 @@ def test_constructor_DataFrame(self): def test_constructor_more(self): # used to be in test_matrix.py - arr = randn(10) + arr = np.random.randn(10) dm = DataFrame(arr, columns=['A'], index=np.arange(10)) assert dm.values.ndim == 2 - arr = randn(0) + arr = np.random.randn(0) dm = DataFrame(arr) assert dm.values.ndim == 2 assert dm.values.ndim == 2 @@ -1140,8 +1138,8 @@ class CustomDict(dict): tm.assert_frame_equal(result, result_custom) def test_constructor_ragged(self): - data = {'A': randn(10), - 'B': randn(8)} + data = {'A': np.random.randn(10), + 'B': np.random.randn(8)} with pytest.raises(ValueError, match='arrays must all be same length'): DataFrame(data) diff --git a/pandas/tests/frame/test_indexing.py b/pandas/tests/frame/test_indexing.py index f113140261aea..19b8ae4eb6e0f 100644 --- a/pandas/tests/frame/test_indexing.py +++ b/pandas/tests/frame/test_indexing.py @@ -6,8 +6,6 @@ from warnings import catch_warnings, simplefilter import numpy as np -from numpy import nan -from numpy.random import randn import pytest from pandas._libs.tslib import iNaT @@ -50,9 +48,9 @@ def test_getitem(self): self.frame['random'] df = self.frame.copy() - df['$10'] = randn(len(df)) + df['$10'] = np.random.randn(len(df)) - ad = randn(len(df)) + ad = np.random.randn(len(df)) df['@awesome_domain'] = ad with pytest.raises(KeyError): @@ -103,7 +101,7 @@ def test_getitem_listlike(self, idx_type, levels): frame, missing = self.frame, 'food' else: # MultiIndex columns - frame = DataFrame(randn(8, 3), + frame = DataFrame(np.random.randn(8, 3), columns=Index([('foo', 'bar'), ('baz', 'qux'), ('peek', 'aboo')], name=('sth', 'sth2'))) @@ 
-338,7 +336,7 @@ def _checkit(lst): _checkit([False, False, False]) def test_getitem_boolean_iadd(self): - arr = randn(5, 5) + arr = np.random.randn(5, 5) df = DataFrame(arr.copy(), columns=['A', 'B', 'C', 'D', 'E']) @@ -419,7 +417,8 @@ def test_getitem_setitem_ix_negative_integers(self): df.ix[:, [-1]] # #1942 - a = DataFrame(randn(20, 2), index=[chr(x + 65) for x in range(20)]) + a = DataFrame(np.random.randn(20, 2), + index=[chr(x + 65) for x in range(20)]) with catch_warnings(record=True): simplefilter("ignore", DeprecationWarning) a.ix[-1] = a.ix[-2] @@ -459,10 +458,10 @@ def test_setitem(self): tm.assert_series_equal(series, self.frame['col6'], check_names=False) with pytest.raises(KeyError): - self.frame[randn(len(self.frame) + 1)] = 1 + self.frame[np.random.randn(len(self.frame) + 1)] = 1 # set ndarray - arr = randn(len(self.frame)) + arr = np.random.randn(len(self.frame)) self.frame['col9'] = arr assert (self.frame['col9'] == arr).all() @@ -497,7 +496,7 @@ def test_setitem(self): @pytest.mark.parametrize("dtype", ["int32", "int64", "float32", "float64"]) def test_setitem_dtype(self, dtype): - arr = randn(len(self.frame)) + arr = np.random.randn(len(self.frame)) self.frame[dtype] = np.array(arr, dtype=dtype) assert self.frame[dtype].dtype.name == dtype @@ -511,7 +510,7 @@ def test_setitem_always_copy(self): s = self.frame['A'].copy() self.frame['E'] = s - self.frame['E'][5:10] = nan + self.frame['E'][5:10] = np.nan assert notna(s[5:10]).all() def test_setitem_boolean(self): @@ -554,8 +553,8 @@ def test_setitem_boolean(self): # index with DataFrame mask = df > np.abs(df) expected = df.copy() - df[df > np.abs(df)] = nan - expected.values[mask.values] = nan + df[df > np.abs(df)] = np.nan + expected.values[mask.values] = np.nan assert_frame_equal(df, expected) # set from DataFrame @@ -911,7 +910,7 @@ def test_setitem_fancy_2d(self): expected = frame.copy() subidx = self.frame.index[[5, 4, 1]] - values = randn(3, 2) + values = np.random.randn(3, 2) with 
catch_warnings(record=True): simplefilter("ignore", DeprecationWarning) @@ -937,7 +936,7 @@ def test_setitem_fancy_2d(self): with catch_warnings(record=True): simplefilter("ignore", DeprecationWarning) expected2 = self.frame.copy() - arr = randn(5, len(frame.columns)) + arr = np.random.randn(5, len(frame.columns)) frame.ix[5:10] = arr expected2.values[5:10] = arr assert_frame_equal(frame, expected2) @@ -958,7 +957,7 @@ def test_setitem_fancy_2d(self): frame2 = self.frame.copy() expected = self.frame.copy() - values = randn(5, 2) + values = np.random.randn(5, 2) frame.ix[:5, ['A', 'B']] = values expected['A'][:5] = values[:, 0] @@ -1128,8 +1127,8 @@ def test_setitem_fancy_mixed_2d(self): assert_frame_equal(df, expected) def test_ix_align(self): - b = Series(randn(10), name=0).sort_values() - df_orig = DataFrame(randn(10, 4)) + b = Series(np.random.randn(10), name=0).sort_values() + df_orig = DataFrame(np.random.randn(10, 4)) df = df_orig.copy() with catch_warnings(record=True): @@ -1174,7 +1173,7 @@ def test_ix_align(self): def test_ix_frame_align(self): b = DataFrame(np.random.randn(3, 4)) - df_orig = DataFrame(randn(10, 4)) + df_orig = DataFrame(np.random.randn(10, 4)) df = df_orig.copy() with catch_warnings(record=True): @@ -1355,7 +1354,7 @@ def test_setitem_fancy_1d(self): with catch_warnings(record=True): simplefilter("ignore", DeprecationWarning) - vals = randn(5) + vals = np.random.randn(5) expected.values[5:10, 2] = vals frame.ix[5:10, 2] = vals assert_frame_equal(frame, expected) @@ -1412,7 +1411,7 @@ def test_setitem_fancy_scalar(self): ts = f[col] # noqa for idx in f.index[::5]: i = f.index.get_loc(idx) - val = randn() + val = np.random.randn() expected.values[i, j] = val ix[idx, col] = val @@ -1588,15 +1587,16 @@ def test_getitem_setitem_float_labels(self): assert (result == 0).values.all() def test_setitem_single_column_mixed(self): - df = DataFrame(randn(5, 3), index=['a', 'b', 'c', 'd', 'e'], + df = DataFrame(np.random.randn(5, 3), index=['a', 'b', 
'c', 'd', 'e'], columns=['foo', 'bar', 'baz']) df['str'] = 'qux' - df.loc[df.index[::2], 'str'] = nan - expected = np.array([nan, 'qux', nan, 'qux', nan], dtype=object) + df.loc[df.index[::2], 'str'] = np.nan + expected = np.array([np.nan, 'qux', np.nan, 'qux', np.nan], + dtype=object) assert_almost_equal(df['str'].values, expected) def test_setitem_single_column_mixed_datetime(self): - df = DataFrame(randn(5, 3), index=['a', 'b', 'c', 'd', 'e'], + df = DataFrame(np.random.randn(5, 3), index=['a', 'b', 'c', 'd', 'e'], columns=['foo', 'bar', 'baz']) df['timestamp'] = Timestamp('20010102') @@ -1611,17 +1611,17 @@ def test_setitem_single_column_mixed_datetime(self): assert isna(df.loc['b', 'timestamp']) # allow this syntax - df.loc['c', 'timestamp'] = nan + df.loc['c', 'timestamp'] = np.nan assert isna(df.loc['c', 'timestamp']) # allow this syntax - df.loc['d', :] = nan + df.loc['d', :] = np.nan assert not isna(df.loc['c', :]).all() # as of GH 3216 this will now work! # try to set with a list like item # pytest.raises( - # Exception, df.loc.__setitem__, ('d', 'timestamp'), [nan]) + # Exception, df.loc.__setitem__, ('d', 'timestamp'), [np.nan]) def test_setitem_mixed_datetime(self): # GH 9336 @@ -1852,7 +1852,8 @@ def test_set_value_resize(self): pytest.raises(ValueError, res3.set_value, 'foobar', 'baz', 'sam') def test_set_value_with_index_dtype_change(self): - df_orig = DataFrame(randn(3, 3), index=lrange(3), columns=list('ABC')) + df_orig = DataFrame(np.random.randn(3, 3), + index=lrange(3), columns=list('ABC')) # this is actually ambiguous as the 2 is interpreted as a positional # so column is not created @@ -2365,7 +2366,7 @@ def test_setitem_datetimeindex_tz(self, idxer, tz_naive_fixture): def test_at_time_between_time_datetimeindex(self): index = date_range("2012-01-01", "2012-01-05", freq='30min') - df = DataFrame(randn(len(index), 5), index=index) + df = DataFrame(np.random.randn(len(index), 5), index=index) akey = time(12, 0, 0) bkey = slice(time(13, 0, 0), 
time(14, 0, 0)) ainds = [24, 72, 120, 168] @@ -2463,7 +2464,7 @@ def test_xs_corner(self): assert_series_equal(result, expected) def test_xs_duplicates(self): - df = DataFrame(randn(5, 2), index=['b', 'b', 'c', 'b', 'a']) + df = DataFrame(np.random.randn(5, 2), index=['b', 'b', 'c', 'b', 'a']) cross = df.xs('c') exp = df.iloc[2] @@ -3119,7 +3120,7 @@ def test_mask_edge_case_1xN_frame(self): # GH4071 df = DataFrame([[1, 2]]) res = df.mask(DataFrame([[True, False]])) - expec = DataFrame([[nan, 2]]) + expec = DataFrame([[np.nan, 2]]) assert_frame_equal(res, expec) def test_mask_callable(self): diff --git a/pandas/tests/frame/test_missing.py b/pandas/tests/frame/test_missing.py index ac4b380034366..77a3d4785d295 100644 --- a/pandas/tests/frame/test_missing.py +++ b/pandas/tests/frame/test_missing.py @@ -7,7 +7,6 @@ import dateutil import numpy as np -from numpy import nan, random import pytest from pandas.compat import lrange @@ -39,8 +38,8 @@ class TestDataFrameMissingData(TestData): def test_dropEmptyRows(self): N = len(self.frame.index) - mat = random.randn(N) - mat[:5] = nan + mat = np.random.randn(N) + mat[:5] = np.nan frame = DataFrame({'foo': mat}, index=self.frame.index) original = Series(mat, index=self.frame.index, name='foo') @@ -61,8 +60,8 @@ def test_dropEmptyRows(self): def test_dropIncompleteRows(self): N = len(self.frame.index) - mat = random.randn(N) - mat[:5] = nan + mat = np.random.randn(N) + mat[:5] = np.nan frame = DataFrame({'foo': mat}, index=self.frame.index) frame['bar'] = 5 @@ -86,7 +85,7 @@ def test_dropIncompleteRows(self): def test_dropna(self): df = DataFrame(np.random.randn(6, 4)) - df[2][:2] = nan + df[2][:2] = np.nan dropped = df.dropna(axis=1) expected = df.loc[:, [0, 1, 3]] @@ -134,7 +133,7 @@ def test_dropna(self): dropped = df.dropna(axis=1, how='all') assert_frame_equal(dropped, df) - df[2] = nan + df[2] = np.nan dropped = df.dropna(axis=1, how='all') expected = df.loc[:, [0, 1, 3]] assert_frame_equal(dropped, expected) @@ -209,8 
+208,8 @@ def test_dropna_tz_aware_datetime(self): def test_fillna(self): tf = self.tsframe - tf.loc[tf.index[:5], 'A'] = nan - tf.loc[tf.index[-5:], 'A'] = nan + tf.loc[tf.index[:5], 'A'] = np.nan + tf.loc[tf.index[-5:], 'A'] = np.nan zero_filled = self.tsframe.fillna(0) assert (zero_filled.loc[zero_filled.index[:5], 'A'] == 0).all() @@ -222,8 +221,8 @@ def test_fillna(self): # mixed type mf = self.mixed_frame - mf.loc[mf.index[5:20], 'foo'] = nan - mf.loc[mf.index[-10:], 'A'] = nan + mf.loc[mf.index[5:20], 'foo'] = np.nan + mf.loc[mf.index[-10:], 'A'] = np.nan result = self.mixed_frame.fillna(value=0) result = self.mixed_frame.fillna(method='pad') @@ -232,7 +231,7 @@ def test_fillna(self): # mixed numeric (but no float16) mf = self.mixed_float.reindex(columns=['A', 'B', 'D']) - mf.loc[mf.index[-10:], 'A'] = nan + mf.loc[mf.index[-10:], 'A'] = np.nan result = mf.fillna(value=0) _check_mixed_float(result, dtype=dict(C=None)) @@ -453,15 +452,15 @@ def test_fillna_datetime_columns(self): tm.assert_frame_equal(result, expected) def test_ffill(self): - self.tsframe['A'][:5] = nan - self.tsframe['A'][-5:] = nan + self.tsframe['A'][:5] = np.nan + self.tsframe['A'][-5:] = np.nan assert_frame_equal(self.tsframe.ffill(), self.tsframe.fillna(method='ffill')) def test_bfill(self): - self.tsframe['A'][:5] = nan - self.tsframe['A'][-5:] = nan + self.tsframe['A'][:5] = np.nan + self.tsframe['A'][-5:] = np.nan assert_frame_equal(self.tsframe.bfill(), self.tsframe.fillna(method='bfill')) @@ -531,9 +530,9 @@ def test_fillna_inplace(self): tm.assert_frame_equal(df, expected) def test_fillna_dict_series(self): - df = DataFrame({'a': [nan, 1, 2, nan, nan], - 'b': [1, 2, 3, nan, nan], - 'c': [nan, 1, 2, 3, 4]}) + df = DataFrame({'a': [np.nan, 1, 2, np.nan, np.nan], + 'b': [1, 2, 3, np.nan, np.nan], + 'c': [np.nan, 1, 2, 3, 4]}) result = df.fillna({'a': 0, 'b': 5}) @@ -556,13 +555,13 @@ def test_fillna_dict_series(self): def test_fillna_dataframe(self): # GH 8377 - df = DataFrame({'a': 
[nan, 1, 2, nan, nan], - 'b': [1, 2, 3, nan, nan], - 'c': [nan, 1, 2, 3, 4]}, + df = DataFrame({'a': [np.nan, 1, 2, np.nan, np.nan], + 'b': [1, 2, 3, np.nan, np.nan], + 'c': [np.nan, 1, 2, 3, 4]}, index=list('VWXYZ')) # df2 may have different index and columns - df2 = DataFrame({'a': [nan, 10, 20, 30, 40], + df2 = DataFrame({'a': [np.nan, 10, 20, 30, 40], 'b': [50, 60, 70, 80, 90], 'foo': ['bar'] * 5}, index=list('VWXuZ')) @@ -570,9 +569,9 @@ def test_fillna_dataframe(self): result = df.fillna(df2) # only those columns and indices which are shared get filled - expected = DataFrame({'a': [nan, 1, 2, nan, 40], - 'b': [1, 2, 3, nan, 90], - 'c': [nan, 1, 2, 3, 4]}, + expected = DataFrame({'a': [np.nan, 1, 2, np.nan, 40], + 'b': [1, 2, 3, np.nan, 90], + 'c': [np.nan, 1, 2, 3, 4]}, index=list('VWXYZ')) assert_frame_equal(result, expected) @@ -611,8 +610,8 @@ def test_fillna_col_reordering(self): def test_fill_corner(self): mf = self.mixed_frame - mf.loc[mf.index[5:20], 'foo'] = nan - mf.loc[mf.index[-10:], 'A'] = nan + mf.loc[mf.index[5:20], 'foo'] = np.nan + mf.loc[mf.index[-10:], 'A'] = np.nan filled = self.mixed_frame.fillna(value=0) assert (filled.loc[filled.index[5:20], 'foo'] == 0).all() diff --git a/pandas/tests/frame/test_period.py b/pandas/tests/frame/test_period.py index d9392b68c8ce1..8b37d4ff2cf9e 100644 --- a/pandas/tests/frame/test_period.py +++ b/pandas/tests/frame/test_period.py @@ -1,7 +1,6 @@ from datetime import timedelta import numpy as np -from numpy.random import randn import pytest import pandas as pd @@ -19,7 +18,7 @@ class TestPeriodIndex(object): def test_as_frame_columns(self): rng = period_range('1/1/2000', periods=5) - df = DataFrame(randn(10, 5), columns=rng) + df = DataFrame(np.random.randn(10, 5), columns=rng) ts = df[rng[0]] tm.assert_series_equal(ts, df.iloc[:, 0]) @@ -32,7 +31,7 @@ def test_as_frame_columns(self): def test_frame_setitem(self): rng = period_range('1/1/2000', periods=5, name='index') - df = DataFrame(randn(5, 3), 
index=rng) + df = DataFrame(np.random.randn(5, 3), index=rng) df['Index'] = rng rs = Index(df['Index']) @@ -47,7 +46,7 @@ def test_frame_setitem(self): def test_frame_to_time_stamp(self): K = 5 index = period_range(freq='A', start='1/1/2001', end='12/1/2009') - df = DataFrame(randn(len(index), K), index=index) + df = DataFrame(np.random.randn(len(index), K), index=index) df['mix'] = 'a' exp_index = date_range('1/1/2001', end='12/31/2009', freq='A-DEC') diff --git a/pandas/tests/frame/test_query_eval.py b/pandas/tests/frame/test_query_eval.py index 1e06d5cad1374..9c4d306ea5720 100644 --- a/pandas/tests/frame/test_query_eval.py +++ b/pandas/tests/frame/test_query_eval.py @@ -5,7 +5,6 @@ import operator import numpy as np -from numpy.random import randn import pytest from pandas.compat import StringIO, lrange, range, zip @@ -153,7 +152,7 @@ def test_query_empty_string(self): def test_eval_resolvers_as_list(self): # GH 14095 - df = DataFrame(randn(10, 2), columns=list('ab')) + df = DataFrame(np.random.randn(10, 2), columns=list('ab')) dict1 = {'a': 1} dict2 = {'b': 2} assert (df.eval('a + b', resolvers=[dict1, dict2]) == @@ -169,7 +168,7 @@ def test_query_with_named_multiindex(self, parser, engine): a = np.random.choice(['red', 'green'], size=10) b = np.random.choice(['eggs', 'ham'], size=10) index = MultiIndex.from_arrays([a, b], names=['color', 'food']) - df = DataFrame(randn(10, 2), index=index) + df = DataFrame(np.random.randn(10, 2), index=index) ind = Series(df.index.get_level_values('color').values, index=index, name='color') @@ -218,7 +217,7 @@ def test_query_with_unnamed_multiindex(self, parser, engine): a = np.random.choice(['red', 'green'], size=10) b = np.random.choice(['eggs', 'ham'], size=10) index = MultiIndex.from_arrays([a, b]) - df = DataFrame(randn(10, 2), index=index) + df = DataFrame(np.random.randn(10, 2), index=index) ind = Series(df.index.get_level_values(0).values, index=index) res1 = df.query('ilevel_0 == "red"', parser=parser, engine=engine) 
@@ -309,7 +308,7 @@ def test_query_with_partially_named_multiindex(self, parser, engine): b = np.arange(10) index = MultiIndex.from_arrays([a, b]) index.names = [None, 'rating'] - df = DataFrame(randn(10, 2), index=index) + df = DataFrame(np.random.randn(10, 2), index=index) res = df.query('rating == 1', parser=parser, engine=engine) ind = Series(df.index.get_level_values('rating').values, index=index, name='rating') @@ -379,7 +378,7 @@ def teardown_class(cls): def test_date_query_with_attribute_access(self): engine, parser = self.engine, self.parser skip_if_no_pandas_parser(parser) - df = DataFrame(randn(5, 3)) + df = DataFrame(np.random.randn(5, 3)) df['dates1'] = date_range('1/1/2012', periods=5) df['dates2'] = date_range('1/1/2013', periods=5) df['dates3'] = date_range('1/1/2014', periods=5) @@ -390,7 +389,7 @@ def test_date_query_with_attribute_access(self): def test_date_query_no_attribute_access(self): engine, parser = self.engine, self.parser - df = DataFrame(randn(5, 3)) + df = DataFrame(np.random.randn(5, 3)) df['dates1'] = date_range('1/1/2012', periods=5) df['dates2'] = date_range('1/1/2013', periods=5) df['dates3'] = date_range('1/1/2014', periods=5) @@ -402,7 +401,7 @@ def test_date_query_no_attribute_access(self): def test_date_query_with_NaT(self): engine, parser = self.engine, self.parser n = 10 - df = DataFrame(randn(n, 3)) + df = DataFrame(np.random.randn(n, 3)) df['dates1'] = date_range('1/1/2012', periods=n) df['dates2'] = date_range('1/1/2013', periods=n) df['dates3'] = date_range('1/1/2014', periods=n) @@ -416,7 +415,7 @@ def test_date_query_with_NaT(self): def test_date_index_query(self): engine, parser = self.engine, self.parser n = 10 - df = DataFrame(randn(n, 3)) + df = DataFrame(np.random.randn(n, 3)) df['dates1'] = date_range('1/1/2012', periods=n) df['dates3'] = date_range('1/1/2014', periods=n) df.set_index('dates1', inplace=True, drop=True) @@ -428,7 +427,7 @@ def test_date_index_query(self): def test_date_index_query_with_NaT(self): 
engine, parser = self.engine, self.parser n = 10 - df = DataFrame(randn(n, 3)) + df = DataFrame(np.random.randn(n, 3)) df['dates1'] = date_range('1/1/2012', periods=n) df['dates3'] = date_range('1/1/2014', periods=n) df.iloc[0, 0] = pd.NaT @@ -603,7 +602,7 @@ def test_local_syntax(self): skip_if_no_pandas_parser(self.parser) engine, parser = self.engine, self.parser - df = DataFrame(randn(100, 10), columns=list('abcdefghij')) + df = DataFrame(np.random.randn(100, 10), columns=list('abcdefghij')) b = 1 expect = df[df.a < b] result = df.query('a < @b', engine=engine, parser=parser) @@ -617,7 +616,7 @@ def test_chained_cmp_and_in(self): skip_if_no_pandas_parser(self.parser) engine, parser = self.engine, self.parser cols = list('abc') - df = DataFrame(randn(100, len(cols)), columns=cols) + df = DataFrame(np.random.randn(100, len(cols)), columns=cols) res = df.query('a < b < c and a not in b not in c', engine=engine, parser=parser) ind = (df.a < df.b) & (df.b < df.c) & ~df.b.isin(df.a) & ~df.c.isin(df.b) # noqa @@ -712,7 +711,7 @@ def setup_class(cls): def test_date_query_no_attribute_access(self): engine, parser = self.engine, self.parser - df = DataFrame(randn(5, 3)) + df = DataFrame(np.random.randn(5, 3)) df['dates1'] = date_range('1/1/2012', periods=5) df['dates2'] = date_range('1/1/2013', periods=5) df['dates3'] = date_range('1/1/2014', periods=5) @@ -724,7 +723,7 @@ def test_date_query_no_attribute_access(self): def test_date_query_with_NaT(self): engine, parser = self.engine, self.parser n = 10 - df = DataFrame(randn(n, 3)) + df = DataFrame(np.random.randn(n, 3)) df['dates1'] = date_range('1/1/2012', periods=n) df['dates2'] = date_range('1/1/2013', periods=n) df['dates3'] = date_range('1/1/2014', periods=n) @@ -738,7 +737,7 @@ def test_date_query_with_NaT(self): def test_date_index_query(self): engine, parser = self.engine, self.parser n = 10 - df = DataFrame(randn(n, 3)) + df = DataFrame(np.random.randn(n, 3)) df['dates1'] = date_range('1/1/2012', periods=n) 
df['dates3'] = date_range('1/1/2014', periods=n) df.set_index('dates1', inplace=True, drop=True) @@ -750,7 +749,7 @@ def test_date_index_query(self): def test_date_index_query_with_NaT(self): engine, parser = self.engine, self.parser n = 10 - df = DataFrame(randn(n, 3)) + df = DataFrame(np.random.randn(n, 3)) df['dates1'] = date_range('1/1/2012', periods=n) df['dates3'] = date_range('1/1/2014', periods=n) df.iloc[0, 0] = pd.NaT @@ -763,7 +762,7 @@ def test_date_index_query_with_NaT(self): def test_date_index_query_with_NaT_duplicates(self): engine, parser = self.engine, self.parser n = 10 - df = DataFrame(randn(n, 3)) + df = DataFrame(np.random.randn(n, 3)) df['dates1'] = date_range('1/1/2012', periods=n) df['dates3'] = date_range('1/1/2014', periods=n) df.loc[np.random.rand(n) > 0.5, 'dates1'] = pd.NaT @@ -845,7 +844,7 @@ def test_query_builtin(self): class TestDataFrameQueryStrings(object): def test_str_query_method(self, parser, engine): - df = DataFrame(randn(10, 1), columns=['b']) + df = DataFrame(np.random.randn(10, 1), columns=['b']) df['strings'] = Series(list('aabbccddee')) expect = df[df.strings == 'a'] @@ -881,7 +880,7 @@ def test_str_query_method(self, parser, engine): assert_frame_equal(res, df[~df.strings.isin(['a'])]) def test_str_list_query_method(self, parser, engine): - df = DataFrame(randn(10, 1), columns=['b']) + df = DataFrame(np.random.randn(10, 1), columns=['b']) df['strings'] = Series(list('aabbccddee')) expect = df[df.strings.isin(['a', 'b'])] @@ -1017,7 +1016,7 @@ def test_query_string_scalar_variable(self, parser, engine): class TestDataFrameEvalWithFrame(object): def setup_method(self, method): - self.frame = DataFrame(randn(10, 3), columns=list('abc')) + self.frame = DataFrame(np.random.randn(10, 3), columns=list('abc')) def teardown_method(self, method): del self.frame diff --git a/pandas/tests/frame/test_rank.py b/pandas/tests/frame/test_rank.py index 0a9801ea8ed61..10c42e0d1a1cf 100644 --- a/pandas/tests/frame/test_rank.py +++ 
b/pandas/tests/frame/test_rank.py @@ -3,7 +3,6 @@ from distutils.version import LooseVersion import numpy as np -from numpy import nan import pytest from pandas import DataFrame, Series @@ -13,16 +12,16 @@ class TestRank(TestData): - s = Series([1, 3, 4, 2, nan, 2, 1, 5, nan, 3]) + s = Series([1, 3, 4, 2, np.nan, 2, 1, 5, np.nan, 3]) df = DataFrame({'A': s, 'B': s}) results = { - 'average': np.array([1.5, 5.5, 7.0, 3.5, nan, - 3.5, 1.5, 8.0, nan, 5.5]), - 'min': np.array([1, 5, 7, 3, nan, 3, 1, 8, nan, 5]), - 'max': np.array([2, 6, 7, 4, nan, 4, 2, 8, nan, 6]), - 'first': np.array([1, 5, 7, 3, nan, 4, 2, 8, nan, 6]), - 'dense': np.array([1, 3, 4, 2, nan, 2, 1, 5, nan, 3]), + 'average': np.array([1.5, 5.5, 7.0, 3.5, np.nan, + 3.5, 1.5, 8.0, np.nan, 5.5]), + 'min': np.array([1, 5, 7, 3, np.nan, 3, 1, 8, np.nan, 5]), + 'max': np.array([2, 6, 7, 4, np.nan, 4, 2, 8, np.nan, 6]), + 'first': np.array([1, 5, 7, 3, np.nan, 4, 2, 8, np.nan, 6]), + 'dense': np.array([1, 3, 4, 2, np.nan, 2, 1, 5, np.nan, 3]), } @pytest.fixture(params=['average', 'min', 'max', 'first', 'dense']) @@ -87,27 +86,27 @@ def test_rank2(self): tm.assert_frame_equal(result, expected) df = DataFrame([['b', np.nan, 'a'], ['a', 'c', 'b']]) - expected = DataFrame([[2.0, nan, 1.0], [1.0, 3.0, 2.0]]) + expected = DataFrame([[2.0, np.nan, 1.0], [1.0, 3.0, 2.0]]) result = df.rank(1, numeric_only=False) tm.assert_frame_equal(result, expected) - expected = DataFrame([[2.0, nan, 1.0], [1.0, 1.0, 2.0]]) + expected = DataFrame([[2.0, np.nan, 1.0], [1.0, 1.0, 2.0]]) result = df.rank(0, numeric_only=False) tm.assert_frame_equal(result, expected) # f7u12, this does not work without extensive workaround - data = [[datetime(2001, 1, 5), nan, datetime(2001, 1, 2)], + data = [[datetime(2001, 1, 5), np.nan, datetime(2001, 1, 2)], [datetime(2000, 1, 2), datetime(2000, 1, 3), datetime(2000, 1, 1)]] df = DataFrame(data) # check the rank - expected = DataFrame([[2., nan, 1.], + expected = DataFrame([[2., np.nan, 1.], [2., 3., 
1.]]) result = df.rank(1, numeric_only=False, ascending=True) tm.assert_frame_equal(result, expected) - expected = DataFrame([[1., nan, 2.], + expected = DataFrame([[1., np.nan, 2.], [2., 1., 3.]]) result = df.rank(1, numeric_only=False, ascending=False) tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/frame/test_replace.py b/pandas/tests/frame/test_replace.py index 87fd5f2e74a9a..219f7a1585fc2 100644 --- a/pandas/tests/frame/test_replace.py +++ b/pandas/tests/frame/test_replace.py @@ -6,7 +6,6 @@ import re import numpy as np -from numpy import nan import pytest from pandas.compat import StringIO, lrange, range, zip @@ -20,24 +19,24 @@ class TestDataFrameReplace(TestData): def test_replace_inplace(self): - self.tsframe['A'][:5] = nan - self.tsframe['A'][-5:] = nan + self.tsframe['A'][:5] = np.nan + self.tsframe['A'][-5:] = np.nan tsframe = self.tsframe.copy() - tsframe.replace(nan, 0, inplace=True) + tsframe.replace(np.nan, 0, inplace=True) assert_frame_equal(tsframe, self.tsframe.fillna(0)) # mixed type mf = self.mixed_frame - mf.iloc[5:20, mf.columns.get_loc('foo')] = nan - mf.iloc[-10:, mf.columns.get_loc('A')] = nan + mf.iloc[5:20, mf.columns.get_loc('foo')] = np.nan + mf.iloc[-10:, mf.columns.get_loc('A')] = np.nan result = self.mixed_frame.replace(np.nan, 0) expected = self.mixed_frame.fillna(value=0) assert_frame_equal(result, expected) tsframe = self.tsframe.copy() - tsframe.replace([nan], [0], inplace=True) + tsframe.replace([np.nan], [0], inplace=True) assert_frame_equal(tsframe, self.tsframe.fillna(0)) def test_regex_replace_scalar(self): @@ -49,11 +48,11 @@ def test_regex_replace_scalar(self): # simplest cases # regex -> value # obj frame - res = dfobj.replace(r'\s*\.\s*', nan, regex=True) + res = dfobj.replace(r'\s*\.\s*', np.nan, regex=True) assert_frame_equal(dfobj, res.fillna('.')) # mixed - res = dfmix.replace(r'\s*\.\s*', nan, regex=True) + res = dfmix.replace(r'\s*\.\s*', np.nan, regex=True) assert_frame_equal(dfmix, 
res.fillna('.')) # regex -> regex @@ -72,11 +71,11 @@ def test_regex_replace_scalar(self): assert_frame_equal(res, expec) # everything with compiled regexs as well - res = dfobj.replace(re.compile(r'\s*\.\s*'), nan, regex=True) + res = dfobj.replace(re.compile(r'\s*\.\s*'), np.nan, regex=True) assert_frame_equal(dfobj, res.fillna('.')) # mixed - res = dfmix.replace(re.compile(r'\s*\.\s*'), nan, regex=True) + res = dfmix.replace(re.compile(r'\s*\.\s*'), np.nan, regex=True) assert_frame_equal(dfmix, res.fillna('.')) # regex -> regex @@ -116,12 +115,12 @@ def test_regex_replace_scalar_inplace(self): # regex -> value # obj frame res = dfobj.copy() - res.replace(r'\s*\.\s*', nan, regex=True, inplace=True) + res.replace(r'\s*\.\s*', np.nan, regex=True, inplace=True) assert_frame_equal(dfobj, res.fillna('.')) # mixed res = dfmix.copy() - res.replace(r'\s*\.\s*', nan, regex=True, inplace=True) + res.replace(r'\s*\.\s*', np.nan, regex=True, inplace=True) assert_frame_equal(dfmix, res.fillna('.')) # regex -> regex @@ -143,12 +142,12 @@ def test_regex_replace_scalar_inplace(self): # everything with compiled regexs as well res = dfobj.copy() - res.replace(re.compile(r'\s*\.\s*'), nan, regex=True, inplace=True) + res.replace(re.compile(r'\s*\.\s*'), np.nan, regex=True, inplace=True) assert_frame_equal(dfobj, res.fillna('.')) # mixed res = dfmix.copy() - res.replace(re.compile(r'\s*\.\s*'), nan, regex=True, inplace=True) + res.replace(re.compile(r'\s*\.\s*'), np.nan, regex=True, inplace=True) assert_frame_equal(dfmix, res.fillna('.')) # regex -> regex @@ -171,12 +170,12 @@ def test_regex_replace_scalar_inplace(self): assert_frame_equal(res, expec) res = dfobj.copy() - res.replace(regex=r'\s*\.\s*', value=nan, inplace=True) + res.replace(regex=r'\s*\.\s*', value=np.nan, inplace=True) assert_frame_equal(dfobj, res.fillna('.')) # mixed res = dfmix.copy() - res.replace(regex=r'\s*\.\s*', value=nan, inplace=True) + res.replace(regex=r'\s*\.\s*', value=np.nan, inplace=True) 
assert_frame_equal(dfmix, res.fillna('.')) # regex -> regex @@ -198,12 +197,12 @@ def test_regex_replace_scalar_inplace(self): # everything with compiled regexs as well res = dfobj.copy() - res.replace(regex=re.compile(r'\s*\.\s*'), value=nan, inplace=True) + res.replace(regex=re.compile(r'\s*\.\s*'), value=np.nan, inplace=True) assert_frame_equal(dfobj, res.fillna('.')) # mixed res = dfmix.copy() - res.replace(regex=re.compile(r'\s*\.\s*'), value=nan, inplace=True) + res.replace(regex=re.compile(r'\s*\.\s*'), value=np.nan, inplace=True) assert_frame_equal(dfmix, res.fillna('.')) # regex -> regex @@ -232,9 +231,9 @@ def test_regex_replace_list_obj(self): # lists of regexes and values # list of [re1, re2, ..., reN] -> [v1, v2, ..., vN] to_replace_res = [r'\s*\.\s*', r'e|f|g'] - values = [nan, 'crap'] + values = [np.nan, 'crap'] res = dfobj.replace(to_replace_res, values, regex=True) - expec = DataFrame({'a': ['a', 'b', nan, nan], 'b': ['crap'] * 3 + + expec = DataFrame({'a': ['a', 'b', np.nan, np.nan], 'b': ['crap'] * 3 + ['h'], 'c': ['h', 'crap', 'l', 'o']}) assert_frame_equal(res, expec) @@ -276,10 +275,10 @@ def test_regex_replace_list_obj_inplace(self): # lists of regexes and values # list of [re1, re2, ..., reN] -> [v1, v2, ..., vN] to_replace_res = [r'\s*\.\s*', r'e|f|g'] - values = [nan, 'crap'] + values = [np.nan, 'crap'] res = dfobj.copy() res.replace(to_replace_res, values, inplace=True, regex=True) - expec = DataFrame({'a': ['a', 'b', nan, nan], 'b': ['crap'] * 3 + + expec = DataFrame({'a': ['a', 'b', np.nan, np.nan], 'b': ['crap'] * 3 + ['h'], 'c': ['h', 'crap', 'l', 'o']}) assert_frame_equal(res, expec) @@ -323,11 +322,11 @@ def test_regex_replace_list_mixed(self): # lists of regexes and values # list of [re1, re2, ..., reN] -> [v1, v2, ..., vN] to_replace_res = [r'\s*\.\s*', r'a'] - values = [nan, 'crap'] + values = [np.nan, 'crap'] mix2 = {'a': lrange(4), 'b': list('ab..'), 'c': list('halo')} dfmix2 = DataFrame(mix2) res = 
dfmix2.replace(to_replace_res, values, regex=True) - expec = DataFrame({'a': mix2['a'], 'b': ['crap', 'b', nan, nan], + expec = DataFrame({'a': mix2['a'], 'b': ['crap', 'b', np.nan, np.nan], 'c': ['h', 'crap', 'l', 'o']}) assert_frame_equal(res, expec) @@ -361,10 +360,10 @@ def test_regex_replace_list_mixed_inplace(self): # lists of regexes and values # list of [re1, re2, ..., reN] -> [v1, v2, ..., vN] to_replace_res = [r'\s*\.\s*', r'a'] - values = [nan, 'crap'] + values = [np.nan, 'crap'] res = dfmix.copy() res.replace(to_replace_res, values, inplace=True, regex=True) - expec = DataFrame({'a': mix['a'], 'b': ['crap', 'b', nan, nan]}) + expec = DataFrame({'a': mix['a'], 'b': ['crap', 'b', np.nan, np.nan]}) assert_frame_equal(res, expec) # list of [re1, re2, ..., reN] -> [re1, re2, .., reN] @@ -394,7 +393,7 @@ def test_regex_replace_list_mixed_inplace(self): assert_frame_equal(res, expec) def test_regex_replace_dict_mixed(self): - mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']} + mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', np.nan, 'd']} dfmix = DataFrame(mix) # dicts @@ -403,10 +402,11 @@ def test_regex_replace_dict_mixed(self): # list of dicts {re1: v1, re2: v2, ..., re3: v3}, search the whole # frame - res = dfmix.replace({'b': r'\s*\.\s*'}, {'b': nan}, regex=True) + res = dfmix.replace({'b': r'\s*\.\s*'}, {'b': np.nan}, regex=True) res2 = dfmix.copy() - res2.replace({'b': r'\s*\.\s*'}, {'b': nan}, inplace=True, regex=True) - expec = DataFrame({'a': mix['a'], 'b': ['a', 'b', nan, nan], 'c': + res2.replace({'b': r'\s*\.\s*'}, {'b': np.nan}, + inplace=True, regex=True) + expec = DataFrame({'a': mix['a'], 'b': ['a', 'b', np.nan, np.nan], 'c': mix['c']}) assert_frame_equal(res, expec) assert_frame_equal(res2, expec) @@ -433,33 +433,33 @@ def test_regex_replace_dict_mixed(self): # scalar -> dict # to_replace regex, {value: value} - expec = DataFrame({'a': mix['a'], 'b': [nan, 'b', '.', '.'], 'c': + expec = DataFrame({'a': mix['a'], 
'b': [np.nan, 'b', '.', '.'], 'c': mix['c']}) - res = dfmix.replace('a', {'b': nan}, regex=True) + res = dfmix.replace('a', {'b': np.nan}, regex=True) res2 = dfmix.copy() - res2.replace('a', {'b': nan}, regex=True, inplace=True) + res2.replace('a', {'b': np.nan}, regex=True, inplace=True) assert_frame_equal(res, expec) assert_frame_equal(res2, expec) - res = dfmix.replace('a', {'b': nan}, regex=True) + res = dfmix.replace('a', {'b': np.nan}, regex=True) res2 = dfmix.copy() - res2.replace(regex='a', value={'b': nan}, inplace=True) - expec = DataFrame({'a': mix['a'], 'b': [nan, 'b', '.', '.'], 'c': + res2.replace(regex='a', value={'b': np.nan}, inplace=True) + expec = DataFrame({'a': mix['a'], 'b': [np.nan, 'b', '.', '.'], 'c': mix['c']}) assert_frame_equal(res, expec) assert_frame_equal(res2, expec) def test_regex_replace_dict_nested(self): # nested dicts will not work until this is implemented for Series - mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']} + mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', np.nan, 'd']} dfmix = DataFrame(mix) - res = dfmix.replace({'b': {r'\s*\.\s*': nan}}, regex=True) + res = dfmix.replace({'b': {r'\s*\.\s*': np.nan}}, regex=True) res2 = dfmix.copy() res4 = dfmix.copy() - res2.replace({'b': {r'\s*\.\s*': nan}}, inplace=True, regex=True) - res3 = dfmix.replace(regex={'b': {r'\s*\.\s*': nan}}) - res4.replace(regex={'b': {r'\s*\.\s*': nan}}, inplace=True) - expec = DataFrame({'a': mix['a'], 'b': ['a', 'b', nan, nan], 'c': + res2.replace({'b': {r'\s*\.\s*': np.nan}}, inplace=True, regex=True) + res3 = dfmix.replace(regex={'b': {r'\s*\.\s*': np.nan}}) + res4.replace(regex={'b': {r'\s*\.\s*': np.nan}}, inplace=True) + expec = DataFrame({'a': mix['a'], 'b': ['a', 'b', np.nan, np.nan], 'c': mix['c']}) assert_frame_equal(res, expec) assert_frame_equal(res2, expec) @@ -473,23 +473,23 @@ def test_regex_replace_dict_nested_gh4115(self): assert_frame_equal(result, expected) def 
test_regex_replace_list_to_scalar(self): - mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']} + mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', np.nan, 'd']} df = DataFrame(mix) - expec = DataFrame({'a': mix['a'], 'b': np.array([nan] * 4), - 'c': [nan, nan, nan, 'd']}) + expec = DataFrame({'a': mix['a'], 'b': np.array([np.nan] * 4), + 'c': [np.nan, np.nan, np.nan, 'd']}) - res = df.replace([r'\s*\.\s*', 'a|b'], nan, regex=True) + res = df.replace([r'\s*\.\s*', 'a|b'], np.nan, regex=True) res2 = df.copy() res3 = df.copy() - res2.replace([r'\s*\.\s*', 'a|b'], nan, regex=True, inplace=True) - res3.replace(regex=[r'\s*\.\s*', 'a|b'], value=nan, inplace=True) + res2.replace([r'\s*\.\s*', 'a|b'], np.nan, regex=True, inplace=True) + res3.replace(regex=[r'\s*\.\s*', 'a|b'], value=np.nan, inplace=True) assert_frame_equal(res, expec) assert_frame_equal(res2, expec) assert_frame_equal(res3, expec) def test_regex_replace_str_to_numeric(self): # what happens when you try to replace a numeric value with a regex? 
- mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']} + mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', np.nan, 'd']} df = DataFrame(mix) res = df.replace(r'\s*\.\s*', 0, regex=True) res2 = df.copy() @@ -503,7 +503,7 @@ def test_regex_replace_str_to_numeric(self): assert_frame_equal(res3, expec) def test_regex_replace_regex_list_to_numeric(self): - mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']} + mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', np.nan, 'd']} df = DataFrame(mix) res = df.replace([r'\s*\.\s*', 'b'], 0, regex=True) res2 = df.copy() @@ -511,30 +511,30 @@ def test_regex_replace_regex_list_to_numeric(self): res3 = df.copy() res3.replace(regex=[r'\s*\.\s*', 'b'], value=0, inplace=True) expec = DataFrame({'a': mix['a'], 'b': ['a', 0, 0, 0], 'c': ['a', 0, - nan, + np.nan, 'd']}) assert_frame_equal(res, expec) assert_frame_equal(res2, expec) assert_frame_equal(res3, expec) def test_regex_replace_series_of_regexes(self): - mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']} + mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', np.nan, 'd']} df = DataFrame(mix) s1 = Series({'b': r'\s*\.\s*'}) - s2 = Series({'b': nan}) + s2 = Series({'b': np.nan}) res = df.replace(s1, s2, regex=True) res2 = df.copy() res2.replace(s1, s2, inplace=True, regex=True) res3 = df.copy() res3.replace(regex=s1, value=s2, inplace=True) - expec = DataFrame({'a': mix['a'], 'b': ['a', 'b', nan, nan], 'c': + expec = DataFrame({'a': mix['a'], 'b': ['a', 'b', np.nan, np.nan], 'c': mix['c']}) assert_frame_equal(res, expec) assert_frame_equal(res2, expec) assert_frame_equal(res3, expec) def test_regex_replace_numeric_to_object_conversion(self): - mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']} + mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', np.nan, 'd']} df = DataFrame(mix) expec = DataFrame({'a': ['a', 1, 2, 3], 'b': mix['b'], 'c': mix['c']}) res = df.replace(0, 'a') @@ -549,15 
+549,15 @@ def test_replace_regex_metachar(self, metachar): assert_frame_equal(result, expected) def test_replace(self): - self.tsframe['A'][:5] = nan - self.tsframe['A'][-5:] = nan + self.tsframe['A'][:5] = np.nan + self.tsframe['A'][-5:] = np.nan - zero_filled = self.tsframe.replace(nan, -1e8) + zero_filled = self.tsframe.replace(np.nan, -1e8) assert_frame_equal(zero_filled, self.tsframe.fillna(-1e8)) - assert_frame_equal(zero_filled.replace(-1e8, nan), self.tsframe) + assert_frame_equal(zero_filled.replace(-1e8, np.nan), self.tsframe) - self.tsframe['A'][:5] = nan - self.tsframe['A'][-5:] = nan + self.tsframe['A'][:5] = np.nan + self.tsframe['A'][-5:] = np.nan self.tsframe['B'][:5] = -1e8 # empty @@ -580,9 +580,9 @@ def test_replace_list(self): # lists of regexes and values # list of [v1, v2, ..., vN] -> [v1, v2, ..., vN] to_replace_res = [r'.', r'e'] - values = [nan, 'crap'] + values = [np.nan, 'crap'] res = dfobj.replace(to_replace_res, values) - expec = DataFrame({'a': ['a', 'b', nan, nan], + expec = DataFrame({'a': ['a', 'b', np.nan, np.nan], 'b': ['crap', 'f', 'g', 'h'], 'c': ['h', 'crap', 'l', 'o']}) assert_frame_equal(res, expec) @@ -644,18 +644,18 @@ def test_replace_convert(self): def test_replace_mixed(self): mf = self.mixed_frame - mf.iloc[5:20, mf.columns.get_loc('foo')] = nan - mf.iloc[-10:, mf.columns.get_loc('A')] = nan + mf.iloc[5:20, mf.columns.get_loc('foo')] = np.nan + mf.iloc[-10:, mf.columns.get_loc('A')] = np.nan result = self.mixed_frame.replace(np.nan, -18) expected = self.mixed_frame.fillna(value=-18) assert_frame_equal(result, expected) - assert_frame_equal(result.replace(-18, nan), self.mixed_frame) + assert_frame_equal(result.replace(-18, np.nan), self.mixed_frame) result = self.mixed_frame.replace(np.nan, -1e8) expected = self.mixed_frame.fillna(value=-1e8) assert_frame_equal(result, expected) - assert_frame_equal(result.replace(-1e8, nan), self.mixed_frame) + assert_frame_equal(result.replace(-1e8, np.nan), self.mixed_frame) # int 
block upcasting df = DataFrame({'A': Series([1.0, 2.0], dtype='float64'), @@ -726,14 +726,14 @@ def test_replace_value_is_none(self): orig_value = self.tsframe.iloc[0, 0] orig2 = self.tsframe.iloc[1, 0] - self.tsframe.iloc[0, 0] = nan + self.tsframe.iloc[0, 0] = np.nan self.tsframe.iloc[1, 0] = 1 - result = self.tsframe.replace(to_replace={nan: 0}) - expected = self.tsframe.T.replace(to_replace={nan: 0}).T + result = self.tsframe.replace(to_replace={np.nan: 0}) + expected = self.tsframe.T.replace(to_replace={np.nan: 0}).T assert_frame_equal(result, expected) - result = self.tsframe.replace(to_replace={nan: 0, 1: -1e8}) + result = self.tsframe.replace(to_replace={np.nan: 0, 1: -1e8}) tsframe = self.tsframe.copy() tsframe.iloc[0, 0] = 0 tsframe.iloc[1, 0] = -1e8 @@ -746,19 +746,19 @@ def test_replace_for_new_dtypes(self): # dtypes tsframe = self.tsframe.copy().astype(np.float32) - tsframe['A'][:5] = nan - tsframe['A'][-5:] = nan + tsframe['A'][:5] = np.nan + tsframe['A'][-5:] = np.nan - zero_filled = tsframe.replace(nan, -1e8) + zero_filled = tsframe.replace(np.nan, -1e8) assert_frame_equal(zero_filled, tsframe.fillna(-1e8)) - assert_frame_equal(zero_filled.replace(-1e8, nan), tsframe) + assert_frame_equal(zero_filled.replace(-1e8, np.nan), tsframe) - tsframe['A'][:5] = nan - tsframe['A'][-5:] = nan + tsframe['A'][:5] = np.nan + tsframe['A'][-5:] = np.nan tsframe['B'][:5] = -1e8 b = tsframe['B'] - b[b == -1e8] = nan + b[b == -1e8] = np.nan tsframe['B'] = b result = tsframe.fillna(method='bfill') assert_frame_equal(result, tsframe.fillna(method='bfill')) @@ -875,10 +875,10 @@ def test_replace_series_no_regex(self): assert_series_equal(result, expected) def test_replace_dict_tuple_list_ordering_remains_the_same(self): - df = DataFrame(dict(A=[nan, 1])) - res1 = df.replace(to_replace={nan: 0, 1: -1e8}) - res2 = df.replace(to_replace=(1, nan), value=[-1e8, 0]) - res3 = df.replace(to_replace=[1, nan], value=[-1e8, 0]) + df = DataFrame(dict(A=[np.nan, 1])) + res1 = 
df.replace(to_replace={np.nan: 0, 1: -1e8}) + res2 = df.replace(to_replace=(1, np.nan), value=[-1e8, 0]) + res3 = df.replace(to_replace=[1, np.nan], value=[-1e8, 0]) expected = DataFrame({'A': [0, -1e8]}) assert_frame_equal(res1, res2) @@ -1062,7 +1062,7 @@ def test_replace_datetimetz(self): def test_replace_with_empty_dictlike(self): # GH 15289 - mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']} + mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', np.nan, 'd']} df = DataFrame(mix) assert_frame_equal(df, df.replace({})) assert_frame_equal(df, df.replace(Series([]))) @@ -1072,31 +1072,31 @@ def test_replace_with_empty_dictlike(self): @pytest.mark.parametrize("to_replace, method, expected", [ (0, 'bfill', {'A': [1, 1, 2], - 'B': [5, nan, 7], + 'B': [5, np.nan, 7], 'C': ['a', 'b', 'c']}), - (nan, 'bfill', {'A': [0, 1, 2], - 'B': [5.0, 7.0, 7.0], - 'C': ['a', 'b', 'c']}), + (np.nan, 'bfill', {'A': [0, 1, 2], + 'B': [5.0, 7.0, 7.0], + 'C': ['a', 'b', 'c']}), ('d', 'ffill', {'A': [0, 1, 2], - 'B': [5, nan, 7], + 'B': [5, np.nan, 7], 'C': ['a', 'b', 'c']}), ([0, 2], 'bfill', {'A': [1, 1, 2], - 'B': [5, nan, 7], + 'B': [5, np.nan, 7], 'C': ['a', 'b', 'c']}), ([1, 2], 'pad', {'A': [0, 0, 0], - 'B': [5, nan, 7], + 'B': [5, np.nan, 7], 'C': ['a', 'b', 'c']}), ((1, 2), 'bfill', {'A': [0, 2, 2], - 'B': [5, nan, 7], + 'B': [5, np.nan, 7], 'C': ['a', 'b', 'c']}), (['b', 'c'], 'ffill', {'A': [0, 1, 2], - 'B': [5, nan, 7], + 'B': [5, np.nan, 7], 'C': ['a', 'a', 'a']}), ]) def test_replace_method(self, to_replace, method, expected): # GH 19632 df = DataFrame({'A': [0, 1, 2], - 'B': [5, nan, 7], + 'B': [5, np.nan, 7], 'C': ['a', 'b', 'c']}) result = df.replace(to_replace=to_replace, value=None, method=method) diff --git a/pandas/tests/frame/test_reshape.py b/pandas/tests/frame/test_reshape.py index f2f6944a21e03..28222a82945be 100644 --- a/pandas/tests/frame/test_reshape.py +++ b/pandas/tests/frame/test_reshape.py @@ -7,8 +7,6 @@ from warnings import 
catch_warnings, simplefilter import numpy as np -from numpy import nan -from numpy.random import randn import pytest from pandas.compat import u @@ -388,7 +386,7 @@ def test_stack_mixed_levels(self): ('A', 'dog', 'short'), ('B', 'dog', 'short')], names=['exp', 'animal', 'hair_length'] ) - df = DataFrame(randn(4, 4), columns=columns) + df = DataFrame(np.random.randn(4, 4), columns=columns) animal_hair_stacked = df.stack(level=['animal', 'hair_length']) exp_hair_stacked = df.stack(level=['exp', 'hair_length']) @@ -420,7 +418,7 @@ def test_stack_int_level_names(self): ('A', 'dog', 'short'), ('B', 'dog', 'short')], names=['exp', 'animal', 'hair_length'] ) - df = DataFrame(randn(4, 4), columns=columns) + df = DataFrame(np.random.randn(4, 4), columns=columns) exp_animal_stacked = df.stack(level=['exp', 'animal']) animal_hair_stacked = df.stack(level=['animal', 'hair_length']) @@ -634,7 +632,6 @@ def test_unstack_unused_level(self, cols): def test_unstack_nan_index(self): # GH7466 cast = lambda val: '{0:1}'.format('' if val != val else val) - nan = np.nan def verify(df): mk_list = lambda a: list(a) if isinstance(a, tuple) else [a] @@ -645,7 +642,7 @@ def verify(df): right = sorted(list(map(cast, right))) assert left == right - df = DataFrame({'jim': ['a', 'b', nan, 'd'], + df = DataFrame({'jim': ['a', 'b', np.nan, 'd'], 'joe': ['w', 'x', 'y', 'z'], 'jolie': ['a.w', 'b.x', ' .y', 'd.z']}) @@ -660,10 +657,10 @@ def verify(df): assert udf.notna().values.sum() == len(df) verify(udf['jolie']) - df = DataFrame({'1st': ['d'] * 3 + [nan] * 5 + ['a'] * 2 + + df = DataFrame({'1st': ['d'] * 3 + [np.nan] * 5 + ['a'] * 2 + ['c'] * 3 + ['e'] * 2 + ['b'] * 5, - '2nd': ['y'] * 2 + ['w'] * 3 + [nan] * 3 + - ['z'] * 4 + [nan] * 3 + ['x'] * 3 + [nan] * 2, + '2nd': ['y'] * 2 + ['w'] * 3 + [np.nan] * 3 + + ['z'] * 4 + [np.nan] * 3 + ['x'] * 3 + [np.nan] * 2, '3rd': [67, 39, 53, 72, 57, 80, 31, 18, 11, 30, 59, 50, 62, 59, 76, 52, 14, 53, 60, 51]}) @@ -685,10 +682,10 @@ def verify(df): 
df.iloc[3, 1] = np.NaN left = df.set_index(['A', 'B']).unstack(0) - vals = [[3, 0, 1, 2, nan, nan, nan, nan], - [nan, nan, nan, nan, 4, 5, 6, 7]] + vals = [[3, 0, 1, 2, np.nan, np.nan, np.nan, np.nan], + [np.nan, np.nan, np.nan, np.nan, 4, 5, 6, 7]] vals = list(map(list, zip(*vals))) - idx = Index([nan, 0, 1, 2, 4, 5, 6, 7], name='B') + idx = Index([np.nan, 0, 1, 2, 4, 5, 6, 7], name='B') cols = MultiIndex(levels=[['C'], ['a', 'b']], codes=[[0, 0], [0, 1]], names=[None, 'A']) @@ -701,11 +698,11 @@ def verify(df): df.iloc[2, 1] = np.NaN left = df.set_index(['A', 'B']).unstack(0) - vals = [[2, nan], [0, 4], [1, 5], [nan, 6], [3, 7]] + vals = [[2, np.nan], [0, 4], [1, 5], [np.nan, 6], [3, 7]] cols = MultiIndex(levels=[['C'], ['a', 'b']], codes=[[0, 0], [0, 1]], names=[None, 'A']) - idx = Index([nan, 0, 1, 2, 3], name='B') + idx = Index([np.nan, 0, 1, 2, 3], name='B') right = DataFrame(vals, columns=cols, index=idx) assert_frame_equal(left, right) @@ -714,11 +711,11 @@ def verify(df): df.iloc[3, 1] = np.NaN left = df.set_index(['A', 'B']).unstack(0) - vals = [[3, nan], [0, 4], [1, 5], [2, 6], [nan, 7]] + vals = [[3, np.nan], [0, 4], [1, 5], [2, 6], [np.nan, 7]] cols = MultiIndex(levels=[['C'], ['a', 'b']], codes=[[0, 0], [0, 1]], names=[None, 'A']) - idx = Index([nan, 0, 1, 2, 3], name='B') + idx = Index([np.nan, 0, 1, 2, 3], name='B') right = DataFrame(vals, columns=cols, index=idx) assert_frame_equal(left, right) @@ -731,7 +728,7 @@ def verify(df): df.iloc[3, 1] = np.NaN left = df.set_index(['A', 'B']).unstack() - vals = np.array([[3, 0, 1, 2, nan, 4], [nan, 5, 6, 7, 8, 9]]) + vals = np.array([[3, 0, 1, 2, np.nan, 4], [np.nan, 5, 6, 7, 8, 9]]) idx = Index(['a', 'b'], name='A') cols = MultiIndex(levels=[['C'], date_range('2012-01-01', periods=5)], codes=[[0, 0, 0, 0, 0, 0], [-1, 0, 1, 2, 3, 4]], @@ -741,9 +738,9 @@ def verify(df): assert_frame_equal(left, right) # GH4862 - vals = [['Hg', nan, nan, 680585148], - ['U', 0.0, nan, 680585148], - ['Pb', 7.07e-06, nan, 
680585148], + vals = [['Hg', np.nan, np.nan, 680585148], + ['U', 0.0, np.nan, 680585148], + ['Pb', 7.07e-06, np.nan, 680585148], ['Sn', 2.3614e-05, 0.0133, 680607017], ['Ag', 0.0, 0.0133, 680607017], ['Hg', -0.00015, 0.0133, 680607017]] @@ -752,8 +749,8 @@ def verify(df): left = df.copy().set_index(['s_id', 'dosage', 'agent']).unstack() - vals = [[nan, nan, 7.07e-06, nan, 0.0], - [0.0, -0.00015, nan, 2.3614e-05, nan]] + vals = [[np.nan, np.nan, 7.07e-06, np.nan, 0.0], + [0.0, -0.00015, np.nan, 2.3614e-05, np.nan]] idx = MultiIndex(levels=[[680585148, 680607017], [0.0133]], codes=[[0, 1], [-1, 0]], @@ -777,8 +774,8 @@ def verify(df): 'joe': (np.random.randn(6) * 10).round(2)}) df['3rd'] = df['2nd'] - pd.Timestamp('2014-02-02') - df.loc[1, '2nd'] = df.loc[3, '2nd'] = nan - df.loc[1, '3rd'] = df.loc[4, '3rd'] = nan + df.loc[1, '2nd'] = df.loc[3, '2nd'] = np.nan + df.loc[1, '3rd'] = df.loc[4, '3rd'] = np.nan left = df.set_index(['1st', '2nd', '3rd']).unstack(['2nd', '3rd']) assert left.notna().values.sum() == 2 * len(df) @@ -845,7 +842,7 @@ def _test_stack_with_multiindex(multiindex): df = DataFrame(np.arange(6).reshape(2, 3), columns=full_multiindex[[0, 1, 3]]) result = df.stack(dropna=False) - expected = DataFrame([[0, 2], [1, nan], [3, 5], [4, nan]], + expected = DataFrame([[0, 2], [1, np.nan], [3, 5], [4, np.nan]], index=MultiIndex( levels=[[0, 1], ['u', 'x', 'y', 'z']], codes=[[0, 0, 1, 1], diff --git a/pandas/tests/frame/test_timeseries.py b/pandas/tests/frame/test_timeseries.py index 75a8c834e3af6..bc37317f72802 100644 --- a/pandas/tests/frame/test_timeseries.py +++ b/pandas/tests/frame/test_timeseries.py @@ -5,7 +5,6 @@ from datetime import datetime, time import numpy as np -from numpy.random import randn import pytest from pandas.compat import product @@ -530,7 +529,7 @@ def test_asfreq_fillvalue(self): def test_first_last_valid(self, data, idx, expected_first, expected_last): N = len(self.frame.index) - mat = randn(N) + mat = np.random.randn(N) mat[:5] = 
np.nan mat[-5:] = np.nan @@ -812,7 +811,7 @@ def test_frame_to_period(self): dr = date_range('1/1/2000', '1/1/2001') pr = period_range('1/1/2000', '1/1/2001') - df = DataFrame(randn(len(dr), K), index=dr) + df = DataFrame(np.random.randn(len(dr), K), index=dr) df['mix'] = 'a' pts = df.to_period()
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/24822
2019-01-18T02:31:12Z
2019-01-19T21:27:08Z
2019-01-19T21:27:08Z
2019-01-19T22:28:30Z
use Timedelta instead of convert_to_timedelta64
diff --git a/pandas/_libs/tslibs/timedeltas.pxd b/pandas/_libs/tslibs/timedeltas.pxd index c02a840281266..097309b17823b 100644 --- a/pandas/_libs/tslibs/timedeltas.pxd +++ b/pandas/_libs/tslibs/timedeltas.pxd @@ -5,4 +5,4 @@ from numpy cimport int64_t # Exposed for tslib, not intended for outside use. cdef int64_t cast_from_unit(object ts, object unit) except? -1 cpdef int64_t delta_to_nanoseconds(delta) except? -1 -cpdef convert_to_timedelta64(object ts, object unit) +cdef convert_to_timedelta64(object ts, object unit) diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx index 0476ba1c78efc..0a19d8749fc7c 100644 --- a/pandas/_libs/tslibs/timedeltas.pyx +++ b/pandas/_libs/tslibs/timedeltas.pyx @@ -149,7 +149,7 @@ cpdef int64_t delta_to_nanoseconds(delta) except? -1: raise TypeError(type(delta)) -cpdef convert_to_timedelta64(object ts, object unit): +cdef convert_to_timedelta64(object ts, object unit): """ Convert an incoming object to a timedelta64 if possible. 
Before calling, unit must be standardized to avoid repeated unit conversion @@ -178,16 +178,12 @@ cpdef convert_to_timedelta64(object ts, object unit): if ts == NPY_NAT: return np.timedelta64(NPY_NAT) else: - if util.is_array(ts): - ts = ts.astype('int64').item() if unit in ['Y', 'M', 'W']: ts = np.timedelta64(ts, unit) else: ts = cast_from_unit(ts, unit) ts = np.timedelta64(ts) elif is_float_object(ts): - if util.is_array(ts): - ts = ts.astype('int64').item() if unit in ['Y', 'M', 'W']: ts = np.timedelta64(int(ts), unit) else: diff --git a/pandas/core/tools/timedeltas.py b/pandas/core/tools/timedeltas.py index 6bcf56c306e6a..e3428146b91d8 100644 --- a/pandas/core/tools/timedeltas.py +++ b/pandas/core/tools/timedeltas.py @@ -4,9 +4,7 @@ import numpy as np -from pandas._libs import tslibs -from pandas._libs.tslibs.timedeltas import ( - convert_to_timedelta64, parse_timedelta_unit) +from pandas._libs.tslibs.timedeltas import Timedelta, parse_timedelta_unit from pandas.core.dtypes.common import is_list_like from pandas.core.dtypes.generic import ABCIndexClass, ABCSeries @@ -120,7 +118,9 @@ def _coerce_scalar_to_timedelta_type(r, unit='ns', box=True, errors='raise'): """Convert string 'r' to a timedelta object.""" try: - result = convert_to_timedelta64(r, unit) + result = Timedelta(r, unit) + if not box: + result = result.asm8 except ValueError: if errors == 'raise': raise @@ -130,8 +130,6 @@ def _coerce_scalar_to_timedelta_type(r, unit='ns', box=True, errors='raise'): # coerce result = pd.NaT - if box: - result = tslibs.Timedelta(result) return result diff --git a/pandas/tests/scalar/timedelta/test_timedelta.py b/pandas/tests/scalar/timedelta/test_timedelta.py index bc753c45c803a..9b5fdfb06a9fa 100644 --- a/pandas/tests/scalar/timedelta/test_timedelta.py +++ b/pandas/tests/scalar/timedelta/test_timedelta.py @@ -10,7 +10,6 @@ import pandas as pd from pandas import ( Series, Timedelta, TimedeltaIndex, timedelta_range, to_timedelta) -from pandas.core.tools.timedeltas 
import _coerce_scalar_to_timedelta_type as ct import pandas.util.testing as tm @@ -373,21 +372,21 @@ def test_unit_parser(self, units, np_unit, wrapper): assert result == expected def test_numeric_conversions(self): - assert ct(0) == np.timedelta64(0, 'ns') - assert ct(10) == np.timedelta64(10, 'ns') - assert ct(10, unit='ns') == np.timedelta64(10, 'ns').astype('m8[ns]') + assert Timedelta(0) == np.timedelta64(0, 'ns') + assert Timedelta(10) == np.timedelta64(10, 'ns') + assert Timedelta(10, unit='ns') == np.timedelta64(10, 'ns') - assert ct(10, unit='us') == np.timedelta64(10, 'us').astype('m8[ns]') - assert ct(10, unit='ms') == np.timedelta64(10, 'ms').astype('m8[ns]') - assert ct(10, unit='s') == np.timedelta64(10, 's').astype('m8[ns]') - assert ct(10, unit='d') == np.timedelta64(10, 'D').astype('m8[ns]') + assert Timedelta(10, unit='us') == np.timedelta64(10, 'us') + assert Timedelta(10, unit='ms') == np.timedelta64(10, 'ms') + assert Timedelta(10, unit='s') == np.timedelta64(10, 's') + assert Timedelta(10, unit='d') == np.timedelta64(10, 'D') def test_timedelta_conversions(self): - assert (ct(timedelta(seconds=1)) == + assert (Timedelta(timedelta(seconds=1)) == np.timedelta64(1, 's').astype('m8[ns]')) - assert (ct(timedelta(microseconds=1)) == + assert (Timedelta(timedelta(microseconds=1)) == np.timedelta64(1, 'us').astype('m8[ns]')) - assert (ct(timedelta(days=1)) == + assert (Timedelta(timedelta(days=1)) == np.timedelta64(1, 'D').astype('m8[ns]')) def test_round(self): @@ -493,47 +492,49 @@ def test_short_format_converters(self): def conv(v): return v.astype('m8[ns]') - assert ct('10') == np.timedelta64(10, 'ns') - assert ct('10ns') == np.timedelta64(10, 'ns') - assert ct('100') == np.timedelta64(100, 'ns') - assert ct('100ns') == np.timedelta64(100, 'ns') - - assert ct('1000') == np.timedelta64(1000, 'ns') - assert ct('1000ns') == np.timedelta64(1000, 'ns') - assert ct('1000NS') == np.timedelta64(1000, 'ns') - - assert ct('10us') == np.timedelta64(10000, 
'ns') - assert ct('100us') == np.timedelta64(100000, 'ns') - assert ct('1000us') == np.timedelta64(1000000, 'ns') - assert ct('1000Us') == np.timedelta64(1000000, 'ns') - assert ct('1000uS') == np.timedelta64(1000000, 'ns') - - assert ct('1ms') == np.timedelta64(1000000, 'ns') - assert ct('10ms') == np.timedelta64(10000000, 'ns') - assert ct('100ms') == np.timedelta64(100000000, 'ns') - assert ct('1000ms') == np.timedelta64(1000000000, 'ns') - - assert ct('-1s') == -np.timedelta64(1000000000, 'ns') - assert ct('1s') == np.timedelta64(1000000000, 'ns') - assert ct('10s') == np.timedelta64(10000000000, 'ns') - assert ct('100s') == np.timedelta64(100000000000, 'ns') - assert ct('1000s') == np.timedelta64(1000000000000, 'ns') - - assert ct('1d') == conv(np.timedelta64(1, 'D')) - assert ct('-1d') == -conv(np.timedelta64(1, 'D')) - assert ct('1D') == conv(np.timedelta64(1, 'D')) - assert ct('10D') == conv(np.timedelta64(10, 'D')) - assert ct('100D') == conv(np.timedelta64(100, 'D')) - assert ct('1000D') == conv(np.timedelta64(1000, 'D')) - assert ct('10000D') == conv(np.timedelta64(10000, 'D')) + assert Timedelta('10') == np.timedelta64(10, 'ns') + assert Timedelta('10ns') == np.timedelta64(10, 'ns') + assert Timedelta('100') == np.timedelta64(100, 'ns') + assert Timedelta('100ns') == np.timedelta64(100, 'ns') + + assert Timedelta('1000') == np.timedelta64(1000, 'ns') + assert Timedelta('1000ns') == np.timedelta64(1000, 'ns') + assert Timedelta('1000NS') == np.timedelta64(1000, 'ns') + + assert Timedelta('10us') == np.timedelta64(10000, 'ns') + assert Timedelta('100us') == np.timedelta64(100000, 'ns') + assert Timedelta('1000us') == np.timedelta64(1000000, 'ns') + assert Timedelta('1000Us') == np.timedelta64(1000000, 'ns') + assert Timedelta('1000uS') == np.timedelta64(1000000, 'ns') + + assert Timedelta('1ms') == np.timedelta64(1000000, 'ns') + assert Timedelta('10ms') == np.timedelta64(10000000, 'ns') + assert Timedelta('100ms') == np.timedelta64(100000000, 'ns') + 
assert Timedelta('1000ms') == np.timedelta64(1000000000, 'ns') + + assert Timedelta('-1s') == -np.timedelta64(1000000000, 'ns') + assert Timedelta('1s') == np.timedelta64(1000000000, 'ns') + assert Timedelta('10s') == np.timedelta64(10000000000, 'ns') + assert Timedelta('100s') == np.timedelta64(100000000000, 'ns') + assert Timedelta('1000s') == np.timedelta64(1000000000000, 'ns') + + assert Timedelta('1d') == conv(np.timedelta64(1, 'D')) + assert Timedelta('-1d') == -conv(np.timedelta64(1, 'D')) + assert Timedelta('1D') == conv(np.timedelta64(1, 'D')) + assert Timedelta('10D') == conv(np.timedelta64(10, 'D')) + assert Timedelta('100D') == conv(np.timedelta64(100, 'D')) + assert Timedelta('1000D') == conv(np.timedelta64(1000, 'D')) + assert Timedelta('10000D') == conv(np.timedelta64(10000, 'D')) # space - assert ct(' 10000D ') == conv(np.timedelta64(10000, 'D')) - assert ct(' - 10000D ') == -conv(np.timedelta64(10000, 'D')) + assert Timedelta(' 10000D ') == conv(np.timedelta64(10000, 'D')) + assert Timedelta(' - 10000D ') == -conv(np.timedelta64(10000, 'D')) # invalid - pytest.raises(ValueError, ct, '1foo') - pytest.raises(ValueError, ct, 'foo') + with pytest.raises(ValueError): + Timedelta('1foo') + with pytest.raises(ValueError): + Timedelta('foo') def test_full_format_converters(self): def conv(v): @@ -541,25 +542,27 @@ def conv(v): d1 = np.timedelta64(1, 'D') - assert ct('1days') == conv(d1) - assert ct('1days,') == conv(d1) - assert ct('- 1days,') == -conv(d1) + assert Timedelta('1days') == conv(d1) + assert Timedelta('1days,') == conv(d1) + assert Timedelta('- 1days,') == -conv(d1) - assert ct('00:00:01') == conv(np.timedelta64(1, 's')) - assert ct('06:00:01') == conv(np.timedelta64(6 * 3600 + 1, 's')) - assert ct('06:00:01.0') == conv(np.timedelta64(6 * 3600 + 1, 's')) - assert ct('06:00:01.01') == conv(np.timedelta64( + assert Timedelta('00:00:01') == conv(np.timedelta64(1, 's')) + assert Timedelta('06:00:01') == conv(np.timedelta64(6 * 3600 + 1, 's')) + 
assert Timedelta('06:00:01.0') == conv( + np.timedelta64(6 * 3600 + 1, 's')) + assert Timedelta('06:00:01.01') == conv(np.timedelta64( 1000 * (6 * 3600 + 1) + 10, 'ms')) - assert (ct('- 1days, 00:00:01') == + assert (Timedelta('- 1days, 00:00:01') == conv(-d1 + np.timedelta64(1, 's'))) - assert (ct('1days, 06:00:01') == + assert (Timedelta('1days, 06:00:01') == conv(d1 + np.timedelta64(6 * 3600 + 1, 's'))) - assert (ct('1days, 06:00:01.01') == + assert (Timedelta('1days, 06:00:01.01') == conv(d1 + np.timedelta64(1000 * (6 * 3600 + 1) + 10, 'ms'))) # invalid - pytest.raises(ValueError, ct, '- 1days, 00') + with pytest.raises(ValueError): + Timedelta('- 1days, 00') def test_overflow(self): # GH 9442
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/24821
2019-01-18T02:14:53Z
2019-01-18T22:40:07Z
2019-01-18T22:40:07Z
2019-01-18T22:41:14Z
BUG: DataFrame.merge(suffixes=) does not respect None
diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst index 09626be713c4f..80c76d14a5938 100644 --- a/doc/source/whatsnew/v0.25.0.rst +++ b/doc/source/whatsnew/v0.25.0.rst @@ -181,6 +181,7 @@ Groupby/Resample/Rolling Reshaping ^^^^^^^^^ +- Bug in :func:`pandas.merge` adds a string of ``None`` if ``None`` is assigned in suffixes instead of remain the column name as-is (:issue:`24782`). - Bug in :func:`merge` when merging by index name would sometimes result in an incorrectly numbered index (:issue:`24212`) - :func:`to_records` now accepts dtypes to its `column_dtypes` parameter (:issue:`24895`) - diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index 050c3d3e87fc6..5cae6e1a89170 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -1971,16 +1971,28 @@ def items_overlap_with_suffix(left, lsuffix, right, rsuffix): raise ValueError('columns overlap but no suffix specified: ' '{rename}'.format(rename=to_rename)) - def lrenamer(x): - if x in to_rename: - return '{x}{lsuffix}'.format(x=x, lsuffix=lsuffix) - return x + def renamer(x, suffix): + """Rename the left and right indices. + + If there is overlap, and suffix is not None, add + suffix, otherwise, leave it as-is. 
+ + Parameters + ---------- + x : original column name + suffix : str or None - def rrenamer(x): - if x in to_rename: - return '{x}{rsuffix}'.format(x=x, rsuffix=rsuffix) + Returns + ------- + x : renamed column name + """ + if x in to_rename and suffix is not None: + return '{x}{suffix}'.format(x=x, suffix=suffix) return x + lrenamer = partial(renamer, suffix=lsuffix) + rrenamer = partial(renamer, suffix=rsuffix) + return (_transform_index(left, lrenamer), _transform_index(right, rrenamer)) diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index 1dd19a7c1514e..ad3327e694b67 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -159,9 +159,15 @@ def merge_ordered(left, right, on=None, left DataFrame fill_method : {'ffill', None}, default None Interpolation method for data - suffixes : 2-length sequence (tuple, list, ...) - Suffix to apply to overlapping column names in the left and right - side, respectively + suffixes : Sequence, default is ("_x", "_y") + A length-2 sequence where each element is optionally a string + indicating the suffix to add to overlapping column names in + `left` and `right` respectively. Pass a value of `None` instead + of a string to indicate that the column name from `left` or + `right` should be left as-is, with no suffix. At least one of the + values must not be None. + + .. 
versionchanged:: 0.25.0 how : {'left', 'right', 'outer', 'inner'}, default 'outer' * left: use only keys from left frame (SQL: left outer join) * right: use only keys from right frame (SQL: right outer join) diff --git a/pandas/tests/reshape/merge/test_merge.py b/pandas/tests/reshape/merge/test_merge.py index a0a20d1da6cef..25487ccc76e62 100644 --- a/pandas/tests/reshape/merge/test_merge.py +++ b/pandas/tests/reshape/merge/test_merge.py @@ -1526,3 +1526,65 @@ def test_merge_series(on, left_on, right_on, left_index, right_index, nm): with pytest.raises(ValueError, match=msg): result = pd.merge(a, b, on=on, left_on=left_on, right_on=right_on, left_index=left_index, right_index=right_index) + + +@pytest.mark.parametrize("col1, col2, kwargs, expected_cols", [ + (0, 0, dict(suffixes=("", "_dup")), ["0", "0_dup"]), + (0, 0, dict(suffixes=(None, "_dup")), [0, "0_dup"]), + (0, 0, dict(suffixes=("_x", "_y")), ["0_x", "0_y"]), + ("a", 0, dict(suffixes=(None, "_y")), ["a", 0]), + (0.0, 0.0, dict(suffixes=("_x", None)), ["0.0_x", 0.0]), + ("b", "b", dict(suffixes=(None, "_y")), ["b", "b_y"]), + ("a", "a", dict(suffixes=("_x", None)), ["a_x", "a"]), + ("a", "b", dict(suffixes=("_x", None)), ["a", "b"]), + ("a", "a", dict(suffixes=[None, "_x"]), ["a", "a_x"]), + (0, 0, dict(suffixes=["_a", None]), ["0_a", 0]), + ("a", "a", dict(), ["a_x", "a_y"]), + (0, 0, dict(), ["0_x", "0_y"]) +]) +def test_merge_suffix(col1, col2, kwargs, expected_cols): + # issue: 24782 + a = pd.DataFrame({col1: [1, 2, 3]}) + b = pd.DataFrame({col2: [4, 5, 6]}) + + expected = pd.DataFrame([[1, 4], [2, 5], [3, 6]], + columns=expected_cols) + + result = a.merge(b, left_index=True, right_index=True, **kwargs) + tm.assert_frame_equal(result, expected) + + result = pd.merge(a, b, left_index=True, right_index=True, **kwargs) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("col1, col2, suffixes", [ + ("a", "a", [None, None]), + ("a", "a", (None, None)), + ("a", "a", ("", None)), + (0, 0, 
[None, None]), + (0, 0, (None, "")) +]) +def test_merge_suffix_error(col1, col2, suffixes): + # issue: 24782 + a = pd.DataFrame({col1: [1, 2, 3]}) + b = pd.DataFrame({col2: [3, 4, 5]}) + + # TODO: might reconsider current raise behaviour, see issue 24782 + msg = "columns overlap but no suffix specified" + with pytest.raises(ValueError, match=msg): + pd.merge(a, b, left_index=True, right_index=True, suffixes=suffixes) + + +@pytest.mark.parametrize("col1, col2, suffixes", [ + ("a", "a", None), + (0, 0, None) +]) +def test_merge_suffix_none_error(col1, col2, suffixes): + # issue: 24782 + a = pd.DataFrame({col1: [1, 2, 3]}) + b = pd.DataFrame({col2: [3, 4, 5]}) + + # TODO: might reconsider current raise behaviour, see GH24782 + msg = "iterable" + with pytest.raises(TypeError, match=msg): + pd.merge(a, b, left_index=True, right_index=True, suffixes=suffixes)
- [ ] closes #24782 - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/24819
2019-01-17T20:28:06Z
2019-02-06T03:51:07Z
2019-02-06T03:51:06Z
2019-02-09T16:05:25Z
BUG: Format mismatch doesn't coerce to NaT
diff --git a/doc/source/whatsnew/v0.24.0.rst b/doc/source/whatsnew/v0.24.0.rst index 3685a24d60e74..673a1b8a8581f 100644 --- a/doc/source/whatsnew/v0.24.0.rst +++ b/doc/source/whatsnew/v0.24.0.rst @@ -1548,6 +1548,7 @@ Datetimelike - Bug in :meth:`DatetimeIndex.astype`, :meth:`PeriodIndex.astype` and :meth:`TimedeltaIndex.astype` ignoring the sign of the ``dtype`` for unsigned integer dtypes (:issue:`24405`). - Fixed bug in :meth:`Series.max` with ``datetime64[ns]``-dtype failing to return ``NaT`` when nulls are present and ``skipna=False`` is passed (:issue:`24265`) - Bug in :func:`to_datetime` where arrays of ``datetime`` objects containing both timezone-aware and timezone-naive ``datetimes`` would fail to raise ``ValueError`` (:issue:`24569`) +- Bug in :func:`to_datetime` with invalid datetime format doesn't coerce input to ``NaT`` even if ``errors='coerce'`` (:issue:`24763`) Timedelta ^^^^^^^^^ diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py index 5b540ee88a3f3..e6478da400d76 100644 --- a/pandas/core/tools/datetimes.py +++ b/pandas/core/tools/datetimes.py @@ -265,7 +265,12 @@ def _convert_listlike_datetimes(arg, box, format, name=None, tz=None, except tslibs.OutOfBoundsDatetime: if errors == 'raise': raise - result = arg + elif errors == 'coerce': + result = np.empty(arg.shape, dtype='M8[ns]') + iresult = result.view('i8') + iresult.fill(tslibs.iNaT) + else: + result = arg except ValueError: # if format was inferred, try falling back # to array_to_datetime - terminate here @@ -273,7 +278,12 @@ def _convert_listlike_datetimes(arg, box, format, name=None, tz=None, if not infer_datetime_format: if errors == 'raise': raise - result = arg + elif errors == 'coerce': + result = np.empty(arg.shape, dtype='M8[ns]') + iresult = result.view('i8') + iresult.fill(tslibs.iNaT) + else: + result = arg except ValueError as e: # Fallback to try to convert datetime objects if timezone-aware # datetime objects are found without passing `utc=True` diff 
--git a/pandas/tests/indexes/datetimes/test_tools.py b/pandas/tests/indexes/datetimes/test_tools.py index 50c8f8d4c1f4c..bec2fa66c43cd 100644 --- a/pandas/tests/indexes/datetimes/test_tools.py +++ b/pandas/tests/indexes/datetimes/test_tools.py @@ -555,6 +555,63 @@ def test_datetime_invalid_datatype(self): with pytest.raises(TypeError): pd.to_datetime(pd.to_datetime) + @pytest.mark.parametrize('value', ["a", "00:01:99"]) + @pytest.mark.parametrize('infer', [True, False]) + @pytest.mark.parametrize('format', [None, 'H%:M%:S%']) + def test_datetime_invalid_scalar(self, value, format, infer): + # GH24763 + res = pd.to_datetime(value, errors='ignore', format=format, + infer_datetime_format=infer) + assert res == value + + res = pd.to_datetime(value, errors='coerce', format=format, + infer_datetime_format=infer) + assert res is pd.NaT + + with pytest.raises(ValueError): + pd.to_datetime(value, errors='raise', format=format, + infer_datetime_format=infer) + + @pytest.mark.parametrize('value', ["3000/12/11 00:00:00"]) + @pytest.mark.parametrize('infer', [True, False]) + @pytest.mark.parametrize('format', [None, 'H%:M%:S%']) + def test_datetime_outofbounds_scalar(self, value, format, infer): + # GH24763 + res = pd.to_datetime(value, errors='ignore', format=format, + infer_datetime_format=infer) + assert res == value + + res = pd.to_datetime(value, errors='coerce', format=format, + infer_datetime_format=infer) + assert res is pd.NaT + + if format is not None: + with pytest.raises(ValueError): + pd.to_datetime(value, errors='raise', format=format, + infer_datetime_format=infer) + else: + with pytest.raises(OutOfBoundsDatetime): + pd.to_datetime(value, errors='raise', format=format, + infer_datetime_format=infer) + + @pytest.mark.parametrize('values', [["a"], ["00:01:99"], + ["a", "b", "99:00:00"]]) + @pytest.mark.parametrize('infer', [True, False]) + @pytest.mark.parametrize('format', [None, 'H%:M%:S%']) + def test_datetime_invalid_index(self, values, format, infer): + # 
GH24763 + res = pd.to_datetime(values, errors='ignore', format=format, + infer_datetime_format=infer) + tm.assert_index_equal(res, pd.Index(values)) + + res = pd.to_datetime(values, errors='coerce', format=format, + infer_datetime_format=infer) + tm.assert_index_equal(res, pd.DatetimeIndex([pd.NaT] * len(values))) + + with pytest.raises(ValueError): + pd.to_datetime(values, errors='raise', format=format, + infer_datetime_format=infer) + @pytest.mark.parametrize("utc", [True, None]) @pytest.mark.parametrize("format", ['%Y%m%d %H:%M:%S', None]) @pytest.mark.parametrize("box", [True, False])
- [x] closes #24763 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/24815
2019-01-17T11:55:31Z
2019-01-21T14:19:56Z
2019-01-21T14:19:56Z
2019-01-22T08:49:49Z
STY: use pytest.raises context syntax (series)
diff --git a/pandas/tests/series/test_alter_axes.py b/pandas/tests/series/test_alter_axes.py index 99a4f0c424ce9..cd4c0a7924d39 100644 --- a/pandas/tests/series/test_alter_axes.py +++ b/pandas/tests/series/test_alter_axes.py @@ -16,11 +16,16 @@ class TestSeriesAlterAxes(object): def test_setindex(self, string_series): # wrong type - pytest.raises(TypeError, setattr, string_series, 'index', None) + msg = (r"Index\(\.\.\.\) must be called with a collection of some" + r" kind, None was passed") + with pytest.raises(TypeError, match=msg): + string_series.index = None # wrong length - pytest.raises(Exception, setattr, string_series, 'index', - np.arange(len(string_series) - 1)) + msg = (r"Length mismatch: Expected axis has (30|100) elements, new" + r" values have (29|99) elements") + with pytest.raises(ValueError, match=msg): + string_series.index = np.arange(len(string_series) - 1) # works string_series.index = np.arange(len(string_series)) diff --git a/pandas/tests/series/test_analytics.py b/pandas/tests/series/test_analytics.py index b5140a5319c01..6811e370726b2 100644 --- a/pandas/tests/series/test_analytics.py +++ b/pandas/tests/series/test_analytics.py @@ -97,8 +97,10 @@ def test_argsort_stable(self): check_dtype=False) tm.assert_series_equal(qindexer, Series(qexpected), check_dtype=False) - pytest.raises(AssertionError, tm.assert_numpy_array_equal, - qindexer, mindexer) + msg = (r"ndarray Expected type <(class|type) 'numpy\.ndarray'>," + r" found <class 'pandas\.core\.series\.Series'> instead") + with pytest.raises(AssertionError, match=msg): + tm.assert_numpy_array_equal(qindexer, mindexer) def test_cumsum(self, datetime_series): self._check_accum_op('cumsum', datetime_series) @@ -476,8 +478,13 @@ def test_dot(self): assert_almost_equal(a.dot(b['1']), expected['1']) assert_almost_equal(a.dot(b2['1']), expected['1']) - pytest.raises(Exception, a.dot, a.values[:3]) - pytest.raises(ValueError, a.dot, b.T) + msg = r"Dot product shape mismatch, \(4L?,\) vs \(3L?,\)" 
+ # exception raised is of type Exception + with pytest.raises(Exception, match=msg): + a.dot(a.values[:3]) + msg = "matrices are not aligned" + with pytest.raises(ValueError, match=msg): + a.dot(b.T) @pytest.mark.skipif(not PY35, reason='matmul supported for Python>=3.5') @@ -541,8 +548,13 @@ def test_matmul(self): index=['1', '2', '3']) assert_series_equal(result, expected) - pytest.raises(Exception, a.dot, a.values[:3]) - pytest.raises(ValueError, a.dot, b.T) + msg = r"Dot product shape mismatch, \(4,\) vs \(3,\)" + # exception raised is of type Exception + with pytest.raises(Exception, match=msg): + a.dot(a.values[:3]) + msg = "matrices are not aligned" + with pytest.raises(ValueError, match=msg): + a.dot(b.T) def test_clip(self, datetime_series): val = datetime_series.median() @@ -697,11 +709,13 @@ def test_isin(self): def test_isin_with_string_scalar(self): # GH4763 s = Series(['A', 'B', 'C', 'a', 'B', 'B', 'A', 'C']) - with pytest.raises(TypeError): + msg = (r"only list-like objects are allowed to be passed to isin\(\)," + r" you passed a \[str\]") + with pytest.raises(TypeError, match=msg): s.isin('a') - with pytest.raises(TypeError): - s = Series(['aaa', 'b', 'c']) + s = Series(['aaa', 'b', 'c']) + with pytest.raises(TypeError, match=msg): s.isin('aaa') def test_isin_with_i8(self): @@ -771,18 +785,21 @@ def test_ptp(self): with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): tm.assert_series_equal(s.ptp(level=0, skipna=False), expected) - with pytest.raises(ValueError): + msg = r"No axis named 1 for object type <(class|type) 'type'>" + with pytest.raises(ValueError, match=msg): with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): s.ptp(axis=1) s = pd.Series(['a', 'b', 'c', 'd', 'e']) - with pytest.raises(TypeError): + msg = r"unsupported operand type\(s\) for -: 'str' and 'str'" + with pytest.raises(TypeError, match=msg): with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): s.ptp() - with 
pytest.raises(NotImplementedError): + msg = r"Series\.ptp does not implement numeric_only\." + with pytest.raises(NotImplementedError, match=msg): with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): s.ptp(numeric_only=True) @@ -1103,20 +1120,27 @@ def test_validate_any_all_out_keepdims_raises(self, kwargs, func): param = list(kwargs)[0] name = func.__name__ - msg = "the '{}' parameter .* {}".format(param, name) + msg = (r"the '{arg}' parameter is not " + r"supported in the pandas " + r"implementation of {fname}\(\)").format(arg=param, fname=name) with pytest.raises(ValueError, match=msg): func(s, **kwargs) @td.skip_if_np_lt_115 def test_validate_sum_initial(self): s = pd.Series([1, 2]) - with pytest.raises(ValueError, match="the 'initial' .* sum"): + msg = (r"the 'initial' parameter is not " + r"supported in the pandas " + r"implementation of sum\(\)") + with pytest.raises(ValueError, match=msg): np.sum(s, initial=10) def test_validate_median_initial(self): s = pd.Series([1, 2]) - with pytest.raises(ValueError, - match="the 'overwrite_input' .* median"): + msg = (r"the 'overwrite_input' parameter is not " + r"supported in the pandas " + r"implementation of median\(\)") + with pytest.raises(ValueError, match=msg): # It seems like np.median doesn't dispatch, so we use the # method instead of the ufunc. 
s.median(overwrite_input=True) @@ -1124,8 +1148,10 @@ def test_validate_median_initial(self): @td.skip_if_np_lt_115 def test_validate_stat_keepdims(self): s = pd.Series([1, 2]) - with pytest.raises(ValueError, - match="the 'keepdims'"): + msg = (r"the 'keepdims' parameter is not " + r"supported in the pandas " + r"implementation of sum\(\)") + with pytest.raises(ValueError, match=msg): np.sum(s, keepdims=True) diff --git a/pandas/tests/series/test_api.py b/pandas/tests/series/test_api.py index c95cf125e22f7..1f2e2b179c687 100644 --- a/pandas/tests/series/test_api.py +++ b/pandas/tests/series/test_api.py @@ -289,8 +289,11 @@ def test_index_tab_completion(self, index): def test_not_hashable(self): s_empty = Series() s = Series([1]) - pytest.raises(TypeError, hash, s_empty) - pytest.raises(TypeError, hash, s) + msg = "'Series' objects are mutable, thus they cannot be hashed" + with pytest.raises(TypeError, match=msg): + hash(s_empty) + with pytest.raises(TypeError, match=msg): + hash(s) def test_contains(self): tm.assert_contains_all(self.ts.index, self.ts) @@ -333,7 +336,8 @@ def test_items(self): def test_raise_on_info(self): s = Series(np.random.randn(10)) - with pytest.raises(AttributeError): + msg = "'Series' object has no attribute 'info'" + with pytest.raises(AttributeError, match=msg): s.info() def test_copy(self): @@ -555,15 +559,17 @@ def test_cat_accessor_updates_on_inplace(self): def test_categorical_delegations(self): # invalid accessor - pytest.raises(AttributeError, lambda: Series([1, 2, 3]).cat) - with pytest.raises(AttributeError, - match=(r"Can only use .cat accessor " - r"with a 'category' dtype")): + msg = r"Can only use \.cat accessor with a 'category' dtype" + with pytest.raises(AttributeError, match=msg): + Series([1, 2, 3]).cat + with pytest.raises(AttributeError, match=msg): Series([1, 2, 3]).cat() - pytest.raises(AttributeError, lambda: Series(['a', 'b', 'c']).cat) - pytest.raises(AttributeError, lambda: Series(np.arange(5.)).cat) - 
pytest.raises(AttributeError, - lambda: Series([Timestamp('20130101')]).cat) + with pytest.raises(AttributeError, match=msg): + Series(['a', 'b', 'c']).cat + with pytest.raises(AttributeError, match=msg): + Series(np.arange(5.)).cat + with pytest.raises(AttributeError, match=msg): + Series([Timestamp('20130101')]).cat # Series should delegate calls to '.categories', '.codes', '.ordered' # and the methods '.set_categories()' 'drop_unused_categories()' to the @@ -605,10 +611,10 @@ def test_categorical_delegations(self): # This method is likely to be confused, so test that it raises an error # on wrong inputs: - def f(): + msg = "'Series' object has no attribute 'set_categories'" + with pytest.raises(AttributeError, match=msg): s.set_categories([4, 3, 2, 1]) - pytest.raises(Exception, f) # right: s.cat.set_categories([4,3,2,1]) # GH18862 (let Series.cat.rename_categories take callables) diff --git a/pandas/tests/series/test_combine_concat.py b/pandas/tests/series/test_combine_concat.py index e13cb9edffe2b..45e3dffde60f7 100644 --- a/pandas/tests/series/test_combine_concat.py +++ b/pandas/tests/series/test_combine_concat.py @@ -25,8 +25,9 @@ def test_append(self, datetime_series, string_series, object_series): else: raise AssertionError("orphaned index!") - pytest.raises(ValueError, datetime_series.append, datetime_series, - verify_integrity=True) + msg = "Indexes have overlapping values:" + with pytest.raises(ValueError, match=msg): + datetime_series.append(datetime_series, verify_integrity=True) def test_append_many(self, datetime_series): pieces = [datetime_series[:5], datetime_series[5:10], diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py index fa303c904440c..d92ca48751d0a 100644 --- a/pandas/tests/series/test_constructors.py +++ b/pandas/tests/series/test_constructors.py @@ -65,8 +65,10 @@ def test_constructor(self, datetime_series, empty_series): assert not empty_series.index.is_all_dates assert not 
Series({}).index.is_all_dates - pytest.raises(Exception, Series, np.random.randn(3, 3), - index=np.arange(3)) + + # exception raised is of type Exception + with pytest.raises(Exception, match="Data must be 1-dimensional"): + Series(np.random.randn(3, 3), index=np.arange(3)) mixed.name = 'Series' rs = Series(mixed).name @@ -75,7 +77,9 @@ def test_constructor(self, datetime_series, empty_series): # raise on MultiIndex GH4187 m = MultiIndex.from_arrays([[1, 2], [3, 4]]) - pytest.raises(NotImplementedError, Series, m) + msg = "initializing a Series from a MultiIndex is not supported" + with pytest.raises(NotImplementedError, match=msg): + Series(m) @pytest.mark.parametrize('input_class', [list, dict, OrderedDict]) def test_constructor_empty(self, input_class): @@ -495,7 +499,9 @@ def test_constructor_broadcast_list(self): # GH 19342 # construction with single-element container and index # should raise - pytest.raises(ValueError, Series, ['foo'], index=['a', 'b', 'c']) + msg = "Length of passed values is 1, index implies 3" + with pytest.raises(ValueError, match=msg): + Series(['foo'], index=['a', 'b', 'c']) def test_constructor_corner(self): df = tm.makeTimeDataFrame() @@ -675,10 +681,17 @@ def test_constructor_dtype_datetime64(self): assert s.dtype == 'M8[ns]' # GH3414 related + # msg = (r"cannot astype a datetimelike from \[datetime64\[ns\]\] to" + # r" \[int32\]") + # with pytest.raises(TypeError, match=msg): + # Series(Series(dates).astype('int') / 1000000, dtype='M8[ms]') pytest.raises(TypeError, lambda x: Series( Series(dates).astype('int') / 1000000, dtype='M8[ms]')) - pytest.raises(TypeError, - lambda x: Series(dates, dtype='datetime64')) + + msg = (r"The 'datetime64' dtype has no unit\. 
Please pass in" + r" 'datetime64\[ns\]' instead\.") + with pytest.raises(ValueError, match=msg): + Series(dates, dtype='datetime64') # invalid dates can be help as object result = Series([datetime(2, 1, 1)]) @@ -984,9 +997,11 @@ def test_constructor_dict_of_tuples(self): def test_constructor_set(self): values = {1, 2, 3, 4, 5} - pytest.raises(TypeError, Series, values) + with pytest.raises(TypeError, match="'set' type is unordered"): + Series(values) values = frozenset(values) - pytest.raises(TypeError, Series, values) + with pytest.raises(TypeError, match="'frozenset' type is unordered"): + Series(values) # https://github.com/pandas-dev/pandas/issues/22698 @pytest.mark.filterwarnings("ignore:elementwise comparison:FutureWarning") @@ -1081,14 +1096,16 @@ def test_constructor_dtype_timedelta64(self): td.astype('int64') # invalid casting - pytest.raises(TypeError, td.astype, 'int32') + msg = (r"cannot astype a timedelta from \[timedelta64\[ns\]\] to" + r" \[int32\]") + with pytest.raises(TypeError, match=msg): + td.astype('int32') # this is an invalid casting - def f(): + msg = "Could not convert object to NumPy timedelta" + with pytest.raises(ValueError, match=msg): Series([timedelta(days=1), 'foo'], dtype='m8[ns]') - pytest.raises(Exception, f) - # leave as object here td = Series([timedelta(days=i) for i in range(3)] + ['foo']) assert td.dtype == 'object' @@ -1134,9 +1151,11 @@ def test_constructor_name_hashable(self): assert s.name == n def test_constructor_name_unhashable(self): + msg = r"Series\.name must be a hashable type" for n in [['name_list'], np.ones(2), {1: 2}]: for data in [['name_list'], np.ones(2), {1: 2}]: - pytest.raises(TypeError, Series, data, name=n) + with pytest.raises(TypeError, match=msg): + Series(data, name=n) def test_auto_conversion(self): series = Series(list(date_range('1/1/2000', periods=10))) diff --git a/pandas/tests/series/test_dtypes.py b/pandas/tests/series/test_dtypes.py index 2bc009c5a2fc8..5ec3f69e55fde 100644 --- 
a/pandas/tests/series/test_dtypes.py +++ b/pandas/tests/series/test_dtypes.py @@ -74,7 +74,8 @@ def test_astype_cast_nan_inf_int(self, dtype, value): @pytest.mark.parametrize("dtype", [int, np.int8, np.int64]) def test_astype_cast_object_int_fail(self, dtype): arr = Series(["car", "house", "tree", "1"]) - with pytest.raises(ValueError): + msg = r"invalid literal for (int|long)\(\) with base 10: 'car'" + with pytest.raises(ValueError, match=msg): arr.astype(dtype) def test_astype_cast_object_int(self): @@ -213,17 +214,19 @@ def test_astype_dict_like(self, dtype_class): tm.assert_series_equal(result, expected) dt3 = dtype_class({'abc': str, 'def': str}) - with pytest.raises(KeyError): + msg = ("Only the Series name can be used for the key in Series dtype" + r" mappings\.") + with pytest.raises(KeyError, match=msg): s.astype(dt3) dt4 = dtype_class({0: str}) - with pytest.raises(KeyError): + with pytest.raises(KeyError, match=msg): s.astype(dt4) # GH16717 # if dtypes provided is empty, it should error dt5 = dtype_class({}) - with pytest.raises(KeyError): + with pytest.raises(KeyError, match=msg): s.astype(dt5) def test_astype_categories_deprecation(self): @@ -288,7 +291,10 @@ def test_astype_categorical_to_other(self): expected = s tm.assert_series_equal(s.astype('category'), expected) tm.assert_series_equal(s.astype(CategoricalDtype()), expected) - pytest.raises(ValueError, lambda: s.astype('float64')) + msg = (r"could not convert string to float: '(0 - 499|9500 - 9999)'|" + r"invalid literal for float\(\): 9500 - 9999") + with pytest.raises(ValueError, match=msg): + s.astype('float64') cat = Series(Categorical(['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c'])) exp = Series(['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c']) @@ -324,9 +330,12 @@ def cmp(a, b): tm.assert_series_equal(result, s, check_categorical=False) # invalid conversion (these are NOT a dtype) + msg = (r"invalid type <class 'pandas\.core\.arrays\.categorical\." 
+ "Categorical'> for astype") for invalid in [lambda x: x.astype(Categorical), lambda x: x.astype('object').astype(Categorical)]: - pytest.raises(TypeError, lambda: invalid(s)) + with pytest.raises(TypeError, match=msg): + invalid(s) @pytest.mark.parametrize('name', [None, 'foo']) @pytest.mark.parametrize('dtype_ordered', [True, False]) @@ -387,11 +396,14 @@ def test_astype_categoricaldtype_with_args(self): s = Series(['a', 'b']) type_ = CategoricalDtype(['a', 'b']) - with pytest.raises(TypeError): + msg = (r"Cannot specify a CategoricalDtype and also `categories` or" + r" `ordered`\. Use `dtype=CategoricalDtype\(categories," + r" ordered\)` instead\.") + with pytest.raises(TypeError, match=msg): s.astype(type_, ordered=True) - with pytest.raises(TypeError): + with pytest.raises(TypeError, match=msg): s.astype(type_, categories=['a', 'b']) - with pytest.raises(TypeError): + with pytest.raises(TypeError, match=msg): s.astype(type_, categories=['a', 'b'], ordered=False) @pytest.mark.parametrize("dtype", [ @@ -435,7 +447,9 @@ def test_arg_for_errors_in_astype(self): # see gh-14878 s = Series([1, 2, 3]) - with pytest.raises(ValueError): + msg = (r"Expected value of kwarg 'errors' to be one of \['raise'," + r" 'ignore'\]\. Supplied value is 'False'") + with pytest.raises(ValueError, match=msg): s.astype(np.float64, errors=False) s.astype(np.int8, errors='raise') diff --git a/pandas/tests/series/test_internals.py b/pandas/tests/series/test_internals.py index 772617c494aef..26b868872ee0d 100644 --- a/pandas/tests/series/test_internals.py +++ b/pandas/tests/series/test_internals.py @@ -293,7 +293,9 @@ def test_convert(self): def test_convert_no_arg_error(self): s = Series(['1.0', '2']) - pytest.raises(ValueError, s._convert) + msg = r"At least one of datetime, numeric or timedelta must be True\." 
+ with pytest.raises(ValueError, match=msg): + s._convert() def test_convert_preserve_bool(self): s = Series([1, True, 3, 5], dtype=object) diff --git a/pandas/tests/series/test_missing.py b/pandas/tests/series/test_missing.py index f4f16ff2d3ac1..985288c439917 100644 --- a/pandas/tests/series/test_missing.py +++ b/pandas/tests/series/test_missing.py @@ -360,14 +360,24 @@ def test_fillna_int(self): def test_fillna_raise(self): s = Series(np.random.randint(-100, 100, 50)) - pytest.raises(TypeError, s.fillna, [1, 2]) - pytest.raises(TypeError, s.fillna, (1, 2)) + msg = ('"value" parameter must be a scalar or dict, but you passed a' + ' "list"') + with pytest.raises(TypeError, match=msg): + s.fillna([1, 2]) + + msg = ('"value" parameter must be a scalar or dict, but you passed a' + ' "tuple"') + with pytest.raises(TypeError, match=msg): + s.fillna((1, 2)) # related GH 9217, make sure limit is an int and greater than 0 s = Series([1, 2, 3, None]) + msg = (r"Cannot specify both 'value' and 'method'\.|" + r"Limit must be greater than 0|" + "Limit must be an integer") for limit in [-1, 0, 1., 2.]: for method in ['backfill', 'bfill', 'pad', 'ffill', None]: - with pytest.raises(ValueError): + with pytest.raises(ValueError, match=msg): s.fillna(1, limit=limit, method=method) def test_categorical_nan_equality(self): @@ -508,9 +518,13 @@ def test_fillna(self, datetime_series): exp = Series([0., 1., 5., 3., 4.], index=ts.index) tm.assert_series_equal(ts.fillna(value=5), exp) - pytest.raises(ValueError, ts.fillna) - pytest.raises(ValueError, datetime_series.fillna, value=0, - method='ffill') + msg = "Must specify a fill 'value' or 'method'" + with pytest.raises(ValueError, match=msg): + ts.fillna() + + msg = "Cannot specify both 'value' and 'method'" + with pytest.raises(ValueError, match=msg): + datetime_series.fillna(value=0, method='ffill') # GH 5703 s1 = Series([np.nan]) @@ -647,7 +661,9 @@ def test_dropna_empty(self): assert len(s) == 0 # invalid axis - 
pytest.raises(ValueError, s.dropna, axis=1) + msg = r"No axis named 1 for object type <(class|type) 'type'>" + with pytest.raises(ValueError, match=msg): + s.dropna(axis=1) def test_datetime64_tz_dropna(self): # DatetimeBlock @@ -736,7 +752,9 @@ def test_pad_require_monotonicity(self): # neither monotonic increasing or decreasing rng2 = rng[[1, 0, 2]] - pytest.raises(ValueError, rng2.get_indexer, rng, method='pad') + msg = "index must be monotonic increasing or decreasing" + with pytest.raises(ValueError, match=msg): + rng2.get_indexer(rng, method='pad') def test_dropna_preserve_name(self, datetime_series): datetime_series[:5] = np.nan @@ -861,7 +879,10 @@ def test_interpolate(self, datetime_series, string_series): # Only raises ValueError if there are NaNs. non_ts = string_series.copy() non_ts[0] = np.NaN - pytest.raises(ValueError, non_ts.interpolate, method='time') + msg = ("time-weighted interpolation only works on Series or DataFrames" + " with a DatetimeIndex") + with pytest.raises(ValueError, match=msg): + non_ts.interpolate(method='time') @td.skip_if_no_scipy def test_interpolate_pchip(self): @@ -956,7 +977,9 @@ def test_interpolate_index_values(self): def test_interpolate_non_ts(self): s = Series([1, 3, np.nan, np.nan, np.nan, 11]) - with pytest.raises(ValueError): + msg = ("time-weighted interpolation only works on Series or DataFrames" + " with a DatetimeIndex") + with pytest.raises(ValueError, match=msg): s.interpolate(method='time') @pytest.mark.parametrize("kwargs", [ @@ -1044,9 +1067,14 @@ def test_interp_limit(self): 'polynomial', 'spline', 'piecewise_polynomial', None, 'from_derivatives', 'pchip', 'akima'] s = pd.Series([1, 2, np.nan, np.nan, 5]) + msg = (r"Limit must be greater than 0|" + "time-weighted interpolation only works on Series or" + r" DataFrames with a DatetimeIndex|" + r"invalid method '(polynomial|spline|None)' to interpolate|" + "Limit must be an integer") for limit in [-1, 0, 1., 2.]: for method in methods: - with 
pytest.raises(ValueError): + with pytest.raises(ValueError, match=msg): s.interpolate(limit=limit, method=method) def test_interp_limit_forward(self): @@ -1084,12 +1112,14 @@ def test_interp_unlimited(self): def test_interp_limit_bad_direction(self): s = Series([1, 3, np.nan, np.nan, np.nan, 11]) - pytest.raises(ValueError, s.interpolate, method='linear', limit=2, - limit_direction='abc') + msg = (r"Invalid limit_direction: expecting one of \['forward'," + r" 'backward', 'both'\], got 'abc'") + with pytest.raises(ValueError, match=msg): + s.interpolate(method='linear', limit=2, limit_direction='abc') # raises an error even if no limit is specified. - pytest.raises(ValueError, s.interpolate, method='linear', - limit_direction='abc') + with pytest.raises(ValueError, match=msg): + s.interpolate(method='linear', limit_direction='abc') # limit_area introduced GH #16284 def test_interp_limit_area(self): @@ -1127,8 +1157,10 @@ def test_interp_limit_area(self): direction='backward') # raises an error even if limit type is wrong. - pytest.raises(ValueError, s.interpolate, method='linear', - limit_area='abc') + msg = (r"Invalid limit_area: expecting one of \['inside', 'outside'\]," + " got abc") + with pytest.raises(ValueError, match=msg): + s.interpolate(method='linear', limit_area='abc') def test_interp_limit_direction(self): # These tests are for issue #9218 -- fill NaNs in both directions. 
@@ -1214,14 +1246,16 @@ def test_interp_multiIndex(self, check_scipy): result = s.interpolate() assert_series_equal(result, expected) + msg = "Only `method=linear` interpolation is supported on MultiIndexes" if check_scipy: - with pytest.raises(ValueError): + with pytest.raises(ValueError, match=msg): s.interpolate(method='polynomial', order=1) @td.skip_if_no_scipy def test_interp_nonmono_raise(self): s = Series([1, np.nan, 3], index=[0, 2, 1]) - with pytest.raises(ValueError): + msg = "krogh interpolation requires that the index be monotonic" + with pytest.raises(ValueError, match=msg): s.interpolate(method='krogh') @td.skip_if_no_scipy @@ -1243,7 +1277,8 @@ def test_interp_limit_no_nans(self): @pytest.mark.parametrize("method", ['polynomial', 'spline']) def test_no_order(self, method): s = Series([0, 1, np.nan, 3]) - with pytest.raises(ValueError): + msg = "invalid method '{}' to interpolate".format(method) + with pytest.raises(ValueError, match=msg): s.interpolate(method=method) @td.skip_if_no_scipy @@ -1283,10 +1318,12 @@ def test_spline_error(self): # see gh-10633 s = pd.Series(np.arange(10) ** 2) s[np.random.randint(0, 9, 3)] = np.nan - with pytest.raises(ValueError): + msg = "invalid method 'spline' to interpolate" + with pytest.raises(ValueError, match=msg): s.interpolate(method='spline') - with pytest.raises(ValueError): + msg = "order needs to be specified and greater than 0" + with pytest.raises(ValueError, match=msg): s.interpolate(method='spline', order=0) def test_interp_timedelta64(self): diff --git a/pandas/tests/series/test_rank.py b/pandas/tests/series/test_rank.py index da414a577ae0b..510a51e002918 100644 --- a/pandas/tests/series/test_rank.py +++ b/pandas/tests/series/test_rank.py @@ -206,7 +206,9 @@ def test_rank_categorical(self): def test_rank_signature(self): s = Series([0, 1]) s.rank(method='average') - pytest.raises(ValueError, s.rank, 'average') + msg = r"No axis named average for object type <(class|type) 'type'>" + with 
pytest.raises(ValueError, match=msg): + s.rank('average') @pytest.mark.parametrize('contents,dtype', [ ([-np.inf, -50, -1, -1e-20, -1e-25, -1e-50, 0, 1e-40, 1e-20, 1e-10, diff --git a/pandas/tests/series/test_replace.py b/pandas/tests/series/test_replace.py index 3a9c210017625..40b28047080da 100644 --- a/pandas/tests/series/test_replace.py +++ b/pandas/tests/series/test_replace.py @@ -73,7 +73,9 @@ def test_replace(self): tm.assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0)) # malformed - pytest.raises(ValueError, ser.replace, [1, 2, 3], [np.nan, 0]) + msg = r"Replacement lists must match in length\. Expecting 3 got 2" + with pytest.raises(ValueError, match=msg): + ser.replace([1, 2, 3], [np.nan, 0]) # make sure that we aren't just masking a TypeError because bools don't # implement indexing @@ -125,7 +127,9 @@ def test_replace_with_single_list(self): # make sure things don't get corrupted when fillna call fails s = ser.copy() - with pytest.raises(ValueError): + msg = (r"Invalid fill method\. Expecting pad \(ffill\) or backfill" + r" \(bfill\)\. 
Got crash_cymbal") + with pytest.raises(ValueError, match=msg): s.replace([1, 2, 3], inplace=True, method='crash_cymbal') tm.assert_series_equal(s, ser) diff --git a/pandas/tests/series/test_sorting.py b/pandas/tests/series/test_sorting.py index ef6998c1a3e12..216f84c8f077a 100644 --- a/pandas/tests/series/test_sorting.py +++ b/pandas/tests/series/test_sorting.py @@ -55,16 +55,21 @@ def test_sort_values(self): expected = ts.sort_values(ascending=False, na_position='first') assert_series_equal(expected, ordered) - pytest.raises(ValueError, - lambda: ts.sort_values(ascending=None)) - pytest.raises(ValueError, - lambda: ts.sort_values(ascending=[])) - pytest.raises(ValueError, - lambda: ts.sort_values(ascending=[1, 2, 3])) - pytest.raises(ValueError, - lambda: ts.sort_values(ascending=[False, False])) - pytest.raises(ValueError, - lambda: ts.sort_values(ascending='foobar')) + msg = "ascending must be boolean" + with pytest.raises(ValueError, match=msg): + ts.sort_values(ascending=None) + msg = r"Length of ascending \(0\) must be 1 for Series" + with pytest.raises(ValueError, match=msg): + ts.sort_values(ascending=[]) + msg = r"Length of ascending \(3\) must be 1 for Series" + with pytest.raises(ValueError, match=msg): + ts.sort_values(ascending=[1, 2, 3]) + msg = r"Length of ascending \(2\) must be 1 for Series" + with pytest.raises(ValueError, match=msg): + ts.sort_values(ascending=[False, False]) + msg = "ascending must be boolean" + with pytest.raises(ValueError, match=msg): + ts.sort_values(ascending='foobar') # inplace=True ts = self.ts.copy() @@ -78,11 +83,11 @@ def test_sort_values(self): df = DataFrame(np.random.randn(10, 4)) s = df.iloc[:, 0] - def f(): + msg = ("This Series is a view of some other array, to sort in-place" + " you must create a copy") + with pytest.raises(ValueError, match=msg): s.sort_values(inplace=True) - pytest.raises(ValueError, f) - def test_sort_index(self): rindex = list(self.ts.index) random.shuffle(rindex) @@ -104,13 +109,15 @@ def 
test_sort_index(self): sorted_series = random_order.sort_index(axis=0) assert_series_equal(sorted_series, self.ts) - pytest.raises(ValueError, lambda: random_order.sort_values(axis=1)) + msg = r"No axis named 1 for object type <(class|type) 'type'>" + with pytest.raises(ValueError, match=msg): + random_order.sort_values(axis=1) sorted_series = random_order.sort_index(level=0, axis=0) assert_series_equal(sorted_series, self.ts) - pytest.raises(ValueError, - lambda: random_order.sort_index(level=0, axis=1)) + with pytest.raises(ValueError, match=msg): + random_order.sort_index(level=0, axis=1) def test_sort_index_inplace(self): diff --git a/pandas/tests/series/test_timeseries.py b/pandas/tests/series/test_timeseries.py index 07808008c081c..d082b023e1f27 100644 --- a/pandas/tests/series/test_timeseries.py +++ b/pandas/tests/series/test_timeseries.py @@ -7,6 +7,7 @@ import pytest from pandas._libs.tslib import iNaT +from pandas._libs.tslibs.np_datetime import OutOfBoundsDatetime from pandas.compat import StringIO, lrange, product from pandas.errors import NullFrequencyError import pandas.util._test_decorators as td @@ -78,7 +79,8 @@ def test_shift(self): assert_series_equal(shifted2, shifted3) assert_series_equal(ps, shifted2.shift(-1, 'B')) - with pytest.raises(ValueError): + msg = "Given freq D does not match PeriodIndex freq B" + with pytest.raises(ValueError, match=msg): ps.shift(freq='D') # legacy support @@ -110,7 +112,9 @@ def test_shift(self): # incompat tz s2 = Series(date_range('2000-01-01 09:00:00', periods=5, tz='CET'), name='foo') - with pytest.raises(TypeError): + msg = ("DatetimeArray subtraction must have the same timezones or no" + " timezones") + with pytest.raises(TypeError, match=msg): s - s2 def test_shift2(self): @@ -127,7 +131,9 @@ def test_shift2(self): tm.assert_index_equal(result.index, exp_index) idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-04']) - pytest.raises(NullFrequencyError, idx.shift, 1) + msg = "Cannot shift with no 
freq" + with pytest.raises(NullFrequencyError, match=msg): + idx.shift(1) def test_shift_fill_value(self): # GH #24128 @@ -158,7 +164,8 @@ def test_categorical_shift_fill_value(self): tm.assert_equal(res, expected) # check for incorrect fill_value - with pytest.raises(ValueError): + msg = "'fill_value=f' is not present in this Categorical's categories" + with pytest.raises(ValueError, match=msg): ts.shift(1, fill_value='f') def test_shift_dst(self): @@ -202,7 +209,8 @@ def test_tshift(self): shifted3 = ps.tshift(freq=BDay()) assert_series_equal(shifted, shifted3) - with pytest.raises(ValueError): + msg = "Given freq M does not match PeriodIndex freq B" + with pytest.raises(ValueError, match=msg): ps.tshift(freq='M') # DatetimeIndex @@ -222,7 +230,8 @@ def test_tshift(self): assert_series_equal(unshifted, inferred_ts) no_freq = self.ts[[0, 5, 7]] - with pytest.raises(ValueError): + msg = "Freq was not given and was not set in the index" + with pytest.raises(ValueError, match=msg): no_freq.tshift() def test_truncate(self): @@ -271,9 +280,10 @@ def test_truncate(self): truncated = ts.truncate(before=self.ts.index[-1] + offset) assert (len(truncated) == 0) - pytest.raises(ValueError, ts.truncate, - before=self.ts.index[-1] + offset, - after=self.ts.index[0] - offset) + msg = "Truncate: 1999-12-31 00:00:00 must be after 2000-02-14 00:00:00" + with pytest.raises(ValueError, match=msg): + ts.truncate(before=self.ts.index[-1] + offset, + after=self.ts.index[0] - offset) def test_truncate_nonsortedindex(self): # GH 17935 @@ -553,9 +563,11 @@ def test_to_datetime_unit(self): Timestamp('1970-01-03')] + ['NaT'] * 3) tm.assert_index_equal(result, expected) - with pytest.raises(ValueError): + msg = "non convertible value foo with the unit 'D'" + with pytest.raises(ValueError, match=msg): to_datetime([1, 2, 'foo'], unit='D') - with pytest.raises(ValueError): + msg = "cannot convert input 111111111 with the unit 'D'" + with pytest.raises(OutOfBoundsDatetime, match=msg): 
to_datetime([1, 2, 111111111], unit='D') # coerce we can process @@ -660,7 +672,8 @@ def test_first_subset(self): def test_first_raises(self): # GH20725 ser = pd.Series('a b c'.split()) - with pytest.raises(TypeError): # index is not a DatetimeIndex + msg = "'first' only supports a DatetimeIndex index" + with pytest.raises(TypeError, match=msg): ser.first('1D') def test_last_subset(self): @@ -686,7 +699,8 @@ def test_last_subset(self): def test_last_raises(self): # GH20725 ser = pd.Series('a b c'.split()) - with pytest.raises(TypeError): # index is not a DatetimeIndex + msg = "'last' only supports a DatetimeIndex index" + with pytest.raises(TypeError, match=msg): ser.last('1D') def test_format_pre_1900_dates(self): @@ -740,7 +754,8 @@ def test_at_time(self): def test_at_time_raises(self): # GH20725 ser = pd.Series('a b c'.split()) - with pytest.raises(TypeError): # index is not a DatetimeIndex + msg = "Index must be DatetimeIndex" + with pytest.raises(TypeError, match=msg): ser.at_time('00:00') def test_between(self): @@ -814,23 +829,26 @@ def test_between_time(self): def test_between_time_raises(self): # GH20725 ser = pd.Series('a b c'.split()) - with pytest.raises(TypeError): # index is not a DatetimeIndex + msg = "Index must be DatetimeIndex" + with pytest.raises(TypeError, match=msg): ser.between_time(start_time='00:00', end_time='12:00') def test_between_time_types(self): # GH11818 rng = date_range('1/1/2000', '1/5/2000', freq='5min') - with pytest.raises(ValueError): + msg = (r"Cannot convert arg \[datetime\.datetime\(2010, 1, 2, 1, 0\)\]" + " to a time") + with pytest.raises(ValueError, match=msg): rng.indexer_between_time(datetime(2010, 1, 2, 1), datetime(2010, 1, 2, 5)) frame = DataFrame({'A': 0}, index=rng) - with pytest.raises(ValueError): + with pytest.raises(ValueError, match=msg): frame.between_time(datetime(2010, 1, 2, 1), datetime(2010, 1, 2, 5)) series = Series(0, index=rng) - with pytest.raises(ValueError): + with pytest.raises(ValueError, 
match=msg): series.between_time(datetime(2010, 1, 2, 1), datetime(2010, 1, 2, 5)) @@ -858,7 +876,9 @@ def test_between_time_axis(self): assert len(ts.between_time(stime, etime)) == expected_length assert len(ts.between_time(stime, etime, axis=0)) == expected_length - pytest.raises(ValueError, ts.between_time, stime, etime, axis=1) + msg = r"No axis named 1 for object type <(class|type) 'type'>" + with pytest.raises(ValueError, match=msg): + ts.between_time(stime, etime, axis=1) def test_to_period(self): from pandas.core.indexes.period import period_range
xref #24332
https://api.github.com/repos/pandas-dev/pandas/pulls/24812
2019-01-17T00:44:31Z
2019-01-17T12:48:42Z
2019-01-17T12:48:42Z
2019-01-20T22:35:09Z
BUG: Offset-based rolling window, with only one raw in dataframe and closed='left', max and min functions make python crash
diff --git a/doc/source/whatsnew/v0.24.0.rst b/doc/source/whatsnew/v0.24.0.rst index 5213120b33f06..bcd1abba30a67 100644 --- a/doc/source/whatsnew/v0.24.0.rst +++ b/doc/source/whatsnew/v0.24.0.rst @@ -1808,6 +1808,7 @@ Plotting Groupby/Resample/Rolling ^^^^^^^^^^^^^^^^^^^^^^^^ +- Bug in :func:`pandas.core.window.Rolling.min` and :func:`pandas.core.window.Rolling.max` with ``closed='left'``, a datetime-like index and only one entry in the series leading to segfault (:issue:`24718`) - Bug in :func:`pandas.core.groupby.GroupBy.first` and :func:`pandas.core.groupby.GroupBy.last` with ``as_index=False`` leading to the loss of timezone information (:issue:`15884`) - Bug in :meth:`DateFrame.resample` when downsampling across a DST boundary (:issue:`8531`) - Bug in date anchoring for :meth:`DateFrame.resample` with offset :class:`Day` when n > 1 (:issue:`24127`) diff --git a/pandas/_libs/window.pyx b/pandas/_libs/window.pyx index 5f2c0233a0f13..e8f3de64c3823 100644 --- a/pandas/_libs/window.pyx +++ b/pandas/_libs/window.pyx @@ -1339,7 +1339,10 @@ cdef _roll_min_max_variable(ndarray[numeric] values, Q.push_back(i) W.push_back(i) - output[N-1] = calc_mm(minp, nobs, values[Q.front()]) + if not Q.empty(): + output[N-1] = calc_mm(minp, nobs, values[Q.front()]) + else: + output[N-1] = NaN return output diff --git a/pandas/tests/test_window.py b/pandas/tests/test_window.py index 412f70a3cb516..e816d4c04344a 100644 --- a/pandas/tests/test_window.py +++ b/pandas/tests/test_window.py @@ -520,6 +520,26 @@ def test_closed(self): with pytest.raises(ValueError): df.rolling(window=3, closed='neither') + @pytest.mark.parametrize("func", ['min', 'max']) + def test_closed_one_entry(self, func): + # GH24718 + ser = pd.Series(data=[2], index=pd.date_range('2000', periods=1)) + result = getattr(ser.rolling('10D', closed='left'), func)() + tm.assert_series_equal(result, pd.Series([np.nan], index=ser.index)) + + @pytest.mark.parametrize("func", ['min', 'max']) + def 
test_closed_one_entry_groupby(self, func): + # GH24718 + ser = pd.DataFrame(data={'A': [1, 1, 2], 'B': [3, 2, 1]}, + index=pd.date_range('2000', periods=3)) + result = getattr( + ser.groupby('A', sort=False)['B'].rolling('10D', closed='left'), + func)() + exp_idx = pd.MultiIndex.from_arrays(arrays=[[1, 1, 2], ser.index], + names=('A', None)) + expected = pd.Series(data=[np.nan, 3, np.nan], index=exp_idx, name='B') + tm.assert_series_equal(result, expected) + @pytest.mark.parametrize("input_dtype", ['int', 'float']) @pytest.mark.parametrize("func,closed,expected", [ ('min', 'right', [0.0, 0, 0, 1, 2, 3, 4, 5, 6, 7]),
- [ ] closes #24718 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/24811
2019-01-17T00:14:12Z
2019-01-18T12:22:46Z
2019-01-18T12:22:46Z
2019-01-21T23:18:19Z
TST/CLN: follow-up to #24750
diff --git a/pandas/tests/series/indexing/test_boolean.py b/pandas/tests/series/indexing/test_boolean.py index 89b481b92b73f..9017d13051b88 100644 --- a/pandas/tests/series/indexing/test_boolean.py +++ b/pandas/tests/series/indexing/test_boolean.py @@ -160,28 +160,22 @@ def test_where_unsafe_float(float_dtype): assert_series_equal(s, expected) -@pytest.mark.parametrize("dtype", [np.int64, np.float64]) -def test_where_unsafe_upcast(dtype): - s = Series(np.arange(10), dtype=dtype) - values = [2.5, 3.5, 4.5, 5.5, 6.5] - - mask = s < 5 - expected = Series(values + lrange(5, 10), dtype="float64") - - s[mask] = values - assert_series_equal(s, expected) - - -@pytest.mark.parametrize("dtype", [ - np.int8, np.int16, np.int32, np.float32 +@pytest.mark.parametrize("dtype,expected_dtype", [ + (np.int8, np.float64), + (np.int16, np.float64), + (np.int32, np.float64), + (np.int64, np.float64), + (np.float32, np.float32), + (np.float64, np.float64) ]) -def test_where_upcast(dtype): +def test_where_unsafe_upcast(dtype, expected_dtype): # see gh-9743 s = Series(np.arange(10), dtype=dtype) - mask = s < 5 - values = [2.5, 3.5, 4.5, 5.5, 6.5] + mask = s < 5 + expected = Series(values + lrange(5, 10), dtype=expected_dtype) s[mask] = values + assert_series_equal(s, expected) def test_where_unsafe():
cc @jreback `test_where_unsafe_itemsize_fail` changed to `test_where_upcast` in #24750 this follow-on PR merges `test_where_upcast` into `test_where_unsafe_upcast` to avoid duplication.
https://api.github.com/repos/pandas-dev/pandas/pulls/24810
2019-01-16T21:44:38Z
2019-01-17T12:55:12Z
2019-01-17T12:55:12Z
2019-01-20T22:36:01Z
str.replace('.','') should replace every character? (fix)
diff --git a/doc/source/whatsnew/v0.24.0.rst b/doc/source/whatsnew/v0.24.0.rst index 3685a24d60e74..463142715e311 100644 --- a/doc/source/whatsnew/v0.24.0.rst +++ b/doc/source/whatsnew/v0.24.0.rst @@ -432,6 +432,30 @@ Backwards incompatible API changes Pandas 0.24.0 includes a number of API breaking changes. +Replacing strings using Pattern +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Be sure to perform a replace of literal strings by passing the +regex=False parameter to func:`str.replace`. Mainly when the +pattern is 1 size string (:issue:`24809`) + +Before: + +.. ipython:: python + + s = pd.Series(['A|B|C']) + + result = s.str.replace('|', ' ') + result + +After: + +.. ipython:: python + + s = pd.Series(['A|B|C']) + + result = s.str.replace('|', ' ', regex=False) + result .. _whatsnew_0240.api_breaking.deps: @@ -1645,6 +1669,7 @@ Strings - Bug in :meth:`Index.str.split` was not nan-safe (:issue:`23677`). - Bug :func:`Series.str.contains` not respecting the ``na`` argument for a ``Categorical`` dtype ``Series`` (:issue:`22158`) - Bug in :meth:`Index.str.cat` when the result contained only ``NaN`` (:issue:`24044`) +- Bug in :func:`Series.str.replace` not applying regex in patterns of length 1 (:issue:`24809`) Interval ^^^^^^^^ diff --git a/pandas/core/strings.py b/pandas/core/strings.py index ca79dcd9408d8..f2586602e5498 100644 --- a/pandas/core/strings.py +++ b/pandas/core/strings.py @@ -425,7 +425,7 @@ def str_endswith(arr, pat, na=np.nan): return _na_map(f, arr, na, dtype=bool) -def str_replace(arr, pat, repl, n=-1, case=None, flags=0, regex=True): +def str_replace(arr, pat, repl, n=-1, case=None, flags=0, regex=None): r""" Replace occurrences of pattern/regex in the Series/Index with some other string. 
Equivalent to :meth:`str.replace` or @@ -564,7 +564,7 @@ def str_replace(arr, pat, repl, n=-1, case=None, flags=0, regex=True): # add case flag, if provided if case is False: flags |= re.IGNORECASE - if is_compiled_re or len(pat) > 1 or flags or callable(repl): + if is_compiled_re or pat or flags or callable(repl): n = n if n >= 0 else 0 compiled = re.compile(pat, flags=flags) f = lambda x: compiled.sub(repl=repl, string=x, count=n) @@ -577,6 +577,9 @@ def str_replace(arr, pat, repl, n=-1, case=None, flags=0, regex=True): if callable(repl): raise ValueError("Cannot use a callable replacement when " "regex=False") + if regex==None: + warnings.warn("Warning: Interpreting '%s' as a literal, not a regex... " % pat + + "The default will change in the future.", FutureWarning, stacklevel=3) f = lambda x: x.replace(pat, repl, n) return _na_map(f, arr) @@ -2529,7 +2532,7 @@ def match(self, pat, case=True, flags=0, na=np.nan): return self._wrap_result(result, fill_value=na) @copy(str_replace) - def replace(self, pat, repl, n=-1, case=None, flags=0, regex=True): + def replace(self, pat, repl, n=-1, case=None, flags=0, regex=None): result = str_replace(self._parent, pat, repl, n=n, case=case, flags=flags, regex=regex) return self._wrap_result(result) diff --git a/pandas/tests/test_strings.py b/pandas/tests/test_strings.py index 7cea3be03d1a7..18acf46eb4c71 100644 --- a/pandas/tests/test_strings.py +++ b/pandas/tests/test_strings.py @@ -1008,6 +1008,21 @@ def test_replace(self): values = klass(data) pytest.raises(TypeError, values.str.replace, 'a', repl) + # GH 24804 + def test_replace_single_pattern(self): + values = Series(['abc', '123']) + + result = values.str.replace('.', 'foo', regex=True) + expected = Series(['foofoofoo', 'foofoofoo']) + tm.assert_series_equal(result, expected) + + def test_replace_without_specifying_regex_parameter(self): + values = Series(['a.c']) + + result = values.str.replace('.', 'b') + expected = Series(['abc']) + tm.assert_series_equal(result, 
expected) + def test_replace_callable(self): # GH 15055 values = Series(['fooBAD__barBAD', NA]) @@ -2924,7 +2939,7 @@ def test_pipe_failures(self): tm.assert_series_equal(result, exp) - result = s.str.replace('|', ' ') + result = s.str.replace('|', ' ', regex=False) exp = Series(['A B C']) tm.assert_series_equal(result, exp)
- [ X] closes #24804 - [X ] tests added / passed - [ X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ X] fix replace pattern problem
https://api.github.com/repos/pandas-dev/pandas/pulls/24809
2019-01-16T21:42:14Z
2019-01-24T18:13:08Z
null
2019-01-24T18:13:08Z
ENH Series.getattr for attributes lacking built-in accessors
diff --git a/doc/source/whatsnew/v0.24.0.rst b/doc/source/whatsnew/v0.24.0.rst index d69e948e31e33..7d98ecdcb1bbb 100644 --- a/doc/source/whatsnew/v0.24.0.rst +++ b/doc/source/whatsnew/v0.24.0.rst @@ -200,6 +200,27 @@ is a float. pd.array([1, 2, np.nan]) +.. _whatsnew_0240.enhancements.series_getattr + +Attribute accessor for Series values +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +New method :meth:`Series.getattr` will fetch a specific attribute from all +elements of Series and return an identically indexed :class:`Series`. Useful +for working with Series of objects that do not have built-in accessors. + +.. ipython:: python + + ser = pd.Series([1, 2, 3 + 1j]) + ser.getattr('imag') # like ser.imag + + tzser = pd.Series(pd.date_range('2000', periods=4)) + tzser.getattr('day') # like ser.dt.day + + Point = collections.namedtuple('Point', ['x', 'y']) + ptser = pd.Series([Point(1, 2), Point(3, 7)]) + ptser.getattr('x') + .. _whatsnew_0240.enhancements.read_html: ``read_html`` Enhancements diff --git a/pandas/core/series.py b/pandas/core/series.py index eb412add7bbbb..907e5818eab2e 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -5,6 +5,7 @@ from textwrap import dedent import warnings +import operator import numpy as np @@ -3381,6 +3382,60 @@ def map(self, arg, na_action=None): return self._constructor(new_values, index=self.index).__finalize__(self) + def getattr(self, attr): + """ + Get attribute from values of Series by attr name. + + Used to substitute each value in a Series with one of its attributes. + + .. versionadded:: 0.24.0 + + Parameters + ---------- + attr : str + Name of attribute to fetch from Series values. + + Returns + ------- + Series + Same index as caller. + + See Also + -------- + Series.map: For element-wise operations. + Series.apply : For applying more complex functions on a Series. + + Notes + ----- + Values in Series that do not have the requested attribute are converted + to ``NaN``. 
+ + Examples + -------- + >>> ser = pd.Series([1.0, 2.5 + 0.1j, 3.7 + 2.1j]) + >>> ser.getattr('real') # same as ser.real + 0 1.0 + 1 2.5 + 2 3.7 + dtype: float64 + + For time data + >>> tzser = pd.Series(pd.date_range('2000', periods=2, tz="CET")) + >>> tzser.getattr('tz') + 0 CET + 1 CET + dtype: object + """ + attrgetter = operator.attrgetter(attr) + + def attrgetter_noerr(obj): + try: + return attrgetter(obj) + except AttributeError: + return np.NaN + + return self.map(attrgetter_noerr) + def _gotitem(self, key, ndim, subset=None): """ Sub-classes to define. Return a sliced object. diff --git a/pandas/tests/series/test_apply.py b/pandas/tests/series/test_apply.py index 90cf6916df0d1..a243a209a58db 100644 --- a/pandas/tests/series/test_apply.py +++ b/pandas/tests/series/test_apply.py @@ -1,7 +1,7 @@ # coding=utf-8 # pylint: disable-msg=E1101,W0612 -from collections import Counter, OrderedDict, defaultdict +from collections import Counter, OrderedDict, defaultdict, namedtuple from itertools import chain import numpy as np @@ -410,6 +410,61 @@ def test_agg_cython_table_raises(self, series, func, expected): series.agg(func) +class TestSeriesGetattr(): + + def test_getattr(self): + s = Series([1.5, 3, 5, 7.0]) + result = s.getattr('real') + assert_series_equal(result, s) + + s = Series([1.1 + 0.2j, 4.1 + 1.0j, np.nan, 1.0]) + tm.assert_numpy_array_equal(s.getattr('real').values, s.real) + tm.assert_numpy_array_equal(s.getattr('imag').values, s.imag) + + index, data = tm.getMixedTypeDict() + + dates = Series(data['D']) + tm.assert_series_equal( + dates.map(lambda d: d.day), + dates.getattr('day')) + + tm.assert_series_equal( + dates.map(lambda d: d.resolution.microseconds), + dates.getattr('resolution.microseconds')) + + tdiff = Series(tm.makeTimedeltaIndex()) + tm.assert_series_equal( + tdiff.map(lambda td: td.days), + tdiff.getattr('days')) + + def test_getattr_namedtuple(self): + Point = namedtuple('Point', ['x', 'y']) + ptser = pd.Series([Point(1, 2), Point(3, 
7)]) + tm.assert_series_equal( + ptser.getattr('x'), + ptser.map(lambda pt: pt.x)) + + @pytest.mark.parametrize("index", tm.all_index_generator(10)) + def test_getattr_empty(self, index): + s = Series(index) + result = s.getattr('') + + expected = pd.Series(np.nan, index=s.index) + tm.assert_series_equal(result, expected) + + def test_getattr_na_exclusion(self): + s = Series([1.5, np.nan, 3, np.nan, 5]) + tm.assert_series_equal( + s.getattr('real'), + Series(s.real, index=s.index)) + + def test_getattr_missing_attr(self): + s = Series(list('ABCDEFG')) + result = s.getattr('missing') + exp = pd.Series(np.NaN, index=s.index) + assert_series_equal(result, exp) + + class TestSeriesMap(): def test_map(self, datetime_series):
Adds a `Series.getattr()` method for accessing attributes from a Series of objects that do not have built-in accessors like those in `Series.str` or `Series.dt`. This is faster than mapping/applying a `lambda obj: obj.attr` function because it uses `operator.attrgetter` and is also syntactically concise, similar to other accessors like `series.imag` or `series.dt.date`. - [x] tests added / passed - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [X] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/24808
2019-01-16T19:02:42Z
2019-01-18T12:23:16Z
null
2019-01-18T12:23:25Z
DEPR/API: Non-ns precision in Index constructors
diff --git a/doc/source/whatsnew/v0.24.0.rst b/doc/source/whatsnew/v0.24.0.rst index fbc3c4fe4ce92..ff4aa9968f294 100644 --- a/doc/source/whatsnew/v0.24.0.rst +++ b/doc/source/whatsnew/v0.24.0.rst @@ -1159,6 +1159,7 @@ Other API Changes - :meth:`CategoricalIndex.reindex` now raises a ``ValueError`` if the target index is non-unique and not equal to the current index. It previously only raised if the target index was not of a categorical dtype (:issue:`23963`). - :func:`Series.to_list` and :func:`Index.to_list` are now aliases of ``Series.tolist`` respectively ``Index.tolist`` (:issue:`8826`) - The result of ``SparseSeries.unstack`` is now a :class:`DataFrame` with sparse values, rather than a :class:`SparseDataFrame` (:issue:`24372`). +- :class:`DatetimeIndex` and :class:`TimedeltaIndex` no longer ignore the dtype precision. Passing a non-nanosecond resolution dtype will raise a ``ValueError`` (:issue:`24753`) .. _whatsnew_0240.api.extension: @@ -1259,6 +1260,7 @@ Deprecations - :meth:`Series.nonzero` is deprecated and will be removed in a future version (:issue:`18262`) - Passing an integer to :meth:`Series.fillna` and :meth:`DataFrame.fillna` with ``timedelta64[ns]`` dtypes is deprecated, will raise ``TypeError`` in a future version. Use ``obj.fillna(pd.Timedelta(...))`` instead (:issue:`24694`) - ``Series.cat.categorical``, ``Series.cat.name`` and ``Sersies.cat.index`` have been deprecated. Use the attributes on ``Series.cat`` or ``Series`` directly. (:issue:`24751`). +- Passing a dtype without a precision like ``np.dtype('datetime64')`` or ``timedelta64`` to :class:`Index`, :class:`DatetimeIndex` and :class:`TimedeltaIndex` is now deprecated. Use the nanosecond-precision dtype instead (:issue:`24753`). .. 
_whatsnew_0240.deprecations.datetimelike_int_ops: diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index a2d67efbecbba..f2aeb1c1309de 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- from datetime import datetime, time, timedelta +import textwrap import warnings import numpy as np @@ -1986,6 +1987,15 @@ def _validate_dt64_dtype(dtype): """ if dtype is not None: dtype = pandas_dtype(dtype) + if is_dtype_equal(dtype, np.dtype("M8")): + # no precision, warn + dtype = _NS_DTYPE + msg = textwrap.dedent("""\ + Passing in 'datetime64' dtype with no precision is deprecated + and will raise in a future version. Please pass in + 'datetime64[ns]' instead.""") + warnings.warn(msg, FutureWarning, stacklevel=5) + if ((isinstance(dtype, np.dtype) and dtype != _NS_DTYPE) or not isinstance(dtype, (np.dtype, DatetimeTZDtype))): raise ValueError("Unexpected value for 'dtype': '{dtype}'. " diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py index be1a7097b0e0d..910cb96a86216 100644 --- a/pandas/core/arrays/timedeltas.py +++ b/pandas/core/arrays/timedeltas.py @@ -2,6 +2,7 @@ from __future__ import division from datetime import timedelta +import textwrap import warnings import numpy as np @@ -15,8 +16,8 @@ from pandas.util._decorators import Appender from pandas.core.dtypes.common import ( - _NS_DTYPE, _TD_DTYPE, ensure_int64, is_datetime64_dtype, is_float_dtype, - is_integer_dtype, is_list_like, is_object_dtype, is_scalar, + _NS_DTYPE, _TD_DTYPE, ensure_int64, is_datetime64_dtype, is_dtype_equal, + is_float_dtype, is_integer_dtype, is_list_like, is_object_dtype, is_scalar, is_string_dtype, is_timedelta64_dtype, is_timedelta64_ns_dtype, pandas_dtype) from pandas.core.dtypes.dtypes import DatetimeTZDtype @@ -160,16 +161,8 @@ def __init__(self, values, dtype=_TD_DTYPE, freq=None, copy=False): # nanosecond UTC (or tz-naive) unix timestamps values = 
values.view(_TD_DTYPE) - if values.dtype != _TD_DTYPE: - raise TypeError(_BAD_DTYPE.format(dtype=values.dtype)) - - try: - dtype_mismatch = dtype != _TD_DTYPE - except TypeError: - raise TypeError(_BAD_DTYPE.format(dtype=dtype)) - else: - if dtype_mismatch: - raise TypeError(_BAD_DTYPE.format(dtype=dtype)) + _validate_td64_dtype(values.dtype) + dtype = _validate_td64_dtype(dtype) if freq == "infer": msg = ( @@ -204,9 +197,8 @@ def _simple_new(cls, values, freq=None, dtype=_TD_DTYPE): @classmethod def _from_sequence(cls, data, dtype=_TD_DTYPE, copy=False, freq=None, unit=None): - if dtype != _TD_DTYPE: - raise ValueError("Only timedelta64[ns] dtype is valid.") - + if dtype: + _validate_td64_dtype(dtype) freq, freq_infer = dtl.maybe_infer_freq(freq) data, inferred_freq = sequence_to_td64ns(data, copy=copy, unit=unit) @@ -997,6 +989,22 @@ def objects_to_td64ns(data, unit="ns", errors="raise"): return result.view('timedelta64[ns]') +def _validate_td64_dtype(dtype): + dtype = pandas_dtype(dtype) + if is_dtype_equal(dtype, np.dtype("timedelta64")): + dtype = _TD_DTYPE + msg = textwrap.dedent("""\ + Passing in 'timedelta' dtype with no precision is deprecated + and will raise in a future version. 
Please pass in + 'timedelta64[ns]' instead.""") + warnings.warn(msg, FutureWarning, stacklevel=4) + + if not is_dtype_equal(dtype, _TD_DTYPE): + raise ValueError(_BAD_DTYPE.format(dtype=dtype)) + + return dtype + + def _generate_regular_range(start, end, periods, offset): stride = offset.nanos if periods is None: diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 93091f5125b7c..767da81c5c43a 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -311,10 +311,14 @@ def __new__(cls, data=None, dtype=None, copy=False, name=None, elif (is_timedelta64_dtype(data) or (dtype is not None and is_timedelta64_dtype(dtype))): from pandas import TimedeltaIndex - result = TimedeltaIndex(data, copy=copy, name=name, **kwargs) - if dtype is not None and _o_dtype == dtype: - return Index(result.to_pytimedelta(), dtype=_o_dtype) + if dtype is not None and is_dtype_equal(_o_dtype, dtype): + # Note we can pass copy=False because the .astype below + # will always make a copy + result = TimedeltaIndex(data, copy=False, name=name, **kwargs) + return result.astype(object) else: + result = TimedeltaIndex(data, copy=copy, name=name, + dtype=dtype, **kwargs) return result elif is_period_dtype(data) and not is_object_dtype(dtype): diff --git a/pandas/tests/arrays/test_timedeltas.py b/pandas/tests/arrays/test_timedeltas.py index a8745f78392ca..6b4662ca02e80 100644 --- a/pandas/tests/arrays/test_timedeltas.py +++ b/pandas/tests/arrays/test_timedeltas.py @@ -23,18 +23,18 @@ def test_non_array_raises(self): TimedeltaArray([1, 2, 3]) def test_other_type_raises(self): - with pytest.raises(TypeError, + with pytest.raises(ValueError, match="dtype bool cannot be converted"): TimedeltaArray(np.array([1, 2, 3], dtype='bool')) def test_incorrect_dtype_raises(self): # TODO: why TypeError for 'category' but ValueError for i8? 
- with pytest.raises(TypeError, + with pytest.raises(ValueError, match=r'category cannot be converted ' r'to timedelta64\[ns\]'): TimedeltaArray(np.array([1, 2, 3], dtype='i8'), dtype='category') - with pytest.raises(TypeError, + with pytest.raises(ValueError, match=r"dtype int64 cannot be converted " r"to timedelta64\[ns\]"): TimedeltaArray(np.array([1, 2, 3], dtype='i8'), @@ -52,7 +52,7 @@ def test_copy(self): class TestTimedeltaArray(object): def test_from_sequence_dtype(self): - msg = r"Only timedelta64\[ns\] dtype is valid" + msg = "dtype .*object.* cannot be converted to timedelta64" with pytest.raises(ValueError, match=msg): TimedeltaArray._from_sequence([], dtype=object) diff --git a/pandas/tests/indexes/datetimes/test_construction.py b/pandas/tests/indexes/datetimes/test_construction.py index 7c9ca9da89d53..7ebebbf6dee28 100644 --- a/pandas/tests/indexes/datetimes/test_construction.py +++ b/pandas/tests/indexes/datetimes/test_construction.py @@ -634,6 +634,23 @@ def test_construction_with_nat_and_tzlocal(self): expected = DatetimeIndex([Timestamp('2018', tz=tz), pd.NaT]) tm.assert_index_equal(result, expected) + def test_constructor_no_precision_warns(self): + # GH-24753, GH-24739 + expected = pd.DatetimeIndex(['2000'], dtype='datetime64[ns]') + + # we set the stacklevel for DatetimeIndex + with tm.assert_produces_warning(FutureWarning): + result = pd.DatetimeIndex(['2000'], dtype='datetime64') + tm.assert_index_equal(result, expected) + + with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): + result = pd.Index(['2000'], dtype='datetime64') + tm.assert_index_equal(result, expected) + + def test_constructor_wrong_precision_raises(self): + with pytest.raises(ValueError): + pd.DatetimeIndex(['2000'], dtype='datetime64[us]') + class TestTimeSeries(object): diff --git a/pandas/tests/indexes/timedeltas/test_construction.py b/pandas/tests/indexes/timedeltas/test_construction.py index 76f79e86e6f11..3938d6acad2f0 100644 --- 
a/pandas/tests/indexes/timedeltas/test_construction.py +++ b/pandas/tests/indexes/timedeltas/test_construction.py @@ -180,3 +180,20 @@ def test_constructor_name(self): # GH10025 idx2 = TimedeltaIndex(idx, name='something else') assert idx2.name == 'something else' + + def test_constructor_no_precision_warns(self): + # GH-24753, GH-24739 + expected = pd.TimedeltaIndex(['2000'], dtype='timedelta64[ns]') + + # we set the stacklevel for DatetimeIndex + with tm.assert_produces_warning(FutureWarning): + result = pd.TimedeltaIndex(['2000'], dtype='timedelta64') + tm.assert_index_equal(result, expected) + + with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): + result = pd.Index(['2000'], dtype='timedelta64') + tm.assert_index_equal(result, expected) + + def test_constructor_wrong_precision_raises(self): + with pytest.raises(ValueError): + pd.TimedeltaIndex(['2000'], dtype='timedelta64[us]')
This deprecates passing dtypes without a precision to DatetimeIndex and TimedeltaIndex ```python In [2]: pd.DatetimeIndex(['2000'], dtype='datetime64') /Users/taugspurger/.virtualenvs/pandas-dev/bin/ipython:1: FutureWarning: Passing in 'datetime64' dtype with no precision is deprecated and will raise in a future version. Please pass in 'datetime64[ns]' instead. #!/Users/taugspurger/Envs/pandas-dev/bin/python3 Out[2]: DatetimeIndex(['2000-01-01'], dtype='datetime64[ns]', freq=None) ``` Closes https://github.com/pandas-dev/pandas/issues/24739 Closes https://github.com/pandas-dev/pandas/issues/24753 - [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/24806
2019-01-16T16:13:26Z
2019-01-21T14:18:15Z
2019-01-21T14:18:14Z
2019-01-21T14:18:18Z
Revert unnecessary changes from 22019
diff --git a/doc/source/whatsnew/v0.24.0.rst b/doc/source/whatsnew/v0.24.0.rst index d69e948e31e33..868c5e280d2c3 100644 --- a/doc/source/whatsnew/v0.24.0.rst +++ b/doc/source/whatsnew/v0.24.0.rst @@ -1163,36 +1163,6 @@ data is incompatible with a passed ``dtype=`` (:issue:`15832`) ... OverflowError: Trying to coerce negative values to unsigned integers -.. _whatsnew_0240.api.crosstab_dtypes: - -Crosstab Preserves Dtypes -^^^^^^^^^^^^^^^^^^^^^^^^^ - -:func:`crosstab` will preserve now dtypes in some cases that previously would -cast from integer dtype to floating dtype (:issue:`22019`) - -*Previous Behavior*: - -.. code-block:: ipython - - In [3]: df = pd.DataFrame({'a': [1, 2, 2, 2, 2], 'b': [3, 3, 4, 4, 4], - ...: 'c': [1, 1, np.nan, 1, 1]}) - In [4]: pd.crosstab(df.a, df.b, normalize='columns') - Out[4]: - b 3 4 - a - 1 0.5 0.0 - 2 0.5 1.0 - -*New Behavior*: - -.. code-block:: ipython - - In [3]: df = pd.DataFrame({'a': [1, 2, 2, 2, 2], - ...: 'b': [3, 3, 4, 4, 4], - ...: 'c': [1, 1, np.nan, 1, 1]}) - In [4]: pd.crosstab(df.a, df.b, normalize='columns') - .. 
_whatsnew_0240.api.concat_categorical: Concatenation Changes diff --git a/pandas/tests/frame/test_axis_select_reindex.py b/pandas/tests/frame/test_axis_select_reindex.py index 96f52e5dd17a3..508a68d44bb04 100644 --- a/pandas/tests/frame/test_axis_select_reindex.py +++ b/pandas/tests/frame/test_axis_select_reindex.py @@ -717,7 +717,7 @@ def test_align_int_fill_bug(self): result = df1 - df1.mean() expected = df2 - df2.mean() - assert_frame_equal(result.astype('f8'), expected) + assert_frame_equal(result, expected) def test_align_multiindex(self): # GH 10665 diff --git a/pandas/tests/reshape/test_pivot.py b/pandas/tests/reshape/test_pivot.py index f0d1ad57ba829..7c70f8177d846 100644 --- a/pandas/tests/reshape/test_pivot.py +++ b/pandas/tests/reshape/test_pivot.py @@ -1573,9 +1573,8 @@ def test_crosstab_normalize(self): full_normal) tm.assert_frame_equal(pd.crosstab(df.a, df.b, normalize='index'), row_normal) - tm.assert_frame_equal( - pd.crosstab(df.a, df.b, normalize='columns').astype('f8'), - col_normal) + tm.assert_frame_equal(pd.crosstab(df.a, df.b, normalize='columns'), + col_normal) tm.assert_frame_equal(pd.crosstab(df.a, df.b, normalize=1), pd.crosstab(df.a, df.b, normalize='columns')) tm.assert_frame_equal(pd.crosstab(df.a, df.b, normalize=0), @@ -1608,7 +1607,7 @@ def test_crosstab_normalize(self): tm.assert_frame_equal(pd.crosstab(df.a, df.b, normalize='index', margins=True), row_normal_margins) tm.assert_frame_equal(pd.crosstab(df.a, df.b, normalize='columns', - margins=True).astype('f8'), + margins=True), col_normal_margins) tm.assert_frame_equal(pd.crosstab(df.a, df.b, normalize=True, margins=True), all_normal_margins)
At the time, https://github.com/pandas-dev/pandas/pull/22019 seemed to break API in a few places. Since then, those API breaking changes have disappeared, so the changes to the tests and the release note are no longer necessary. This restores the tests to the 0.23.4 version. Closes https://github.com/pandas-dev/pandas/issues/24789
https://api.github.com/repos/pandas-dev/pandas/pulls/24802
2019-01-16T14:31:03Z
2019-01-16T16:02:00Z
2019-01-16T16:01:59Z
2019-01-16T16:02:04Z
Whatsnew reorganization
diff --git a/doc/source/whatsnew/v0.24.0.rst b/doc/source/whatsnew/v0.24.0.rst index 868c5e280d2c3..3685a24d60e74 100644 --- a/doc/source/whatsnew/v0.24.0.rst +++ b/doc/source/whatsnew/v0.24.0.rst @@ -10,92 +10,16 @@ What's New in 0.24.0 (January XX, 2019) {{ header }} - These are the changes in pandas 0.24.0. See :ref:`release` for a full changelog including other versions of pandas. -.. _whatsnew_0240.enhancements: +Highlights include -New features -~~~~~~~~~~~~ -- :func:`merge` now directly allows merge between objects of type ``DataFrame`` and named ``Series``, without the need to convert the ``Series`` object into a ``DataFrame`` beforehand (:issue:`21220`) -- ``ExcelWriter`` now accepts ``mode`` as a keyword argument, enabling append to existing workbooks when using the ``openpyxl`` engine (:issue:`3441`) -- ``FrozenList`` has gained the ``.union()`` and ``.difference()`` methods. This functionality greatly simplifies groupby's that rely on explicitly excluding certain columns. See :ref:`Splitting an object into groups <groupby.split>` for more information (:issue:`15475`, :issue:`15506`). -- :func:`DataFrame.to_parquet` now accepts ``index`` as an argument, allowing - the user to override the engine's default behavior to include or omit the - dataframe's indexes from the resulting Parquet file. (:issue:`20768`) -- :meth:`DataFrame.corr` and :meth:`Series.corr` now accept a callable for generic calculation methods of correlation, e.g. histogram intersection (:issue:`22684`) -- :func:`DataFrame.to_string` now accepts ``decimal`` as an argument, allowing the user to specify which decimal separator should be used in the output. (:issue:`23614`) -- :func:`read_feather` now accepts ``columns`` as an argument, allowing the user to specify which columns should be read. (:issue:`24025`) -- :func:`DataFrame.to_html` now accepts ``render_links`` as an argument, allowing the user to generate HTML with links to any URLs that appear in the DataFrame. 
- See the :ref:`section on writing HTML <io.html>` in the IO docs for example usage. (:issue:`2679`) -- :func:`pandas.read_csv` now supports pandas extension types as an argument to ``dtype``, allowing the user to use pandas extension types when reading CSVs. (:issue:`23228`) -- :meth:`DataFrame.shift` :meth:`Series.shift`, :meth:`ExtensionArray.shift`, :meth:`SparseArray.shift`, :meth:`Period.shift`, :meth:`GroupBy.shift`, :meth:`Categorical.shift`, :meth:`NDFrame.shift` and :meth:`Block.shift` now accept `fill_value` as an argument, allowing the user to specify a value which will be used instead of NA/NaT in the empty periods. (:issue:`15486`) - -.. _whatsnew_0240.values_api: - -Accessing the values in a Series or Index -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -:attr:`Series.array` and :attr:`Index.array` have been added for extracting the array backing a -``Series`` or ``Index``. (:issue:`19954`, :issue:`23623`) - -.. ipython:: python - - idx = pd.period_range('2000', periods=4) - idx.array - pd.Series(idx).array - -Historically, this would have been done with ``series.values``, but with -``.values`` it was unclear whether the returned value would be the actual array, -some transformation of it, or one of pandas custom arrays (like -``Categorical``). For example, with :class:`PeriodIndex`, ``.values`` generates -a new ndarray of period objects each time. - -.. ipython:: python - - id(idx.values) - id(idx.values) - -If you need an actual NumPy array, use :meth:`Series.to_numpy` or :meth:`Index.to_numpy`. - -.. ipython:: python - - idx.to_numpy() - pd.Series(idx).to_numpy() - -For Series and Indexes backed by normal NumPy arrays, :attr:`Series.array` will return a -new :class:`arrays.PandasArray`, which is a thin (no-copy) wrapper around a -:class:`numpy.ndarray`. :class:`arrays.PandasArray` isn't especially useful on its own, -but it does provide the same interface as any extension array defined in pandas or by -a third-party library. - -.. 
ipython:: python - - ser = pd.Series([1, 2, 3]) - ser.array - ser.to_numpy() - -We haven't removed or deprecated :attr:`Series.values` or :attr:`DataFrame.values`, but we -highly recommend and using ``.array`` or ``.to_numpy()`` instead. - -See :ref:`Dtypes <basics.dtypes>` and :ref:`Attributes and Underlying Data <basics.attrs>` for more. - -.. _whatsnew_0240.enhancements.extension_array_operators: - -``ExtensionArray`` operator support -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -A ``Series`` based on an ``ExtensionArray`` now supports arithmetic and comparison -operators (:issue:`19577`). There are two approaches for providing operator support for an ``ExtensionArray``: - -1. Define each of the operators on your ``ExtensionArray`` subclass. -2. Use an operator implementation from pandas that depends on operators that are already defined - on the underlying elements (scalars) of the ``ExtensionArray``. - -See the :ref:`ExtensionArray Operator Support -<extending.extension.operator>` documentation section for details on both -ways of adding operator support. +* :ref:`Optional Nullable Integer Support <whatsnew_0240.enhancements.intna>` +* :ref:`New APIs for accessing the array backing a Series or Index <whatsnew_0240.values_api>` +* :ref:`A new top-level method for creating arrays <whatsnew_0240.enhancements.array>` +* :ref:`Store Interval and Period data in a Series or DataFrame <whatsnew_0240.enhancements.interval>` +* :ref:`Support for joining on two MultiIndexes <whatsnew_0240.enhancements.join_with_two_multiindexes>` .. _whatsnew_0240.enhancements.intna: @@ -162,6 +86,57 @@ Reduction and groupby operations such as ``sum`` work. See :ref:`integer_na` for more. + +.. _whatsnew_0240.values_api: + +Accessing the values in a Series or Index +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +:attr:`Series.array` and :attr:`Index.array` have been added for extracting the array backing a +``Series`` or ``Index``. (:issue:`19954`, :issue:`23623`) + +.. 
ipython:: python + + idx = pd.period_range('2000', periods=4) + idx.array + pd.Series(idx).array + +Historically, this would have been done with ``series.values``, but with +``.values`` it was unclear whether the returned value would be the actual array, +some transformation of it, or one of pandas custom arrays (like +``Categorical``). For example, with :class:`PeriodIndex`, ``.values`` generates +a new ndarray of period objects each time. + +.. ipython:: python + + id(idx.values) + id(idx.values) + +If you need an actual NumPy array, use :meth:`Series.to_numpy` or :meth:`Index.to_numpy`. + +.. ipython:: python + + idx.to_numpy() + pd.Series(idx).to_numpy() + +For Series and Indexes backed by normal NumPy arrays, :attr:`Series.array` will return a +new :class:`arrays.PandasArray`, which is a thin (no-copy) wrapper around a +:class:`numpy.ndarray`. :class:`arrays.PandasArray` isn't especially useful on its own, +but it does provide the same interface as any extension array defined in pandas or by +a third-party library. + +.. ipython:: python + + ser = pd.Series([1, 2, 3]) + ser.array + ser.to_numpy() + +We haven't removed or deprecated :attr:`Series.values` or :attr:`DataFrame.values`, but we +highly recommend and using ``.array`` or ``.to_numpy()`` instead. + +See :ref:`Dtypes <basics.dtypes>` and :ref:`Attributes and Underlying Data <basics.attrs>` for more. + + .. _whatsnew_0240.enhancements.array: Array @@ -200,46 +175,6 @@ is a float. pd.array([1, 2, np.nan]) -.. _whatsnew_0240.enhancements.read_html: - -``read_html`` Enhancements -^^^^^^^^^^^^^^^^^^^^^^^^^^ - -:func:`read_html` previously ignored ``colspan`` and ``rowspan`` attributes. -Now it understands them, treating them as sequences of cells with the same -value. (:issue:`17054`) - -.. 
ipython:: python - - result = pd.read_html(""" - <table> - <thead> - <tr> - <th>A</th><th>B</th><th>C</th> - </tr> - </thead> - <tbody> - <tr> - <td colspan="2">1</td><td>2</td> - </tr> - </tbody> - </table>""") - -*Previous Behavior*: - -.. code-block:: ipython - - In [13]: result - Out [13]: - [ A B C - 0 1 2 NaN] - -*New Behavior*: - -.. ipython:: python - - result - .. _whatsnew_0240.enhancements.interval: @@ -286,27 +221,6 @@ from the ``Series``: for more. -New ``Styler.pipe()`` method -^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -The :class:`~pandas.io.formats.style.Styler` class has gained a -:meth:`~pandas.io.formats.style.Styler.pipe` method. This provides a -convenient way to apply users' predefined styling functions, and can help reduce -"boilerplate" when using DataFrame styling functionality repeatedly within a notebook. (:issue:`23229`) - -.. ipython:: python - - df = pd.DataFrame({'N': [1250, 1500, 1750], 'X': [0.25, 0.35, 0.50]}) - - def format_and_align(styler): - return (styler.format({'N': '{:,}', 'X': '{:.1%}'}) - .set_properties(**{'text-align': 'right'})) - - df.style.pipe(format_and_align).set_caption('Summary of results.') - -Similar methods already exist for other classes in pandas, including :meth:`DataFrame.pipe`, -:meth:`pandas.core.groupby.GroupBy.pipe`, and :meth:`pandas.core.resample.Resampler.pipe`. - - .. _whatsnew_0240.enhancements.join_with_two_multiindexes: Joining with two multi-indexes @@ -343,6 +257,83 @@ For earlier versions this can be done using the following. on=['key'], how='inner').set_index(['key', 'X', 'Y']) +.. _whatsnew_0240.enhancements.extension_array_operators: + +``ExtensionArray`` operator support +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +A ``Series`` based on an ``ExtensionArray`` now supports arithmetic and comparison +operators (:issue:`19577`). There are two approaches for providing operator support for an ``ExtensionArray``: + +1. Define each of the operators on your ``ExtensionArray`` subclass. +2. 
Use an operator implementation from pandas that depends on operators that are already defined + on the underlying elements (scalars) of the ``ExtensionArray``. + +See the :ref:`ExtensionArray Operator Support +<extending.extension.operator>` documentation section for details on both +ways of adding operator support. + +.. _whatsnew_0240.enhancements.read_html: + +``read_html`` Enhancements +^^^^^^^^^^^^^^^^^^^^^^^^^^ + +:func:`read_html` previously ignored ``colspan`` and ``rowspan`` attributes. +Now it understands them, treating them as sequences of cells with the same +value. (:issue:`17054`) + +.. ipython:: python + + result = pd.read_html(""" + <table> + <thead> + <tr> + <th>A</th><th>B</th><th>C</th> + </tr> + </thead> + <tbody> + <tr> + <td colspan="2">1</td><td>2</td> + </tr> + </tbody> + </table>""") + +*Previous Behavior*: + +.. code-block:: ipython + + In [13]: result + Out [13]: + [ A B C + 0 1 2 NaN] + +*New Behavior*: + +.. ipython:: python + + result + + +New ``Styler.pipe()`` method +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +The :class:`~pandas.io.formats.style.Styler` class has gained a +:meth:`~pandas.io.formats.style.Styler.pipe` method. This provides a +convenient way to apply users' predefined styling functions, and can help reduce +"boilerplate" when using DataFrame styling functionality repeatedly within a notebook. (:issue:`23229`) + +.. ipython:: python + + df = pd.DataFrame({'N': [1250, 1500, 1750], 'X': [0.25, 0.35, 0.50]}) + + def format_and_align(styler): + return (styler.format({'N': '{:,}', 'X': '{:.1%}'}) + .set_properties(**{'text-align': 'right'})) + + df.style.pipe(format_and_align).set_caption('Summary of results.') + +Similar methods already exist for other classes in pandas, including :meth:`DataFrame.pipe`, +:meth:`pandas.core.groupby.GroupBy.pipe`, and :meth:`pandas.core.resample.Resampler.pipe`. + .. 
_whatsnew_0240.enhancements.rename_axis: Renaming names in a MultiIndex @@ -366,12 +357,24 @@ Example: See the :ref:`Advanced documentation on renaming<advanced.index_names>` for more details. - .. _whatsnew_0240.enhancements.other: Other Enhancements ^^^^^^^^^^^^^^^^^^ +- :func:`merge` now directly allows merge between objects of type ``DataFrame`` and named ``Series``, without the need to convert the ``Series`` object into a ``DataFrame`` beforehand (:issue:`21220`) +- ``ExcelWriter`` now accepts ``mode`` as a keyword argument, enabling append to existing workbooks when using the ``openpyxl`` engine (:issue:`3441`) +- ``FrozenList`` has gained the ``.union()`` and ``.difference()`` methods. This functionality greatly simplifies groupby's that rely on explicitly excluding certain columns. See :ref:`Splitting an object into groups <groupby.split>` for more information (:issue:`15475`, :issue:`15506`). +- :func:`DataFrame.to_parquet` now accepts ``index`` as an argument, allowing + the user to override the engine's default behavior to include or omit the + dataframe's indexes from the resulting Parquet file. (:issue:`20768`) +- :meth:`DataFrame.corr` and :meth:`Series.corr` now accept a callable for generic calculation methods of correlation, e.g. histogram intersection (:issue:`22684`) +- :func:`DataFrame.to_string` now accepts ``decimal`` as an argument, allowing the user to specify which decimal separator should be used in the output. (:issue:`23614`) +- :func:`read_feather` now accepts ``columns`` as an argument, allowing the user to specify which columns should be read. (:issue:`24025`) +- :func:`DataFrame.to_html` now accepts ``render_links`` as an argument, allowing the user to generate HTML with links to any URLs that appear in the DataFrame. + See the :ref:`section on writing HTML <io.html>` in the IO docs for example usage. 
(:issue:`2679`) +- :func:`pandas.read_csv` now supports pandas extension types as an argument to ``dtype``, allowing the user to use pandas extension types when reading CSVs. (:issue:`23228`) +- :meth:`DataFrame.shift` :meth:`Series.shift`, :meth:`ExtensionArray.shift`, :meth:`SparseArray.shift`, :meth:`Period.shift`, :meth:`GroupBy.shift`, :meth:`Categorical.shift`, :meth:`NDFrame.shift` and :meth:`Block.shift` now accept `fill_value` as an argument, allowing the user to specify a value which will be used instead of NA/NaT in the empty periods. (:issue:`15486`) - :func:`to_datetime` now supports the ``%Z`` and ``%z`` directive when passed into ``format`` (:issue:`13486`) - :func:`Series.mode` and :func:`DataFrame.mode` now support the ``dropna`` parameter which can be used to specify whether ``NaN``/``NaT`` values should be considered (:issue:`17534`) - :func:`DataFrame.to_csv` and :func:`Series.to_csv` now support the ``compression`` keyword when a file handle is passed. (:issue:`21227`) @@ -427,42 +430,8 @@ Other Enhancements Backwards incompatible API changes ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- A newly constructed empty :class:`DataFrame` with integer as the ``dtype`` will now only be cast to ``float64`` if ``index`` is specified (:issue:`22858`) -- :meth:`Series.str.cat` will now raise if ``others`` is a ``set`` (:issue:`23009`) -- Passing scalar values to :class:`DatetimeIndex` or :class:`TimedeltaIndex` will now raise ``TypeError`` instead of ``ValueError`` (:issue:`23539`) -- ``max_rows`` and ``max_cols`` parameters removed from :class:`HTMLFormatter` since truncation is handled by :class:`DataFrameFormatter` (:issue:`23818`) -- :func:`read_csv` will now raise a ``ValueError`` if a column with missing values is declared as having dtype ``bool`` (:issue:`20591`) -- The column order of the resultant :class:`DataFrame` from :meth:`MultiIndex.to_frame` is now guaranteed to match the :attr:`MultiIndex.names` order. 
(:issue:`22420`) -- Incorrectly passing a :class:`DatetimeIndex` to :meth:`MultiIndex.from_tuples`, rather than a sequence of tuples, now raises a ``TypeError`` rather than a ``ValueError`` (:issue:`24024`) -- :func:`pd.offsets.generate_range` argument ``time_rule`` has been removed; use ``offset`` instead (:issue:`24157`) -- In 0.23.x, pandas would raise a ``ValueError`` on a merge of a numeric column (e.g. ``int`` dtyped column) and an ``object`` dtyped column (:issue:`9780`). We have re-enabled the ability to merge ``object`` and other dtypes; pandas will still raise on a merge between a numeric and an ``object`` dtyped column that is composed only of strings (:issue:`21681`) - -Percentage change on groupby -^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Fixed a bug where calling :func:`pancas.core.groupby.SeriesGroupBy.pct_change` or :func:`pandas.core.groupby.DataFrameGroupBy.pct_change` would previously work across groups when calculating the percent change, where it now correctly works per group (:issue:`21200`, :issue:`21235`). - -.. ipython:: python - - df = pd.DataFrame({'grp': ['a', 'a', 'b'], 'foo': [1.0, 1.1, 2.2]}) - df - -Previous behavior: +Pandas 0.24.0 includes a number of API breaking changes. -.. code-block:: ipython - - In [1]: df.groupby('grp').pct_change() - Out[1]: - foo - 0 NaN - 1 0.1 - 2 1.0 - -New behavior: - -.. ipython:: python - - df.groupby('grp').pct_change() .. _whatsnew_0240.api_breaking.deps: @@ -716,8 +685,8 @@ is the case with :attr:`Period.end_time`, for example .. _whatsnew_0240.api_breaking.datetime_unique: -Datetime w/tz and unique -^^^^^^^^^^^^^^^^^^^^^^^^ +Series.unique for Timezone-Aware Data +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ The return type of :meth:`Series.unique` for datetime with timezone values has changed from an :class:`numpy.ndarray` of :class:`Timestamp` objects to a :class:`arrays.DatetimeArray` (:issue:`24024`). @@ -1086,57 +1055,6 @@ broadcast. 
(:issue:`23000`) df + arr[[0], :] # 1 row, 2 columns df + arr[:, [1]] # 1 column, 3 rows - -.. _whatsnew_0240.api.extension: - -ExtensionType Changes -^^^^^^^^^^^^^^^^^^^^^ - - **Equality and Hashability** - -Pandas now requires that extension dtypes be hashable. The base class implements -a default ``__eq__`` and ``__hash__``. If you have a parametrized dtype, you should -update the ``ExtensionDtype._metadata`` tuple to match the signature of your -``__init__`` method. See :class:`pandas.api.extensions.ExtensionDtype` for more (:issue:`22476`). - -**Reshaping changes** - -- :meth:`~pandas.api.types.ExtensionArray.dropna` has been added (:issue:`21185`) -- :meth:`~pandas.api.types.ExtensionArray.repeat` has been added (:issue:`24349`) -- The ``ExtensionArray`` constructor, ``_from_sequence`` now take the keyword arg ``copy=False`` (:issue:`21185`) -- :meth:`pandas.api.extensions.ExtensionArray.shift` added as part of the basic ``ExtensionArray`` interface (:issue:`22387`). -- :meth:`~pandas.api.types.ExtensionArray.searchsorted` has been added (:issue:`24350`) -- Support for reduction operations such as ``sum``, ``mean`` via opt-in base class method override (:issue:`22762`) -- :func:`ExtensionArray.isna` is allowed to return an ``ExtensionArray`` (:issue:`22325`). - -**Dtype changes** - -- ``ExtensionDtype`` has gained the ability to instantiate from string dtypes, e.g. ``decimal`` would instantiate a registered ``DecimalDtype``; furthermore - the ``ExtensionDtype`` has gained the method ``construct_array_type`` (:issue:`21185`) -- Added ``ExtensionDtype._is_numeric`` for controlling whether an extension dtype is considered numeric (:issue:`22290`). 
-- Added :meth:`pandas.api.types.register_extension_dtype` to register an extension type with pandas (:issue:`22664`) -- Updated the ``.type`` attribute for ``PeriodDtype``, ``DatetimeTZDtype``, and ``IntervalDtype`` to be instances of the dtype (``Period``, ``Timestamp``, and ``Interval`` respectively) (:issue:`22938`) - -**Other changes** - -- A default repr for :class:`pandas.api.extensions.ExtensionArray` is now provided (:issue:`23601`). -- An ``ExtensionArray`` with a boolean dtype now works correctly as a boolean indexer. :meth:`pandas.api.types.is_bool_dtype` now properly considers them boolean (:issue:`22326`) - -**Bug Fixes** - -- Bug in :meth:`Series.get` for ``Series`` using ``ExtensionArray`` and integer index (:issue:`21257`) -- :meth:`~Series.shift` now dispatches to :meth:`ExtensionArray.shift` (:issue:`22386`) -- :meth:`Series.combine()` works correctly with :class:`~pandas.api.extensions.ExtensionArray` inside of :class:`Series` (:issue:`20825`) -- :meth:`Series.combine()` with scalar argument now works for any function type (:issue:`21248`) -- :meth:`Series.astype` and :meth:`DataFrame.astype` now dispatch to :meth:`ExtensionArray.astype` (:issue:`21185`). -- Slicing a single row of a ``DataFrame`` with multiple ExtensionArrays of the same type now preserves the dtype, rather than coercing to object (:issue:`22784`) -- Bug when concatenating multiple ``Series`` with different extension dtypes not casting to object dtype (:issue:`22994`) -- Series backed by an ``ExtensionArray`` now work with :func:`util.hash_pandas_object` (:issue:`23066`) -- :meth:`DataFrame.stack` no longer converts to object dtype for DataFrames where each column has the same extension dtype. The output Series will have the same dtype as the columns (:issue:`23077`). -- :meth:`Series.unstack` and :meth:`DataFrame.unstack` no longer convert extension arrays to object-dtype ndarrays. 
Each column in the output ``DataFrame`` will now have the same dtype as the input (:issue:`23077`). -- Bug when grouping :meth:`Dataframe.groupby()` and aggregating on ``ExtensionArray`` it was not returning the actual ``ExtensionArray`` dtype (:issue:`23227`). -- Bug in :func:`pandas.merge` when merging on an extension array-backed column (:issue:`23020`). - .. _whatsnew_0240.api.incompatibilities: Series and Index Data-Dtype Incompatibilities @@ -1212,6 +1130,15 @@ Datetimelike API Changes Other API Changes ^^^^^^^^^^^^^^^^^ +- A newly constructed empty :class:`DataFrame` with integer as the ``dtype`` will now only be cast to ``float64`` if ``index`` is specified (:issue:`22858`) +- :meth:`Series.str.cat` will now raise if ``others`` is a ``set`` (:issue:`23009`) +- Passing scalar values to :class:`DatetimeIndex` or :class:`TimedeltaIndex` will now raise ``TypeError`` instead of ``ValueError`` (:issue:`23539`) +- ``max_rows`` and ``max_cols`` parameters removed from :class:`HTMLFormatter` since truncation is handled by :class:`DataFrameFormatter` (:issue:`23818`) +- :func:`read_csv` will now raise a ``ValueError`` if a column with missing values is declared as having dtype ``bool`` (:issue:`20591`) +- The column order of the resultant :class:`DataFrame` from :meth:`MultiIndex.to_frame` is now guaranteed to match the :attr:`MultiIndex.names` order. (:issue:`22420`) +- Incorrectly passing a :class:`DatetimeIndex` to :meth:`MultiIndex.from_tuples`, rather than a sequence of tuples, now raises a ``TypeError`` rather than a ``ValueError`` (:issue:`24024`) +- :func:`pd.offsets.generate_range` argument ``time_rule`` has been removed; use ``offset`` instead (:issue:`24157`) +- In 0.23.x, pandas would raise a ``ValueError`` on a merge of a numeric column (e.g. ``int`` dtyped column) and an ``object`` dtyped column (:issue:`9780`). 
We have re-enabled the ability to merge ``object`` and other dtypes; pandas will still raise on a merge between a numeric and an ``object`` dtyped column that is composed only of strings (:issue:`21681`) - Accessing a level of a ``MultiIndex`` with a duplicate name (e.g. in :meth:`~MultiIndex.get_level_values`) now raises a ``ValueError`` instead of a ``KeyError`` (:issue:`21678`). - Invalid construction of ``IntervalDtype`` will now always raise a ``TypeError`` rather than a ``ValueError`` if the subdtype is invalid (:issue:`21185`) @@ -1233,6 +1160,58 @@ Other API Changes - :func:`Series.to_list` and :func:`Index.to_list` are now aliases of ``Series.tolist`` respectively ``Index.tolist`` (:issue:`8826`) - The result of ``SparseSeries.unstack`` is now a :class:`DataFrame` with sparse values, rather than a :class:`SparseDataFrame` (:issue:`24372`). + +.. _whatsnew_0240.api.extension: + +ExtensionType Changes +^^^^^^^^^^^^^^^^^^^^^ + +**Equality and Hashability** + +Pandas now requires that extension dtypes be hashable. The base class implements +a default ``__eq__`` and ``__hash__``. If you have a parametrized dtype, you should +update the ``ExtensionDtype._metadata`` tuple to match the signature of your +``__init__`` method. See :class:`pandas.api.extensions.ExtensionDtype` for more (:issue:`22476`). + +**Reshaping changes** + +- :meth:`~pandas.api.types.ExtensionArray.dropna` has been added (:issue:`21185`) +- :meth:`~pandas.api.types.ExtensionArray.repeat` has been added (:issue:`24349`) +- The ``ExtensionArray`` constructor, ``_from_sequence`` now take the keyword arg ``copy=False`` (:issue:`21185`) +- :meth:`pandas.api.extensions.ExtensionArray.shift` added as part of the basic ``ExtensionArray`` interface (:issue:`22387`). 
+- :meth:`~pandas.api.types.ExtensionArray.searchsorted` has been added (:issue:`24350`) +- Support for reduction operations such as ``sum``, ``mean`` via opt-in base class method override (:issue:`22762`) +- :func:`ExtensionArray.isna` is allowed to return an ``ExtensionArray`` (:issue:`22325`). + +**Dtype changes** + +- ``ExtensionDtype`` has gained the ability to instantiate from string dtypes, e.g. ``decimal`` would instantiate a registered ``DecimalDtype``; furthermore + the ``ExtensionDtype`` has gained the method ``construct_array_type`` (:issue:`21185`) +- Added ``ExtensionDtype._is_numeric`` for controlling whether an extension dtype is considered numeric (:issue:`22290`). +- Added :meth:`pandas.api.types.register_extension_dtype` to register an extension type with pandas (:issue:`22664`) +- Updated the ``.type`` attribute for ``PeriodDtype``, ``DatetimeTZDtype``, and ``IntervalDtype`` to be instances of the dtype (``Period``, ``Timestamp``, and ``Interval`` respectively) (:issue:`22938`) + +**Other changes** + +- A default repr for :class:`pandas.api.extensions.ExtensionArray` is now provided (:issue:`23601`). +- An ``ExtensionArray`` with a boolean dtype now works correctly as a boolean indexer. :meth:`pandas.api.types.is_bool_dtype` now properly considers them boolean (:issue:`22326`) + +**Bug Fixes** + +- Bug in :meth:`Series.get` for ``Series`` using ``ExtensionArray`` and integer index (:issue:`21257`) +- :meth:`~Series.shift` now dispatches to :meth:`ExtensionArray.shift` (:issue:`22386`) +- :meth:`Series.combine()` works correctly with :class:`~pandas.api.extensions.ExtensionArray` inside of :class:`Series` (:issue:`20825`) +- :meth:`Series.combine()` with scalar argument now works for any function type (:issue:`21248`) +- :meth:`Series.astype` and :meth:`DataFrame.astype` now dispatch to :meth:`ExtensionArray.astype` (:issue:`21185`). 
+- Slicing a single row of a ``DataFrame`` with multiple ExtensionArrays of the same type now preserves the dtype, rather than coercing to object (:issue:`22784`) +- Bug when concatenating multiple ``Series`` with different extension dtypes not casting to object dtype (:issue:`22994`) +- Series backed by an ``ExtensionArray`` now work with :func:`util.hash_pandas_object` (:issue:`23066`) +- :meth:`DataFrame.stack` no longer converts to object dtype for DataFrames where each column has the same extension dtype. The output Series will have the same dtype as the columns (:issue:`23077`). +- :meth:`Series.unstack` and :meth:`DataFrame.unstack` no longer convert extension arrays to object-dtype ndarrays. Each column in the output ``DataFrame`` will now have the same dtype as the input (:issue:`23077`). +- Bug when grouping :meth:`Dataframe.groupby()` and aggregating on ``ExtensionArray`` it was not returning the actual ``ExtensionArray`` dtype (:issue:`23227`). +- Bug in :func:`pandas.merge` when merging on an extension array-backed column (:issue:`23020`). + + .. _whatsnew_0240.deprecations: Deprecations @@ -1800,6 +1779,7 @@ Groupby/Resample/Rolling - Calling :meth:`pandas.core.groupby.GroupBy.rank` with empty groups and ``pct=True`` was raising a ``ZeroDivisionError`` (:issue:`22519`) - Bug in :meth:`DataFrame.resample` when resampling ``NaT`` in ``TimeDeltaIndex`` (:issue:`13223`). - Bug in :meth:`DataFrame.groupby` did not respect the ``observed`` argument when selecting a column and instead always used ``observed=False`` (:issue:`23970`) +- Bug in :func:`pandas.core.groupby.SeriesGroupBy.pct_change` or :func:`pandas.core.groupby.DataFrameGroupBy.pct_change` would previously work across groups when calculating the percent change, where it now correctly works per group (:issue:`21200`, :issue:`21235`). - Bug preventing hash table creation with very large number (2^32) of rows (:issue:`22805`) Reshaping
Changes 1. Adds a highlights section (open to suggestions on what should be added / removed from here) 2. Reorders the enhcancements section to be 1. Highlighted features 2. Other features (sections) 3. List of other new features (one-liners) 3. Reorders the "Backwards incompatible changes" section. We had two lists of "other incompatible changes" that I've merged into one at the end of the section. 4. Moved extension type changes out of the "Backwards incompatible changes section". cc @jorisvandenbossche
https://api.github.com/repos/pandas-dev/pandas/pulls/24799
2019-01-16T13:23:18Z
2019-01-16T16:23:25Z
2019-01-16T16:23:25Z
2019-01-24T14:42:06Z
TST: Add test for C parser handling binary mode
diff --git a/pandas/tests/io/parser/test_c_parser_only.py b/pandas/tests/io/parser/test_c_parser_only.py index a405617b4132d..c089a189ae551 100644 --- a/pandas/tests/io/parser/test_c_parser_only.py +++ b/pandas/tests/io/parser/test_c_parser_only.py @@ -575,3 +575,17 @@ def test_file_handles_mmap(c_parser_only, csv1): if PY3: assert not m.closed m.close() + + +def test_file_binary_mode(c_parser_only): + # see gh-23779 + parser = c_parser_only + expected = DataFrame([[1, 2, 3], [4, 5, 6]]) + + with tm.ensure_clean() as path: + with open(path, "w") as f: + f.write("1,2,3\n4,5,6") + + with open(path, "rb") as f: + result = parser.read_csv(f, header=None) + tm.assert_frame_equal(result, expected)
Python's native CSV library doesn't accept such files, but we do for the C parser. Closes #23779.
https://api.github.com/repos/pandas-dev/pandas/pulls/24797
2019-01-16T10:49:52Z
2019-01-16T21:28:51Z
2019-01-16T21:28:50Z
2019-01-16T21:28:51Z
PERF: support parallel calculation of nancorr
diff --git a/pandas/_libs/algos.pyx b/pandas/_libs/algos.pyx index b3c519ab99b6e..2de8573b13b91 100644 --- a/pandas/_libs/algos.pyx +++ b/pandas/_libs/algos.pyx @@ -2,10 +2,12 @@ import cython from cython import Py_ssize_t +from cython.parallel import prange from libc.stdlib cimport malloc, free from libc.string cimport memmove from libc.math cimport fabs, sqrt +from cpython cimport bool import numpy as np cimport numpy as cnp @@ -230,14 +232,15 @@ def kth_smallest(numeric[:] a, Py_ssize_t k) -> numeric: @cython.boundscheck(False) @cython.wraparound(False) -def nancorr(ndarray[float64_t, ndim=2] mat, bint cov=0, minp=None): +def nancorr(float64_t[:, :] mat, bint cov=0, minp=None, bool parallel=False): cdef: Py_ssize_t i, j, xi, yi, N, K bint minpv - ndarray[float64_t, ndim=2] result - ndarray[uint8_t, ndim=2] mask + float64_t[:, :] result + uint8_t[:, :] mask int64_t nobs = 0 float64_t vx, vy, sumx, sumy, sumxx, sumyy, meanx, meany, divisor + int64_t blah = 0 N, K = (<object>mat).shape @@ -249,44 +252,82 @@ def nancorr(ndarray[float64_t, ndim=2] mat, bint cov=0, minp=None): result = np.empty((K, K), dtype=np.float64) mask = np.isfinite(mat).view(np.uint8) - with nogil: - for xi in range(K): - for yi in range(xi + 1): - nobs = sumxx = sumyy = sumx = sumy = 0 - for i in range(N): - if mask[i, xi] and mask[i, yi]: - vx = mat[i, xi] - vy = mat[i, yi] - nobs += 1 - sumx += vx - sumy += vy + if parallel: + with nogil: + for xi in prange(K, schedule='dynamic'): + nancorr_single_row(mat, N, K, result, xi, mask, minpv, cov) + else: + with nogil: + for xi in range(K): + nancorr_single_row(mat, N, K, result, xi, mask, minpv, cov) - if nobs < minpv: - result[xi, yi] = result[yi, xi] = NaN - else: - meanx = sumx / nobs - meany = sumy / nobs + return np.asarray(result) - # now the cov numerator - sumx = 0 - for i in range(N): - if mask[i, xi] and mask[i, yi]: - vx = mat[i, xi] - meanx - vy = mat[i, yi] - meany +@cython.boundscheck(False) +@cython.wraparound(False) +cdef void 
nancorr_single_row(float64_t[:, :] mat, + Py_ssize_t N, + Py_ssize_t K, + float64_t[:, :] result, + Py_ssize_t xi, + uint8_t[:, :] mask, + bint minpv, + bint cov=0) nogil: + for yi in range(xi + 1): + nancorr_single(mat, N, K, result, xi, yi, mask, minpv, cov) - sumx += vx * vy - sumxx += vx * vx - sumyy += vy * vy - divisor = (nobs - 1.0) if cov else sqrt(sumxx * sumyy) +@cython.boundscheck(False) +@cython.wraparound(False) +cdef void nancorr_single(float64_t[:, :] mat, + Py_ssize_t N, + Py_ssize_t K, + float64_t[:, :] result, + Py_ssize_t xi, + Py_ssize_t yi, + uint8_t[:, :] mask, + bint minpv, + bint cov=0) nogil: + cdef: + Py_ssize_t i, j + int64_t nobs = 0 + float64_t vx, vy, sumx, sumy, sumxx, sumyy, meanx, meany, divisor - if divisor != 0: - result[xi, yi] = result[yi, xi] = sumx / divisor - else: - result[xi, yi] = result[yi, xi] = NaN + nobs = sumxx = sumyy = sumx = sumy = 0 + for i in range(N): + if mask[i, xi] and mask[i, yi]: + vx = mat[i, xi] + vy = mat[i, yi] + nobs += 1 + sumx += vx + sumy += vy + + if nobs < minpv: + result[xi, yi] = result[yi, xi] = NaN + else: + meanx = sumx / nobs + meany = sumy / nobs + + # now the cov numerator + sumx = 0 + + for i in range(N): + if mask[i, xi] and mask[i, yi]: + vx = mat[i, xi] - meanx + vy = mat[i, yi] - meany + + sumx += vx * vy + sumxx += vx * vx + sumyy += vy * vy + + divisor = (nobs - 1.0) if cov else sqrt(sumxx * sumyy) + + if divisor != 0: + result[xi, yi] = result[yi, xi] = sumx / divisor + else: + result[xi, yi] = result[yi, xi] = NaN - return result # ---------------------------------------------------------------------- # Pairwise Spearman correlation diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 7bbbdd70e062e..feef6cc5eac3e 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -6996,7 +6996,8 @@ def corr(self, method='pearson', min_periods=1): mat = numeric_df.values if method == 'pearson': - correl = libalgos.nancorr(ensure_float64(mat), minp=min_periods) + correl = 
libalgos.nancorr(ensure_float64(mat), minp=min_periods, + parallel=True) elif method == 'spearman': correl = libalgos.nancorr_spearman(ensure_float64(mat), minp=min_periods) diff --git a/setup.py b/setup.py index ed2d905f4358b..8e10d1f6e5181 100755 --- a/setup.py +++ b/setup.py @@ -9,6 +9,7 @@ import os from os.path import join as pjoin +import numpy import pkg_resources import platform from distutils.sysconfig import get_config_var @@ -677,10 +678,11 @@ def srcpath(name=None, suffix='.pyx', subdir='src'): obj = Extension('pandas.{name}'.format(name=name), sources=sources, depends=data.get('depends', []), - include_dirs=include, + include_dirs=include + [numpy.get_include()], language=data.get('language', 'c'), define_macros=data.get('macros', macros), - extra_compile_args=extra_compile_args) + extra_compile_args=['-fopenmp'] + extra_compile_args, + extra_link_args=['-fopenmp']) extensions.append(obj) @@ -704,12 +706,13 @@ def srcpath(name=None, suffix='.pyx', subdir='src'): np_datetime_sources), include_dirs=['pandas/_libs/src/ujson/python', 'pandas/_libs/src/ujson/lib', - 'pandas/_libs/src/datetime'], - extra_compile_args=(['-D_GNU_SOURCE'] + + 'pandas/_libs/src/datetime', + numpy.get_include()], + extra_compile_args=(['-D_GNU_SOURCE', '-fopenmp'] + extra_compile_args), + extra_link_args=['-fopenmp'], define_macros=macros) - extensions.append(ujson_ext) # ----------------------------------------------------------------------
This is a proposal for using openmp to speedup the nancorr function (used by pd.DataFrame.corr). If this is something that is useful, it can probably be implemented for other cython algorithms implemented in algos.pyx. Also, the interface has to be decided on: how to choose whether to use parallelization or not, how many cpus, schedule strategy for the prange, etc. I am not sure what the implications are for adding openmp to compilation and linkage in terms of portability. Using 4 cpus I got ~60% speedup on a pd.DataFrame.corr (of size 20000 x 1300).
https://api.github.com/repos/pandas-dev/pandas/pulls/24795
2019-01-16T08:47:15Z
2019-01-16T15:15:16Z
null
2019-01-16T16:29:04Z
Avoid unnecessary use of _coerce_scalar_to_timedelta_type
diff --git a/pandas/core/computation/pytables.py b/pandas/core/computation/pytables.py index db409b215a78d..00de29b07c75d 100644 --- a/pandas/core/computation/pytables.py +++ b/pandas/core/computation/pytables.py @@ -16,7 +16,6 @@ from pandas.core.computation.common import _ensure_decoded from pandas.core.computation.expr import BaseExprVisitor from pandas.core.computation.ops import UndefinedVariableError, is_term -from pandas.core.tools.timedeltas import _coerce_scalar_to_timedelta_type from pandas.io.formats.printing import pprint_thing, pprint_thing_encoded @@ -191,7 +190,7 @@ def stringify(value): v = v.tz_convert('UTC') return TermValue(v, v.value, kind) elif kind == u('timedelta64') or kind == u('timedelta'): - v = _coerce_scalar_to_timedelta_type(v, unit='s').value + v = pd.Timedelta(v, unit='s').value return TermValue(int(v), v, kind) elif meta == u('category'): metadata = com.values_from_object(self.metadata) diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index bd6094596c5e1..4049b0321f221 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -569,8 +569,6 @@ def coerce_to_dtypes(result, dtypes): if len(result) != len(dtypes): raise AssertionError("_coerce_to_dtypes requires equal len arrays") - from pandas.core.tools.timedeltas import _coerce_scalar_to_timedelta_type - def conv(r, dtype): try: if isna(r): @@ -578,7 +576,7 @@ def conv(r, dtype): elif dtype == _NS_DTYPE: r = tslibs.Timestamp(r) elif dtype == _TD_DTYPE: - r = _coerce_scalar_to_timedelta_type(r) + r = tslibs.Timedelta(r) elif dtype == np.bool_: # messy. non 0/1 integers do not get converted. 
if is_integer(r) and r not in [0, 1]: diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py index 893926cc076ab..cbe5ae198838f 100644 --- a/pandas/core/indexes/timedeltas.py +++ b/pandas/core/indexes/timedeltas.py @@ -26,7 +26,6 @@ wrap_arithmetic_op) from pandas.core.indexes.numeric import Int64Index from pandas.core.ops import get_op_result_name -from pandas.core.tools.timedeltas import _coerce_scalar_to_timedelta_type from pandas.tseries.frequencies import to_offset @@ -582,7 +581,7 @@ def _maybe_cast_slice_bound(self, label, side, kind): assert kind in ['ix', 'loc', 'getitem', None] if isinstance(label, compat.string_types): - parsed = _coerce_scalar_to_timedelta_type(label, box=True) + parsed = Timedelta(label) lbound = parsed.round(parsed.resolution) if side == 'left': return lbound diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_analytics.py index 9f64b71ea455c..244e8f83bea37 100644 --- a/pandas/tests/frame/test_analytics.py +++ b/pandas/tests/frame/test_analytics.py @@ -1,7 +1,5 @@ # -*- coding: utf-8 -*- -from __future__ import print_function - from datetime import timedelta import operator from string import ascii_lowercase @@ -1128,7 +1126,6 @@ def test_mode_sortwarning(self): tm.assert_frame_equal(result, expected) def test_operators_timedelta64(self): - from datetime import timedelta df = DataFrame(dict(A=date_range('2012-1-1', periods=3, freq='D'), B=date_range('2012-1-2', periods=3, freq='D'), C=Timestamp('20120101') - @@ -1169,12 +1166,9 @@ def test_operators_timedelta64(self): mixed['F'] = Timestamp('20130101') # results in an object array - from pandas.core.tools.timedeltas import ( - _coerce_scalar_to_timedelta_type as _coerce) - result = mixed.min() - expected = Series([_coerce(timedelta(seconds=5 * 60 + 5)), - _coerce(timedelta(days=-1)), + expected = Series([pd.Timedelta(timedelta(seconds=5 * 60 + 5)), + pd.Timedelta(timedelta(days=-1)), 'foo', 1, 1.0, Timestamp('20130101')], 
index=mixed.columns)
In nearly all the places where its used, we can just use `Timedelta` instead. Way simpler, avoids using a private function.
https://api.github.com/repos/pandas-dev/pandas/pulls/24793
2019-01-16T03:02:00Z
2019-01-17T12:56:54Z
2019-01-17T12:56:54Z
2019-01-17T15:41:13Z
PERF: avoid object-dtype comparisons
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 20881972c068a..df764aa4ba666 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -2465,6 +2465,12 @@ def setitem(self, indexer, value): klass=ObjectBlock,) return newb.setitem(indexer, value) + def equals(self, other): + # override for significant performance improvement + if self.dtype != other.dtype or self.shape != other.shape: + return False + return (self.values.view('i8') == other.values.view('i8')).all() + class TimeDeltaBlock(DatetimeLikeBlockMixin, IntBlock): __slots__ = () diff --git a/pandas/util/testing.py b/pandas/util/testing.py index 2df43cd678764..f441dd20f3982 100644 --- a/pandas/util/testing.py +++ b/pandas/util/testing.py @@ -1199,6 +1199,11 @@ def assert_extension_array_equal(left, right, check_dtype=True, if check_dtype: assert_attr_equal('dtype', left, right, obj='ExtensionArray') + if hasattr(left, "asi8") and type(right) == type(left): + # Avoid slow object-dtype comparisons + assert_numpy_array_equal(left.asi8, right.asi8) + return + left_na = np.asarray(left.isna()) right_na = np.asarray(right.isna()) assert_numpy_array_equal(left_na, right_na, obj='ExtensionArray NA mask')
Looking at the --durations=10 results in the tests, one that stands out is ``` 7.08s call pandas/tests/series/test_datetime_values.py::TestSeriesDatetimeValues::test_dt_namespace_accessor ``` It turns out all but about 0.15 seconds of this is spent in a `tm.assert_series_equal` call for a Datetime64TZ Series. Tracking that down, its in array_equivalent, which casts to object-dtype before comparing. This avoids that casting, shaves almost 7 seconds off that test.
https://api.github.com/repos/pandas-dev/pandas/pulls/24792
2019-01-16T00:02:24Z
2019-01-16T01:34:54Z
2019-01-16T01:34:54Z
2019-01-16T02:57:35Z
DOC: IntervalArray and IntervalIndex minor doc fixes
diff --git a/doc/source/api/indexing.rst b/doc/source/api/indexing.rst index b324bb4854f38..d27b05322c1f2 100644 --- a/doc/source/api/indexing.rst +++ b/doc/source/api/indexing.rst @@ -258,6 +258,7 @@ IntervalIndex Components IntervalIndex.get_indexer IntervalIndex.set_closed IntervalIndex.overlaps + IntervalIndex.to_tuples .. _api.multiindex: diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py index 2e7216108a23e..45470e03c041a 100644 --- a/pandas/core/arrays/interval.py +++ b/pandas/core/arrays/interval.py @@ -71,7 +71,6 @@ closed mid length -values is_non_overlapping_monotonic %(extra_attributes)s\ @@ -936,13 +935,16 @@ def mid(self): # datetime safe version return self.left + 0.5 * self.length - @property - def is_non_overlapping_monotonic(self): - """ - Return True if the IntervalArray is non-overlapping (no Intervals share + _interval_shared_docs['is_non_overlapping_monotonic'] = """ + Return True if the %(klass)s is non-overlapping (no Intervals share points) and is either monotonic increasing or monotonic decreasing, else False """ + + @property + @Appender(_interval_shared_docs['is_non_overlapping_monotonic'] + % _shared_docs_kwargs) + def is_non_overlapping_monotonic(self): # must be increasing (e.g., [0, 1), [1, 2), [2, 3), ... ) # or decreasing (e.g., [-1, 0), [-2, -1), [-3, -2), ...) # we already require left <= right @@ -986,7 +988,7 @@ def __array__(self, dtype=None): Returns NA as a tuple if True, ``(nan, nan)``, or just as the NA value itself if False, ``nan``. - ..versionadded:: 0.23.0 + .. 
versionadded:: 0.23.0 Returns ------- diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index c2aca197c55f6..f4c37413260b5 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -102,7 +102,7 @@ def _new_IntervalIndex(cls, d): summary="Immutable index of intervals that are closed on the same side.", name=_index_doc_kwargs['name'], versionadded="0.20.0", - extra_attributes="is_overlapping\n", + extra_attributes="is_overlapping\nvalues\n", extra_methods="contains\n", examples=textwrap.dedent("""\ Examples @@ -465,6 +465,8 @@ def is_unique(self): return self._multiindex.is_unique @cache_readonly + @Appender(_interval_shared_docs['is_non_overlapping_monotonic'] + % _index_doc_kwargs) def is_non_overlapping_monotonic(self): return self._data.is_non_overlapping_monotonic
Some minor fixes for things that aren't rendering correctly for `IntervalArray` and `IntervalIndex`. I've built the docs locally and the changes look good. For [`IntervalArray` ](https://pandas-docs.github.io/pandas-docs-travis/api/generated/pandas.IntervalArray.html): - `IntervalArray.values` doesn't exist, so removing it. ![image](https://user-images.githubusercontent.com/5332445/51159115-bf6dd600-1844-11e9-9d0f-ec5a1e132a5f.png) For [`IntervalIndex`](https://pandas-docs.github.io/pandas-docs-travis/api/generated/pandas.IntervalIndex.html): - `IntervalIndex.is_non_overlapping_monotonic` lost it's docstring, so adding it back via shared docs - `IntervalIndex.to_tuples` isn't being generated, so now generating it. Also fixed versionadded directive. ![image](https://user-images.githubusercontent.com/5332445/51159221-520e7500-1845-11e9-8633-fdfc5d646d52.png)
https://api.github.com/repos/pandas-dev/pandas/pulls/24780
2019-01-15T06:04:51Z
2019-01-15T12:09:12Z
2019-01-15T12:09:12Z
2019-01-15T15:55:56Z
TST/REF: Collect Reduction, Arithmetic Tests
diff --git a/pandas/tests/arithmetic/test_numeric.py b/pandas/tests/arithmetic/test_numeric.py index 6694946902836..da1b3f1da5322 100644 --- a/pandas/tests/arithmetic/test_numeric.py +++ b/pandas/tests/arithmetic/test_numeric.py @@ -1055,3 +1055,22 @@ def test_numeric_compat2(self): (pd.RangeIndex(-100, -200, 3), 2, pd.RangeIndex(0))] for idx, div, expected in cases_exact: tm.assert_index_equal(idx // div, expected, exact=True) + + @pytest.mark.parametrize('dtype', [np.int64, np.float64]) + @pytest.mark.parametrize('delta', [1, 0, -1]) + def test_addsub_arithmetic(self, dtype, delta): + # GH#8142 + delta = dtype(delta) + index = pd.Index([10, 11, 12], dtype=dtype) + result = index + delta + expected = pd.Index(index.values + delta, dtype=dtype) + tm.assert_index_equal(result, expected) + + # this subtraction used to fail + result = index - delta + expected = pd.Index(index.values - delta, dtype=dtype) + tm.assert_index_equal(result, expected) + + tm.assert_index_equal(index + index, 2 * index) + tm.assert_index_equal(index - index, 0 * index) + assert not (index - index).empty diff --git a/pandas/tests/arithmetic/test_object.py b/pandas/tests/arithmetic/test_object.py index 9917c45ef6d12..29063ae3f50e3 100644 --- a/pandas/tests/arithmetic/test_object.py +++ b/pandas/tests/arithmetic/test_object.py @@ -2,6 +2,7 @@ # Arithmetc tests for DataFrame/Series/Index/Array classes that should # behave identically. 
# Specifically for object dtype +from decimal import Decimal import operator import numpy as np @@ -224,3 +225,90 @@ def test_mixed_timezone_series_ops_object(self): name='xxx') tm.assert_series_equal(ser + pd.Timedelta('00:30:00'), exp) tm.assert_series_equal(pd.Timedelta('00:30:00') + ser, exp) + + # TODO: cleanup & parametrize over box + def test_iadd_preserves_name(self): + # GH#17067, GH#19723 __iadd__ and __isub__ should preserve index name + ser = pd.Series([1, 2, 3]) + ser.index.name = 'foo' + + ser.index += 1 + assert ser.index.name == "foo" + + ser.index -= 1 + assert ser.index.name == "foo" + + def test_add_string(self): + # from bug report + index = pd.Index(['a', 'b', 'c']) + index2 = index + 'foo' + + assert 'a' not in index2 + assert 'afoo' in index2 + + def test_iadd_string(self): + index = pd.Index(['a', 'b', 'c']) + # doesn't fail test unless there is a check before `+=` + assert 'a' in index + + index += '_x' + assert 'a_x' in index + + def test_add(self): + index = tm.makeStringIndex(100) + expected = pd.Index(index.values * 2) + tm.assert_index_equal(index + index, expected) + tm.assert_index_equal(index + index.tolist(), expected) + tm.assert_index_equal(index.tolist() + index, expected) + + # test add and radd + index = pd.Index(list('abc')) + expected = pd.Index(['a1', 'b1', 'c1']) + tm.assert_index_equal(index + '1', expected) + expected = pd.Index(['1a', '1b', '1c']) + tm.assert_index_equal('1' + index, expected) + + def test_sub_fail(self): + index = tm.makeStringIndex(100) + with pytest.raises(TypeError): + index - 'a' + with pytest.raises(TypeError): + index - index + with pytest.raises(TypeError): + index - index.tolist() + with pytest.raises(TypeError): + index.tolist() - index + + def test_sub_object(self): + # GH#19369 + index = pd.Index([Decimal(1), Decimal(2)]) + expected = pd.Index([Decimal(0), Decimal(1)]) + + result = index - Decimal(1) + tm.assert_index_equal(result, expected) + + result = index - pd.Index([Decimal(1), 
Decimal(1)]) + tm.assert_index_equal(result, expected) + + with pytest.raises(TypeError): + index - 'foo' + + with pytest.raises(TypeError): + index - np.array([2, 'foo']) + + def test_rsub_object(self): + # GH#19369 + index = pd.Index([Decimal(1), Decimal(2)]) + expected = pd.Index([Decimal(1), Decimal(0)]) + + result = Decimal(2) - index + tm.assert_index_equal(result, expected) + + result = np.array([Decimal(2), Decimal(2)]) - index + tm.assert_index_equal(result, expected) + + with pytest.raises(TypeError): + 'foo' - index + + with pytest.raises(TypeError): + np.array([True, pd.Timestamp.now()]) - index diff --git a/pandas/tests/indexes/datetimes/test_ops.py b/pandas/tests/indexes/datetimes/test_ops.py index 4be4372f65dcc..2a546af79931e 100644 --- a/pandas/tests/indexes/datetimes/test_ops.py +++ b/pandas/tests/indexes/datetimes/test_ops.py @@ -47,59 +47,6 @@ def test_ops_properties_basic(self): assert s.day == 10 pytest.raises(AttributeError, lambda: s.weekday) - def test_minmax_tz(self, tz_naive_fixture): - tz = tz_naive_fixture - # monotonic - idx1 = pd.DatetimeIndex(['2011-01-01', '2011-01-02', - '2011-01-03'], tz=tz) - assert idx1.is_monotonic - - # non-monotonic - idx2 = pd.DatetimeIndex(['2011-01-01', pd.NaT, '2011-01-03', - '2011-01-02', pd.NaT], tz=tz) - assert not idx2.is_monotonic - - for idx in [idx1, idx2]: - assert idx.min() == Timestamp('2011-01-01', tz=tz) - assert idx.max() == Timestamp('2011-01-03', tz=tz) - assert idx.argmin() == 0 - assert idx.argmax() == 2 - - @pytest.mark.parametrize('op', ['min', 'max']) - def test_minmax_nat(self, op): - # Return NaT - obj = DatetimeIndex([]) - assert pd.isna(getattr(obj, op)()) - - obj = DatetimeIndex([pd.NaT]) - assert pd.isna(getattr(obj, op)()) - - obj = DatetimeIndex([pd.NaT, pd.NaT, pd.NaT]) - assert pd.isna(getattr(obj, op)()) - - def test_numpy_minmax(self): - dr = pd.date_range(start='2016-01-15', end='2016-01-20') - - assert np.min(dr) == Timestamp('2016-01-15 00:00:00', freq='D') - assert 
np.max(dr) == Timestamp('2016-01-20 00:00:00', freq='D') - - errmsg = "the 'out' parameter is not supported" - with pytest.raises(ValueError, match=errmsg): - np.min(dr, out=0) - - with pytest.raises(ValueError, match=errmsg): - np.max(dr, out=0) - - assert np.argmin(dr) == 0 - assert np.argmax(dr) == 5 - - errmsg = "the 'out' parameter is not supported" - with pytest.raises(ValueError, match=errmsg): - np.argmin(dr, out=0) - - with pytest.raises(ValueError, match=errmsg): - np.argmax(dr, out=0) - def test_repeat_range(self, tz_naive_fixture): tz = tz_naive_fixture rng = date_range('1/1/2000', '1/1/2001') diff --git a/pandas/tests/indexes/period/test_ops.py b/pandas/tests/indexes/period/test_ops.py index eebff39fdf46f..8b022268897b6 100644 --- a/pandas/tests/indexes/period/test_ops.py +++ b/pandas/tests/indexes/period/test_ops.py @@ -3,7 +3,7 @@ import pytest import pandas as pd -from pandas import DatetimeIndex, Index, NaT, Period, PeriodIndex, Series +from pandas import DatetimeIndex, Index, NaT, PeriodIndex, Series from pandas.core.arrays import PeriodArray from pandas.tests.test_base import Ops import pandas.util.testing as tm @@ -24,61 +24,6 @@ def test_ops_properties(self): self.check_ops_properties(PeriodArray._object_ops, f) self.check_ops_properties(PeriodArray._bool_ops, f) - def test_minmax(self): - - # monotonic - idx1 = pd.PeriodIndex([NaT, '2011-01-01', '2011-01-02', - '2011-01-03'], freq='D') - assert idx1.is_monotonic - - # non-monotonic - idx2 = pd.PeriodIndex(['2011-01-01', NaT, '2011-01-03', - '2011-01-02', NaT], freq='D') - assert not idx2.is_monotonic - - for idx in [idx1, idx2]: - assert idx.min() == pd.Period('2011-01-01', freq='D') - assert idx.max() == pd.Period('2011-01-03', freq='D') - assert idx1.argmin() == 1 - assert idx2.argmin() == 0 - assert idx1.argmax() == 3 - assert idx2.argmax() == 2 - - for op in ['min', 'max']: - # Return NaT - obj = PeriodIndex([], freq='M') - result = getattr(obj, op)() - assert result is NaT - - obj = 
PeriodIndex([NaT], freq='M') - result = getattr(obj, op)() - assert result is NaT - - obj = PeriodIndex([NaT, NaT, NaT], freq='M') - result = getattr(obj, op)() - assert result is NaT - - def test_numpy_minmax(self): - pr = pd.period_range(start='2016-01-15', end='2016-01-20') - - assert np.min(pr) == Period('2016-01-15', freq='D') - assert np.max(pr) == Period('2016-01-20', freq='D') - - errmsg = "the 'out' parameter is not supported" - with pytest.raises(ValueError, match=errmsg): - np.min(pr, out=0) - with pytest.raises(ValueError, match=errmsg): - np.max(pr, out=0) - - assert np.argmin(pr) == 0 - assert np.argmax(pr) == 5 - - errmsg = "the 'out' parameter is not supported" - with pytest.raises(ValueError, match=errmsg): - np.argmin(pr, out=0) - with pytest.raises(ValueError, match=errmsg): - np.argmax(pr, out=0) - def test_resolution(self): for freq, expected in zip(['A', 'Q', 'M', 'D', 'H', 'T', 'S', 'L', 'U'], diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py index d75800b763cb9..7f6b76f7442af 100644 --- a/pandas/tests/indexes/test_base.py +++ b/pandas/tests/indexes/test_base.py @@ -2,7 +2,6 @@ from collections import defaultdict from datetime import datetime, timedelta -from decimal import Decimal import math import sys @@ -834,61 +833,6 @@ def test_union_dt_as_obj(self): tm.assert_contains_all(self.strIndex, secondCat) tm.assert_contains_all(self.dateIndex, firstCat) - def test_add(self): - index = self.strIndex - expected = Index(self.strIndex.values * 2) - tm.assert_index_equal(index + index, expected) - tm.assert_index_equal(index + index.tolist(), expected) - tm.assert_index_equal(index.tolist() + index, expected) - - # test add and radd - index = Index(list('abc')) - expected = Index(['a1', 'b1', 'c1']) - tm.assert_index_equal(index + '1', expected) - expected = Index(['1a', '1b', '1c']) - tm.assert_index_equal('1' + index, expected) - - def test_sub_fail(self): - index = self.strIndex - pytest.raises(TypeError, lambda: 
index - 'a') - pytest.raises(TypeError, lambda: index - index) - pytest.raises(TypeError, lambda: index - index.tolist()) - pytest.raises(TypeError, lambda: index.tolist() - index) - - def test_sub_object(self): - # GH#19369 - index = pd.Index([Decimal(1), Decimal(2)]) - expected = pd.Index([Decimal(0), Decimal(1)]) - - result = index - Decimal(1) - tm.assert_index_equal(result, expected) - - result = index - pd.Index([Decimal(1), Decimal(1)]) - tm.assert_index_equal(result, expected) - - with pytest.raises(TypeError): - index - 'foo' - - with pytest.raises(TypeError): - index - np.array([2, 'foo']) - - def test_rsub_object(self): - # GH#19369 - index = pd.Index([Decimal(1), Decimal(2)]) - expected = pd.Index([Decimal(1), Decimal(0)]) - - result = Decimal(2) - index - tm.assert_index_equal(result, expected) - - result = np.array([Decimal(2), Decimal(2)]) - index - tm.assert_index_equal(result, expected) - - with pytest.raises(TypeError): - 'foo' - index - - with pytest.raises(TypeError): - np.array([True, pd.Timestamp.now()]) - index - def test_map_identity_mapping(self): # GH 12766 # TODO: replace with fixture @@ -1008,22 +952,6 @@ def test_append_empty_preserve_name(self, name, expected): result = left.append(right) assert result.name == expected - def test_add_string(self): - # from bug report - index = Index(['a', 'b', 'c']) - index2 = index + 'foo' - - assert 'a' not in index2 - assert 'afoo' in index2 - - def test_iadd_string(self): - index = pd.Index(['a', 'b', 'c']) - # doesn't fail test unless there is a check before `+=` - assert 'a' in index - - index += '_x' - assert 'a_x' in index - @pytest.mark.parametrize("second_name,expected", [ (None, None), ('name', 'name')]) @pytest.mark.parametrize("sort", [True, False]) @@ -2146,36 +2074,6 @@ def test_string_index_repr_with_unicode_option_compat(self, index, result = unicode(index) # noqa assert result == expected - @pytest.mark.parametrize('dtype', [np.int64, np.float64]) - @pytest.mark.parametrize('delta', 
[1, 0, -1]) - def test_addsub_arithmetic(self, dtype, delta): - # GH 8142 - delta = dtype(delta) - index = pd.Index([10, 11, 12], dtype=dtype) - result = index + delta - expected = pd.Index(index.values + delta, dtype=dtype) - tm.assert_index_equal(result, expected) - - # this subtraction used to fail - result = index - delta - expected = pd.Index(index.values - delta, dtype=dtype) - tm.assert_index_equal(result, expected) - - tm.assert_index_equal(index + index, 2 * index) - tm.assert_index_equal(index - index, 0 * index) - assert not (index - index).empty - - def test_iadd_preserves_name(self): - # GH#17067, GH#19723 __iadd__ and __isub__ should preserve index name - ser = pd.Series([1, 2, 3]) - ser.index.name = 'foo' - - ser.index += 1 - assert ser.index.name == "foo" - - ser.index -= 1 - assert ser.index.name == "foo" - def test_cached_properties_not_settable(self): index = pd.Index([1, 2, 3]) with pytest.raises(AttributeError, match="Can't set attribute"): diff --git a/pandas/tests/indexes/test_category.py b/pandas/tests/indexes/test_category.py index d85568ce67d16..582d466c6178e 100644 --- a/pandas/tests/indexes/test_category.py +++ b/pandas/tests/indexes/test_category.py @@ -251,17 +251,6 @@ def test_contains(self): list('aabbca') + [np.nan], categories=list('cabdef')) assert np.nan in ci - def test_min_max(self): - - ci = self.create_index(ordered=False) - pytest.raises(TypeError, lambda: ci.min()) - pytest.raises(TypeError, lambda: ci.max()) - - ci = self.create_index(ordered=True) - - assert ci.min() == 'c' - assert ci.max() == 'b' - def test_map(self): ci = pd.CategoricalIndex(list('ABABC'), categories=list('CBA'), ordered=True) diff --git a/pandas/tests/indexes/test_range.py b/pandas/tests/indexes/test_range.py index 7cb62c275e621..30822975a3ea0 100644 --- a/pandas/tests/indexes/test_range.py +++ b/pandas/tests/indexes/test_range.py @@ -8,7 +8,7 @@ from pandas.compat import PY3, range, u import pandas as pd -from pandas import Float64Index, Index, 
Int64Index, RangeIndex, Series, isna +from pandas import Float64Index, Index, Int64Index, RangeIndex, Series import pandas.util.testing as tm from .test_numeric import Numeric @@ -884,30 +884,3 @@ def test_append(self): # Append single item rather than list result2 = indices[0].append(indices[1]) tm.assert_index_equal(result2, expected, exact=True) - - @pytest.mark.parametrize('start,stop,step', - [(0, 400, 3), (500, 0, -6), (-10**6, 10**6, 4), - (10**6, -10**6, -4), (0, 10, 20)]) - def test_max_min(self, start, stop, step): - # GH17607 - idx = RangeIndex(start, stop, step) - expected = idx._int64index.max() - result = idx.max() - assert result == expected - - # skipna should be irrelevant since RangeIndex should never have NAs - result2 = idx.max(skipna=False) - assert result2 == expected - - expected = idx._int64index.min() - result = idx.min() - assert result == expected - - # skipna should be irrelevant since RangeIndex should never have NAs - result2 = idx.min(skipna=False) - assert result2 == expected - - # empty - idx = RangeIndex(start, stop, -step) - assert isna(idx.max()) - assert isna(idx.min()) diff --git a/pandas/tests/indexes/timedeltas/test_ops.py b/pandas/tests/indexes/timedeltas/test_ops.py index 97898dd8942f8..40377e4362b75 100644 --- a/pandas/tests/indexes/timedeltas/test_ops.py +++ b/pandas/tests/indexes/timedeltas/test_ops.py @@ -1,4 +1,3 @@ -from datetime import timedelta import numpy as np import pytest @@ -6,9 +5,7 @@ from pandas.core.dtypes.generic import ABCDateOffset import pandas as pd -from pandas import ( - Series, Timedelta, TimedeltaIndex, Timestamp, timedelta_range, - to_timedelta) +from pandas import Series, TimedeltaIndex, timedelta_range from pandas.tests.test_base import Ops import pandas.util.testing as tm @@ -27,54 +24,6 @@ def test_ops_properties(self): self.check_ops_properties(TimedeltaIndex._field_ops, f) self.check_ops_properties(TimedeltaIndex._object_ops, f) - def test_minmax(self): - - # monotonic - idx1 = 
TimedeltaIndex(['1 days', '2 days', '3 days']) - assert idx1.is_monotonic - - # non-monotonic - idx2 = TimedeltaIndex(['1 days', np.nan, '3 days', 'NaT']) - assert not idx2.is_monotonic - - for idx in [idx1, idx2]: - assert idx.min() == Timedelta('1 days') - assert idx.max() == Timedelta('3 days') - assert idx.argmin() == 0 - assert idx.argmax() == 2 - - for op in ['min', 'max']: - # Return NaT - obj = TimedeltaIndex([]) - assert pd.isna(getattr(obj, op)()) - - obj = TimedeltaIndex([pd.NaT]) - assert pd.isna(getattr(obj, op)()) - - obj = TimedeltaIndex([pd.NaT, pd.NaT, pd.NaT]) - assert pd.isna(getattr(obj, op)()) - - def test_numpy_minmax(self): - td = timedelta_range('16815 days', '16820 days', freq='D') - - assert np.min(td) == Timedelta('16815 days') - assert np.max(td) == Timedelta('16820 days') - - errmsg = "the 'out' parameter is not supported" - with pytest.raises(ValueError, match=errmsg): - np.min(td, out=0) - with pytest.raises(ValueError, match=errmsg): - np.max(td, out=0) - - assert np.argmin(td) == 0 - assert np.argmax(td) == 5 - - errmsg = "the 'out' parameter is not supported" - with pytest.raises(ValueError, match=errmsg): - np.argmin(td, out=0) - with pytest.raises(ValueError, match=errmsg): - np.argmax(td, out=0) - def test_value_counts_unique(self): # GH 7735 @@ -330,61 +279,3 @@ def test_freq_setter_errors(self): # setting with non-freq string with pytest.raises(ValueError, match='Invalid frequency'): idx.freq = 'foo' - - -class TestTimedeltas(object): - - def test_timedelta_ops(self): - # GH4984 - # make sure ops return Timedelta - s = Series([Timestamp('20130101') + timedelta(seconds=i * i) - for i in range(10)]) - td = s.diff() - - result = td.mean() - expected = to_timedelta(timedelta(seconds=9)) - assert result == expected - - result = td.to_frame().mean() - assert result[0] == expected - - result = td.quantile(.1) - expected = Timedelta(np.timedelta64(2600, 'ms')) - assert result == expected - - result = td.median() - expected = 
to_timedelta('00:00:09') - assert result == expected - - result = td.to_frame().median() - assert result[0] == expected - - # GH 6462 - # consistency in returned values for sum - result = td.sum() - expected = to_timedelta('00:01:21') - assert result == expected - - result = td.to_frame().sum() - assert result[0] == expected - - # std - result = td.std() - expected = to_timedelta(Series(td.dropna().values).std()) - assert result == expected - - result = td.to_frame().std() - assert result[0] == expected - - # invalid ops - for op in ['skew', 'kurt', 'sem', 'prod']: - pytest.raises(TypeError, getattr(td, op)) - - # GH 10040 - # make sure NaT is properly handled by median() - s = Series([Timestamp('2015-02-03'), Timestamp('2015-02-07')]) - assert s.diff().median() == timedelta(days=4) - - s = Series([Timestamp('2015-02-03'), Timestamp('2015-02-07'), - Timestamp('2015-02-15')]) - assert s.diff().median() == timedelta(days=6) diff --git a/pandas/tests/reductions/test_reductions.py b/pandas/tests/reductions/test_reductions.py index d27308029fa19..c7bb667f71fac 100644 --- a/pandas/tests/reductions/test_reductions.py +++ b/pandas/tests/reductions/test_reductions.py @@ -1,11 +1,14 @@ # -*- coding: utf-8 -*- -from datetime import datetime +from datetime import datetime, timedelta import numpy as np import pytest import pandas as pd -from pandas import Categorical, DataFrame, Index, PeriodIndex, Series, compat +from pandas import ( + Categorical, DataFrame, DatetimeIndex, Index, NaT, Period, PeriodIndex, + RangeIndex, Series, Timedelta, TimedeltaIndex, Timestamp, compat, isna, + timedelta_range, to_timedelta) from pandas.core import nanops import pandas.util.testing as tm @@ -136,6 +139,266 @@ def test_nanops(self): assert obj.argmax(skipna=False) == -1 +class TestIndexReductions(object): + # Note: the name TestIndexReductions indicates these tests + # were moved from a Index-specific test file, _not_ that these tests are + # intended long-term to be Index-specific + + 
@pytest.mark.parametrize('start,stop,step', + [(0, 400, 3), (500, 0, -6), (-10**6, 10**6, 4), + (10**6, -10**6, -4), (0, 10, 20)]) + def test_max_min_range(self, start, stop, step): + # GH#17607 + idx = RangeIndex(start, stop, step) + expected = idx._int64index.max() + result = idx.max() + assert result == expected + + # skipna should be irrelevant since RangeIndex should never have NAs + result2 = idx.max(skipna=False) + assert result2 == expected + + expected = idx._int64index.min() + result = idx.min() + assert result == expected + + # skipna should be irrelevant since RangeIndex should never have NAs + result2 = idx.min(skipna=False) + assert result2 == expected + + # empty + idx = RangeIndex(start, stop, -step) + assert isna(idx.max()) + assert isna(idx.min()) + + def test_minmax_timedelta64(self): + + # monotonic + idx1 = TimedeltaIndex(['1 days', '2 days', '3 days']) + assert idx1.is_monotonic + + # non-monotonic + idx2 = TimedeltaIndex(['1 days', np.nan, '3 days', 'NaT']) + assert not idx2.is_monotonic + + for idx in [idx1, idx2]: + assert idx.min() == Timedelta('1 days') + assert idx.max() == Timedelta('3 days') + assert idx.argmin() == 0 + assert idx.argmax() == 2 + + for op in ['min', 'max']: + # Return NaT + obj = TimedeltaIndex([]) + assert pd.isna(getattr(obj, op)()) + + obj = TimedeltaIndex([pd.NaT]) + assert pd.isna(getattr(obj, op)()) + + obj = TimedeltaIndex([pd.NaT, pd.NaT, pd.NaT]) + assert pd.isna(getattr(obj, op)()) + + def test_numpy_minmax_timedelta64(self): + td = timedelta_range('16815 days', '16820 days', freq='D') + + assert np.min(td) == Timedelta('16815 days') + assert np.max(td) == Timedelta('16820 days') + + errmsg = "the 'out' parameter is not supported" + with pytest.raises(ValueError, match=errmsg): + np.min(td, out=0) + with pytest.raises(ValueError, match=errmsg): + np.max(td, out=0) + + assert np.argmin(td) == 0 + assert np.argmax(td) == 5 + + errmsg = "the 'out' parameter is not supported" + with pytest.raises(ValueError, 
match=errmsg): + np.argmin(td, out=0) + with pytest.raises(ValueError, match=errmsg): + np.argmax(td, out=0) + + def test_timedelta_ops(self): + # GH#4984 + # make sure ops return Timedelta + s = Series([Timestamp('20130101') + timedelta(seconds=i * i) + for i in range(10)]) + td = s.diff() + + result = td.mean() + expected = to_timedelta(timedelta(seconds=9)) + assert result == expected + + result = td.to_frame().mean() + assert result[0] == expected + + result = td.quantile(.1) + expected = Timedelta(np.timedelta64(2600, 'ms')) + assert result == expected + + result = td.median() + expected = to_timedelta('00:00:09') + assert result == expected + + result = td.to_frame().median() + assert result[0] == expected + + # GH#6462 + # consistency in returned values for sum + result = td.sum() + expected = to_timedelta('00:01:21') + assert result == expected + + result = td.to_frame().sum() + assert result[0] == expected + + # std + result = td.std() + expected = to_timedelta(Series(td.dropna().values).std()) + assert result == expected + + result = td.to_frame().std() + assert result[0] == expected + + # invalid ops + for op in ['skew', 'kurt', 'sem', 'prod']: + pytest.raises(TypeError, getattr(td, op)) + + # GH#10040 + # make sure NaT is properly handled by median() + s = Series([Timestamp('2015-02-03'), Timestamp('2015-02-07')]) + assert s.diff().median() == timedelta(days=4) + + s = Series([Timestamp('2015-02-03'), Timestamp('2015-02-07'), + Timestamp('2015-02-15')]) + assert s.diff().median() == timedelta(days=6) + + def test_minmax_tz(self, tz_naive_fixture): + tz = tz_naive_fixture + # monotonic + idx1 = pd.DatetimeIndex(['2011-01-01', '2011-01-02', + '2011-01-03'], tz=tz) + assert idx1.is_monotonic + + # non-monotonic + idx2 = pd.DatetimeIndex(['2011-01-01', pd.NaT, '2011-01-03', + '2011-01-02', pd.NaT], tz=tz) + assert not idx2.is_monotonic + + for idx in [idx1, idx2]: + assert idx.min() == Timestamp('2011-01-01', tz=tz) + assert idx.max() == 
Timestamp('2011-01-03', tz=tz) + assert idx.argmin() == 0 + assert idx.argmax() == 2 + + @pytest.mark.parametrize('op', ['min', 'max']) + def test_minmax_nat_datetime64(self, op): + # Return NaT + obj = DatetimeIndex([]) + assert pd.isna(getattr(obj, op)()) + + obj = DatetimeIndex([pd.NaT]) + assert pd.isna(getattr(obj, op)()) + + obj = DatetimeIndex([pd.NaT, pd.NaT, pd.NaT]) + assert pd.isna(getattr(obj, op)()) + + def test_numpy_minmax_datetime64(self): + dr = pd.date_range(start='2016-01-15', end='2016-01-20') + + assert np.min(dr) == Timestamp('2016-01-15 00:00:00', freq='D') + assert np.max(dr) == Timestamp('2016-01-20 00:00:00', freq='D') + + errmsg = "the 'out' parameter is not supported" + with pytest.raises(ValueError, match=errmsg): + np.min(dr, out=0) + + with pytest.raises(ValueError, match=errmsg): + np.max(dr, out=0) + + assert np.argmin(dr) == 0 + assert np.argmax(dr) == 5 + + errmsg = "the 'out' parameter is not supported" + with pytest.raises(ValueError, match=errmsg): + np.argmin(dr, out=0) + + with pytest.raises(ValueError, match=errmsg): + np.argmax(dr, out=0) + + def test_minmax_period(self): + + # monotonic + idx1 = pd.PeriodIndex([NaT, '2011-01-01', '2011-01-02', + '2011-01-03'], freq='D') + assert idx1.is_monotonic + + # non-monotonic + idx2 = pd.PeriodIndex(['2011-01-01', NaT, '2011-01-03', + '2011-01-02', NaT], freq='D') + assert not idx2.is_monotonic + + for idx in [idx1, idx2]: + assert idx.min() == pd.Period('2011-01-01', freq='D') + assert idx.max() == pd.Period('2011-01-03', freq='D') + assert idx1.argmin() == 1 + assert idx2.argmin() == 0 + assert idx1.argmax() == 3 + assert idx2.argmax() == 2 + + for op in ['min', 'max']: + # Return NaT + obj = PeriodIndex([], freq='M') + result = getattr(obj, op)() + assert result is NaT + + obj = PeriodIndex([NaT], freq='M') + result = getattr(obj, op)() + assert result is NaT + + obj = PeriodIndex([NaT, NaT, NaT], freq='M') + result = getattr(obj, op)() + assert result is NaT + + def 
test_numpy_minmax_period(self): + pr = pd.period_range(start='2016-01-15', end='2016-01-20') + + assert np.min(pr) == Period('2016-01-15', freq='D') + assert np.max(pr) == Period('2016-01-20', freq='D') + + errmsg = "the 'out' parameter is not supported" + with pytest.raises(ValueError, match=errmsg): + np.min(pr, out=0) + with pytest.raises(ValueError, match=errmsg): + np.max(pr, out=0) + + assert np.argmin(pr) == 0 + assert np.argmax(pr) == 5 + + errmsg = "the 'out' parameter is not supported" + with pytest.raises(ValueError, match=errmsg): + np.argmin(pr, out=0) + with pytest.raises(ValueError, match=errmsg): + np.argmax(pr, out=0) + + def test_min_max_categorical(self): + + ci = pd.CategoricalIndex(list('aabbca'), + categories=list('cab'), + ordered=False) + with pytest.raises(TypeError): + ci.min() + with pytest.raises(TypeError): + ci.max() + + ci = pd.CategoricalIndex(list('aabbca'), + categories=list('cab'), + ordered=True) + assert ci.min() == 'c' + assert ci.max() == 'b' + + class TestSeriesReductions(object): # Note: the name TestSeriesReductions indicates these tests # were moved from a series-specific test file, _not_ that these tests are diff --git a/pandas/tests/test_nanops.py b/pandas/tests/test_nanops.py index 1e65118194be7..4bcd16a86e865 100644 --- a/pandas/tests/test_nanops.py +++ b/pandas/tests/test_nanops.py @@ -27,21 +27,21 @@ def setup_method(self, method): np.random.seed(11235) nanops._USE_BOTTLENECK = False - self.arr_shape = (11, 7, 5) + arr_shape = (11, 7, 5) - self.arr_float = np.random.randn(*self.arr_shape) - self.arr_float1 = np.random.randn(*self.arr_shape) + self.arr_float = np.random.randn(*arr_shape) + self.arr_float1 = np.random.randn(*arr_shape) self.arr_complex = self.arr_float + self.arr_float1 * 1j - self.arr_int = np.random.randint(-10, 10, self.arr_shape) - self.arr_bool = np.random.randint(0, 2, self.arr_shape) == 0 + self.arr_int = np.random.randint(-10, 10, arr_shape) + self.arr_bool = np.random.randint(0, 2, arr_shape) 
== 0 self.arr_str = np.abs(self.arr_float).astype('S') self.arr_utf = np.abs(self.arr_float).astype('U') self.arr_date = np.random.randint(0, 20000, - self.arr_shape).astype('M8[ns]') + arr_shape).astype('M8[ns]') self.arr_tdelta = np.random.randint(0, 20000, - self.arr_shape).astype('m8[ns]') + arr_shape).astype('m8[ns]') - self.arr_nan = np.tile(np.nan, self.arr_shape) + self.arr_nan = np.tile(np.nan, arr_shape) self.arr_float_nan = np.vstack([self.arr_float, self.arr_nan]) self.arr_float1_nan = np.vstack([self.arr_float1, self.arr_nan]) self.arr_nan_float1 = np.vstack([self.arr_nan, self.arr_float1]) @@ -49,22 +49,22 @@ def setup_method(self, method): self.arr_inf = self.arr_float * np.inf self.arr_float_inf = np.vstack([self.arr_float, self.arr_inf]) - self.arr_float1_inf = np.vstack([self.arr_float1, self.arr_inf]) - self.arr_inf_float1 = np.vstack([self.arr_inf, self.arr_float1]) - self.arr_inf_inf = np.vstack([self.arr_inf, self.arr_inf]) self.arr_nan_inf = np.vstack([self.arr_nan, self.arr_inf]) self.arr_float_nan_inf = np.vstack([self.arr_float, self.arr_nan, self.arr_inf]) - self.arr_nan_float1_inf = np.vstack([self.arr_float, self.arr_inf, - self.arr_nan]) self.arr_nan_nan_inf = np.vstack([self.arr_nan, self.arr_nan, self.arr_inf]) - self.arr_obj = np.vstack([self.arr_float.astype( - 'O'), self.arr_int.astype('O'), self.arr_bool.astype( - 'O'), self.arr_complex.astype('O'), self.arr_str.astype( - 'O'), self.arr_utf.astype('O'), self.arr_date.astype('O'), - self.arr_tdelta.astype('O')]) + self.arr_obj = np.vstack([ + self.arr_float.astype('O'), + self.arr_int.astype('O'), + self.arr_bool.astype('O'), + self.arr_complex.astype('O'), + self.arr_str.astype('O'), + self.arr_utf.astype('O'), + self.arr_date.astype('O'), + self.arr_tdelta.astype('O') + ]) with np.errstate(invalid='ignore'): self.arr_nan_nanj = self.arr_nan + self.arr_nan * 1j @@ -77,51 +77,19 @@ def setup_method(self, method): self.arr_float_2d = self.arr_float[:, :, 0] self.arr_float1_2d = 
self.arr_float1[:, :, 0] - self.arr_complex_2d = self.arr_complex[:, :, 0] - self.arr_int_2d = self.arr_int[:, :, 0] - self.arr_bool_2d = self.arr_bool[:, :, 0] - self.arr_str_2d = self.arr_str[:, :, 0] - self.arr_utf_2d = self.arr_utf[:, :, 0] - self.arr_date_2d = self.arr_date[:, :, 0] - self.arr_tdelta_2d = self.arr_tdelta[:, :, 0] self.arr_nan_2d = self.arr_nan[:, :, 0] self.arr_float_nan_2d = self.arr_float_nan[:, :, 0] self.arr_float1_nan_2d = self.arr_float1_nan[:, :, 0] self.arr_nan_float1_2d = self.arr_nan_float1[:, :, 0] - self.arr_nan_nan_2d = self.arr_nan_nan[:, :, 0] - self.arr_nan_nanj_2d = self.arr_nan_nanj[:, :, 0] - self.arr_complex_nan_2d = self.arr_complex_nan[:, :, 0] - - self.arr_inf_2d = self.arr_inf[:, :, 0] - self.arr_float_inf_2d = self.arr_float_inf[:, :, 0] - self.arr_nan_inf_2d = self.arr_nan_inf[:, :, 0] - self.arr_float_nan_inf_2d = self.arr_float_nan_inf[:, :, 0] - self.arr_nan_nan_inf_2d = self.arr_nan_nan_inf[:, :, 0] self.arr_float_1d = self.arr_float[:, 0, 0] self.arr_float1_1d = self.arr_float1[:, 0, 0] - self.arr_complex_1d = self.arr_complex[:, 0, 0] - self.arr_int_1d = self.arr_int[:, 0, 0] - self.arr_bool_1d = self.arr_bool[:, 0, 0] - self.arr_str_1d = self.arr_str[:, 0, 0] - self.arr_utf_1d = self.arr_utf[:, 0, 0] - self.arr_date_1d = self.arr_date[:, 0, 0] - self.arr_tdelta_1d = self.arr_tdelta[:, 0, 0] self.arr_nan_1d = self.arr_nan[:, 0, 0] self.arr_float_nan_1d = self.arr_float_nan[:, 0, 0] self.arr_float1_nan_1d = self.arr_float1_nan[:, 0, 0] self.arr_nan_float1_1d = self.arr_nan_float1[:, 0, 0] - self.arr_nan_nan_1d = self.arr_nan_nan[:, 0, 0] - self.arr_nan_nanj_1d = self.arr_nan_nanj[:, 0, 0] - self.arr_complex_nan_1d = self.arr_complex_nan[:, 0, 0] - - self.arr_inf_1d = self.arr_inf.ravel() - self.arr_float_inf_1d = self.arr_float_inf[:, 0, 0] - self.arr_nan_inf_1d = self.arr_nan_inf[:, 0, 0] - self.arr_float_nan_inf_1d = self.arr_float_nan_inf[:, 0, 0] - self.arr_nan_nan_inf_1d = self.arr_nan_nan_inf[:, 0, 0] def 
teardown_method(self, method): nanops._USE_BOTTLENECK = use_bn
Remove unused attributes in test_nanops
https://api.github.com/repos/pandas-dev/pandas/pulls/24776
2019-01-14T23:37:18Z
2019-01-16T01:43:21Z
2019-01-16T01:43:21Z
2019-01-16T02:57:03Z
Update DataFrame.eq docstring
diff --git a/pandas/core/ops.py b/pandas/core/ops.py index e11f0ee01e57c..10cebc6f94b92 100644 --- a/pandas/core/ops.py +++ b/pandas/core/ops.py @@ -732,8 +732,7 @@ def _get_op_name(op, special): B 150 250 C 100 300 -Compare to a scalar and operator version which return the same -results. +Comparison with a scalar, using either the operator or method: >>> df == 100 cost revenue @@ -747,33 +746,40 @@ def _get_op_name(op, special): B False False C True False -Compare to a list and Series by axis and operator version. As shown, -for list axis is by default 'index', but for Series axis is by -default 'columns'. +When `other` is a :class:`Series`, the columns of a DataFrame are aligned +with the index of `other` and broadcast: ->>> df != [100, 250, 300] - cost revenue -A True False -B True False -C True False +>>> df != pd.Series([100, 250], index=["cost", "revenue"]) + cost revenue +A True True +B True False +C False True + +Use the method to control the broadcast axis: ->>> df.ne([100, 250, 300], axis='index') +>>> df.ne(pd.Series([100, 300], index=["A", "D"]), axis='index') cost revenue A True False -B True False -C True False +B True True +C True True +D True True ->>> df != pd.Series([100, 250, 300]) - cost revenue 0 1 2 -A True True True True True -B True True True True True -C True True True True True +When comparing to an arbitrary sequence, the number of columns must +match the number elements in `other`: ->>> df.ne(pd.Series([100, 250, 300]), axis='columns') - cost revenue 0 1 2 -A True True True True True -B True True True True True -C True True True True True +>>> df == [250, 100] + cost revenue +A True True +B False False +C False False + +Use the method to control the axis: + +>>> df.eq([250, 250, 100], axis='index') + cost revenue +A True False +B False True +C True False Compare to a DataFrame of different shape. @@ -798,7 +804,7 @@ def _get_op_name(op, special): >>> df_multindex = pd.DataFrame({{'cost': [250, 150, 100, 150, 300, 220], ... 
'revenue': [100, 250, 300, 200, 175, 225]}}, ... index=[['Q1', 'Q1', 'Q1', 'Q2', 'Q2', 'Q2'], -... ['A', 'B', 'C', 'A', 'B' ,'C']]) +... ['A', 'B', 'C', 'A', 'B', 'C']]) >>> df_multindex cost revenue Q1 A 250 100
This was failing due to http://pandas.pydata.org/pandas-docs/version/0.24.0rc1/whatsnew/v0.24.0.html#dataframe-comparison-operations-broadcasting-changes I'm surprised the doctests didn't catch this. @datapythonista, `pandas.DataFrame.eq` is included in the API docs at http://pandas-docs.github.io/pandas-docs-travis/api/generated/pandas.DataFrame.eq.html. I believe it's because EX02 isn't passed in the code checks. @datapythonista do you know if that's deliberate?
https://api.github.com/repos/pandas-dev/pandas/pulls/24774
2019-01-14T22:33:39Z
2019-01-15T14:04:20Z
2019-01-15T14:04:20Z
2019-02-13T13:08:28Z
Fix flake8 issues in doc/source/enhancingperf.rst
diff --git a/doc/source/enhancingperf.rst b/doc/source/enhancingperf.rst index a4a96eea4d8e2..0e3d389aa4f6e 100644 --- a/doc/source/enhancingperf.rst +++ b/doc/source/enhancingperf.rst @@ -73,7 +73,7 @@ four calls) using the `prun ipython magic function <http://ipython.org/ipython-d .. ipython:: python - %prun -l 4 df.apply(lambda x: integrate_f(x['a'], x['b'], x['N']), axis=1) + %prun -l 4 df.apply(lambda x: integrate_f(x['a'], x['b'], x['N']), axis=1) # noqa E999 By far the majority of time is spend inside either ``integrate_f`` or ``f``, hence we'll concentrate our efforts cythonizing these two functions. @@ -189,8 +189,10 @@ in Python, so maybe we could minimize these by cythonizing the apply part. ...: for i in range(N): ...: s += f_typed(a + i * dx) ...: return s * dx - ...: cpdef np.ndarray[double] apply_integrate_f(np.ndarray col_a, np.ndarray col_b, np.ndarray col_N): - ...: assert (col_a.dtype == np.float and col_b.dtype == np.float and col_N.dtype == np.int) + ...: cpdef np.ndarray[double] apply_integrate_f(np.ndarray col_a, np.ndarray col_b, + ...: np.ndarray col_N): + ...: assert (col_a.dtype == np.float + ...: and col_b.dtype == np.float and col_N.dtype == np.int) ...: cdef Py_ssize_t i, n = len(col_N) ...: assert (len(col_a) == len(col_b) == n) ...: cdef np.ndarray[double] res = np.empty(n) @@ -271,7 +273,9 @@ advanced Cython techniques: ...: return s * dx ...: @cython.boundscheck(False) ...: @cython.wraparound(False) - ...: cpdef np.ndarray[double] apply_integrate_f_wrap(np.ndarray[double] col_a, np.ndarray[double] col_b, np.ndarray[int] col_N): + ...: cpdef np.ndarray[double] apply_integrate_f_wrap(np.ndarray[double] col_a, + ...: np.ndarray[double] col_b, + ...: np.ndarray[int] col_N): ...: cdef int i, n = len(col_N) ...: assert len(col_a) == len(col_b) == n ...: cdef np.ndarray[double] res = np.empty(n) @@ -317,45 +321,45 @@ take the plain Python code from above and annotate with the ``@jit`` decorator. .. 
code-block:: python - import numba + import numba - @numba.jit - def f_plain(x): - return x * (x - 1) + @numba.jit + def f_plain(x): + return x * (x - 1) - @numba.jit - def integrate_f_numba(a, b, N): - s = 0 - dx = (b - a) / N - for i in range(N): - s += f_plain(a + i * dx) - return s * dx + @numba.jit + def integrate_f_numba(a, b, N): + s = 0 + dx = (b - a) / N + for i in range(N): + s += f_plain(a + i * dx) + return s * dx - @numba.jit - def apply_integrate_f_numba(col_a, col_b, col_N): - n = len(col_N) - result = np.empty(n, dtype='float64') - assert len(col_a) == len(col_b) == n - for i in range(n): - result[i] = integrate_f_numba(col_a[i], col_b[i], col_N[i]) - return result + @numba.jit + def apply_integrate_f_numba(col_a, col_b, col_N): + n = len(col_N) + result = np.empty(n, dtype='float64') + assert len(col_a) == len(col_b) == n + for i in range(n): + result[i] = integrate_f_numba(col_a[i], col_b[i], col_N[i]) + return result - def compute_numba(df): - result = apply_integrate_f_numba(df['a'].values, df['b'].values, - df['N'].values) - return pd.Series(result, index=df.index, name='result') + def compute_numba(df): + result = apply_integrate_f_numba(df['a'].values, df['b'].values, + df['N'].values) + return pd.Series(result, index=df.index, name='result') Note that we directly pass NumPy arrays to the Numba function. ``compute_numba`` is just a wrapper that provides a nicer interface by passing/returning pandas objects. .. code-block:: ipython - In [4]: %timeit compute_numba(df) - 1000 loops, best of 3: 798 us per loop + In [4]: %timeit compute_numba(df) + 1000 loops, best of 3: 798 us per loop In this example, using Numba was faster than Cython. @@ -368,30 +372,30 @@ Consider the following toy example of doubling each observation: .. 
code-block:: python - import numba + import numba - def double_every_value_nonumba(x): - return x * 2 + def double_every_value_nonumba(x): + return x * 2 - @numba.vectorize - def double_every_value_withnumba(x): - return x * 2 + @numba.vectorize + def double_every_value_withnumba(x): # noqa E501 + return x * 2 .. code-block:: ipython - # Custom function without numba - In [5]: %timeit df['col1_doubled'] = df.a.apply(double_every_value_nonumba) - 1000 loops, best of 3: 797 us per loop + # Custom function without numba + In [5]: %timeit df['col1_doubled'] = df.a.apply(double_every_value_nonumba) # noqa E501 + 1000 loops, best of 3: 797 us per loop - # Standard implementation (faster than a custom function) - In [6]: %timeit df['col1_doubled'] = df.a*2 - 1000 loops, best of 3: 233 us per loop + # Standard implementation (faster than a custom function) + In [6]: %timeit df['col1_doubled'] = df.a * 2 + 1000 loops, best of 3: 233 us per loop - # Custom function with numba - In [7]: %timeit df['col1_doubled'] = double_every_value_withnumba(df.a.values) - 1000 loops, best of 3: 145 us per loop + # Custom function with numba + In [7]: %timeit (df['col1_doubled'] = double_every_value_withnumba(df.a.values) + 1000 loops, best of 3: 145 us per loop Caveats ~~~~~~~ diff --git a/setup.cfg b/setup.cfg index 44d92c0b8777f..95c71826a80d4 100644 --- a/setup.cfg +++ b/setup.cfg @@ -48,7 +48,6 @@ ignore = E402, # module level import not at top of file exclude = doc/source/basics.rst doc/source/contributing_docstring.rst - doc/source/enhancingperf.rst [yapf]
- [ ] closes #24176 - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
https://api.github.com/repos/pandas-dev/pandas/pulls/24772
2019-01-14T20:14:27Z
2019-01-15T14:12:07Z
2019-01-15T14:12:07Z
2019-01-15T14:12:07Z
API: Added is_extension_array_dtype
diff --git a/doc/source/api/general_utility_functions.rst b/doc/source/api/general_utility_functions.rst index bed76d5b04b5e..e151f8f57ed5e 100644 --- a/doc/source/api/general_utility_functions.rst +++ b/doc/source/api/general_utility_functions.rst @@ -63,6 +63,7 @@ Dtype introspection api.types.is_datetime64_ns_dtype api.types.is_datetime64tz_dtype api.types.is_extension_type + api.types.is_extension_array_dtype api.types.is_float_dtype api.types.is_int64_dtype api.types.is_integer_dtype diff --git a/pandas/core/dtypes/api.py b/pandas/core/dtypes/api.py index 76021705563bf..e9d7b9c4281bd 100644 --- a/pandas/core/dtypes/api.py +++ b/pandas/core/dtypes/api.py @@ -5,10 +5,10 @@ is_categorical_dtype, is_complex, is_complex_dtype, is_datetime64_any_dtype, is_datetime64_dtype, is_datetime64_ns_dtype, is_datetime64tz_dtype, is_datetimetz, is_dict_like, is_dtype_equal, - is_extension_type, is_file_like, is_float, is_float_dtype, is_hashable, - is_int64_dtype, is_integer, is_integer_dtype, is_interval, - is_interval_dtype, is_iterator, is_list_like, is_named_tuple, is_number, - is_numeric_dtype, is_object_dtype, is_period, is_period_dtype, is_re, - is_re_compilable, is_scalar, is_signed_integer_dtype, is_sparse, + is_extension_array_dtype, is_extension_type, is_file_like, is_float, + is_float_dtype, is_hashable, is_int64_dtype, is_integer, is_integer_dtype, + is_interval, is_interval_dtype, is_iterator, is_list_like, is_named_tuple, + is_number, is_numeric_dtype, is_object_dtype, is_period, is_period_dtype, + is_re, is_re_compilable, is_scalar, is_signed_integer_dtype, is_sparse, is_string_dtype, is_timedelta64_dtype, is_timedelta64_ns_dtype, is_unsigned_integer_dtype, pandas_dtype) diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py index 507dacb5322a6..e9bf0f87088db 100644 --- a/pandas/core/dtypes/common.py +++ b/pandas/core/dtypes/common.py @@ -1700,15 +1700,21 @@ def is_extension_type(arr): def is_extension_array_dtype(arr_or_dtype): - """Check if 
an object is a pandas extension array type. + """ + Check if an object is a pandas extension array type. + + See the :ref:`Use Guide <extending.extension-types>` for more. Parameters ---------- arr_or_dtype : object + For array-like input, the ``.dtype`` attribute will + be extracted. Returns ------- bool + Whether the `arr_or_dtype` is an extension array type. Notes ----- @@ -1718,9 +1724,25 @@ def is_extension_array_dtype(arr_or_dtype): * Categorical * Sparse * Interval + * Period + * DatetimeArray + * TimedeltaArray Third-party libraries may implement arrays or types satisfying this interface as well. + + Examples + -------- + >>> from pandas.api.types import is_extension_array_dtype + >>> arr = pd.Categorical(['a', 'b']) + >>> is_extension_array_dtype(arr) + True + >>> is_extension_array_dtype(arr.dtype) + True + + >>> arr = np.array(['a', 'b']) + >>> is_extension_array_dtype(arr.dtype) + False """ dtype = getattr(arr_or_dtype, 'dtype', arr_or_dtype) return (isinstance(dtype, ExtensionDtype) or diff --git a/pandas/tests/api/test_types.py b/pandas/tests/api/test_types.py index 0a81557005477..235d7ecc64f60 100644 --- a/pandas/tests/api/test_types.py +++ b/pandas/tests/api/test_types.py @@ -24,7 +24,8 @@ class TestTypes(Base): 'is_dict_like', 'is_iterator', 'is_file_like', 'is_list_like', 'is_hashable', 'is_array_like', 'is_named_tuple', - 'pandas_dtype', 'union_categoricals', 'infer_dtype'] + 'pandas_dtype', 'union_categoricals', 'infer_dtype', + 'is_extension_array_dtype'] deprecated = ['is_period', 'is_datetimetz'] dtypes = ['CategoricalDtype', 'DatetimeTZDtype', 'PeriodDtype', 'IntervalDtype']
https://api.github.com/repos/pandas-dev/pandas/pulls/24771
2019-01-14T19:57:16Z
2019-01-14T21:05:34Z
2019-01-14T21:05:34Z
2019-11-21T16:17:21Z
DOC: Correct minor spelling error
diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx index 7097a702227d7..856aa52f82cf5 100644 --- a/pandas/_libs/tslibs/offsets.pyx +++ b/pandas/_libs/tslibs/offsets.pyx @@ -412,12 +412,12 @@ class _BaseOffset(object): **self.kwds) def __neg__(self): - # Note: we are defering directly to __mul__ instead of __rmul__, as + # Note: we are deferring directly to __mul__ instead of __rmul__, as # that allows us to use methods that can go in a `cdef class` return self * -1 def copy(self): - # Note: we are defering directly to __mul__ instead of __rmul__, as + # Note: we are deferring directly to __mul__ instead of __rmul__, as # that allows us to use methods that can go in a `cdef class` return self * 1
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/24770
2019-01-14T19:16:05Z
2019-01-14T20:07:00Z
2019-01-14T20:07:00Z
2019-01-14T20:07:05Z
REF/TST: Stop using singleton fixtures
diff --git a/pandas/tests/frame/conftest.py b/pandas/tests/frame/conftest.py deleted file mode 100644 index 377e737a53158..0000000000000 --- a/pandas/tests/frame/conftest.py +++ /dev/null @@ -1,221 +0,0 @@ -import numpy as np -import pytest - -from pandas import DataFrame, NaT, compat, date_range -import pandas.util.testing as tm - - -@pytest.fixture -def float_frame(): - """ - Fixture for DataFrame of floats with index of unique strings - - Columns are ['A', 'B', 'C', 'D']. - """ - return DataFrame(tm.getSeriesData()) - - -@pytest.fixture -def float_frame_with_na(): - """ - Fixture for DataFrame of floats with index of unique strings - - Columns are ['A', 'B', 'C', 'D']; some entries are missing - """ - df = DataFrame(tm.getSeriesData()) - # set some NAs - df.loc[5:10] = np.nan - df.loc[15:20, -2:] = np.nan - return df - - -@pytest.fixture -def float_frame2(): - """ - Fixture for DataFrame of floats with index of unique strings - - Columns are ['D', 'C', 'B', 'A'] - """ - return DataFrame(tm.getSeriesData(), columns=['D', 'C', 'B', 'A']) - - -@pytest.fixture -def bool_frame_with_na(): - """ - Fixture for DataFrame of booleans with index of unique strings - - Columns are ['A', 'B', 'C', 'D']; some entries are missing - """ - df = DataFrame(tm.getSeriesData()) > 0 - df = df.astype(object) - # set some NAs - df.loc[5:10] = np.nan - df.loc[15:20, -2:] = np.nan - return df - - -@pytest.fixture -def int_frame(): - """ - Fixture for DataFrame of ints with index of unique strings - - Columns are ['A', 'B', 'C', 'D'] - """ - df = DataFrame({k: v.astype(int) - for k, v in compat.iteritems(tm.getSeriesData())}) - # force these all to int64 to avoid platform testing issues - return DataFrame({c: s for c, s in compat.iteritems(df)}, dtype=np.int64) - - -@pytest.fixture -def datetime_frame(): - """ - Fixture for DataFrame of floats with DatetimeIndex - - Columns are ['A', 'B', 'C', 'D'] - """ - return DataFrame(tm.getTimeSeriesData()) - - -@pytest.fixture -def 
float_string_frame(): - """ - Fixture for DataFrame of floats and strings with index of unique strings - - Columns are ['A', 'B', 'C', 'D', 'foo']. - """ - df = DataFrame(tm.getSeriesData()) - df['foo'] = 'bar' - return df - - -@pytest.fixture -def mixed_float_frame(): - """ - Fixture for DataFrame of different float types with index of unique strings - - Columns are ['A', 'B', 'C', 'D']. - """ - df = DataFrame(tm.getSeriesData()) - df.A = df.A.astype('float32') - df.B = df.B.astype('float32') - df.C = df.C.astype('float16') - df.D = df.D.astype('float64') - return df - - -@pytest.fixture -def mixed_float_frame2(): - """ - Fixture for DataFrame of different float types with index of unique strings - - Columns are ['A', 'B', 'C', 'D']. - """ - df = DataFrame(tm.getSeriesData()) - df.D = df.D.astype('float32') - df.C = df.C.astype('float32') - df.B = df.B.astype('float16') - df.D = df.D.astype('float64') - return df - - -@pytest.fixture -def mixed_int_frame(): - """ - Fixture for DataFrame of different int types with index of unique strings - - Columns are ['A', 'B', 'C', 'D']. - """ - df = DataFrame({k: v.astype(int) - for k, v in compat.iteritems(tm.getSeriesData())}) - df.A = df.A.astype('int32') - df.B = np.ones(len(df.B), dtype='uint64') - df.C = df.C.astype('uint8') - df.D = df.C.astype('int64') - return df - - -@pytest.fixture -def mixed_type_frame(): - """ - Fixture for DataFrame of float/int/string columns with RangeIndex - - Columns are ['a', 'b', 'c', 'float32', 'int32']. - """ - return DataFrame({'a': 1., 'b': 2, 'c': 'foo', - 'float32': np.array([1.] 
* 10, dtype='float32'), - 'int32': np.array([1] * 10, dtype='int32')}, - index=np.arange(10)) - - -@pytest.fixture -def timezone_frame(): - """ - Fixture for DataFrame of date_range Series with different time zones - - Columns are ['A', 'B', 'C']; some entries are missing - """ - df = DataFrame({'A': date_range('20130101', periods=3), - 'B': date_range('20130101', periods=3, - tz='US/Eastern'), - 'C': date_range('20130101', periods=3, - tz='CET')}) - df.iloc[1, 1] = NaT - df.iloc[1, 2] = NaT - return df - - -@pytest.fixture -def empty_frame(): - """ - Fixture for empty DataFrame - """ - return DataFrame({}) - - -@pytest.fixture -def datetime_series(): - """ - Fixture for Series of floats with DatetimeIndex - """ - return tm.makeTimeSeries(nper=30) - - -@pytest.fixture -def datetime_series_short(): - """ - Fixture for Series of floats with DatetimeIndex - """ - return tm.makeTimeSeries(nper=30)[5:] - - -@pytest.fixture -def simple_frame(): - """ - Fixture for simple 3x3 DataFrame - - Columns are ['one', 'two', 'three'], index is ['a', 'b', 'c']. - """ - arr = np.array([[1., 2., 3.], - [4., 5., 6.], - [7., 8., 9.]]) - - return DataFrame(arr, columns=['one', 'two', 'three'], - index=['a', 'b', 'c']) - - -@pytest.fixture -def frame_of_index_cols(): - """ - Fixture for DataFrame of columns that can be used for indexing - - Columns are ['A', 'B', 'C', 'D', 'E', ('tuple', 'as', 'label')]; - 'A' & 'B' contain duplicates (but are jointly unique), the rest are unique. 
- """ - df = DataFrame({'A': ['foo', 'foo', 'foo', 'bar', 'bar'], - 'B': ['one', 'two', 'three', 'one', 'two'], - 'C': ['a', 'b', 'c', 'd', 'e'], - 'D': np.random.randn(5), - 'E': np.random.randn(5), - ('tuple', 'as', 'label'): np.random.randn(5)}) - return df diff --git a/pandas/tests/frame/test_alter_axes.py b/pandas/tests/frame/test_alter_axes.py index c2355742199dc..99c4d7b982ebc 100644 --- a/pandas/tests/frame/test_alter_axes.py +++ b/pandas/tests/frame/test_alter_axes.py @@ -21,7 +21,9 @@ class TestDataFrameAlterAxes(): - def test_set_index_directly(self, float_string_frame): + def test_set_index_directly(self): + float_string_frame = tm.get_float_string_frame() + df = float_string_frame idx = Index(np.arange(len(df))[::-1]) @@ -30,7 +32,9 @@ def test_set_index_directly(self, float_string_frame): with pytest.raises(ValueError, match='Length mismatch'): df.index = idx[::2] - def test_set_index(self, float_string_frame): + def test_set_index(self): + float_string_frame = tm.get_float_string_frame() + df = float_string_frame idx = Index(np.arange(len(df))[::-1]) @@ -51,9 +55,8 @@ def test_set_index_cast(self): ('tuple', 'as', 'label')]) @pytest.mark.parametrize('inplace', [True, False]) @pytest.mark.parametrize('drop', [True, False]) - def test_set_index_drop_inplace(self, frame_of_index_cols, - drop, inplace, keys): - df = frame_of_index_cols + def test_set_index_drop_inplace(self, drop, inplace, keys): + df = tm.get_frame_of_index_cols() if isinstance(keys, list): idx = MultiIndex.from_arrays([df[x] for x in keys], names=keys) @@ -74,8 +77,8 @@ def test_set_index_drop_inplace(self, frame_of_index_cols, @pytest.mark.parametrize('keys', ['A', 'C', ['A', 'B'], ('tuple', 'as', 'label')]) @pytest.mark.parametrize('drop', [True, False]) - def test_set_index_append(self, frame_of_index_cols, drop, keys): - df = frame_of_index_cols + def test_set_index_append(self, drop, keys): + df = tm.get_frame_of_index_cols() keys = keys if isinstance(keys, list) else [keys] idx = 
MultiIndex.from_arrays([df.index] + [df[x] for x in keys], @@ -91,9 +94,9 @@ def test_set_index_append(self, frame_of_index_cols, drop, keys): @pytest.mark.parametrize('keys', ['A', 'C', ['A', 'B'], ('tuple', 'as', 'label')]) @pytest.mark.parametrize('drop', [True, False]) - def test_set_index_append_to_multiindex(self, frame_of_index_cols, - drop, keys): + def test_set_index_append_to_multiindex(self, drop, keys): # append to existing multiindex + frame_of_index_cols = tm.get_frame_of_index_cols() df = frame_of_index_cols.set_index(['D'], drop=drop, append=True) keys = keys if isinstance(keys, list) else [keys] @@ -123,9 +126,8 @@ def test_set_index_after_mutation(self): @pytest.mark.parametrize('append, index_name', [(True, None), (True, 'B'), (True, 'test'), (False, None)]) @pytest.mark.parametrize('drop', [True, False]) - def test_set_index_pass_single_array(self, frame_of_index_cols, - drop, append, index_name, box): - df = frame_of_index_cols + def test_set_index_pass_single_array(self, drop, append, index_name, box): + df = tm.get_frame_of_index_cols() df.index.name = index_name key = box(df['B']) @@ -156,9 +158,8 @@ def test_set_index_pass_single_array(self, frame_of_index_cols, [(True, None), (True, 'A'), (True, 'B'), (True, 'test'), (False, None)]) @pytest.mark.parametrize('drop', [True, False]) - def test_set_index_pass_arrays(self, frame_of_index_cols, - drop, append, index_name, box): - df = frame_of_index_cols + def test_set_index_pass_arrays(self, drop, append, index_name, box): + df = tm.get_frame_of_index_cols() df.index.name = index_name keys = ['A', box(df['B'])] @@ -187,9 +188,9 @@ def test_set_index_pass_arrays(self, frame_of_index_cols, @pytest.mark.parametrize('append, index_name', [(True, None), (True, 'A'), (True, 'test'), (False, None)]) @pytest.mark.parametrize('drop', [True, False]) - def test_set_index_pass_arrays_duplicate(self, frame_of_index_cols, drop, + def test_set_index_pass_arrays_duplicate(self, drop, append, index_name, box1, 
box2): - df = frame_of_index_cols + df = tm.get_frame_of_index_cols() df.index.name = index_name keys = [box1(df['A']), box2(df['A'])] @@ -209,9 +210,8 @@ def test_set_index_pass_arrays_duplicate(self, frame_of_index_cols, drop, @pytest.mark.parametrize('append', [True, False]) @pytest.mark.parametrize('drop', [True, False]) - def test_set_index_pass_multiindex(self, frame_of_index_cols, - drop, append): - df = frame_of_index_cols + def test_set_index_pass_multiindex(self, drop, append): + df = tm.get_frame_of_index_cols() keys = MultiIndex.from_arrays([df['A'], df['B']], names=['A', 'B']) result = df.set_index(keys, drop=drop, append=append) @@ -221,8 +221,8 @@ def test_set_index_pass_multiindex(self, frame_of_index_cols, tm.assert_frame_equal(result, expected) - def test_set_index_verify_integrity(self, frame_of_index_cols): - df = frame_of_index_cols + def test_set_index_verify_integrity(self): + df = tm.get_frame_of_index_cols() with pytest.raises(ValueError, match='Index has duplicate keys'): df.set_index('A', verify_integrity=True) @@ -232,8 +232,8 @@ def test_set_index_verify_integrity(self, frame_of_index_cols): @pytest.mark.parametrize('append', [True, False]) @pytest.mark.parametrize('drop', [True, False]) - def test_set_index_raise_keys(self, frame_of_index_cols, drop, append): - df = frame_of_index_cols + def test_set_index_raise(self, drop, append): + df = tm.get_frame_of_index_cols() with pytest.raises(KeyError, match="['foo', 'bar', 'baz']"): # column names are A-E, as well as one tuple @@ -256,9 +256,8 @@ def test_set_index_raise_keys(self, frame_of_index_cols, drop, append): @pytest.mark.parametrize('append', [True, False]) @pytest.mark.parametrize('drop', [True, False]) @pytest.mark.parametrize('box', [set, iter]) - def test_set_index_raise_on_type(self, frame_of_index_cols, box, - drop, append): - df = frame_of_index_cols + def test_set_index_raise_on_type(self, box, drop, append): + df = tm.get_frame_of_index_cols() msg = 'The parameter "keys" 
may be a column key, .*' # forbidden type, e.g. set/tuple/iter @@ -440,7 +439,9 @@ def test_set_index_empty_column(self): names=['a', 'x']) tm.assert_frame_equal(result, expected) - def test_set_columns(self, float_string_frame): + def test_set_columns(self): + float_string_frame = tm.get_float_string_frame() + cols = Index(np.arange(len(float_string_frame.columns))) float_string_frame.columns = cols with pytest.raises(ValueError, match='Length mismatch'): @@ -472,7 +473,9 @@ def test_dti_set_index_reindex(self): # Renaming - def test_rename(self, float_frame): + def test_rename(self): + float_frame = DataFrame(tm.getSeriesData()) + mapping = { 'A': 'a', 'B': 'b', @@ -519,8 +522,10 @@ def test_rename(self, float_frame): Index(['bar', 'foo'], name='name')) assert renamed.index.name == renamer.index.name - def test_rename_axis_inplace(self, float_frame): + def test_rename_axis_inplace(self): # GH 15704 + float_frame = DataFrame(tm.getSeriesData()) + expected = float_frame.rename_axis('foo') result = float_frame.copy() no_return = result.rename_axis('foo', inplace=True) @@ -675,12 +680,18 @@ def test_rename_multiindex(self): level=0) tm.assert_index_equal(renamed.index, new_index) - def test_rename_nocopy(self, float_frame): + def test_rename_nocopy(self): + float_frame = DataFrame(tm.getSeriesData()) + renamed = float_frame.rename(columns={'C': 'foo'}, copy=False) renamed['foo'] = 1. 
assert (float_frame['C'] == 1.).all() - def test_rename_inplace(self, float_frame): + def test_rename_inplace(self): + # See GH#24769 re dereferencing semantics + float_frame = DataFrame(tm.getSeriesData()) + float_frame_orig = float_frame + float_frame.rename(columns={'C': 'foo'}) assert 'C' in float_frame assert 'foo' not in float_frame @@ -692,6 +703,7 @@ def test_rename_inplace(self, float_frame): assert 'C' not in float_frame assert 'foo' in float_frame assert id(float_frame['foo']) != c_id + assert float_frame is not float_frame_orig def test_rename_bug(self): # GH 5344 @@ -759,7 +771,9 @@ def test_reorder_levels(self): result = df.reorder_levels(['L0', 'L0', 'L0']) tm.assert_frame_equal(result, expected) - def test_reset_index(self, float_frame): + def test_reset_index(self): + float_frame = DataFrame(tm.getSeriesData()) + stacked = float_frame.stack()[::2] stacked = DataFrame({'foo': stacked, 'bar': stacked}) @@ -1015,7 +1029,9 @@ def test_set_index_names(self): # Check equality tm.assert_index_equal(df.set_index([df.index, idx2]).index, mi2) - def test_rename_objects(self, float_string_frame): + def test_rename_objects(self): + float_string_frame = tm.get_float_string_frame() + renamed = float_string_frame.rename(columns=str.upper) assert 'FOO' in renamed @@ -1139,7 +1155,8 @@ def test_rename_positional(self): assert 'rename' in message assert 'Use named arguments' in message - def test_assign_columns(self, float_frame): + def test_assign_columns(self): + float_frame = DataFrame(tm.getSeriesData()) float_frame['hi'] = 'there' df = float_frame.copy() diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_analytics.py index f2c3f50c291c3..86858d9d54343 100644 --- a/pandas/tests/frame/test_analytics.py +++ b/pandas/tests/frame/test_analytics.py @@ -237,21 +237,27 @@ class TestDataFrameAnalytics(): # Correlation and covariance @td.skip_if_no_scipy - def test_corr_pearson(self, float_frame): + def test_corr_pearson(self): + float_frame = 
DataFrame(tm.getSeriesData()) + float_frame['A'][:5] = np.nan float_frame['B'][5:10] = np.nan self._check_method(float_frame, 'pearson') @td.skip_if_no_scipy - def test_corr_kendall(self, float_frame): + def test_corr_kendall(self): + float_frame = DataFrame(tm.getSeriesData()) + float_frame['A'][:5] = np.nan float_frame['B'][5:10] = np.nan self._check_method(float_frame, 'kendall') @td.skip_if_no_scipy - def test_corr_spearman(self, float_frame): + def test_corr_spearman(self): + float_frame = DataFrame(tm.getSeriesData()) + float_frame['A'][:5] = np.nan float_frame['B'][5:10] = np.nan @@ -263,7 +269,10 @@ def _check_method(self, frame, method='pearson'): tm.assert_almost_equal(correls['A']['C'], expected) @td.skip_if_no_scipy - def test_corr_non_numeric(self, float_frame, float_string_frame): + def test_corr_non_numeric(self): + float_string_frame = tm.get_float_string_frame() + float_frame = DataFrame(tm.getSeriesData()) + float_frame['A'][:5] = np.nan float_frame['B'][5:10] = np.nan @@ -337,8 +346,11 @@ def test_corr_invalid_method(self): with pytest.raises(ValueError, match=msg): df.corr(method="____") - def test_cov(self, float_frame, float_string_frame): + def test_cov(self): # min_periods no NAs (corner case) + float_string_frame = tm.get_float_string_frame() + float_frame = DataFrame(tm.getSeriesData()) + expected = float_frame.cov() result = float_frame.cov(min_periods=len(float_frame)) @@ -381,7 +393,9 @@ def test_cov(self, float_frame, float_string_frame): index=df.columns, columns=df.columns) tm.assert_frame_equal(result, expected) - def test_corrwith(self, datetime_frame): + def test_corrwith(self): + datetime_frame = DataFrame(tm.getTimeSeriesData()) + a = datetime_frame noise = Series(np.random.randn(len(a)), index=a.index) @@ -431,7 +445,9 @@ def test_corrwith_with_objects(self): expected = df1.loc[:, cols].corrwith(df2.loc[:, cols], axis=1) tm.assert_series_equal(result, expected) - def test_corrwith_series(self, datetime_frame): + def 
test_corrwith_series(self): + datetime_frame = DataFrame(tm.getTimeSeriesData()) + result = datetime_frame.corrwith(datetime_frame['A']) expected = datetime_frame.apply(datetime_frame['A'].corr) @@ -706,7 +722,12 @@ def test_reduce_mixed_frame(self): np.array([2, 150, 'abcde'], dtype=object)) tm.assert_series_equal(test, df.T.sum(axis=1)) - def test_count(self, float_frame_with_na, float_frame, float_string_frame): + def test_count(self): + + float_frame_with_na = tm.get_float_frame_with_na() + float_string_frame = tm.get_float_string_frame() + float_frame = DataFrame(tm.getSeriesData()) + f = lambda s: notna(s).sum() assert_stat_op_calc('count', f, float_frame_with_na, has_skipna=False, check_dtype=False, check_dates=True) @@ -737,8 +758,12 @@ def test_count(self, float_frame_with_na, float_frame, float_string_frame): expected = Series(0, index=[]) tm.assert_series_equal(result, expected) - def test_nunique(self, float_frame_with_na, float_frame, - float_string_frame): + def test_nunique(self): + + float_frame_with_na = tm.get_float_frame_with_na() + float_string_frame = tm.get_float_string_frame() + float_frame = DataFrame(tm.getSeriesData()) + f = lambda s: len(algorithms.unique1d(s.dropna())) assert_stat_op_calc('nunique', f, float_frame_with_na, has_skipna=False, check_dtype=False, @@ -755,8 +780,13 @@ def test_nunique(self, float_frame_with_na, float_frame, tm.assert_series_equal(df.nunique(axis=1, dropna=False), Series({0: 1, 1: 3, 2: 2})) - def test_sum(self, float_frame_with_na, mixed_float_frame, - float_frame, float_string_frame): + def test_sum(self): + + float_frame_with_na = tm.get_float_frame_with_na() + float_string_frame = tm.get_float_string_frame() + mixed_float_frame = tm.get_mixed_float_frame() + float_frame = DataFrame(tm.getSeriesData()) + assert_stat_op_api('sum', float_frame, float_string_frame, has_numeric_only=True) assert_stat_op_calc('sum', np.sum, float_frame_with_na, @@ -789,20 +819,33 @@ def 
test_stat_operators_attempt_obj_array(self, method): if method in ['sum', 'prod']: tm.assert_series_equal(result, expected) - def test_mean(self, float_frame_with_na, float_frame, float_string_frame): + def test_mean(self): + + float_frame_with_na = tm.get_float_frame_with_na() + float_string_frame = tm.get_float_string_frame() + float_frame = DataFrame(tm.getSeriesData()) + assert_stat_op_calc('mean', np.mean, float_frame_with_na, check_dates=True) assert_stat_op_api('mean', float_frame, float_string_frame) - def test_product(self, float_frame_with_na, float_frame, - float_string_frame): + def test_product(self): + + float_frame_with_na = tm.get_float_frame_with_na() + float_string_frame = tm.get_float_string_frame() + float_frame = DataFrame(tm.getSeriesData()) + assert_stat_op_calc('product', np.prod, float_frame_with_na) assert_stat_op_api('product', float_frame, float_string_frame) # TODO: Ensure warning isn't emitted in the first place @pytest.mark.filterwarnings("ignore:All-NaN:RuntimeWarning") - def test_median(self, float_frame_with_na, float_frame, - float_string_frame): + def test_median(self): + + float_frame_with_na = tm.get_float_frame_with_na() + float_string_frame = tm.get_float_string_frame() + float_frame = DataFrame(tm.getSeriesData()) + def wrapper(x): if isna(x).any(): return np.nan @@ -812,8 +855,12 @@ def wrapper(x): check_dates=True) assert_stat_op_api('median', float_frame, float_string_frame) - def test_min(self, float_frame_with_na, int_frame, - float_frame, float_string_frame): + def test_min(self): + int_frame = tm.get_int_frame() + float_frame_with_na = tm.get_float_frame_with_na() + float_string_frame = tm.get_float_string_frame() + float_frame = DataFrame(tm.getSeriesData()) + with warnings.catch_warnings(record=True): warnings.simplefilter("ignore", RuntimeWarning) assert_stat_op_calc('min', np.min, float_frame_with_na, @@ -821,7 +868,9 @@ def test_min(self, float_frame_with_na, int_frame, assert_stat_op_calc('min', np.min, 
int_frame) assert_stat_op_api('min', float_frame, float_string_frame) - def test_cummin(self, datetime_frame): + def test_cummin(self): + datetime_frame = DataFrame(tm.getTimeSeriesData()) + datetime_frame.loc[5:10, 0] = np.nan datetime_frame.loc[10:15, 1] = np.nan datetime_frame.loc[15:, 2] = np.nan @@ -844,7 +893,9 @@ def test_cummin(self, datetime_frame): cummin_xs = datetime_frame.cummin(axis=1) assert np.shape(cummin_xs) == np.shape(datetime_frame) - def test_cummax(self, datetime_frame): + def test_cummax(self): + datetime_frame = DataFrame(tm.getTimeSeriesData()) + datetime_frame.loc[5:10, 0] = np.nan datetime_frame.loc[10:15, 1] = np.nan datetime_frame.loc[15:, 2] = np.nan @@ -867,8 +918,13 @@ def test_cummax(self, datetime_frame): cummax_xs = datetime_frame.cummax(axis=1) assert np.shape(cummax_xs) == np.shape(datetime_frame) - def test_max(self, float_frame_with_na, int_frame, - float_frame, float_string_frame): + def test_max(self): + + int_frame = tm.get_int_frame() + float_frame_with_na = tm.get_float_frame_with_na() + float_string_frame = tm.get_float_string_frame() + float_frame = DataFrame(tm.getSeriesData()) + with warnings.catch_warnings(record=True): warnings.simplefilter("ignore", RuntimeWarning) assert_stat_op_calc('max', np.max, float_frame_with_na, @@ -876,13 +932,21 @@ def test_max(self, float_frame_with_na, int_frame, assert_stat_op_calc('max', np.max, int_frame) assert_stat_op_api('max', float_frame, float_string_frame) - def test_mad(self, float_frame_with_na, float_frame, float_string_frame): + def test_mad(self): + float_frame_with_na = tm.get_float_frame_with_na() + float_string_frame = tm.get_float_string_frame() + float_frame = DataFrame(tm.getSeriesData()) + f = lambda x: np.abs(x - x.mean()).mean() assert_stat_op_calc('mad', f, float_frame_with_na) assert_stat_op_api('mad', float_frame, float_string_frame) - def test_var_std(self, float_frame_with_na, datetime_frame, float_frame, - float_string_frame): + def test_var_std(self): + 
datetime_frame = DataFrame(tm.getTimeSeriesData()) + float_frame_with_na = tm.get_float_frame_with_na() + float_string_frame = tm.get_float_string_frame() + float_frame = DataFrame(tm.getSeriesData()) + alt = lambda x: np.var(x, ddof=1) assert_stat_op_calc('var', alt, float_frame_with_na) assert_stat_op_api('var', float_frame, float_string_frame) @@ -948,7 +1012,9 @@ def test_mixed_ops(self, op): result = getattr(df, op)() assert len(result) == 2 - def test_cumsum(self, datetime_frame): + def test_cumsum(self): + datetime_frame = DataFrame(tm.getTimeSeriesData()) + datetime_frame.loc[5:10, 0] = np.nan datetime_frame.loc[10:15, 1] = np.nan datetime_frame.loc[15:, 2] = np.nan @@ -971,7 +1037,9 @@ def test_cumsum(self, datetime_frame): cumsum_xs = datetime_frame.cumsum(axis=1) assert np.shape(cumsum_xs) == np.shape(datetime_frame) - def test_cumprod(self, datetime_frame): + def test_cumprod(self): + datetime_frame = DataFrame(tm.getTimeSeriesData()) + datetime_frame.loc[5:10, 0] = np.nan datetime_frame.loc[10:15, 1] = np.nan datetime_frame.loc[15:, 2] = np.nan @@ -1000,8 +1068,13 @@ def test_cumprod(self, datetime_frame): df.cumprod(0) df.cumprod(1) - def test_sem(self, float_frame_with_na, datetime_frame, - float_frame, float_string_frame): + def test_sem(self): + + datetime_frame = DataFrame(tm.getTimeSeriesData()) + float_frame_with_na = tm.get_float_frame_with_na() + float_string_frame = tm.get_float_string_frame() + float_frame = DataFrame(tm.getSeriesData()) + alt = lambda x: np.std(x, ddof=1) / np.sqrt(len(x)) assert_stat_op_calc('sem', alt, float_frame_with_na) assert_stat_op_api('sem', float_frame, float_string_frame) @@ -1020,9 +1093,13 @@ def test_sem(self, float_frame_with_na, datetime_frame, assert not (result < 0).any() @td.skip_if_no_scipy - def test_skew(self, float_frame_with_na, float_frame, float_string_frame): + def test_skew(self): from scipy.stats import skew + float_frame_with_na = tm.get_float_frame_with_na() + float_string_frame = 
tm.get_float_string_frame() + float_frame = DataFrame(tm.getSeriesData()) + def alt(x): if len(x) < 3: return np.nan @@ -1032,9 +1109,13 @@ def alt(x): assert_stat_op_api('skew', float_frame, float_string_frame) @td.skip_if_no_scipy - def test_kurt(self, float_frame_with_na, float_frame, float_string_frame): + def test_kurt(self): from scipy.stats import kurtosis + float_frame_with_na = tm.get_float_frame_with_na() + float_string_frame = tm.get_float_string_frame() + float_frame = DataFrame(tm.getSeriesData()) + def alt(x): if len(x) < 4: return np.nan @@ -1199,7 +1280,9 @@ def test_operators_timedelta64(self): assert df['off1'].dtype == 'timedelta64[ns]' assert df['off2'].dtype == 'timedelta64[ns]' - def test_sum_corner(self, empty_frame): + def test_sum_corner(self): + empty_frame = DataFrame({}) + axis0 = empty_frame.sum(0) axis1 = empty_frame.sum(1) assert isinstance(axis0, Series) @@ -1267,21 +1350,28 @@ def test_sum_nanops_timedelta(self): expected = pd.Series([0, 0, np.nan], dtype='m8[ns]', index=idx) tm.assert_series_equal(result, expected) - def test_sum_object(self, float_frame): + def test_sum_object(self): + float_frame = DataFrame(tm.getSeriesData()) + values = float_frame.values.astype(int) frame = DataFrame(values, index=float_frame.index, columns=float_frame.columns) deltas = frame * timedelta(1) deltas.sum() - def test_sum_bool(self, float_frame): + def test_sum_bool(self): # ensure this works, bug report + float_frame = DataFrame(tm.getSeriesData()) + bools = np.isnan(float_frame) bools.sum(1) bools.sum(0) - def test_mean_corner(self, float_frame, float_string_frame): + def test_mean_corner(self): # unit test when have object data + float_string_frame = tm.get_float_string_frame() + float_frame = DataFrame(tm.getSeriesData()) + the_mean = float_string_frame.mean(axis=0) the_sum = float_string_frame.sum(axis=0, numeric_only=True) tm.assert_index_equal(the_sum.index, the_mean.index) @@ -1297,8 +1387,10 @@ def test_mean_corner(self, float_frame, 
float_string_frame): means = float_frame.mean(0) assert means['bool'] == float_frame['bool'].values.mean() - def test_stats_mixed_type(self, float_string_frame): + def test_stats_mixed_type(self): # don't blow up + float_string_frame = tm.get_float_string_frame() + float_string_frame.std(1) float_string_frame.var(1) float_string_frame.mean(1) @@ -1306,7 +1398,12 @@ def test_stats_mixed_type(self, float_string_frame): # TODO: Ensure warning isn't emitted in the first place @pytest.mark.filterwarnings("ignore:All-NaN:RuntimeWarning") - def test_median_corner(self, int_frame, float_frame, float_string_frame): + def test_median_corner(self): + + int_frame = tm.get_int_frame() + float_string_frame = tm.get_float_string_frame() + float_frame = DataFrame(tm.getSeriesData()) + def wrapper(x): if isna(x).any(): return np.nan @@ -1318,7 +1415,9 @@ def wrapper(x): # Miscellanea - def test_count_objects(self, float_string_frame): + def test_count_objects(self): + float_string_frame = tm.get_float_string_frame() + dm = DataFrame(float_string_frame._series) df = DataFrame(float_string_frame._series) @@ -1338,7 +1437,11 @@ def test_sum_bools(self): # Index of max / min - def test_idxmin(self, float_frame, int_frame): + def test_idxmin(self): + + int_frame = tm.get_int_frame() + float_frame = DataFrame(tm.getSeriesData()) + frame = float_frame frame.loc[5:10] = np.nan frame.loc[15:20, -2:] = np.nan @@ -1352,7 +1455,11 @@ def test_idxmin(self, float_frame, int_frame): pytest.raises(ValueError, frame.idxmin, axis=2) - def test_idxmax(self, float_frame, int_frame): + def test_idxmax(self): + + int_frame = tm.get_int_frame() + float_frame = DataFrame(tm.getSeriesData()) + frame = float_frame frame.loc[5:10] = np.nan frame.loc[15:20, -2:] = np.nan @@ -1370,7 +1477,15 @@ def test_idxmax(self, float_frame, int_frame): # Logical reductions @pytest.mark.parametrize('opname', ['any', 'all']) - def test_any_all(self, opname, bool_frame_with_na, float_string_frame): + def test_any_all(self, 
opname): + float_string_frame = tm.get_float_string_frame() + + df = DataFrame(tm.getSeriesData()) > 0 + bool_frame_with_na = df.astype(object) + # set some NAs + bool_frame_with_na.loc[5:10] = np.nan + bool_frame_with_na.loc[15:20, -2:] = np.nan + assert_bool_op_calc(opname, getattr(np, opname), bool_frame_with_na, has_skipna=True) assert_bool_op_api(opname, bool_frame_with_na, float_string_frame, @@ -1865,7 +1980,9 @@ def test_pct_change(self): tm.assert_frame_equal(result, expected) # Clip - def test_clip(self, float_frame): + def test_clip(self): + float_frame = DataFrame(tm.getSeriesData()) + median = float_frame.median().median() original = float_frame.copy() @@ -1883,8 +2000,10 @@ def test_clip(self, float_frame): # Verify that float_frame was not changed inplace assert (float_frame.values == original.values).all() - def test_inplace_clip(self, float_frame): + def test_inplace_clip(self): # GH 15388 + float_frame = DataFrame(tm.getSeriesData()) + median = float_frame.median().median() frame_copy = float_frame.copy() @@ -1969,9 +2088,9 @@ def test_clip_against_series(self, inplace): (0, [[2., 2., 3.], [4., 5., 6.], [7., 7., 7.]]), (1, [[2., 3., 4.], [4., 5., 6.], [5., 6., 7.]]) ]) - def test_clip_against_list_like(self, simple_frame, - inplace, lower, axis, res): + def test_clip_against_list_like(self, inplace, lower, axis, res): # GH 15390 + simple_frame = tm.get_simple_frame() original = simple_frame.copy(deep=True) result = original.clip(lower=lower, upper=[5, 6, 7], @@ -2016,9 +2135,11 @@ def test_clip_against_unordered_columns(self): tm.assert_frame_equal(result_lower, expected_lower) tm.assert_frame_equal(result_lower_upper, expected_lower_upper) - def test_clip_with_na_args(self, float_frame): + def test_clip_with_na_args(self): """Should process np.nan argument as None """ # GH 17276 + float_frame = DataFrame(tm.getSeriesData()) + tm.assert_frame_equal(float_frame.clip(np.nan), float_frame) tm.assert_frame_equal(float_frame.clip(upper=np.nan, 
lower=np.nan), float_frame) diff --git a/pandas/tests/frame/test_api.py b/pandas/tests/frame/test_api.py index 0934dd20638e4..060ed3f8bdd10 100644 --- a/pandas/tests/frame/test_api.py +++ b/pandas/tests/frame/test_api.py @@ -36,9 +36,12 @@ def _assert_series_equal(self, left, right): """Dispatch to series class dependent assertion""" raise NotImplementedError - def test_copy_index_name_checking(self, float_frame): + def test_copy_index_name_checking(self): # don't want to be able to modify the index stored elsewhere after # making a copy + + float_frame = DataFrame(tm.getSeriesData()) + for attr in ('index', 'columns'): ind = getattr(float_frame, attr) ind.name = None @@ -46,7 +49,9 @@ def test_copy_index_name_checking(self, float_frame): getattr(cp, attr).name = 'foo' assert getattr(float_frame, attr).name is None - def test_getitem_pop_assign_name(self, float_frame): + def test_getitem_pop_assign_name(self): + float_frame = DataFrame(tm.getSeriesData()) + s = float_frame['A'] assert s.name == 'A' @@ -59,7 +64,9 @@ def test_getitem_pop_assign_name(self, float_frame): s2 = s.loc[:] assert s2.name == 'B' - def test_get_value(self, float_frame): + def test_get_value(self): + float_frame = DataFrame(tm.getSeriesData()) + for idx in float_frame.index: for col in float_frame.columns: with tm.assert_produces_warning(FutureWarning, @@ -68,7 +75,9 @@ def test_get_value(self, float_frame): expected = float_frame[col][idx] tm.assert_almost_equal(result, expected) - def test_add_prefix_suffix(self, float_frame): + def test_add_prefix_suffix(self): + float_frame = DataFrame(tm.getSeriesData()) + with_prefix = float_frame.add_prefix('foo#') expected = pd.Index(['foo#%s' % c for c in float_frame.columns]) tm.assert_index_equal(with_prefix.columns, expected) @@ -85,7 +94,9 @@ def test_add_prefix_suffix(self, float_frame): expected = pd.Index(['{}%'.format(c) for c in float_frame.columns]) tm.assert_index_equal(with_pct_suffix.columns, expected) - def test_get_axis(self, 
float_frame): + def test_get_axis(self): + float_frame = DataFrame(tm.getSeriesData()) + f = float_frame assert f._get_axis_number(0) == 0 assert f._get_axis_number(1) == 1 @@ -114,11 +125,15 @@ def test_get_axis(self, float_frame): with pytest.raises(ValueError, match='No axis named'): f._get_axis_number(None) - def test_keys(self, float_frame): + def test_keys(self): + float_frame = DataFrame(tm.getSeriesData()) + getkeys = float_frame.keys assert getkeys() is float_frame.columns - def test_column_contains_typeerror(self, float_frame): + def test_column_contains_typeerror(self): + float_frame = DataFrame(tm.getSeriesData()) + try: float_frame.columns in float_frame except TypeError: @@ -142,7 +157,8 @@ def test_tab_completion(self): assert key not in dir(df) assert isinstance(df.__getitem__('A'), pd.DataFrame) - def test_not_hashable(self, empty_frame): + def test_not_hashable(self): + empty_frame = DataFrame({}) df = self.klass([1]) pytest.raises(TypeError, hash, df) pytest.raises(TypeError, hash, empty_frame) @@ -153,7 +169,9 @@ def test_new_empty_index(self): df1.index.name = 'foo' assert df2.index.name is None - def test_array_interface(self, float_frame): + def test_array_interface(self): + float_frame = DataFrame(tm.getSeriesData()) + with np.errstate(all='ignore'): result = np.sqrt(float_frame) assert isinstance(result, type(float_frame)) @@ -162,7 +180,9 @@ def test_array_interface(self, float_frame): self._assert_frame_equal(result, float_frame.apply(np.sqrt)) - def test_get_agg_axis(self, float_frame): + def test_get_agg_axis(self): + float_frame = DataFrame(tm.getSeriesData()) + cols = float_frame._get_agg_axis(0) assert cols is float_frame.columns @@ -171,7 +191,12 @@ def test_get_agg_axis(self, float_frame): pytest.raises(ValueError, float_frame._get_agg_axis, 2) - def test_nonzero(self, float_frame, float_string_frame, empty_frame): + def test_nonzero(self): + float_frame = DataFrame(tm.getSeriesData()) + + float_string_frame = 
tm.get_float_string_frame() + empty_frame = DataFrame({}) + assert empty_frame.empty assert not float_frame.empty @@ -198,10 +223,15 @@ def test_items(self): assert isinstance(v, Series) assert (df[k] == v).all() - def test_iter(self, float_frame): + def test_iter(self): + float_frame = DataFrame(tm.getSeriesData()) + assert tm.equalContents(list(float_frame), float_frame.columns) - def test_iterrows(self, float_frame, float_string_frame): + def test_iterrows(self): + float_frame = DataFrame(tm.getSeriesData()) + float_string_frame = tm.get_float_string_frame() + for k, v in float_frame.iterrows(): exp = float_frame.loc[k] self._assert_series_equal(v, exp) @@ -222,7 +252,9 @@ def test_iterrows_iso8601(self): exp = s.loc[k] self._assert_series_equal(v, exp) - def test_itertuples(self, float_frame): + def test_itertuples(self): + float_frame = DataFrame(tm.getSeriesData()) + for i, tup in enumerate(float_frame.itertuples()): s = self.klass._constructor_sliced(tup[1:]) s.name = tup[0] @@ -285,10 +317,14 @@ def test_sequence_like_with_categorical(self): for c, col in df.iteritems(): str(s) - def test_len(self, float_frame): + def test_len(self): + float_frame = DataFrame(tm.getSeriesData()) assert len(float_frame) == len(float_frame.index) - def test_values(self, float_frame, float_string_frame): + def test_values(self): + float_frame = DataFrame(tm.getSeriesData()) + float_string_frame = tm.get_float_string_frame() + frame = float_frame arr = frame.values @@ -333,7 +369,8 @@ def test_to_numpy_copy(self): assert df.to_numpy(copy=False).base is arr assert df.to_numpy(copy=True).base is None - def test_transpose(self, float_frame): + def test_transpose(self): + float_frame = DataFrame(tm.getSeriesData()) frame = float_frame dft = frame.T for idx, series in compat.iteritems(dft): @@ -358,7 +395,8 @@ def test_swapaxes(self): self._assert_frame_equal(df, df.swapaxes(0, 0)) pytest.raises(ValueError, df.swapaxes, 2, 5) - def test_axis_aliases(self, float_frame): + def 
test_axis_aliases(self): + float_frame = DataFrame(tm.getSeriesData()) f = float_frame # reg name @@ -376,22 +414,25 @@ def test_class_axis(self): assert pydoc.getdoc(DataFrame.index) assert pydoc.getdoc(DataFrame.columns) - def test_more_values(self, float_string_frame): + def test_more_values(self): + float_string_frame = tm.get_float_string_frame() values = float_string_frame.values assert values.shape[1] == len(float_string_frame.columns) - def test_repr_with_mi_nat(self, float_string_frame): + def test_repr_with_mi_nat(self): df = self.klass({'X': [1, 2]}, index=[[pd.NaT, pd.Timestamp('20130101')], ['a', 'b']]) result = repr(df) expected = ' X\nNaT a 1\n2013-01-01 b 2' assert result == expected - def test_iteritems_names(self, float_string_frame): + def test_iteritems_names(self): + float_string_frame = tm.get_float_string_frame() for k, v in compat.iteritems(float_string_frame): assert v.name == k - def test_series_put_names(self, float_string_frame): + def test_series_put_names(self): + float_string_frame = tm.get_float_string_frame() series = float_string_frame._series for k, v in compat.iteritems(series): assert v.name == k @@ -433,26 +474,30 @@ class TestDataFrameMisc(SharedWithSparse): _assert_frame_equal = staticmethod(assert_frame_equal) _assert_series_equal = staticmethod(assert_series_equal) - def test_values(self, float_frame): + def test_values(self): + float_frame = DataFrame(tm.getSeriesData()) float_frame.values[:, 0] = 5. 
assert (float_frame.values[:, 0] == 5).all() - def test_as_matrix_deprecated(self, float_frame): + def test_as_matrix_deprecated(self): # GH 18458 + float_frame = DataFrame(tm.getSeriesData()) with tm.assert_produces_warning(FutureWarning): cols = float_frame.columns.tolist() result = float_frame.as_matrix(columns=cols) expected = float_frame.values tm.assert_numpy_array_equal(result, expected) - def test_deepcopy(self, float_frame): + def test_deepcopy(self): + float_frame = DataFrame(tm.getSeriesData()) cp = deepcopy(float_frame) series = cp['A'] series[:] = 10 for idx, value in compat.iteritems(series): assert float_frame['A'][idx] != value - def test_transpose_get_view(self, float_frame): + def test_transpose_get_view(self): + float_frame = DataFrame(tm.getSeriesData()) dft = float_frame.T dft.values[:, 5:10] = 5 diff --git a/pandas/tests/frame/test_apply.py b/pandas/tests/frame/test_apply.py index ade527a16c902..5425e44b15046 100644 --- a/pandas/tests/frame/test_apply.py +++ b/pandas/tests/frame/test_apply.py @@ -36,7 +36,9 @@ def int_frame_const_col(): class TestDataFrameApply(): - def test_apply(self, float_frame): + def test_apply(self): + float_frame = DataFrame(tm.getSeriesData()) + with np.errstate(all='ignore'): # ufunc applied = float_frame.apply(np.sqrt) @@ -74,14 +76,17 @@ def test_apply_mixed_datetimelike(self): result = df.apply(lambda x: x, axis=1) assert_frame_equal(result, df) - def test_apply_empty(self, float_frame, empty_frame): + def test_apply_empty(self): # empty + empty_frame = DataFrame({}) + applied = empty_frame.apply(np.sqrt) assert applied.empty applied = empty_frame.apply(np.mean) assert applied.empty + float_frame = DataFrame(tm.getSeriesData()) no_rows = float_frame[:0] result = no_rows.apply(lambda x: x.mean()) expected = Series(np.nan, index=float_frame.columns) @@ -97,8 +102,10 @@ def test_apply_empty(self, float_frame, empty_frame): result = expected.apply(lambda x: x['a'], axis=1) assert_frame_equal(expected, result) - def 
test_apply_with_reduce_empty(self, empty_frame): + def test_apply_with_reduce_empty(self): # reduce with an empty DataFrame + empty_frame = DataFrame({}) + x = [] result = empty_frame.apply(x.append, axis=1, result_type='expand') assert_frame_equal(result, empty_frame) @@ -116,7 +123,9 @@ def test_apply_with_reduce_empty(self, empty_frame): # Ensure that x.append hasn't been called assert x == [] - def test_apply_deprecate_reduce(self, empty_frame): + def test_apply_deprecate_reduce(self): + empty_frame = DataFrame({}) + x = [] with tm.assert_produces_warning(FutureWarning): empty_frame.apply(x.append, axis=1, reduce=True) @@ -140,16 +149,21 @@ def test_apply_standard_nonunique(self): pytest.param([], {'numeric_only': True}, id='optional_kwds'), pytest.param([1, None], {'numeric_only': True}, id='args_and_kwds') ]) - def test_apply_with_string_funcs(self, float_frame, func, args, kwds): + def test_apply_with_string_funcs(self, func, args, kwds): + float_frame = DataFrame(tm.getSeriesData()) + result = float_frame.apply(func, *args, **kwds) expected = getattr(float_frame, func)(*args, **kwds) tm.assert_series_equal(result, expected) - def test_apply_broadcast_deprecated(self, float_frame): + def test_apply_broadcast_deprecated(self): + float_frame = DataFrame(tm.getSeriesData()) + with tm.assert_produces_warning(FutureWarning): float_frame.apply(np.mean, broadcast=True) - def test_apply_broadcast(self, float_frame, int_frame_const_col): + def test_apply_broadcast(self, int_frame_const_col): + float_frame = DataFrame(tm.getSeriesData()) # scalars result = float_frame.apply(np.mean, result_type='broadcast') @@ -208,7 +222,9 @@ def test_apply_broadcast_error(self, int_frame_const_col): with pytest.raises(ValueError): df.apply(lambda x: Series([1, 2]), axis=1, result_type='broadcast') - def test_apply_raw(self, float_frame): + def test_apply_raw(self): + float_frame = DataFrame(tm.getSeriesData()) + result0 = float_frame.apply(np.mean, raw=True) result1 = 
float_frame.apply(np.mean, axis=1, raw=True) @@ -223,12 +239,16 @@ def test_apply_raw(self, float_frame): expected = float_frame * 2 assert_frame_equal(result, expected) - def test_apply_axis1(self, float_frame): + def test_apply_axis1(self): + float_frame = DataFrame(tm.getSeriesData()) + d = float_frame.index[0] tapplied = float_frame.apply(np.mean, axis=1) assert tapplied[d] == np.mean(float_frame.xs(d)) - def test_apply_ignore_failures(self, float_string_frame): + def test_apply_ignore_failures(self): + float_string_frame = tm.get_float_string_frame() + result = frame_apply(float_string_frame, np.mean, 0, ignore_failures=True).apply_standard() expected = float_string_frame._get_numeric_data().apply(np.mean) @@ -286,7 +306,9 @@ def _checkit(axis=0, raw=False): result = no_cols.apply(lambda x: x.mean(), result_type='broadcast') assert isinstance(result, DataFrame) - def test_apply_with_args_kwds(self, float_frame): + def test_apply_with_args_kwds(self): + float_frame = DataFrame(tm.getSeriesData()) + def add_some(x, howmuch=0): return x + howmuch @@ -308,11 +330,15 @@ def subtract_and_divide(x, sub, divide=1): expected = float_frame.apply(lambda x: (x - 2.) / 2.) 
assert_frame_equal(result, expected) - def test_apply_yield_list(self, float_frame): + def test_apply_yield_list(self): + float_frame = DataFrame(tm.getSeriesData()) + result = float_frame.apply(list) assert_frame_equal(result, float_frame) - def test_apply_reduce_Series(self, float_frame): + def test_apply_reduce_Series(self): + float_frame = DataFrame(tm.getSeriesData()) + float_frame.loc[::2, 'A'] = np.nan expected = float_frame.mean(1) result = float_frame.apply(np.mean, axis=1) @@ -406,7 +432,9 @@ def test_apply_convert_objects(self): result = data.apply(lambda x: x, axis=1) assert_frame_equal(result._convert(datetime=True), data) - def test_apply_attach_name(self, float_frame): + def test_apply_attach_name(self): + float_frame = DataFrame(tm.getSeriesData()) + result = float_frame.apply(lambda x: x.name) expected = Series(float_frame.columns, index=float_frame.columns) assert_series_equal(result, expected) @@ -430,7 +458,8 @@ def test_apply_attach_name(self, float_frame): expected.index = float_frame.index assert_series_equal(result, expected) - def test_apply_multi_index(self, float_frame): + def test_apply_multi_index(self): + index = MultiIndex.from_arrays([['a', 'a', 'b'], ['c', 'd', 'd']]) s = DataFrame([[1, 2], [3, 4], [5, 6]], index=index, @@ -461,7 +490,9 @@ def test_apply_dict(self): assert_frame_equal(reduce_false, df) assert_series_equal(reduce_none, dicts) - def test_applymap(self, float_frame): + def test_applymap(self): + float_frame = DataFrame(tm.getSeriesData()) + applied = float_frame.applymap(lambda x: x * 2) tm.assert_frame_equal(applied, float_frame * 2) float_frame.applymap(type) @@ -823,7 +854,9 @@ def zip_frames(frames, axis=1): class TestDataFrameAggregate(): - def test_agg_transform(self, axis, float_frame): + def test_agg_transform(self, axis): + float_frame = DataFrame(tm.getSeriesData()) + other_axis = 1 if axis in {0, 'index'} else 0 with np.errstate(all='ignore'): @@ -872,7 +905,9 @@ def test_agg_transform(self, axis, 
float_frame): result = float_frame.transform([np.abs, 'sqrt'], axis=axis) assert_frame_equal(result, expected) - def test_transform_and_agg_err(self, axis, float_frame): + def test_transform_and_agg_err(self, axis): + float_frame = DataFrame(tm.getSeriesData()) + # cannot both transform and agg with pytest.raises(ValueError): float_frame.transform(['max', 'min'], axis=axis) @@ -952,7 +987,9 @@ def test_agg_dict_nested_renaming_depr(self): df.agg({'A': {'foo': 'min'}, 'B': {'bar': 'max'}}) - def test_agg_reduce(self, axis, float_frame): + def test_agg_reduce(self, axis): + float_frame = DataFrame(tm.getSeriesData()) + other_axis = 1 if axis in {0, 'index'} else 0 name1, name2 = float_frame.axes[other_axis].unique()[:2].sort_values() diff --git a/pandas/tests/frame/test_arithmetic.py b/pandas/tests/frame/test_arithmetic.py index f14ecae448723..10493c816ecac 100644 --- a/pandas/tests/frame/test_arithmetic.py +++ b/pandas/tests/frame/test_arithmetic.py @@ -65,11 +65,11 @@ def check(df, df2): def test_timestamp_compare(self): # make sure we can compare Timestamps on the right AND left hand side # GH#4982 - df = pd. 
DataFrame({'dates1': pd.date_range('20010101', periods=10), - 'dates2': pd.date_range('20010102', periods=10), - 'intcol': np.random.randint(1000000000, size=10), - 'floatcol': np.random.randn(10), - 'stringcol': list(tm.rands(10))}) + df = pd.DataFrame({'dates1': pd.date_range('20010101', periods=10), + 'dates2': pd.date_range('20010102', periods=10), + 'intcol': np.random.randint(1000000000, size=10), + 'floatcol': np.random.randn(10), + 'stringcol': list(tm.rands(10))}) df.loc[np.random.rand(len(df)) > 0.5, 'dates2'] = pd.NaT ops = {'gt': 'lt', 'lt': 'gt', 'ge': 'le', 'le': 'ge', 'eq': 'eq', 'ne': 'ne'} @@ -322,11 +322,13 @@ def test_df_add_flex_filled_mixed_dtypes(self): 'B': ser * 2}) tm.assert_frame_equal(result, expected) - def test_arith_flex_frame(self, all_arithmetic_operators, float_frame, - mixed_float_frame): + def test_arith_flex_frame(self, all_arithmetic_operators): # one instance of parametrized fixture op = all_arithmetic_operators + mixed_float_frame = tm.get_mixed_float_frame() + float_frame = pd.DataFrame(tm.getSeriesData()) + def f(x, y): # r-versions not in operator-stdlib; get op without "r" and invert if op.startswith('__r'): @@ -344,8 +346,12 @@ def f(x, y): _check_mixed_float(result, dtype=dict(C=None)) @pytest.mark.parametrize('op', ['__add__', '__sub__', '__mul__']) - def test_arith_flex_frame_mixed(self, op, int_frame, mixed_int_frame, - mixed_float_frame): + def test_arith_flex_frame_mixed(self, op): + + int_frame = tm.get_int_frame() + mixed_int_frame = tm.get_mixed_int_frame() + mixed_float_frame = tm.get_mixed_float_frame() + f = getattr(operator, op) # vs mix int @@ -372,11 +378,12 @@ def test_arith_flex_frame_mixed(self, op, int_frame, mixed_int_frame, expected = f(int_frame, 2 * int_frame) tm.assert_frame_equal(result, expected) - def test_arith_flex_frame_raise(self, all_arithmetic_operators, - float_frame): + def test_arith_flex_frame_raise(self, all_arithmetic_operators): # one instance of parametrized fixture op = 
all_arithmetic_operators + float_frame = pd.DataFrame(tm.getSeriesData()) + # Check that arrays with dim >= 3 raise for dim in range(3, 6): arr = np.ones((1,) * dim) @@ -384,7 +391,8 @@ def test_arith_flex_frame_raise(self, all_arithmetic_operators, with pytest.raises(ValueError, match=msg): getattr(float_frame, op)(arr) - def test_arith_flex_frame_corner(self, float_frame): + def test_arith_flex_frame_corner(self): + float_frame = pd.DataFrame(tm.getSeriesData()) const_add = float_frame.add(1) tm.assert_frame_equal(const_add, float_frame + 1) @@ -402,8 +410,8 @@ def test_arith_flex_frame_corner(self, float_frame): with pytest.raises(NotImplementedError, match='fill_value'): float_frame.add(float_frame.iloc[0], axis='index', fill_value=3) - def test_arith_flex_series(self, simple_frame): - df = simple_frame + def test_arith_flex_series(self): + df = tm.get_simple_frame() row = df.xs('a') col = df['two'] diff --git a/pandas/tests/frame/test_block_internals.py b/pandas/tests/frame/test_block_internals.py index 5419f4d5127f6..a19c8ae7f45d9 100644 --- a/pandas/tests/frame/test_block_internals.py +++ b/pandas/tests/frame/test_block_internals.py @@ -42,7 +42,8 @@ def test_setitem_invalidates_datetime_index_freq(self): assert dti.freq == 'D' assert dti[1] == ts - def test_cast_internals(self, float_frame): + def test_cast_internals(self): + float_frame = DataFrame(tm.getSeriesData()) casted = DataFrame(float_frame._data, dtype=int) expected = DataFrame(float_frame._series, dtype=int) assert_frame_equal(casted, expected) @@ -51,7 +52,8 @@ def test_cast_internals(self, float_frame): expected = DataFrame(float_frame._series, dtype=np.int32) assert_frame_equal(casted, expected) - def test_consolidate(self, float_frame): + def test_consolidate(self): + float_frame = DataFrame(tm.getSeriesData()) float_frame['E'] = 7. 
consolidated = float_frame._consolidate() assert len(consolidated._data.blocks) == 1 @@ -67,20 +69,23 @@ def test_consolidate(self, float_frame): float_frame._consolidate(inplace=True) assert len(float_frame._data.blocks) == 1 - def test_consolidate_inplace(self, float_frame): + def test_consolidate_inplace(self): + float_frame = DataFrame(tm.getSeriesData()) frame = float_frame.copy() # noqa # triggers in-place consolidation for letter in range(ord('A'), ord('Z')): float_frame[chr(letter)] = chr(letter) - def test_values_consolidate(self, float_frame): + def test_values_consolidate(self): + float_frame = DataFrame(tm.getSeriesData()) float_frame['E'] = 7. assert not float_frame._data.is_consolidated() _ = float_frame.values # noqa assert float_frame._data.is_consolidated() - def test_modify_values(self, float_frame): + def test_modify_values(self): + float_frame = DataFrame(tm.getSeriesData()) float_frame.values[5] = 5 assert (float_frame.values[5] == 5).all() @@ -89,7 +94,8 @@ def test_modify_values(self, float_frame): float_frame.values[6] = 6 assert (float_frame.values[6] == 6).all() - def test_boolean_set_uncons(self, float_frame): + def test_boolean_set_uncons(self): + float_frame = DataFrame(tm.getSeriesData()) float_frame['E'] = 7. 
expected = float_frame.values.copy() @@ -98,13 +104,15 @@ def test_boolean_set_uncons(self, float_frame): float_frame[float_frame > 1] = 2 assert_almost_equal(expected, float_frame.values) - def test_values_numeric_cols(self, float_frame): + def test_values_numeric_cols(self): + float_frame = DataFrame(tm.getSeriesData()) float_frame['foo'] = 'bar' values = float_frame[['A', 'B', 'C', 'D']].values assert values.dtype == np.float64 - def test_values_lcd(self, mixed_float_frame, mixed_int_frame): + def test_values_lcd(self): + mixed_float_frame = tm.get_mixed_float_frame() # mixed lcd values = mixed_float_frame[['A', 'B', 'C', 'D']].values @@ -118,6 +126,8 @@ def test_values_lcd(self, mixed_float_frame, mixed_int_frame): # GH 10364 # B uint64 forces float because there are other signed int types + mixed_int_frame = tm.get_mixed_int_frame() + values = mixed_int_frame[['A', 'B', 'C', 'D']].values assert values.dtype == np.float64 @@ -211,9 +221,11 @@ def test_constructor_with_convert(self): None], np.object_), name='A') assert_series_equal(result, expected) - def test_construction_with_mixed(self, float_string_frame): + def test_construction_with_mixed(self): # test construction edge cases with mixed types + float_string_frame = tm.get_float_string_frame() + # f7u12, this does not work without extensive workaround data = [[datetime(2001, 1, 5), np.nan, datetime(2001, 1, 2)], [datetime(2000, 1, 2), datetime(2000, 1, 3), @@ -302,8 +314,9 @@ def test_equals_different_blocks(self): assert df0.equals(df1) assert df1.equals(df0) - def test_copy_blocks(self, float_frame): + def test_copy_blocks(self): # API/ENH 9607 + float_frame = DataFrame(tm.getSeriesData()) df = DataFrame(float_frame, copy=True) column = df.columns[0] @@ -320,8 +333,9 @@ def test_copy_blocks(self, float_frame): # make sure we did not change the original DataFrame assert not _df[column].equals(df[column]) - def test_no_copy_blocks(self, float_frame): + def test_no_copy_blocks(self): # API/ENH 9607 + 
float_frame = DataFrame(tm.getSeriesData()) df = DataFrame(float_frame, copy=True) column = df.columns[0] @@ -338,7 +352,10 @@ def test_no_copy_blocks(self, float_frame): # make sure we did change the original DataFrame assert _df[column].equals(df[column]) - def test_copy(self, float_frame, float_string_frame): + def test_copy(self): + float_frame = DataFrame(tm.getSeriesData()) + float_string_frame = tm.get_float_string_frame() + cop = float_frame.copy() cop['E'] = cop['A'] assert 'E' not in float_frame @@ -347,7 +364,10 @@ def test_copy(self, float_frame, float_string_frame): copy = float_string_frame.copy() assert copy._data is not float_string_frame._data - def test_pickle(self, float_string_frame, empty_frame, timezone_frame): + def test_pickle(self): + empty_frame = DataFrame({}) + float_string_frame = tm.get_float_string_frame() + unpickled = tm.round_trip_pickle(float_string_frame) assert_frame_equal(float_string_frame, unpickled) @@ -359,6 +379,7 @@ def test_pickle(self, float_string_frame, empty_frame, timezone_frame): repr(unpickled) # tz frame + timezone_frame = tm.get_timezone_frame() unpickled = tm.round_trip_pickle(timezone_frame) assert_frame_equal(timezone_frame, unpickled) @@ -394,7 +415,10 @@ def test_consolidate_datetime64(self): df.starting), ser_starting.index) tm.assert_index_equal(pd.DatetimeIndex(df.ending), ser_ending.index) - def test_is_mixed_type(self, float_frame, float_string_frame): + def test_is_mixed_type(self): + float_string_frame = tm.get_float_string_frame() + float_frame = DataFrame(tm.getSeriesData()) + assert not float_frame._is_mixed_type assert float_string_frame._is_mixed_type @@ -454,7 +478,9 @@ def test_get_numeric_data_extension_dtype(self): expected = df.loc[:, ['A', 'C']] assert_frame_equal(result, expected) - def test_convert_objects(self, float_string_frame): + def test_convert_objects(self): + + float_string_frame = tm.get_float_string_frame() oops = float_string_frame.T.T converted = oops._convert(datetime=True) 
diff --git a/pandas/util/testing.py b/pandas/util/testing.py index f441dd20f3982..b197a5f73d801 100644 --- a/pandas/util/testing.py +++ b/pandas/util/testing.py @@ -33,7 +33,8 @@ import pandas as pd from pandas import ( Categorical, CategoricalIndex, DataFrame, DatetimeIndex, Index, - IntervalIndex, MultiIndex, Panel, RangeIndex, Series, bdate_range) + IntervalIndex, MultiIndex, NaT, Panel, RangeIndex, Series, bdate_range, + date_range) from pandas.core.algorithms import take_1d from pandas.core.arrays import ( DatetimeArray, ExtensionArray, IntervalArray, PeriodArray, TimedeltaArray, @@ -3065,3 +3066,117 @@ def convert_rows_list_to_csv_str(rows_list): sep = os.linesep expected = sep.join(rows_list) + sep return expected + + +# ----------------------------------------------------------------------------- +# Fixture-Like Singletons + +def get_simple_frame(): + """ + Fixture for simple 3x3 DataFrame + + Columns are ['one', 'two', 'three'], index is ['a', 'b', 'c']. + """ + arr = np.array([[1., 2., 3.], + [4., 5., 6.], + [7., 8., 9.]]) + + return DataFrame(arr, columns=['one', 'two', 'three'], + index=['a', 'b', 'c']) + + +def get_int_frame(): + """ + Fixture for DataFrame of ints with index of unique strings + + Columns are ['A', 'B', 'C', 'D'] + """ + df = DataFrame({k: v.astype(int) + for k, v in compat.iteritems(getSeriesData())}) + # force these all to int64 to avoid platform testing issues + return DataFrame({c: s for c, s in compat.iteritems(df)}, dtype=np.int64) + + +def get_mixed_int_frame(): + """ + Fixture for DataFrame of different int types with index of unique strings + + Columns are ['A', 'B', 'C', 'D']. 
+ """ + df = DataFrame({k: v.astype(int) + for k, v in compat.iteritems(getSeriesData())}) + df.A = df.A.astype('int32') + df.B = np.ones(len(df.B), dtype='uint64') + df.C = df.C.astype('uint8') + df.D = df.C.astype('int64') + return df + + +def get_float_frame_with_na(): + """ + Fixture for DataFrame of floats with index of unique strings + + Columns are ['A', 'B', 'C', 'D']; some entries are missing + """ + df = DataFrame(getSeriesData()) + # set some NAs + df.loc[5:10] = np.nan + df.loc[15:20, -2:] = np.nan + return df + + +def get_float_string_frame(): + """ + Fixture for DataFrame of floats and strings with index of unique strings + + Columns are ['A', 'B', 'C', 'D', 'foo']. + """ + df = DataFrame(getSeriesData()) + df['foo'] = 'bar' + return df + + +def get_mixed_float_frame(): + """ + Fixture for DataFrame of different float types with index of unique strings + + Columns are ['A', 'B', 'C', 'D']. + """ + df = DataFrame(getSeriesData()) + df.A = df.A.astype('float32') + df.B = df.B.astype('float32') + df.C = df.C.astype('float16') + df.D = df.D.astype('float64') + return df + + +def get_timezone_frame(): + """ + Fixture for DataFrame of date_range Series with different time zones + + Columns are ['A', 'B', 'C']; some entries are missing + """ + df = DataFrame({'A': date_range('20130101', periods=3), + 'B': date_range('20130101', periods=3, + tz='US/Eastern'), + 'C': date_range('20130101', periods=3, + tz='CET')}) + df.iloc[1, 1] = NaT + df.iloc[1, 2] = NaT + return df + + +def get_frame_of_index_cols(): + """ + Fixture for DataFrame of columns that can be used for indexing + + Columns are ['A', 'B', 'C', 'D', 'E', ('tuple', 'as', 'label')]; + 'A' & 'B' contain duplicates (but are jointly unique), the rest are unique. 
+ """ + df = DataFrame({'A': ['foo', 'foo', 'foo', 'bar', 'bar'], + 'B': ['one', 'two', 'three', 'one', 'two'], + 'C': ['a', 'b', 'c', 'd', 'e'], + 'D': np.random.randn(5), + 'E': np.random.randn(5), + ('tuple', 'as', 'label'): np.random.randn(5)}) + return df
Found when trying to collect reduction tests that we are still using a lot of singleton fixtures (agreed undesirable in #23701). This gets rid of singleton fixtures in tests.frame Following this it will be easier to more usefully parametrize some of those tests.
https://api.github.com/repos/pandas-dev/pandas/pulls/24769
2019-01-14T17:52:46Z
2019-02-03T04:04:31Z
null
2020-04-05T17:37:07Z
Include tables module in required libraries
diff --git a/setup.py b/setup.py index ed2d905f4358b..e556073ff8c1b 100755 --- a/setup.py +++ b/setup.py @@ -36,6 +36,7 @@ def is_platform_mac(): 'python-dateutil >= 2.5.0', 'pytz >= 2011k', 'numpy >= {numpy_ver}'.format(numpy_ver=min_numpy_ver), + 'tables >= 3.4.4' ], 'setup_requires': ['numpy >= {numpy_ver}'.format(numpy_ver=min_numpy_ver)], 'zip_safe': False,
Using `to_hdf` method without the tables module installed may results in infinite recursion crash: Fatal Python error: Cannot recover from stack overflow. - [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/24765
2019-01-14T15:32:03Z
2019-01-15T09:37:24Z
null
2019-01-15T09:37:37Z
DOC: update DF.set_index
diff --git a/doc/source/whatsnew/v0.24.0.rst b/doc/source/whatsnew/v0.24.0.rst index 3685a24d60e74..de9583ed208a5 100644 --- a/doc/source/whatsnew/v0.24.0.rst +++ b/doc/source/whatsnew/v0.24.0.rst @@ -1147,8 +1147,8 @@ Other API Changes - :class:`pandas.io.formats.style.Styler` supports a ``number-format`` property when using :meth:`~pandas.io.formats.style.Styler.to_excel` (:issue:`22015`) - :meth:`DataFrame.corr` and :meth:`Series.corr` now raise a ``ValueError`` along with a helpful error message instead of a ``KeyError`` when supplied with an invalid method (:issue:`22298`) - :meth:`shift` will now always return a copy, instead of the previous behaviour of returning self when shifting by 0 (:issue:`22397`) -- :meth:`DataFrame.set_index` now allows all one-dimensional list-likes, raises a ``TypeError`` for incorrect types, - has an improved ``KeyError`` message, and will not fail on duplicate column names with ``drop=True``. (:issue:`22484`) +- :meth:`DataFrame.set_index` now gives a better (and less frequent) KeyError, raises a ``ValueError`` for incorrect types, + and will not fail on duplicate column names with ``drop=True``. (:issue:`22484`) - Slicing a single row of a DataFrame with multiple ExtensionArrays of the same type now preserves the dtype, rather than coercing to object (:issue:`22784`) - :class:`DateOffset` attribute `_cacheable` and method `_should_cache` have been removed (:issue:`23118`) - :meth:`Series.searchsorted`, when supplied a scalar value to search for, now returns a scalar instead of an array (:issue:`23801`). diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 309fb3b841461..b4f79bda25517 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -4042,12 +4042,16 @@ def set_index(self, keys, drop=True, append=False, inplace=False, Set the DataFrame index using existing columns. Set the DataFrame index (row labels) using one or more existing - columns. The index can replace the existing index or expand on it. 
+ columns or arrays (of the correct length). The index can replace the + existing index or expand on it. Parameters ---------- - keys : label or list of label - Name or names of the columns that will be used as the index. + keys : label or array-like or list of labels/arrays + This parameter can be either a single column key, a single array of + the same length as the calling DataFrame, or a list containing an + arbitrary combination of column keys and arrays. Here, "array" + encompasses :class:`Series`, :class:`Index` and ``np.ndarray``. drop : bool, default True Delete columns to be used as the new index. append : bool, default False @@ -4092,7 +4096,7 @@ def set_index(self, keys, drop=True, append=False, inplace=False, 7 2013 84 10 2014 31 - Create a multi-index using columns 'year' and 'month': + Create a MultiIndex using columns 'year' and 'month': >>> df.set_index(['year', 'month']) sale @@ -4102,35 +4106,51 @@ def set_index(self, keys, drop=True, append=False, inplace=False, 2013 7 84 2014 10 31 - Create a multi-index using a set of values and a column: + Create a MultiIndex using an Index and a column: - >>> df.set_index([[1, 2, 3, 4], 'year']) + >>> df.set_index([pd.Index([1, 2, 3, 4]), 'year']) month sale year 1 2012 1 55 2 2014 4 40 3 2013 7 84 4 2014 10 31 + + Create a MultiIndex using two Series: + + >>> s = pd.Series([1, 2, 3, 4]) + >>> df.set_index([s, s**2]) + month year sale + 1 1 1 2012 55 + 2 4 4 2014 40 + 3 9 7 2013 84 + 4 16 10 2014 31 """ inplace = validate_bool_kwarg(inplace, 'inplace') - if not isinstance(keys, list): + + err_msg = ('The parameter "keys" may be a column key, one-dimensional ' + 'array, or a list containing only valid column keys and ' + 'one-dimensional arrays.') + + if (is_scalar(keys) or isinstance(keys, tuple) + or isinstance(keys, (ABCIndexClass, ABCSeries, np.ndarray))): + # make sure we have a container of keys/arrays we can iterate over + # tuples can appear as valid column keys! 
keys = [keys] + elif not isinstance(keys, list): + raise ValueError(err_msg) missing = [] for col in keys: - if (is_scalar(col) or isinstance(col, tuple)) and col in self: - # tuples can be both column keys or list-likes - # if they are valid column keys, everything is fine - continue - elif is_scalar(col) and col not in self: - # tuples that are not column keys are considered list-like, - # not considered missing - missing.append(col) - elif (not is_list_like(col, allow_sets=False) + if (is_scalar(col) or isinstance(col, tuple)): + # if col is a valid column key, everything is fine + # tuples are always considered keys, never as list-likes + if col not in self: + missing.append(col) + elif (not isinstance(col, (ABCIndexClass, ABCSeries, + np.ndarray, list)) or getattr(col, 'ndim', 1) > 1): - raise TypeError('The parameter "keys" may only contain a ' - 'combination of valid column keys and ' - 'one-dimensional list-likes') + raise ValueError(err_msg) if missing: raise KeyError('{}'.format(missing)) @@ -4163,12 +4183,6 @@ def set_index(self, keys, drop=True, append=False, inplace=False, elif isinstance(col, (list, np.ndarray)): arrays.append(col) names.append(None) - elif (is_list_like(col) - and not (isinstance(col, tuple) and col in self)): - # all other list-likes (but avoid valid column keys) - col = list(col) # ensure iterator do not get read twice etc. 
- arrays.append(col) - names.append(None) # from here, col can only be a column label else: arrays.append(frame[col]._values) diff --git a/pandas/tests/frame/test_alter_axes.py b/pandas/tests/frame/test_alter_axes.py index b63151dfb459e..c2355742199dc 100644 --- a/pandas/tests/frame/test_alter_axes.py +++ b/pandas/tests/frame/test_alter_axes.py @@ -118,7 +118,7 @@ def test_set_index_after_mutation(self): # Add list-of-list constructor because list is ambiguous -> lambda # also test index name if append=True (name is duplicate here for B) @pytest.mark.parametrize('box', [Series, Index, np.array, - list, tuple, iter, lambda x: [list(x)], + list, lambda x: [list(x)], lambda x: MultiIndex.from_arrays([x])]) @pytest.mark.parametrize('append, index_name', [(True, None), (True, 'B'), (True, 'test'), (False, None)]) @@ -135,7 +135,7 @@ def test_set_index_pass_single_array(self, frame_of_index_cols, with pytest.raises(KeyError, match=msg): df.set_index(key, drop=drop, append=append) else: - # np.array/tuple/iter/list-of-list "forget" the name of B + # np.array/list-of-list "forget" the name of B name_mi = getattr(key, 'names', None) name = [getattr(key, 'name', None)] if name_mi is None else name_mi @@ -150,8 +150,7 @@ def test_set_index_pass_single_array(self, frame_of_index_cols, # MultiIndex constructor does not work directly on Series -> lambda # also test index name if append=True (name is duplicate here for A & B) - @pytest.mark.parametrize('box', [Series, Index, np.array, - list, tuple, iter, + @pytest.mark.parametrize('box', [Series, Index, np.array, list, lambda x: MultiIndex.from_arrays([x])]) @pytest.mark.parametrize('append, index_name', [(True, None), (True, 'A'), (True, 'B'), @@ -163,7 +162,7 @@ def test_set_index_pass_arrays(self, frame_of_index_cols, df.index.name = index_name keys = ['A', box(df['B'])] - # np.array/list/tuple/iter "forget" the name of B + # np.array/list "forget" the name of B names = ['A', None if box in [np.array, list, tuple, iter] else 
'B'] result = df.set_index(keys, drop=drop, append=append) @@ -179,12 +178,10 @@ def test_set_index_pass_arrays(self, frame_of_index_cols, # MultiIndex constructor does not work directly on Series -> lambda # We also emulate a "constructor" for the label -> lambda # also test index name if append=True (name is duplicate here for A) - @pytest.mark.parametrize('box2', [Series, Index, np.array, - list, tuple, iter, + @pytest.mark.parametrize('box2', [Series, Index, np.array, list, lambda x: MultiIndex.from_arrays([x]), lambda x: x.name]) - @pytest.mark.parametrize('box1', [Series, Index, np.array, - list, tuple, iter, + @pytest.mark.parametrize('box1', [Series, Index, np.array, list, lambda x: MultiIndex.from_arrays([x]), lambda x: x.name]) @pytest.mark.parametrize('append, index_name', [(True, None), @@ -198,9 +195,6 @@ def test_set_index_pass_arrays_duplicate(self, frame_of_index_cols, drop, keys = [box1(df['A']), box2(df['A'])] result = df.set_index(keys, drop=drop, append=append) - # if either box was iter, the content has been consumed; re-read it - keys = [box1(df['A']), box2(df['A'])] - # need to adapt first drop for case that both keys are 'A' -- # cannot drop the same column twice; # use "is" because == would give ambiguous Boolean error for containers @@ -208,7 +202,7 @@ def test_set_index_pass_arrays_duplicate(self, frame_of_index_cols, drop, # to test against already-tested behaviour, we add sequentially, # hence second append always True; must wrap keys in list, otherwise - # box = list would be illegal + # box = list would be interpreted as keys expected = df.set_index([keys[0]], drop=first_drop, append=append) expected = expected.set_index([keys[1]], drop=drop, append=True) tm.assert_frame_equal(result, expected) @@ -238,7 +232,7 @@ def test_set_index_verify_integrity(self, frame_of_index_cols): @pytest.mark.parametrize('append', [True, False]) @pytest.mark.parametrize('drop', [True, False]) - def test_set_index_raise(self, frame_of_index_cols, drop, 
append): + def test_set_index_raise_keys(self, frame_of_index_cols, drop, append): df = frame_of_index_cols with pytest.raises(KeyError, match="['foo', 'bar', 'baz']"): @@ -249,14 +243,31 @@ def test_set_index_raise(self, frame_of_index_cols, drop, append): with pytest.raises(KeyError, match='X'): df.set_index([df['A'], df['B'], 'X'], drop=drop, append=append) - msg = 'The parameter "keys" may only contain a combination of.*' - # forbidden type, e.g. set - with pytest.raises(TypeError, match=msg): - df.set_index(set(df['A']), drop=drop, append=append) + msg = "[('foo', 'foo', 'foo', 'bar', 'bar')]" + # tuples always raise KeyError + with pytest.raises(KeyError, match=msg): + df.set_index(tuple(df['A']), drop=drop, append=append) + + # also within a list + with pytest.raises(KeyError, match=msg): + df.set_index(['A', df['A'], tuple(df['A'])], + drop=drop, append=append) + + @pytest.mark.parametrize('append', [True, False]) + @pytest.mark.parametrize('drop', [True, False]) + @pytest.mark.parametrize('box', [set, iter]) + def test_set_index_raise_on_type(self, frame_of_index_cols, box, + drop, append): + df = frame_of_index_cols + + msg = 'The parameter "keys" may be a column key, .*' + # forbidden type, e.g. set/tuple/iter + with pytest.raises(ValueError, match=msg): + df.set_index(box(df['A']), drop=drop, append=append) - # forbidden type in list, e.g. set - with pytest.raises(TypeError, match=msg): - df.set_index(['A', df['A'], set(df['A'])], + # forbidden type in list, e.g. set/tuple/iter + with pytest.raises(ValueError, match=msg): + df.set_index(['A', df['A'], box(df['A'])], drop=drop, append=append) def test_construction_with_categorical_index(self):
Split off from #24697 by request of @jorisvandenbossche & @jreback I kept the change for the whatsnew of #22486, to at least not *emphasize* that there are now ambiguous list-likes available for `DataFrame.set_index` (which haven't seen a release yet and would be removed again by #24697), which would/will make moving forward on this a bit easier. @toobaz
https://api.github.com/repos/pandas-dev/pandas/pulls/24762
2019-01-14T07:01:51Z
2019-01-19T21:18:53Z
2019-01-19T21:18:53Z
2019-01-20T02:39:47Z
REF/TST: Collect DataFrame Reduction Tests
diff --git a/pandas/tests/arrays/test_datetimelike.py b/pandas/tests/arrays/test_datetimelike.py index f234e4fadec61..b87f86318dc43 100644 --- a/pandas/tests/arrays/test_datetimelike.py +++ b/pandas/tests/arrays/test_datetimelike.py @@ -38,10 +38,10 @@ def datetime_index(request): """ freqstr = request.param # TODO: non-monotone indexes; NaTs, different start dates, timezones - pi = pd.date_range(start=pd.Timestamp('2000-01-01'), - periods=100, - freq=freqstr) - return pi + dti = pd.date_range(start=pd.Timestamp('2000-01-01'), + periods=100, + freq=freqstr) + return dti @pytest.fixture diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_analytics.py index 9f64b71ea455c..6b65ce9c809cb 100644 --- a/pandas/tests/frame/test_analytics.py +++ b/pandas/tests/frame/test_analytics.py @@ -2,7 +2,6 @@ from __future__ import print_function -from datetime import timedelta import operator from string import ascii_lowercase import warnings @@ -18,9 +17,8 @@ import pandas as pd from pandas import ( Categorical, DataFrame, MultiIndex, Series, Timestamp, compat, date_range, - isna, notna, to_datetime, to_timedelta) + isna, notna) import pandas.core.algorithms as algorithms -import pandas.core.nanops as nanops import pandas.util.testing as tm @@ -145,96 +143,6 @@ def assert_stat_op_api(opname, float_frame, float_string_frame, getattr(float_frame, opname)(axis=1, numeric_only=False) -def assert_bool_op_calc(opname, alternative, frame, has_skipna=True): - """ - Check that bool operator opname works as advertised on frame - - Parameters - ---------- - opname : string - Name of the operator to test on frame - alternative : function - Function that opname is tested against; i.e. "frame.opname()" should - equal "alternative(frame)". 
- frame : DataFrame - The object that the tests are executed on - has_skipna : bool, default True - Whether the method "opname" has the kwarg "skip_na" - """ - - f = getattr(frame, opname) - - if has_skipna: - def skipna_wrapper(x): - nona = x.dropna().values - return alternative(nona) - - def wrapper(x): - return alternative(x.values) - - result0 = f(axis=0, skipna=False) - result1 = f(axis=1, skipna=False) - - tm.assert_series_equal(result0, frame.apply(wrapper)) - tm.assert_series_equal(result1, frame.apply(wrapper, axis=1), - check_dtype=False) # HACK: win32 - else: - skipna_wrapper = alternative - wrapper = alternative - - result0 = f(axis=0) - result1 = f(axis=1) - - tm.assert_series_equal(result0, frame.apply(skipna_wrapper)) - tm.assert_series_equal(result1, frame.apply(skipna_wrapper, axis=1), - check_dtype=False) - - # bad axis - with pytest.raises(ValueError, match='No axis named 2'): - f(axis=2) - - # all NA case - if has_skipna: - all_na = frame * np.NaN - r0 = getattr(all_na, opname)(axis=0) - r1 = getattr(all_na, opname)(axis=1) - if opname == 'any': - assert not r0.any() - assert not r1.any() - else: - assert r0.all() - assert r1.all() - - -def assert_bool_op_api(opname, bool_frame_with_na, float_string_frame, - has_bool_only=False): - """ - Check that API for boolean operator opname works as advertised on frame - - Parameters - ---------- - opname : string - Name of the operator to test on frame - float_frame : DataFrame - DataFrame with columns of type float - float_string_frame : DataFrame - DataFrame with both float and string columns - has_bool_only : bool, default False - Whether the method "opname" has the kwarg "bool_only" - """ - # make sure op works on mixed-type frame - mixed = float_string_frame - mixed['_bool_'] = np.random.randn(len(mixed)) > 0.5 - getattr(mixed, opname)(axis=0) - getattr(mixed, opname)(axis=1) - - if has_bool_only: - getattr(mixed, opname)(axis=0, bool_only=True) - getattr(mixed, opname)(axis=1, bool_only=True) - 
getattr(bool_frame_with_na, opname)(axis=0, bool_only=False) - getattr(bool_frame_with_na, opname)(axis=1, bool_only=False) - - class TestDataFrameAnalytics(): # ---------------------------------------------------------------------= @@ -696,19 +604,6 @@ def test_describe_tz_values(self, tz_naive_fixture): result = df.describe(include='all') tm.assert_frame_equal(result, expected) - def test_reduce_mixed_frame(self): - # GH 6806 - df = DataFrame({ - 'bool_data': [True, True, False, False, False], - 'int_data': [10, 20, 30, 40, 50], - 'string_data': ['a', 'b', 'c', 'd', 'e'], - }) - df.reindex(columns=['bool_data', 'int_data', 'string_data']) - test = df.sum(axis=0) - tm.assert_numpy_array_equal(test.values, - np.array([2, 150, 'abcde'], dtype=object)) - tm.assert_series_equal(test, df.T.sum(axis=1)) - def test_count(self, float_frame_with_na, float_frame, float_string_frame): f = lambda s: notna(s).sum() assert_stat_op_calc('count', f, float_frame_with_na, has_skipna=False, @@ -758,72 +653,6 @@ def test_nunique(self, float_frame_with_na, float_frame, tm.assert_series_equal(df.nunique(axis=1, dropna=False), Series({0: 1, 1: 3, 2: 2})) - def test_sum(self, float_frame_with_na, mixed_float_frame, - float_frame, float_string_frame): - assert_stat_op_api('sum', float_frame, float_string_frame, - has_numeric_only=True) - assert_stat_op_calc('sum', np.sum, float_frame_with_na, - skipna_alternative=np.nansum) - # mixed types (with upcasting happening) - assert_stat_op_calc('sum', np.sum, mixed_float_frame.astype('float32'), - check_dtype=False, check_less_precise=True) - - @pytest.mark.parametrize('method', ['sum', 'mean', 'prod', 'var', - 'std', 'skew', 'min', 'max']) - def test_stat_operators_attempt_obj_array(self, method): - # GH 676 - data = { - 'a': [-0.00049987540199591344, -0.0016467257772919831, - 0.00067695870775883013], - 'b': [-0, -0, 0.0], - 'c': [0.00031111847529610595, 0.0014902627951905339, - -0.00094099200035979691] - } - df1 = DataFrame(data, index=['foo', 
'bar', 'baz'], dtype='O') - - df2 = DataFrame({0: [np.nan, 2], 1: [np.nan, 3], - 2: [np.nan, 4]}, dtype=object) - - for df in [df1, df2]: - assert df.values.dtype == np.object_ - result = getattr(df, method)(1) - expected = getattr(df.astype('f8'), method)(1) - - if method in ['sum', 'prod']: - tm.assert_series_equal(result, expected) - - def test_mean(self, float_frame_with_na, float_frame, float_string_frame): - assert_stat_op_calc('mean', np.mean, float_frame_with_na, - check_dates=True) - assert_stat_op_api('mean', float_frame, float_string_frame) - - def test_product(self, float_frame_with_na, float_frame, - float_string_frame): - assert_stat_op_calc('product', np.prod, float_frame_with_na) - assert_stat_op_api('product', float_frame, float_string_frame) - - # TODO: Ensure warning isn't emitted in the first place - @pytest.mark.filterwarnings("ignore:All-NaN:RuntimeWarning") - def test_median(self, float_frame_with_na, float_frame, - float_string_frame): - def wrapper(x): - if isna(x).any(): - return np.nan - return np.median(x) - - assert_stat_op_calc('median', wrapper, float_frame_with_na, - check_dates=True) - assert_stat_op_api('median', float_frame, float_string_frame) - - def test_min(self, float_frame_with_na, int_frame, - float_frame, float_string_frame): - with warnings.catch_warnings(record=True): - warnings.simplefilter("ignore", RuntimeWarning) - assert_stat_op_calc('min', np.min, float_frame_with_na, - check_dates=True) - assert_stat_op_calc('min', np.min, int_frame) - assert_stat_op_api('min', float_frame, float_string_frame) - def test_cummin(self, datetime_frame): datetime_frame.loc[5:10, 0] = nan datetime_frame.loc[10:15, 1] = nan @@ -870,87 +699,6 @@ def test_cummax(self, datetime_frame): cummax_xs = datetime_frame.cummax(axis=1) assert np.shape(cummax_xs) == np.shape(datetime_frame) - def test_max(self, float_frame_with_na, int_frame, - float_frame, float_string_frame): - with warnings.catch_warnings(record=True): - 
warnings.simplefilter("ignore", RuntimeWarning) - assert_stat_op_calc('max', np.max, float_frame_with_na, - check_dates=True) - assert_stat_op_calc('max', np.max, int_frame) - assert_stat_op_api('max', float_frame, float_string_frame) - - def test_mad(self, float_frame_with_na, float_frame, float_string_frame): - f = lambda x: np.abs(x - x.mean()).mean() - assert_stat_op_calc('mad', f, float_frame_with_na) - assert_stat_op_api('mad', float_frame, float_string_frame) - - def test_var_std(self, float_frame_with_na, datetime_frame, float_frame, - float_string_frame): - alt = lambda x: np.var(x, ddof=1) - assert_stat_op_calc('var', alt, float_frame_with_na) - assert_stat_op_api('var', float_frame, float_string_frame) - - alt = lambda x: np.std(x, ddof=1) - assert_stat_op_calc('std', alt, float_frame_with_na) - assert_stat_op_api('std', float_frame, float_string_frame) - - result = datetime_frame.std(ddof=4) - expected = datetime_frame.apply(lambda x: x.std(ddof=4)) - tm.assert_almost_equal(result, expected) - - result = datetime_frame.var(ddof=4) - expected = datetime_frame.apply(lambda x: x.var(ddof=4)) - tm.assert_almost_equal(result, expected) - - arr = np.repeat(np.random.random((1, 1000)), 1000, 0) - result = nanops.nanvar(arr, axis=0) - assert not (result < 0).any() - - with pd.option_context('use_bottleneck', False): - result = nanops.nanvar(arr, axis=0) - assert not (result < 0).any() - - @pytest.mark.parametrize( - "meth", ['sem', 'var', 'std']) - def test_numeric_only_flag(self, meth): - # GH 9201 - df1 = DataFrame(np.random.randn(5, 3), columns=['foo', 'bar', 'baz']) - # set one entry to a number in str format - df1.loc[0, 'foo'] = '100' - - df2 = DataFrame(np.random.randn(5, 3), columns=['foo', 'bar', 'baz']) - # set one entry to a non-number str - df2.loc[0, 'foo'] = 'a' - - result = getattr(df1, meth)(axis=1, numeric_only=True) - expected = getattr(df1[['bar', 'baz']], meth)(axis=1) - tm.assert_series_equal(expected, result) - - result = getattr(df2, 
meth)(axis=1, numeric_only=True) - expected = getattr(df2[['bar', 'baz']], meth)(axis=1) - tm.assert_series_equal(expected, result) - - # df1 has all numbers, df2 has a letter inside - pytest.raises(TypeError, lambda: getattr(df1, meth)( - axis=1, numeric_only=False)) - pytest.raises(TypeError, lambda: getattr(df2, meth)( - axis=1, numeric_only=False)) - - @pytest.mark.parametrize('op', ['mean', 'std', 'var', - 'skew', 'kurt', 'sem']) - def test_mixed_ops(self, op): - # GH 16116 - df = DataFrame({'int': [1, 2, 3, 4], - 'float': [1., 2., 3., 4.], - 'str': ['a', 'b', 'c', 'd']}) - - result = getattr(df, op)() - assert len(result) == 2 - - with pd.option_context('use_bottleneck', False): - result = getattr(df, op)() - assert len(result) == 2 - def test_cumsum(self, datetime_frame): datetime_frame.loc[5:10, 0] = nan datetime_frame.loc[10:15, 1] = nan @@ -1003,130 +751,6 @@ def test_cumprod(self, datetime_frame): df.cumprod(0) df.cumprod(1) - def test_sem(self, float_frame_with_na, datetime_frame, - float_frame, float_string_frame): - alt = lambda x: np.std(x, ddof=1) / np.sqrt(len(x)) - assert_stat_op_calc('sem', alt, float_frame_with_na) - assert_stat_op_api('sem', float_frame, float_string_frame) - - result = datetime_frame.sem(ddof=4) - expected = datetime_frame.apply( - lambda x: x.std(ddof=4) / np.sqrt(len(x))) - tm.assert_almost_equal(result, expected) - - arr = np.repeat(np.random.random((1, 1000)), 1000, 0) - result = nanops.nansem(arr, axis=0) - assert not (result < 0).any() - - with pd.option_context('use_bottleneck', False): - result = nanops.nansem(arr, axis=0) - assert not (result < 0).any() - - @td.skip_if_no_scipy - def test_skew(self, float_frame_with_na, float_frame, float_string_frame): - from scipy.stats import skew - - def alt(x): - if len(x) < 3: - return np.nan - return skew(x, bias=False) - - assert_stat_op_calc('skew', alt, float_frame_with_na) - assert_stat_op_api('skew', float_frame, float_string_frame) - - @td.skip_if_no_scipy - def 
test_kurt(self, float_frame_with_na, float_frame, float_string_frame): - from scipy.stats import kurtosis - - def alt(x): - if len(x) < 4: - return np.nan - return kurtosis(x, bias=False) - - assert_stat_op_calc('kurt', alt, float_frame_with_na) - assert_stat_op_api('kurt', float_frame, float_string_frame) - - index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]], - codes=[[0, 0, 0, 0, 0, 0], - [0, 1, 2, 0, 1, 2], - [0, 1, 0, 1, 0, 1]]) - df = DataFrame(np.random.randn(6, 3), index=index) - - kurt = df.kurt() - kurt2 = df.kurt(level=0).xs('bar') - tm.assert_series_equal(kurt, kurt2, check_names=False) - assert kurt.name is None - assert kurt2.name == 'bar' - - @pytest.mark.parametrize("dropna, expected", [ - (True, {'A': [12], - 'B': [10.0], - 'C': [1.0], - 'D': ['a'], - 'E': Categorical(['a'], categories=['a']), - 'F': to_datetime(['2000-1-2']), - 'G': to_timedelta(['1 days'])}), - (False, {'A': [12], - 'B': [10.0], - 'C': [np.nan], - 'D': np.array([np.nan], dtype=object), - 'E': Categorical([np.nan], categories=['a']), - 'F': [pd.NaT], - 'G': to_timedelta([pd.NaT])}), - (True, {'H': [8, 9, np.nan, np.nan], - 'I': [8, 9, np.nan, np.nan], - 'J': [1, np.nan, np.nan, np.nan], - 'K': Categorical(['a', np.nan, np.nan, np.nan], - categories=['a']), - 'L': to_datetime(['2000-1-2', 'NaT', 'NaT', 'NaT']), - 'M': to_timedelta(['1 days', 'nan', 'nan', 'nan']), - 'N': [0, 1, 2, 3]}), - (False, {'H': [8, 9, np.nan, np.nan], - 'I': [8, 9, np.nan, np.nan], - 'J': [1, np.nan, np.nan, np.nan], - 'K': Categorical([np.nan, 'a', np.nan, np.nan], - categories=['a']), - 'L': to_datetime(['NaT', '2000-1-2', 'NaT', 'NaT']), - 'M': to_timedelta(['nan', '1 days', 'nan', 'nan']), - 'N': [0, 1, 2, 3]}) - ]) - def test_mode_dropna(self, dropna, expected): - - df = DataFrame({"A": [12, 12, 19, 11], - "B": [10, 10, np.nan, 3], - "C": [1, np.nan, np.nan, np.nan], - "D": [np.nan, np.nan, 'a', np.nan], - "E": Categorical([np.nan, np.nan, 'a', np.nan]), - "F": to_datetime(['NaT', 
'2000-1-2', 'NaT', 'NaT']), - "G": to_timedelta(['1 days', 'nan', 'nan', 'nan']), - "H": [8, 8, 9, 9], - "I": [9, 9, 8, 8], - "J": [1, 1, np.nan, np.nan], - "K": Categorical(['a', np.nan, 'a', np.nan]), - "L": to_datetime(['2000-1-2', '2000-1-2', - 'NaT', 'NaT']), - "M": to_timedelta(['1 days', 'nan', - '1 days', 'nan']), - "N": np.arange(4, dtype='int64')}) - - result = df[sorted(list(expected.keys()))].mode(dropna=dropna) - expected = DataFrame(expected) - tm.assert_frame_equal(result, expected) - - @pytest.mark.skipif(not compat.PY3, reason="only PY3") - def test_mode_sortwarning(self): - # Check for the warning that is raised when the mode - # results cannot be sorted - - df = DataFrame({"A": [np.nan, np.nan, 'a', 'a']}) - expected = DataFrame({'A': ['a', np.nan]}) - - with tm.assert_produces_warning(UserWarning, check_stacklevel=False): - result = df.mode(dropna=False) - result = result.sort_values(by='A').reset_index(drop=True) - - tm.assert_frame_equal(result, expected) - def test_operators_timedelta64(self): from datetime import timedelta df = DataFrame(dict(A=date_range('2012-1-1', periods=3, freq='D'), @@ -1206,123 +830,6 @@ def test_operators_timedelta64(self): assert df['off1'].dtype == 'timedelta64[ns]' assert df['off2'].dtype == 'timedelta64[ns]' - def test_sum_corner(self, empty_frame): - axis0 = empty_frame.sum(0) - axis1 = empty_frame.sum(1) - assert isinstance(axis0, Series) - assert isinstance(axis1, Series) - assert len(axis0) == 0 - assert len(axis1) == 0 - - @pytest.mark.parametrize('method, unit', [ - ('sum', 0), - ('prod', 1), - ]) - def test_sum_prod_nanops(self, method, unit): - idx = ['a', 'b', 'c'] - df = pd.DataFrame({"a": [unit, unit], - "b": [unit, np.nan], - "c": [np.nan, np.nan]}) - # The default - result = getattr(df, method) - expected = pd.Series([unit, unit, unit], index=idx, dtype='float64') - - # min_count=1 - result = getattr(df, method)(min_count=1) - expected = pd.Series([unit, unit, np.nan], index=idx) - 
tm.assert_series_equal(result, expected) - - # min_count=0 - result = getattr(df, method)(min_count=0) - expected = pd.Series([unit, unit, unit], index=idx, dtype='float64') - tm.assert_series_equal(result, expected) - - result = getattr(df.iloc[1:], method)(min_count=1) - expected = pd.Series([unit, np.nan, np.nan], index=idx) - tm.assert_series_equal(result, expected) - - # min_count > 1 - df = pd.DataFrame({"A": [unit] * 10, "B": [unit] * 5 + [np.nan] * 5}) - result = getattr(df, method)(min_count=5) - expected = pd.Series(result, index=['A', 'B']) - tm.assert_series_equal(result, expected) - - result = getattr(df, method)(min_count=6) - expected = pd.Series(result, index=['A', 'B']) - tm.assert_series_equal(result, expected) - - def test_sum_nanops_timedelta(self): - # prod isn't defined on timedeltas - idx = ['a', 'b', 'c'] - df = pd.DataFrame({"a": [0, 0], - "b": [0, np.nan], - "c": [np.nan, np.nan]}) - - df2 = df.apply(pd.to_timedelta) - - # 0 by default - result = df2.sum() - expected = pd.Series([0, 0, 0], dtype='m8[ns]', index=idx) - tm.assert_series_equal(result, expected) - - # min_count=0 - result = df2.sum(min_count=0) - tm.assert_series_equal(result, expected) - - # min_count=1 - result = df2.sum(min_count=1) - expected = pd.Series([0, 0, np.nan], dtype='m8[ns]', index=idx) - tm.assert_series_equal(result, expected) - - def test_sum_object(self, float_frame): - values = float_frame.values.astype(int) - frame = DataFrame(values, index=float_frame.index, - columns=float_frame.columns) - deltas = frame * timedelta(1) - deltas.sum() - - def test_sum_bool(self, float_frame): - # ensure this works, bug report - bools = np.isnan(float_frame) - bools.sum(1) - bools.sum(0) - - def test_mean_corner(self, float_frame, float_string_frame): - # unit test when have object data - the_mean = float_string_frame.mean(axis=0) - the_sum = float_string_frame.sum(axis=0, numeric_only=True) - tm.assert_index_equal(the_sum.index, the_mean.index) - assert len(the_mean.index) 
< len(float_string_frame.columns) - - # xs sum mixed type, just want to know it works... - the_mean = float_string_frame.mean(axis=1) - the_sum = float_string_frame.sum(axis=1, numeric_only=True) - tm.assert_index_equal(the_sum.index, the_mean.index) - - # take mean of boolean column - float_frame['bool'] = float_frame['A'] > 0 - means = float_frame.mean(0) - assert means['bool'] == float_frame['bool'].values.mean() - - def test_stats_mixed_type(self, float_string_frame): - # don't blow up - float_string_frame.std(1) - float_string_frame.var(1) - float_string_frame.mean(1) - float_string_frame.skew(1) - - # TODO: Ensure warning isn't emitted in the first place - @pytest.mark.filterwarnings("ignore:All-NaN:RuntimeWarning") - def test_median_corner(self, int_frame, float_frame, float_string_frame): - def wrapper(x): - if isna(x).any(): - return np.nan - return np.median(x) - - assert_stat_op_calc('median', wrapper, int_frame, check_dtype=False, - check_dates=True) - assert_stat_op_api('median', float_frame, float_string_frame) - # Miscellanea def test_count_objects(self, float_string_frame): @@ -1338,11 +845,6 @@ def test_cumsum_corner(self): # ?(wesm) result = dm.cumsum() # noqa - def test_sum_bools(self): - df = DataFrame(index=lrange(1), columns=lrange(10)) - bools = isna(df) - assert bools.sum(axis=1)[0] == 10 - # Index of max / min def test_idxmin(self, float_frame, int_frame): @@ -1373,143 +875,6 @@ def test_idxmax(self, float_frame, int_frame): pytest.raises(ValueError, frame.idxmax, axis=2) - # ---------------------------------------------------------------------- - # Logical reductions - - @pytest.mark.parametrize('opname', ['any', 'all']) - def test_any_all(self, opname, bool_frame_with_na, float_string_frame): - assert_bool_op_calc(opname, getattr(np, opname), bool_frame_with_na, - has_skipna=True) - assert_bool_op_api(opname, bool_frame_with_na, float_string_frame, - has_bool_only=True) - - def test_any_all_extra(self): - df = DataFrame({ - 'A': [True, 
False, False], - 'B': [True, True, False], - 'C': [True, True, True], - }, index=['a', 'b', 'c']) - result = df[['A', 'B']].any(1) - expected = Series([True, True, False], index=['a', 'b', 'c']) - tm.assert_series_equal(result, expected) - - result = df[['A', 'B']].any(1, bool_only=True) - tm.assert_series_equal(result, expected) - - result = df.all(1) - expected = Series([True, False, False], index=['a', 'b', 'c']) - tm.assert_series_equal(result, expected) - - result = df.all(1, bool_only=True) - tm.assert_series_equal(result, expected) - - # Axis is None - result = df.all(axis=None).item() - assert result is False - - result = df.any(axis=None).item() - assert result is True - - result = df[['C']].all(axis=None).item() - assert result is True - - def test_any_datetime(self): - - # GH 23070 - float_data = [1, np.nan, 3, np.nan] - datetime_data = [pd.Timestamp('1960-02-15'), - pd.Timestamp('1960-02-16'), - pd.NaT, - pd.NaT] - df = DataFrame({ - "A": float_data, - "B": datetime_data - }) - - result = df.any(1) - expected = Series([True, True, True, False]) - tm.assert_series_equal(result, expected) - - @pytest.mark.parametrize('func, data, expected', [ - (np.any, {}, False), - (np.all, {}, True), - (np.any, {'A': []}, False), - (np.all, {'A': []}, True), - (np.any, {'A': [False, False]}, False), - (np.all, {'A': [False, False]}, False), - (np.any, {'A': [True, False]}, True), - (np.all, {'A': [True, False]}, False), - (np.any, {'A': [True, True]}, True), - (np.all, {'A': [True, True]}, True), - - (np.any, {'A': [False], 'B': [False]}, False), - (np.all, {'A': [False], 'B': [False]}, False), - - (np.any, {'A': [False, False], 'B': [False, True]}, True), - (np.all, {'A': [False, False], 'B': [False, True]}, False), - - # other types - (np.all, {'A': pd.Series([0.0, 1.0], dtype='float')}, False), - (np.any, {'A': pd.Series([0.0, 1.0], dtype='float')}, True), - (np.all, {'A': pd.Series([0, 1], dtype=int)}, False), - (np.any, {'A': pd.Series([0, 1], dtype=int)}, True), 
- pytest.param(np.all, {'A': pd.Series([0, 1], dtype='M8[ns]')}, False, - marks=[td.skip_if_np_lt_115]), - pytest.param(np.any, {'A': pd.Series([0, 1], dtype='M8[ns]')}, True, - marks=[td.skip_if_np_lt_115]), - pytest.param(np.all, {'A': pd.Series([1, 2], dtype='M8[ns]')}, True, - marks=[td.skip_if_np_lt_115]), - pytest.param(np.any, {'A': pd.Series([1, 2], dtype='M8[ns]')}, True, - marks=[td.skip_if_np_lt_115]), - pytest.param(np.all, {'A': pd.Series([0, 1], dtype='m8[ns]')}, False, - marks=[td.skip_if_np_lt_115]), - pytest.param(np.any, {'A': pd.Series([0, 1], dtype='m8[ns]')}, True, - marks=[td.skip_if_np_lt_115]), - pytest.param(np.all, {'A': pd.Series([1, 2], dtype='m8[ns]')}, True, - marks=[td.skip_if_np_lt_115]), - pytest.param(np.any, {'A': pd.Series([1, 2], dtype='m8[ns]')}, True, - marks=[td.skip_if_np_lt_115]), - (np.all, {'A': pd.Series([0, 1], dtype='category')}, False), - (np.any, {'A': pd.Series([0, 1], dtype='category')}, True), - (np.all, {'A': pd.Series([1, 2], dtype='category')}, True), - (np.any, {'A': pd.Series([1, 2], dtype='category')}, True), - - # # Mix - # GH 21484 - # (np.all, {'A': pd.Series([10, 20], dtype='M8[ns]'), - # 'B': pd.Series([10, 20], dtype='m8[ns]')}, True), - ]) - def test_any_all_np_func(self, func, data, expected): - # GH 19976 - data = DataFrame(data) - result = func(data) - assert isinstance(result, np.bool_) - assert result.item() is expected - - # method version - result = getattr(DataFrame(data), func.__name__)(axis=None) - assert isinstance(result, np.bool_) - assert result.item() is expected - - def test_any_all_object(self): - # GH 19976 - result = np.all(DataFrame(columns=['a', 'b'])).item() - assert result is True - - result = np.any(DataFrame(columns=['a', 'b'])).item() - assert result is False - - @pytest.mark.parametrize('method', ['any', 'all']) - def test_any_all_level_axis_none_raises(self, method): - df = DataFrame( - {"A": 1}, - index=MultiIndex.from_product([['A', 'B'], ['a', 'b']], - names=['out', 
'in']) - ) - xpr = "Must specify 'axis' when aggregating by level." - with pytest.raises(ValueError, match=xpr): - getattr(df, method)(axis=None, level='out') - # ---------------------------------------------------------------------- # Isin diff --git a/pandas/tests/reductions/test_reductions.py b/pandas/tests/reductions/test_reductions.py index d27308029fa19..a7b32c4d36a70 100644 --- a/pandas/tests/reductions/test_reductions.py +++ b/pandas/tests/reductions/test_reductions.py @@ -1,15 +1,284 @@ # -*- coding: utf-8 -*- -from datetime import datetime +from datetime import datetime, timedelta +import warnings import numpy as np import pytest +from pandas.compat import lrange +import pandas.util._test_decorators as td + import pandas as pd -from pandas import Categorical, DataFrame, Index, PeriodIndex, Series, compat +from pandas import ( + Categorical, DataFrame, Index, MultiIndex, PeriodIndex, Series, compat, + date_range, isna) from pandas.core import nanops import pandas.util.testing as tm +# TODO: copied from tests.frame.test_analytics; belongs in pd.util.testing? +def assert_stat_op_calc(opname, alternative, frame, has_skipna=True, + check_dtype=True, check_dates=False, + check_less_precise=False, skipna_alternative=None): + """ + Check that operator opname works as advertised on frame + + Parameters + ---------- + opname : string + Name of the operator to test on frame + alternative : function + Function that opname is tested against; i.e. "frame.opname()" should + equal "alternative(frame)". + frame : DataFrame + The object that the tests are executed on + has_skipna : bool, default True + Whether the method "opname" has the kwarg "skip_na" + check_dtype : bool, default True + Whether the dtypes of the result of "frame.opname()" and + "alternative(frame)" should be checked. 
+ check_dates : bool, default false + Whether opname should be tested on a Datetime Series + check_less_precise : bool, default False + Whether results should only be compared approximately; + passed on to tm.assert_series_equal + skipna_alternative : function, default None + NaN-safe version of alternative + """ + + f = getattr(frame, opname) + + if check_dates: + df = DataFrame({'b': date_range('1/1/2001', periods=2)}) + result = getattr(df, opname)() + assert isinstance(result, Series) + + df['a'] = lrange(len(df)) + result = getattr(df, opname)() + assert isinstance(result, Series) + assert len(result) + + if has_skipna: + def wrapper(x): + return alternative(x.values) + + skipna_wrapper = tm._make_skipna_wrapper(alternative, + skipna_alternative) + result0 = f(axis=0, skipna=False) + result1 = f(axis=1, skipna=False) + tm.assert_series_equal(result0, frame.apply(wrapper), + check_dtype=check_dtype, + check_less_precise=check_less_precise) + # HACK: win32 + tm.assert_series_equal(result1, frame.apply(wrapper, axis=1), + check_dtype=False, + check_less_precise=check_less_precise) + else: + skipna_wrapper = alternative + + result0 = f(axis=0) + result1 = f(axis=1) + tm.assert_series_equal(result0, frame.apply(skipna_wrapper), + check_dtype=check_dtype, + check_less_precise=check_less_precise) + + if opname in ['sum', 'prod']: + expected = frame.apply(skipna_wrapper, axis=1) + tm.assert_series_equal(result1, expected, check_dtype=False, + check_less_precise=check_less_precise) + + # check dtypes + if check_dtype: + lcd_dtype = frame.values.dtype + assert lcd_dtype == result0.dtype + assert lcd_dtype == result1.dtype + + # bad axis + with pytest.raises(ValueError, match='No axis named 2'): + f(axis=2) + + # all NA case + if has_skipna: + all_na = frame * np.NaN + r0 = getattr(all_na, opname)(axis=0) + r1 = getattr(all_na, opname)(axis=1) + if opname in ['sum', 'prod']: + unit = 1 if opname == 'prod' else 0 # result for empty sum/prod + expected = pd.Series(unit, 
index=r0.index, dtype=r0.dtype) + tm.assert_series_equal(r0, expected) + expected = pd.Series(unit, index=r1.index, dtype=r1.dtype) + tm.assert_series_equal(r1, expected) + + +# TODO: copied from tests.frame.test_analytics; belongs in pd.util.testing? +def assert_stat_op_api(opname, float_frame, float_string_frame, + has_numeric_only=False): + """ + Check that API for operator opname works as advertised on frame + + Parameters + ---------- + opname : string + Name of the operator to test on frame + float_frame : DataFrame + DataFrame with columns of type float + float_string_frame : DataFrame + DataFrame with both float and string columns + has_numeric_only : bool, default False + Whether the method "opname" has the kwarg "numeric_only" + """ + + # make sure works on mixed-type frame + getattr(float_string_frame, opname)(axis=0) + getattr(float_string_frame, opname)(axis=1) + + if has_numeric_only: + getattr(float_string_frame, opname)(axis=0, numeric_only=True) + getattr(float_string_frame, opname)(axis=1, numeric_only=True) + getattr(float_frame, opname)(axis=0, numeric_only=False) + getattr(float_frame, opname)(axis=1, numeric_only=False) + + +def assert_bool_op_calc(opname, alternative, frame, has_skipna=True): + """ + Check that bool operator opname works as advertised on frame + + Parameters + ---------- + opname : string + Name of the operator to test on frame + alternative : function + Function that opname is tested against; i.e. "frame.opname()" should + equal "alternative(frame)". 
+ frame : DataFrame + The object that the tests are executed on + has_skipna : bool, default True + Whether the method "opname" has the kwarg "skip_na" + """ + + f = getattr(frame, opname) + + if has_skipna: + def skipna_wrapper(x): + nona = x.dropna().values + return alternative(nona) + + def wrapper(x): + return alternative(x.values) + + result0 = f(axis=0, skipna=False) + result1 = f(axis=1, skipna=False) + + tm.assert_series_equal(result0, frame.apply(wrapper)) + tm.assert_series_equal(result1, frame.apply(wrapper, axis=1), + check_dtype=False) # HACK: win32 + else: + skipna_wrapper = alternative + wrapper = alternative + + result0 = f(axis=0) + result1 = f(axis=1) + + tm.assert_series_equal(result0, frame.apply(skipna_wrapper)) + tm.assert_series_equal(result1, frame.apply(skipna_wrapper, axis=1), + check_dtype=False) + + # bad axis + with pytest.raises(ValueError, match='No axis named 2'): + f(axis=2) + + # all NA case + if has_skipna: + all_na = frame * np.NaN + r0 = getattr(all_na, opname)(axis=0) + r1 = getattr(all_na, opname)(axis=1) + if opname == 'any': + assert not r0.any() + assert not r1.any() + else: + assert r0.all() + assert r1.all() + + +def assert_bool_op_api(opname, bool_frame_with_na, float_string_frame, + has_bool_only=False): + """ + Check that API for boolean operator opname works as advertised on frame + + Parameters + ---------- + opname : string + Name of the operator to test on frame + float_frame : DataFrame + DataFrame with columns of type float + float_string_frame : DataFrame + DataFrame with both float and string columns + has_bool_only : bool, default False + Whether the method "opname" has the kwarg "bool_only" + """ + # make sure op works on mixed-type frame + mixed = float_string_frame + mixed['_bool_'] = np.random.randn(len(mixed)) > 0.5 + getattr(mixed, opname)(axis=0) + getattr(mixed, opname)(axis=1) + + if has_bool_only: + getattr(mixed, opname)(axis=0, bool_only=True) + getattr(mixed, opname)(axis=1, bool_only=True) + 
getattr(bool_frame_with_na, opname)(axis=0, bool_only=False) + getattr(bool_frame_with_na, opname)(axis=1, bool_only=False) + + +def get_float_frame_with_na(): + """ + Fixture for DataFrame of floats with index of unique strings + + Columns are ['A', 'B', 'C', 'D']; some entries are missing + """ + df = DataFrame(tm.getSeriesData()) + # set some NAs + df.loc[5:10] = np.nan + df.loc[15:20, -2:] = np.nan + return df + + +def get_float_string_frame(): + """ + Fixture for DataFrame of floats and strings with index of unique strings + + Columns are ['A', 'B', 'C', 'D', 'foo']. + """ + df = DataFrame(tm.getSeriesData()) + df['foo'] = 'bar' + return df + + +def get_int_frame(): + """ + Fixture for DataFrame of ints with index of unique strings + + Columns are ['A', 'B', 'C', 'D'] + """ + df = DataFrame({k: v.astype(int) + for k, v in compat.iteritems(tm.getSeriesData())}) + # force these all to int64 to avoid platform testing issues + return DataFrame({c: s for c, s in compat.iteritems(df)}, dtype=np.int64) + + +def get_bool_frame_with_na(): + """ + Fixture for DataFrame of booleans with index of unique strings + + Columns are ['A', 'B', 'C', 'D']; some entries are missing + """ + df = DataFrame(tm.getSeriesData()) > 0 + df = df.astype(object) + # set some NAs + df.loc[5:10] = np.nan + df.loc[15:20, -2:] = np.nan + return df + + def get_objs(): indexes = [ tm.makeBoolIndex(10, name='a'), @@ -260,7 +529,7 @@ def test_empty(self, method, unit, use_bottleneck): ]) def test_empty_multi(self, method, unit): s = pd.Series([1, np.nan, np.nan, np.nan], - index=pd.MultiIndex.from_product([('a', 'b'), (0, 1)])) + index=MultiIndex.from_product([('a', 'b'), (0, 1)])) # 1 / 0 by default result = getattr(s, method)(level=0) expected = pd.Series([1, unit], index=['a', 'b']) @@ -404,7 +673,7 @@ def test_idxmin(self): assert pd.isna(allna.idxmin()) # datetime64[ns] - s = Series(pd.date_range('20130102', periods=6)) + s = Series(date_range('20130102', periods=6)) result = s.idxmin() assert 
result == 0 @@ -434,7 +703,6 @@ def test_idxmax(self): allna = string_series * np.nan assert pd.isna(allna.idxmax()) - from pandas import date_range s = Series(date_range('20130102', periods=6)) result = s.idxmax() assert result == 5 @@ -497,7 +765,7 @@ def test_all_any_params(self): def test_timedelta64_analytics(self): # index min/max - dti = pd.date_range('2012-1-1', periods=3, freq='D') + dti = date_range('2012-1-1', periods=3, freq='D') td = Series(dti) - pd.Timestamp('20120101') result = td.idxmin() @@ -517,8 +785,8 @@ def test_timedelta64_analytics(self): assert result == 2 # abs - s1 = Series(pd.date_range('20120101', periods=3)) - s2 = Series(pd.date_range('20120102', periods=3)) + s1 = Series(date_range('20120101', periods=3)) + s2 = Series(date_range('20120102', periods=3)) expected = Series(s2 - s1) # FIXME: don't leave commented-out code @@ -614,7 +882,7 @@ def test_minmax_nat_dataframe(self, nat_df): assert nat_df.max(skipna=False)[0] is pd.NaT def test_min_max(self): - rng = pd.date_range('1/1/2000', '12/31/2000') + rng = date_range('1/1/2000', '12/31/2000') rng2 = rng.take(np.random.permutation(len(rng))) the_min = rng2.min() @@ -628,7 +896,7 @@ def test_min_max(self): assert rng.max() == rng[-1] def test_min_max_series(self): - rng = pd.date_range('1/1/2000', periods=10, freq='4h') + rng = date_range('1/1/2000', periods=10, freq='4h') lvls = ['A', 'A', 'A', 'B', 'B', 'B', 'C', 'C', 'C', 'C'] df = DataFrame({'TS': rng, 'V': np.random.randn(len(rng)), 'L': lvls}) @@ -860,3 +1128,640 @@ def test_mode_sortwarning(self): result = result.sort_values().reset_index(drop=True) tm.assert_series_equal(result, expected) + + +class TestDataFrameReductions(object): + # Note: the name TestDataFrameReductions indicates these tests + # were moved from a DataFrame-specific test file, _not_ that these + # tests are intended long-term to be DataFrame-specific + + def test_reduce_mixed_frame(self): + # GH#6806 + df = DataFrame({ + 'bool_data': [True, True, False, 
False, False], + 'int_data': [10, 20, 30, 40, 50], + 'string_data': ['a', 'b', 'c', 'd', 'e'], + }) + df.reindex(columns=['bool_data', 'int_data', 'string_data']) + test = df.sum(axis=0) + tm.assert_numpy_array_equal(test.values, + np.array([2, 150, 'abcde'], dtype=object)) + tm.assert_series_equal(test, df.T.sum(axis=1)) + + # ---------------------------------------------------------------- + # Min/Max + + def test_max(self): + + int_frame = get_int_frame() + float_frame = DataFrame(tm.getSeriesData()) + float_frame_with_na = get_float_frame_with_na() + float_string_frame = get_float_string_frame() + + with warnings.catch_warnings(record=True): + warnings.simplefilter("ignore", RuntimeWarning) + assert_stat_op_calc('max', np.max, float_frame_with_na, + check_dates=True) + assert_stat_op_calc('max', np.max, int_frame) + assert_stat_op_api('max', float_frame, float_string_frame) + + def test_min(self): + + int_frame = get_int_frame() + float_frame = DataFrame(tm.getSeriesData()) + float_frame_with_na = get_float_frame_with_na() + float_string_frame = get_float_string_frame() + + with warnings.catch_warnings(record=True): + warnings.simplefilter("ignore", RuntimeWarning) + assert_stat_op_calc('min', np.min, float_frame_with_na, + check_dates=True) + assert_stat_op_calc('min', np.min, int_frame) + assert_stat_op_api('min', float_frame, float_string_frame) + + # ---------------------------------------------------------------- + # Any/All + + @pytest.mark.parametrize('opname', ['any', 'all']) + def test_any_all(self, opname): + float_string_frame = get_float_string_frame() + bool_frame_with_na = get_bool_frame_with_na() + + assert_bool_op_calc(opname, getattr(np, opname), bool_frame_with_na, + has_skipna=True) + assert_bool_op_api(opname, bool_frame_with_na, float_string_frame, + has_bool_only=True) + + def test_any_all_extra(self): + df = DataFrame({ + 'A': [True, False, False], + 'B': [True, True, False], + 'C': [True, True, True], + }, index=['a', 'b', 'c']) + result 
= df[['A', 'B']].any(1) + expected = Series([True, True, False], index=['a', 'b', 'c']) + tm.assert_series_equal(result, expected) + + result = df[['A', 'B']].any(1, bool_only=True) + tm.assert_series_equal(result, expected) + + result = df.all(1) + expected = Series([True, False, False], index=['a', 'b', 'c']) + tm.assert_series_equal(result, expected) + + result = df.all(1, bool_only=True) + tm.assert_series_equal(result, expected) + + # Axis is None + result = df.all(axis=None).item() + assert result is False + + result = df.any(axis=None).item() + assert result is True + + result = df[['C']].all(axis=None).item() + assert result is True + + def test_any_datetime(self): + # GH#23070 + float_data = [1, np.nan, 3, np.nan] + datetime_data = [pd.Timestamp('1960-02-15'), + pd.Timestamp('1960-02-16'), + pd.NaT, + pd.NaT] + df = DataFrame({ + "A": float_data, + "B": datetime_data + }) + + result = df.any(1) + expected = Series([True, True, True, False]) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize('func, data, expected', [ + (np.any, {}, False), + (np.all, {}, True), + (np.any, {'A': []}, False), + (np.all, {'A': []}, True), + (np.any, {'A': [False, False]}, False), + (np.all, {'A': [False, False]}, False), + (np.any, {'A': [True, False]}, True), + (np.all, {'A': [True, False]}, False), + (np.any, {'A': [True, True]}, True), + (np.all, {'A': [True, True]}, True), + + (np.any, {'A': [False], 'B': [False]}, False), + (np.all, {'A': [False], 'B': [False]}, False), + + (np.any, {'A': [False, False], 'B': [False, True]}, True), + (np.all, {'A': [False, False], 'B': [False, True]}, False), + + # other types + (np.all, {'A': pd.Series([0.0, 1.0], dtype='float')}, False), + (np.any, {'A': pd.Series([0.0, 1.0], dtype='float')}, True), + (np.all, {'A': pd.Series([0, 1], dtype=int)}, False), + (np.any, {'A': pd.Series([0, 1], dtype=int)}, True), + pytest.param(np.all, {'A': pd.Series([0, 1], dtype='M8[ns]')}, False, + marks=[td.skip_if_np_lt_115]), + 
pytest.param(np.any, {'A': pd.Series([0, 1], dtype='M8[ns]')}, True, + marks=[td.skip_if_np_lt_115]), + pytest.param(np.all, {'A': pd.Series([1, 2], dtype='M8[ns]')}, True, + marks=[td.skip_if_np_lt_115]), + pytest.param(np.any, {'A': pd.Series([1, 2], dtype='M8[ns]')}, True, + marks=[td.skip_if_np_lt_115]), + pytest.param(np.all, {'A': pd.Series([0, 1], dtype='m8[ns]')}, False, + marks=[td.skip_if_np_lt_115]), + pytest.param(np.any, {'A': pd.Series([0, 1], dtype='m8[ns]')}, True, + marks=[td.skip_if_np_lt_115]), + pytest.param(np.all, {'A': pd.Series([1, 2], dtype='m8[ns]')}, True, + marks=[td.skip_if_np_lt_115]), + pytest.param(np.any, {'A': pd.Series([1, 2], dtype='m8[ns]')}, True, + marks=[td.skip_if_np_lt_115]), + (np.all, {'A': pd.Series([0, 1], dtype='category')}, False), + (np.any, {'A': pd.Series([0, 1], dtype='category')}, True), + (np.all, {'A': pd.Series([1, 2], dtype='category')}, True), + (np.any, {'A': pd.Series([1, 2], dtype='category')}, True), + + # # Mix + # GH#21484 + # (np.all, {'A': pd.Series([10, 20], dtype='M8[ns]'), + # 'B': pd.Series([10, 20], dtype='m8[ns]')}, True), + ]) + def test_any_all_np_func(self, func, data, expected): + # GH#19976 + data = DataFrame(data) + result = func(data) + assert isinstance(result, np.bool_) + assert result.item() is expected + + # method version + result = getattr(DataFrame(data), func.__name__)(axis=None) + assert isinstance(result, np.bool_) + assert result.item() is expected + + def test_any_all_object(self): + # GH#19976 + result = np.all(DataFrame(columns=['a', 'b'])).item() + assert result is True + + result = np.any(DataFrame(columns=['a', 'b'])).item() + assert result is False + + @pytest.mark.parametrize('method', ['any', 'all']) + def test_any_all_level_axis_none_raises(self, method): + df = DataFrame( + {"A": 1}, + index=MultiIndex.from_product([['A', 'B'], ['a', 'b']], + names=['out', 'in']) + ) + xpr = "Must specify 'axis' when aggregating by level." 
+ with pytest.raises(ValueError, match=xpr): + getattr(df, method)(axis=None, level='out') + + # ---------------------------------------------------------------- + # Statistical Reductions + # TODO: belongs in test_stat_reductions + + # TODO: Ensure warning isn't emitted in the first place + @pytest.mark.filterwarnings("ignore:All-NaN:RuntimeWarning") + def test_median(self): + + float_frame = DataFrame(tm.getSeriesData()) + float_frame_with_na = get_float_frame_with_na() + float_string_frame = get_float_string_frame() + + def wrapper(x): + if isna(x).any(): + return np.nan + return np.median(x) + + assert_stat_op_calc('median', wrapper, float_frame_with_na, + check_dates=True) + assert_stat_op_api('median', float_frame, float_string_frame) + + def test_mean(self): + float_frame = DataFrame(tm.getSeriesData()) + float_frame_with_na = get_float_frame_with_na() + float_string_frame = get_float_string_frame() + + assert_stat_op_calc('mean', np.mean, float_frame_with_na, + check_dates=True) + assert_stat_op_api('mean', float_frame, float_string_frame) + + def test_mean_corner(self): + # unit test when have object data + float_string_frame = pd.DataFrame(tm.getSeriesData()) + float_string_frame['foo'] = 'bar' + + the_mean = float_string_frame.mean(axis=0) + the_sum = float_string_frame.sum(axis=0, numeric_only=True) + tm.assert_index_equal(the_sum.index, the_mean.index) + assert len(the_mean.index) < len(float_string_frame.columns) + + # xs sum mixed type, just want to know it works... 
+ the_mean = float_string_frame.mean(axis=1) + the_sum = float_string_frame.sum(axis=1, numeric_only=True) + tm.assert_index_equal(the_sum.index, the_mean.index) + + float_frame = pd.DataFrame(tm.getSeriesData()) + + # take mean of boolean column + float_frame['bool'] = float_frame['A'] > 0 + means = float_frame.mean(0) + assert means['bool'] == float_frame['bool'].values.mean() + + # TODO: Ensure warning isn't emitted in the first place + @pytest.mark.filterwarnings("ignore:All-NaN:RuntimeWarning") + def test_median_corner(self): + + int_frame = get_int_frame() + float_frame = pd.DataFrame(tm.getSeriesData()) + + float_string_frame = pd.DataFrame(tm.getSeriesData()) + float_string_frame['foo'] = 'bar' + + def wrapper(x): + if isna(x).any(): + return np.nan + return np.median(x) + + assert_stat_op_calc('median', wrapper, int_frame, check_dtype=False, + check_dates=True) + assert_stat_op_api('median', float_frame, float_string_frame) + + def test_stats_mixed_type(self): + # don't blow up + float_string_frame = pd.DataFrame(tm.getSeriesData()) + float_string_frame['foo'] = 'bar' + + float_string_frame.std(1) + float_string_frame.var(1) + float_string_frame.mean(1) + float_string_frame.skew(1) + + def test_mad(self): + float_frame = DataFrame(tm.getSeriesData()) + float_frame_with_na = get_float_frame_with_na() + float_string_frame = get_float_string_frame() + + f = lambda x: np.abs(x - x.mean()).mean() + assert_stat_op_calc('mad', f, float_frame_with_na) + assert_stat_op_api('mad', float_frame, float_string_frame) + + def test_var_std(self): + + float_frame = DataFrame(tm.getSeriesData()) + datetime_frame = DataFrame(tm.getTimeSeriesData()) + float_frame_with_na = get_float_frame_with_na() + float_string_frame = get_float_string_frame() + + alt = lambda x: np.var(x, ddof=1) + assert_stat_op_calc('var', alt, float_frame_with_na) + assert_stat_op_api('var', float_frame, float_string_frame) + + alt = lambda x: np.std(x, ddof=1) + assert_stat_op_calc('std', alt, 
float_frame_with_na) + assert_stat_op_api('std', float_frame, float_string_frame) + + result = datetime_frame.std(ddof=4) + expected = datetime_frame.apply(lambda x: x.std(ddof=4)) + tm.assert_almost_equal(result, expected) + + result = datetime_frame.var(ddof=4) + expected = datetime_frame.apply(lambda x: x.var(ddof=4)) + tm.assert_almost_equal(result, expected) + + arr = np.repeat(np.random.random((1, 1000)), 1000, 0) + result = nanops.nanvar(arr, axis=0) + assert not (result < 0).any() + + with pd.option_context('use_bottleneck', False): + result = nanops.nanvar(arr, axis=0) + assert not (result < 0).any() + + @pytest.mark.parametrize('op', ['mean', 'std', 'var', + 'skew', 'kurt', 'sem']) + def test_mixed_ops(self, op): + # GH#16116 + df = DataFrame({'int': [1, 2, 3, 4], + 'float': [1., 2., 3., 4.], + 'str': ['a', 'b', 'c', 'd']}) + + result = getattr(df, op)() + assert len(result) == 2 + + with pd.option_context('use_bottleneck', False): + result = getattr(df, op)() + assert len(result) == 2 + + @pytest.mark.parametrize("meth", ['sem', 'var', 'std']) + def test_numeric_only_flag(self, meth): + # GH#9201 + df1 = DataFrame(np.random.randn(5, 3), columns=['foo', 'bar', 'baz']) + # set one entry to a number in str format + df1.loc[0, 'foo'] = '100' + + df2 = DataFrame(np.random.randn(5, 3), columns=['foo', 'bar', 'baz']) + # set one entry to a non-number str + df2.loc[0, 'foo'] = 'a' + + result = getattr(df1, meth)(axis=1, numeric_only=True) + expected = getattr(df1[['bar', 'baz']], meth)(axis=1) + tm.assert_series_equal(expected, result) + + result = getattr(df2, meth)(axis=1, numeric_only=True) + expected = getattr(df2[['bar', 'baz']], meth)(axis=1) + tm.assert_series_equal(expected, result) + + # df1 has all numbers, df2 has a letter inside + with pytest.raises(TypeError): + getattr(df1, meth)(axis=1, numeric_only=False) + with pytest.raises(TypeError): + getattr(df2, meth)(axis=1, numeric_only=False) + + def test_sem(self): + + float_frame = 
DataFrame(tm.getSeriesData()) + datetime_frame = DataFrame(tm.getTimeSeriesData()) + float_frame_with_na = get_float_frame_with_na() + float_string_frame = get_float_string_frame() + + alt = lambda x: np.std(x, ddof=1) / np.sqrt(len(x)) + assert_stat_op_calc('sem', alt, float_frame_with_na) + assert_stat_op_api('sem', float_frame, float_string_frame) + + result = datetime_frame.sem(ddof=4) + expected = datetime_frame.apply( + lambda x: x.std(ddof=4) / np.sqrt(len(x))) + tm.assert_almost_equal(result, expected) + + arr = np.repeat(np.random.random((1, 1000)), 1000, 0) + result = nanops.nansem(arr, axis=0) + assert not (result < 0).any() + + with pd.option_context('use_bottleneck', False): + result = nanops.nansem(arr, axis=0) + assert not (result < 0).any() + + @td.skip_if_no_scipy + def test_skew(self): + from scipy.stats import skew + + float_frame = DataFrame(tm.getSeriesData()) + float_frame_with_na = get_float_frame_with_na() + float_string_frame = get_float_string_frame() + + def alt(x): + if len(x) < 3: + return np.nan + return skew(x, bias=False) + + assert_stat_op_calc('skew', alt, float_frame_with_na) + assert_stat_op_api('skew', float_frame, float_string_frame) + + @td.skip_if_no_scipy + def test_kurt(self): + from scipy.stats import kurtosis + + float_frame = DataFrame(tm.getSeriesData()) + float_frame_with_na = get_float_frame_with_na() + float_string_frame = get_float_string_frame() + + def alt(x): + if len(x) < 4: + return np.nan + return kurtosis(x, bias=False) + + assert_stat_op_calc('kurt', alt, float_frame_with_na) + assert_stat_op_api('kurt', float_frame, float_string_frame) + + index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]], + codes=[[0, 0, 0, 0, 0, 0], + [0, 1, 2, 0, 1, 2], + [0, 1, 0, 1, 0, 1]]) + df = DataFrame(np.random.randn(6, 3), index=index) + + kurt = df.kurt() + kurt2 = df.kurt(level=0).xs('bar') + tm.assert_series_equal(kurt, kurt2, check_names=False) + assert kurt.name is None + assert kurt2.name == 'bar' + + 
@pytest.mark.skipif(not compat.PY3, reason="only PY3") + def test_mode_sortwarning(self): + # Check for the warning that is raised when the mode + # results cannot be sorted + + df = DataFrame({"A": [np.nan, np.nan, 'a', 'a']}) + expected = DataFrame({'A': ['a', np.nan]}) + + with tm.assert_produces_warning(UserWarning, check_stacklevel=False): + result = df.mode(dropna=False) + result = result.sort_values(by='A').reset_index(drop=True) + + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("dropna, expected", [ + (True, {'A': [12], + 'B': [10.0], + 'C': [1.0], + 'D': ['a'], + 'E': Categorical(['a'], categories=['a']), + 'F': pd.to_datetime(['2000-1-2']), + 'G': pd.to_timedelta(['1 days'])}), + (False, {'A': [12], + 'B': [10.0], + 'C': [np.nan], + 'D': np.array([np.nan], dtype=object), + 'E': Categorical([np.nan], categories=['a']), + 'F': [pd.NaT], + 'G': pd.to_timedelta([pd.NaT])}), + (True, {'H': [8, 9, np.nan, np.nan], + 'I': [8, 9, np.nan, np.nan], + 'J': [1, np.nan, np.nan, np.nan], + 'K': Categorical(['a', np.nan, np.nan, np.nan], + categories=['a']), + 'L': pd.to_datetime(['2000-1-2', 'NaT', 'NaT', 'NaT']), + 'M': pd.to_timedelta(['1 days', 'nan', 'nan', 'nan']), + 'N': [0, 1, 2, 3]}), + (False, {'H': [8, 9, np.nan, np.nan], + 'I': [8, 9, np.nan, np.nan], + 'J': [1, np.nan, np.nan, np.nan], + 'K': Categorical([np.nan, 'a', np.nan, np.nan], + categories=['a']), + 'L': pd.to_datetime(['NaT', '2000-1-2', 'NaT', 'NaT']), + 'M': pd.to_timedelta(['nan', '1 days', 'nan', 'nan']), + 'N': [0, 1, 2, 3]}) + ]) + def test_mode_dropna(self, dropna, expected): + + df = DataFrame({"A": [12, 12, 19, 11], + "B": [10, 10, np.nan, 3], + "C": [1, np.nan, np.nan, np.nan], + "D": [np.nan, np.nan, 'a', np.nan], + "E": Categorical([np.nan, np.nan, 'a', np.nan]), + "F": pd.to_datetime(['NaT', '2000-1-2', 'NaT', 'NaT']), + "G": pd.to_timedelta(['1 days', 'nan', 'nan', 'nan']), + "H": [8, 8, 9, 9], + "I": [9, 9, 8, 8], + "J": [1, 1, np.nan, np.nan], + "K": 
Categorical(['a', np.nan, 'a', np.nan]), + "L": pd.to_datetime(['2000-1-2', '2000-1-2', + 'NaT', 'NaT']), + "M": pd.to_timedelta(['1 days', 'nan', + '1 days', 'nan']), + "N": np.arange(4, dtype='int64')}) + + result = df[sorted(list(expected.keys()))].mode(dropna=dropna) + expected = DataFrame(expected) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize('method', ['sum', 'mean', 'prod', 'var', + 'std', 'skew', 'min', 'max']) + def test_stat_operators_attempt_obj_array(self, method): + # GH#676 + data = { + 'a': [-0.00049987540199591344, -0.0016467257772919831, + 0.00067695870775883013], + 'b': [-0, -0, 0.0], + 'c': [0.00031111847529610595, 0.0014902627951905339, + -0.00094099200035979691] + } + df1 = DataFrame(data, index=['foo', 'bar', 'baz'], dtype='O') + + df2 = DataFrame({0: [np.nan, 2], 1: [np.nan, 3], + 2: [np.nan, 4]}, dtype=object) + + for df in [df1, df2]: + assert df.values.dtype == np.object_ + result = getattr(df, method)(1) + expected = getattr(df.astype('f8'), method)(1) + + if method in ['sum', 'prod']: + tm.assert_series_equal(result, expected) + + # ---------------------------------------------------------------- + # Sums + + def test_sum(self): + + float_frame = DataFrame(tm.getSeriesData()) + float_frame_with_na = get_float_frame_with_na() + float_string_frame = get_float_string_frame() + + mixed_float_frame = DataFrame(tm.getSeriesData()) + mixed_float_frame.A = mixed_float_frame.A.astype('float32') + mixed_float_frame.B = mixed_float_frame.B.astype('float32') + mixed_float_frame.C = mixed_float_frame.C.astype('float16') + mixed_float_frame.D = mixed_float_frame.D.astype('float64') + # TODO: the only place we use this casts to float32; is this useful? 
+ + assert_stat_op_api('sum', float_frame, float_string_frame, + has_numeric_only=True) + assert_stat_op_calc('sum', np.sum, float_frame_with_na, + skipna_alternative=np.nansum) + # mixed types (with upcasting happening) + assert_stat_op_calc('sum', np.sum, mixed_float_frame.astype('float32'), + check_dtype=False, check_less_precise=True) + + def test_sum_object(self): + float_frame = DataFrame(tm.getSeriesData()) + values = float_frame.values.astype(int) + frame = DataFrame(values, index=float_frame.index, + columns=float_frame.columns) + deltas = frame * timedelta(1) + deltas.sum() + + def test_sum_bool(self): + float_frame = DataFrame(tm.getSeriesData()) + + # ensure this works, bug report + bools = np.isnan(float_frame) + bools.sum(1) + bools.sum(0) + + @pytest.mark.parametrize('method, unit', [ + ('sum', 0), + ('prod', 1), + ]) + def test_sum_prod_nanops(self, method, unit): + idx = ['a', 'b', 'c'] + df = pd.DataFrame({"a": [unit, unit], + "b": [unit, np.nan], + "c": [np.nan, np.nan]}) + # The default + result = getattr(df, method) + expected = pd.Series([unit, unit, unit], index=idx, dtype='float64') + + # min_count=1 + result = getattr(df, method)(min_count=1) + expected = pd.Series([unit, unit, np.nan], index=idx) + tm.assert_series_equal(result, expected) + + # min_count=0 + result = getattr(df, method)(min_count=0) + expected = pd.Series([unit, unit, unit], index=idx, dtype='float64') + tm.assert_series_equal(result, expected) + + result = getattr(df.iloc[1:], method)(min_count=1) + expected = pd.Series([unit, np.nan, np.nan], index=idx) + tm.assert_series_equal(result, expected) + + # min_count > 1 + df = pd.DataFrame({"A": [unit] * 10, "B": [unit] * 5 + [np.nan] * 5}) + result = getattr(df, method)(min_count=5) + expected = pd.Series(result, index=['A', 'B']) + tm.assert_series_equal(result, expected) + + result = getattr(df, method)(min_count=6) + expected = pd.Series(result, index=['A', 'B']) + tm.assert_series_equal(result, expected) + + def 
test_sum_nanops_timedelta(self): + # prod isn't defined on timedeltas + idx = ['a', 'b', 'c'] + df = pd.DataFrame({"a": [0, 0], + "b": [0, np.nan], + "c": [np.nan, np.nan]}) + + df2 = df.apply(pd.to_timedelta) + + # 0 by default + result = df2.sum() + expected = pd.Series([0, 0, 0], dtype='m8[ns]', index=idx) + tm.assert_series_equal(result, expected) + + # min_count=0 + result = df2.sum(min_count=0) + tm.assert_series_equal(result, expected) + + # min_count=1 + result = df2.sum(min_count=1) + expected = pd.Series([0, 0, np.nan], dtype='m8[ns]', index=idx) + tm.assert_series_equal(result, expected) + + def test_sum_corner(self): + empty_frame = DataFrame({}) + + axis0 = empty_frame.sum(0) + axis1 = empty_frame.sum(1) + assert isinstance(axis0, Series) + assert isinstance(axis1, Series) + assert len(axis0) == 0 + assert len(axis1) == 0 + + def test_sum_bools(self): + df = pd.DataFrame(index=lrange(1), columns=lrange(10)) + bools = isna(df) + assert bools.sum(axis=1)[0] == 10 + + # ---------------------------------------------------------------- + + def test_product(self): + + float_frame = DataFrame(tm.getSeriesData()) + float_frame_with_na = get_float_frame_with_na() + float_string_frame = get_float_string_frame() + + assert_stat_op_calc('product', np.prod, float_frame_with_na) + assert_stat_op_api('product', float_frame, float_string_frame)
Tests are mostly unchanged: avoid singleton fixtures, use context syntax for pytest.raises.
https://api.github.com/repos/pandas-dev/pandas/pulls/24761
2019-01-14T02:01:43Z
2019-01-14T13:36:23Z
null
2020-04-05T17:44:11Z
BUG: DataFrame.min/max with axis=1 and uniform datetime64[ns, tz] types does not return NaNs
diff --git a/doc/source/whatsnew/v0.24.0.rst b/doc/source/whatsnew/v0.24.0.rst index 5213120b33f06..61902ede045ef 100644 --- a/doc/source/whatsnew/v0.24.0.rst +++ b/doc/source/whatsnew/v0.24.0.rst @@ -1647,6 +1647,7 @@ Timezones - Bug in :meth:`DataFrame.any` returns wrong value when ``axis=1`` and the data is of datetimelike type (:issue:`23070`) - Bug in :meth:`DatetimeIndex.to_period` where a timezone aware index was converted to UTC first before creating :class:`PeriodIndex` (:issue:`22905`) - Bug in :meth:`DataFrame.tz_localize`, :meth:`DataFrame.tz_convert`, :meth:`Series.tz_localize`, and :meth:`Series.tz_convert` where ``copy=False`` would mutate the original argument inplace (:issue:`6326`) +- Bug in :meth:`DataFrame.max` and :meth:`DataFrame.min` with ``axis=1`` where a :class:`Series` with ``NaN`` would be returned when all columns contained the same timezone (:issue:`10390`) Offsets ^^^^^^^ diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 7bbbdd70e062e..309fb3b841461 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -49,6 +49,7 @@ find_common_type) from pandas.core.dtypes.common import ( is_dict_like, + is_datetime64tz_dtype, is_object_dtype, is_extension_type, is_extension_array_dtype, @@ -7390,7 +7391,9 @@ def f(x): return op(x, axis=axis, skipna=skipna, **kwds) # exclude timedelta/datetime unless we are uniform types - if axis == 1 and self._is_mixed_type and self._is_datelike_mixed_type: + if (axis == 1 and self._is_datelike_mixed_type + and (not self._is_homogeneous_type + and not is_datetime64tz_dtype(self.dtypes[0]))): numeric_only = True if numeric_only is None: diff --git a/pandas/tests/reductions/test_reductions.py b/pandas/tests/reductions/test_reductions.py index d27308029fa19..b269fca6f9ea6 100644 --- a/pandas/tests/reductions/test_reductions.py +++ b/pandas/tests/reductions/test_reductions.py @@ -135,6 +135,19 @@ def test_nanops(self): assert obj.argmin(skipna=False) == -1 assert obj.argmax(skipna=False) == -1 + 
@pytest.mark.parametrize('op, expected_col', [ + ['max', 'a'], ['min', 'b'] + ]) + def test_same_tz_min_max_axis_1(self, op, expected_col): + # GH 10390 + df = DataFrame(pd.date_range('2016-01-01 00:00:00', periods=3, + tz='UTC'), + columns=['a']) + df['b'] = df.a.subtract(pd.Timedelta(seconds=3600)) + result = getattr(df, op)(axis=1) + expected = df[expected_col] + tm.assert_series_equal(result, expected) + class TestSeriesReductions(object): # Note: the name TestSeriesReductions indicates these tests
- [x] closes #10390 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry Essentially, check that all blocks are are datetimetz type and the same timezone before performing `max`/`min` since datetimetz blocks are only 1D
https://api.github.com/repos/pandas-dev/pandas/pulls/24759
2019-01-14T00:28:56Z
2019-01-16T01:36:21Z
2019-01-16T01:36:21Z
2019-01-16T05:11:24Z
COMPAT: Properly encode filenames in read_csv
diff --git a/doc/source/whatsnew/v0.24.0.rst b/doc/source/whatsnew/v0.24.0.rst index a18739f4b26bf..5213120b33f06 100644 --- a/doc/source/whatsnew/v0.24.0.rst +++ b/doc/source/whatsnew/v0.24.0.rst @@ -1790,6 +1790,7 @@ I/O - Bug in :meth:`DataFrame.to_dict` when the resulting dict contains non-Python scalars in the case of numeric data (:issue:`23753`) - :func:`DataFrame.to_string()`, :func:`DataFrame.to_html()`, :func:`DataFrame.to_latex()` will correctly format output when a string is passed as the ``float_format`` argument (:issue:`21625`, :issue:`22270`) - Bug in :func:`read_csv` that caused it to raise ``OverflowError`` when trying to use 'inf' as ``na_value`` with integer index column (:issue:`17128`) +- Bug in :func:`read_csv` that caused the C engine on Python 3.6+ on Windows to improperly read CSV filenames with accented or special characters (:issue:`15086`) - Bug in :func:`read_fwf` in which the compression type of a file was not being properly inferred (:issue:`22199`) - Bug in :func:`pandas.io.json.json_normalize` that caused it to raise ``TypeError`` when two consecutive elements of ``record_path`` are dicts (:issue:`22706`) - Bug in :meth:`DataFrame.to_stata`, :class:`pandas.io.stata.StataWriter` and :class:`pandas.io.stata.StataWriter117` where a exception would leave a partially written and invalid dta file (:issue:`23573`) diff --git a/pandas/_libs/parsers.pyx b/pandas/_libs/parsers.pyx index e0fcf102701f4..6cb6ed749f87b 100644 --- a/pandas/_libs/parsers.pyx +++ b/pandas/_libs/parsers.pyx @@ -677,7 +677,13 @@ cdef class TextReader: if isinstance(source, basestring): if not isinstance(source, bytes): - source = source.encode(sys.getfilesystemencoding() or 'utf-8') + if compat.PY36 and compat.is_platform_windows(): + # see gh-15086. 
+ encoding = "mbcs" + else: + encoding = sys.getfilesystemencoding() or "utf-8" + + source = source.encode(encoding) if self.memory_map: ptr = new_mmap(source) diff --git a/pandas/tests/io/parser/test_common.py b/pandas/tests/io/parser/test_common.py index 6860452f5ccc4..d87ef7cd15a64 100644 --- a/pandas/tests/io/parser/test_common.py +++ b/pandas/tests/io/parser/test_common.py @@ -1904,6 +1904,18 @@ def test_suppress_error_output(all_parsers, capsys): assert captured.err == "" +def test_filename_with_special_chars(all_parsers): + # see gh-15086. + parser = all_parsers + df = DataFrame({"a": [1, 2, 3]}) + + with tm.ensure_clean("sé-es-vé.csv") as path: + df.to_csv(path, index=False) + + result = parser.read_csv(path) + tm.assert_frame_equal(result, df) + + def test_read_table_deprecated(all_parsers): # see gh-21948 parser = all_parsers
Python 3.6+ changes the default encoding to `utf8` ([PEP 529](https://www.python.org/dev/peps/pep-0529/)), which conflicts with the encoding of Windows (`mbcs`). This conflicts rears its head when using only the C engine of `read_csv` because of the low level functionality that we implement ourselves. This fix checks if we're using Python 3.6+ and on Windows, after which we force the encoding to `mbcs` Closes #15086.
https://api.github.com/repos/pandas-dev/pandas/pulls/24758
2019-01-13T23:42:56Z
2019-01-14T13:36:13Z
2019-01-14T13:36:12Z
2019-01-14T17:59:14Z
implement+test mean for datetimelike EA/Index/Series
diff --git a/doc/source/reference/indexing.rst b/doc/source/reference/indexing.rst index 42ebf648f299f..6d27e225b681e 100644 --- a/doc/source/reference/indexing.rst +++ b/doc/source/reference/indexing.rst @@ -403,6 +403,13 @@ Conversion DatetimeIndex.to_series DatetimeIndex.to_frame +Methods +~~~~~~~ +.. autosummary:: + :toctree: api/ + + DatetimeIndex.mean + TimedeltaIndex -------------- .. autosummary:: @@ -435,6 +442,13 @@ Conversion TimedeltaIndex.ceil TimedeltaIndex.to_frame +Methods +~~~~~~~ +.. autosummary:: + :toctree: api/ + + TimedeltaIndex.mean + .. currentmodule:: pandas PeriodIndex diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst index 0e8cd95084a8d..6794c0c01e653 100644 --- a/doc/source/whatsnew/v0.25.0.rst +++ b/doc/source/whatsnew/v0.25.0.rst @@ -82,6 +82,7 @@ Other Enhancements - :meth:`DataFrame.query` and :meth:`DataFrame.eval` now supports quoting column names with backticks to refer to names with spaces (:issue:`6508`) - :func:`merge_asof` now gives a more clear error message when merge keys are categoricals that are not equal (:issue:`26136`) - :meth:`pandas.core.window.Rolling` supports exponential (or Poisson) window type (:issue:`21303`) +- :class:`DatetimeIndex` and :class:`TimedeltaIndex` now have a `mean` method (:issue:`24757`) - .. 
_whatsnew_0250.api_breaking: diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index c32f8642dc2ed..61594be8ec385 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -1382,7 +1382,7 @@ def _ensure_localized(self, arg, ambiguous='raise', nonexistent='raise', def _reduce(self, name, axis=0, skipna=True, **kwargs): op = getattr(self, name, None) if op: - return op(axis=axis, skipna=skipna, **kwargs) + return op(skipna=skipna, **kwargs) else: return super()._reduce(name, skipna, **kwargs) @@ -1438,6 +1438,54 @@ def max(self, axis=None, skipna=True, *args, **kwargs): # Don't have to worry about NA `result`, since no NA went in. return self._box_func(result) + def mean(self, skipna=True): + """ + Return the mean value of the Array. + + .. versionadded:: 0.25.0 + + Parameters + ---------- + skipna : bool, default True + Whether to ignore any NaT elements + + Returns + ------- + scalar (Timestamp or Timedelta) + + See Also + -------- + numpy.ndarray.mean + Series.mean : Return the mean value in a Series. + + Notes + ----- + mean is only defined for Datetime and Timedelta dtypes, not for Period. + """ + if is_period_dtype(self): + # See discussion in GH#24757 + raise TypeError( + "mean is not implemented for {cls} since the meaning is " + "ambiguous. An alternative is " + "obj.to_timestamp(how='start').mean()" + .format(cls=type(self).__name__)) + + mask = self.isna() + if skipna: + values = self[~mask] + elif mask.any(): + return NaT + else: + values = self + + if not len(values): + # short-circut for empty max / min + return NaT + + result = nanops.nanmean(values.view('i8'), skipna=skipna) + # Don't have to worry about NA `result`, since no NA went in. 
+ return self._box_func(result) + # ------------------------------------------------------------------- # Shared Constructor Helpers diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index 092cec00228cd..6491a98812b8f 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -73,6 +73,7 @@ class DatetimeIndexOpsMixin(ExtensionOpsMixin): _maybe_mask_results = ea_passthrough( DatetimeLikeArrayMixin._maybe_mask_results) __iter__ = ea_passthrough(DatetimeLikeArrayMixin.__iter__) + mean = ea_passthrough(DatetimeLikeArrayMixin.mean) @property def freq(self): diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 1bf3cb86811cb..f122a6525237c 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -203,6 +203,7 @@ class DatetimeIndex(DatetimeIndexOpsMixin, Int64Index, DatetimeDelegateMixin): to_frame month_name day_name + mean See Also -------- diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py index 0574a4b41c920..f5362c0b6bb5d 100644 --- a/pandas/core/indexes/timedeltas.py +++ b/pandas/core/indexes/timedeltas.py @@ -129,6 +129,7 @@ class TimedeltaIndex(DatetimeIndexOpsMixin, dtl.TimelikeOps, Int64Index, floor ceil to_frame + mean See Also -------- diff --git a/pandas/core/series.py b/pandas/core/series.py index 8fb6ad3e3ccc5..37f74b35ba2a8 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -3729,6 +3729,10 @@ def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None, elif is_datetime64_dtype(delegate): # use DatetimeIndex implementation to handle skipna correctly delegate = DatetimeIndex(delegate) + elif is_timedelta64_dtype(delegate) and hasattr(TimedeltaIndex, name): + # use TimedeltaIndex to handle skipna correctly + # TODO: remove hasattr check after TimedeltaIndex has `std` method + delegate = TimedeltaIndex(delegate) # dispatch to numpy arrays elif isinstance(delegate, 
np.ndarray): diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_analytics.py index 487ff7932ec5f..568b229435434 100644 --- a/pandas/tests/frame/test_analytics.py +++ b/pandas/tests/frame/test_analytics.py @@ -1205,6 +1205,47 @@ def test_mean_corner(self, float_frame, float_string_frame): means = float_frame.mean(0) assert means['bool'] == float_frame['bool'].values.mean() + def test_mean_datetimelike(self): + # GH#24757 check that datetimelike are excluded by default, handled + # correctly with numeric_only=True + + df = pd.DataFrame({ + 'A': np.arange(3), + 'B': pd.date_range('2016-01-01', periods=3), + 'C': pd.timedelta_range('1D', periods=3), + 'D': pd.period_range('2016', periods=3, freq='A') + }) + result = df.mean(numeric_only=True) + expected = pd.Series({'A': 1.}) + tm.assert_series_equal(result, expected) + + result = df.mean() + expected = pd.Series({ + 'A': 1., + 'C': df.loc[1, 'C'] + }) + tm.assert_series_equal(result, expected) + + @pytest.mark.xfail(reason="casts to object-dtype and then tries to " + "add timestamps", + raises=TypeError, strict=True) + def test_mean_datetimelike_numeric_only_false(self): + df = pd.DataFrame({ + 'A': np.arange(3), + 'B': pd.date_range('2016-01-01', periods=3), + 'C': pd.timedelta_range('1D', periods=3), + 'D': pd.period_range('2016', periods=3, freq='A') + }) + + result = df.mean(numeric_only=False) + expected = pd.Series({ + 'A': 1, + 'B': df.loc[1, 'B'], + 'C': df.loc[1, 'C'], + 'D': df.loc[1, 'D'] + }) + tm.assert_series_equal(result, expected) + def test_stats_mixed_type(self, float_string_frame): # don't blow up float_string_frame.std(1) diff --git a/pandas/tests/reductions/test_stat_reductions.py b/pandas/tests/reductions/test_stat_reductions.py index 223904048dd99..b0fd2f290031e 100644 --- a/pandas/tests/reductions/test_stat_reductions.py +++ b/pandas/tests/reductions/test_stat_reductions.py @@ -10,9 +10,78 @@ import pandas as pd from pandas import DataFrame, Series +from 
pandas.core.arrays import DatetimeArray, PeriodArray, TimedeltaArray import pandas.util.testing as tm +class TestDatetimeLikeStatReductions: + + @pytest.mark.parametrize('box', [Series, pd.Index, DatetimeArray]) + def test_dt64_mean(self, tz_naive_fixture, box): + tz = tz_naive_fixture + + dti = pd.date_range('2001-01-01', periods=11, tz=tz) + # shuffle so that we are not just working with monotone-increasing + dti = dti.take([4, 1, 3, 10, 9, 7, 8, 5, 0, 2, 6]) + dtarr = dti._data + + obj = box(dtarr) + assert obj.mean() == pd.Timestamp('2001-01-06', tz=tz) + assert obj.mean(skipna=False) == pd.Timestamp('2001-01-06', tz=tz) + + # dtarr[-2] will be the first date 2001-01-1 + dtarr[-2] = pd.NaT + + obj = box(dtarr) + assert obj.mean() == pd.Timestamp('2001-01-06 07:12:00', tz=tz) + assert obj.mean(skipna=False) is pd.NaT + + @pytest.mark.parametrize('box', [Series, pd.Index, PeriodArray]) + def test_period_mean(self, box): + # GH#24757 + dti = pd.date_range('2001-01-01', periods=11) + # shuffle so that we are not just working with monotone-increasing + dti = dti.take([4, 1, 3, 10, 9, 7, 8, 5, 0, 2, 6]) + + # use hourly frequency to avoid rounding errors in expected results + # TODO: flesh this out with different frequencies + parr = dti._data.to_period('H') + obj = box(parr) + with pytest.raises(TypeError, match="ambiguous"): + obj.mean() + with pytest.raises(TypeError, match="ambiguous"): + obj.mean(skipna=True) + + # parr[-2] will be the first date 2001-01-1 + parr[-2] = pd.NaT + + with pytest.raises(TypeError, match="ambiguous"): + obj.mean() + with pytest.raises(TypeError, match="ambiguous"): + obj.mean(skipna=True) + + @pytest.mark.parametrize('box', [Series, pd.Index, TimedeltaArray]) + def test_td64_mean(self, box): + tdi = pd.TimedeltaIndex([0, 3, -2, -7, 1, 2, -1, 3, 5, -2, 4], + unit='D') + + tdarr = tdi._data + obj = box(tdarr) + + result = obj.mean() + expected = np.array(tdarr).mean() + assert result == expected + + tdarr[0] = pd.NaT + assert 
obj.mean(skipna=False) is pd.NaT + + result2 = obj.mean(skipna=True) + assert result2 == tdi[1:].mean() + + # exact equality fails by 1 nanosecond + assert result2.round('us') == (result * 11. / 10).round('us') + + class TestSeriesStatReductions: # Note: the name TestSeriesStatReductions indicates these tests # were moved from a series-specific test file, _not_ that these tests are
This takes over from #23890, implementing+testing reductions one at a time. - [ ] closes #xxxx - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/24757
2019-01-13T23:41:52Z
2019-06-10T17:31:06Z
2019-06-10T17:31:06Z
2019-06-27T20:10:27Z
CI: Pinning flake8-rst, last version raises incorrect errors
diff --git a/environment.yml b/environment.yml index a980499029478..7a177cfee3d39 100644 --- a/environment.yml +++ b/environment.yml @@ -14,7 +14,7 @@ dependencies: - cython>=0.28.2 - flake8 - flake8-comprehensions - - flake8-rst>=0.6.0 + - flake8-rst>=0.6.0,<=0.7.0 - gitpython - hypothesis>=3.82 - isort diff --git a/requirements-dev.txt b/requirements-dev.txt index 48bd95470d391..ba78430a4b19e 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -5,7 +5,7 @@ asv cython>=0.28.2 flake8 flake8-comprehensions -flake8-rst>=0.6.0 +flake8-rst>=0.6.0,<=0.7.0 gitpython hypothesis>=3.82 isort
- [X] closes #24755 - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry CC: @jreback @FHaase
https://api.github.com/repos/pandas-dev/pandas/pulls/24756
2019-01-13T22:14:47Z
2019-01-13T23:45:56Z
2019-01-13T23:45:56Z
2019-01-13T23:45:57Z
CategoricalAccessor.categorical removed in 0.24.0rc1 #24751
diff --git a/doc/source/whatsnew/v0.24.0.rst b/doc/source/whatsnew/v0.24.0.rst index 5213120b33f06..da581348d7592 100644 --- a/doc/source/whatsnew/v0.24.0.rst +++ b/doc/source/whatsnew/v0.24.0.rst @@ -1309,6 +1309,7 @@ Deprecations - :meth:`Series.clip_lower`, :meth:`Series.clip_upper`, :meth:`DataFrame.clip_lower` and :meth:`DataFrame.clip_upper` are deprecated and will be removed in a future version. Use ``Series.clip(lower=threshold)``, ``Series.clip(upper=threshold)`` and the equivalent ``DataFrame`` methods (:issue:`24203`) - :meth:`Series.nonzero` is deprecated and will be removed in a future version (:issue:`18262`) - Passing an integer to :meth:`Series.fillna` and :meth:`DataFrame.fillna` with ``timedelta64[ns]`` dtypes is deprecated, will raise ``TypeError`` in a future version. Use ``obj.fillna(pd.Timedelta(...))`` instead (:issue:`24694`) +- ``Series.cat.categorical``, ``Series.cat.name`` and ``Sersies.cat.index`` have been deprecated. Use the attributes on ``Series.cat`` or ``Series`` directly. (:issue:`24751`). .. _whatsnew_0240.deprecations.datetimelike_int_ops: diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index 1368232470402..35b662eaae9a5 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -2498,8 +2498,8 @@ class CategoricalAccessor(PandasDelegate, PandasObject, NoNewAttributesMixin): def __init__(self, data): self._validate(data) self._parent = data.values - self.index = data.index - self.name = data.name + self._index = data.index + self._name = data.name self._freeze() @staticmethod @@ -2520,15 +2520,47 @@ def codes(self): Return Series of codes as well as the index. 
""" from pandas import Series - return Series(self._parent.codes, index=self.index) + return Series(self._parent.codes, index=self._index) def _delegate_method(self, name, *args, **kwargs): from pandas import Series method = getattr(self._parent, name) res = method(*args, **kwargs) if res is not None: - return Series(res, index=self.index, name=self.name) + return Series(res, index=self._index, name=self._name) + @property + def categorical(self): + # Note: Upon deprecation, `test_tab_completion_with_categorical` will + # need to be updated. `categorical` will need to be removed from + # `ok_for_cat`. + warn("`Series.cat.categorical` has been deprecated. Use the " + "attributes on 'Series.cat' directly instead.", + FutureWarning, + stacklevel=2) + return self._parent + + @property + def name(self): + # Note: Upon deprecation, `test_tab_completion_with_categorical` will + # need to be updated. `name` will need to be removed from + # `ok_for_cat`. + warn("`Series.cat.name` has been deprecated. Use `Series.name` " + "instead.", + FutureWarning, + stacklevel=2) + return self._name + + @property + def index(self): + # Note: Upon deprecation, `test_tab_completion_with_categorical` will + # need to be updated. `index` will need to be removed from + # ok_for_cat`. + warn("`Series.cat.index` has been deprecated. 
Use `Series.index` " + "instead.", + FutureWarning, + stacklevel=2) + return self._index # utility routines diff --git a/pandas/tests/arrays/categorical/test_warnings.py b/pandas/tests/arrays/categorical/test_warnings.py index 91278580254aa..23d00585f950e 100644 --- a/pandas/tests/arrays/categorical/test_warnings.py +++ b/pandas/tests/arrays/categorical/test_warnings.py @@ -2,6 +2,7 @@ import pytest +import pandas as pd import pandas.util.testing as tm @@ -16,3 +17,15 @@ def test_tab_complete_warning(self, ip): with tm.assert_produces_warning(None): with provisionalcompleter('ignore'): list(ip.Completer.completions('c.', 1)) + + def test_CategoricalAccessor_categorical_deprecation(object): + with tm.assert_produces_warning(FutureWarning): + pd.Series(['a', 'b'], dtype='category').cat.categorical + + def test_CategoricalAccessor_name_deprecation(object): + with tm.assert_produces_warning(FutureWarning): + pd.Series(['a', 'b'], dtype='category').cat.name + + def test_CategoricalAccessor_index_deprecation(object): + with tm.assert_produces_warning(FutureWarning): + pd.Series(['a', 'b'], dtype='category').cat.index diff --git a/pandas/tests/series/test_api.py b/pandas/tests/series/test_api.py index 09e556af883c1..c95cf125e22f7 100644 --- a/pandas/tests/series/test_api.py +++ b/pandas/tests/series/test_api.py @@ -245,10 +245,11 @@ def test_tab_completion(self): def test_tab_completion_with_categorical(self): # test the tab completion display - ok_for_cat = ['categories', 'codes', 'ordered', 'set_categories', - 'add_categories', 'remove_categories', - 'rename_categories', 'reorder_categories', - 'remove_unused_categories', 'as_ordered', 'as_unordered'] + ok_for_cat = ['name', 'index', 'categorical', 'categories', 'codes', + 'ordered', 'set_categories', 'add_categories', + 'remove_categories', 'rename_categories', + 'reorder_categories', 'remove_unused_categories', + 'as_ordered', 'as_unordered'] def get_dir(s): results = [r for r in s.cat.__dir__() if not 
r.startswith('_')] diff --git a/pandas/tests/test_config.py b/pandas/tests/test_config.py index 2cdcb948eb917..6e47f5543012f 100644 --- a/pandas/tests/test_config.py +++ b/pandas/tests/test_config.py @@ -251,7 +251,6 @@ def test_deprecate_option(self): KeyError, message="Nonexistent option didn't raise KeyError"): self.cf.get_option('foo') - assert len(w) == 1 # should have raised one warning assert 'deprecated' in str(w[-1]) # we get the default message
- [ ] closes #24751 - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/24754
2019-01-13T20:43:46Z
2019-01-15T22:53:17Z
2019-01-15T22:53:16Z
2019-01-15T22:53:21Z
STY: use pytest.raises context syntax (series/indexing/*)
diff --git a/pandas/tests/series/indexing/test_alter_index.py b/pandas/tests/series/indexing/test_alter_index.py index e2cffe653d935..a826a0644fa78 100644 --- a/pandas/tests/series/indexing/test_alter_index.py +++ b/pandas/tests/series/indexing/test_alter_index.py @@ -243,7 +243,10 @@ def test_reindex_corner(test_data): # bad fill method ts = test_data.ts[::2] - pytest.raises(Exception, ts.reindex, test_data.ts.index, method='foo') + msg = (r"Invalid fill method\. Expecting pad \(ffill\), backfill" + r" \(bfill\) or nearest\. Got foo") + with pytest.raises(ValueError, match=msg): + ts.reindex(test_data.ts.index, method='foo') def test_reindex_pad(): diff --git a/pandas/tests/series/indexing/test_boolean.py b/pandas/tests/series/indexing/test_boolean.py index 9d024bffa3240..89b481b92b73f 100644 --- a/pandas/tests/series/indexing/test_boolean.py +++ b/pandas/tests/series/indexing/test_boolean.py @@ -49,10 +49,12 @@ def test_getitem_boolean_empty(): # invalid because of the boolean indexer # that's empty or not-aligned - with pytest.raises(IndexingError): + msg = (r"Unalignable boolean Series provided as indexer \(index of" + r" the boolean Series and of the indexed object do not match") + with pytest.raises(IndexingError, match=msg): s[Series([], dtype=bool)] - with pytest.raises(IndexingError): + with pytest.raises(IndexingError, match=msg): s[Series([True], dtype=bool)] @@ -77,8 +79,11 @@ def test_getitem_boolean_object(test_data): # nans raise exception omask[5:10] = np.nan - pytest.raises(Exception, s.__getitem__, omask) - pytest.raises(Exception, s.__setitem__, omask, 5) + msg = "cannot index with vector containing NA / NaN values" + with pytest.raises(ValueError, match=msg): + s[omask] + with pytest.raises(ValueError, match=msg): + s[omask] = 5 def test_getitem_setitem_boolean_corner(test_data): @@ -87,15 +92,17 @@ def test_getitem_setitem_boolean_corner(test_data): # these used to raise...?? 
- pytest.raises(Exception, ts.__getitem__, mask_shifted) - pytest.raises(Exception, ts.__setitem__, mask_shifted, 1) - # ts[mask_shifted] - # ts[mask_shifted] = 1 + msg = (r"Unalignable boolean Series provided as indexer \(index of" + r" the boolean Series and of the indexed object do not match") + with pytest.raises(IndexingError, match=msg): + ts[mask_shifted] + with pytest.raises(IndexingError, match=msg): + ts[mask_shifted] = 1 - pytest.raises(Exception, ts.loc.__getitem__, mask_shifted) - pytest.raises(Exception, ts.loc.__setitem__, mask_shifted, 1) - # ts.loc[mask_shifted] - # ts.loc[mask_shifted] = 2 + with pytest.raises(IndexingError, match=msg): + ts.loc[mask_shifted] + with pytest.raises(IndexingError, match=msg): + ts.loc[mask_shifted] = 1 def test_setitem_boolean(test_data): @@ -168,14 +175,13 @@ def test_where_unsafe_upcast(dtype): @pytest.mark.parametrize("dtype", [ np.int8, np.int16, np.int32, np.float32 ]) -def test_where_unsafe_itemsize_fail(dtype): - # Can't do these, as we are forced to change the - # item size of the input to something we cannot. 
+def test_where_upcast(dtype): + # see gh-9743 s = Series(np.arange(10), dtype=dtype) mask = s < 5 values = [2.5, 3.5, 4.5, 5.5, 6.5] - pytest.raises(Exception, s.__setitem__, tuple(mask), values) + s[mask] = values def test_where_unsafe(): @@ -206,10 +212,11 @@ def test_where_unsafe(): s = Series(np.arange(10)) mask = s > 5 - with pytest.raises(ValueError): + msg = "cannot assign mismatch length to masked array" + with pytest.raises(ValueError, match=msg): s[mask] = [5, 4, 3, 2, 1] - with pytest.raises(ValueError): + with pytest.raises(ValueError, match=msg): s[mask] = [0] * 5 # dtype changes @@ -276,8 +283,11 @@ def test_where_error(): s = Series(np.random.randn(5)) cond = s > 0 - pytest.raises(ValueError, s.where, 1) - pytest.raises(ValueError, s.where, cond[:3].values, -s) + msg = "Array conditional must be same shape as self" + with pytest.raises(ValueError, match=msg): + s.where(1) + with pytest.raises(ValueError, match=msg): + s.where(cond[:3].values, -s) # GH 2745 s = Series([1, 2]) @@ -286,10 +296,13 @@ def test_where_error(): assert_series_equal(s, expected) # failures - pytest.raises(ValueError, s.__setitem__, tuple([[[True, False]]]), - [0, 2, 3]) - pytest.raises(ValueError, s.__setitem__, tuple([[[True, False]]]), - []) + msg = "cannot assign mismatch length to masked array" + with pytest.raises(ValueError, match=msg): + s[[True, False]] = [0, 2, 3] + msg = ("NumPy boolean array indexing assignment cannot assign 0 input" + " values to the 1 output values where the mask is true") + with pytest.raises(ValueError, match=msg): + s[[True, False]] = [] @pytest.mark.parametrize('klass', [list, tuple, np.array, Series]) @@ -349,10 +362,13 @@ def test_where_setitem_invalid(): # GH 2702 # make sure correct exceptions are raised on invalid list assignment + msg = ("cannot set using a {} indexer with a different length than" + " the value") + # slice s = Series(list('abc')) - with pytest.raises(ValueError): + with pytest.raises(ValueError, 
match=msg.format('slice')): s[0:3] = list(range(27)) s[0:3] = list(range(3)) @@ -362,7 +378,7 @@ def test_where_setitem_invalid(): # slice with step s = Series(list('abcdef')) - with pytest.raises(ValueError): + with pytest.raises(ValueError, match=msg.format('slice')): s[0:4:2] = list(range(27)) s = Series(list('abcdef')) @@ -373,7 +389,7 @@ def test_where_setitem_invalid(): # neg slices s = Series(list('abcdef')) - with pytest.raises(ValueError): + with pytest.raises(ValueError, match=msg.format('slice')): s[:-1] = list(range(27)) s[-3:-1] = list(range(2)) @@ -383,12 +399,12 @@ def test_where_setitem_invalid(): # list s = Series(list('abc')) - with pytest.raises(ValueError): + with pytest.raises(ValueError, match=msg.format('list-like')): s[[0, 1, 2]] = list(range(27)) s = Series(list('abc')) - with pytest.raises(ValueError): + with pytest.raises(ValueError, match=msg.format('list-like')): s[[0, 1, 2]] = list(range(2)) # scalar @@ -590,8 +606,11 @@ def test_mask(): rs2 = s2.mask(cond[:3], -s2) assert_series_equal(rs, rs2) - pytest.raises(ValueError, s.mask, 1) - pytest.raises(ValueError, s.mask, cond[:3].values, -s) + msg = "Array conditional must be same shape as self" + with pytest.raises(ValueError, match=msg): + s.mask(1) + with pytest.raises(ValueError, match=msg): + s.mask(cond[:3].values, -s) # dtype changes s = Series([1, 2, 3, 4]) diff --git a/pandas/tests/series/indexing/test_datetime.py b/pandas/tests/series/indexing/test_datetime.py index 21395f6004760..0efc9feb0dbd4 100644 --- a/pandas/tests/series/indexing/test_datetime.py +++ b/pandas/tests/series/indexing/test_datetime.py @@ -33,8 +33,8 @@ def test_fancy_getitem(): assert s['2009-1-2'] == 48 assert s[datetime(2009, 1, 2)] == 48 assert s[Timestamp(datetime(2009, 1, 2))] == 48 - pytest.raises(KeyError, s.__getitem__, '2009-1-3') - + with pytest.raises(KeyError, match=r"^'2009-1-3'$"): + s['2009-1-3'] assert_series_equal(s['3/6/2009':'2009-06-05'], s[datetime(2009, 3, 6):datetime(2009, 6, 5)]) @@ 
-298,7 +298,8 @@ def test_getitem_setitem_datetimeindex(): lb = datetime(1990, 1, 1, 4) rb = datetime(1990, 1, 1, 7) - with pytest.raises(TypeError): + msg = "Cannot compare tz-naive and tz-aware datetime-like objects" + with pytest.raises(TypeError, match=msg): # tznaive vs tzaware comparison is invalid # see GH#18376, GH#18162 ts[(ts.index >= lb) & (ts.index <= rb)] @@ -400,7 +401,8 @@ def test_datetime_indexing(): s = Series(len(index), index=index) stamp = Timestamp('1/8/2000') - pytest.raises(KeyError, s.__getitem__, stamp) + with pytest.raises(KeyError, match=r"^947289600000000000L?$"): + s[stamp] s[stamp] = 0 assert s[stamp] == 0 @@ -408,7 +410,8 @@ def test_datetime_indexing(): s = Series(len(index), index=index) s = s[::-1] - pytest.raises(KeyError, s.__getitem__, stamp) + with pytest.raises(KeyError, match=r"^947289600000000000L?$"): + s[stamp] s[stamp] = 0 assert s[stamp] == 0 @@ -499,7 +502,8 @@ def test_duplicate_dates_indexing(dups): expected = Series(np.where(mask, 0, ts), index=ts.index) assert_series_equal(cp, expected) - pytest.raises(KeyError, ts.__getitem__, datetime(2000, 1, 6)) + with pytest.raises(KeyError, match=r"^947116800000000000L?$"): + ts[datetime(2000, 1, 6)] # new index ts[datetime(2000, 1, 6)] = 0 @@ -664,8 +668,11 @@ def test_indexing(): expected = df.loc[[df.index[2]]] # this is a single date, so will raise - pytest.raises(KeyError, df.__getitem__, '2012-01-02 18:01:02', ) - pytest.raises(KeyError, df.__getitem__, df.index[2], ) + with pytest.raises(KeyError, match=r"^'2012-01-02 18:01:02'$"): + df['2012-01-02 18:01:02'] + msg = r"Timestamp\('2012-01-02 18:01:02-0600', tz='US/Central', freq='S'\)" + with pytest.raises(KeyError, match=msg): + df[df.index[2]] """ diff --git a/pandas/tests/series/indexing/test_indexing.py b/pandas/tests/series/indexing/test_indexing.py index 92c41f65eb831..a5855f68127f4 100644 --- a/pandas/tests/series/indexing/test_indexing.py +++ b/pandas/tests/series/indexing/test_indexing.py @@ -24,15 +24,24 @@ 
def test_basic_indexing(): s = Series(np.random.randn(5), index=['a', 'b', 'a', 'a', 'b']) - pytest.raises(IndexError, s.__getitem__, 5) - pytest.raises(IndexError, s.__setitem__, 5, 0) + msg = "index out of bounds" + with pytest.raises(IndexError, match=msg): + s[5] + msg = "index 5 is out of bounds for axis 0 with size 5" + with pytest.raises(IndexError, match=msg): + s[5] = 0 - pytest.raises(KeyError, s.__getitem__, 'c') + with pytest.raises(KeyError, match=r"^'c'$"): + s['c'] s = s.sort_index() - pytest.raises(IndexError, s.__getitem__, 5) - pytest.raises(IndexError, s.__setitem__, 5, 0) + msg = r"index out of bounds|^5$" + with pytest.raises(IndexError, match=msg): + s[5] + msg = r"index 5 is out of bounds for axis (0|1) with size 5|^5$" + with pytest.raises(IndexError, match=msg): + s[5] = 0 def test_basic_getitem_with_labels(test_data): @@ -105,7 +114,8 @@ def test_getitem_get(test_data): # missing d = test_data.ts.index[0] - BDay() - pytest.raises(KeyError, test_data.ts.__getitem__, d) + with pytest.raises(KeyError, match=r"Timestamp\('1999-12-31 00:00:00'\)"): + test_data.ts[d] # None # GH 5652 @@ -166,11 +176,14 @@ def test_getitem_with_duplicates_indices( def test_getitem_out_of_bounds(test_data): # don't segfault, GH #495 - pytest.raises(IndexError, test_data.ts.__getitem__, len(test_data.ts)) + msg = "index out of bounds" + with pytest.raises(IndexError, match=msg): + test_data.ts[len(test_data.ts)] # GH #917 s = Series([]) - pytest.raises(IndexError, s.__getitem__, -1) + with pytest.raises(IndexError, match=msg): + s[-1] def test_getitem_setitem_integers(): @@ -245,8 +258,10 @@ def test_series_box_timestamp(): def test_getitem_ambiguous_keyerror(): s = Series(lrange(10), index=lrange(0, 20, 2)) - pytest.raises(KeyError, s.__getitem__, 1) - pytest.raises(KeyError, s.loc.__getitem__, 1) + with pytest.raises(KeyError, match=r"^1L?$"): + s[1] + with pytest.raises(KeyError, match=r"^1L?$"): + s.loc[1] def test_getitem_unordered_dup(): @@ -295,7 +310,10 @@ 
def test_getitem_dataframe(): rng = list(range(10)) s = pd.Series(10, index=rng) df = pd.DataFrame(rng, index=rng) - pytest.raises(TypeError, s.__getitem__, df > 5) + msg = ("Indexing a Series with DataFrame is not supported," + " use the appropriate DataFrame column") + with pytest.raises(TypeError, match=msg): + s[df > 5] def test_setitem(test_data): @@ -394,9 +412,10 @@ def test_setslice(test_data): @pytest.mark.filterwarnings("ignore:Using a non-tuple:FutureWarning") def test_basic_getitem_setitem_corner(test_data): # invalid tuples, e.g. td.ts[:, None] vs. td.ts[:, 2] - with pytest.raises(ValueError, match='tuple-index'): + msg = "Can only tuple-index with a MultiIndex" + with pytest.raises(ValueError, match=msg): test_data.ts[:, 2] - with pytest.raises(ValueError, match='tuple-index'): + with pytest.raises(ValueError, match=msg): test_data.ts[:, 2] = 2 # weird lists. [slice(0, 5)] will work but not two slices @@ -405,10 +424,11 @@ def test_basic_getitem_setitem_corner(test_data): assert_series_equal(result, expected) # OK - pytest.raises(Exception, test_data.ts.__getitem__, - [5, slice(None, None)]) - pytest.raises(Exception, test_data.ts.__setitem__, - [5, slice(None, None)], 2) + msg = r"unhashable type(: 'slice')?" 
+ with pytest.raises(TypeError, match=msg): + test_data.ts[[5, slice(None, None)]] + with pytest.raises(TypeError, match=msg): + test_data.ts[[5, slice(None, None)]] = 2 @pytest.mark.parametrize('tz', ['US/Eastern', 'UTC', 'Asia/Tokyo']) @@ -730,7 +750,8 @@ def test_setitem_scalar_into_readonly_backing_data(): series = Series(array) for n in range(len(series)): - with pytest.raises(ValueError): + msg = "assignment destination is read-only" + with pytest.raises(ValueError, match=msg): series[n] = 1 assert array[n] == 0 @@ -743,7 +764,8 @@ def test_setitem_slice_into_readonly_backing_data(): array.flags.writeable = False # make the array immutable series = Series(array) - with pytest.raises(ValueError): + msg = "assignment destination is read-only" + with pytest.raises(ValueError, match=msg): series[1:3] = 1 assert not array.any() @@ -791,8 +813,11 @@ def test_take(): expected = Series([4, 2, 4], index=[4, 3, 4]) tm.assert_series_equal(actual, expected) - pytest.raises(IndexError, s.take, [1, 10]) - pytest.raises(IndexError, s.take, [2, 5]) + msg = "index {} is out of bounds for size 5" + with pytest.raises(IndexError, match=msg.format(10)): + s.take([1, 10]) + with pytest.raises(IndexError, match=msg.format(5)): + s.take([2, 5]) with tm.assert_produces_warning(FutureWarning): s.take([-1, 3, 4], convert=False) diff --git a/pandas/tests/series/indexing/test_loc.py b/pandas/tests/series/indexing/test_loc.py index 27d0eee673c11..8c1709ff016b3 100644 --- a/pandas/tests/series/indexing/test_loc.py +++ b/pandas/tests/series/indexing/test_loc.py @@ -48,8 +48,11 @@ def test_loc_getitem_not_monotonic(test_data): ts2 = test_data.ts[::2][[1, 2, 0]] - pytest.raises(KeyError, ts2.loc.__getitem__, slice(d1, d2)) - pytest.raises(KeyError, ts2.loc.__setitem__, slice(d1, d2), 0) + msg = r"Timestamp\('2000-01-10 00:00:00'\)" + with pytest.raises(KeyError, match=msg): + ts2.loc[d1:d2] + with pytest.raises(KeyError, match=msg): + ts2.loc[d1:d2] = 0 def 
test_loc_getitem_setitem_integer_slice_keyerrors(): @@ -74,8 +77,10 @@ def test_loc_getitem_setitem_integer_slice_keyerrors(): # non-monotonic, raise KeyError s2 = s.iloc[lrange(5) + lrange(5, 10)[::-1]] - pytest.raises(KeyError, s2.loc.__getitem__, slice(3, 11)) - pytest.raises(KeyError, s2.loc.__setitem__, slice(3, 11), 0) + with pytest.raises(KeyError, match=r"^3L?$"): + s2.loc[3:11] + with pytest.raises(KeyError, match=r"^3L?$"): + s2.loc[3:11] = 0 def test_loc_getitem_iterator(test_data): @@ -97,8 +102,9 @@ def test_loc_setitem_boolean(test_data): def test_loc_setitem_corner(test_data): inds = list(test_data.series.index[[5, 8, 12]]) test_data.series.loc[inds] = 5 - pytest.raises(Exception, test_data.series.loc.__setitem__, - inds + ['foo'], 5) + msg = r"\['foo'\] not in index" + with pytest.raises(KeyError, match=msg): + test_data.series.loc[inds + ['foo']] = 5 def test_basic_setitem_with_labels(test_data): @@ -135,8 +141,11 @@ def test_basic_setitem_with_labels(test_data): inds_notfound = [0, 4, 5, 6] arr_inds_notfound = np.array([0, 4, 5, 6]) - pytest.raises(Exception, s.__setitem__, inds_notfound, 0) - pytest.raises(Exception, s.__setitem__, arr_inds_notfound, 0) + msg = r"\[5\] not contained in the index" + with pytest.raises(ValueError, match=msg): + s[inds_notfound] = 0 + with pytest.raises(Exception, match=msg): + s[arr_inds_notfound] = 0 # GH12089 # with tz for values diff --git a/pandas/tests/series/indexing/test_numeric.py b/pandas/tests/series/indexing/test_numeric.py index 8a4fdc7e12e4d..e4afb0e456706 100644 --- a/pandas/tests/series/indexing/test_numeric.py +++ b/pandas/tests/series/indexing/test_numeric.py @@ -96,7 +96,7 @@ def test_delitem(): # empty s = Series() - with pytest.raises(KeyError): + with pytest.raises(KeyError, match=r"^0$"): del s[0] # only 1 left, del, add, del @@ -150,8 +150,12 @@ def test_slice_float64(): def test_getitem_negative_out_of_bounds(): s = Series(tm.rands_array(5, 10), index=tm.rands_array(10, 10)) - 
pytest.raises(IndexError, s.__getitem__, -11) - pytest.raises(IndexError, s.__setitem__, -11, 'foo') + msg = "index out of bounds" + with pytest.raises(IndexError, match=msg): + s[-11] + msg = "index -11 is out of bounds for axis 0 with size 10" + with pytest.raises(IndexError, match=msg): + s[-11] = 'foo' def test_getitem_regression(): @@ -203,13 +207,19 @@ def test_setitem_float_labels(): def test_slice_float_get_set(test_data): - pytest.raises(TypeError, lambda: test_data.ts[4.0:10.0]) + msg = (r"cannot do slice indexing on <class 'pandas\.core\.indexes" + r"\.datetimes\.DatetimeIndex'> with these indexers \[{key}\]" + r" of <(class|type) 'float'>") + with pytest.raises(TypeError, match=msg.format(key=r"4\.0")): + test_data.ts[4.0:10.0] - with pytest.raises(TypeError): + with pytest.raises(TypeError, match=msg.format(key=r"4\.0")): test_data.ts[4.0:10.0] = 0 - pytest.raises(TypeError, test_data.ts.__getitem__, slice(4.5, 10.0)) - pytest.raises(TypeError, test_data.ts.__setitem__, slice(4.5, 10.0), 0) + with pytest.raises(TypeError, match=msg.format(key=r"4\.5")): + test_data.ts[4.5:10.0] + with pytest.raises(TypeError, match=msg.format(key=r"4\.5")): + test_data.ts[4.5:10.0] = 0 def test_slice_floats2(): @@ -228,16 +238,20 @@ def test_slice_floats2(): def test_int_indexing(): s = Series(np.random.randn(6), index=[0, 0, 1, 1, 2, 2]) - pytest.raises(KeyError, s.__getitem__, 5) + with pytest.raises(KeyError, match=r"^5$"): + s[5] - pytest.raises(KeyError, s.__getitem__, 'c') + with pytest.raises(KeyError, match=r"^'c'$"): + s['c'] # not monotonic s = Series(np.random.randn(6), index=[2, 2, 0, 0, 1, 1]) - pytest.raises(KeyError, s.__getitem__, 5) + with pytest.raises(KeyError, match=r"^5$"): + s[5] - pytest.raises(KeyError, s.__getitem__, 'c') + with pytest.raises(KeyError, match=r"^'c'$"): + s['c'] def test_getitem_int64(test_data):
xref #24332 `@pytest.mark.xfail` applied to `test_where_unsafe_itemsize_fail` : does not raise using python index operator instead of `__setitem__` with `tuple`
https://api.github.com/repos/pandas-dev/pandas/pulls/24750
2019-01-13T13:01:25Z
2019-01-13T23:57:19Z
2019-01-13T23:57:19Z
2019-01-15T19:54:40Z
Split test_excel.py (#24472)
diff --git a/pandas/tests/io/excel/__init__.py b/pandas/tests/io/excel/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/pandas/tests/io/excel/test_reader.py b/pandas/tests/io/excel/test_reader.py new file mode 100644 index 0000000000000..376aef6eb4833 --- /dev/null +++ b/pandas/tests/io/excel/test_reader.py @@ -0,0 +1,1151 @@ +from collections import OrderedDict +import contextlib +from datetime import datetime, time +from functools import partial +import os +import warnings + +import numpy as np +import pytest + +from pandas.compat import iteritems, range +import pandas.util._test_decorators as td + +import pandas as pd +from pandas import DataFrame, Index, MultiIndex, Series +import pandas.util.testing as tm +from pandas.util.testing import ensure_clean, makeCustomDataframe as mkdf + +from pandas.io.common import URLError +from pandas.io.excel import ExcelFile, ExcelWriter, read_excel +from pandas.io.parsers import read_csv + +_seriesd = tm.getSeriesData() +_tsd = tm.getTimeSeriesData() +_frame = DataFrame(_seriesd)[:10] +_frame2 = DataFrame(_seriesd, columns=['D', 'C', 'B', 'A'])[:10] +_tsframe = tm.makeTimeDataFrame()[:5] +_mixed_frame = _frame.copy() +_mixed_frame['foo'] = 'bar' + + +@contextlib.contextmanager +def ignore_xlrd_time_clock_warning(): + """ + Context manager to ignore warnings raised by the xlrd library, + regarding the deprecation of `time.clock` in Python 3.7. 
+ """ + with warnings.catch_warnings(): + warnings.filterwarnings( + action='ignore', + message='time.clock has been deprecated', + category=DeprecationWarning) + yield + + +@td.skip_if_no('xlrd', '1.0.0') +class SharedItems(object): + + @pytest.fixture(autouse=True) + def setup_method(self, datapath): + self.dirpath = datapath("io", "data") + self.frame = _frame.copy() + self.frame2 = _frame2.copy() + self.tsframe = _tsframe.copy() + self.mixed_frame = _mixed_frame.copy() + + def get_csv_refdf(self, basename): + """ + Obtain the reference data from read_csv with the Python engine. + Parameters + ---------- + basename : str + File base name, excluding file extension. + Returns + ------- + dfref : DataFrame + """ + pref = os.path.join(self.dirpath, basename + '.csv') + dfref = read_csv(pref, index_col=0, parse_dates=True, engine='python') + return dfref + + def get_excelfile(self, basename, ext): + """ + Return test data ExcelFile instance. + Parameters + ---------- + basename : str + File base name, excluding file extension. + Returns + ------- + excel : io.excel.ExcelFile + """ + return ExcelFile(os.path.join(self.dirpath, basename + ext)) + + def get_exceldf(self, basename, ext, *args, **kwds): + """ + Return test data DataFrame. + Parameters + ---------- + basename : str + File base name, excluding file extension. 
+ Returns + ------- + df : DataFrame + """ + pth = os.path.join(self.dirpath, basename + ext) + return read_excel(pth, *args, **kwds) + + +class ReadingTestsBase(SharedItems): + # This is based on ExcelWriterBase + + @pytest.fixture(autouse=True, params=['xlrd', None]) + def set_engine(self, request): + func_name = "get_exceldf" + old_func = getattr(self, func_name) + new_func = partial(old_func, engine=request.param) + setattr(self, func_name, new_func) + yield + setattr(self, func_name, old_func) + + @td.skip_if_no("xlrd", "1.0.1") # see gh-22682 + def test_usecols_int(self, ext): + + df_ref = self.get_csv_refdf("test1") + df_ref = df_ref.reindex(columns=["A", "B", "C"]) + + # usecols as int + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): + with ignore_xlrd_time_clock_warning(): + df1 = self.get_exceldf("test1", ext, "Sheet1", + index_col=0, usecols=3) + + # usecols as int + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): + with ignore_xlrd_time_clock_warning(): + df2 = self.get_exceldf("test1", ext, "Sheet2", skiprows=[1], + index_col=0, usecols=3) + + # parse_cols instead of usecols, usecols as int + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): + with ignore_xlrd_time_clock_warning(): + df3 = self.get_exceldf("test1", ext, "Sheet2", skiprows=[1], + index_col=0, parse_cols=3) + + # TODO add index to xls file) + tm.assert_frame_equal(df1, df_ref, check_names=False) + tm.assert_frame_equal(df2, df_ref, check_names=False) + tm.assert_frame_equal(df3, df_ref, check_names=False) + + @td.skip_if_no('xlrd', '1.0.1') # GH-22682 + def test_usecols_list(self, ext): + + dfref = self.get_csv_refdf('test1') + dfref = dfref.reindex(columns=['B', 'C']) + df1 = self.get_exceldf('test1', ext, 'Sheet1', index_col=0, + usecols=[0, 2, 3]) + df2 = self.get_exceldf('test1', ext, 'Sheet2', skiprows=[1], + index_col=0, usecols=[0, 2, 3]) + + with tm.assert_produces_warning(FutureWarning): + with 
ignore_xlrd_time_clock_warning(): + df3 = self.get_exceldf('test1', ext, 'Sheet2', skiprows=[1], + index_col=0, parse_cols=[0, 2, 3]) + + # TODO add index to xls file) + tm.assert_frame_equal(df1, dfref, check_names=False) + tm.assert_frame_equal(df2, dfref, check_names=False) + tm.assert_frame_equal(df3, dfref, check_names=False) + + @td.skip_if_no('xlrd', '1.0.1') # GH-22682 + def test_usecols_str(self, ext): + + dfref = self.get_csv_refdf('test1') + + df1 = dfref.reindex(columns=['A', 'B', 'C']) + df2 = self.get_exceldf('test1', ext, 'Sheet1', index_col=0, + usecols='A:D') + df3 = self.get_exceldf('test1', ext, 'Sheet2', skiprows=[1], + index_col=0, usecols='A:D') + + with tm.assert_produces_warning(FutureWarning): + with ignore_xlrd_time_clock_warning(): + df4 = self.get_exceldf('test1', ext, 'Sheet2', skiprows=[1], + index_col=0, parse_cols='A:D') + + # TODO add index to xls, read xls ignores index name ? + tm.assert_frame_equal(df2, df1, check_names=False) + tm.assert_frame_equal(df3, df1, check_names=False) + tm.assert_frame_equal(df4, df1, check_names=False) + + df1 = dfref.reindex(columns=['B', 'C']) + df2 = self.get_exceldf('test1', ext, 'Sheet1', index_col=0, + usecols='A,C,D') + df3 = self.get_exceldf('test1', ext, 'Sheet2', skiprows=[1], + index_col=0, usecols='A,C,D') + # TODO add index to xls file + tm.assert_frame_equal(df2, df1, check_names=False) + tm.assert_frame_equal(df3, df1, check_names=False) + + df1 = dfref.reindex(columns=['B', 'C']) + df2 = self.get_exceldf('test1', ext, 'Sheet1', index_col=0, + usecols='A,C:D') + df3 = self.get_exceldf('test1', ext, 'Sheet2', skiprows=[1], + index_col=0, usecols='A,C:D') + tm.assert_frame_equal(df2, df1, check_names=False) + tm.assert_frame_equal(df3, df1, check_names=False) + + @pytest.mark.parametrize("usecols", [ + [0, 1, 3], [0, 3, 1], + [1, 0, 3], [1, 3, 0], + [3, 0, 1], [3, 1, 0], + ]) + def test_usecols_diff_positional_int_columns_order(self, ext, usecols): + expected = 
self.get_csv_refdf("test1")[["A", "C"]] + result = self.get_exceldf("test1", ext, "Sheet1", + index_col=0, usecols=usecols) + tm.assert_frame_equal(result, expected, check_names=False) + + @pytest.mark.parametrize("usecols", [ + ["B", "D"], ["D", "B"] + ]) + def test_usecols_diff_positional_str_columns_order(self, ext, usecols): + expected = self.get_csv_refdf("test1")[["B", "D"]] + expected.index = range(len(expected)) + + result = self.get_exceldf("test1", ext, "Sheet1", usecols=usecols) + tm.assert_frame_equal(result, expected, check_names=False) + + def test_read_excel_without_slicing(self, ext): + expected = self.get_csv_refdf("test1") + result = self.get_exceldf("test1", ext, "Sheet1", index_col=0) + tm.assert_frame_equal(result, expected, check_names=False) + + def test_usecols_excel_range_str(self, ext): + expected = self.get_csv_refdf("test1")[["C", "D"]] + result = self.get_exceldf("test1", ext, "Sheet1", + index_col=0, usecols="A,D:E") + tm.assert_frame_equal(result, expected, check_names=False) + + def test_usecols_excel_range_str_invalid(self, ext): + msg = "Invalid column name: E1" + + with pytest.raises(ValueError, match=msg): + self.get_exceldf("test1", ext, "Sheet1", usecols="D:E1") + + def test_index_col_label_error(self, ext): + msg = "list indices must be integers.*, not str" + + with pytest.raises(TypeError, match=msg): + self.get_exceldf("test1", ext, "Sheet1", index_col=["A"], + usecols=["A", "C"]) + + def test_index_col_empty(self, ext): + # see gh-9208 + result = self.get_exceldf("test1", ext, "Sheet3", + index_col=["A", "B", "C"]) + expected = DataFrame(columns=["D", "E", "F"], + index=MultiIndex(levels=[[]] * 3, + codes=[[]] * 3, + names=["A", "B", "C"])) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("index_col", [None, 2]) + def test_index_col_with_unnamed(self, ext, index_col): + # see gh-18792 + result = self.get_exceldf("test1", ext, "Sheet4", + index_col=index_col) + expected = DataFrame([["i1", "a", "x"], 
["i2", "b", "y"]], + columns=["Unnamed: 0", "col1", "col2"]) + if index_col: + expected = expected.set_index(expected.columns[index_col]) + + tm.assert_frame_equal(result, expected) + + def test_usecols_pass_non_existent_column(self, ext): + msg = ("Usecols do not match columns, " + "columns expected but not found: " + r"\['E'\]") + + with pytest.raises(ValueError, match=msg): + self.get_exceldf("test1", ext, usecols=["E"]) + + def test_usecols_wrong_type(self, ext): + msg = ("'usecols' must either be list-like of " + "all strings, all unicode, all integers or a callable.") + + with pytest.raises(ValueError, match=msg): + self.get_exceldf("test1", ext, usecols=["E1", 0]) + + def test_excel_stop_iterator(self, ext): + + parsed = self.get_exceldf('test2', ext, 'Sheet1') + expected = DataFrame([['aaaa', 'bbbbb']], columns=['Test', 'Test1']) + tm.assert_frame_equal(parsed, expected) + + def test_excel_cell_error_na(self, ext): + + parsed = self.get_exceldf('test3', ext, 'Sheet1') + expected = DataFrame([[np.nan]], columns=['Test']) + tm.assert_frame_equal(parsed, expected) + + def test_excel_passes_na(self, ext): + + excel = self.get_excelfile('test4', ext) + + parsed = read_excel(excel, 'Sheet1', keep_default_na=False, + na_values=['apple']) + expected = DataFrame([['NA'], [1], ['NA'], [np.nan], ['rabbit']], + columns=['Test']) + tm.assert_frame_equal(parsed, expected) + + parsed = read_excel(excel, 'Sheet1', keep_default_na=True, + na_values=['apple']) + expected = DataFrame([[np.nan], [1], [np.nan], [np.nan], ['rabbit']], + columns=['Test']) + tm.assert_frame_equal(parsed, expected) + + # 13967 + excel = self.get_excelfile('test5', ext) + + parsed = read_excel(excel, 'Sheet1', keep_default_na=False, + na_values=['apple']) + expected = DataFrame([['1.#QNAN'], [1], ['nan'], [np.nan], ['rabbit']], + columns=['Test']) + tm.assert_frame_equal(parsed, expected) + + parsed = read_excel(excel, 'Sheet1', keep_default_na=True, + na_values=['apple']) + expected = 
DataFrame([[np.nan], [1], [np.nan], [np.nan], ['rabbit']], + columns=['Test']) + tm.assert_frame_equal(parsed, expected) + + @td.skip_if_no('xlrd', '1.0.1') # GH-22682 + def test_deprecated_sheetname(self, ext): + # gh-17964 + excel = self.get_excelfile('test1', ext) + + with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): + read_excel(excel, sheetname='Sheet1') + + with pytest.raises(TypeError): + read_excel(excel, sheet='Sheet1') + + @td.skip_if_no('xlrd', '1.0.1') # GH-22682 + def test_excel_table_sheet_by_index(self, ext): + + excel = self.get_excelfile('test1', ext) + dfref = self.get_csv_refdf('test1') + + df1 = read_excel(excel, 0, index_col=0) + df2 = read_excel(excel, 1, skiprows=[1], index_col=0) + tm.assert_frame_equal(df1, dfref, check_names=False) + tm.assert_frame_equal(df2, dfref, check_names=False) + + df1 = excel.parse(0, index_col=0) + df2 = excel.parse(1, skiprows=[1], index_col=0) + tm.assert_frame_equal(df1, dfref, check_names=False) + tm.assert_frame_equal(df2, dfref, check_names=False) + + df3 = read_excel(excel, 0, index_col=0, skipfooter=1) + tm.assert_frame_equal(df3, df1.iloc[:-1]) + + with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): + df4 = read_excel(excel, 0, index_col=0, skip_footer=1) + tm.assert_frame_equal(df3, df4) + + df3 = excel.parse(0, index_col=0, skipfooter=1) + tm.assert_frame_equal(df3, df1.iloc[:-1]) + + import xlrd + with pytest.raises(xlrd.XLRDError): + read_excel(excel, 'asdf') + + def test_excel_table(self, ext): + + dfref = self.get_csv_refdf('test1') + + df1 = self.get_exceldf('test1', ext, 'Sheet1', index_col=0) + df2 = self.get_exceldf('test1', ext, 'Sheet2', skiprows=[1], + index_col=0) + # TODO add index to file + tm.assert_frame_equal(df1, dfref, check_names=False) + tm.assert_frame_equal(df2, dfref, check_names=False) + + df3 = self.get_exceldf('test1', ext, 'Sheet1', index_col=0, + skipfooter=1) + tm.assert_frame_equal(df3, df1.iloc[:-1]) + + def 
test_reader_special_dtypes(self, ext): + + expected = DataFrame.from_dict(OrderedDict([ + ("IntCol", [1, 2, -3, 4, 0]), + ("FloatCol", [1.25, 2.25, 1.83, 1.92, 0.0000000005]), + ("BoolCol", [True, False, True, True, False]), + ("StrCol", [1, 2, 3, 4, 5]), + # GH5394 - this is why convert_float isn't vectorized + ("Str2Col", ["a", 3, "c", "d", "e"]), + ("DateCol", [datetime(2013, 10, 30), datetime(2013, 10, 31), + datetime(1905, 1, 1), datetime(2013, 12, 14), + datetime(2015, 3, 14)]) + ])) + basename = 'test_types' + + # should read in correctly and infer types + actual = self.get_exceldf(basename, ext, 'Sheet1') + tm.assert_frame_equal(actual, expected) + + # if not coercing number, then int comes in as float + float_expected = expected.copy() + float_expected["IntCol"] = float_expected["IntCol"].astype(float) + float_expected.loc[float_expected.index[1], "Str2Col"] = 3.0 + actual = self.get_exceldf(basename, ext, 'Sheet1', convert_float=False) + tm.assert_frame_equal(actual, float_expected) + + # check setting Index (assuming xls and xlsx are the same here) + for icol, name in enumerate(expected.columns): + actual = self.get_exceldf(basename, ext, 'Sheet1', index_col=icol) + exp = expected.set_index(name) + tm.assert_frame_equal(actual, exp) + + # convert_float and converters should be different but both accepted + expected["StrCol"] = expected["StrCol"].apply(str) + actual = self.get_exceldf( + basename, ext, 'Sheet1', converters={"StrCol": str}) + tm.assert_frame_equal(actual, expected) + + no_convert_float = float_expected.copy() + no_convert_float["StrCol"] = no_convert_float["StrCol"].apply(str) + actual = self.get_exceldf(basename, ext, 'Sheet1', convert_float=False, + converters={"StrCol": str}) + tm.assert_frame_equal(actual, no_convert_float) + + # GH8212 - support for converters and missing values + def test_reader_converters(self, ext): + + basename = 'test_converters' + + expected = DataFrame.from_dict(OrderedDict([ + ("IntCol", [1, 2, -3, -1000, 0]), 
+ ("FloatCol", [12.5, np.nan, 18.3, 19.2, 0.000000005]), + ("BoolCol", ['Found', 'Found', 'Found', 'Not found', 'Found']), + ("StrCol", ['1', np.nan, '3', '4', '5']), + ])) + + converters = {'IntCol': lambda x: int(x) if x != '' else -1000, + 'FloatCol': lambda x: 10 * x if x else np.nan, + 2: lambda x: 'Found' if x != '' else 'Not found', + 3: lambda x: str(x) if x else '', + } + + # should read in correctly and set types of single cells (not array + # dtypes) + actual = self.get_exceldf(basename, ext, 'Sheet1', + converters=converters) + tm.assert_frame_equal(actual, expected) + + def test_reader_dtype(self, ext): + # GH 8212 + basename = 'testdtype' + actual = self.get_exceldf(basename, ext) + + expected = DataFrame({ + 'a': [1, 2, 3, 4], + 'b': [2.5, 3.5, 4.5, 5.5], + 'c': [1, 2, 3, 4], + 'd': [1.0, 2.0, np.nan, 4.0]}).reindex( + columns=['a', 'b', 'c', 'd']) + + tm.assert_frame_equal(actual, expected) + + actual = self.get_exceldf(basename, ext, + dtype={'a': 'float64', + 'b': 'float32', + 'c': str}) + + expected['a'] = expected['a'].astype('float64') + expected['b'] = expected['b'].astype('float32') + expected['c'] = ['001', '002', '003', '004'] + tm.assert_frame_equal(actual, expected) + + with pytest.raises(ValueError): + self.get_exceldf(basename, ext, dtype={'d': 'int64'}) + + @pytest.mark.parametrize("dtype,expected", [ + (None, + DataFrame({ + "a": [1, 2, 3, 4], + "b": [2.5, 3.5, 4.5, 5.5], + "c": [1, 2, 3, 4], + "d": [1.0, 2.0, np.nan, 4.0] + })), + ({"a": "float64", + "b": "float32", + "c": str, + "d": str + }, + DataFrame({ + "a": Series([1, 2, 3, 4], dtype="float64"), + "b": Series([2.5, 3.5, 4.5, 5.5], dtype="float32"), + "c": ["001", "002", "003", "004"], + "d": ["1", "2", np.nan, "4"] + })), + ]) + def test_reader_dtype_str(self, ext, dtype, expected): + # see gh-20377 + basename = "testdtype" + + actual = self.get_exceldf(basename, ext, dtype=dtype) + tm.assert_frame_equal(actual, expected) + + def test_reading_all_sheets(self, ext): + # Test 
reading all sheetnames by setting sheetname to None, + # Ensure a dict is returned. + # See PR #9450 + basename = 'test_multisheet' + dfs = self.get_exceldf(basename, ext, sheet_name=None) + # ensure this is not alphabetical to test order preservation + expected_keys = ['Charlie', 'Alpha', 'Beta'] + tm.assert_contains_all(expected_keys, dfs.keys()) + # Issue 9930 + # Ensure sheet order is preserved + assert expected_keys == list(dfs.keys()) + + def test_reading_multiple_specific_sheets(self, ext): + # Test reading specific sheetnames by specifying a mixed list + # of integers and strings, and confirm that duplicated sheet + # references (positions/names) are removed properly. + # Ensure a dict is returned + # See PR #9450 + basename = 'test_multisheet' + # Explicitly request duplicates. Only the set should be returned. + expected_keys = [2, 'Charlie', 'Charlie'] + dfs = self.get_exceldf(basename, ext, sheet_name=expected_keys) + expected_keys = list(set(expected_keys)) + tm.assert_contains_all(expected_keys, dfs.keys()) + assert len(expected_keys) == len(dfs.keys()) + + def test_reading_all_sheets_with_blank(self, ext): + # Test reading all sheetnames by setting sheetname to None, + # In the case where some sheets are blank. 
+ # Issue #11711 + basename = 'blank_with_header' + dfs = self.get_exceldf(basename, ext, sheet_name=None) + expected_keys = ['Sheet1', 'Sheet2', 'Sheet3'] + tm.assert_contains_all(expected_keys, dfs.keys()) + + # GH6403 + def test_read_excel_blank(self, ext): + actual = self.get_exceldf('blank', ext, 'Sheet1') + tm.assert_frame_equal(actual, DataFrame()) + + def test_read_excel_blank_with_header(self, ext): + expected = DataFrame(columns=['col_1', 'col_2']) + actual = self.get_exceldf('blank_with_header', ext, 'Sheet1') + tm.assert_frame_equal(actual, expected) + + @td.skip_if_no("xlwt") + @td.skip_if_no("openpyxl") + @pytest.mark.parametrize("header,expected", [ + (None, DataFrame([np.nan] * 4)), + (0, DataFrame({"Unnamed: 0": [np.nan] * 3})) + ]) + def test_read_one_empty_col_no_header(self, ext, header, expected): + # xref gh-12292 + filename = "no_header" + df = pd.DataFrame( + [["", 1, 100], + ["", 2, 200], + ["", 3, 300], + ["", 4, 400]] + ) + + with ensure_clean(ext) as path: + df.to_excel(path, filename, index=False, header=False) + result = read_excel(path, filename, usecols=[0], header=header) + + tm.assert_frame_equal(result, expected) + + @td.skip_if_no("xlwt") + @td.skip_if_no("openpyxl") + @pytest.mark.parametrize("header,expected", [ + (None, DataFrame([0] + [np.nan] * 4)), + (0, DataFrame([np.nan] * 4)) + ]) + def test_read_one_empty_col_with_header(self, ext, header, expected): + filename = "with_header" + df = pd.DataFrame( + [["", 1, 100], + ["", 2, 200], + ["", 3, 300], + ["", 4, 400]] + ) + + with ensure_clean(ext) as path: + df.to_excel(path, 'with_header', index=False, header=True) + result = read_excel(path, filename, usecols=[0], header=header) + + tm.assert_frame_equal(result, expected) + + @td.skip_if_no('openpyxl') + @td.skip_if_no('xlwt') + def test_set_column_names_in_parameter(self, ext): + # GH 12870 : pass down column names associated with + # keyword argument names + refdf = pd.DataFrame([[1, 'foo'], [2, 'bar'], + [3, 'baz']], 
columns=['a', 'b']) + + with ensure_clean(ext) as pth: + with ExcelWriter(pth) as writer: + refdf.to_excel(writer, 'Data_no_head', + header=False, index=False) + refdf.to_excel(writer, 'Data_with_head', index=False) + + refdf.columns = ['A', 'B'] + + with ExcelFile(pth) as reader: + xlsdf_no_head = read_excel(reader, 'Data_no_head', + header=None, names=['A', 'B']) + xlsdf_with_head = read_excel(reader, 'Data_with_head', + index_col=None, names=['A', 'B']) + + tm.assert_frame_equal(xlsdf_no_head, refdf) + tm.assert_frame_equal(xlsdf_with_head, refdf) + + def test_date_conversion_overflow(self, ext): + # GH 10001 : pandas.ExcelFile ignore parse_dates=False + expected = pd.DataFrame([[pd.Timestamp('2016-03-12'), 'Marc Johnson'], + [pd.Timestamp('2016-03-16'), 'Jack Black'], + [1e+20, 'Timothy Brown']], + columns=['DateColWithBigInt', 'StringCol']) + + result = self.get_exceldf('testdateoverflow', ext) + tm.assert_frame_equal(result, expected) + + @td.skip_if_no("xlrd", "1.0.1") # see gh-22682 + def test_sheet_name_and_sheetname(self, ext): + # gh-10559: Minor improvement: Change "sheet_name" to "sheetname" + # gh-10969: DOC: Consistent var names (sheetname vs sheet_name) + # gh-12604: CLN GH10559 Rename sheetname variable to sheet_name + # gh-20920: ExcelFile.parse() and pd.read_xlsx() have different + # behavior for "sheetname" argument + filename = "test1" + sheet_name = "Sheet1" + + df_ref = self.get_csv_refdf(filename) + df1 = self.get_exceldf(filename, ext, + sheet_name=sheet_name, index_col=0) # doc + with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): + with ignore_xlrd_time_clock_warning(): + df2 = self.get_exceldf(filename, ext, index_col=0, + sheetname=sheet_name) # backward compat + + excel = self.get_excelfile(filename, ext) + df1_parse = excel.parse(sheet_name=sheet_name, index_col=0) # doc + with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): + df2_parse = excel.parse(index_col=0, + sheetname=sheet_name) # 
backward compat + + tm.assert_frame_equal(df1, df_ref, check_names=False) + tm.assert_frame_equal(df2, df_ref, check_names=False) + tm.assert_frame_equal(df1_parse, df_ref, check_names=False) + tm.assert_frame_equal(df2_parse, df_ref, check_names=False) + + def test_sheet_name_both_raises(self, ext): + with pytest.raises(TypeError, match="Cannot specify both"): + self.get_exceldf('test1', ext, sheetname='Sheet1', + sheet_name='Sheet1') + + excel = self.get_excelfile('test1', ext) + with pytest.raises(TypeError, match="Cannot specify both"): + excel.parse(sheetname='Sheet1', + sheet_name='Sheet1') + + def test_excel_read_buffer(self, ext): + + pth = os.path.join(self.dirpath, 'test1' + ext) + expected = read_excel(pth, 'Sheet1', index_col=0) + with open(pth, 'rb') as f: + actual = read_excel(f, 'Sheet1', index_col=0) + tm.assert_frame_equal(expected, actual) + + with open(pth, 'rb') as f: + xls = ExcelFile(f) + actual = read_excel(xls, 'Sheet1', index_col=0) + tm.assert_frame_equal(expected, actual) + + def test_bad_engine_raises(self, ext): + bad_engine = 'foo' + with pytest.raises(ValueError, match="Unknown engine: foo"): + read_excel('', engine=bad_engine) + + @tm.network + def test_read_from_http_url(self, ext): + url = ('https://raw.github.com/pandas-dev/pandas/master/' + 'pandas/tests/io/data/test1' + ext) + url_table = read_excel(url) + local_table = self.get_exceldf('test1', ext) + tm.assert_frame_equal(url_table, local_table) + + @td.skip_if_not_us_locale + def test_read_from_s3_url(self, ext, s3_resource): + # Bucket "pandas-test" created in tests/io/conftest.py + file_name = os.path.join(self.dirpath, 'test1' + ext) + + with open(file_name, "rb") as f: + s3_resource.Bucket("pandas-test").put_object(Key="test1" + ext, + Body=f) + + url = ('s3://pandas-test/test1' + ext) + url_table = read_excel(url) + local_table = self.get_exceldf('test1', ext) + tm.assert_frame_equal(url_table, local_table) + + @pytest.mark.slow + # ignore warning from old xlrd + 
@pytest.mark.filterwarnings("ignore:This metho:PendingDeprecationWarning") + def test_read_from_file_url(self, ext): + + # FILE + localtable = os.path.join(self.dirpath, 'test1' + ext) + local_table = read_excel(localtable) + + try: + url_table = read_excel('file://localhost/' + localtable) + except URLError: + # fails on some systems + import platform + pytest.skip("failing on %s" % + ' '.join(platform.uname()).strip()) + + tm.assert_frame_equal(url_table, local_table) + + @td.skip_if_no('pathlib') + def test_read_from_pathlib_path(self, ext): + + # GH12655 + from pathlib import Path + + str_path = os.path.join(self.dirpath, 'test1' + ext) + expected = read_excel(str_path, 'Sheet1', index_col=0) + + path_obj = Path(self.dirpath, 'test1' + ext) + actual = read_excel(path_obj, 'Sheet1', index_col=0) + + tm.assert_frame_equal(expected, actual) + + @td.skip_if_no('py.path') + def test_read_from_py_localpath(self, ext): + + # GH12655 + from py.path import local as LocalPath + + str_path = os.path.join(self.dirpath, 'test1' + ext) + expected = read_excel(str_path, 'Sheet1', index_col=0) + + abs_dir = os.path.abspath(self.dirpath) + path_obj = LocalPath(abs_dir).join('test1' + ext) + actual = read_excel(path_obj, 'Sheet1', index_col=0) + + tm.assert_frame_equal(expected, actual) + + def test_reader_closes_file(self, ext): + + pth = os.path.join(self.dirpath, 'test1' + ext) + f = open(pth, 'rb') + with ExcelFile(f) as xlsx: + # parses okay + read_excel(xlsx, 'Sheet1', index_col=0) + + assert f.closed + + @td.skip_if_no("xlwt") + @td.skip_if_no("openpyxl") + def test_creating_and_reading_multiple_sheets(self, ext): + # see gh-9450 + # + # Test reading multiple sheets, from a runtime + # created Excel file with multiple sheets. 
+ def tdf(col_sheet_name): + d, i = [11, 22, 33], [1, 2, 3] + return DataFrame(d, i, columns=[col_sheet_name]) + + sheets = ["AAA", "BBB", "CCC"] + + dfs = [tdf(s) for s in sheets] + dfs = dict(zip(sheets, dfs)) + + with ensure_clean(ext) as pth: + with ExcelWriter(pth) as ew: + for sheetname, df in iteritems(dfs): + df.to_excel(ew, sheetname) + + dfs_returned = read_excel(pth, sheet_name=sheets, index_col=0) + + for s in sheets: + tm.assert_frame_equal(dfs[s], dfs_returned[s]) + + def test_reader_seconds(self, ext): + + # Test reading times with and without milliseconds. GH5945. + expected = DataFrame.from_dict({"Time": [time(1, 2, 3), + time(2, 45, 56, 100000), + time(4, 29, 49, 200000), + time(6, 13, 42, 300000), + time(7, 57, 35, 400000), + time(9, 41, 28, 500000), + time(11, 25, 21, 600000), + time(13, 9, 14, 700000), + time(14, 53, 7, 800000), + time(16, 37, 0, 900000), + time(18, 20, 54)]}) + + actual = self.get_exceldf('times_1900', ext, 'Sheet1') + tm.assert_frame_equal(actual, expected) + + actual = self.get_exceldf('times_1904', ext, 'Sheet1') + tm.assert_frame_equal(actual, expected) + + def test_read_excel_multiindex(self, ext): + # see gh-4679 + mi = MultiIndex.from_product([["foo", "bar"], ["a", "b"]]) + mi_file = os.path.join(self.dirpath, "testmultiindex" + ext) + + # "mi_column" sheet + expected = DataFrame([[1, 2.5, pd.Timestamp("2015-01-01"), True], + [2, 3.5, pd.Timestamp("2015-01-02"), False], + [3, 4.5, pd.Timestamp("2015-01-03"), False], + [4, 5.5, pd.Timestamp("2015-01-04"), True]], + columns=mi) + + actual = read_excel(mi_file, "mi_column", header=[0, 1], index_col=0) + tm.assert_frame_equal(actual, expected) + + # "mi_index" sheet + expected.index = mi + expected.columns = ["a", "b", "c", "d"] + + actual = read_excel(mi_file, "mi_index", index_col=[0, 1]) + tm.assert_frame_equal(actual, expected, check_names=False) + + # "both" sheet + expected.columns = mi + + actual = read_excel(mi_file, "both", index_col=[0, 1], header=[0, 1]) + 
tm.assert_frame_equal(actual, expected, check_names=False) + + # "mi_index_name" sheet + expected.columns = ["a", "b", "c", "d"] + expected.index = mi.set_names(["ilvl1", "ilvl2"]) + + actual = read_excel(mi_file, "mi_index_name", index_col=[0, 1]) + tm.assert_frame_equal(actual, expected) + + # "mi_column_name" sheet + expected.index = list(range(4)) + expected.columns = mi.set_names(["c1", "c2"]) + actual = read_excel(mi_file, "mi_column_name", + header=[0, 1], index_col=0) + tm.assert_frame_equal(actual, expected) + + # see gh-11317 + # "name_with_int" sheet + expected.columns = mi.set_levels( + [1, 2], level=1).set_names(["c1", "c2"]) + + actual = read_excel(mi_file, "name_with_int", + index_col=0, header=[0, 1]) + tm.assert_frame_equal(actual, expected) + + # "both_name" sheet + expected.columns = mi.set_names(["c1", "c2"]) + expected.index = mi.set_names(["ilvl1", "ilvl2"]) + + actual = read_excel(mi_file, "both_name", + index_col=[0, 1], header=[0, 1]) + tm.assert_frame_equal(actual, expected) + + # "both_skiprows" sheet + actual = read_excel(mi_file, "both_name_skiprows", index_col=[0, 1], + header=[0, 1], skiprows=2) + tm.assert_frame_equal(actual, expected) + + def test_read_excel_multiindex_header_only(self, ext): + # see gh-11733. + # + # Don't try to parse a header name if there isn't one. 
+ mi_file = os.path.join(self.dirpath, "testmultiindex" + ext) + result = read_excel(mi_file, "index_col_none", header=[0, 1]) + + exp_columns = MultiIndex.from_product([("A", "B"), ("key", "val")]) + expected = DataFrame([[1, 2, 3, 4]] * 2, columns=exp_columns) + tm.assert_frame_equal(result, expected) + + @td.skip_if_no("xlsxwriter") + def test_read_excel_multiindex_empty_level(self, ext): + # see gh-12453 + with ensure_clean(ext) as path: + df = DataFrame({ + ("One", "x"): {0: 1}, + ("Two", "X"): {0: 3}, + ("Two", "Y"): {0: 7}, + ("Zero", ""): {0: 0} + }) + + expected = DataFrame({ + ("One", "x"): {0: 1}, + ("Two", "X"): {0: 3}, + ("Two", "Y"): {0: 7}, + ("Zero", "Unnamed: 4_level_1"): {0: 0} + }) + + df.to_excel(path) + actual = pd.read_excel(path, header=[0, 1], index_col=0) + tm.assert_frame_equal(actual, expected) + + df = pd.DataFrame({ + ("Beg", ""): {0: 0}, + ("Middle", "x"): {0: 1}, + ("Tail", "X"): {0: 3}, + ("Tail", "Y"): {0: 7} + }) + + expected = pd.DataFrame({ + ("Beg", "Unnamed: 1_level_1"): {0: 0}, + ("Middle", "x"): {0: 1}, + ("Tail", "X"): {0: 3}, + ("Tail", "Y"): {0: 7} + }) + + df.to_excel(path) + actual = pd.read_excel(path, header=[0, 1], index_col=0) + tm.assert_frame_equal(actual, expected) + + @td.skip_if_no("xlsxwriter") + @pytest.mark.parametrize("c_idx_names", [True, False]) + @pytest.mark.parametrize("r_idx_names", [True, False]) + @pytest.mark.parametrize("c_idx_levels", [1, 3]) + @pytest.mark.parametrize("r_idx_levels", [1, 3]) + def test_excel_multindex_roundtrip(self, ext, c_idx_names, r_idx_names, + c_idx_levels, r_idx_levels): + # see gh-4679 + with ensure_clean(ext) as pth: + if c_idx_levels == 1 and c_idx_names: + pytest.skip("Column index name cannot be " + "serialized unless it's a MultiIndex") + + # Empty name case current read in as + # unnamed levels, not Nones. 
+ check_names = r_idx_names or r_idx_levels <= 1 + + df = mkdf(5, 5, c_idx_names, r_idx_names, + c_idx_levels, r_idx_levels) + df.to_excel(pth) + + act = pd.read_excel(pth, index_col=list(range(r_idx_levels)), + header=list(range(c_idx_levels))) + tm.assert_frame_equal(df, act, check_names=check_names) + + df.iloc[0, :] = np.nan + df.to_excel(pth) + + act = pd.read_excel(pth, index_col=list(range(r_idx_levels)), + header=list(range(c_idx_levels))) + tm.assert_frame_equal(df, act, check_names=check_names) + + df.iloc[-1, :] = np.nan + df.to_excel(pth) + act = pd.read_excel(pth, index_col=list(range(r_idx_levels)), + header=list(range(c_idx_levels))) + tm.assert_frame_equal(df, act, check_names=check_names) + + def test_excel_old_index_format(self, ext): + # see gh-4679 + filename = "test_index_name_pre17" + ext + in_file = os.path.join(self.dirpath, filename) + + # We detect headers to determine if index names exist, so + # that "index" name in the "names" version of the data will + # now be interpreted as rows that include null data. 
+ data = np.array([[None, None, None, None, None], + ["R0C0", "R0C1", "R0C2", "R0C3", "R0C4"], + ["R1C0", "R1C1", "R1C2", "R1C3", "R1C4"], + ["R2C0", "R2C1", "R2C2", "R2C3", "R2C4"], + ["R3C0", "R3C1", "R3C2", "R3C3", "R3C4"], + ["R4C0", "R4C1", "R4C2", "R4C3", "R4C4"]]) + columns = ["C_l0_g0", "C_l0_g1", "C_l0_g2", "C_l0_g3", "C_l0_g4"] + mi = MultiIndex(levels=[["R0", "R_l0_g0", "R_l0_g1", + "R_l0_g2", "R_l0_g3", "R_l0_g4"], + ["R1", "R_l1_g0", "R_l1_g1", + "R_l1_g2", "R_l1_g3", "R_l1_g4"]], + codes=[[0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5]], + names=[None, None]) + si = Index(["R0", "R_l0_g0", "R_l0_g1", "R_l0_g2", + "R_l0_g3", "R_l0_g4"], name=None) + + expected = pd.DataFrame(data, index=si, columns=columns) + + actual = pd.read_excel(in_file, "single_names", index_col=0) + tm.assert_frame_equal(actual, expected) + + expected.index = mi + + actual = pd.read_excel(in_file, "multi_names", index_col=[0, 1]) + tm.assert_frame_equal(actual, expected) + + # The analogous versions of the "names" version data + # where there are explicitly no names for the indices. 
+ data = np.array([["R0C0", "R0C1", "R0C2", "R0C3", "R0C4"], + ["R1C0", "R1C1", "R1C2", "R1C3", "R1C4"], + ["R2C0", "R2C1", "R2C2", "R2C3", "R2C4"], + ["R3C0", "R3C1", "R3C2", "R3C3", "R3C4"], + ["R4C0", "R4C1", "R4C2", "R4C3", "R4C4"]]) + columns = ["C_l0_g0", "C_l0_g1", "C_l0_g2", "C_l0_g3", "C_l0_g4"] + mi = MultiIndex(levels=[["R_l0_g0", "R_l0_g1", "R_l0_g2", + "R_l0_g3", "R_l0_g4"], + ["R_l1_g0", "R_l1_g1", "R_l1_g2", + "R_l1_g3", "R_l1_g4"]], + codes=[[0, 1, 2, 3, 4], [0, 1, 2, 3, 4]], + names=[None, None]) + si = Index(["R_l0_g0", "R_l0_g1", "R_l0_g2", + "R_l0_g3", "R_l0_g4"], name=None) + + expected = pd.DataFrame(data, index=si, columns=columns) + + actual = pd.read_excel(in_file, "single_no_names", index_col=0) + tm.assert_frame_equal(actual, expected) + + expected.index = mi + + actual = pd.read_excel(in_file, "multi_no_names", index_col=[0, 1]) + tm.assert_frame_equal(actual, expected, check_names=False) + + def test_read_excel_bool_header_arg(self, ext): + # GH 6114 + for arg in [True, False]: + with pytest.raises(TypeError): + pd.read_excel(os.path.join(self.dirpath, 'test1' + ext), + header=arg) + + def test_read_excel_chunksize(self, ext): + # GH 8011 + with pytest.raises(NotImplementedError): + pd.read_excel(os.path.join(self.dirpath, 'test1' + ext), + chunksize=100) + + @td.skip_if_no("xlwt") + @td.skip_if_no("openpyxl") + def test_read_excel_parse_dates(self, ext): + # see gh-11544, gh-12051 + df = DataFrame( + {"col": [1, 2, 3], + "date_strings": pd.date_range("2012-01-01", periods=3)}) + df2 = df.copy() + df2["date_strings"] = df2["date_strings"].dt.strftime("%m/%d/%Y") + + with ensure_clean(ext) as pth: + df2.to_excel(pth) + + res = read_excel(pth, index_col=0) + tm.assert_frame_equal(df2, res) + + res = read_excel(pth, parse_dates=["date_strings"], index_col=0) + tm.assert_frame_equal(df, res) + + date_parser = lambda x: pd.datetime.strptime(x, "%m/%d/%Y") + res = read_excel(pth, parse_dates=["date_strings"], + date_parser=date_parser, 
index_col=0) + tm.assert_frame_equal(df, res) + + def test_read_excel_skiprows_list(self, ext): + # GH 4903 + actual = pd.read_excel(os.path.join(self.dirpath, + 'testskiprows' + ext), + 'skiprows_list', skiprows=[0, 2]) + expected = DataFrame([[1, 2.5, pd.Timestamp('2015-01-01'), True], + [2, 3.5, pd.Timestamp('2015-01-02'), False], + [3, 4.5, pd.Timestamp('2015-01-03'), False], + [4, 5.5, pd.Timestamp('2015-01-04'), True]], + columns=['a', 'b', 'c', 'd']) + tm.assert_frame_equal(actual, expected) + + actual = pd.read_excel(os.path.join(self.dirpath, + 'testskiprows' + ext), + 'skiprows_list', skiprows=np.array([0, 2])) + tm.assert_frame_equal(actual, expected) + + def test_read_excel_nrows(self, ext): + # GH 16645 + num_rows_to_pull = 5 + actual = pd.read_excel(os.path.join(self.dirpath, 'test1' + ext), + nrows=num_rows_to_pull) + expected = pd.read_excel(os.path.join(self.dirpath, + 'test1' + ext)) + expected = expected[:num_rows_to_pull] + tm.assert_frame_equal(actual, expected) + + def test_read_excel_nrows_greater_than_nrows_in_file(self, ext): + # GH 16645 + expected = pd.read_excel(os.path.join(self.dirpath, + 'test1' + ext)) + num_records_in_file = len(expected) + num_rows_to_pull = num_records_in_file + 10 + actual = pd.read_excel(os.path.join(self.dirpath, 'test1' + ext), + nrows=num_rows_to_pull) + tm.assert_frame_equal(actual, expected) + + def test_read_excel_nrows_non_integer_parameter(self, ext): + # GH 16645 + msg = "'nrows' must be an integer >=0" + with pytest.raises(ValueError, match=msg): + pd.read_excel(os.path.join(self.dirpath, 'test1' + ext), + nrows='5') + + def test_read_excel_squeeze(self, ext): + # GH 12157 + f = os.path.join(self.dirpath, 'test_squeeze' + ext) + + actual = pd.read_excel(f, 'two_columns', index_col=0, squeeze=True) + expected = pd.Series([2, 3, 4], [4, 5, 6], name='b') + expected.index.name = 'a' + tm.assert_series_equal(actual, expected) + + actual = pd.read_excel(f, 'two_columns', squeeze=True) + expected = 
pd.DataFrame({'a': [4, 5, 6], + 'b': [2, 3, 4]}) + tm.assert_frame_equal(actual, expected) + + actual = pd.read_excel(f, 'one_column', squeeze=True) + expected = pd.Series([1, 2, 3], name='a') + tm.assert_series_equal(actual, expected) + + +@pytest.mark.parametrize("ext", ['.xls', '.xlsx', '.xlsm']) +class TestXlrdReader(ReadingTestsBase): + """ + This is the base class for the xlrd tests, and 3 different file formats + are supported: xls, xlsx, xlsm + """ + + @td.skip_if_no("xlwt") + def test_read_xlrd_book(self, ext): + import xlrd + df = self.frame + + engine = "xlrd" + sheet_name = "SheetA" + + with ensure_clean(ext) as pth: + df.to_excel(pth, sheet_name) + book = xlrd.open_workbook(pth) + + with ExcelFile(book, engine=engine) as xl: + result = read_excel(xl, sheet_name, index_col=0) + tm.assert_frame_equal(df, result) + + result = read_excel(book, sheet_name=sheet_name, + engine=engine, index_col=0) + tm.assert_frame_equal(df, result) diff --git a/pandas/tests/io/test_excel.py b/pandas/tests/io/excel/test_writer.py similarity index 56% rename from pandas/tests/io/test_excel.py rename to pandas/tests/io/excel/test_writer.py index 84383afed1d03..89d624344bce5 100644 --- a/pandas/tests/io/test_excel.py +++ b/pandas/tests/io/excel/test_writer.py @@ -1,6 +1,4 @@ -from collections import OrderedDict -import contextlib -from datetime import date, datetime, time, timedelta +from datetime import date, datetime, timedelta from distutils.version import LooseVersion from functools import partial import os @@ -11,16 +9,15 @@ from numpy import nan import pytest -from pandas.compat import PY36, BytesIO, iteritems, map, range, u +from pandas.compat import PY36, BytesIO, map, range, u import pandas.util._test_decorators as td import pandas as pd -from pandas import DataFrame, Index, MultiIndex, Series +from pandas import DataFrame, Index, MultiIndex from pandas.core.config import get_option, set_option import pandas.util.testing as tm from pandas.util.testing import 
ensure_clean, makeCustomDataframe as mkdf -from pandas.io.common import URLError from pandas.io.excel import ( ExcelFile, ExcelWriter, _OpenpyxlWriter, _XlsxWriter, _XlwtWriter, read_excel, register_writer) @@ -36,20 +33,6 @@ _mixed_frame['foo'] = 'bar' -@contextlib.contextmanager -def ignore_xlrd_time_clock_warning(): - """ - Context manager to ignore warnings raised by the xlrd library, - regarding the deprecation of `time.clock` in Python 3.7. - """ - with warnings.catch_warnings(): - warnings.filterwarnings( - action='ignore', - message='time.clock has been deprecated', - category=DeprecationWarning) - yield - - @td.skip_if_no('xlrd', '1.0.0') class SharedItems(object): @@ -64,16 +47,12 @@ def setup_method(self, datapath): def get_csv_refdf(self, basename): """ Obtain the reference data from read_csv with the Python engine. - Parameters ---------- - basename : str File base name, excluding file extension. - Returns ------- - dfref : DataFrame """ pref = os.path.join(self.dirpath, basename + '.csv') @@ -83,16 +62,12 @@ def get_csv_refdf(self, basename): def get_excelfile(self, basename, ext): """ Return test data ExcelFile instance. - Parameters ---------- - basename : str File base name, excluding file extension. - Returns ------- - excel : io.excel.ExcelFile """ return ExcelFile(os.path.join(self.dirpath, basename + ext)) @@ -100,1090 +75,29 @@ def get_excelfile(self, basename, ext): def get_exceldf(self, basename, ext, *args, **kwds): """ Return test data DataFrame. - Parameters ---------- - basename : str File base name, excluding file extension. 
- Returns ------- - df : DataFrame """ pth = os.path.join(self.dirpath, basename + ext) return read_excel(pth, *args, **kwds) -class ReadingTestsBase(SharedItems): - # This is based on ExcelWriterBase - - @pytest.fixture(autouse=True, params=['xlrd', None]) - def set_engine(self, request): - func_name = "get_exceldf" - old_func = getattr(self, func_name) - new_func = partial(old_func, engine=request.param) - setattr(self, func_name, new_func) - yield - setattr(self, func_name, old_func) - - @td.skip_if_no("xlrd", "1.0.1") # see gh-22682 - def test_usecols_int(self, ext): - - df_ref = self.get_csv_refdf("test1") - df_ref = df_ref.reindex(columns=["A", "B", "C"]) - - # usecols as int - with tm.assert_produces_warning(FutureWarning, - check_stacklevel=False): - with ignore_xlrd_time_clock_warning(): - df1 = self.get_exceldf("test1", ext, "Sheet1", - index_col=0, usecols=3) - - # usecols as int - with tm.assert_produces_warning(FutureWarning, - check_stacklevel=False): - with ignore_xlrd_time_clock_warning(): - df2 = self.get_exceldf("test1", ext, "Sheet2", skiprows=[1], - index_col=0, usecols=3) - - # parse_cols instead of usecols, usecols as int - with tm.assert_produces_warning(FutureWarning, - check_stacklevel=False): - with ignore_xlrd_time_clock_warning(): - df3 = self.get_exceldf("test1", ext, "Sheet2", skiprows=[1], - index_col=0, parse_cols=3) - - # TODO add index to xls file) - tm.assert_frame_equal(df1, df_ref, check_names=False) - tm.assert_frame_equal(df2, df_ref, check_names=False) - tm.assert_frame_equal(df3, df_ref, check_names=False) - - @td.skip_if_no('xlrd', '1.0.1') # GH-22682 - def test_usecols_list(self, ext): - - dfref = self.get_csv_refdf('test1') - dfref = dfref.reindex(columns=['B', 'C']) - df1 = self.get_exceldf('test1', ext, 'Sheet1', index_col=0, - usecols=[0, 2, 3]) - df2 = self.get_exceldf('test1', ext, 'Sheet2', skiprows=[1], - index_col=0, usecols=[0, 2, 3]) - - with tm.assert_produces_warning(FutureWarning): - with 
ignore_xlrd_time_clock_warning(): - df3 = self.get_exceldf('test1', ext, 'Sheet2', skiprows=[1], - index_col=0, parse_cols=[0, 2, 3]) - - # TODO add index to xls file) - tm.assert_frame_equal(df1, dfref, check_names=False) - tm.assert_frame_equal(df2, dfref, check_names=False) - tm.assert_frame_equal(df3, dfref, check_names=False) - - @td.skip_if_no('xlrd', '1.0.1') # GH-22682 - def test_usecols_str(self, ext): - - dfref = self.get_csv_refdf('test1') - - df1 = dfref.reindex(columns=['A', 'B', 'C']) - df2 = self.get_exceldf('test1', ext, 'Sheet1', index_col=0, - usecols='A:D') - df3 = self.get_exceldf('test1', ext, 'Sheet2', skiprows=[1], - index_col=0, usecols='A:D') - - with tm.assert_produces_warning(FutureWarning): - with ignore_xlrd_time_clock_warning(): - df4 = self.get_exceldf('test1', ext, 'Sheet2', skiprows=[1], - index_col=0, parse_cols='A:D') - - # TODO add index to xls, read xls ignores index name ? - tm.assert_frame_equal(df2, df1, check_names=False) - tm.assert_frame_equal(df3, df1, check_names=False) - tm.assert_frame_equal(df4, df1, check_names=False) - - df1 = dfref.reindex(columns=['B', 'C']) - df2 = self.get_exceldf('test1', ext, 'Sheet1', index_col=0, - usecols='A,C,D') - df3 = self.get_exceldf('test1', ext, 'Sheet2', skiprows=[1], - index_col=0, usecols='A,C,D') - # TODO add index to xls file - tm.assert_frame_equal(df2, df1, check_names=False) - tm.assert_frame_equal(df3, df1, check_names=False) - - df1 = dfref.reindex(columns=['B', 'C']) - df2 = self.get_exceldf('test1', ext, 'Sheet1', index_col=0, - usecols='A,C:D') - df3 = self.get_exceldf('test1', ext, 'Sheet2', skiprows=[1], - index_col=0, usecols='A,C:D') - tm.assert_frame_equal(df2, df1, check_names=False) - tm.assert_frame_equal(df3, df1, check_names=False) - - @pytest.mark.parametrize("usecols", [ - [0, 1, 3], [0, 3, 1], - [1, 0, 3], [1, 3, 0], - [3, 0, 1], [3, 1, 0], - ]) - def test_usecols_diff_positional_int_columns_order(self, ext, usecols): - expected = 
self.get_csv_refdf("test1")[["A", "C"]] - result = self.get_exceldf("test1", ext, "Sheet1", - index_col=0, usecols=usecols) - tm.assert_frame_equal(result, expected, check_names=False) - - @pytest.mark.parametrize("usecols", [ - ["B", "D"], ["D", "B"] - ]) - def test_usecols_diff_positional_str_columns_order(self, ext, usecols): - expected = self.get_csv_refdf("test1")[["B", "D"]] - expected.index = range(len(expected)) - - result = self.get_exceldf("test1", ext, "Sheet1", usecols=usecols) - tm.assert_frame_equal(result, expected, check_names=False) - - def test_read_excel_without_slicing(self, ext): - expected = self.get_csv_refdf("test1") - result = self.get_exceldf("test1", ext, "Sheet1", index_col=0) - tm.assert_frame_equal(result, expected, check_names=False) - - def test_usecols_excel_range_str(self, ext): - expected = self.get_csv_refdf("test1")[["C", "D"]] - result = self.get_exceldf("test1", ext, "Sheet1", - index_col=0, usecols="A,D:E") - tm.assert_frame_equal(result, expected, check_names=False) - - def test_usecols_excel_range_str_invalid(self, ext): - msg = "Invalid column name: E1" - - with pytest.raises(ValueError, match=msg): - self.get_exceldf("test1", ext, "Sheet1", usecols="D:E1") - - def test_index_col_label_error(self, ext): - msg = "list indices must be integers.*, not str" - - with pytest.raises(TypeError, match=msg): - self.get_exceldf("test1", ext, "Sheet1", index_col=["A"], - usecols=["A", "C"]) - - def test_index_col_empty(self, ext): - # see gh-9208 - result = self.get_exceldf("test1", ext, "Sheet3", - index_col=["A", "B", "C"]) - expected = DataFrame(columns=["D", "E", "F"], - index=MultiIndex(levels=[[]] * 3, - codes=[[]] * 3, - names=["A", "B", "C"])) - tm.assert_frame_equal(result, expected) - - @pytest.mark.parametrize("index_col", [None, 2]) - def test_index_col_with_unnamed(self, ext, index_col): - # see gh-18792 - result = self.get_exceldf("test1", ext, "Sheet4", - index_col=index_col) - expected = DataFrame([["i1", "a", "x"], 
["i2", "b", "y"]], - columns=["Unnamed: 0", "col1", "col2"]) - if index_col: - expected = expected.set_index(expected.columns[index_col]) - - tm.assert_frame_equal(result, expected) - - def test_usecols_pass_non_existent_column(self, ext): - msg = ("Usecols do not match columns, " - "columns expected but not found: " + r"\['E'\]") - - with pytest.raises(ValueError, match=msg): - self.get_exceldf("test1", ext, usecols=["E"]) - - def test_usecols_wrong_type(self, ext): - msg = ("'usecols' must either be list-like of " - "all strings, all unicode, all integers or a callable.") - - with pytest.raises(ValueError, match=msg): - self.get_exceldf("test1", ext, usecols=["E1", 0]) - - def test_excel_stop_iterator(self, ext): - - parsed = self.get_exceldf('test2', ext, 'Sheet1') - expected = DataFrame([['aaaa', 'bbbbb']], columns=['Test', 'Test1']) - tm.assert_frame_equal(parsed, expected) - - def test_excel_cell_error_na(self, ext): - - parsed = self.get_exceldf('test3', ext, 'Sheet1') - expected = DataFrame([[np.nan]], columns=['Test']) - tm.assert_frame_equal(parsed, expected) - - def test_excel_passes_na(self, ext): - - excel = self.get_excelfile('test4', ext) - - parsed = read_excel(excel, 'Sheet1', keep_default_na=False, - na_values=['apple']) - expected = DataFrame([['NA'], [1], ['NA'], [np.nan], ['rabbit']], - columns=['Test']) - tm.assert_frame_equal(parsed, expected) - - parsed = read_excel(excel, 'Sheet1', keep_default_na=True, - na_values=['apple']) - expected = DataFrame([[np.nan], [1], [np.nan], [np.nan], ['rabbit']], - columns=['Test']) - tm.assert_frame_equal(parsed, expected) - - # 13967 - excel = self.get_excelfile('test5', ext) - - parsed = read_excel(excel, 'Sheet1', keep_default_na=False, - na_values=['apple']) - expected = DataFrame([['1.#QNAN'], [1], ['nan'], [np.nan], ['rabbit']], - columns=['Test']) - tm.assert_frame_equal(parsed, expected) - - parsed = read_excel(excel, 'Sheet1', keep_default_na=True, - na_values=['apple']) - expected = 
DataFrame([[np.nan], [1], [np.nan], [np.nan], ['rabbit']], - columns=['Test']) - tm.assert_frame_equal(parsed, expected) - - @td.skip_if_no('xlrd', '1.0.1') # GH-22682 - def test_deprecated_sheetname(self, ext): - # gh-17964 - excel = self.get_excelfile('test1', ext) - - with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): - read_excel(excel, sheetname='Sheet1') - - with pytest.raises(TypeError): - read_excel(excel, sheet='Sheet1') - - @td.skip_if_no('xlrd', '1.0.1') # GH-22682 - def test_excel_table_sheet_by_index(self, ext): - - excel = self.get_excelfile('test1', ext) - dfref = self.get_csv_refdf('test1') - - df1 = read_excel(excel, 0, index_col=0) - df2 = read_excel(excel, 1, skiprows=[1], index_col=0) - tm.assert_frame_equal(df1, dfref, check_names=False) - tm.assert_frame_equal(df2, dfref, check_names=False) - - df1 = excel.parse(0, index_col=0) - df2 = excel.parse(1, skiprows=[1], index_col=0) - tm.assert_frame_equal(df1, dfref, check_names=False) - tm.assert_frame_equal(df2, dfref, check_names=False) - - df3 = read_excel(excel, 0, index_col=0, skipfooter=1) - tm.assert_frame_equal(df3, df1.iloc[:-1]) - - with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): - df4 = read_excel(excel, 0, index_col=0, skip_footer=1) - tm.assert_frame_equal(df3, df4) - - df3 = excel.parse(0, index_col=0, skipfooter=1) - tm.assert_frame_equal(df3, df1.iloc[:-1]) - - import xlrd - with pytest.raises(xlrd.XLRDError): - read_excel(excel, 'asdf') - - def test_excel_table(self, ext): - - dfref = self.get_csv_refdf('test1') - - df1 = self.get_exceldf('test1', ext, 'Sheet1', index_col=0) - df2 = self.get_exceldf('test1', ext, 'Sheet2', skiprows=[1], - index_col=0) - # TODO add index to file - tm.assert_frame_equal(df1, dfref, check_names=False) - tm.assert_frame_equal(df2, dfref, check_names=False) - - df3 = self.get_exceldf('test1', ext, 'Sheet1', index_col=0, - skipfooter=1) - tm.assert_frame_equal(df3, df1.iloc[:-1]) - - def 
test_reader_special_dtypes(self, ext): - - expected = DataFrame.from_dict(OrderedDict([ - ("IntCol", [1, 2, -3, 4, 0]), - ("FloatCol", [1.25, 2.25, 1.83, 1.92, 0.0000000005]), - ("BoolCol", [True, False, True, True, False]), - ("StrCol", [1, 2, 3, 4, 5]), - # GH5394 - this is why convert_float isn't vectorized - ("Str2Col", ["a", 3, "c", "d", "e"]), - ("DateCol", [datetime(2013, 10, 30), datetime(2013, 10, 31), - datetime(1905, 1, 1), datetime(2013, 12, 14), - datetime(2015, 3, 14)]) - ])) - basename = 'test_types' - - # should read in correctly and infer types - actual = self.get_exceldf(basename, ext, 'Sheet1') - tm.assert_frame_equal(actual, expected) - - # if not coercing number, then int comes in as float - float_expected = expected.copy() - float_expected["IntCol"] = float_expected["IntCol"].astype(float) - float_expected.loc[float_expected.index[1], "Str2Col"] = 3.0 - actual = self.get_exceldf(basename, ext, 'Sheet1', convert_float=False) - tm.assert_frame_equal(actual, float_expected) - - # check setting Index (assuming xls and xlsx are the same here) - for icol, name in enumerate(expected.columns): - actual = self.get_exceldf(basename, ext, 'Sheet1', index_col=icol) - exp = expected.set_index(name) - tm.assert_frame_equal(actual, exp) - - # convert_float and converters should be different but both accepted - expected["StrCol"] = expected["StrCol"].apply(str) - actual = self.get_exceldf( - basename, ext, 'Sheet1', converters={"StrCol": str}) - tm.assert_frame_equal(actual, expected) - - no_convert_float = float_expected.copy() - no_convert_float["StrCol"] = no_convert_float["StrCol"].apply(str) - actual = self.get_exceldf(basename, ext, 'Sheet1', convert_float=False, - converters={"StrCol": str}) - tm.assert_frame_equal(actual, no_convert_float) - - # GH8212 - support for converters and missing values - def test_reader_converters(self, ext): - - basename = 'test_converters' - - expected = DataFrame.from_dict(OrderedDict([ - ("IntCol", [1, 2, -3, -1000, 0]), 
- ("FloatCol", [12.5, np.nan, 18.3, 19.2, 0.000000005]), - ("BoolCol", ['Found', 'Found', 'Found', 'Not found', 'Found']), - ("StrCol", ['1', np.nan, '3', '4', '5']), - ])) - - converters = {'IntCol': lambda x: int(x) if x != '' else -1000, - 'FloatCol': lambda x: 10 * x if x else np.nan, - 2: lambda x: 'Found' if x != '' else 'Not found', - 3: lambda x: str(x) if x else '', - } - - # should read in correctly and set types of single cells (not array - # dtypes) - actual = self.get_exceldf(basename, ext, 'Sheet1', - converters=converters) - tm.assert_frame_equal(actual, expected) - - def test_reader_dtype(self, ext): - # GH 8212 - basename = 'testdtype' - actual = self.get_exceldf(basename, ext) - - expected = DataFrame({ - 'a': [1, 2, 3, 4], - 'b': [2.5, 3.5, 4.5, 5.5], - 'c': [1, 2, 3, 4], - 'd': [1.0, 2.0, np.nan, 4.0]}).reindex( - columns=['a', 'b', 'c', 'd']) - - tm.assert_frame_equal(actual, expected) - - actual = self.get_exceldf(basename, ext, - dtype={'a': 'float64', - 'b': 'float32', - 'c': str}) - - expected['a'] = expected['a'].astype('float64') - expected['b'] = expected['b'].astype('float32') - expected['c'] = ['001', '002', '003', '004'] - tm.assert_frame_equal(actual, expected) - - with pytest.raises(ValueError): - self.get_exceldf(basename, ext, dtype={'d': 'int64'}) - - @pytest.mark.parametrize("dtype,expected", [ - (None, - DataFrame({ - "a": [1, 2, 3, 4], - "b": [2.5, 3.5, 4.5, 5.5], - "c": [1, 2, 3, 4], - "d": [1.0, 2.0, np.nan, 4.0] - })), - ({"a": "float64", - "b": "float32", - "c": str, - "d": str - }, - DataFrame({ - "a": Series([1, 2, 3, 4], dtype="float64"), - "b": Series([2.5, 3.5, 4.5, 5.5], dtype="float32"), - "c": ["001", "002", "003", "004"], - "d": ["1", "2", np.nan, "4"] - })), - ]) - def test_reader_dtype_str(self, ext, dtype, expected): - # see gh-20377 - basename = "testdtype" - - actual = self.get_exceldf(basename, ext, dtype=dtype) - tm.assert_frame_equal(actual, expected) - - def test_reading_all_sheets(self, ext): - # Test 
reading all sheetnames by setting sheetname to None, - # Ensure a dict is returned. - # See PR #9450 - basename = 'test_multisheet' - dfs = self.get_exceldf(basename, ext, sheet_name=None) - # ensure this is not alphabetical to test order preservation - expected_keys = ['Charlie', 'Alpha', 'Beta'] - tm.assert_contains_all(expected_keys, dfs.keys()) - # Issue 9930 - # Ensure sheet order is preserved - assert expected_keys == list(dfs.keys()) - - def test_reading_multiple_specific_sheets(self, ext): - # Test reading specific sheetnames by specifying a mixed list - # of integers and strings, and confirm that duplicated sheet - # references (positions/names) are removed properly. - # Ensure a dict is returned - # See PR #9450 - basename = 'test_multisheet' - # Explicitly request duplicates. Only the set should be returned. - expected_keys = [2, 'Charlie', 'Charlie'] - dfs = self.get_exceldf(basename, ext, sheet_name=expected_keys) - expected_keys = list(set(expected_keys)) - tm.assert_contains_all(expected_keys, dfs.keys()) - assert len(expected_keys) == len(dfs.keys()) - - def test_reading_all_sheets_with_blank(self, ext): - # Test reading all sheetnames by setting sheetname to None, - # In the case where some sheets are blank. 
- # Issue #11711 - basename = 'blank_with_header' - dfs = self.get_exceldf(basename, ext, sheet_name=None) - expected_keys = ['Sheet1', 'Sheet2', 'Sheet3'] - tm.assert_contains_all(expected_keys, dfs.keys()) - - # GH6403 - def test_read_excel_blank(self, ext): - actual = self.get_exceldf('blank', ext, 'Sheet1') - tm.assert_frame_equal(actual, DataFrame()) - - def test_read_excel_blank_with_header(self, ext): - expected = DataFrame(columns=['col_1', 'col_2']) - actual = self.get_exceldf('blank_with_header', ext, 'Sheet1') - tm.assert_frame_equal(actual, expected) - - @td.skip_if_no("xlwt") - @td.skip_if_no("openpyxl") - @pytest.mark.parametrize("header,expected", [ - (None, DataFrame([np.nan] * 4)), - (0, DataFrame({"Unnamed: 0": [np.nan] * 3})) - ]) - def test_read_one_empty_col_no_header(self, ext, header, expected): - # xref gh-12292 - filename = "no_header" - df = pd.DataFrame( - [["", 1, 100], - ["", 2, 200], - ["", 3, 300], - ["", 4, 400]] - ) - - with ensure_clean(ext) as path: - df.to_excel(path, filename, index=False, header=False) - result = read_excel(path, filename, usecols=[0], header=header) - - tm.assert_frame_equal(result, expected) - - @td.skip_if_no("xlwt") - @td.skip_if_no("openpyxl") - @pytest.mark.parametrize("header,expected", [ - (None, DataFrame([0] + [np.nan] * 4)), - (0, DataFrame([np.nan] * 4)) - ]) - def test_read_one_empty_col_with_header(self, ext, header, expected): - filename = "with_header" - df = pd.DataFrame( - [["", 1, 100], - ["", 2, 200], - ["", 3, 300], - ["", 4, 400]] - ) - - with ensure_clean(ext) as path: - df.to_excel(path, 'with_header', index=False, header=True) - result = read_excel(path, filename, usecols=[0], header=header) - - tm.assert_frame_equal(result, expected) - - @td.skip_if_no('openpyxl') - @td.skip_if_no('xlwt') - def test_set_column_names_in_parameter(self, ext): - # GH 12870 : pass down column names associated with - # keyword argument names - refdf = pd.DataFrame([[1, 'foo'], [2, 'bar'], - [3, 'baz']], 
columns=['a', 'b']) - - with ensure_clean(ext) as pth: - with ExcelWriter(pth) as writer: - refdf.to_excel(writer, 'Data_no_head', - header=False, index=False) - refdf.to_excel(writer, 'Data_with_head', index=False) - - refdf.columns = ['A', 'B'] - - with ExcelFile(pth) as reader: - xlsdf_no_head = read_excel(reader, 'Data_no_head', - header=None, names=['A', 'B']) - xlsdf_with_head = read_excel(reader, 'Data_with_head', - index_col=None, names=['A', 'B']) - - tm.assert_frame_equal(xlsdf_no_head, refdf) - tm.assert_frame_equal(xlsdf_with_head, refdf) - - def test_date_conversion_overflow(self, ext): - # GH 10001 : pandas.ExcelFile ignore parse_dates=False - expected = pd.DataFrame([[pd.Timestamp('2016-03-12'), 'Marc Johnson'], - [pd.Timestamp('2016-03-16'), 'Jack Black'], - [1e+20, 'Timothy Brown']], - columns=['DateColWithBigInt', 'StringCol']) - - result = self.get_exceldf('testdateoverflow', ext) - tm.assert_frame_equal(result, expected) - - @td.skip_if_no("xlrd", "1.0.1") # see gh-22682 - def test_sheet_name_and_sheetname(self, ext): - # gh-10559: Minor improvement: Change "sheet_name" to "sheetname" - # gh-10969: DOC: Consistent var names (sheetname vs sheet_name) - # gh-12604: CLN GH10559 Rename sheetname variable to sheet_name - # gh-20920: ExcelFile.parse() and pd.read_xlsx() have different - # behavior for "sheetname" argument - filename = "test1" - sheet_name = "Sheet1" - - df_ref = self.get_csv_refdf(filename) - df1 = self.get_exceldf(filename, ext, - sheet_name=sheet_name, index_col=0) # doc - with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): - with ignore_xlrd_time_clock_warning(): - df2 = self.get_exceldf(filename, ext, index_col=0, - sheetname=sheet_name) # backward compat - - excel = self.get_excelfile(filename, ext) - df1_parse = excel.parse(sheet_name=sheet_name, index_col=0) # doc - with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): - df2_parse = excel.parse(index_col=0, - sheetname=sheet_name) # 
backward compat - - tm.assert_frame_equal(df1, df_ref, check_names=False) - tm.assert_frame_equal(df2, df_ref, check_names=False) - tm.assert_frame_equal(df1_parse, df_ref, check_names=False) - tm.assert_frame_equal(df2_parse, df_ref, check_names=False) - - def test_sheet_name_both_raises(self, ext): - with pytest.raises(TypeError, match="Cannot specify both"): - self.get_exceldf('test1', ext, sheetname='Sheet1', - sheet_name='Sheet1') - - excel = self.get_excelfile('test1', ext) - with pytest.raises(TypeError, match="Cannot specify both"): - excel.parse(sheetname='Sheet1', - sheet_name='Sheet1') - - def test_excel_read_buffer(self, ext): - - pth = os.path.join(self.dirpath, 'test1' + ext) - expected = read_excel(pth, 'Sheet1', index_col=0) - with open(pth, 'rb') as f: - actual = read_excel(f, 'Sheet1', index_col=0) - tm.assert_frame_equal(expected, actual) - - with open(pth, 'rb') as f: - xls = ExcelFile(f) - actual = read_excel(xls, 'Sheet1', index_col=0) - tm.assert_frame_equal(expected, actual) - - def test_bad_engine_raises(self, ext): - bad_engine = 'foo' - with pytest.raises(ValueError, message="Unknown engine: foo"): - read_excel('', engine=bad_engine) - - @tm.network - def test_read_from_http_url(self, ext): - url = ('https://raw.github.com/pandas-dev/pandas/master/' - 'pandas/tests/io/data/test1' + ext) - url_table = read_excel(url) - local_table = self.get_exceldf('test1', ext) - tm.assert_frame_equal(url_table, local_table) - - @td.skip_if_not_us_locale - def test_read_from_s3_url(self, ext, s3_resource): - # Bucket "pandas-test" created in tests/io/conftest.py - file_name = os.path.join(self.dirpath, 'test1' + ext) - - with open(file_name, "rb") as f: - s3_resource.Bucket("pandas-test").put_object(Key="test1" + ext, - Body=f) - - url = ('s3://pandas-test/test1' + ext) - url_table = read_excel(url) - local_table = self.get_exceldf('test1', ext) - tm.assert_frame_equal(url_table, local_table) - - @pytest.mark.slow - # ignore warning from old xlrd - 
@pytest.mark.filterwarnings("ignore:This metho:PendingDeprecationWarning") - def test_read_from_file_url(self, ext): - - # FILE - localtable = os.path.join(self.dirpath, 'test1' + ext) - local_table = read_excel(localtable) - - try: - url_table = read_excel('file://localhost/' + localtable) - except URLError: - # fails on some systems - import platform - pytest.skip("failing on %s" % - ' '.join(platform.uname()).strip()) - - tm.assert_frame_equal(url_table, local_table) - - @td.skip_if_no('pathlib') - def test_read_from_pathlib_path(self, ext): - - # GH12655 - from pathlib import Path - - str_path = os.path.join(self.dirpath, 'test1' + ext) - expected = read_excel(str_path, 'Sheet1', index_col=0) - - path_obj = Path(self.dirpath, 'test1' + ext) - actual = read_excel(path_obj, 'Sheet1', index_col=0) - - tm.assert_frame_equal(expected, actual) - - @td.skip_if_no('py.path') - def test_read_from_py_localpath(self, ext): - - # GH12655 - from py.path import local as LocalPath - - str_path = os.path.join(self.dirpath, 'test1' + ext) - expected = read_excel(str_path, 'Sheet1', index_col=0) - - abs_dir = os.path.abspath(self.dirpath) - path_obj = LocalPath(abs_dir).join('test1' + ext) - actual = read_excel(path_obj, 'Sheet1', index_col=0) - - tm.assert_frame_equal(expected, actual) - - def test_reader_closes_file(self, ext): - - pth = os.path.join(self.dirpath, 'test1' + ext) - f = open(pth, 'rb') - with ExcelFile(f) as xlsx: - # parses okay - read_excel(xlsx, 'Sheet1', index_col=0) - - assert f.closed - - @td.skip_if_no("xlwt") - @td.skip_if_no("openpyxl") - def test_creating_and_reading_multiple_sheets(self, ext): - # see gh-9450 - # - # Test reading multiple sheets, from a runtime - # created Excel file with multiple sheets. 
- def tdf(col_sheet_name): - d, i = [11, 22, 33], [1, 2, 3] - return DataFrame(d, i, columns=[col_sheet_name]) - - sheets = ["AAA", "BBB", "CCC"] - - dfs = [tdf(s) for s in sheets] - dfs = dict(zip(sheets, dfs)) - - with ensure_clean(ext) as pth: - with ExcelWriter(pth) as ew: - for sheetname, df in iteritems(dfs): - df.to_excel(ew, sheetname) - - dfs_returned = read_excel(pth, sheet_name=sheets, index_col=0) - - for s in sheets: - tm.assert_frame_equal(dfs[s], dfs_returned[s]) - - def test_reader_seconds(self, ext): - - # Test reading times with and without milliseconds. GH5945. - expected = DataFrame.from_dict({"Time": [time(1, 2, 3), - time(2, 45, 56, 100000), - time(4, 29, 49, 200000), - time(6, 13, 42, 300000), - time(7, 57, 35, 400000), - time(9, 41, 28, 500000), - time(11, 25, 21, 600000), - time(13, 9, 14, 700000), - time(14, 53, 7, 800000), - time(16, 37, 0, 900000), - time(18, 20, 54)]}) - - actual = self.get_exceldf('times_1900', ext, 'Sheet1') - tm.assert_frame_equal(actual, expected) - - actual = self.get_exceldf('times_1904', ext, 'Sheet1') - tm.assert_frame_equal(actual, expected) - - def test_read_excel_multiindex(self, ext): - # see gh-4679 - mi = MultiIndex.from_product([["foo", "bar"], ["a", "b"]]) - mi_file = os.path.join(self.dirpath, "testmultiindex" + ext) - - # "mi_column" sheet - expected = DataFrame([[1, 2.5, pd.Timestamp("2015-01-01"), True], - [2, 3.5, pd.Timestamp("2015-01-02"), False], - [3, 4.5, pd.Timestamp("2015-01-03"), False], - [4, 5.5, pd.Timestamp("2015-01-04"), True]], - columns=mi) - - actual = read_excel(mi_file, "mi_column", header=[0, 1], index_col=0) - tm.assert_frame_equal(actual, expected) - - # "mi_index" sheet - expected.index = mi - expected.columns = ["a", "b", "c", "d"] - - actual = read_excel(mi_file, "mi_index", index_col=[0, 1]) - tm.assert_frame_equal(actual, expected, check_names=False) - - # "both" sheet - expected.columns = mi - - actual = read_excel(mi_file, "both", index_col=[0, 1], header=[0, 1]) - 
tm.assert_frame_equal(actual, expected, check_names=False) - - # "mi_index_name" sheet - expected.columns = ["a", "b", "c", "d"] - expected.index = mi.set_names(["ilvl1", "ilvl2"]) - - actual = read_excel(mi_file, "mi_index_name", index_col=[0, 1]) - tm.assert_frame_equal(actual, expected) - - # "mi_column_name" sheet - expected.index = list(range(4)) - expected.columns = mi.set_names(["c1", "c2"]) - actual = read_excel(mi_file, "mi_column_name", - header=[0, 1], index_col=0) - tm.assert_frame_equal(actual, expected) - - # see gh-11317 - # "name_with_int" sheet - expected.columns = mi.set_levels( - [1, 2], level=1).set_names(["c1", "c2"]) - - actual = read_excel(mi_file, "name_with_int", - index_col=0, header=[0, 1]) - tm.assert_frame_equal(actual, expected) - - # "both_name" sheet - expected.columns = mi.set_names(["c1", "c2"]) - expected.index = mi.set_names(["ilvl1", "ilvl2"]) - - actual = read_excel(mi_file, "both_name", - index_col=[0, 1], header=[0, 1]) - tm.assert_frame_equal(actual, expected) - - # "both_skiprows" sheet - actual = read_excel(mi_file, "both_name_skiprows", index_col=[0, 1], - header=[0, 1], skiprows=2) - tm.assert_frame_equal(actual, expected) - - def test_read_excel_multiindex_header_only(self, ext): - # see gh-11733. - # - # Don't try to parse a header name if there isn't one. 
- mi_file = os.path.join(self.dirpath, "testmultiindex" + ext) - result = read_excel(mi_file, "index_col_none", header=[0, 1]) - - exp_columns = MultiIndex.from_product([("A", "B"), ("key", "val")]) - expected = DataFrame([[1, 2, 3, 4]] * 2, columns=exp_columns) - tm.assert_frame_equal(result, expected) - - @td.skip_if_no("xlsxwriter") - def test_read_excel_multiindex_empty_level(self, ext): - # see gh-12453 - with ensure_clean(ext) as path: - df = DataFrame({ - ("One", "x"): {0: 1}, - ("Two", "X"): {0: 3}, - ("Two", "Y"): {0: 7}, - ("Zero", ""): {0: 0} - }) - - expected = DataFrame({ - ("One", "x"): {0: 1}, - ("Two", "X"): {0: 3}, - ("Two", "Y"): {0: 7}, - ("Zero", "Unnamed: 4_level_1"): {0: 0} - }) - - df.to_excel(path) - actual = pd.read_excel(path, header=[0, 1], index_col=0) - tm.assert_frame_equal(actual, expected) - - df = pd.DataFrame({ - ("Beg", ""): {0: 0}, - ("Middle", "x"): {0: 1}, - ("Tail", "X"): {0: 3}, - ("Tail", "Y"): {0: 7} - }) - - expected = pd.DataFrame({ - ("Beg", "Unnamed: 1_level_1"): {0: 0}, - ("Middle", "x"): {0: 1}, - ("Tail", "X"): {0: 3}, - ("Tail", "Y"): {0: 7} - }) - - df.to_excel(path) - actual = pd.read_excel(path, header=[0, 1], index_col=0) - tm.assert_frame_equal(actual, expected) - - @td.skip_if_no("xlsxwriter") - @pytest.mark.parametrize("c_idx_names", [True, False]) - @pytest.mark.parametrize("r_idx_names", [True, False]) - @pytest.mark.parametrize("c_idx_levels", [1, 3]) - @pytest.mark.parametrize("r_idx_levels", [1, 3]) - def test_excel_multindex_roundtrip(self, ext, c_idx_names, r_idx_names, - c_idx_levels, r_idx_levels): - # see gh-4679 - with ensure_clean(ext) as pth: - if c_idx_levels == 1 and c_idx_names: - pytest.skip("Column index name cannot be " - "serialized unless it's a MultiIndex") - - # Empty name case current read in as - # unnamed levels, not Nones. 
- check_names = r_idx_names or r_idx_levels <= 1 - - df = mkdf(5, 5, c_idx_names, r_idx_names, - c_idx_levels, r_idx_levels) - df.to_excel(pth) - - act = pd.read_excel(pth, index_col=list(range(r_idx_levels)), - header=list(range(c_idx_levels))) - tm.assert_frame_equal(df, act, check_names=check_names) - - df.iloc[0, :] = np.nan - df.to_excel(pth) - - act = pd.read_excel(pth, index_col=list(range(r_idx_levels)), - header=list(range(c_idx_levels))) - tm.assert_frame_equal(df, act, check_names=check_names) - - df.iloc[-1, :] = np.nan - df.to_excel(pth) - act = pd.read_excel(pth, index_col=list(range(r_idx_levels)), - header=list(range(c_idx_levels))) - tm.assert_frame_equal(df, act, check_names=check_names) - - def test_excel_old_index_format(self, ext): - # see gh-4679 - filename = "test_index_name_pre17" + ext - in_file = os.path.join(self.dirpath, filename) - - # We detect headers to determine if index names exist, so - # that "index" name in the "names" version of the data will - # now be interpreted as rows that include null data. 
- data = np.array([[None, None, None, None, None], - ["R0C0", "R0C1", "R0C2", "R0C3", "R0C4"], - ["R1C0", "R1C1", "R1C2", "R1C3", "R1C4"], - ["R2C0", "R2C1", "R2C2", "R2C3", "R2C4"], - ["R3C0", "R3C1", "R3C2", "R3C3", "R3C4"], - ["R4C0", "R4C1", "R4C2", "R4C3", "R4C4"]]) - columns = ["C_l0_g0", "C_l0_g1", "C_l0_g2", "C_l0_g3", "C_l0_g4"] - mi = MultiIndex(levels=[["R0", "R_l0_g0", "R_l0_g1", - "R_l0_g2", "R_l0_g3", "R_l0_g4"], - ["R1", "R_l1_g0", "R_l1_g1", - "R_l1_g2", "R_l1_g3", "R_l1_g4"]], - codes=[[0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5]], - names=[None, None]) - si = Index(["R0", "R_l0_g0", "R_l0_g1", "R_l0_g2", - "R_l0_g3", "R_l0_g4"], name=None) - - expected = pd.DataFrame(data, index=si, columns=columns) - - actual = pd.read_excel(in_file, "single_names", index_col=0) - tm.assert_frame_equal(actual, expected) - - expected.index = mi - - actual = pd.read_excel(in_file, "multi_names", index_col=[0, 1]) - tm.assert_frame_equal(actual, expected) - - # The analogous versions of the "names" version data - # where there are explicitly no names for the indices. 
- data = np.array([["R0C0", "R0C1", "R0C2", "R0C3", "R0C4"], - ["R1C0", "R1C1", "R1C2", "R1C3", "R1C4"], - ["R2C0", "R2C1", "R2C2", "R2C3", "R2C4"], - ["R3C0", "R3C1", "R3C2", "R3C3", "R3C4"], - ["R4C0", "R4C1", "R4C2", "R4C3", "R4C4"]]) - columns = ["C_l0_g0", "C_l0_g1", "C_l0_g2", "C_l0_g3", "C_l0_g4"] - mi = MultiIndex(levels=[["R_l0_g0", "R_l0_g1", "R_l0_g2", - "R_l0_g3", "R_l0_g4"], - ["R_l1_g0", "R_l1_g1", "R_l1_g2", - "R_l1_g3", "R_l1_g4"]], - codes=[[0, 1, 2, 3, 4], [0, 1, 2, 3, 4]], - names=[None, None]) - si = Index(["R_l0_g0", "R_l0_g1", "R_l0_g2", - "R_l0_g3", "R_l0_g4"], name=None) - - expected = pd.DataFrame(data, index=si, columns=columns) - - actual = pd.read_excel(in_file, "single_no_names", index_col=0) - tm.assert_frame_equal(actual, expected) - - expected.index = mi - - actual = pd.read_excel(in_file, "multi_no_names", index_col=[0, 1]) - tm.assert_frame_equal(actual, expected, check_names=False) - - def test_read_excel_bool_header_arg(self, ext): - # GH 6114 - for arg in [True, False]: - with pytest.raises(TypeError): - pd.read_excel(os.path.join(self.dirpath, 'test1' + ext), - header=arg) - - def test_read_excel_chunksize(self, ext): - # GH 8011 - with pytest.raises(NotImplementedError): - pd.read_excel(os.path.join(self.dirpath, 'test1' + ext), - chunksize=100) - - @td.skip_if_no("xlwt") - @td.skip_if_no("openpyxl") - def test_read_excel_parse_dates(self, ext): - # see gh-11544, gh-12051 - df = DataFrame( - {"col": [1, 2, 3], - "date_strings": pd.date_range("2012-01-01", periods=3)}) - df2 = df.copy() - df2["date_strings"] = df2["date_strings"].dt.strftime("%m/%d/%Y") - - with ensure_clean(ext) as pth: - df2.to_excel(pth) - - res = read_excel(pth, index_col=0) - tm.assert_frame_equal(df2, res) - - res = read_excel(pth, parse_dates=["date_strings"], index_col=0) - tm.assert_frame_equal(df, res) - - date_parser = lambda x: pd.datetime.strptime(x, "%m/%d/%Y") - res = read_excel(pth, parse_dates=["date_strings"], - date_parser=date_parser, 
index_col=0) - tm.assert_frame_equal(df, res) - - def test_read_excel_skiprows_list(self, ext): - # GH 4903 - actual = pd.read_excel(os.path.join(self.dirpath, - 'testskiprows' + ext), - 'skiprows_list', skiprows=[0, 2]) - expected = DataFrame([[1, 2.5, pd.Timestamp('2015-01-01'), True], - [2, 3.5, pd.Timestamp('2015-01-02'), False], - [3, 4.5, pd.Timestamp('2015-01-03'), False], - [4, 5.5, pd.Timestamp('2015-01-04'), True]], - columns=['a', 'b', 'c', 'd']) - tm.assert_frame_equal(actual, expected) - - actual = pd.read_excel(os.path.join(self.dirpath, - 'testskiprows' + ext), - 'skiprows_list', skiprows=np.array([0, 2])) - tm.assert_frame_equal(actual, expected) - - def test_read_excel_nrows(self, ext): - # GH 16645 - num_rows_to_pull = 5 - actual = pd.read_excel(os.path.join(self.dirpath, 'test1' + ext), - nrows=num_rows_to_pull) - expected = pd.read_excel(os.path.join(self.dirpath, - 'test1' + ext)) - expected = expected[:num_rows_to_pull] - tm.assert_frame_equal(actual, expected) - - def test_read_excel_nrows_greater_than_nrows_in_file(self, ext): - # GH 16645 - expected = pd.read_excel(os.path.join(self.dirpath, - 'test1' + ext)) - num_records_in_file = len(expected) - num_rows_to_pull = num_records_in_file + 10 - actual = pd.read_excel(os.path.join(self.dirpath, 'test1' + ext), - nrows=num_rows_to_pull) - tm.assert_frame_equal(actual, expected) - - def test_read_excel_nrows_non_integer_parameter(self, ext): - # GH 16645 - msg = "'nrows' must be an integer >=0" - with pytest.raises(ValueError, match=msg): - pd.read_excel(os.path.join(self.dirpath, 'test1' + ext), - nrows='5') - - def test_read_excel_squeeze(self, ext): - # GH 12157 - f = os.path.join(self.dirpath, 'test_squeeze' + ext) - - actual = pd.read_excel(f, 'two_columns', index_col=0, squeeze=True) - expected = pd.Series([2, 3, 4], [4, 5, 6], name='b') - expected.index.name = 'a' - tm.assert_series_equal(actual, expected) - - actual = pd.read_excel(f, 'two_columns', squeeze=True) - expected = 
pd.DataFrame({'a': [4, 5, 6], - 'b': [2, 3, 4]}) - tm.assert_frame_equal(actual, expected) - - actual = pd.read_excel(f, 'one_column', squeeze=True) - expected = pd.Series([1, 2, 3], name='a') - tm.assert_series_equal(actual, expected) - - -@pytest.mark.parametrize("ext", ['.xls', '.xlsx', '.xlsm']) -class TestXlrdReader(ReadingTestsBase): - """ - This is the base class for the xlrd tests, and 3 different file formats - are supported: xls, xlsx, xlsm - """ - - @td.skip_if_no("xlwt") - def test_read_xlrd_book(self, ext): - import xlrd - df = self.frame - - engine = "xlrd" - sheet_name = "SheetA" - - with ensure_clean(ext) as pth: - df.to_excel(pth, sheet_name) - book = xlrd.open_workbook(pth) - - with ExcelFile(book, engine=engine) as xl: - result = read_excel(xl, sheet_name, index_col=0) - tm.assert_frame_equal(df, result) - - result = read_excel(book, sheet_name=sheet_name, - engine=engine, index_col=0) - tm.assert_frame_equal(df, result) - - class _WriterBase(SharedItems): @pytest.fixture(autouse=True) def set_engine_and_path(self, request, merge_cells, engine, ext): """Fixture to set engine and open file for use in each test case - Rather than requiring `engine=...` to be provided explicitly as an argument in each test, this fixture sets a global option to dictate which engine should be used to write Excel files. After executing the test it rolls back said change to the global option. 
- It also uses a context manager to open a temporary excel file for the function to write to, accessible via `self.path` - Notes ----- This fixture will run as part of each test method defined in the @@ -1853,7 +767,6 @@ def roundtrip(data, header=True, parser_hdr=0, index=True): nrows = 5 ncols = 3 - from pandas.util.testing import makeCustomDataframe as mkdf # ensure limited functionality in 0.10 # override of gh-2370 until sorted out in 0.11 diff --git a/pandas/tests/io/formats/test_to_excel.py b/pandas/tests/io/formats/test_to_excel.py index 13eb517fcab6a..2bbbc002db729 100644 --- a/pandas/tests/io/formats/test_to_excel.py +++ b/pandas/tests/io/formats/test_to_excel.py @@ -1,6 +1,6 @@ """Tests formatting as writer-agnostic ExcelCells -ExcelFormatter is tested implicitly in pandas/tests/io/test_excel.py +ExcelFormatter is tested implicitly in pandas/tests/io/mainF.py """ import pytest
Split tests/io/test_excel.py into two files, base.py and xlrd.py located in tests/io/excel/. The previous file was very large and a logical split has been applied to separate the `xlrd` tests. - [ ] closes #24472 - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/24749
2019-01-13T11:25:26Z
2019-03-09T20:09:50Z
null
2019-03-09T20:10:00Z
ENH: Only apply first group once in fast GroupBy.apply
diff --git a/doc/source/user_guide/groupby.rst b/doc/source/user_guide/groupby.rst index e4dd82afcdf65..4f116a42253e5 100644 --- a/doc/source/user_guide/groupby.rst +++ b/doc/source/user_guide/groupby.rst @@ -946,23 +946,6 @@ that is itself a series, and possibly upcast the result to a DataFrame: So depending on the path taken, and exactly what you are grouping. Thus the grouped columns(s) may be included in the output as well as set the indices. -.. warning:: - - In the current implementation apply calls func twice on the - first group to decide whether it can take a fast or slow code - path. This can lead to unexpected behavior if func has - side-effects, as they will take effect twice for the first - group. - - .. ipython:: python - - d = pd.DataFrame({"a": ["x", "y"], "b": [1, 2]}) - def identity(df): - print(df) - return df - - d.groupby("a").apply(identity) - Other useful features --------------------- diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst index 0c3ef237126c2..561562f367db2 100644 --- a/doc/source/whatsnew/v0.25.0.rst +++ b/doc/source/whatsnew/v0.25.0.rst @@ -73,6 +73,50 @@ is respected in indexing. (:issue:`24076`, :issue:`16785`) df = pd.DataFrame([0], index=pd.DatetimeIndex(['2019-01-01'], tz='US/Pacific')) df['2019-01-01 12:00:00+04:00':'2019-01-01 13:00:00+04:00'] +.. _whatsnew_0250.api_breaking.groupby_apply_first_group_once: + +GroupBy.apply on ``DataFrame`` evaluates first group only once +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The implementation of :meth:`DataFrameGroupBy.apply() <pandas.core.groupby.DataFrameGroupBy.apply>` +previously evaluated the supplied function consistently twice on the first group +to infer if it is safe to use a fast code path. Particularly for functions with +side effects, this was an undesired behavior and may have led to surprises. 
+ +(:issue:`2936`, :issue:`2656`, :issue:`7739`, :issue:`10519`, :issue:`12155`, +:issue:`20084`, :issue:`21417`) + +Now every group is evaluated only a single time. + +.. ipython:: python + + df = pd.DataFrame({"a": ["x", "y"], "b": [1, 2]}) + df + + def func(group): + print(group.name) + return group + +*Previous Behaviour*: + +.. code-block:: python + + In [3]: df.groupby('a').apply(func) + x + x + y + Out[3]: + a b + 0 x 1 + 1 y 2 + +*New Behaviour*: + +.. ipython:: python + + df.groupby("a").apply(func) + + Concatenating Sparse Values ^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -83,14 +127,14 @@ Series or DataFrame with sparse values, rather than a ``SparseDataFrame`` (:issu df = pd.DataFrame({"A": pd.SparseArray([0, 1])}) -*Previous Behavior:* +*Previous Behavior*: .. code-block:: ipython In [2]: type(pd.concat([df, df])) pandas.core.sparse.frame.SparseDataFrame -*New Behavior:* +*New Behavior*: .. ipython:: python diff --git a/pandas/_libs/reduction.pyx b/pandas/_libs/reduction.pyx index 517d59c399179..5df6e02d6a040 100644 --- a/pandas/_libs/reduction.pyx +++ b/pandas/_libs/reduction.pyx @@ -509,17 +509,6 @@ def apply_frame_axis0(object frame, object f, object names, results = [] - # Need to infer if our low-level mucking is going to cause a segfault - if n > 0: - chunk = frame.iloc[starts[0]:ends[0]] - object.__setattr__(chunk, 'name', names[0]) - try: - result = f(chunk) - if result is chunk: - raise InvalidApply('Function unsafe for fast apply') - except: - raise InvalidApply('Let this error raise above us') - slider = BlockSlider(frame) mutated = False @@ -529,13 +518,18 @@ def apply_frame_axis0(object frame, object f, object names, slider.move(starts[i], ends[i]) item_cache.clear() # ugh + chunk = slider.dummy + object.__setattr__(chunk, 'name', names[i]) - object.__setattr__(slider.dummy, 'name', names[i]) - piece = f(slider.dummy) + try: + piece = f(chunk) + except: + raise InvalidApply('Let this error raise above us') - # I'm paying the price for index-sharing, 
ugh + # Need to infer if low level index slider will cause segfaults + require_slow_apply = i == 0 and piece is chunk try: - if piece.index is slider.dummy.index: + if piece.index is chunk.index: piece = piece.copy(deep='all') else: mutated = True @@ -543,6 +537,12 @@ def apply_frame_axis0(object frame, object f, object names, pass results.append(piece) + + # If the data was modified inplace we need to + # take the slow path to not risk segfaults + # we have already computed the first piece + if require_slow_apply: + break finally: slider.reset() diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py index 525af28c27ff5..ec22548de6da3 100644 --- a/pandas/core/groupby/ops.py +++ b/pandas/core/groupby/ops.py @@ -165,26 +165,45 @@ def apply(self, f, data, axis=0): mutated = self.mutated splitter = self._get_splitter(data, axis=axis) group_keys = self._get_group_keys() + result_values = None # oh boy f_name = com.get_callable_name(f) if (f_name not in base.plotting_methods and hasattr(splitter, 'fast_apply') and axis == 0): try: - values, mutated = splitter.fast_apply(f, group_keys) - return group_keys, values, mutated + result_values, mutated = splitter.fast_apply(f, group_keys) + + # If the fast apply path could be used we can return here. + # Otherwise we need to fall back to the slow implementation. + if len(result_values) == len(group_keys): + return group_keys, result_values, mutated + except reduction.InvalidApply: - # we detect a mutation of some kind - # so take slow path + # Cannot fast apply on MultiIndex (_has_complex_internals). + # This Exception is also raised if `f` triggers an exception + # but it is preferable to raise the exception in Python. pass except Exception: # raise this error to the caller pass - result_values = [] for key, (i, group) in zip(group_keys, splitter): object.__setattr__(group, 'name', key) + # result_values is None if fast apply path wasn't taken + # or fast apply aborted with an unexpected exception. 
+ # In either case, initialize the result list and perform + # the slow iteration. + if result_values is None: + result_values = [] + + # If result_values is not None we're in the case that the + # fast apply loop was broken prematurely but we have + # already the result for the first group which we can reuse. + elif i == 0: + continue + # group might be modified group_axes = _get_axes(group) res = f(group) @@ -854,10 +873,7 @@ def fast_apply(self, f, names): return [], True sdata = self._get_sorted_data() - results, mutated = reduction.apply_frame_axis0(sdata, f, names, - starts, ends) - - return results, mutated + return reduction.apply_frame_axis0(sdata, f, names, starts, ends) def _chop(self, sdata, slice_obj): if self.axis == 0: diff --git a/pandas/tests/groupby/test_apply.py b/pandas/tests/groupby/test_apply.py index dea7e518ac605..d753556f9978d 100644 --- a/pandas/tests/groupby/test_apply.py +++ b/pandas/tests/groupby/test_apply.py @@ -102,9 +102,80 @@ def f(g): group_keys = grouper._get_group_keys() values, mutated = splitter.fast_apply(f, group_keys) + assert not mutated +@pytest.mark.parametrize( + "df, group_names", + [ + (DataFrame({"a": [1, 1, 1, 2, 3], + "b": ["a", "a", "a", "b", "c"]}), + [1, 2, 3]), + (DataFrame({"a": [0, 0, 1, 1], + "b": [0, 1, 0, 1]}), + [0, 1]), + (DataFrame({"a": [1]}), + [1]), + (DataFrame({"a": [1, 1, 1, 2, 2, 1, 1, 2], + "b": range(8)}), + [1, 2]), + (DataFrame({"a": [1, 2, 3, 1, 2, 3], + "two": [4, 5, 6, 7, 8, 9]}), + [1, 2, 3]), + (DataFrame({"a": list("aaabbbcccc"), + "B": [3, 4, 3, 6, 5, 2, 1, 9, 5, 4], + "C": [4, 0, 2, 2, 2, 7, 8, 6, 2, 8]}), + ["a", "b", "c"]), + (DataFrame([[1, 2, 3], [2, 2, 3]], columns=["a", "b", "c"]), + [1, 2]), + ], ids=['GH2936', 'GH7739 & GH10519', 'GH10519', + 'GH2656', 'GH12155', 'GH20084', 'GH21417']) +def test_group_apply_once_per_group(df, group_names): + # GH2936, GH7739, GH10519, GH2656, GH12155, GH20084, GH21417 + + # This test should ensure that a function is only evaluted + # once per 
group. Previously the function has been evaluated twice + # on the first group to check if the Cython index slider is safe to use + # This test ensures that the side effect (append to list) is only triggered + # once per group + + names = [] + # cannot parameterize over the functions since they need external + # `names` to detect side effects + + def f_copy(group): + # this takes the fast apply path + names.append(group.name) + return group.copy() + + def f_nocopy(group): + # this takes the slow apply path + names.append(group.name) + return group + + def f_scalar(group): + # GH7739, GH2656 + names.append(group.name) + return 0 + + def f_none(group): + # GH10519, GH12155, GH21417 + names.append(group.name) + return None + + def f_constant_df(group): + # GH2936, GH20084 + names.append(group.name) + return DataFrame({"a": [1], "b": [1]}) + + for func in [f_copy, f_nocopy, f_scalar, f_none, f_constant_df]: + del names[:] + + df.groupby("a").apply(func) + assert names == group_names + + def test_apply_with_mixed_dtype(): # GH3480, apply with mixed dtype on axis=1 breaks in 0.11 df = DataFrame({'foo1': np.random.randn(6), diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index 8267f38f939b1..56d428f29a38c 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -1381,11 +1381,9 @@ def test_group_name_available_in_inference_pass(): def f(group): names.append(group.name) return group.copy() - df.groupby('a', sort=False, group_keys=False).apply(f) - # we expect 2 zeros because we call ``f`` once to see if a faster route - # can be used. - expected_names = [0, 0, 1, 2] + + expected_names = [0, 1, 2] assert names == expected_names
The issue of applying a function to the first row twice has been reported quite a few times (issues are usually referenced to #2936) and is also a documented shortcoming/implementation detail (see Notes in [docs](http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.apply.html)) The argumentation is usually that this needs to be done to determine whether or not we can take a fast path for the calculation. Even if we're using the fast path for the calculation, however, the first row is still evaluated twice. I believe with this refactoring it would be possible to only eval the first row once iff the fast path is taken. For functions with side effects or very expensive calculations in general this may be a big deal as discussed in various other issues. I'm wondering if this small change may already help out folks. Truth be told, I'm not 100% certain about what's actually happening in this loop regarding the slider and item cache. For the use case where I encountered this, it seems to do the trick, though. closes #2936 closes #2656 closes #7739 closes #10519 closes #12155 closes #20084 closes #21417 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/24748
2019-01-13T08:53:12Z
2019-03-26T04:38:34Z
2019-03-26T04:38:34Z
2020-05-02T09:38:12Z
PERF: speed up .ix by moving deprecation string out of __init__
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index 95bf776b1f19d..bbcde8f3b3305 100755 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -1405,17 +1405,16 @@ class _IXIndexer(_NDFrameIndexer): See more at :ref:`Advanced Indexing <advanced>`. """ - def __init__(self, name, obj): - - _ix_deprecation_warning = textwrap.dedent(""" - .ix is deprecated. Please use - .loc for label based indexing or - .iloc for positional indexing + _ix_deprecation_warning = textwrap.dedent(""" + .ix is deprecated. Please use + .loc for label based indexing or + .iloc for positional indexing - See the documentation here: - http://pandas.pydata.org/pandas-docs/stable/indexing.html#ix-indexer-is-deprecated""") # noqa + See the documentation here: + http://pandas.pydata.org/pandas-docs/stable/indexing.html#ix-indexer-is-deprecated""") # noqa - warnings.warn(_ix_deprecation_warning, + def __init__(self, name, obj): + warnings.warn(self._ix_deprecation_warning, DeprecationWarning, stacklevel=2) super(_IXIndexer, self).__init__(name, obj)
While `.ix` is deprecated, the `DeprecationWarning` itself adds a decent fraction of overhead to calling it. Simply storing the warning string on the class (or global scope) yields a sizable speedup: ``` $ asv compare v0.24.0rc1 HEAD -s --only-changed --sort ratio before after ratio [fdc4db25] [d0821a4c] <v0.24.0rc1^0> <ix_speedup> - 2.57±0.1ms 2.29±0.01ms 0.89 indexing.NumericSeriesIndexing.time_ix_list_like(<class 'pandas.core.indexes.numeric.UInt64Index'>, 'nonunique_monotonic_inc') - 2.69±0.2ms 2.39±0.02ms 0.89 indexing.NumericSeriesIndexing.time_ix_list_like(<class 'pandas.core.indexes.numeric.Float64Index'>, 'nonunique_monotonic_inc') - 124±2μs 103±5μs 0.83 indexing.NumericSeriesIndexing.time_ix_scalar(<class 'pandas.core.indexes.numeric.UInt64Index'>, 'unique_monotonic_inc') - 149±4μs 121±4μs 0.81 indexing.NumericSeriesIndexing.time_ix_slice(<class 'pandas.core.indexes.numeric.Float64Index'>, 'unique_monotonic_inc') - 2.43±0.08ms 1.96±0.05ms 0.81 indexing.NumericSeriesIndexing.time_ix_list_like(<class 'pandas.core.indexes.numeric.Int64Index'>, 'unique_monotonic_inc') - 179±20μs 138±7μs 0.77 indexing.NumericSeriesIndexing.time_ix_slice(<class 'pandas.core.indexes.numeric.Int64Index'>, 'unique_monotonic_inc') - 506±6μs 384±6μs 0.76 indexing.NumericSeriesIndexing.time_ix_scalar(<class 'pandas.core.indexes.numeric.Float64Index'>, 'nonunique_monotonic_inc') - 84.6±1μs 63.5±2μs 0.75 indexing.NumericSeriesIndexing.time_ix_scalar(<class 'pandas.core.indexes.numeric.Float64Index'>, 'unique_monotonic_inc') - 69.1±4μs 50.4±2μs 0.73 indexing.NumericSeriesIndexing.time_ix_scalar(<class 'pandas.core.indexes.numeric.Int64Index'>, 'unique_monotonic_inc') - 1.22±0.02ms 728±2μs 0.60 indexing.NumericSeriesIndexing.time_ix_slice(<class 'pandas.core.indexes.numeric.Float64Index'>, 'nonunique_monotonic_inc') - 31.3±4μs 8.67±0.9μs 0.28 indexing.DataFrameStringIndexing.time_ix - 16.5±1μs 2.67±0.3μs 0.16 indexing.MethodLookup.time_lookup_ix ``` - [ ] closes #xxxx - [ ] tests added / 
passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/24747
2019-01-13T03:18:08Z
2019-01-13T23:52:45Z
2019-01-13T23:52:45Z
2019-01-13T23:52:50Z
CI: isort ci
diff --git a/ci/code_checks.sh b/ci/code_checks.sh index b5774d0571dfa..3e62a08975dad 100755 --- a/ci/code_checks.sh +++ b/ci/code_checks.sh @@ -101,7 +101,7 @@ if [[ -z "$CHECK" || "$CHECK" == "lint" ]]; then # Imports - Check formatting using isort see setup.cfg for settings MSG='Check import format using isort ' ; echo $MSG - isort --recursive --check-only pandas + isort --recursive --check-only pandas asv_bench RET=$(($RET + $?)) ; echo $MSG "DONE" fi diff --git a/setup.cfg b/setup.cfg index 48c56caa0c8f7..44d92c0b8777f 100644 --- a/setup.cfg +++ b/setup.cfg @@ -115,7 +115,6 @@ force_sort_within_sections=True skip= pandas/core/api.py, pandas/core/frame.py, - doc/source/conf.py, asv_bench/benchmarks/algorithms.py, asv_bench/benchmarks/attrs_caching.py, asv_bench/benchmarks/binary_ops.py, @@ -150,3 +149,8 @@ skip= asv_bench/benchmarks/sparse.py, asv_bench/benchmarks/stat_ops.py, asv_bench/benchmarks/timeseries.py + asv_bench/benchmarks/pandas_vb_common.py + asv_bench/benchmarks/offset.py + asv_bench/benchmarks/dtypes.py + asv_bench/benchmarks/strings.py + asv_bench/benchmarks/period.py
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` asv_bench and doc dir where [added](https://github.com/pandas-dev/pandas/pull/24092/files) to the isort skip section. We should actually check this directory in CI. Checking doc dir is more effort that its worth ( barely any .py files ) so let's just leave this.
https://api.github.com/repos/pandas-dev/pandas/pulls/24746
2019-01-12T21:06:36Z
2019-01-14T00:03:49Z
2019-01-14T00:03:49Z
2019-02-17T18:41:39Z
CLN: More isort
diff --git a/pandas/tests/computation/test_compat.py b/pandas/tests/computation/test_compat.py index c25ef4bf38cab..7cc373d06cfe1 100644 --- a/pandas/tests/computation/test_compat.py +++ b/pandas/tests/computation/test_compat.py @@ -1,11 +1,11 @@ -import pytest from distutils.version import LooseVersion -import pandas as pd +import pytest +import pandas as pd +from pandas.core.computation.check import _MIN_NUMEXPR_VERSION from pandas.core.computation.engines import _engines import pandas.core.computation.expr as expr -from pandas.core.computation.check import _MIN_NUMEXPR_VERSION def test_compat(): diff --git a/pandas/tests/computation/test_eval.py b/pandas/tests/computation/test_eval.py index 1649c99384ef2..c1ba15f428eb7 100644 --- a/pandas/tests/computation/test_eval.py +++ b/pandas/tests/computation/test_eval.py @@ -1,39 +1,34 @@ -import warnings -import operator from distutils.version import LooseVersion from itertools import product +import operator +import warnings +import numpy as np +from numpy.random import rand, randint, randn import pytest -from numpy.random import randn, rand, randint -import numpy as np +from pandas.compat import PY3, reduce +from pandas.errors import PerformanceWarning +import pandas.util._test_decorators as td from pandas.core.dtypes.common import is_bool, is_list_like, is_scalar -import pandas as pd -from pandas.errors import PerformanceWarning -from pandas import DataFrame, Series, Panel, date_range -from pandas.util.testing import makeCustomDataframe as mkdf +import pandas as pd +from pandas import DataFrame, Panel, Series, date_range from pandas.core.computation import pytables from pandas.core.computation.check import _NUMEXPR_VERSION -from pandas.core.computation.engines import _engines, NumExprClobberingError -from pandas.core.computation.expr import PythonExprVisitor, PandasExprVisitor +from pandas.core.computation.engines import NumExprClobberingError, _engines +import pandas.core.computation.expr as expr +from 
pandas.core.computation.expr import PandasExprVisitor, PythonExprVisitor from pandas.core.computation.expressions import ( - _USE_NUMEXPR, _NUMEXPR_INSTALLED) + _NUMEXPR_INSTALLED, _USE_NUMEXPR) from pandas.core.computation.ops import ( - _binary_ops_dict, - _special_case_arith_ops_syms, - _arith_ops_syms, _bool_ops_syms, - _unary_math_ops, _binary_math_ops) - -import pandas.core.computation.expr as expr + _arith_ops_syms, _binary_math_ops, _binary_ops_dict, _bool_ops_syms, + _special_case_arith_ops_syms, _unary_math_ops) import pandas.util.testing as tm -import pandas.util._test_decorators as td -from pandas.util.testing import (assert_frame_equal, randbool, - assert_numpy_array_equal, assert_series_equal, - assert_produces_warning) -from pandas.compat import PY3, reduce - +from pandas.util.testing import ( + assert_frame_equal, assert_numpy_array_equal, assert_produces_warning, + assert_series_equal, makeCustomDataframe as mkdf, randbool) _series_frame_incompatible = _bool_ops_syms _scalar_skip = 'in', 'not in' diff --git a/pandas/tests/extension/arrow/test_bool.py b/pandas/tests/extension/arrow/test_bool.py index 2ace0fadc73e9..15ceb6adff59c 100644 --- a/pandas/tests/extension/arrow/test_bool.py +++ b/pandas/tests/extension/arrow/test_bool.py @@ -2,12 +2,12 @@ import pytest import pandas as pd -import pandas.util.testing as tm from pandas.tests.extension import base +import pandas.util.testing as tm pytest.importorskip('pyarrow', minversion="0.10.0") -from .bool import ArrowBoolArray, ArrowBoolDtype +from .bool import ArrowBoolArray, ArrowBoolDtype # isort:skip @pytest.fixture diff --git a/pandas/tests/extension/test_sparse.py b/pandas/tests/extension/test_sparse.py index 39a138ed534bd..21dbf9524961c 100644 --- a/pandas/tests/extension/test_sparse.py +++ b/pandas/tests/extension/test_sparse.py @@ -1,11 +1,12 @@ import numpy as np import pytest +from pandas.errors import PerformanceWarning + import pandas as pd -import pandas.util.testing as tm from pandas import 
SparseArray, SparseDtype -from pandas.errors import PerformanceWarning from pandas.tests.extension import base +import pandas.util.testing as tm def make_data(fill_value): diff --git a/pandas/tests/internals/test_internals.py b/pandas/tests/internals/test_internals.py index 7147761d23caa..fe0706efdc4f8 100644 --- a/pandas/tests/internals/test_internals.py +++ b/pandas/tests/internals/test_internals.py @@ -1,32 +1,29 @@ # -*- coding: utf-8 -*- # pylint: disable=W0102 -from datetime import datetime, date +from datetime import date, datetime +from distutils.version import LooseVersion +import itertools import operator +import re import sys -import pytest + import numpy as np +import pytest -import re -from distutils.version import LooseVersion -import itertools -from pandas import (Index, MultiIndex, DataFrame, DatetimeIndex, - Series, Categorical, SparseArray) - -from pandas.compat import OrderedDict, lrange -from pandas.core.arrays import ( - DatetimeArray, - TimedeltaArray, -) -from pandas.core.internals import (SingleBlockManager, - make_block, BlockManager) +from pandas._libs.internals import BlockPlacement +from pandas.compat import OrderedDict, lrange, u, zip + +import pandas as pd +from pandas import ( + Categorical, DataFrame, DatetimeIndex, Index, MultiIndex, Series, + SparseArray) import pandas.core.algorithms as algos +from pandas.core.arrays import DatetimeArray, TimedeltaArray +from pandas.core.internals import BlockManager, SingleBlockManager, make_block import pandas.util.testing as tm -import pandas as pd -from pandas._libs.internals import BlockPlacement -from pandas.util.testing import (assert_almost_equal, assert_frame_equal, - randn, assert_series_equal) -from pandas.compat import zip, u +from pandas.util.testing import ( + assert_almost_equal, assert_frame_equal, assert_series_equal, randn) # in 3.6.1 a c-api slicing function changed, see src/compat_helper.h PY361 = LooseVersion(sys.version) >= LooseVersion('3.6.1') diff --git 
a/pandas/tests/tools/test_numeric.py b/pandas/tests/tools/test_numeric.py index a0a1364f4617e..537881f3a5e85 100644 --- a/pandas/tests/tools/test_numeric.py +++ b/pandas/tests/tools/test_numeric.py @@ -1,12 +1,12 @@ -import pytest import decimal import numpy as np +from numpy import iinfo +import pytest + import pandas as pd from pandas import to_numeric - from pandas.util import testing as tm -from numpy import iinfo class TestToNumeric(object): diff --git a/setup.cfg b/setup.cfg index 3b7d1da9a2b02..48c56caa0c8f7 100644 --- a/setup.cfg +++ b/setup.cfg @@ -115,16 +115,6 @@ force_sort_within_sections=True skip= pandas/core/api.py, pandas/core/frame.py, - pandas/tests/api/test_types.py, - pandas/tests/api/test_api.py, - pandas/tests/tools/test_numeric.py, - pandas/tests/internals/test_internals.py, - pandas/tests/extension/test_sparse.py, - pandas/tests/extension/base/reduce.py, - pandas/tests/computation/test_compat.py, - pandas/tests/computation/test_eval.py, - pandas/types/common.py, - pandas/tests/extension/arrow/test_bool.py, doc/source/conf.py, asv_bench/benchmarks/algorithms.py, asv_bench/benchmarks/attrs_caching.py,
- [x] xref #23334 - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` Hopefully nearing the end of these now..
https://api.github.com/repos/pandas-dev/pandas/pulls/24745
2019-01-12T20:51:50Z
2019-01-13T18:59:00Z
2019-01-13T18:59:00Z
2019-01-13T18:59:04Z
DOC: Use backticks instead of quotes
diff --git a/doc/source/whatsnew/v0.24.0.rst b/doc/source/whatsnew/v0.24.0.rst index d3d34e6b9ec3e..2a60429629638 100644 --- a/doc/source/whatsnew/v0.24.0.rst +++ b/doc/source/whatsnew/v0.24.0.rst @@ -149,7 +149,7 @@ These dtypes can be merged & reshaped & casted. pd.concat([df[['A']], df[['B', 'C']]], axis=1).dtypes df['A'].astype(float) -Reduction and groupby operations such as 'sum' work. +Reduction and groupby operations such as ``sum`` work. .. ipython:: python
Small formatting edit to whatsnew. I think there should be backticks and not single quotes here?
https://api.github.com/repos/pandas-dev/pandas/pulls/24744
2019-01-12T20:05:30Z
2019-01-13T18:58:03Z
2019-01-13T18:58:03Z
2019-01-14T21:55:49Z
PERF: 3x speedup in Series of dicts with datetime keys by not having error message scale with input
diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py index 79756d4c0cfab..f84471c3b04e8 100644 --- a/pandas/core/dtypes/dtypes.py +++ b/pandas/core/dtypes/dtypes.py @@ -686,16 +686,19 @@ def construct_from_string(cls, string): >>> DatetimeTZDtype.construct_from_string('datetime64[ns, UTC]') datetime64[ns, UTC] """ - msg = "Could not construct DatetimeTZDtype from '{}'" - try: - match = cls._match.match(string) - if match: - d = match.groupdict() - return cls(unit=d['unit'], tz=d['tz']) - except Exception: - # TODO(py3): Change this pass to `raise TypeError(msg) from e` - pass - raise TypeError(msg.format(string)) + if isinstance(string, compat.string_types): + msg = "Could not construct DatetimeTZDtype from '{}'" + try: + match = cls._match.match(string) + if match: + d = match.groupdict() + return cls(unit=d['unit'], tz=d['tz']) + except Exception: + # TODO(py3): Change this pass to `raise TypeError(msg) from e` + pass + raise TypeError(msg.format(string)) + + raise TypeError("Could not construct DatetimeTZDtype") def __unicode__(self): return "datetime64[{unit}, {tz}]".format(unit=self.unit, tz=self.tz) diff --git a/pandas/tests/dtypes/test_dtypes.py b/pandas/tests/dtypes/test_dtypes.py index 40b8f7afa3598..0fe0a845f5129 100644 --- a/pandas/tests/dtypes/test_dtypes.py +++ b/pandas/tests/dtypes/test_dtypes.py @@ -232,6 +232,10 @@ def test_construct_from_string_raises(self): with pytest.raises(TypeError, match="notatz"): DatetimeTZDtype.construct_from_string('datetime64[ns, notatz]') + with pytest.raises(TypeError, + match="^Could not construct DatetimeTZDtype$"): + DatetimeTZDtype.construct_from_string(['datetime64[ns, notatz]']) + def test_is_dtype(self): assert not DatetimeTZDtype.is_dtype(None) assert DatetimeTZDtype.is_dtype(self.dtype)
Surprisingly, the majority of the time spent in constructing a `Series` from a dict with `datetime`-like keys is spent formatting the keys into strings for an error message that gets suppressed. As there's a test ensuring the string case makes it into the error message, we preserve the behavior there. In non-string cases, we mirror how the other `Dtypes` handle this case and do not include the input. ``` $ asv compare v0.24.0rc1 HEAD -s before after ratio [fdc4db25] [5c82eab5] <v0.24.0rc1^0> <series_dict_speedup> - 1.14±0.02s 303±4ms 0.27 series_methods.SeriesConstructor.time_constructor('dict') ``` - [ ] closes #xxxx - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/24743
2019-01-12T18:19:43Z
2019-01-13T18:54:17Z
2019-01-13T18:54:17Z
2019-01-13T18:54:22Z
CLN/TST: indexing/multiindex/test_getitem.py
diff --git a/pandas/tests/indexing/multiindex/test_getitem.py b/pandas/tests/indexing/multiindex/test_getitem.py index 88e96329105dd..b7fdbee0b7185 100644 --- a/pandas/tests/indexing/multiindex/test_getitem.py +++ b/pandas/tests/indexing/multiindex/test_getitem.py @@ -1,35 +1,15 @@ import numpy as np import pytest -from pandas.compat import range, u, zip +from pandas.compat import u, zip -import pandas as pd from pandas import DataFrame, Index, MultiIndex, Series -import pandas.core.common as com from pandas.core.indexing import IndexingError from pandas.util import testing as tm - -@pytest.fixture -def frame_random_data_integer_multi_index(): - levels = [[0, 1], [0, 1, 2]] - codes = [[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]] - index = MultiIndex(levels=levels, codes=codes) - return DataFrame(np.random.randn(6, 2), index=index) - - -@pytest.fixture -def dataframe_with_duplicate_index(): - """Fixture for DataFrame used in tests for gh-4145 and gh-4146""" - data = [['a', 'd', 'e', 'c', 'f', 'b'], - [1, 4, 5, 3, 6, 2], - [1, 4, 5, 3, 6, 2]] - index = ['h1', 'h3', 'h5'] - columns = MultiIndex( - levels=[['A', 'B'], ['A1', 'A2', 'B1', 'B2']], - codes=[[0, 0, 0, 1, 1, 1], [0, 3, 3, 0, 1, 2]], - names=['main', 'sub']) - return DataFrame(data, index=index, columns=columns) +# ---------------------------------------------------------------------------- +# test indexing of Series with multi-level Index +# ---------------------------------------------------------------------------- @pytest.mark.parametrize('access_method', [lambda s, x: s[:, x], @@ -51,7 +31,7 @@ def test_series_getitem_multiindex(access_method, level1_value, expected): @pytest.mark.parametrize('level0_value', ['D', 'A']) -def test_getitem_duplicates_multiindex(level0_value): +def test_series_getitem_duplicates_multiindex(level0_value): # GH 5725 the 'A' happens to be a valid Timestamp so the doesn't raise # the appropriate error, only in PY3 of course! 
@@ -65,12 +45,10 @@ def test_getitem_duplicates_multiindex(level0_value): # confirm indexing on missing value raises KeyError if level0_value != 'A': - msg = "'A'" - with pytest.raises(KeyError, match=msg): + with pytest.raises(KeyError, match=r"^'A'$"): df.val['A'] - msg = "'X'" - with pytest.raises(KeyError, match=msg): + with pytest.raises(KeyError, match=r"^'X'$"): df.val['X'] result = df.val[level0_value] @@ -79,89 +57,6 @@ def test_getitem_duplicates_multiindex(level0_value): tm.assert_series_equal(result, expected) -@pytest.mark.parametrize('indexer, is_level1, expected_error', [ - ([], False, None), # empty ok - (['A'], False, None), - (['A', 'D'], False, None), - (['D'], False, r"\['D'\] not in index"), # not any values found - (pd.IndexSlice[:, ['foo']], True, None), - (pd.IndexSlice[:, ['foo', 'bah']], True, None) -]) -def test_getitem_duplicates_multiindex_missing_indexers(indexer, is_level1, - expected_error): - # GH 7866 - # multi-index slicing with missing indexers - idx = MultiIndex.from_product([['A', 'B', 'C'], - ['foo', 'bar', 'baz']], - names=['one', 'two']) - s = Series(np.arange(9, dtype='int64'), index=idx).sort_index() - - if indexer == []: - expected = s.iloc[[]] - elif is_level1: - expected = Series([0, 3, 6], index=MultiIndex.from_product( - [['A', 'B', 'C'], ['foo']], names=['one', 'two'])).sort_index() - else: - exp_idx = MultiIndex.from_product([['A'], ['foo', 'bar', 'baz']], - names=['one', 'two']) - expected = Series(np.arange(3, dtype='int64'), - index=exp_idx).sort_index() - - if expected_error is not None: - with pytest.raises(KeyError, match=expected_error): - s.loc[indexer] - else: - result = s.loc[indexer] - tm.assert_series_equal(result, expected) - - -@pytest.mark.parametrize('columns_indexer', [ - ([], slice(None)), - (['foo'], []) -]) -def test_getitem_duplicates_multiindex_empty_indexer(columns_indexer): - # GH 8737 - # empty indexer - multi_index = MultiIndex.from_product((['foo', 'bar', 'baz'], - ['alpha', 'beta'])) - df 
= DataFrame(np.random.randn(5, 6), index=range(5), columns=multi_index) - df = df.sort_index(level=0, axis=1) - - expected = DataFrame(index=range(5), columns=multi_index.reindex([])[0]) - result = df.loc[:, columns_indexer] - tm.assert_frame_equal(result, expected) - - -def test_getitem_duplicates_multiindex_non_scalar_type_object(): - # regression from < 0.14.0 - # GH 7914 - df = DataFrame([[np.mean, np.median], ['mean', 'median']], - columns=MultiIndex.from_tuples([('functs', 'mean'), - ('functs', 'median')]), - index=['function', 'name']) - result = df.loc['function', ('functs', 'mean')] - expected = np.mean - assert result == expected - - -def test_getitem_simple(multiindex_dataframe_random_data): - df = multiindex_dataframe_random_data.T - expected = df.values[:, 0] - result = df['foo', 'one'].values - tm.assert_almost_equal(result, expected) - - -@pytest.mark.parametrize('indexer,msg', [ - (lambda df: df[('foo', 'four')], r"\('foo', 'four'\)"), - (lambda df: df['foobar'], "'foobar'") -]) -def test_getitem_simple_key_error( - multiindex_dataframe_random_data, indexer, msg): - df = multiindex_dataframe_random_data.T - with pytest.raises(KeyError, match=msg): - indexer(df) - - @pytest.mark.parametrize('indexer', [ lambda s: s[2000, 3], lambda s: s.loc[2000, 3] @@ -189,23 +84,9 @@ def test_series_getitem_returns_scalar( assert result == expected -@pytest.mark.filterwarnings("ignore:\\n.ix:DeprecationWarning") -@pytest.mark.parametrize('indexer', [ - lambda s: s.loc[[(2000, 3, 10), (2000, 3, 13)]], - lambda s: s.ix[[(2000, 3, 10), (2000, 3, 13)]] -]) -def test_series_getitem_fancy( - multiindex_year_month_day_dataframe_random_data, indexer): - s = multiindex_year_month_day_dataframe_random_data['A'] - expected = s.reindex(s.index[49:51]) - - result = indexer(s) - tm.assert_series_equal(result, expected) - - -@pytest.mark.parametrize('indexer,error,msg', [ - (lambda s: s.__getitem__((2000, 3, 4)), KeyError, '356'), - (lambda s: s[(2000, 3, 4)], KeyError, '356'), 
+@pytest.mark.parametrize('indexer,expected_error,expected_error_msg', [ + (lambda s: s.__getitem__((2000, 3, 4)), KeyError, r"^356L?$"), + (lambda s: s[(2000, 3, 4)], KeyError, r"^356L?$"), (lambda s: s.loc[(2000, 3, 4)], IndexingError, 'Too many indexers'), (lambda s: s.__getitem__(len(s)), IndexError, 'index out of bounds'), (lambda s: s[len(s)], IndexError, 'index out of bounds'), @@ -213,9 +94,10 @@ def test_series_getitem_fancy( 'single positional indexer is out-of-bounds') ]) def test_series_getitem_indexing_errors( - multiindex_year_month_day_dataframe_random_data, indexer, error, msg): + multiindex_year_month_day_dataframe_random_data, indexer, + expected_error, expected_error_msg): s = multiindex_year_month_day_dataframe_random_data['A'] - with pytest.raises(error, match=msg): + with pytest.raises(expected_error, match=expected_error_msg): indexer(s) @@ -227,6 +109,28 @@ def test_series_getitem_corner_generator( tm.assert_series_equal(result, expected) +# ---------------------------------------------------------------------------- +# test indexing of DataFrame with multi-level Index +# ---------------------------------------------------------------------------- + +def test_getitem_simple(multiindex_dataframe_random_data): + df = multiindex_dataframe_random_data.T + expected = df.values[:, 0] + result = df['foo', 'one'].values + tm.assert_almost_equal(result, expected) + + +@pytest.mark.parametrize('indexer,expected_error_msg', [ + (lambda df: df[('foo', 'four')], r"^\('foo', 'four'\)$"), + (lambda df: df['foobar'], r"^'foobar'$") +]) +def test_frame_getitem_simple_key_error( + multiindex_dataframe_random_data, indexer, expected_error_msg): + df = multiindex_dataframe_random_data.T + with pytest.raises(KeyError, match=expected_error_msg): + indexer(df) + + def test_frame_getitem_multicolumn_empty_level(): df = DataFrame({'a': ['1', '2', '3'], 'b': ['2', '3', '4']}) df.columns = [['level1 item1', 'level1 item2'], ['', 'level2 item2'], @@ -238,24 +142,12 @@ 
def test_frame_getitem_multicolumn_empty_level(): tm.assert_frame_equal(result, expected) -def test_getitem_tuple_plus_slice(): - # GH 671 - df = DataFrame({'a': np.arange(10), - 'b': np.arange(10), - 'c': np.random.randn(10), - 'd': np.random.randn(10)} - ).set_index(['a', 'b']) - expected = df.loc[0, 0] - result = df.loc[(0, 0), :] - tm.assert_series_equal(result, expected) - - @pytest.mark.parametrize('indexer,expected_slice', [ (lambda df: df['foo'], slice(3)), (lambda df: df['bar'], slice(3, 5)), (lambda df: df.loc[:, 'bar'], slice(3, 5)) ]) -def test_getitem_toplevel( +def test_frame_getitem_toplevel( multiindex_dataframe_random_data, indexer, expected_slice): df = multiindex_dataframe_random_data.T expected = df.reindex(columns=df.columns[expected_slice]) @@ -264,72 +156,8 @@ def test_getitem_toplevel( tm.assert_frame_equal(result, expected) -def test_getitem_int(frame_random_data_integer_multi_index): - df = frame_random_data_integer_multi_index - result = df.loc[1] - expected = df[-3:] - expected.index = expected.index.droplevel(0) - tm.assert_frame_equal(result, expected) - - -def test_getitem_int_raises_exception(frame_random_data_integer_multi_index): - df = frame_random_data_integer_multi_index - msg = "3" - with pytest.raises(KeyError, match=msg): - df.loc.__getitem__(3) - - -def test_getitem_iloc(multiindex_dataframe_random_data): - df = multiindex_dataframe_random_data - result = df.iloc[2] - expected = df.xs(df.index[2]) - tm.assert_series_equal(result, expected) - - -def test_frame_setitem_view_direct(multiindex_dataframe_random_data): - # this works because we are modifying the underlying array - # really a no-no - df = multiindex_dataframe_random_data.T - df['foo'].values[:] = 0 - assert (df['foo'].values == 0).all() - - -def test_frame_setitem_copy_raises(multiindex_dataframe_random_data): - # will raise/warn as its chained assignment - df = multiindex_dataframe_random_data.T - msg = "A value is trying to be set on a copy of a slice from a 
DataFrame" - with pytest.raises(com.SettingWithCopyError, match=msg): - df['foo']['one'] = 2 - - -def test_frame_setitem_copy_no_write(multiindex_dataframe_random_data): - frame = multiindex_dataframe_random_data.T - expected = frame - df = frame.copy() - msg = "A value is trying to be set on a copy of a slice from a DataFrame" - with pytest.raises(com.SettingWithCopyError, match=msg): - df['foo']['one'] = 2 - - result = df - tm.assert_frame_equal(result, expected) - - -def test_getitem_lowerdim_corner(multiindex_dataframe_random_data): - df = multiindex_dataframe_random_data - - # test setup - check key not in dataframe - with pytest.raises(KeyError, match="11"): - df.loc[('bar', 'three'), 'B'] - - # in theory should be inserting in a sorted space???? - df.loc[('bar', 'three'), 'B'] = 0 - expected = 0 - result = df.sort_index().loc[('bar', 'three'), 'B'] - assert result == expected - - @pytest.mark.parametrize('unicode_strings', [True, False]) -def test_mixed_depth_get(unicode_strings): +def test_frame_mixed_depth_get(unicode_strings): # If unicode_strings is True, the column labels in dataframe # construction will use unicode strings in Python 2 (pull request # #17099). 
@@ -355,11 +183,29 @@ def test_mixed_depth_get(unicode_strings): tm.assert_series_equal(result, expected) +# ---------------------------------------------------------------------------- +# test indexing of DataFrame with multi-level Index with duplicates +# ---------------------------------------------------------------------------- + +@pytest.fixture +def dataframe_with_duplicate_index(): + """Fixture for DataFrame used in tests for gh-4145 and gh-4146""" + data = [['a', 'd', 'e', 'c', 'f', 'b'], + [1, 4, 5, 3, 6, 2], + [1, 4, 5, 3, 6, 2]] + index = ['h1', 'h3', 'h5'] + columns = MultiIndex( + levels=[['A', 'B'], ['A1', 'A2', 'B1', 'B2']], + codes=[[0, 0, 0, 1, 1, 1], [0, 3, 3, 0, 1, 2]], + names=['main', 'sub']) + return DataFrame(data, index=index, columns=columns) + + @pytest.mark.parametrize('indexer', [ - lambda df: df.loc[:, ('A', 'A1')], - lambda df: df[('A', 'A1')] + lambda df: df[('A', 'A1')], + lambda df: df.loc[:, ('A', 'A1')] ]) -def test_mi_access(dataframe_with_duplicate_index, indexer): +def test_frame_mi_access(dataframe_with_duplicate_index, indexer): # GH 4145 df = dataframe_with_duplicate_index index = Index(['h1', 'h3', 'h5']) @@ -370,7 +216,7 @@ def test_mi_access(dataframe_with_duplicate_index, indexer): tm.assert_frame_equal(result, expected) -def test_mi_access_returns_series(dataframe_with_duplicate_index): +def test_frame_mi_access_returns_series(dataframe_with_duplicate_index): # GH 4146, not returning a block manager when selecting a unique index # from a duplicate index # as of 4879, this returns a Series (which is similar to what happens @@ -381,7 +227,7 @@ def test_mi_access_returns_series(dataframe_with_duplicate_index): tm.assert_series_equal(result, expected) -def test_mi_access_returns_frame(dataframe_with_duplicate_index): +def test_frame_mi_access_returns_frame(dataframe_with_duplicate_index): # selecting a non_unique from the 2nd level df = dataframe_with_duplicate_index expected = DataFrame([['d', 4, 4], ['e', 5, 5]], diff 
--git a/pandas/tests/indexing/multiindex/test_iloc.py b/pandas/tests/indexing/multiindex/test_iloc.py index a1681c1239aa3..bdd505804c82b 100644 --- a/pandas/tests/indexing/multiindex/test_iloc.py +++ b/pandas/tests/indexing/multiindex/test_iloc.py @@ -142,3 +142,10 @@ def test_iloc_setitem_int_multiindex_series(data, indexes, values, expected_k): df['k'] = expected_k expected = df.k tm.assert_series_equal(series, expected) + + +def test_getitem_iloc(multiindex_dataframe_random_data): + df = multiindex_dataframe_random_data + result = df.iloc[2] + expected = df.xs(df.index[2]) + tm.assert_series_equal(result, expected) diff --git a/pandas/tests/indexing/multiindex/test_loc.py b/pandas/tests/indexing/multiindex/test_loc.py index 75995a24a2ad1..ea451d40eb5d3 100644 --- a/pandas/tests/indexing/multiindex/test_loc.py +++ b/pandas/tests/indexing/multiindex/test_loc.py @@ -4,6 +4,7 @@ import numpy as np import pytest +import pandas as pd from pandas import DataFrame, Index, MultiIndex, Series from pandas.util import testing as tm @@ -15,6 +16,14 @@ def single_level_multiindex(): codes=[[0, 1, 2, 3]], names=['first']) +@pytest.fixture +def frame_random_data_integer_multi_index(): + levels = [[0, 1], [0, 1, 2]] + codes = [[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]] + index = MultiIndex(levels=levels, codes=codes) + return DataFrame(np.random.randn(6, 2), index=index) + + @pytest.mark.filterwarnings("ignore:\\n.ix:DeprecationWarning") class TestMultiIndexLoc(object): @@ -247,3 +256,123 @@ def convert_nested_indexer(indexer_type, keys): index=MultiIndex.from_product(keys)) tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize('indexer, is_level1, expected_error', [ + ([], False, None), # empty ok + (['A'], False, None), + (['A', 'D'], False, None), + (['D'], False, r"\['D'\] not in index"), # not any values found + (pd.IndexSlice[:, ['foo']], True, None), + (pd.IndexSlice[:, ['foo', 'bah']], True, None) +]) +def 
test_loc_getitem_duplicates_multiindex_missing_indexers(indexer, is_level1, + expected_error): + # GH 7866 + # multi-index slicing with missing indexers + idx = MultiIndex.from_product([['A', 'B', 'C'], + ['foo', 'bar', 'baz']], + names=['one', 'two']) + s = Series(np.arange(9, dtype='int64'), index=idx).sort_index() + + if indexer == []: + expected = s.iloc[[]] + elif is_level1: + expected = Series([0, 3, 6], index=MultiIndex.from_product( + [['A', 'B', 'C'], ['foo']], names=['one', 'two'])).sort_index() + else: + exp_idx = MultiIndex.from_product([['A'], ['foo', 'bar', 'baz']], + names=['one', 'two']) + expected = Series(np.arange(3, dtype='int64'), + index=exp_idx).sort_index() + + if expected_error is not None: + with pytest.raises(KeyError, match=expected_error): + s.loc[indexer] + else: + result = s.loc[indexer] + tm.assert_series_equal(result, expected) + + +@pytest.mark.filterwarnings("ignore:\\n.ix:DeprecationWarning") +@pytest.mark.parametrize('indexer', [ + lambda s: s.loc[[(2000, 3, 10), (2000, 3, 13)]], + lambda s: s.ix[[(2000, 3, 10), (2000, 3, 13)]] +]) +def test_series_loc_getitem_fancy( + multiindex_year_month_day_dataframe_random_data, indexer): + s = multiindex_year_month_day_dataframe_random_data['A'] + expected = s.reindex(s.index[49:51]) + + result = indexer(s) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize('columns_indexer', [ + ([], slice(None)), + (['foo'], []) +]) +def test_loc_getitem_duplicates_multiindex_empty_indexer(columns_indexer): + # GH 8737 + # empty indexer + multi_index = MultiIndex.from_product((['foo', 'bar', 'baz'], + ['alpha', 'beta'])) + df = DataFrame(np.random.randn(5, 6), index=range(5), columns=multi_index) + df = df.sort_index(level=0, axis=1) + + expected = DataFrame(index=range(5), columns=multi_index.reindex([])[0]) + result = df.loc[:, columns_indexer] + tm.assert_frame_equal(result, expected) + + +def test_loc_getitem_duplicates_multiindex_non_scalar_type_object(): + # regression from < 
0.14.0 + # GH 7914 + df = DataFrame([[np.mean, np.median], ['mean', 'median']], + columns=MultiIndex.from_tuples([('functs', 'mean'), + ('functs', 'median')]), + index=['function', 'name']) + result = df.loc['function', ('functs', 'mean')] + expected = np.mean + assert result == expected + + +def test_loc_getitem_tuple_plus_slice(): + # GH 671 + df = DataFrame({'a': np.arange(10), + 'b': np.arange(10), + 'c': np.random.randn(10), + 'd': np.random.randn(10)} + ).set_index(['a', 'b']) + expected = df.loc[0, 0] + result = df.loc[(0, 0), :] + tm.assert_series_equal(result, expected) + + +def test_loc_getitem_int(frame_random_data_integer_multi_index): + df = frame_random_data_integer_multi_index + result = df.loc[1] + expected = df[-3:] + expected.index = expected.index.droplevel(0) + tm.assert_frame_equal(result, expected) + + +def test_loc_getitem_int_raises_exception( + frame_random_data_integer_multi_index): + df = frame_random_data_integer_multi_index + with pytest.raises(KeyError, match=r"^3L?$"): + df.loc[3] + + +def test_loc_getitem_lowerdim_corner(multiindex_dataframe_random_data): + df = multiindex_dataframe_random_data + + # test setup - check key not in dataframe + with pytest.raises(KeyError, match=r"^11L?$"): + df.loc[('bar', 'three'), 'B'] + + # in theory should be inserting in a sorted space???? 
+ df.loc[('bar', 'three'), 'B'] = 0 + expected = 0 + result = df.sort_index().loc[('bar', 'three'), 'B'] + assert result == expected diff --git a/pandas/tests/indexing/multiindex/test_setitem.py b/pandas/tests/indexing/multiindex/test_setitem.py index d49ca34edd0fd..f8f037dbda46b 100644 --- a/pandas/tests/indexing/multiindex/test_setitem.py +++ b/pandas/tests/indexing/multiindex/test_setitem.py @@ -7,6 +7,7 @@ import pandas as pd from pandas import ( DataFrame, MultiIndex, Series, Timestamp, date_range, isna, notna) +import pandas.core.common as com from pandas.util import testing as tm @@ -408,3 +409,31 @@ def test_astype_assignment_with_dups(self): df['A'] = df['A'].astype(np.float64) tm.assert_index_equal(df.index, index) + + +def test_frame_setitem_view_direct(multiindex_dataframe_random_data): + # this works because we are modifying the underlying array + # really a no-no + df = multiindex_dataframe_random_data.T + df['foo'].values[:] = 0 + assert (df['foo'].values == 0).all() + + +def test_frame_setitem_copy_raises(multiindex_dataframe_random_data): + # will raise/warn as its chained assignment + df = multiindex_dataframe_random_data.T + msg = "A value is trying to be set on a copy of a slice from a DataFrame" + with pytest.raises(com.SettingWithCopyError, match=msg): + df['foo']['one'] = 2 + + +def test_frame_setitem_copy_no_write(multiindex_dataframe_random_data): + frame = multiindex_dataframe_random_data.T + expected = frame + df = frame.copy() + msg = "A value is trying to be set on a copy of a slice from a DataFrame" + with pytest.raises(com.SettingWithCopyError, match=msg): + df['foo']['one'] = 2 + + result = df + tm.assert_frame_equal(result, expected)
follow-on from #24452 to add section breaks and move tests. also `KeyError` checks made more explicit.
https://api.github.com/repos/pandas-dev/pandas/pulls/24741
2019-01-12T17:27:27Z
2019-01-13T20:34:41Z
2019-01-13T20:34:41Z
2019-01-15T19:53:48Z
DOC: Small whatsnew fixes
diff --git a/doc/source/whatsnew/v0.24.0.rst b/doc/source/whatsnew/v0.24.0.rst index 3950ff3c8863d..d3d34e6b9ec3e 100644 --- a/doc/source/whatsnew/v0.24.0.rst +++ b/doc/source/whatsnew/v0.24.0.rst @@ -1128,7 +1128,7 @@ update the ``ExtensionDtype._metadata`` tuple to match the signature of your - :meth:`~Series.shift` now dispatches to :meth:`ExtensionArray.shift` (:issue:`22386`) - :meth:`Series.combine()` works correctly with :class:`~pandas.api.extensions.ExtensionArray` inside of :class:`Series` (:issue:`20825`) - :meth:`Series.combine()` with scalar argument now works for any function type (:issue:`21248`) -- :meth:`Series.astype` and :meth:`DataFrame.astype` now dispatch to :meth:`ExtensionArray.astype` (:issue:`21185:`). +- :meth:`Series.astype` and :meth:`DataFrame.astype` now dispatch to :meth:`ExtensionArray.astype` (:issue:`21185`). - Slicing a single row of a ``DataFrame`` with multiple ExtensionArrays of the same type now preserves the dtype, rather than coercing to object (:issue:`22784`) - Bug when concatenating multiple ``Series`` with different extension dtypes not casting to object dtype (:issue:`22994`) - Series backed by an ``ExtensionArray`` now work with :func:`util.hash_pandas_object` (:issue:`23066`) @@ -1261,7 +1261,7 @@ Other API Changes - The order of the arguments of :func:`DataFrame.to_html` and :func:`DataFrame.to_string` is rearranged to be consistent with each other. (:issue:`23614`) - :meth:`CategoricalIndex.reindex` now raises a ``ValueError`` if the target index is non-unique and not equal to the current index. It previously only raised if the target index was not of a categorical dtype (:issue:`23963`). - :func:`Series.to_list` and :func:`Index.to_list` are now aliases of ``Series.tolist`` respectively ``Index.tolist`` (:issue:`8826`) -- The result of ``SparseSeries.unstack`` is now a :class:`DataFrame` with sparse values, rather than a :class:`SparseDataFrame` (issue:`24372`). 
+- The result of ``SparseSeries.unstack`` is now a :class:`DataFrame` with sparse values, rather than a :class:`SparseDataFrame` (:issue:`24372`). .. _whatsnew_0240.deprecations: @@ -1300,15 +1300,15 @@ Deprecations - The ``keep_tz=False`` option (the default) of the ``keep_tz`` keyword of :meth:`DatetimeIndex.to_series` is deprecated (:issue:`17832`). - Timezone converting a tz-aware ``datetime.datetime`` or :class:`Timestamp` with :class:`Timestamp` and the ``tz`` argument is now deprecated. Instead, use :meth:`Timestamp.tz_convert` (:issue:`23579`) -- :func:`pandas.api.types.is_period` is deprecated in favor of `pandas.api.types.is_period_dtype` (:issue:`23917`) -- :func:`pandas.api.types.is_datetimetz` is deprecated in favor of `pandas.api.types.is_datetime64tz` (:issue:`23917`) +- :func:`pandas.api.types.is_period` is deprecated in favor of ``pandas.api.types.is_period_dtype`` (:issue:`23917`) +- :func:`pandas.api.types.is_datetimetz` is deprecated in favor of ``pandas.api.types.is_datetime64tz`` (:issue:`23917`) - Creating a :class:`TimedeltaIndex`, :class:`DatetimeIndex`, or :class:`PeriodIndex` by passing range arguments `start`, `end`, and `periods` is deprecated in favor of :func:`timedelta_range`, :func:`date_range`, or :func:`period_range` (:issue:`23919`) - Passing a string alias like ``'datetime64[ns, UTC]'`` as the ``unit`` parameter to :class:`DatetimeTZDtype` is deprecated. Use :class:`DatetimeTZDtype.construct_from_string` instead (:issue:`23990`). - The ``skipna`` parameter of :meth:`~pandas.api.types.infer_dtype` will switch to ``True`` by default in a future version of pandas (:issue:`17066`, :issue:`24050`) - In :meth:`Series.where` with Categorical data, providing an ``other`` that is not present in the categories is deprecated. Convert the categorical to a different dtype or add the ``other`` to the categories first (:issue:`24077`). 
- :meth:`Series.clip_lower`, :meth:`Series.clip_upper`, :meth:`DataFrame.clip_lower` and :meth:`DataFrame.clip_upper` are deprecated and will be removed in a future version. Use ``Series.clip(lower=threshold)``, ``Series.clip(upper=threshold)`` and the equivalent ``DataFrame`` methods (:issue:`24203`) - :meth:`Series.nonzero` is deprecated and will be removed in a future version (:issue:`18262`) -- Passing an integer to :meth:`Series.fillna` and :meth:`DataFrame.fillna` with ``timedelta64[ns]`` dtypes is deprecated, will raise ``TypeError`` in a future version. Use ``obj.fillna(pd.Timedelta(...))` instead (:issue:`24694`) +- Passing an integer to :meth:`Series.fillna` and :meth:`DataFrame.fillna` with ``timedelta64[ns]`` dtypes is deprecated, will raise ``TypeError`` in a future version. Use ``obj.fillna(pd.Timedelta(...))`` instead (:issue:`24694`) .. _whatsnew_0240.deprecations.datetimelike_int_ops: @@ -1525,7 +1525,7 @@ Performance Improvements - Improved performance of :meth:`~DataFrame.where` for Categorical data (:issue:`24077`) - Improved performance of iterating over a :class:`Series`. Using :meth:`DataFrame.itertuples` now creates iterators without internally allocating lists of all elements (:issue:`20783`) -- Improved performance of :class:`Period` constructor, additionally benefitting ``PeriodArray`` and ``PeriodIndex`` creation (:issue:`24084` and :issue:`24118`) +- Improved performance of :class:`Period` constructor, additionally benefitting ``PeriodArray`` and ``PeriodIndex`` creation (:issue:`24084`, :issue:`24118`) - Improved performance of tz-aware :class:`DatetimeArray` binary operations (:issue:`24491`) .. 
_whatsnew_0240.bug_fixes: @@ -1671,7 +1671,7 @@ Numeric - Bug in :class:`DataFrame` with ``timedelta64[ns]`` dtype arithmetic operations with ``ndarray`` with integer dtype incorrectly treating the narray as ``timedelta64[ns]`` dtype (:issue:`23114`) - Bug in :meth:`Series.rpow` with object dtype ``NaN`` for ``1 ** NA`` instead of ``1`` (:issue:`22922`). - :meth:`Series.agg` can now handle numpy NaN-aware methods like :func:`numpy.nansum` (:issue:`19629`) -- Bug in :meth:`Series.rank` and :meth:`DataFrame.rank` when ``pct=True`` and more than 2:sup:`24` rows are present resulted in percentages greater than 1.0 (:issue:`18271`) +- Bug in :meth:`Series.rank` and :meth:`DataFrame.rank` when ``pct=True`` and more than 2\ :sup:`24` rows are present resulted in percentages greater than 1.0 (:issue:`18271`) - Calls such as :meth:`DataFrame.round` with a non-unique :meth:`CategoricalIndex` now return expected data. Previously, data would be improperly duplicated (:issue:`21809`). - Added ``log10``, `floor` and `ceil` to the list of supported functions in :meth:`DataFrame.eval` (:issue:`24139`, :issue:`24353`) - Logical operations ``&, |, ^`` between :class:`Series` and :class:`Index` will no longer raise ``ValueError`` (:issue:`22092`) @@ -1758,7 +1758,7 @@ I/O - Bug in :meth:`read_excel()` when ``parse_cols`` is specified with an empty dataset (:issue:`9208`) - :func:`read_html()` no longer ignores all-whitespace ``<tr>`` within ``<thead>`` when considering the ``skiprows`` and ``header`` arguments. Previously, users had to decrease their ``header`` and ``skiprows`` values on such tables to work around the issue. 
(:issue:`21641`) - :func:`read_excel()` will correctly show the deprecation warning for previously deprecated ``sheetname`` (:issue:`17994`) -- :func:`read_csv()` and func:`read_table()` will throw ``UnicodeError`` and not coredump on badly encoded strings (:issue:`22748`) +- :func:`read_csv()` and :func:`read_table()` will throw ``UnicodeError`` and not coredump on badly encoded strings (:issue:`22748`) - :func:`read_csv()` will correctly parse timezone-aware datetimes (:issue:`22256`) - Bug in :func:`read_csv()` in which memory management was prematurely optimized for the C engine when the data was being read in chunks (:issue:`23509`) - Bug in :func:`read_csv()` in unnamed columns were being improperly identified when extracting a multi-index (:issue:`23687`)
A few small things that I saw when scanning over the What's New page.
https://api.github.com/repos/pandas-dev/pandas/pulls/24738
2019-01-12T01:18:00Z
2019-01-12T10:32:15Z
2019-01-12T10:32:15Z
2019-01-12T17:25:35Z
PERF: leverage tzlocal package to provide 2000x speedup for dateutil.tz.tzlocal operations
diff --git a/LICENSES/TZLOCAL_LICENSE b/LICENSES/TZLOCAL_LICENSE new file mode 100644 index 0000000000000..9be1d2fe595ad --- /dev/null +++ b/LICENSES/TZLOCAL_LICENSE @@ -0,0 +1,19 @@ +Copyright 2011-2017 Lennart Regebro + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/asv_bench/benchmarks/timeseries.py b/asv_bench/benchmarks/timeseries.py index 6efd720d1acdd..46138a42f9cf3 100644 --- a/asv_bench/benchmarks/timeseries.py +++ b/asv_bench/benchmarks/timeseries.py @@ -62,7 +62,8 @@ def time_to_pydatetime(self, index_type): class TzLocalize(object): - params = [None, 'US/Eastern', 'UTC', dateutil.tz.tzutc()] + params = [None, 'US/Eastern', 'UTC', dateutil.tz.tzutc(), + dateutil.tz.tzlocal()] param_names = 'tz' def setup(self, tz): @@ -394,7 +395,8 @@ def time_dup_string_tzoffset_dates(self, cache): class DatetimeAccessor(object): - params = [None, 'US/Eastern', 'UTC', dateutil.tz.tzutc()] + params = [None, 'US/Eastern', 'UTC', dateutil.tz.tzutc(), + dateutil.tz.tzlocal()] param_names = 'tz' def setup(self, tz): diff --git a/pandas/_libs/src/__init__.py b/pandas/_libs/src/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/pandas/_libs/src/tzlocal/__init__.py b/pandas/_libs/src/tzlocal/__init__.py new file mode 100644 index 0000000000000..2f5337bebcfbd --- /dev/null +++ b/pandas/_libs/src/tzlocal/__init__.py @@ -0,0 +1,5 @@ +import sys +if sys.platform == 'win32': + from pandas._libs.src.tzlocal.win32 import get_localzone, reload_localzone +else: + from pandas._libs.src.tzlocal.unix import get_localzone, reload_localzone diff --git a/pandas/_libs/src/tzlocal/unix.py b/pandas/_libs/src/tzlocal/unix.py new file mode 100644 index 0000000000000..1f8f357ef68de --- /dev/null +++ b/pandas/_libs/src/tzlocal/unix.py @@ -0,0 +1,164 @@ +import os +import pytz +import re + +from pandas._libs.src.tzlocal import utils + +_cache_tz = None + + +def _tz_from_env(tzenv): + if tzenv[0] == ':': + tzenv = tzenv[1:] + + # TZ specifies a file + if os.path.exists(tzenv): + with open(tzenv, 'rb') as tzfile: + return pytz.tzfile.build_tzinfo('local', tzfile) + + # TZ specifies a zoneinfo zone. 
+ try: + tz = pytz.timezone(tzenv) + # That worked, so we return this: + return tz + except pytz.UnknownTimeZoneError: + raise pytz.UnknownTimeZoneError( + "tzlocal() does not support non-zoneinfo timezones like %s. \n" + "Please use a timezone in the form of Continent/City") + + +def _try_tz_from_env(): + tzenv = os.environ.get('TZ') + if tzenv: + try: + return _tz_from_env(tzenv) + except pytz.UnknownTimeZoneError: + pass + + +def _get_localzone(_root='/'): + """Tries to find the local timezone configuration. + + This method prefers finding the timezone name and passing that to pytz, + over passing in the localtime file, as in the later case the zoneinfo + name is unknown. + + The parameter _root makes the function look for files like /etc/localtime + beneath the _root directory. This is primarily used by the tests. + In normal usage you call the function without parameters.""" + + tzenv = _try_tz_from_env() + if tzenv: + return tzenv + + # Now look for distribution specific configuration files + # that contain the timezone name. + for configfile in ('etc/timezone', 'var/db/zoneinfo'): + tzpath = os.path.join(_root, configfile) + try: + with open(tzpath, 'rb') as tzfile: + data = tzfile.read() + + # Issue #3 was that /etc/timezone was a zoneinfo file. 
+ # That's a misconfiguration, but we need to handle it gracefully: + if data[:5] == b'TZif2': + continue + + etctz = data.strip().decode() + if not etctz: + # Empty file, skip + continue + for etctz in data.decode().splitlines(): + # Get rid of host definitions and comments: + if ' ' in etctz: + etctz, dummy = etctz.split(' ', 1) + if '#' in etctz: + etctz, dummy = etctz.split('#', 1) + if not etctz: + continue + return pytz.timezone(etctz.replace(' ', '_')) + except IOError: + # File doesn't exist or is a directory + continue + + # CentOS has a ZONE setting in /etc/sysconfig/clock, + # OpenSUSE has a TIMEZONE setting in /etc/sysconfig/clock and + # Gentoo has a TIMEZONE setting in /etc/conf.d/clock + # We look through these files for a timezone: + + zone_re = re.compile(r'\s*ZONE\s*=\s*\"') + timezone_re = re.compile(r'\s*TIMEZONE\s*=\s*\"') + end_re = re.compile('\"') + + for filename in ('etc/sysconfig/clock', 'etc/conf.d/clock'): + tzpath = os.path.join(_root, filename) + try: + with open(tzpath, 'rt') as tzfile: + data = tzfile.readlines() + + for line in data: + # Look for the ZONE= setting. + match = zone_re.match(line) + if match is None: + # No ZONE= setting. Look for the TIMEZONE= setting. 
+ match = timezone_re.match(line) + if match is not None: + # Some setting existed + line = line[match.end():] + etctz = line[:end_re.search(line).start()] + + # We found a timezone + return pytz.timezone(etctz.replace(' ', '_')) + except IOError: + # File doesn't exist or is a directory + continue + + # systemd distributions use symlinks that include the zone name, + # see manpage of localtime(5) and timedatectl(1) + tzpath = os.path.join(_root, 'etc/localtime') + if os.path.exists(tzpath) and os.path.islink(tzpath): + tzpath = os.path.realpath(tzpath) + start = tzpath.find("/")+1 + while start is not 0: + tzpath = tzpath[start:] + try: + return pytz.timezone(tzpath) + except pytz.UnknownTimeZoneError: + pass + start = tzpath.find("/")+1 + + # Are we under Termux on Android? It's not officially supported, because + # there is no reasonable way to run tests for this, but let's make an effort. + if os.path.exists('/system/bin/getprop'): + import subprocess + androidtz = subprocess.check_output(['getprop', 'persist.sys.timezone']) + return pytz.timezone(androidtz.strip().decode()) + + # No explicit setting existed. Use localtime + for filename in ('etc/localtime', 'usr/local/etc/localtime'): + tzpath = os.path.join(_root, filename) + + if not os.path.exists(tzpath): + continue + with open(tzpath, 'rb') as tzfile: + return pytz.tzfile.build_tzinfo('local', tzfile) + + raise pytz.UnknownTimeZoneError('Can not find any timezone configuration') + + +def get_localzone(): + """Get the computers configured local timezone, if any.""" + global _cache_tz + if _cache_tz is None: + _cache_tz = _get_localzone() + + utils.assert_tz_offset(_cache_tz) + return _cache_tz + + +def reload_localzone(): + """Reload the cached localzone. 
You need to call this if the timezone has changed.""" + global _cache_tz + _cache_tz = _get_localzone() + utils.assert_tz_offset(_cache_tz) + return _cache_tz diff --git a/pandas/_libs/src/tzlocal/utils.py b/pandas/_libs/src/tzlocal/utils.py new file mode 100644 index 0000000000000..bd9d663e88e7c --- /dev/null +++ b/pandas/_libs/src/tzlocal/utils.py @@ -0,0 +1,38 @@ +# -*- coding: utf-8 -*- +import datetime + + +def get_system_offset(): + """Get system's timezone offset using built-in library time. + + For the Timezone constants (altzone, daylight, timezone, and tzname), the + value is determined by the timezone rules in effect at module load time or + the last time tzset() is called and may be incorrect for times in the past. + + To keep compatibility with Windows, we're always importing time module here. + """ + import time + if time.daylight and time.localtime().tm_isdst > 0: + return -time.altzone + else: + return -time.timezone + + +def get_tz_offset(tz): + """Get timezone's offset using built-in function datetime.utcoffset().""" + return int(datetime.datetime.now(tz).utcoffset().total_seconds()) + + +def assert_tz_offset(tz): + """Assert that system's timezone offset equals to the timezone offset found. + + If they don't match, we probably have a misconfiguration, for example, an + incorrect timezone set in /etc/timezone file in systemd distributions.""" + tz_offset = get_tz_offset(tz) + system_offset = get_system_offset() + if tz_offset != system_offset: + msg = ('Timezone offset does not match system offset: {0} != {1}. 
' + 'Please, check your config files.').format( + tz_offset, system_offset + ) + raise ValueError(msg) diff --git a/pandas/_libs/src/tzlocal/win32.py b/pandas/_libs/src/tzlocal/win32.py new file mode 100644 index 0000000000000..eaa32e88536dc --- /dev/null +++ b/pandas/_libs/src/tzlocal/win32.py @@ -0,0 +1,104 @@ +try: + import _winreg as winreg +except ImportError: + import winreg + +import pytz + +from pandas._libs.src.tzlocal.windows_tz import win_tz +from pandas._libs.src.tzlocal import utils + +_cache_tz = None + + +def valuestodict(key): + """Convert a registry key's values to a dictionary.""" + dict = {} + size = winreg.QueryInfoKey(key)[1] + for i in range(size): + data = winreg.EnumValue(key, i) + dict[data[0]] = data[1] + return dict + + +def get_localzone_name(): + # Windows is special. It has unique time zone names (in several + # meanings of the word) available, but unfortunately, they can be + # translated to the language of the operating system, so we need to + # do a backwards lookup, by going through all time zones and see which + # one matches. + handle = winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) + + TZLOCALKEYNAME = r"SYSTEM\CurrentControlSet\Control\TimeZoneInformation" + localtz = winreg.OpenKey(handle, TZLOCALKEYNAME) + keyvalues = valuestodict(localtz) + localtz.Close() + + if 'TimeZoneKeyName' in keyvalues: + # Windows 7 (and Vista?) + + # For some reason this returns a string with loads of NUL bytes at + # least on some systems. I don't know if this is a bug somewhere, I + # just work around it. 
+ tzkeyname = keyvalues['TimeZoneKeyName'].split('\x00', 1)[0] + else: + # Windows 2000 or XP + + # This is the localized name: + tzwin = keyvalues['StandardName'] + + # Open the list of timezones to look up the real name: + TZKEYNAME = r"SOFTWARE\Microsoft\Windows NT\CurrentVersion\Time Zones" + tzkey = winreg.OpenKey(handle, TZKEYNAME) + + # Now, match this value to Time Zone information + tzkeyname = None + for i in range(winreg.QueryInfoKey(tzkey)[0]): + subkey = winreg.EnumKey(tzkey, i) + sub = winreg.OpenKey(tzkey, subkey) + data = valuestodict(sub) + sub.Close() + try: + if data['Std'] == tzwin: + tzkeyname = subkey + break + except KeyError: + # This timezone didn't have proper configuration. + # Ignore it. + pass + + tzkey.Close() + handle.Close() + + if tzkeyname is None: + raise LookupError('Can not find Windows timezone configuration') + + timezone = win_tz.get(tzkeyname) + if timezone is None: + # Nope, that didn't work. Try adding "Standard Time", + # it seems to work a lot of times: + timezone = win_tz.get(tzkeyname + " Standard Time") + + # Return what we have. + if timezone is None: + raise pytz.UnknownTimeZoneError('Can not find timezone ' + tzkeyname) + + return timezone + + +def get_localzone(): + """Returns the zoneinfo-based tzinfo object that matches the Windows-configured timezone.""" + global _cache_tz + if _cache_tz is None: + _cache_tz = pytz.timezone(get_localzone_name()) + + utils.assert_tz_offset(_cache_tz) + return _cache_tz + + +def reload_localzone(): + """Reload the cached localzone. 
You need to call this if the timezone has changed.""" + global _cache_tz + _cache_tz = pytz.timezone(get_localzone_name()) + utils.assert_tz_offset(_cache_tz) + return _cache_tz diff --git a/pandas/_libs/src/tzlocal/windows_tz.py b/pandas/_libs/src/tzlocal/windows_tz.py new file mode 100644 index 0000000000000..123980b80bf08 --- /dev/null +++ b/pandas/_libs/src/tzlocal/windows_tz.py @@ -0,0 +1,690 @@ +# This file is autogenerated by the update_windows_mapping.py script +# Do not edit. +win_tz = {'AUS Central Standard Time': 'Australia/Darwin', + 'AUS Eastern Standard Time': 'Australia/Sydney', + 'Afghanistan Standard Time': 'Asia/Kabul', + 'Alaskan Standard Time': 'America/Anchorage', + 'Aleutian Standard Time': 'America/Adak', + 'Altai Standard Time': 'Asia/Barnaul', + 'Arab Standard Time': 'Asia/Riyadh', + 'Arabian Standard Time': 'Asia/Dubai', + 'Arabic Standard Time': 'Asia/Baghdad', + 'Argentina Standard Time': 'America/Buenos_Aires', + 'Astrakhan Standard Time': 'Europe/Astrakhan', + 'Atlantic Standard Time': 'America/Halifax', + 'Aus Central W. Standard Time': 'Australia/Eucla', + 'Azerbaijan Standard Time': 'Asia/Baku', + 'Azores Standard Time': 'Atlantic/Azores', + 'Bahia Standard Time': 'America/Bahia', + 'Bangladesh Standard Time': 'Asia/Dhaka', + 'Belarus Standard Time': 'Europe/Minsk', + 'Bougainville Standard Time': 'Pacific/Bougainville', + 'Canada Central Standard Time': 'America/Regina', + 'Cape Verde Standard Time': 'Atlantic/Cape_Verde', + 'Caucasus Standard Time': 'Asia/Yerevan', + 'Cen. 
Australia Standard Time': 'Australia/Adelaide', + 'Central America Standard Time': 'America/Guatemala', + 'Central Asia Standard Time': 'Asia/Almaty', + 'Central Brazilian Standard Time': 'America/Cuiaba', + 'Central Europe Standard Time': 'Europe/Budapest', + 'Central European Standard Time': 'Europe/Warsaw', + 'Central Pacific Standard Time': 'Pacific/Guadalcanal', + 'Central Standard Time': 'America/Chicago', + 'Central Standard Time (Mexico)': 'America/Mexico_City', + 'Chatham Islands Standard Time': 'Pacific/Chatham', + 'China Standard Time': 'Asia/Shanghai', + 'Cuba Standard Time': 'America/Havana', + 'Dateline Standard Time': 'Etc/GMT+12', + 'E. Africa Standard Time': 'Africa/Nairobi', + 'E. Australia Standard Time': 'Australia/Brisbane', + 'E. Europe Standard Time': 'Europe/Chisinau', + 'E. South America Standard Time': 'America/Sao_Paulo', + 'Easter Island Standard Time': 'Pacific/Easter', + 'Eastern Standard Time': 'America/New_York', + 'Eastern Standard Time (Mexico)': 'America/Cancun', + 'Egypt Standard Time': 'Africa/Cairo', + 'Ekaterinburg Standard Time': 'Asia/Yekaterinburg', + 'FLE Standard Time': 'Europe/Kiev', + 'Fiji Standard Time': 'Pacific/Fiji', + 'GMT Standard Time': 'Europe/London', + 'GTB Standard Time': 'Europe/Bucharest', + 'Georgian Standard Time': 'Asia/Tbilisi', + 'Greenland Standard Time': 'America/Godthab', + 'Greenwich Standard Time': 'Atlantic/Reykjavik', + 'Haiti Standard Time': 'America/Port-au-Prince', + 'Hawaiian Standard Time': 'Pacific/Honolulu', + 'India Standard Time': 'Asia/Calcutta', + 'Iran Standard Time': 'Asia/Tehran', + 'Israel Standard Time': 'Asia/Jerusalem', + 'Jordan Standard Time': 'Asia/Amman', + 'Kaliningrad Standard Time': 'Europe/Kaliningrad', + 'Korea Standard Time': 'Asia/Seoul', + 'Libya Standard Time': 'Africa/Tripoli', + 'Line Islands Standard Time': 'Pacific/Kiritimati', + 'Lord Howe Standard Time': 'Australia/Lord_Howe', + 'Magadan Standard Time': 'Asia/Magadan', + 'Magallanes Standard Time': 
'America/Punta_Arenas', + 'Marquesas Standard Time': 'Pacific/Marquesas', + 'Mauritius Standard Time': 'Indian/Mauritius', + 'Middle East Standard Time': 'Asia/Beirut', + 'Montevideo Standard Time': 'America/Montevideo', + 'Morocco Standard Time': 'Africa/Casablanca', + 'Mountain Standard Time': 'America/Denver', + 'Mountain Standard Time (Mexico)': 'America/Chihuahua', + 'Myanmar Standard Time': 'Asia/Rangoon', + 'N. Central Asia Standard Time': 'Asia/Novosibirsk', + 'Namibia Standard Time': 'Africa/Windhoek', + 'Nepal Standard Time': 'Asia/Katmandu', + 'New Zealand Standard Time': 'Pacific/Auckland', + 'Newfoundland Standard Time': 'America/St_Johns', + 'Norfolk Standard Time': 'Pacific/Norfolk', + 'North Asia East Standard Time': 'Asia/Irkutsk', + 'North Asia Standard Time': 'Asia/Krasnoyarsk', + 'North Korea Standard Time': 'Asia/Pyongyang', + 'Omsk Standard Time': 'Asia/Omsk', + 'Pacific SA Standard Time': 'America/Santiago', + 'Pacific Standard Time': 'America/Los_Angeles', + 'Pacific Standard Time (Mexico)': 'America/Tijuana', + 'Pakistan Standard Time': 'Asia/Karachi', + 'Paraguay Standard Time': 'America/Asuncion', + 'Romance Standard Time': 'Europe/Paris', + 'Russia Time Zone 10': 'Asia/Srednekolymsk', + 'Russia Time Zone 11': 'Asia/Kamchatka', + 'Russia Time Zone 3': 'Europe/Samara', + 'Russian Standard Time': 'Europe/Moscow', + 'SA Eastern Standard Time': 'America/Cayenne', + 'SA Pacific Standard Time': 'America/Bogota', + 'SA Western Standard Time': 'America/La_Paz', + 'SE Asia Standard Time': 'Asia/Bangkok', + 'Saint Pierre Standard Time': 'America/Miquelon', + 'Sakhalin Standard Time': 'Asia/Sakhalin', + 'Samoa Standard Time': 'Pacific/Apia', + 'Saratov Standard Time': 'Europe/Saratov', + 'Singapore Standard Time': 'Asia/Singapore', + 'South Africa Standard Time': 'Africa/Johannesburg', + 'Sri Lanka Standard Time': 'Asia/Colombo', + 'Syria Standard Time': 'Asia/Damascus', + 'Taipei Standard Time': 'Asia/Taipei', + 'Tasmania Standard Time': 
'Australia/Hobart', + 'Tocantins Standard Time': 'America/Araguaina', + 'Tokyo Standard Time': 'Asia/Tokyo', + 'Tomsk Standard Time': 'Asia/Tomsk', + 'Tonga Standard Time': 'Pacific/Tongatapu', + 'Transbaikal Standard Time': 'Asia/Chita', + 'Turkey Standard Time': 'Europe/Istanbul', + 'Turks And Caicos Standard Time': 'America/Grand_Turk', + 'US Eastern Standard Time': 'America/Indianapolis', + 'US Mountain Standard Time': 'America/Phoenix', + 'UTC': 'Etc/GMT', + 'UTC+12': 'Etc/GMT-12', + 'UTC+13': 'Etc/GMT-13', + 'UTC-02': 'Etc/GMT+2', + 'UTC-08': 'Etc/GMT+8', + 'UTC-09': 'Etc/GMT+9', + 'UTC-11': 'Etc/GMT+11', + 'Ulaanbaatar Standard Time': 'Asia/Ulaanbaatar', + 'Venezuela Standard Time': 'America/Caracas', + 'Vladivostok Standard Time': 'Asia/Vladivostok', + 'W. Australia Standard Time': 'Australia/Perth', + 'W. Central Africa Standard Time': 'Africa/Lagos', + 'W. Europe Standard Time': 'Europe/Berlin', + 'W. Mongolia Standard Time': 'Asia/Hovd', + 'West Asia Standard Time': 'Asia/Tashkent', + 'West Bank Standard Time': 'Asia/Hebron', + 'West Pacific Standard Time': 'Pacific/Port_Moresby', + 'Yakutsk Standard Time': 'Asia/Yakutsk'} + +# Old name for the win_tz variable: +tz_names = win_tz + +tz_win = {'Africa/Abidjan': 'Greenwich Standard Time', + 'Africa/Accra': 'Greenwich Standard Time', + 'Africa/Addis_Ababa': 'E. Africa Standard Time', + 'Africa/Algiers': 'W. Central Africa Standard Time', + 'Africa/Asmera': 'E. Africa Standard Time', + 'Africa/Bamako': 'Greenwich Standard Time', + 'Africa/Bangui': 'W. Central Africa Standard Time', + 'Africa/Banjul': 'Greenwich Standard Time', + 'Africa/Bissau': 'Greenwich Standard Time', + 'Africa/Blantyre': 'South Africa Standard Time', + 'Africa/Brazzaville': 'W. 
Central Africa Standard Time', + 'Africa/Bujumbura': 'South Africa Standard Time', + 'Africa/Cairo': 'Egypt Standard Time', + 'Africa/Casablanca': 'Morocco Standard Time', + 'Africa/Ceuta': 'Romance Standard Time', + 'Africa/Conakry': 'Greenwich Standard Time', + 'Africa/Dakar': 'Greenwich Standard Time', + 'Africa/Dar_es_Salaam': 'E. Africa Standard Time', + 'Africa/Djibouti': 'E. Africa Standard Time', + 'Africa/Douala': 'W. Central Africa Standard Time', + 'Africa/El_Aaiun': 'Morocco Standard Time', + 'Africa/Freetown': 'Greenwich Standard Time', + 'Africa/Gaborone': 'South Africa Standard Time', + 'Africa/Harare': 'South Africa Standard Time', + 'Africa/Johannesburg': 'South Africa Standard Time', + 'Africa/Juba': 'E. Africa Standard Time', + 'Africa/Kampala': 'E. Africa Standard Time', + 'Africa/Khartoum': 'E. Africa Standard Time', + 'Africa/Kigali': 'South Africa Standard Time', + 'Africa/Kinshasa': 'W. Central Africa Standard Time', + 'Africa/Lagos': 'W. Central Africa Standard Time', + 'Africa/Libreville': 'W. Central Africa Standard Time', + 'Africa/Lome': 'Greenwich Standard Time', + 'Africa/Luanda': 'W. Central Africa Standard Time', + 'Africa/Lubumbashi': 'South Africa Standard Time', + 'Africa/Lusaka': 'South Africa Standard Time', + 'Africa/Malabo': 'W. Central Africa Standard Time', + 'Africa/Maputo': 'South Africa Standard Time', + 'Africa/Maseru': 'South Africa Standard Time', + 'Africa/Mbabane': 'South Africa Standard Time', + 'Africa/Mogadishu': 'E. Africa Standard Time', + 'Africa/Monrovia': 'Greenwich Standard Time', + 'Africa/Nairobi': 'E. Africa Standard Time', + 'Africa/Ndjamena': 'W. Central Africa Standard Time', + 'Africa/Niamey': 'W. Central Africa Standard Time', + 'Africa/Nouakchott': 'Greenwich Standard Time', + 'Africa/Ouagadougou': 'Greenwich Standard Time', + 'Africa/Porto-Novo': 'W. 
Central Africa Standard Time', + 'Africa/Sao_Tome': 'Greenwich Standard Time', + 'Africa/Timbuktu': 'Greenwich Standard Time', + 'Africa/Tripoli': 'Libya Standard Time', + 'Africa/Tunis': 'W. Central Africa Standard Time', + 'Africa/Windhoek': 'Namibia Standard Time', + 'America/Adak': 'Aleutian Standard Time', + 'America/Anchorage': 'Alaskan Standard Time', + 'America/Anguilla': 'SA Western Standard Time', + 'America/Antigua': 'SA Western Standard Time', + 'America/Araguaina': 'Tocantins Standard Time', + 'America/Argentina/La_Rioja': 'Argentina Standard Time', + 'America/Argentina/Rio_Gallegos': 'Argentina Standard Time', + 'America/Argentina/Salta': 'Argentina Standard Time', + 'America/Argentina/San_Juan': 'Argentina Standard Time', + 'America/Argentina/San_Luis': 'Argentina Standard Time', + 'America/Argentina/Tucuman': 'Argentina Standard Time', + 'America/Argentina/Ushuaia': 'Argentina Standard Time', + 'America/Aruba': 'SA Western Standard Time', + 'America/Asuncion': 'Paraguay Standard Time', + 'America/Atka': 'Aleutian Standard Time', + 'America/Bahia': 'Bahia Standard Time', + 'America/Bahia_Banderas': 'Central Standard Time (Mexico)', + 'America/Barbados': 'SA Western Standard Time', + 'America/Belem': 'SA Eastern Standard Time', + 'America/Belize': 'Central America Standard Time', + 'America/Blanc-Sablon': 'SA Western Standard Time', + 'America/Boa_Vista': 'SA Western Standard Time', + 'America/Bogota': 'SA Pacific Standard Time', + 'America/Boise': 'Mountain Standard Time', + 'America/Buenos_Aires': 'Argentina Standard Time', + 'America/Cambridge_Bay': 'Mountain Standard Time', + 'America/Campo_Grande': 'Central Brazilian Standard Time', + 'America/Cancun': 'Eastern Standard Time (Mexico)', + 'America/Caracas': 'Venezuela Standard Time', + 'America/Catamarca': 'Argentina Standard Time', + 'America/Cayenne': 'SA Eastern Standard Time', + 'America/Cayman': 'SA Pacific Standard Time', + 'America/Chicago': 'Central Standard Time', + 'America/Chihuahua': 
'Mountain Standard Time (Mexico)', + 'America/Coral_Harbour': 'SA Pacific Standard Time', + 'America/Cordoba': 'Argentina Standard Time', + 'America/Costa_Rica': 'Central America Standard Time', + 'America/Creston': 'US Mountain Standard Time', + 'America/Cuiaba': 'Central Brazilian Standard Time', + 'America/Curacao': 'SA Western Standard Time', + 'America/Danmarkshavn': 'UTC', + 'America/Dawson': 'Pacific Standard Time', + 'America/Dawson_Creek': 'US Mountain Standard Time', + 'America/Denver': 'Mountain Standard Time', + 'America/Detroit': 'Eastern Standard Time', + 'America/Dominica': 'SA Western Standard Time', + 'America/Edmonton': 'Mountain Standard Time', + 'America/Eirunepe': 'SA Pacific Standard Time', + 'America/El_Salvador': 'Central America Standard Time', + 'America/Ensenada': 'Pacific Standard Time (Mexico)', + 'America/Fort_Nelson': 'US Mountain Standard Time', + 'America/Fortaleza': 'SA Eastern Standard Time', + 'America/Glace_Bay': 'Atlantic Standard Time', + 'America/Godthab': 'Greenland Standard Time', + 'America/Goose_Bay': 'Atlantic Standard Time', + 'America/Grand_Turk': 'Turks And Caicos Standard Time', + 'America/Grenada': 'SA Western Standard Time', + 'America/Guadeloupe': 'SA Western Standard Time', + 'America/Guatemala': 'Central America Standard Time', + 'America/Guayaquil': 'SA Pacific Standard Time', + 'America/Guyana': 'SA Western Standard Time', + 'America/Halifax': 'Atlantic Standard Time', + 'America/Havana': 'Cuba Standard Time', + 'America/Hermosillo': 'US Mountain Standard Time', + 'America/Indiana/Knox': 'Central Standard Time', + 'America/Indiana/Marengo': 'US Eastern Standard Time', + 'America/Indiana/Petersburg': 'Eastern Standard Time', + 'America/Indiana/Tell_City': 'Central Standard Time', + 'America/Indiana/Vevay': 'US Eastern Standard Time', + 'America/Indiana/Vincennes': 'Eastern Standard Time', + 'America/Indiana/Winamac': 'Eastern Standard Time', + 'America/Indianapolis': 'US Eastern Standard Time', + 
'America/Inuvik': 'Mountain Standard Time', + 'America/Iqaluit': 'Eastern Standard Time', + 'America/Jamaica': 'SA Pacific Standard Time', + 'America/Jujuy': 'Argentina Standard Time', + 'America/Juneau': 'Alaskan Standard Time', + 'America/Kentucky/Monticello': 'Eastern Standard Time', + 'America/Knox_IN': 'Central Standard Time', + 'America/Kralendijk': 'SA Western Standard Time', + 'America/La_Paz': 'SA Western Standard Time', + 'America/Lima': 'SA Pacific Standard Time', + 'America/Los_Angeles': 'Pacific Standard Time', + 'America/Louisville': 'Eastern Standard Time', + 'America/Lower_Princes': 'SA Western Standard Time', + 'America/Maceio': 'SA Eastern Standard Time', + 'America/Managua': 'Central America Standard Time', + 'America/Manaus': 'SA Western Standard Time', + 'America/Marigot': 'SA Western Standard Time', + 'America/Martinique': 'SA Western Standard Time', + 'America/Matamoros': 'Central Standard Time', + 'America/Mazatlan': 'Mountain Standard Time (Mexico)', + 'America/Mendoza': 'Argentina Standard Time', + 'America/Menominee': 'Central Standard Time', + 'America/Merida': 'Central Standard Time (Mexico)', + 'America/Metlakatla': 'Alaskan Standard Time', + 'America/Mexico_City': 'Central Standard Time (Mexico)', + 'America/Miquelon': 'Saint Pierre Standard Time', + 'America/Moncton': 'Atlantic Standard Time', + 'America/Monterrey': 'Central Standard Time (Mexico)', + 'America/Montevideo': 'Montevideo Standard Time', + 'America/Montreal': 'Eastern Standard Time', + 'America/Montserrat': 'SA Western Standard Time', + 'America/Nassau': 'Eastern Standard Time', + 'America/New_York': 'Eastern Standard Time', + 'America/Nipigon': 'Eastern Standard Time', + 'America/Nome': 'Alaskan Standard Time', + 'America/Noronha': 'UTC-02', + 'America/North_Dakota/Beulah': 'Central Standard Time', + 'America/North_Dakota/Center': 'Central Standard Time', + 'America/North_Dakota/New_Salem': 'Central Standard Time', + 'America/Ojinaga': 'Mountain Standard Time', + 
'America/Panama': 'SA Pacific Standard Time', + 'America/Pangnirtung': 'Eastern Standard Time', + 'America/Paramaribo': 'SA Eastern Standard Time', + 'America/Phoenix': 'US Mountain Standard Time', + 'America/Port-au-Prince': 'Haiti Standard Time', + 'America/Port_of_Spain': 'SA Western Standard Time', + 'America/Porto_Acre': 'SA Pacific Standard Time', + 'America/Porto_Velho': 'SA Western Standard Time', + 'America/Puerto_Rico': 'SA Western Standard Time', + 'America/Punta_Arenas': 'Magallanes Standard Time', + 'America/Rainy_River': 'Central Standard Time', + 'America/Rankin_Inlet': 'Central Standard Time', + 'America/Recife': 'SA Eastern Standard Time', + 'America/Regina': 'Canada Central Standard Time', + 'America/Resolute': 'Central Standard Time', + 'America/Rio_Branco': 'SA Pacific Standard Time', + 'America/Santa_Isabel': 'Pacific Standard Time (Mexico)', + 'America/Santarem': 'SA Eastern Standard Time', + 'America/Santiago': 'Pacific SA Standard Time', + 'America/Santo_Domingo': 'SA Western Standard Time', + 'America/Sao_Paulo': 'E. 
South America Standard Time', + 'America/Scoresbysund': 'Azores Standard Time', + 'America/Shiprock': 'Mountain Standard Time', + 'America/Sitka': 'Alaskan Standard Time', + 'America/St_Barthelemy': 'SA Western Standard Time', + 'America/St_Johns': 'Newfoundland Standard Time', + 'America/St_Kitts': 'SA Western Standard Time', + 'America/St_Lucia': 'SA Western Standard Time', + 'America/St_Thomas': 'SA Western Standard Time', + 'America/St_Vincent': 'SA Western Standard Time', + 'America/Swift_Current': 'Canada Central Standard Time', + 'America/Tegucigalpa': 'Central America Standard Time', + 'America/Thule': 'Atlantic Standard Time', + 'America/Thunder_Bay': 'Eastern Standard Time', + 'America/Tijuana': 'Pacific Standard Time (Mexico)', + 'America/Toronto': 'Eastern Standard Time', + 'America/Tortola': 'SA Western Standard Time', + 'America/Vancouver': 'Pacific Standard Time', + 'America/Virgin': 'SA Western Standard Time', + 'America/Whitehorse': 'Pacific Standard Time', + 'America/Winnipeg': 'Central Standard Time', + 'America/Yakutat': 'Alaskan Standard Time', + 'America/Yellowknife': 'Mountain Standard Time', + 'Antarctica/Casey': 'Central Pacific Standard Time', + 'Antarctica/Davis': 'SE Asia Standard Time', + 'Antarctica/DumontDUrville': 'West Pacific Standard Time', + 'Antarctica/Macquarie': 'Central Pacific Standard Time', + 'Antarctica/Mawson': 'West Asia Standard Time', + 'Antarctica/McMurdo': 'New Zealand Standard Time', + 'Antarctica/Palmer': 'Magallanes Standard Time', + 'Antarctica/Rothera': 'SA Eastern Standard Time', + 'Antarctica/South_Pole': 'New Zealand Standard Time', + 'Antarctica/Syowa': 'E. Africa Standard Time', + 'Antarctica/Vostok': 'Central Asia Standard Time', + 'Arctic/Longyearbyen': 'W. 
Europe Standard Time', + 'Asia/Aden': 'Arab Standard Time', + 'Asia/Almaty': 'Central Asia Standard Time', + 'Asia/Amman': 'Jordan Standard Time', + 'Asia/Anadyr': 'Russia Time Zone 11', + 'Asia/Aqtau': 'West Asia Standard Time', + 'Asia/Aqtobe': 'West Asia Standard Time', + 'Asia/Ashgabat': 'West Asia Standard Time', + 'Asia/Ashkhabad': 'West Asia Standard Time', + 'Asia/Atyrau': 'West Asia Standard Time', + 'Asia/Baghdad': 'Arabic Standard Time', + 'Asia/Bahrain': 'Arab Standard Time', + 'Asia/Baku': 'Azerbaijan Standard Time', + 'Asia/Bangkok': 'SE Asia Standard Time', + 'Asia/Barnaul': 'Altai Standard Time', + 'Asia/Beirut': 'Middle East Standard Time', + 'Asia/Bishkek': 'Central Asia Standard Time', + 'Asia/Brunei': 'Singapore Standard Time', + 'Asia/Calcutta': 'India Standard Time', + 'Asia/Chita': 'Transbaikal Standard Time', + 'Asia/Choibalsan': 'Ulaanbaatar Standard Time', + 'Asia/Chongqing': 'China Standard Time', + 'Asia/Chungking': 'China Standard Time', + 'Asia/Colombo': 'Sri Lanka Standard Time', + 'Asia/Dacca': 'Bangladesh Standard Time', + 'Asia/Damascus': 'Syria Standard Time', + 'Asia/Dhaka': 'Bangladesh Standard Time', + 'Asia/Dili': 'Tokyo Standard Time', + 'Asia/Dubai': 'Arabian Standard Time', + 'Asia/Dushanbe': 'West Asia Standard Time', + 'Asia/Famagusta': 'Turkey Standard Time', + 'Asia/Gaza': 'West Bank Standard Time', + 'Asia/Harbin': 'China Standard Time', + 'Asia/Hebron': 'West Bank Standard Time', + 'Asia/Hong_Kong': 'China Standard Time', + 'Asia/Hovd': 'W. 
Mongolia Standard Time', + 'Asia/Irkutsk': 'North Asia East Standard Time', + 'Asia/Jakarta': 'SE Asia Standard Time', + 'Asia/Jayapura': 'Tokyo Standard Time', + 'Asia/Jerusalem': 'Israel Standard Time', + 'Asia/Kabul': 'Afghanistan Standard Time', + 'Asia/Kamchatka': 'Russia Time Zone 11', + 'Asia/Karachi': 'Pakistan Standard Time', + 'Asia/Kashgar': 'Central Asia Standard Time', + 'Asia/Katmandu': 'Nepal Standard Time', + 'Asia/Khandyga': 'Yakutsk Standard Time', + 'Asia/Krasnoyarsk': 'North Asia Standard Time', + 'Asia/Kuala_Lumpur': 'Singapore Standard Time', + 'Asia/Kuching': 'Singapore Standard Time', + 'Asia/Kuwait': 'Arab Standard Time', + 'Asia/Macao': 'China Standard Time', + 'Asia/Macau': 'China Standard Time', + 'Asia/Magadan': 'Magadan Standard Time', + 'Asia/Makassar': 'Singapore Standard Time', + 'Asia/Manila': 'Singapore Standard Time', + 'Asia/Muscat': 'Arabian Standard Time', + 'Asia/Nicosia': 'GTB Standard Time', + 'Asia/Novokuznetsk': 'North Asia Standard Time', + 'Asia/Novosibirsk': 'N. 
Central Asia Standard Time', + 'Asia/Omsk': 'Omsk Standard Time', + 'Asia/Oral': 'West Asia Standard Time', + 'Asia/Phnom_Penh': 'SE Asia Standard Time', + 'Asia/Pontianak': 'SE Asia Standard Time', + 'Asia/Pyongyang': 'North Korea Standard Time', + 'Asia/Qatar': 'Arab Standard Time', + 'Asia/Qyzylorda': 'Central Asia Standard Time', + 'Asia/Rangoon': 'Myanmar Standard Time', + 'Asia/Riyadh': 'Arab Standard Time', + 'Asia/Saigon': 'SE Asia Standard Time', + 'Asia/Sakhalin': 'Sakhalin Standard Time', + 'Asia/Samarkand': 'West Asia Standard Time', + 'Asia/Seoul': 'Korea Standard Time', + 'Asia/Shanghai': 'China Standard Time', + 'Asia/Singapore': 'Singapore Standard Time', + 'Asia/Srednekolymsk': 'Russia Time Zone 10', + 'Asia/Taipei': 'Taipei Standard Time', + 'Asia/Tashkent': 'West Asia Standard Time', + 'Asia/Tbilisi': 'Georgian Standard Time', + 'Asia/Tehran': 'Iran Standard Time', + 'Asia/Tel_Aviv': 'Israel Standard Time', + 'Asia/Thimbu': 'Bangladesh Standard Time', + 'Asia/Thimphu': 'Bangladesh Standard Time', + 'Asia/Tokyo': 'Tokyo Standard Time', + 'Asia/Tomsk': 'Tomsk Standard Time', + 'Asia/Ujung_Pandang': 'Singapore Standard Time', + 'Asia/Ulaanbaatar': 'Ulaanbaatar Standard Time', + 'Asia/Ulan_Bator': 'Ulaanbaatar Standard Time', + 'Asia/Urumqi': 'Central Asia Standard Time', + 'Asia/Ust-Nera': 'Vladivostok Standard Time', + 'Asia/Vientiane': 'SE Asia Standard Time', + 'Asia/Vladivostok': 'Vladivostok Standard Time', + 'Asia/Yakutsk': 'Yakutsk Standard Time', + 'Asia/Yekaterinburg': 'Ekaterinburg Standard Time', + 'Asia/Yerevan': 'Caucasus Standard Time', + 'Atlantic/Azores': 'Azores Standard Time', + 'Atlantic/Bermuda': 'Atlantic Standard Time', + 'Atlantic/Canary': 'GMT Standard Time', + 'Atlantic/Cape_Verde': 'Cape Verde Standard Time', + 'Atlantic/Faeroe': 'GMT Standard Time', + 'Atlantic/Jan_Mayen': 'W. 
Europe Standard Time', + 'Atlantic/Madeira': 'GMT Standard Time', + 'Atlantic/Reykjavik': 'Greenwich Standard Time', + 'Atlantic/South_Georgia': 'UTC-02', + 'Atlantic/St_Helena': 'Greenwich Standard Time', + 'Atlantic/Stanley': 'SA Eastern Standard Time', + 'Australia/ACT': 'AUS Eastern Standard Time', + 'Australia/Adelaide': 'Cen. Australia Standard Time', + 'Australia/Brisbane': 'E. Australia Standard Time', + 'Australia/Broken_Hill': 'Cen. Australia Standard Time', + 'Australia/Canberra': 'AUS Eastern Standard Time', + 'Australia/Currie': 'Tasmania Standard Time', + 'Australia/Darwin': 'AUS Central Standard Time', + 'Australia/Eucla': 'Aus Central W. Standard Time', + 'Australia/Hobart': 'Tasmania Standard Time', + 'Australia/LHI': 'Lord Howe Standard Time', + 'Australia/Lindeman': 'E. Australia Standard Time', + 'Australia/Lord_Howe': 'Lord Howe Standard Time', + 'Australia/Melbourne': 'AUS Eastern Standard Time', + 'Australia/NSW': 'AUS Eastern Standard Time', + 'Australia/North': 'AUS Central Standard Time', + 'Australia/Perth': 'W. Australia Standard Time', + 'Australia/Queensland': 'E. Australia Standard Time', + 'Australia/South': 'Cen. Australia Standard Time', + 'Australia/Sydney': 'AUS Eastern Standard Time', + 'Australia/Tasmania': 'Tasmania Standard Time', + 'Australia/Victoria': 'AUS Eastern Standard Time', + 'Australia/West': 'W. Australia Standard Time', + 'Australia/Yancowinna': 'Cen. Australia Standard Time', + 'Brazil/Acre': 'SA Pacific Standard Time', + 'Brazil/DeNoronha': 'UTC-02', + 'Brazil/East': 'E. 
South America Standard Time', + 'Brazil/West': 'SA Western Standard Time', + 'CST6CDT': 'Central Standard Time', + 'Canada/Atlantic': 'Atlantic Standard Time', + 'Canada/Central': 'Central Standard Time', + 'Canada/Eastern': 'Eastern Standard Time', + 'Canada/Mountain': 'Mountain Standard Time', + 'Canada/Newfoundland': 'Newfoundland Standard Time', + 'Canada/Pacific': 'Pacific Standard Time', + 'Canada/Saskatchewan': 'Canada Central Standard Time', + 'Canada/Yukon': 'Pacific Standard Time', + 'Chile/Continental': 'Pacific SA Standard Time', + 'Chile/EasterIsland': 'Easter Island Standard Time', + 'Cuba': 'Cuba Standard Time', + 'EST5EDT': 'Eastern Standard Time', + 'Egypt': 'Egypt Standard Time', + 'Eire': 'GMT Standard Time', + 'Etc/GMT': 'UTC', + 'Etc/GMT+1': 'Cape Verde Standard Time', + 'Etc/GMT+10': 'Hawaiian Standard Time', + 'Etc/GMT+11': 'UTC-11', + 'Etc/GMT+12': 'Dateline Standard Time', + 'Etc/GMT+2': 'UTC-02', + 'Etc/GMT+3': 'SA Eastern Standard Time', + 'Etc/GMT+4': 'SA Western Standard Time', + 'Etc/GMT+5': 'SA Pacific Standard Time', + 'Etc/GMT+6': 'Central America Standard Time', + 'Etc/GMT+7': 'US Mountain Standard Time', + 'Etc/GMT+8': 'UTC-08', + 'Etc/GMT+9': 'UTC-09', + 'Etc/GMT-1': 'W. Central Africa Standard Time', + 'Etc/GMT-10': 'West Pacific Standard Time', + 'Etc/GMT-11': 'Central Pacific Standard Time', + 'Etc/GMT-12': 'UTC+12', + 'Etc/GMT-13': 'UTC+13', + 'Etc/GMT-14': 'Line Islands Standard Time', + 'Etc/GMT-2': 'South Africa Standard Time', + 'Etc/GMT-3': 'E. Africa Standard Time', + 'Etc/GMT-4': 'Arabian Standard Time', + 'Etc/GMT-5': 'West Asia Standard Time', + 'Etc/GMT-6': 'Central Asia Standard Time', + 'Etc/GMT-7': 'SE Asia Standard Time', + 'Etc/GMT-8': 'Singapore Standard Time', + 'Etc/GMT-9': 'Tokyo Standard Time', + 'Etc/UTC': 'UTC', + 'Europe/Amsterdam': 'W. Europe Standard Time', + 'Europe/Andorra': 'W. 
Europe Standard Time', + 'Europe/Astrakhan': 'Astrakhan Standard Time', + 'Europe/Athens': 'GTB Standard Time', + 'Europe/Belfast': 'GMT Standard Time', + 'Europe/Belgrade': 'Central Europe Standard Time', + 'Europe/Berlin': 'W. Europe Standard Time', + 'Europe/Bratislava': 'Central Europe Standard Time', + 'Europe/Brussels': 'Romance Standard Time', + 'Europe/Bucharest': 'GTB Standard Time', + 'Europe/Budapest': 'Central Europe Standard Time', + 'Europe/Busingen': 'W. Europe Standard Time', + 'Europe/Chisinau': 'E. Europe Standard Time', + 'Europe/Copenhagen': 'Romance Standard Time', + 'Europe/Dublin': 'GMT Standard Time', + 'Europe/Gibraltar': 'W. Europe Standard Time', + 'Europe/Guernsey': 'GMT Standard Time', + 'Europe/Helsinki': 'FLE Standard Time', + 'Europe/Isle_of_Man': 'GMT Standard Time', + 'Europe/Istanbul': 'Turkey Standard Time', + 'Europe/Jersey': 'GMT Standard Time', + 'Europe/Kaliningrad': 'Kaliningrad Standard Time', + 'Europe/Kiev': 'FLE Standard Time', + 'Europe/Kirov': 'Russian Standard Time', + 'Europe/Lisbon': 'GMT Standard Time', + 'Europe/Ljubljana': 'Central Europe Standard Time', + 'Europe/London': 'GMT Standard Time', + 'Europe/Luxembourg': 'W. Europe Standard Time', + 'Europe/Madrid': 'Romance Standard Time', + 'Europe/Malta': 'W. Europe Standard Time', + 'Europe/Mariehamn': 'FLE Standard Time', + 'Europe/Minsk': 'Belarus Standard Time', + 'Europe/Monaco': 'W. Europe Standard Time', + 'Europe/Moscow': 'Russian Standard Time', + 'Europe/Oslo': 'W. Europe Standard Time', + 'Europe/Paris': 'Romance Standard Time', + 'Europe/Podgorica': 'Central Europe Standard Time', + 'Europe/Prague': 'Central Europe Standard Time', + 'Europe/Riga': 'FLE Standard Time', + 'Europe/Rome': 'W. Europe Standard Time', + 'Europe/Samara': 'Russia Time Zone 3', + 'Europe/San_Marino': 'W. 
Europe Standard Time', + 'Europe/Sarajevo': 'Central European Standard Time', + 'Europe/Saratov': 'Saratov Standard Time', + 'Europe/Simferopol': 'Russian Standard Time', + 'Europe/Skopje': 'Central European Standard Time', + 'Europe/Sofia': 'FLE Standard Time', + 'Europe/Stockholm': 'W. Europe Standard Time', + 'Europe/Tallinn': 'FLE Standard Time', + 'Europe/Tirane': 'Central Europe Standard Time', + 'Europe/Tiraspol': 'E. Europe Standard Time', + 'Europe/Ulyanovsk': 'Astrakhan Standard Time', + 'Europe/Uzhgorod': 'FLE Standard Time', + 'Europe/Vaduz': 'W. Europe Standard Time', + 'Europe/Vatican': 'W. Europe Standard Time', + 'Europe/Vienna': 'W. Europe Standard Time', + 'Europe/Vilnius': 'FLE Standard Time', + 'Europe/Volgograd': 'Russian Standard Time', + 'Europe/Warsaw': 'Central European Standard Time', + 'Europe/Zagreb': 'Central European Standard Time', + 'Europe/Zaporozhye': 'FLE Standard Time', + 'Europe/Zurich': 'W. Europe Standard Time', + 'GB': 'GMT Standard Time', + 'GB-Eire': 'GMT Standard Time', + 'GMT+0': 'UTC', + 'GMT-0': 'UTC', + 'GMT0': 'UTC', + 'Greenwich': 'UTC', + 'Hongkong': 'China Standard Time', + 'Iceland': 'Greenwich Standard Time', + 'Indian/Antananarivo': 'E. Africa Standard Time', + 'Indian/Chagos': 'Central Asia Standard Time', + 'Indian/Christmas': 'SE Asia Standard Time', + 'Indian/Cocos': 'Myanmar Standard Time', + 'Indian/Comoro': 'E. Africa Standard Time', + 'Indian/Kerguelen': 'West Asia Standard Time', + 'Indian/Mahe': 'Mauritius Standard Time', + 'Indian/Maldives': 'West Asia Standard Time', + 'Indian/Mauritius': 'Mauritius Standard Time', + 'Indian/Mayotte': 'E. 
Africa Standard Time', + 'Indian/Reunion': 'Mauritius Standard Time', + 'Iran': 'Iran Standard Time', + 'Israel': 'Israel Standard Time', + 'Jamaica': 'SA Pacific Standard Time', + 'Japan': 'Tokyo Standard Time', + 'Kwajalein': 'UTC+12', + 'Libya': 'Libya Standard Time', + 'MST7MDT': 'Mountain Standard Time', + 'Mexico/BajaNorte': 'Pacific Standard Time (Mexico)', + 'Mexico/BajaSur': 'Mountain Standard Time (Mexico)', + 'Mexico/General': 'Central Standard Time (Mexico)', + 'NZ': 'New Zealand Standard Time', + 'NZ-CHAT': 'Chatham Islands Standard Time', + 'Navajo': 'Mountain Standard Time', + 'PRC': 'China Standard Time', + 'PST8PDT': 'Pacific Standard Time', + 'Pacific/Apia': 'Samoa Standard Time', + 'Pacific/Auckland': 'New Zealand Standard Time', + 'Pacific/Bougainville': 'Bougainville Standard Time', + 'Pacific/Chatham': 'Chatham Islands Standard Time', + 'Pacific/Easter': 'Easter Island Standard Time', + 'Pacific/Efate': 'Central Pacific Standard Time', + 'Pacific/Enderbury': 'UTC+13', + 'Pacific/Fakaofo': 'UTC+13', + 'Pacific/Fiji': 'Fiji Standard Time', + 'Pacific/Funafuti': 'UTC+12', + 'Pacific/Galapagos': 'Central America Standard Time', + 'Pacific/Gambier': 'UTC-09', + 'Pacific/Guadalcanal': 'Central Pacific Standard Time', + 'Pacific/Guam': 'West Pacific Standard Time', + 'Pacific/Honolulu': 'Hawaiian Standard Time', + 'Pacific/Johnston': 'Hawaiian Standard Time', + 'Pacific/Kiritimati': 'Line Islands Standard Time', + 'Pacific/Kosrae': 'Central Pacific Standard Time', + 'Pacific/Kwajalein': 'UTC+12', + 'Pacific/Majuro': 'UTC+12', + 'Pacific/Marquesas': 'Marquesas Standard Time', + 'Pacific/Midway': 'UTC-11', + 'Pacific/Nauru': 'UTC+12', + 'Pacific/Niue': 'UTC-11', + 'Pacific/Norfolk': 'Norfolk Standard Time', + 'Pacific/Noumea': 'Central Pacific Standard Time', + 'Pacific/Pago_Pago': 'UTC-11', + 'Pacific/Palau': 'Tokyo Standard Time', + 'Pacific/Pitcairn': 'UTC-08', + 'Pacific/Ponape': 'Central Pacific Standard Time', + 'Pacific/Port_Moresby': 'West 
Pacific Standard Time', + 'Pacific/Rarotonga': 'Hawaiian Standard Time', + 'Pacific/Saipan': 'West Pacific Standard Time', + 'Pacific/Samoa': 'UTC-11', + 'Pacific/Tahiti': 'Hawaiian Standard Time', + 'Pacific/Tarawa': 'UTC+12', + 'Pacific/Tongatapu': 'Tonga Standard Time', + 'Pacific/Truk': 'West Pacific Standard Time', + 'Pacific/Wake': 'UTC+12', + 'Pacific/Wallis': 'UTC+12', + 'Poland': 'Central European Standard Time', + 'Portugal': 'GMT Standard Time', + 'ROC': 'Taipei Standard Time', + 'ROK': 'Korea Standard Time', + 'Singapore': 'Singapore Standard Time', + 'Turkey': 'Turkey Standard Time', + 'US/Alaska': 'Alaskan Standard Time', + 'US/Aleutian': 'Aleutian Standard Time', + 'US/Arizona': 'US Mountain Standard Time', + 'US/Central': 'Central Standard Time', + 'US/Eastern': 'Eastern Standard Time', + 'US/Hawaii': 'Hawaiian Standard Time', + 'US/Indiana-Starke': 'Central Standard Time', + 'US/Michigan': 'Eastern Standard Time', + 'US/Mountain': 'Mountain Standard Time', + 'US/Pacific': 'Pacific Standard Time', + 'US/Samoa': 'UTC-11', + 'UTC': 'UTC', + 'Universal': 'UTC', + 'W-SU': 'Russian Standard Time', + 'Zulu': 'UTC'} diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx index f932e236b5218..9e06852cfd79e 100644 --- a/pandas/_libs/tslib.pyx +++ b/pandas/_libs/tslib.pyx @@ -28,7 +28,8 @@ from pandas._libs.tslibs.np_datetime import OutOfBoundsDatetime from pandas._libs.tslibs.parsing import parse_datetime_string from pandas._libs.tslibs.timedeltas cimport cast_from_unit -from pandas._libs.tslibs.timezones cimport is_utc, is_tzlocal, get_dst_info +from pandas._libs.tslibs.timezones cimport (is_utc, is_tzlocal, get_dst_info, + get_tzlocal_tz) from pandas._libs.tslibs.timezones import UTC from pandas._libs.tslibs.conversion cimport ( tz_convert_single, _TSObject, convert_datetime_to_tsobject, @@ -101,7 +102,8 @@ def ints_to_pydatetime(int64_t[:] arr, object tz=None, object freq=None, int64_t[:] deltas Py_ssize_t pos npy_datetimestruct dts - object dt, 
new_tz + object dt, new_tz, orig_tz + bint tz_changed str typ int64_t value, delta, local_value ndarray[object] result = np.empty(n, dtype=object) @@ -124,6 +126,15 @@ def ints_to_pydatetime(int64_t[:] arr, object tz=None, object freq=None, raise ValueError("box must be one of 'datetime', 'date', 'time' or" " 'timestamp'") + orig_tz = tz + tz_changed = False + + if is_tzlocal(tz): + new_tz = get_tzlocal_tz(tz) + if new_tz != tz: + tz_changed = True + tz = new_tz + if is_utc(tz) or tz is None: for i in range(n): value = arr[i] @@ -184,7 +195,8 @@ def ints_to_pydatetime(int64_t[:] arr, object tz=None, object freq=None, new_tz = tz._tzinfos[tz._transition_info[pos]] dt64_to_dtstruct(value + deltas[pos], &dts) - result[i] = func_create(value, dts, new_tz, freq) + tz_to_use = orig_tz if tz_changed else new_tz + result[i] = func_create(value, dts, tz_to_use, freq) return result diff --git a/pandas/_libs/tslibs/conversion.pyx b/pandas/_libs/tslibs/conversion.pyx index 1c0adaaa288a9..bfb39f80992f3 100644 --- a/pandas/_libs/tslibs/conversion.pyx +++ b/pandas/_libs/tslibs/conversion.pyx @@ -33,7 +33,7 @@ from pandas._libs.tslibs.timedeltas cimport (cast_from_unit, delta_to_nanoseconds) from pandas._libs.tslibs.timezones cimport ( is_utc, is_tzlocal, is_fixed_offset, get_utcoffset, get_dst_info, - get_timezone, maybe_get_tz, tz_compare) + get_timezone, maybe_get_tz, tz_compare, get_tzlocal_tz) from pandas._libs.tslibs.timezones import UTC from pandas._libs.tslibs.parsing import parse_datetime_string @@ -541,9 +541,19 @@ cdef inline void localize_tso(_TSObject obj, tzinfo tz): int64_t local_val Py_ssize_t pos str typ + bint tz_changed assert obj.tzinfo is None + orig_tz = tz + tz_changed = False + + if is_tzlocal(tz): + new_tz = get_tzlocal_tz(tz) + if new_tz != tz: + tz_changed = True + tz = new_tz + if is_utc(tz): pass elif obj.value == NPY_NAT: @@ -574,7 +584,12 @@ cdef inline void localize_tso(_TSObject obj, tzinfo tz): # so this branch will never be reached. 
pass - obj.tzinfo = tz + if tz_changed: + # We want to return tzlocal() if provided it, even if we map it + # to a real tz for performance reasons + obj.tzinfo = orig_tz + else: + obj.tzinfo = tz cdef inline datetime _localize_pydatetime(datetime dt, tzinfo tz): @@ -652,6 +667,9 @@ cdef inline int64_t[:] _tz_convert_dst(int64_t[:] values, tzinfo tz, bint tz_is_local tz_is_local = is_tzlocal(tz) + if tz_is_local: + tz = get_tzlocal_tz(tz) + tz_is_local = is_tzlocal(tz) if not tz_is_local: # get_dst_info cannot extract offsets from tzlocal because its @@ -763,6 +781,11 @@ cpdef int64_t tz_convert_single(int64_t val, object tz1, object tz2): if val == NPY_NAT: return val + if is_tzlocal(tz1): + tz1 = get_tzlocal_tz(tz1) + if is_tzlocal(tz2): + tz2 = get_tzlocal_tz(tz2) + # Convert to UTC if is_tzlocal(tz1): utc_date = _tz_convert_tzlocal_utc(val, tz1, to_utc=True) @@ -807,10 +830,16 @@ cdef inline int64_t[:] _tz_convert_one_way(int64_t[:] vals, object tz, int64_t[:] converted, result Py_ssize_t i, n = len(vals) int64_t val + bint tz_is_local + + tz_is_local = is_tzlocal(tz) + if tz_is_local: + tz = get_tzlocal_tz(tz) + tz_is_local = is_tzlocal(tz) if not is_utc(get_timezone(tz)): converted = np.empty(n, dtype=np.int64) - if is_tzlocal(tz): + if tz_is_local: for i in range(n): val = vals[i] if val == NPY_NAT: @@ -915,6 +944,9 @@ def tz_localize_to_utc(ndarray[int64_t] vals, object tz, object ambiguous=None, result = np.empty(n, dtype=np.int64) + if is_tzlocal(tz): + tz = get_tzlocal_tz(tz) + if is_tzlocal(tz): for i in range(n): v = vals[i] @@ -1222,6 +1254,9 @@ cdef int64_t[:] _normalize_local(int64_t[:] stamps, tzinfo tz): npy_datetimestruct dts int64_t delta, local_val + if is_tzlocal(tz): + tz = get_tzlocal_tz(tz) + if is_tzlocal(tz): for i in range(n): if stamps[i] == NPY_NAT: @@ -1301,6 +1336,9 @@ def is_date_array_normalized(int64_t[:] stamps, object tz=None): int64_t local_val, delta str typ + if is_tzlocal(tz): + tz = get_tzlocal_tz(tz) + if tz is None or 
is_utc(tz): for i in range(n): dt64_to_dtstruct(stamps[i], &dts) diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx index e28462f7103b9..fc9a8e26fe3d7 100644 --- a/pandas/_libs/tslibs/offsets.pyx +++ b/pandas/_libs/tslibs/offsets.pyx @@ -17,7 +17,7 @@ cimport numpy as cnp from numpy cimport int64_t cnp.import_array() - +cimport pandas._libs.tslibs from pandas._libs.tslibs cimport util from pandas._libs.tslibs.util cimport is_string_object, is_integer_object diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx index e38e9a1ca5df6..ef7c003fa9032 100644 --- a/pandas/_libs/tslibs/period.pyx +++ b/pandas/_libs/tslibs/period.pyx @@ -32,7 +32,8 @@ cimport pandas._libs.tslibs.util as util from pandas._libs.tslibs.util cimport is_period_object, is_string_object from pandas._libs.tslibs.timestamps import Timestamp -from pandas._libs.tslibs.timezones cimport is_utc, is_tzlocal, get_dst_info +from pandas._libs.tslibs.timezones cimport (is_utc, is_tzlocal, get_dst_info, + get_tzlocal_tz) from pandas._libs.tslibs.timedeltas import Timedelta from pandas._libs.tslibs.timedeltas cimport delta_to_nanoseconds @@ -1506,6 +1507,9 @@ cdef int64_t[:] localize_dt64arr_to_period(int64_t[:] stamps, npy_datetimestruct dts int64_t local_val + if is_tzlocal(tz): + tz = get_tzlocal_tz(tz) + if is_utc(tz) or tz is None: with nogil: for i in range(n): diff --git a/pandas/_libs/tslibs/resolution.pyx b/pandas/_libs/tslibs/resolution.pyx index 13a4f5ba48557..4decb2b21ce6f 100644 --- a/pandas/_libs/tslibs/resolution.pyx +++ b/pandas/_libs/tslibs/resolution.pyx @@ -9,7 +9,7 @@ from pandas._libs.tslibs.np_datetime cimport ( npy_datetimestruct, dt64_to_dtstruct) from pandas._libs.tslibs.frequencies cimport get_freq_code from pandas._libs.tslibs.timezones cimport ( - is_utc, is_tzlocal, maybe_get_tz, get_dst_info) + is_utc, is_tzlocal, maybe_get_tz, get_dst_info, get_tzlocal_tz) from pandas._libs.tslibs.conversion cimport tz_convert_utc_to_tzlocal from 
pandas._libs.tslibs.ccalendar cimport get_days_in_month @@ -50,6 +50,9 @@ cdef _reso_local(int64_t[:] stamps, object tz): npy_datetimestruct dts int64_t local_val, delta + if is_tzlocal(tz): + tz = get_tzlocal_tz(tz) + if is_utc(tz) or tz is None: for i in range(n): if stamps[i] == NPY_NAT: diff --git a/pandas/_libs/tslibs/timezones.pxd b/pandas/_libs/tslibs/timezones.pxd index 50c4a41f97a82..c7902452ade70 100644 --- a/pandas/_libs/tslibs/timezones.pxd +++ b/pandas/_libs/tslibs/timezones.pxd @@ -3,6 +3,9 @@ cpdef bint is_utc(object tz) cdef bint is_tzlocal(object tz) +cpdef object get_tzlocal_tz(object tz) +cpdef _set_tzlocal_tz(object tz) + cdef bint treat_tz_as_pytz(object tz) cdef bint treat_tz_as_dateutil(object tz) diff --git a/pandas/_libs/tslibs/timezones.pyx b/pandas/_libs/tslibs/timezones.pyx index 43a35d77dd127..5031481d3cd34 100644 --- a/pandas/_libs/tslibs/timezones.pyx +++ b/pandas/_libs/tslibs/timezones.pyx @@ -12,12 +12,15 @@ from pytz.tzinfo import BaseTzInfo as _pytz_BaseTzInfo import pytz UTC = pytz.utc +tzlocal_tz = None import numpy as np cimport numpy as cnp from numpy cimport int64_t cnp.import_array() +import pandas._libs.src.tzlocal as tzlocal_package + # ---------------------------------------------------------------------- from pandas._libs.tslibs.util cimport ( is_string_object, is_integer_object, get_nat) @@ -34,6 +37,25 @@ cdef inline bint is_tzlocal(object tz): return isinstance(tz, _dateutil_tzlocal) +cpdef object get_tzlocal_tz(object tz): + global tzlocal_tz + if tzlocal_tz is None: + local_tz = tzlocal_package.get_localzone() + tzlocal_tz = local_tz.zone + + try: + return pytz.timezone(tzlocal_tz) + except pytz.exceptions.UnknownTimeZoneError: + tzlocal_tz = None + + return tz + + +cpdef _set_tzlocal_tz(object tz_str): + global tzlocal_tz + tzlocal_tz = str(tz_str) + + cdef inline bint treat_tz_as_pytz(object tz): return (hasattr(tz, '_utc_transition_times') and hasattr(tz, '_transition_info')) diff --git 
a/pandas/tests/tslibs/test_timezones.py b/pandas/tests/tslibs/test_timezones.py index 0255865dbdf71..4f4678ef39d29 100644 --- a/pandas/tests/tslibs/test_timezones.py +++ b/pandas/tests/tslibs/test_timezones.py @@ -49,6 +49,27 @@ def test_tzlocal_offset(): assert ts.value + offset == Timestamp("2011-01-01").value +def test_tzlocal_package(): + orig_tzlocal_tz = timezones.get_tzlocal_tz(None) + # Check that we fall back to non-tzlocal code path if user has an invalid + # system tz set + timezones._set_tzlocal_tz('foo/bar') + ts = Timestamp("2011-01-01", tz=dateutil.tz.tzlocal()) + assert ts.tz == dateutil.tz.tzlocal() + + utc = pytz.utc + for tz in pytz.all_timezones: + # Check that all pytz timezones work + ts_tz = Timestamp("2011-01-01", tz=tz) + timezones._set_tzlocal_tz(tz) + ts_local = Timestamp("2011-01-01", tz=dateutil.tz.tzlocal()) + + assert ts_tz.astimezone(utc) == ts_local.astimezone(utc) + assert ts_local.tz == dateutil.tz.tzlocal() + + timezones._set_tzlocal_tz(orig_tzlocal_tz.zone) + + @pytest.fixture(params=[ (pytz.timezone("US/Eastern"), lambda tz, x: tz.localize(x)), (dateutil.tz.gettz("US/Eastern"), lambda tz, x: x.replace(tzinfo=tz))
This is a follow-up to https://github.com/pandas-dev/pandas/pull/24491, where it was noted that `tzlocal` operations are notably slower: ``` [ 53.12%] ··· timeseries.DatetimeIndex.time_add_timedelta ok [ 53.12%] ··· ============ ============= index_type ------------ ------------- dst 471±8μs repeated 3.52±0.05ms tz_aware 3.78±0.07ms **tz_local 5.77±0.01s** tz_naive 1.98±0.03ms ============ ============= ``` This PR makes them indistinguishable from other timezones: ``` [ 60.00%] ··· timeseries.DatetimeIndex.time_add_timedelta ok [ 60.00%] ··· ============ ============= index_type ------------ ------------- dst 465±8μs repeated 3.55±0.07ms tz_aware 3.65±0.05ms tz_local 3.68±0.04ms tz_naive 1.81±0.05ms ============ ============= ``` To do this, we add a new optional dependency: `tzlocal`. This package queries the OS to try and identify a `pytz` compatible timezone name for the local timezone. If this is successful, we can use the `pytz` timezone instead of `dateutil.tz.tzlocal()`, yielding a huge speedup as the latter invokes `datetime.datetime()`. In cases where the local timezone is also UTC, further speedups are possible. In all cases, the `tzlocal()` object is maintained as the `.tz` to preserve any semantic meaning. One unresolved issue is an incompatibility with `python-dateutil` versions prior to `2.6.0`. 
I'm unable to reproduce the error locally, but can repeatably get this error on Travis: ``` def test_timetz_accessor(self, tz_naive_fixture): # GH21358 tz = timezones.maybe_get_tz(tz_naive_fixture) expected = np.array([time(10, 20, 30, tzinfo=tz), pd.NaT]) index = DatetimeIndex(['2018-06-04 10:20:30', pd.NaT], tz=tz) result = index.timetz > tm.assert_numpy_array_equal(result, expected) pandas/tests/indexes/datetimes/test_timezones.py:824: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ pandas/core/dtypes/missing.py:404: in array_equivalent ensure_object(left.ravel()), ensure_object(right.ravel())) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ > if not (PyObject_RichCompareBool(x, y, Py_EQ) or E TypeError: can't compare offset-naive and offset-aware times ``` While this only shows up on the 2.7 build, I believe this is due to it being the only one actually building against `python-dateutil=2.5.0`. Any version `>=2.6.0` does not exhibit this error. Finally, here is the `asv` benchmark against `v0.24.0rc1`: ``` $ asv compare v0.24.0rc1 HEAD -s --sort ratio --only-changed before after ratio [fdc4db25] [56e5cc0d] <v0.24.0rc1^0> <tzlocal> - 68.4±5μs 17.9±1μs 0.26 timeseries.DatetimeIndex.time_get('tz_local') - 5.54±0.02s 188±2ms 0.03 timeseries.DatetimeIndex.time_to_pydatetime('tz_local') - 7.13±0.02s 30.2±0.7ms 0.00 timeseries.DatetimeIndex.time_normalize('tz_local') - 6.00±0.05s 23.7±0.3ms 0.00 timeseries.DatetimeIndex.time_to_time('tz_local') - 6.00±0.01s 21.3±0.3ms 0.00 timeseries.DatetimeIndex.time_to_date('tz_local') - 5.98±0.06s 3.68±0.04ms 0.00 timeseries.DatetimeIndex.time_add_timedelta('tz_local') - 5.96±0.05s 2.49±0.05ms 0.00 timeseries.DatetimeIndex.time_timeseries_is_month_start('tz_local') ``` - [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/24737
2019-01-12T00:57:16Z
2019-06-08T20:38:59Z
null
2019-06-08T20:38:59Z
TST: avoid DST transitions when testing DateTimeIndex roundtrip
diff --git a/pandas/tests/indexes/datetimes/test_timezones.py b/pandas/tests/indexes/datetimes/test_timezones.py index 8bcc9296cb010..12c1b15733895 100644 --- a/pandas/tests/indexes/datetimes/test_timezones.py +++ b/pandas/tests/indexes/datetimes/test_timezones.py @@ -434,24 +434,19 @@ def test_dti_tz_localize_utc_conversion(self, tz): with pytest.raises(pytz.NonExistentTimeError): rng.tz_localize(tz) - @pytest.mark.parametrize('idx', [ - date_range(start='2014-01-01', end='2014-12-31', freq='M'), - date_range(start='2014-01-01', end='2014-12-31', freq='D'), - date_range(start='2014-01-01', end='2014-03-01', freq='H'), - date_range(start='2014-08-01', end='2014-10-31', freq='T') - ]) - def test_dti_tz_localize_roundtrip(self, tz_aware_fixture, idx): + def test_dti_tz_localize_roundtrip(self, tz_aware_fixture): + # note: this tz tests that a tz-naive index can be localized + # and de-localized successfully, when there are no DST transitions + # in the range. + idx = date_range(start='2014-06-01', end='2014-08-30', freq='15T') tz = tz_aware_fixture localized = idx.tz_localize(tz) - expected = date_range(start=idx[0], end=idx[-1], freq=idx.freq, - tz=tz) - tm.assert_index_equal(localized, expected) + # cant localize a tz-aware object with pytest.raises(TypeError): localized.tz_localize(tz) - reset = localized.tz_localize(None) - tm.assert_index_equal(reset, idx) assert reset.tzinfo is None + tm.assert_index_equal(reset, idx) def test_dti_tz_localize_naive(self): rng = date_range('1/1/2011', periods=100, freq='H')
- [x] closes #24689 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/24736
2019-01-11T23:21:14Z
2019-02-02T22:54:10Z
2019-02-02T22:54:10Z
2019-02-03T14:11:02Z
PERF: fix some of .clip() performance regression by using numpy arrays where possible
diff --git a/asv_bench/benchmarks/series_methods.py b/asv_bench/benchmarks/series_methods.py index 5b0981dc10a8a..f7d0083b86a01 100644 --- a/asv_bench/benchmarks/series_methods.py +++ b/asv_bench/benchmarks/series_methods.py @@ -140,11 +140,13 @@ def time_map(self, mapper): class Clip(object): + params = [50, 1000, 10**5] + param_names = ['n'] - def setup(self): - self.s = Series(np.random.randn(50)) + def setup(self, n): + self.s = Series(np.random.randn(n)) - def time_clip(self): + def time_clip(self, n): self.s.clip(0, 1) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index a0ee9cb253fef..2b97661fe9ec3 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -7148,12 +7148,18 @@ def _clip_with_scalar(self, lower, upper, inplace=False): raise ValueError("Cannot use an NA value as a clip threshold") result = self - if upper is not None: - subset = self.le(upper, axis=None) | isna(result) - result = result.where(subset, upper, axis=None, inplace=False) - if lower is not None: - subset = self.ge(lower, axis=None) | isna(result) - result = result.where(subset, lower, axis=None, inplace=False) + mask = isna(self.values) + + with np.errstate(all='ignore'): + if upper is not None: + subset = self.to_numpy() <= upper + result = result.where(subset, upper, axis=None, inplace=False) + if lower is not None: + subset = self.to_numpy() >= lower + result = result.where(subset, lower, axis=None, inplace=False) + + if np.any(mask): + result[mask] = np.nan if inplace: self._update_inplace(result)
A recent change to respect dtypes in `.clip()` (https://github.com/pandas-dev/pandas/pull/24458) introduced a decent overhead of ~2ms to the call: ``` $ asv compare v0.23.4 v0.24.0rc1 --only-changed before after ratio [04095216] [fdc4db25] <v0.23.4^0> <v0.24.0rc1^0> + 120±1μs 2.04±0.04ms 17.01 series_methods.Clip.time_clip(1000) + 936±40μs 3.09±0.02ms 3.31 series_methods.Clip.time_clip(100000) + 111±3μs 2.05±0.08ms 18.55 series_methods.Clip.time_clip(50) ``` This PR cuts the overhead from ~2ms to ~0.6ms by keeping `subset` as a numpy array; it's entirely boolean regardless of underlying dtype, so a DataFrame only adds overhead here: ``` $ asv compare v0.24.0rc1 HEAD --only-changed -s before after ratio [fdc4db25] [63c47c58] <v0.24.0rc1^0> <clip> - 2.04±0.04ms 759±20μs 0.37 series_methods.Clip.time_clip(1000) - 3.09±0.02ms 1.46±0.04ms 0.47 series_methods.Clip.time_clip(100000) - 2.05±0.08ms 724±20μs 0.35 series_methods.Clip.time_clip(50) ``` - [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/24735
2019-01-11T22:17:21Z
2019-01-20T16:05:08Z
2019-01-20T16:05:08Z
2019-01-20T16:05:11Z
BUG-24212 fix usage of Index.take in pd.merge
diff --git a/doc/source/whatsnew/v0.24.0.rst b/doc/source/whatsnew/v0.24.0.rst index 3268575c7064d..a4598b315cbb7 100644 --- a/doc/source/whatsnew/v0.24.0.rst +++ b/doc/source/whatsnew/v0.24.0.rst @@ -1817,6 +1817,7 @@ Reshaping - Bug in :func:`DataFrame.unstack` where a ``ValueError`` was raised when unstacking timezone aware values (:issue:`18338`) - Bug in :func:`DataFrame.stack` where timezone aware values were converted to timezone naive values (:issue:`19420`) - Bug in :func:`merge_asof` where a ``TypeError`` was raised when ``by_col`` were timezone aware values (:issue:`21184`) +- Bug in :func:`merge` when merging by index name would sometimes result in an incorrectly numbered index (:issue:`24212`) - Bug showing an incorrect shape when throwing error during ``DataFrame`` construction. (:issue:`20742`) .. _whatsnew_0240.bug_fixes.sparse: diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index e11847d2b8ce2..0a51f2ee0dce7 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -757,13 +757,19 @@ def _get_join_info(self): if self.right_index: if len(self.left) > 0: - join_index = self.left.index.take(left_indexer) + join_index = self._create_join_index(self.left.index, + self.right.index, + left_indexer, + how='right') else: join_index = self.right.index.take(right_indexer) left_indexer = np.array([-1] * len(join_index)) elif self.left_index: if len(self.right) > 0: - join_index = self.right.index.take(right_indexer) + join_index = self._create_join_index(self.right.index, + self.left.index, + right_indexer, + how='left') else: join_index = self.left.index.take(left_indexer) right_indexer = np.array([-1] * len(join_index)) @@ -774,6 +780,37 @@ def _get_join_info(self): join_index = join_index.astype(object) return join_index, left_indexer, right_indexer + def _create_join_index(self, index, other_index, indexer, how='left'): + """ + Create a join index by rearranging one index to match another + + Parameters + 
---------- + index: Index being rearranged + other_index: Index used to supply values not found in index + indexer: how to rearrange index + how: replacement is only necessary if indexer based on other_index + + Returns + ------- + join_index + """ + join_index = index.take(indexer) + if (self.how in (how, 'outer') and + not isinstance(other_index, MultiIndex)): + # if final index requires values in other_index but not target + # index, indexer may hold missing (-1) values, causing Index.take + # to take the final value in target index + mask = indexer == -1 + if np.any(mask): + # if values missing (-1) from target index, + # take from other_index instead + join_list = join_index.to_numpy() + join_list[mask] = other_index.to_numpy()[mask] + join_index = Index(join_list, dtype=join_index.dtype, + name=join_index.name) + return join_index + def _get_merge_keys(self): """ Note: has side effects (copy/delete key columns) diff --git a/pandas/tests/reshape/merge/test_merge.py b/pandas/tests/reshape/merge/test_merge.py index 1d7c42b7328d0..e123a5171769d 100644 --- a/pandas/tests/reshape/merge/test_merge.py +++ b/pandas/tests/reshape/merge/test_merge.py @@ -939,6 +939,26 @@ def test_merge_two_empty_df_no_division_error(self): with np.errstate(divide='raise'): merge(a, a, on=('a', 'b')) + @pytest.mark.parametrize('how', ['left', 'outer']) + def test_merge_on_index_with_more_values(self, how): + # GH 24212 + # pd.merge gets [-1, -1, 0, 1] as right_indexer, ensure that -1 is + # interpreted as a missing value instead of the last element + df1 = pd.DataFrame([[1, 2], [2, 4], [3, 6], [4, 8]], + columns=['a', 'b']) + df2 = pd.DataFrame([[3, 30], [4, 40]], + columns=['a', 'c']) + df1.set_index('a', drop=False, inplace=True) + df2.set_index('a', inplace=True) + result = pd.merge(df1, df2, left_index=True, right_on='a', how=how) + expected = pd.DataFrame([[1, 2, np.nan], + [2, 4, np.nan], + [3, 6, 30.0], + [4, 8, 40.0]], + columns=['a', 'b', 'c']) + expected.set_index('a', 
drop=False, inplace=True) + assert_frame_equal(result, expected) + def _check_merge(x, y): for how in ['inner', 'left', 'outer']:
- [X] closes #24212 - [X] tests added / passed - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [X] whatsnew entry The use of `Index.take` in `pd.merge` to create `join_index` causes an indexer with -1 values to take the last value in the index being taken from. This PR allows it to correctly handle absent values.
https://api.github.com/repos/pandas-dev/pandas/pulls/24733
2019-01-11T20:57:04Z
2019-01-20T16:02:43Z
2019-01-20T16:02:43Z
2019-01-20T16:02:53Z
BUG: Prevent 3D-ndarray for nested tuple labels (#24687)
Being a very rare issue encountered with nested tuples as column and index labels, here is the fix I've managed to come up with - for the time being. While `clean_index_list()` in `pandas/_libs/lib.pyx` is responsible for returning an invalid result (a 3D ndarray where the inner dimensions should be nested tuples), debugging Cython is very challenging for me. And yes, use of tuples in this way is extremely uncommon. So far, the code has run successfully on two distance matrices. <img width="1191" alt="fix" src="https://user-images.githubusercontent.com/17303881/51058729-b9af9080-15b7-11e9-9726-3bde8ca67330.png">
https://api.github.com/repos/pandas-dev/pandas/pulls/24732
2019-01-11T20:44:11Z
2019-01-16T02:13:01Z
null
2019-01-16T02:13:01Z
Change MultiIndex repr (labels -> codes)
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 99114b7dcf34d..5a9bf6c2c6263 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -1302,15 +1302,15 @@ def set_names(self, names, level=None, inplace=False): ... [2018, 2019]]) >>> idx MultiIndex(levels=[['cobra', 'python'], [2018, 2019]], - labels=[[1, 1, 0, 0], [0, 1, 0, 1]]) + codes=[[1, 1, 0, 0], [0, 1, 0, 1]]) >>> idx.set_names(['kind', 'year'], inplace=True) >>> idx MultiIndex(levels=[['cobra', 'python'], [2018, 2019]], - labels=[[1, 1, 0, 0], [0, 1, 0, 1]], + codes=[[1, 1, 0, 0], [0, 1, 0, 1]], names=['kind', 'year']) >>> idx.set_names('species', level=0) MultiIndex(levels=[['cobra', 'python'], [2018, 2019]], - labels=[[1, 1, 0, 0], [0, 1, 0, 1]], + codes=[[1, 1, 0, 0], [0, 1, 0, 1]], names=['species', 'year']) """ @@ -1373,11 +1373,11 @@ def rename(self, name, inplace=False): ... names=['kind', 'year']) >>> idx MultiIndex(levels=[['cobra', 'python'], [2018, 2019]], - labels=[[1, 1, 0, 0], [0, 1, 0, 1]], + codes=[[1, 1, 0, 0], [0, 1, 0, 1]], names=['kind', 'year']) >>> idx.rename(['species', 'year']) MultiIndex(levels=[['cobra', 'python'], [2018, 2019]], - labels=[[1, 1, 0, 0], [0, 1, 0, 1]], + codes=[[1, 1, 0, 0], [0, 1, 0, 1]], names=['species', 'year']) >>> idx.rename('species') Traceback (most recent call last): @@ -4511,7 +4511,7 @@ def isin(self, values, level=None): ... 
names=('number', 'color')) >>> midx MultiIndex(levels=[[1, 2, 3], ['blue', 'green', 'red']], - labels=[[0, 1, 2], [2, 0, 1]], + codes=[[0, 1, 2], [2, 0, 1]], names=['number', 'color']) Check whether the strings in the 'color' level of the MultiIndex @@ -5214,7 +5214,7 @@ def ensure_index_from_sequences(sequences, names=None): >>> ensure_index_from_sequences([['a', 'a'], ['a', 'b']], names=['L1', 'L2']) MultiIndex(levels=[['a'], ['a', 'b']], - labels=[[0, 0], [0, 1]], + codes=[[0, 0], [0, 1]], names=['L1', 'L2']) See Also @@ -5255,7 +5255,7 @@ def ensure_index(index_like, copy=False): >>> ensure_index([['a', 'a'], ['b', 'c']]) MultiIndex(levels=[['a'], ['b', 'c']], - labels=[[0, 0], [0, 1]]) + codes=[[0, 0], [0, 1]]) See Also -------- diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index 8d26080a0361d..795bfe7a73541 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -190,8 +190,8 @@ class MultiIndex(Index): >>> arrays = [[1, 1, 2, 2], ['red', 'blue', 'red', 'blue']] >>> pd.MultiIndex.from_arrays(arrays, names=('number', 'color')) MultiIndex(levels=[[1, 2], ['blue', 'red']], - labels=[[0, 0, 1, 1], [1, 0, 1, 0]], - names=['number', 'color']) + codes=[[0, 0, 1, 1], [1, 0, 1, 0]], + names=['number', 'color']) See further examples for how to construct a MultiIndex in the doc strings of the mentioned helper methods. @@ -321,7 +321,7 @@ def from_arrays(cls, arrays, sortorder=None, names=None): >>> arrays = [[1, 1, 2, 2], ['red', 'blue', 'red', 'blue']] >>> pd.MultiIndex.from_arrays(arrays, names=('number', 'color')) MultiIndex(levels=[[1, 2], ['blue', 'red']], - labels=[[0, 0, 1, 1], [1, 0, 1, 0]], + codes=[[0, 0, 1, 1], [1, 0, 1, 0]], names=['number', 'color']) """ if not is_list_like(arrays): @@ -376,7 +376,7 @@ def from_tuples(cls, tuples, sortorder=None, names=None): ... 
(2, u'red'), (2, u'blue')] >>> pd.MultiIndex.from_tuples(tuples, names=('number', 'color')) MultiIndex(levels=[[1, 2], ['blue', 'red']], - labels=[[0, 0, 1, 1], [1, 0, 1, 0]], + codes=[[0, 0, 1, 1], [1, 0, 1, 0]], names=['number', 'color']) """ if not is_list_like(tuples): @@ -433,7 +433,7 @@ def from_product(cls, iterables, sortorder=None, names=None): >>> pd.MultiIndex.from_product([numbers, colors], ... names=['number', 'color']) MultiIndex(levels=[[0, 1, 2], ['green', 'purple']], - labels=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]], + codes=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]], names=['number', 'color']) """ from pandas.core.arrays.categorical import _factorize_from_iterables @@ -493,14 +493,14 @@ def from_frame(cls, df, sortorder=None, names=None): >>> pd.MultiIndex.from_frame(df) MultiIndex(levels=[['HI', 'NJ'], ['Precip', 'Temp']], - labels=[[0, 0, 1, 1], [1, 0, 1, 0]], + codes=[[0, 0, 1, 1], [1, 0, 1, 0]], names=['a', 'b']) Using explicit names, instead of the column names >>> pd.MultiIndex.from_frame(df, names=['state', 'observation']) MultiIndex(levels=[['HI', 'NJ'], ['Precip', 'Temp']], - labels=[[0, 0, 1, 1], [1, 0, 1, 0]], + codes=[[0, 0, 1, 1], [1, 0, 1, 0]], names=['state', 'observation']) """ if not isinstance(df, ABCDataFrame): @@ -619,19 +619,19 @@ def set_levels(self, levels, level=None, inplace=False, names=['foo', 'bar']) >>> idx.set_levels([['a','b'], [1,2]]) MultiIndex(levels=[[u'a', u'b'], [1, 2]], - labels=[[0, 0, 1, 1], [0, 1, 0, 1]], + codes=[[0, 0, 1, 1], [0, 1, 0, 1]], names=[u'foo', u'bar']) >>> idx.set_levels(['a','b'], level=0) MultiIndex(levels=[[u'a', u'b'], [u'one', u'two']], - labels=[[0, 0, 1, 1], [0, 1, 0, 1]], + codes=[[0, 0, 1, 1], [0, 1, 0, 1]], names=[u'foo', u'bar']) >>> idx.set_levels(['a','b'], level='bar') MultiIndex(levels=[[1, 2], [u'a', u'b']], - labels=[[0, 0, 1, 1], [0, 1, 0, 1]], + codes=[[0, 0, 1, 1], [0, 1, 0, 1]], names=[u'foo', u'bar']) >>> idx.set_levels([['a','b'], [1,2]], level=[0,1]) 
MultiIndex(levels=[[u'a', u'b'], [1, 2]], - labels=[[0, 0, 1, 1], [0, 1, 0, 1]], + codes=[[0, 0, 1, 1], [0, 1, 0, 1]], names=[u'foo', u'bar']) """ if is_list_like(levels) and not isinstance(levels, Index): @@ -738,19 +738,19 @@ def set_codes(self, codes, level=None, inplace=False, names=['foo', 'bar']) >>> idx.set_codes([[1,0,1,0], [0,0,1,1]]) MultiIndex(levels=[[1, 2], [u'one', u'two']], - labels=[[1, 0, 1, 0], [0, 0, 1, 1]], + codes=[[1, 0, 1, 0], [0, 0, 1, 1]], names=[u'foo', u'bar']) >>> idx.set_codes([1,0,1,0], level=0) MultiIndex(levels=[[1, 2], [u'one', u'two']], - labels=[[1, 0, 1, 0], [0, 1, 0, 1]], + codes=[[1, 0, 1, 0], [0, 1, 0, 1]], names=[u'foo', u'bar']) >>> idx.set_codes([0,0,1,1], level='bar') MultiIndex(levels=[[1, 2], [u'one', u'two']], - labels=[[0, 0, 1, 1], [0, 0, 1, 1]], + codes=[[0, 0, 1, 1], [0, 0, 1, 1]], names=[u'foo', u'bar']) >>> idx.set_codes([[1,0,1,0], [0,0,1,1]], level=[0,1]) MultiIndex(levels=[[1, 2], [u'one', u'two']], - labels=[[1, 0, 1, 0], [0, 0, 1, 1]], + codes=[[1, 0, 1, 0], [0, 0, 1, 1]], names=[u'foo', u'bar']) """ if level is not None and not is_list_like(level): @@ -909,8 +909,8 @@ def _format_attrs(self): attrs = [ ('levels', ibase.default_pprint(self._levels, max_seq_items=False)), - ('labels', ibase.default_pprint(self._codes, - max_seq_items=False))] + ('codes', ibase.default_pprint(self._codes, + max_seq_items=False))] if com._any_not_none(*self.names): attrs.append(('names', ibase.default_pprint(self.names))) if self.sortorder is not None: @@ -1509,8 +1509,8 @@ def to_hierarchical(self, n_repeat, n_shuffle=1): (2, u'one'), (2, u'two')]) >>> idx.to_hierarchical(3) MultiIndex(levels=[[1, 2], [u'one', u'two']], - labels=[[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1], - [0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1]]) + codes=[[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1], + [0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1]]) """ levels = self.levels codes = [np.repeat(level_codes, n_repeat) for @@ -1601,11 +1601,11 @@ def _sort_levels_monotonic(self): 
codes=[[0, 0, 1, 1], [0, 1, 0, 1]]) >>> i MultiIndex(levels=[['a', 'b'], ['bb', 'aa']], - labels=[[0, 0, 1, 1], [0, 1, 0, 1]]) + codes=[[0, 0, 1, 1], [0, 1, 0, 1]]) >>> i.sort_monotonic() MultiIndex(levels=[['a', 'b'], ['aa', 'bb']], - labels=[[0, 0, 1, 1], [1, 0, 1, 0]]) + codes=[[0, 0, 1, 1], [1, 0, 1, 0]]) """ @@ -1657,18 +1657,18 @@ def remove_unused_levels(self): -------- >>> i = pd.MultiIndex.from_product([range(2), list('ab')]) MultiIndex(levels=[[0, 1], ['a', 'b']], - labels=[[0, 0, 1, 1], [0, 1, 0, 1]]) + codes=[[0, 0, 1, 1], [0, 1, 0, 1]]) >>> i[2:] MultiIndex(levels=[[0, 1], ['a', 'b']], - labels=[[1, 1], [0, 1]]) + codes=[[1, 1], [0, 1]]) The 0 from the first level is not represented and can be removed >>> i[2:].remove_unused_levels() MultiIndex(levels=[[1], ['a', 'b']], - labels=[[0, 0], [0, 1]]) + codes=[[0, 0], [0, 1]]) """ new_levels = [] @@ -1975,10 +1975,10 @@ def swaplevel(self, i=-2, j=-1): ... codes=[[0, 0, 1, 1], [0, 1, 0, 1]]) >>> mi MultiIndex(levels=[['a', 'b'], ['bb', 'aa']], - labels=[[0, 0, 1, 1], [0, 1, 0, 1]]) + codes=[[0, 0, 1, 1], [0, 1, 0, 1]]) >>> mi.swaplevel(0, 1) MultiIndex(levels=[['bb', 'aa'], ['a', 'b']], - labels=[[0, 1, 0, 1], [0, 0, 1, 1]]) + codes=[[0, 1, 0, 1], [0, 0, 1, 1]]) """ new_levels = list(self.levels) new_codes = list(self.codes) diff --git a/pandas/core/strings.py b/pandas/core/strings.py index 49187aad4f1eb..ca79dcd9408d8 100644 --- a/pandas/core/strings.py +++ b/pandas/core/strings.py @@ -2473,7 +2473,7 @@ def rsplit(self, pat=None, n=-1, expand=False): >>> idx.str.partition() MultiIndex(levels=[['X', 'Y'], [' '], ['123', '999']], - labels=[[0, 1], [0, 0], [0, 1]]) + codes=[[0, 1], [0, 0], [0, 1]]) Or an index with tuples with ``expand=False``: diff --git a/pandas/tests/util/test_assert_index_equal.py b/pandas/tests/util/test_assert_index_equal.py index b96345d4bd7ce..b5409bf7cd2bf 100644 --- a/pandas/tests/util/test_assert_index_equal.py +++ b/pandas/tests/util/test_assert_index_equal.py @@ -13,7 +13,7 @@ 
def test_index_equal_levels_mismatch(): Index levels are different \\[left\\]: 1, Int64Index\\(\\[1, 2, 3\\], dtype='int64'\\) \\[right\\]: 2, MultiIndex\\(levels=\\[\\[u?'A', u?'B'\\], \\[1, 2, 3, 4\\]\\], - labels=\\[\\[0, 0, 1, 1\\], \\[0, 1, 2, 3\\]\\]\\)""" + codes=\\[\\[0, 0, 1, 1\\], \\[0, 1, 2, 3\\]\\]\\)""" idx1 = Index([1, 2, 3]) idx2 = MultiIndex.from_tuples([("A", 1), ("A", 2),
As I mentioned in https://github.com/pandas-dev/pandas/pull/22511#issuecomment-451667687, the repr for MultiIndex ATM uses ``labels``, even though the parameter/attribute has been changed to ``codes``. ```python >>> mi = pd.MultiIndex.from_product([[8,9,0], ['a', 'b', 'c']]) >>> MultiIndex(levels=[[0, 8, 9], ['a', 'b', 'c']], ... labels=[[1, 1, 1, 2, 2, 2, 0, 0, 0], [0, 1, 2, 0, 1, 2, 0, 1, 2]]) ``` This PR changes the repr, so it uses ``codes``. Fot the record I think we should either decide to adapt #22511 soon, or soon come up with an consensus about an alternative repr to be implemented. The current MultiIndex repr is really, really (really!) bad for large indexes and needs to be be changed.
https://api.github.com/repos/pandas-dev/pandas/pulls/24731
2019-01-11T19:18:11Z
2019-01-13T19:01:43Z
2019-01-13T19:01:43Z
2019-01-13T19:49:48Z
BUG: Switched shapes in ValueError msg in DataFrame construct (#20742)
diff --git a/doc/source/whatsnew/v0.24.0.rst b/doc/source/whatsnew/v0.24.0.rst index 3685a24d60e74..bf799b43477a0 100644 --- a/doc/source/whatsnew/v0.24.0.rst +++ b/doc/source/whatsnew/v0.24.0.rst @@ -1816,6 +1816,7 @@ Reshaping - Bug in :func:`DataFrame.unstack` where a ``ValueError`` was raised when unstacking timezone aware values (:issue:`18338`) - Bug in :func:`DataFrame.stack` where timezone aware values were converted to timezone naive values (:issue:`19420`) - Bug in :func:`merge_asof` where a ``TypeError`` was raised when ``by_col`` were timezone aware values (:issue:`21184`) +- Bug showing an incorrect shape when throwing error during ``DataFrame`` construction. (:issue:`20742`) .. _whatsnew_0240.bug_fixes.sparse: @@ -1853,6 +1854,7 @@ Other - Bug where C variables were declared with external linkage causing import errors if certain other C libraries were imported before Pandas. (:issue:`24113`) + .. _whatsnew_0.24.0.contributors: Contributors diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index ab033ff4c1c4b..050c3d3e87fc6 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -1674,7 +1674,15 @@ def create_block_manager_from_arrays(arrays, names, axes): def construction_error(tot_items, block_shape, axes, e=None): """ raise a helpful message about our construction """ passed = tuple(map(int, [tot_items] + list(block_shape))) - implied = tuple(map(int, [len(ax) for ax in axes])) + # Correcting the user facing error message during dataframe construction + if len(passed) <= 2: + passed = passed[::-1] + + implied = tuple(len(ax) for ax in axes) + # Correcting the user facing error message during dataframe construction + if len(implied) <= 2: + implied = implied[::-1] + if passed == implied and e is not None: raise e if block_shape[0] == 0: diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py index c8b3f23db1492..4e0143c368e10 100644 --- 
a/pandas/tests/frame/test_constructors.py +++ b/pandas/tests/frame/test_constructors.py @@ -386,25 +386,35 @@ def test_constructor_error_msgs(self): 'B': ['a', 'b', 'c']}) # wrong size ndarray, GH 3105 - msg = r"Shape of passed values is \(3, 4\), indices imply \(3, 3\)" + msg = r"Shape of passed values is \(4, 3\), indices imply \(3, 3\)" with pytest.raises(ValueError, match=msg): DataFrame(np.arange(12).reshape((4, 3)), columns=['foo', 'bar', 'baz'], index=pd.date_range('2000-01-01', periods=3)) + arr = np.array([[4, 5, 6]]) + msg = r"Shape of passed values is \(1, 3\), indices imply \(1, 4\)" + with pytest.raises(ValueError, match=msg): + DataFrame(index=[0], columns=range(0, 4), data=arr) + + arr = np.array([4, 5, 6]) + msg = r"Shape of passed values is \(3, 1\), indices imply \(1, 4\)" + with pytest.raises(ValueError, match=msg): + DataFrame(index=[0], columns=range(0, 4), data=arr) + # higher dim raise exception with pytest.raises(ValueError, match='Must pass 2-d input'): DataFrame(np.zeros((3, 3, 3)), columns=['A', 'B', 'C'], index=[1]) # wrong size axis labels msg = ("Shape of passed values " - r"is \(3, 2\), indices " - r"imply \(3, 1\)") + r"is \(2, 3\), indices " + r"imply \(1, 3\)") with pytest.raises(ValueError, match=msg): DataFrame(np.random.rand(2, 3), columns=['A', 'B', 'C'], index=[1]) msg = ("Shape of passed values " - r"is \(3, 2\), indices " + r"is \(2, 3\), indices " r"imply \(2, 2\)") with pytest.raises(ValueError, match=msg): DataFrame(np.random.rand(2, 3), columns=['A', 'B'], index=[1, 2]) @@ -638,10 +648,10 @@ def _check_basic_constructor(self, empty): assert frame.values.dtype == np.int64 # wrong size axis labels - msg = r'Shape of passed values is \(3, 2\), indices imply \(3, 1\)' + msg = r'Shape of passed values is \(2, 3\), indices imply \(1, 3\)' with pytest.raises(ValueError, match=msg): DataFrame(mat, columns=['A', 'B', 'C'], index=[1]) - msg = r'Shape of passed values is \(3, 2\), indices imply \(2, 2\)' + msg = r'Shape of passed 
values is \(2, 3\), indices imply \(2, 2\)' with pytest.raises(ValueError, match=msg): DataFrame(mat, columns=['A', 'B'], index=[1, 2]) @@ -1805,7 +1815,7 @@ def test_from_records_to_records(self): tm.assert_frame_equal(DataFrame.from_records(arr2), DataFrame(arr2)) # wrong length - msg = r'Shape of passed values is \(3, 2\), indices imply \(3, 1\)' + msg = r'Shape of passed values is \(2, 3\), indices imply \(1, 3\)' with pytest.raises(ValueError, match=msg): DataFrame.from_records(arr, index=index[:-1]) diff --git a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py index 155083900f83a..23c40276072d6 100644 --- a/pandas/tests/io/json/test_pandas.py +++ b/pandas/tests/io/json/test_pandas.py @@ -346,7 +346,7 @@ def test_frame_from_json_bad_data(self): json = StringIO('{"columns":["A","B"],' '"index":["2","3"],' '"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}') - msg = r"Shape of passed values is \(2, 3\), indices imply \(2, 2\)" + msg = r"Shape of passed values is \(3, 2\), indices imply \(2, 2\)" with pytest.raises(ValueError, match=msg): read_json(json, orient="split")
- [x] closes #20742 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/24725
2019-01-11T15:13:07Z
2019-01-18T16:53:36Z
2019-01-18T16:53:35Z
2019-01-18T16:53:41Z
BUG: Switched shapes in ValueError msg in DataFrame construct (#20742)
diff --git a/doc/source/whatsnew/v0.24.0.rst b/doc/source/whatsnew/v0.24.0.rst index dd06bade2a203..096311a44400b 100644 --- a/doc/source/whatsnew/v0.24.0.rst +++ b/doc/source/whatsnew/v0.24.0.rst @@ -1852,6 +1852,7 @@ Other ^^^^^ - Bug where C variables were declared with external linkage causing import errors if certain other C libraries were imported before Pandas. (:issue:`24113`) +- Switched shape in ``ValueError`` message when constructiong a :class:`DataFrame` with parameters ``columns`` and ``index`` not matching the shape of the input data. (:issue:`20742`) .. _whatsnew_0.24.0.contributors: diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index ab033ff4c1c4b..d5653641ac55a 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -1673,8 +1673,8 @@ def create_block_manager_from_arrays(arrays, names, axes): def construction_error(tot_items, block_shape, axes, e=None): """ raise a helpful message about our construction """ - passed = tuple(map(int, [tot_items] + list(block_shape))) - implied = tuple(map(int, [len(ax) for ax in axes])) + passed = tuple(map(int, reversed([tot_items] + list(block_shape)))) + implied = tuple(map(int, reversed([len(ax) for ax in axes]))) if passed == implied and e is not None: raise e if block_shape[0] == 0: diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py index c8b3f23db1492..a01377f3a802a 100644 --- a/pandas/tests/frame/test_constructors.py +++ b/pandas/tests/frame/test_constructors.py @@ -386,25 +386,35 @@ def test_constructor_error_msgs(self): 'B': ['a', 'b', 'c']}) # wrong size ndarray, GH 3105 - msg = r"Shape of passed values is \(3, 4\), indices imply \(3, 3\)" + msg = r"Shape of passed values is \(4, 3\), indices imply \(3, 3\)" with pytest.raises(ValueError, match=msg): DataFrame(np.arange(12).reshape((4, 3)), columns=['foo', 'bar', 'baz'], index=pd.date_range('2000-01-01', periods=3)) + # see issue #20742 + 
msg = r"Shape of passed values is \(1, 3\), indices imply \(1, 4\)" + with pytest.raises(ValueError, match=msg): + df = pd.DataFrame(index=[0], columns=range(0, 4), data=np.array([[4,5,6]])) + + # see issue #20742 + msg = r"Shape of passed values is \(3, 1\), indices imply \(1, 4\)" + with pytest.raises(ValueError, match=msg): + df = pd.DataFrame(index=[0], columns=range(0, 4), data=np.array([4,5,6])) + # higher dim raise exception with pytest.raises(ValueError, match='Must pass 2-d input'): DataFrame(np.zeros((3, 3, 3)), columns=['A', 'B', 'C'], index=[1]) # wrong size axis labels msg = ("Shape of passed values " - r"is \(3, 2\), indices " - r"imply \(3, 1\)") + r"is \(2, 3\), indices " + r"imply \(1, 3\)") with pytest.raises(ValueError, match=msg): DataFrame(np.random.rand(2, 3), columns=['A', 'B', 'C'], index=[1]) msg = ("Shape of passed values " - r"is \(3, 2\), indices " + r"is \(2, 3\), indices " r"imply \(2, 2\)") with pytest.raises(ValueError, match=msg): DataFrame(np.random.rand(2, 3), columns=['A', 'B'], index=[1, 2]) @@ -638,10 +648,10 @@ def _check_basic_constructor(self, empty): assert frame.values.dtype == np.int64 # wrong size axis labels - msg = r'Shape of passed values is \(3, 2\), indices imply \(3, 1\)' + msg = r'Shape of passed values is \(2, 3\), indices imply \(1, 3\)' with pytest.raises(ValueError, match=msg): DataFrame(mat, columns=['A', 'B', 'C'], index=[1]) - msg = r'Shape of passed values is \(3, 2\), indices imply \(2, 2\)' + msg = r'Shape of passed values is \(2, 3\), indices imply \(2, 2\)' with pytest.raises(ValueError, match=msg): DataFrame(mat, columns=['A', 'B'], index=[1, 2]) @@ -1805,7 +1815,7 @@ def test_from_records_to_records(self): tm.assert_frame_equal(DataFrame.from_records(arr2), DataFrame(arr2)) # wrong length - msg = r'Shape of passed values is \(3, 2\), indices imply \(3, 1\)' + msg = r'Shape of passed values is \(2, 3\), indices imply \(1, 3\)' with pytest.raises(ValueError, match=msg): DataFrame.from_records(arr, 
index=index[:-1])
- [x] closes #20742 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/24724
2019-01-11T14:59:21Z
2019-01-11T15:00:25Z
null
2019-01-11T15:00:25Z
TimedeltaArray freq validation without _from_sequence
diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py index 47b3f93f88b78..be1a7097b0e0d 100644 --- a/pandas/core/arrays/timedeltas.py +++ b/pandas/core/arrays/timedeltas.py @@ -15,8 +15,8 @@ from pandas.util._decorators import Appender from pandas.core.dtypes.common import ( - _NS_DTYPE, _TD_DTYPE, ensure_int64, is_datetime64_dtype, is_dtype_equal, - is_float_dtype, is_integer_dtype, is_list_like, is_object_dtype, is_scalar, + _NS_DTYPE, _TD_DTYPE, ensure_int64, is_datetime64_dtype, is_float_dtype, + is_integer_dtype, is_list_like, is_object_dtype, is_scalar, is_string_dtype, is_timedelta64_dtype, is_timedelta64_ns_dtype, pandas_dtype) from pandas.core.dtypes.dtypes import DatetimeTZDtype @@ -134,28 +134,61 @@ def dtype(self): _attributes = ["freq"] def __init__(self, values, dtype=_TD_DTYPE, freq=None, copy=False): - if not hasattr(values, "dtype"): - raise ValueError( + if isinstance(values, (ABCSeries, ABCIndexClass)): + values = values._values + + inferred_freq = getattr(values, "_freq", None) + + if isinstance(values, type(self)): + if freq is None: + freq = values.freq + elif freq and values.freq: + freq = to_offset(freq) + freq, _ = dtl.validate_inferred_freq(freq, values.freq, False) + values = values._data + + if not isinstance(values, np.ndarray): + msg = ( "Unexpected type '{}'. 'values' must be a TimedeltaArray " "ndarray, or Series or Index containing one of those." - .format(type(values).__name__)) + ) + raise ValueError(msg.format(type(values).__name__)) + + if values.dtype == 'i8': + # for compat with datetime/timedelta/period shared methods, + # we can sometimes get here with int64 values. 
These represent + # nanosecond UTC (or tz-naive) unix timestamps + values = values.view(_TD_DTYPE) + + if values.dtype != _TD_DTYPE: + raise TypeError(_BAD_DTYPE.format(dtype=values.dtype)) + + try: + dtype_mismatch = dtype != _TD_DTYPE + except TypeError: + raise TypeError(_BAD_DTYPE.format(dtype=dtype)) + else: + if dtype_mismatch: + raise TypeError(_BAD_DTYPE.format(dtype=dtype)) + if freq == "infer": - raise ValueError( + msg = ( "Frequency inference not allowed in TimedeltaArray.__init__. " - "Use 'pd.array()' instead.") + "Use 'pd.array()' instead." + ) + raise ValueError(msg) - if dtype is not None and not is_dtype_equal(dtype, _TD_DTYPE): - raise TypeError("dtype {dtype} cannot be converted to " - "timedelta64[ns]".format(dtype=dtype)) + if copy: + values = values.copy() + if freq: + freq = to_offset(freq) - if values.dtype == 'i8': - values = values.view('timedelta64[ns]') + self._data = values + self._dtype = dtype + self._freq = freq - result = type(self)._from_sequence(values, dtype=dtype, - copy=copy, freq=freq) - self._data = result._data - self._freq = result._freq - self._dtype = result._dtype + if inferred_freq is None and freq is not None: + type(self)._validate_frequency(self, freq) @classmethod def _simple_new(cls, values, freq=None, dtype=_TD_DTYPE): diff --git a/pandas/tests/arrays/test_timedeltas.py b/pandas/tests/arrays/test_timedeltas.py index af23b2467fcdf..a8745f78392ca 100644 --- a/pandas/tests/arrays/test_timedeltas.py +++ b/pandas/tests/arrays/test_timedeltas.py @@ -43,7 +43,7 @@ def test_incorrect_dtype_raises(self): def test_copy(self): data = np.array([1, 2, 3], dtype='m8[ns]') arr = TimedeltaArray(data, copy=False) - assert arr._data.base is data + assert arr._data is data arr = TimedeltaArray(data, copy=True) assert arr._data is not data
This aligns `TimedeltaArray.__init__` with `DatetimeArray.__init__`, using the same approach how @jbrockmendel now handled the freq validation in `DatetimeArray` in the merged https://github.com/pandas-dev/pandas/pull/24686 First commits removes usage of `from_sequence` (from https://github.com/pandas-dev/pandas/pull/24666), second commit adds the freq validation as in https://github.com/pandas-dev/pandas/pull/24686 So related to the discussion at the end https://github.com/pandas-dev/pandas/pull/24666 (and a partial revert of that). This makes the `TimedeltaArray` constructor again more restricted (only accepts correctly typed (int or timedelta64) containers. Until we decide on the constructors in general (https://github.com/pandas-dev/pandas/issues/24684), I would prefer to keep them strict: it is always easier to later expand functionality (and then also test it), than remove functionality.
https://api.github.com/repos/pandas-dev/pandas/pulls/24723
2019-01-11T10:58:21Z
2019-01-11T14:07:53Z
2019-01-11T14:07:53Z
2019-01-14T15:52:09Z
CLN: replace %s syntax with .format in pandas.io.parsers
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index 9a255231bbe70..2996e078a069d 100755 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -1485,9 +1485,9 @@ def extract(r): for n in range(len(columns[0])): if all(compat.to_str(c[n]) in self.unnamed_cols for c in columns): raise ParserError( - "Passed header=[%s] are too many rows for this " + "Passed header=[{header}] are too many rows for this " "multi_index of columns" - % ','.join(str(x) for x in self.header) + .format(header=','.join(str(x) for x in self.header)) ) # Clean the column names (if we have an index_col). @@ -1520,9 +1520,11 @@ def _maybe_dedup_names(self, names): counts[col] = cur_count + 1 if is_potential_mi: - col = col[:-1] + ('%s.%d' % (col[-1], cur_count),) + col = col[:-1] + ('{column}.{count}'.format( + column=col[-1], count=cur_count),) else: - col = '%s.%d' % (col, cur_count) + col = '{column}.{count}'.format( + column=col, count=cur_count) cur_count = counts[col] names[i] = col @@ -1569,7 +1571,7 @@ def _get_simple_index(self, data, columns): def ix(col): if not isinstance(col, compat.string_types): return col - raise ValueError('Index %s invalid' % col) + raise ValueError('Index {col} invalid'.format(col=col)) to_remove = [] index = [] @@ -1593,8 +1595,8 @@ def _get_name(icol): return icol if col_names is None: - raise ValueError(('Must supply column order to use %s as ' - 'index') % str(icol)) + raise ValueError(('Must supply column order to use {icol!s} ' + 'as index').format(icol=icol)) for i, c in enumerate(col_names): if i == icol: @@ -1709,7 +1711,8 @@ def _convert_to_ndarrays(self, dct, na_values, na_fvalues, verbose=False, result[c] = cvals if verbose and na_count: - print('Filled %d NA values in column %s' % (na_count, str(c))) + print('Filled {count} NA values in column {c!s}'.format( + count=na_count, c=c)) return result def _infer_types(self, values, na_values, try_num_bool=True): @@ -1810,8 +1813,10 @@ def _cast_types(self, values, cast_type, column): values 
= astype_nansafe(values, cast_type, copy=True, skipna=True) except ValueError: - raise ValueError("Unable to convert column %s to " - "type %s" % (column, cast_type)) + raise ValueError( + "Unable to convert column {column} to type " + "{cast_type}".format( + column=column, cast_type=cast_type)) return values def _do_date_conversions(self, names, data): @@ -1874,7 +1879,7 @@ def __init__(self, src, **kwds): if self.names is None: if self.prefix: - self.names = ['%s%d' % (self.prefix, i) + self.names = ['{prefix}{i}'.format(prefix=self.prefix, i=i) for i in range(self._reader.table_width)] else: self.names = lrange(self._reader.table_width) @@ -2276,10 +2281,11 @@ def __init__(self, f, **kwds): raise ValueError('Only length-1 decimal markers supported') if self.thousands is None: - self.nonnum = re.compile('[^-^0-9^%s]+' % self.decimal) + self.nonnum = re.compile( + r'[^-^0-9^{decimal}]+'.format(decimal=self.decimal)) else: - self.nonnum = re.compile('[^-^0-9^%s^%s]+' % (self.thousands, - self.decimal)) + self.nonnum = re.compile(r'[^-^0-9^{thousands}^{decimal}]+'.format( + thousands=self.thousands, decimal=self.decimal)) def _set_no_thousands_columns(self): # Create a set of column ids that are not to be stripped of thousands @@ -2518,8 +2524,8 @@ def _infer_columns(self): except StopIteration: if self.line_pos < hr: raise ValueError( - 'Passed header=%s but only %d lines in file' - % (hr, self.line_pos + 1)) + 'Passed header={hr} but only {pos} lines in ' + 'file'.format(hr=hr, pos=(self.line_pos + 1))) # We have an empty file, so check # if columns are provided. 
That will @@ -2560,7 +2566,8 @@ def _infer_columns(self): while cur_count > 0: counts[col] = cur_count + 1 - col = "%s.%d" % (col, cur_count) + col = '{column}.{count}'.format( + column=col, count=cur_count) cur_count = counts[col] this_columns[i] = col @@ -2628,8 +2635,8 @@ def _infer_columns(self): if not names: if self.prefix: - columns = [['%s%d' % (self.prefix, i) - for i in range(ncols)]] + columns = [['{prefix}{idx}'.format( + prefix=self.prefix, idx=i) for i in range(ncols)]] else: columns = [lrange(ncols)] columns = self._handle_usecols(columns, columns[0]) @@ -3056,8 +3063,9 @@ def _rows_to_cols(self, content): content.append(l) for row_num, actual_len in bad_lines: - msg = ('Expected %d fields in line %d, saw %d' % - (col_len, row_num + 1, actual_len)) + msg = ('Expected {col_len} fields in line {line}, saw ' + '{length}'.format(col_len=col_len, line=(row_num + 1), + length=actual_len)) if (self.delimiter and len(self.delimiter) > 1 and self.quoting != csv.QUOTE_NONE): @@ -3228,8 +3236,9 @@ def _isindex(colspec): new_name, col, old_names = _try_convert_dates( converter, colspec, data_dict, orig_names) if new_name in data_dict: - raise ValueError('New date column already in dict %s' % - new_name) + raise ValueError( + 'New date column already in dict {name}'.format( + name=new_name)) new_data[new_name] = col new_cols.append(new_name) date_cols.update(old_names) @@ -3238,8 +3247,8 @@ def _isindex(colspec): # dict of new name to column list for new_name, colspec in compat.iteritems(parse_spec): if new_name in data_dict: - raise ValueError('Date column %s already in dict' % - new_name) + raise ValueError( + 'Date column {name} already in dict'.format(name=new_name)) _, col, old_names = _try_convert_dates(converter, colspec, data_dict, orig_names) @@ -3418,7 +3427,7 @@ def _stringify_na_values(na_values): # we are like 999 here if v == int(v): v = int(v) - result.append("%s.0" % v) + result.append("{value}.0".format(value=v)) result.append(str(v)) 
result.append(v) @@ -3563,8 +3572,8 @@ def get_rows(self, infer_nrows, skiprows=None): def detect_colspecs(self, infer_nrows=100, skiprows=None): # Regex escape the delimiters - delimiters = ''.join(r'\%s' % x for x in self.delimiter) - pattern = re.compile('([^%s]+)' % delimiters) + delimiters = ''.join(r'\{}'.format(x) for x in self.delimiter) + pattern = re.compile('([^{}]+)'.format(delimiters)) rows = self.get_rows(infer_nrows, skiprows) if not rows: raise EmptyDataError("No rows from which to infer column width")
progress towards #16130 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
https://api.github.com/repos/pandas-dev/pandas/pulls/24721
2019-01-11T03:57:21Z
2019-03-19T20:23:27Z
2019-03-19T20:23:27Z
2019-03-19T22:12:04Z
CLN: replace %s syntax with .format in pandas.io
diff --git a/pandas/io/packers.py b/pandas/io/packers.py index b83eab7d0eba0..efe4e3a91c69c 100644 --- a/pandas/io/packers.py +++ b/pandas/io/packers.py @@ -519,7 +519,8 @@ def encode(obj): elif isinstance(obj, date): return {u'typ': u'date', u'data': u(obj.isoformat())} - raise Exception("cannot encode this datetimelike object: %s" % obj) + raise Exception( + "cannot encode this datetimelike object: {obj}".format(obj=obj)) elif isinstance(obj, Period): return {u'typ': u'period', u'ordinal': obj.ordinal, diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index b115529f696b8..4f410a34f7fda 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -363,7 +363,7 @@ def read_hdf(path_or_buf, key=None, mode='r', **kwargs): if not exists: raise compat.FileNotFoundError( - 'File %s does not exist' % path_or_buf) + 'File {path} does not exist'.format(path=path_or_buf)) store = HDFStore(path_or_buf, mode=mode, **kwargs) # can't auto open/close if we are using an iterator @@ -465,8 +465,8 @@ def __init__(self, path, mode=None, complevel=None, complib=None, try: import tables # noqa except ImportError as ex: # pragma: no cover - raise ImportError('HDFStore requires PyTables, "{ex}" problem ' - 'importing'.format(ex=str(ex))) + raise ImportError('HDFStore requires PyTables, "{ex!s}" problem ' + 'importing'.format(ex=ex)) if complib is not None and complib not in tables.filters.all_complibs: raise ValueError( @@ -515,8 +515,9 @@ def __getattr__(self, name): return self.get(name) except (KeyError, ClosedFileError): pass - raise AttributeError("'%s' object has no attribute '%s'" % - (type(self).__name__, name)) + raise AttributeError( + "'{object}' object has no attribute '{name}'".format( + object=type(self).__name__, name=name)) def __contains__(self, key): """ check for existence of this key @@ -533,7 +534,8 @@ def __len__(self): return len(self.groups()) def __unicode__(self): - return '%s\nFile path: %s\n' % (type(self), pprint_thing(self._path)) + return 
'{type}\nFile path: {path}\n'.format( + type=type(self), path=pprint_thing(self._path)) def __enter__(self): return self @@ -601,7 +603,8 @@ def open(self, mode='a', **kwargs): self._handle = tables.open_file(self._path, self._mode, **kwargs) except (IOError) as e: # pragma: no cover if 'can not be written' in str(e): - print('Opening %s in read-only mode' % self._path) + print( + 'Opening {path} in read-only mode'.format(path=self._path)) self._handle = tables.open_file(self._path, 'r', **kwargs) else: raise @@ -688,7 +691,7 @@ def get(self, key): """ group = self.get_node(key) if group is None: - raise KeyError('No object named %s in the file' % key) + raise KeyError('No object named {key} in the file'.format(key=key)) return self._read_group(group) def select(self, key, where=None, start=None, stop=None, columns=None, @@ -716,7 +719,7 @@ def select(self, key, where=None, start=None, stop=None, columns=None, """ group = self.get_node(key) if group is None: - raise KeyError('No object named %s in the file' % key) + raise KeyError('No object named {key} in the file'.format(key=key)) # create the storer and axes where = _ensure_term(where, scope_level=1) @@ -821,11 +824,11 @@ def select_as_multiple(self, keys, where=None, selector=None, columns=None, nrows = None for t, k in itertools.chain([(s, selector)], zip(tbls, keys)): if t is None: - raise KeyError("Invalid table [%s]" % k) + raise KeyError("Invalid table [{key}]".format(key=k)) if not t.is_table: raise TypeError( - "object [%s] is not a table, and cannot be used in all " - "select as multiple" % t.pathname + "object [{obj}] is not a table, and cannot be used in all " + "select as multiple".format(obj=t.pathname) ) if nrows is None: @@ -1162,7 +1165,7 @@ def get_storer(self, key): """ return the storer object for a key, raise if not in the file """ group = self.get_node(key) if group is None: - raise KeyError('No object named {} in the file'.format(key)) + raise KeyError('No object named {key} in the 
file'.format(key=key)) s = self._create_storer(group) s.infer_axes() @@ -1225,7 +1228,8 @@ def info(self): .. versionadded:: 0.21.0 """ - output = '%s\nFile path: %s\n' % (type(self), pprint_thing(self._path)) + output = '{type}\nFile path: {path}\n'.format( + type=type(self), path=pprint_thing(self._path)) if self.is_open: lkeys = sorted(list(self.keys())) if len(lkeys): @@ -1241,8 +1245,9 @@ def info(self): pprint_thing(s or 'invalid_HDFStore node')) except Exception as detail: keys.append(k) - values.append("[invalid_HDFStore node: %s]" - % pprint_thing(detail)) + values.append( + "[invalid_HDFStore node: {detail}]".format( + detail=pprint_thing(detail))) output += adjoin(12, keys, values) else: @@ -1276,10 +1281,11 @@ def _create_storer(self, group, format=None, value=None, append=False, def error(t): raise TypeError( - "cannot properly create the storer for: [%s] [group->%s," - "value->%s,format->%s,append->%s,kwargs->%s]" - % (t, group, type(value), format, append, kwargs) - ) + "cannot properly create the storer for: [{t}] [group->" + "{group},value->{value},format->{format},append->{append}," + "kwargs->{kwargs}]".format(t=t, group=group, + value=type(value), format=format, + append=append, kwargs=kwargs)) pt = _ensure_decoded(getattr(group._v_attrs, 'pandas_type', None)) tt = _ensure_decoded(getattr(group._v_attrs, 'table_type', None)) @@ -1559,7 +1565,7 @@ def __init__(self, values=None, kind=None, typ=None, cname=None, def set_name(self, name, kind_attr=None): """ set the name of this indexer """ self.name = name - self.kind_attr = kind_attr or "%s_kind" % name + self.kind_attr = kind_attr or "{name}_kind".format(name=name) if self.cname is None: self.cname = name @@ -1590,7 +1596,9 @@ def __unicode__(self): self.axis, self.pos, self.kind))) - return "name->%s,cname->%s,axis->%s,pos->%s,kind->%s" % temp + return ','.join(("{key}->{value}".format(key=key, value=value) + for key, value in zip( + ['name', 'cname', 'axis', 'pos', 'kind'], temp))) def 
__eq__(self, other): """ compare 2 col items """ @@ -1714,10 +1722,12 @@ def validate_col(self, itemsize=None): itemsize = self.itemsize if c.itemsize < itemsize: raise ValueError( - "Trying to store a string with len [%s] in [%s] " - "column but\nthis column has a limit of [%s]!\n" - "Consider using min_itemsize to preset the sizes on " - "these columns" % (itemsize, self.cname, c.itemsize)) + "Trying to store a string with len [{itemsize}] in " + "[{cname}] column but\nthis column has a limit of " + "[{c_itemsize}]!\nConsider using min_itemsize to " + "preset the sizes on these columns".format( + itemsize=itemsize, cname=self.cname, + c_itemsize=c.itemsize)) return c.itemsize return None @@ -1727,8 +1737,10 @@ def validate_attr(self, append): if append: existing_kind = getattr(self.attrs, self.kind_attr, None) if existing_kind is not None and existing_kind != self.kind: - raise TypeError("incompatible kind in col [%s - %s]" % - (existing_kind, self.kind)) + raise TypeError( + "incompatible kind in col [{existing} - " + "{self_kind}]".format( + existing=existing_kind, self_kind=self.kind)) def update_info(self, info): """ set/update the info for this indexable with the key/value @@ -1753,9 +1765,11 @@ def update_info(self, info): else: raise ValueError( - "invalid info for [%s] for [%s], existing_value [%s] " - "conflicts with new value [%s]" - % (self.name, key, existing_value, value)) + "invalid info for [{name}] for [{key}], " + "existing_value [{existing_value}] conflicts with " + "new value [{value}]".format( + name=self.name, key=key, + existing_value=existing_value, value=value)) else: if value is not None or existing_value is not None: idx[key] = value @@ -1840,7 +1854,7 @@ def create_for_block( """ return a new datacol with the block i """ if cname is None: - cname = name or 'values_block_%d' % i + cname = name or 'values_block_{idx}'.format(idx=i) if name is None: name = cname @@ -1850,7 +1864,7 @@ def create_for_block( if version[0] == 0 and version[1] 
<= 10 and version[2] == 0: m = re.search(r"values_block_(\d+)", name) if m: - name = "values_%s" % m.groups()[0] + name = "values_{group}".format(group=m.groups()[0]) except IndexError: pass @@ -1862,9 +1876,9 @@ def __init__(self, values=None, kind=None, typ=None, super(DataCol, self).__init__(values=values, kind=kind, typ=typ, cname=cname, **kwargs) self.dtype = None - self.dtype_attr = u'{}_dtype'.format(self.name) + self.dtype_attr = u'{name}_dtype'.format(name=self.name) self.meta = meta - self.meta_attr = u'{}_meta'.format(self.name) + self.meta_attr = u'{name}_meta'.format(name=self.name) self.set_data(data) self.set_metadata(metadata) @@ -1876,7 +1890,9 @@ def __unicode__(self): self.dtype, self.kind, self.shape))) - return "name->%s,cname->%s,dtype->%s,kind->%s,shape->%s" % temp + return ','.join(("{key}->{value}".format(key=key, value=value) + for key, value in zip( + ['name', 'cname', 'dtype', 'kind', 'shape'], temp))) def __eq__(self, other): """ compare 2 col items """ @@ -1926,7 +1942,8 @@ def set_kind(self): self.kind = 'bool' else: raise AssertionError( - "cannot interpret dtype of [%s] in [%s]" % (dtype, self)) + "cannot interpret dtype of [{dtype}] in [{obj}]".format( + dtype=dtype, obj=self)) # set my typ if we need if self.typ is None: @@ -2009,9 +2026,9 @@ def set_atom_string(self, block, block_items, existing_col, min_itemsize, inferred_type = lib.infer_dtype(col.ravel(), skipna=False) if inferred_type != 'string': raise TypeError( - "Cannot serialize the column [%s] because\n" - "its data contents are [%s] object dtype" - % (item, inferred_type) + "Cannot serialize the column [{item}] because\n" + "its data contents are [{type}] object dtype".format( + item=item, type=inferred_type) ) # itemsize is the maximum length of a string (along any dimension) @@ -2033,16 +2050,17 @@ def set_atom_string(self, block, block_items, existing_col, min_itemsize, self.itemsize = itemsize self.kind = 'string' self.typ = self.get_atom_string(block, itemsize) - 
self.set_data(data_converted.astype('|S%d' % itemsize, copy=False)) + self.set_data(data_converted.astype( + '|S{size}'.format(size=itemsize), copy=False)) def get_atom_coltype(self, kind=None): """ return the PyTables column class for this column """ if kind is None: kind = self.kind if self.kind.startswith('uint'): - col_name = "UInt%sCol" % kind[4:] + col_name = "UInt{name}Col".format(name=kind[4:]) else: - col_name = "%sCol" % kind.capitalize() + col_name = "{name}Col".format(name=kind.capitalize()) return getattr(_tables(), col_name) @@ -2325,8 +2343,10 @@ def __unicode__(self): s = self.shape if s is not None: if isinstance(s, (list, tuple)): - s = "[%s]" % ','.join(pprint_thing(x) for x in s) - return "%-12.12s (shape->%s)" % (self.pandas_type, s) + s = "[{shape}]".format( + shape=','.join(pprint_thing(x) for x in s)) + return "{type:12.12} (shape->{shape})".format( + type=self.pandas_type, shape=s) return self.pandas_type def set_object_info(self): @@ -2542,7 +2562,8 @@ def read_array(self, key, start=None, stop=None): return ret def read_index(self, key, **kwargs): - variety = _ensure_decoded(getattr(self.attrs, '%s_variety' % key)) + variety = _ensure_decoded( + getattr(self.attrs, '{key}_variety'.format(key=key))) if variety == u'multi': return self.read_multi_index(key, **kwargs) @@ -2554,20 +2575,22 @@ def read_index(self, key, **kwargs): _, index = self.read_index_node(getattr(self.group, key), **kwargs) return index else: # pragma: no cover - raise TypeError('unrecognized index variety: %s' % variety) + raise TypeError( + 'unrecognized index variety: {variety}'.format( + variety=variety)) def write_index(self, key, index): if isinstance(index, MultiIndex): - setattr(self.attrs, '%s_variety' % key, 'multi') + setattr(self.attrs, '{key}_variety'.format(key=key), 'multi') self.write_multi_index(key, index) elif isinstance(index, BlockIndex): - setattr(self.attrs, '%s_variety' % key, 'block') + setattr(self.attrs, '{key}_variety'.format(key=key), 
'block') self.write_block_index(key, index) elif isinstance(index, IntIndex): - setattr(self.attrs, '%s_variety' % key, 'sparseint') + setattr(self.attrs, '{key}_variety'.format(key=key), 'sparseint') self.write_sparse_intindex(key, index) else: - setattr(self.attrs, '%s_variety' % key, 'regular') + setattr(self.attrs, '{key}_variety'.format(key=key), 'regular') converted = _convert_index(index, self.encoding, self.errors, self.format_type).set_name('index') @@ -2587,33 +2610,33 @@ def write_index(self, key, index): node._v_attrs.tz = _get_tz(index.tz) def write_block_index(self, key, index): - self.write_array('%s_blocs' % key, index.blocs) - self.write_array('%s_blengths' % key, index.blengths) - setattr(self.attrs, '%s_length' % key, index.length) + self.write_array('{key}_blocs'.format(key=key), index.blocs) + self.write_array('{key}_blengths'.format(key=key), index.blengths) + setattr(self.attrs, '{key}_length'.format(key=key), index.length) def read_block_index(self, key, **kwargs): - length = getattr(self.attrs, '%s_length' % key) - blocs = self.read_array('%s_blocs' % key, **kwargs) - blengths = self.read_array('%s_blengths' % key, **kwargs) + length = getattr(self.attrs, '{key}_length'.format(key=key)) + blocs = self.read_array('{key}_blocs'.format(key=key), **kwargs) + blengths = self.read_array('{key}_blengths'.format(key=key), **kwargs) return BlockIndex(length, blocs, blengths) def write_sparse_intindex(self, key, index): - self.write_array('%s_indices' % key, index.indices) - setattr(self.attrs, '%s_length' % key, index.length) + self.write_array('{key}_indices'.format(key=key), index.indices) + setattr(self.attrs, '{key}_length'.format(key=key), index.length) def read_sparse_intindex(self, key, **kwargs): - length = getattr(self.attrs, '%s_length' % key) - indices = self.read_array('%s_indices' % key, **kwargs) + length = getattr(self.attrs, '{key}_length'.format(key=key)) + indices = self.read_array('{key}_indices'.format(key=key), **kwargs) return 
IntIndex(length, indices) def write_multi_index(self, key, index): - setattr(self.attrs, '%s_nlevels' % key, index.nlevels) + setattr(self.attrs, '{key}_nlevels'.format(key=key), index.nlevels) for i, (lev, level_codes, name) in enumerate(zip(index.levels, index.codes, index.names)): # write the level - level_key = '%s_level%d' % (key, i) + level_key = '{key}_level{idx}'.format(key=key, idx=i) conv_level = _convert_index(lev, self.encoding, self.errors, self.format_type).set_name(level_key) self.write_array(level_key, conv_level.values) @@ -2622,26 +2645,27 @@ def write_multi_index(self, key, index): node._v_attrs.name = name # write the name - setattr(node._v_attrs, '%s_name%d' % (key, i), name) + setattr(node._v_attrs, '{key}_name{name}'.format( + key=key, name=name), name) # write the labels - label_key = '%s_label%d' % (key, i) + label_key = '{key}_label{idx}'.format(key=key, idx=i) self.write_array(label_key, level_codes) def read_multi_index(self, key, **kwargs): - nlevels = getattr(self.attrs, '%s_nlevels' % key) + nlevels = getattr(self.attrs, '{key}_nlevels'.format(key=key)) levels = [] codes = [] names = [] for i in range(nlevels): - level_key = '%s_level%d' % (key, i) + level_key = '{key}_level{idx}'.format(key=key, idx=i) name, lev = self.read_index_node(getattr(self.group, level_key), **kwargs) levels.append(lev) names.append(name) - label_key = '%s_label%d' % (key, i) + label_key = '{key}_label{idx}'.format(key=key, idx=i) level_codes = self.read_array(label_key, **kwargs) codes.append(level_codes) @@ -2889,7 +2913,7 @@ def read(self, **kwargs): columns = self.read_index('columns') sdict = {} for c in columns: - key = 'sparse_series_%s' % c + key = 'sparse_series_{columns}'.format(columns=c) s = SparseSeriesFixed(self.parent, getattr(self.group, key)) s.infer_axes() sdict[c] = s.read() @@ -2901,7 +2925,7 @@ def write(self, obj, **kwargs): """ write it as a collection of individual sparse series """ super(SparseFrameFixed, self).write(obj, **kwargs) 
for name, ss in compat.iteritems(obj): - key = 'sparse_series_%s' % name + key = 'sparse_series_{name}'.format(name=name) if key not in self.group._v_children: node = self._handle.create_group(self.group, key) else: @@ -2925,7 +2949,7 @@ def shape(self): # items items = 0 for i in range(self.nblocks): - node = getattr(self.group, 'block%d_items' % i) + node = getattr(self.group, 'block{idx}_items'.format(idx=i)) shape = getattr(node, 'shape', None) if shape is not None: items += shape[0] @@ -2958,15 +2982,16 @@ def read(self, start=None, stop=None, **kwargs): for i in range(self.ndim): _start, _stop = (start, stop) if i == select_axis else (None, None) - ax = self.read_index('axis%d' % i, start=_start, stop=_stop) + ax = self.read_index('axis{idx}'.format( + idx=i), start=_start, stop=_stop) axes.append(ax) items = axes[0] blocks = [] for i in range(self.nblocks): - blk_items = self.read_index('block%d_items' % i) - values = self.read_array('block%d_values' % i, + blk_items = self.read_index('block{idx}_items'.format(idx=i)) + values = self.read_array('block{idx}_values'.format(idx=i), start=_start, stop=_stop) blk = make_block(values, placement=items.get_indexer(blk_items)) @@ -2986,15 +3011,16 @@ def write(self, obj, **kwargs): if not ax.is_unique: raise ValueError( "Columns index has to be unique for fixed format") - self.write_index('axis%d' % i, ax) + self.write_index('axis{idx}'.format(idx=i), ax) # Supporting mixed-type DataFrame objects...nontrivial self.attrs.nblocks = len(data.blocks) for i, blk in enumerate(data.blocks): # I have no idea why, but writing values before items fixed #2299 blk_items = data.items.take(blk.mgr_locs) - self.write_array('block%d_values' % i, blk.values, items=blk_items) - self.write_index('block%d_items' % i, blk_items) + self.write_array('block{idx}_values'.format(idx=i), + blk.values, items=blk_items) + self.write_index('block{idx}_items'.format(idx=i), blk_items) class FrameFixed(BlockManagerFixed): @@ -3065,17 +3091,22 @@ 
def format_type(self): def __unicode__(self): """ return a pretty representatgion of myself """ self.infer_axes() - dc = ",dc->[%s]" % ','.join( - self.data_columns) if len(self.data_columns) else '' + dc = ",dc->[{columns}]".format(columns=(','.join( + self.data_columns) if len(self.data_columns) else '')) ver = '' if self.is_old_version: - ver = "[%s]" % '.'.join(str(x) for x in self.version) - - return "%-12.12s%s (typ->%s,nrows->%s,ncols->%s,indexers->[%s]%s)" % ( - self.pandas_type, ver, self.table_type_short, self.nrows, - self.ncols, ','.join(a.name for a in self.index_axes), dc - ) + ver = "[{version}]".format( + version='.'.join(str(x) for x in self.version)) + + return ( + "{pandas_type:12.12}{ver} (typ->{table_type},nrows->{nrows}," + "ncols->{ncols},indexers->[{index_axes}]{dc})".format( + pandas_type=self.pandas_type, ver=ver, + table_type=self.table_type_short, nrows=self.nrows, + ncols=self.ncols, + index_axes=(','.join(a.name for a in self.index_axes)), dc=dc + )) def __getitem__(self, c): """ return the axis for c """ @@ -3090,8 +3121,10 @@ def validate(self, other): return if other.table_type != self.table_type: - raise TypeError("incompatible table_type with existing [%s - %s]" % - (other.table_type, self.table_type)) + raise TypeError( + "incompatible table_type with existing " + "[{other} - {self}]".format( + other=other.table_type, self=self.table_type)) for c in ['index_axes', 'non_index_axes', 'values_axes']: sv = getattr(self, c, None) @@ -3103,13 +3136,14 @@ def validate(self, other): oax = ov[i] if sax != oax: raise ValueError( - "invalid combinate of [%s] on appending data [%s] " - "vs current table [%s]" % (c, sax, oax)) + "invalid combinate of [{c}] on appending data " + "[{sax}] vs current table [{oax}]".format( + c=c, sax=sax, oax=oax)) # should never get here raise Exception( - "invalid combinate of [%s] on appending data [%s] vs " - "current table [%s]" % (c, sv, ov)) + "invalid combinate of [{c}] on appending data [{sv}] vs " + 
"current table [{ov}]".format(c=c, sv=sv, ov=ov)) @property def is_multi_index(self): @@ -3292,8 +3326,8 @@ def validate_min_itemsize(self, min_itemsize): continue if k not in q: raise ValueError( - "min_itemsize has the key [%s] which is not an axis or " - "data_column" % k) + "min_itemsize has the key [{key}] which is not an axis or " + "data_column".format(key=k)) @property def indexables(self): @@ -3480,9 +3514,10 @@ def create_axes(self, axes, obj, validate=True, nan_rep=None, try: axes = _AXES_MAP[type(obj)] except KeyError: - raise TypeError("cannot properly create the storer for: " - "[group->%s,value->%s]" - % (self.group._v_name, type(obj))) + raise TypeError( + "cannot properly create the storer for: [group->{group}," + "value->{value}]".format( + group=self.group._v_name, value=type(obj))) # map axes to numbers axes = [obj._get_axis_number(a) for a in axes] @@ -3600,9 +3635,10 @@ def get_blk_items(mgr, blocks): new_blk_items.append(b_items) except (IndexError, KeyError): raise ValueError( - "cannot match existing table structure for [%s] on " - "appending data" % ','.join(pprint_thing(item) for - item in items)) + "cannot match existing table structure for [{items}] " + "on appending data".format( + items=(','.join(pprint_thing(item) for + item in items)))) blocks = new_blocks blk_items = new_blk_items @@ -3627,9 +3663,11 @@ def get_blk_items(mgr, blocks): try: existing_col = existing_table.values_axes[i] except (IndexError, KeyError): - raise ValueError("Incompatible appended table [%s] with " - "existing table [%s]" - % (blocks, existing_table.values_axes)) + raise ValueError( + "Incompatible appended table [{blocks}]" + "with existing table [{table}]".format( + blocks=blocks, + table=existing_table.values_axes)) else: existing_col = None @@ -3651,9 +3689,8 @@ def get_blk_items(mgr, blocks): except Exception as detail: raise Exception( "cannot find the correct atom type -> " - "[dtype->%s,items->%s] %s" - % (b.dtype.name, b_items, str(detail)) - ) + 
"[dtype->{name},items->{items}] {detail!s}".format( + name=b.dtype.name, items=b_items, detail=detail)) j += 1 # validate our min_itemsize @@ -3719,8 +3756,8 @@ def process_filter(field, filt): return obj.loc._getitem_axis(takers, axis=axis_number) - raise ValueError( - "cannot find the field [%s] for filtering!" % field) + raise ValueError("cannot find the field [{field}] for " + "filtering!".format(field=field)) obj = process_filter(field, filt) @@ -3798,8 +3835,8 @@ def read_column(self, column, where=None, start=None, stop=None): if not a.is_data_indexable: raise ValueError( - "column [%s] can not be extracted individually; it is " - "not data indexable" % column) + "column [{column}] can not be extracted individually; " + "it is not data indexable".format(column=column)) # column must be an indexable or a data column c = getattr(self.table.cols, column) @@ -3811,7 +3848,8 @@ def read_column(self, column, where=None, start=None, stop=None): ).take_data(), a.tz, True), name=column) - raise KeyError("column [%s] not found in the table" % column) + raise KeyError( + "column [{column}] not found in the table".format(column=column)) class WORMTable(Table): @@ -4120,14 +4158,17 @@ def write_data_chunk(self, rows, indexes, mask, values): rows = rows[m] except Exception as detail: - raise Exception("cannot create row-data -> %s" % detail) + raise Exception( + "cannot create row-data -> {detail}".format(detail=detail)) try: if len(rows): self.table.append(rows) self.table.flush() except Exception as detail: - raise TypeError("tables cannot write this data -> %s" % detail) + raise TypeError( + "tables cannot write this data -> {detail}".format( + detail=detail)) def delete(self, where=None, start=None, stop=None, **kwargs): @@ -4597,7 +4638,7 @@ def _unconvert_index(data, kind, encoding=None, errors='strict'): elif kind == u'object': index = np.asarray(data[0]) else: # pragma: no cover - raise ValueError('unrecognized index type %s' % kind) + raise 
ValueError('unrecognized index type {kind}'.format(kind=kind)) return index @@ -4612,7 +4653,7 @@ def _unconvert_index_legacy(data, kind, legacy=False, encoding=None, index = _unconvert_string_array(data, nan_rep=None, encoding=encoding, errors=errors) else: # pragma: no cover - raise ValueError('unrecognized index type %s' % kind) + raise ValueError('unrecognized index type {kind}'.format(kind=kind)) return index @@ -4643,7 +4684,7 @@ def _convert_string_array(data, encoding, errors, itemsize=None): ensured = ensure_object(data.ravel()) itemsize = max(1, libwriters.max_len_string_array(ensured)) - data = np.asarray(data, dtype="S%d" % itemsize) + data = np.asarray(data, dtype="S{size}".format(size=itemsize)) return data @@ -4708,7 +4749,7 @@ def _get_converter(kind, encoding, errors): return lambda x: _unconvert_string_array(x, encoding=encoding, errors=errors) else: # pragma: no cover - raise ValueError('invalid kind %s' % kind) + raise ValueError('invalid kind {kind}'.format(kind=kind)) def _need_convert(kind): diff --git a/pandas/io/sas/sas7bdat.py b/pandas/io/sas/sas7bdat.py index d634b5ec4f8f9..eb77f79d38d59 100644 --- a/pandas/io/sas/sas7bdat.py +++ b/pandas/io/sas/sas7bdat.py @@ -163,7 +163,7 @@ def _get_properties(self): if buf in const.encoding_names: self.file_encoding = const.encoding_names[buf] else: - self.file_encoding = "unknown (code=%s)" % str(buf) + self.file_encoding = "unknown (code={name!s})".format(name=buf) # Get platform information buf = self._read_bytes(const.platform_offset, const.platform_length) @@ -435,8 +435,11 @@ def _process_columnsize_subheader(self, offset, length): self.column_count = self._read_int(offset, int_len) if (self.col_count_p1 + self.col_count_p2 != self.column_count): - print("Warning: column count mismatch (%d + %d != %d)\n", - self.col_count_p1, self.col_count_p2, self.column_count) + print( + "Warning: column count mismatch ({p1} + {p2} != " + "{column_count})\n".format( + p1=self.col_count_p1, 
p2=self.col_count_p2, + column_count=self.column_count)) # Unknown purpose def _process_subheader_counts(self, offset, length): @@ -694,7 +697,7 @@ def _chunk_to_dataframe(self): js += 1 else: self.close() - raise ValueError("unknown column type %s" % - self._column_types[j]) + raise ValueError("unknown column type {type}".format( + type=self._column_types[j])) return rslt diff --git a/pandas/io/sql.py b/pandas/io/sql.py index 2f4093e154a95..5d1163b3e0024 100644 --- a/pandas/io/sql.py +++ b/pandas/io/sql.py @@ -240,7 +240,7 @@ def read_sql_table(table_name, con, schema=None, index_col=None, try: meta.reflect(only=[table_name], views=True) except sqlalchemy.exc.InvalidRequestError: - raise ValueError("Table %s not found" % table_name) + raise ValueError("Table {name} not found".format(name=table_name)) pandas_sql = SQLDatabase(con, meta=meta) table = pandas_sql.read_table( @@ -250,7 +250,7 @@ def read_sql_table(table_name, con, schema=None, index_col=None, if table is not None: return table else: - raise ValueError("Table %s not found" % table_name, con) + raise ValueError("Table {name} not found".format(name=table_name), con) def read_sql_query(sql, con, index_col=None, coerce_float=True, params=None, @@ -552,7 +552,8 @@ def __init__(self, name, pandas_sql_engine, frame=None, index=True, self.table = self.pd_sql.get_table(self.name, self.schema) if self.table is None: - raise ValueError("Could not init table '%s'" % name) + raise ValueError( + "Could not init table '{name}'".format(name=name)) def exists(self): return self.pd_sql.has_table(self.name, self.schema) @@ -569,7 +570,8 @@ def _execute_create(self): def create(self): if self.exists(): if self.if_exists == 'fail': - raise ValueError("Table '%s' already exists." 
% self.name) + raise ValueError( + "Table '{name}' already exists.".format(name=self.name)) elif self.if_exists == 'replace': self.pd_sql.drop_table(self.name, self.schema) self._execute_create() @@ -1161,8 +1163,8 @@ def to_sql(self, frame, name, if_exists='fail', index=True, from sqlalchemy.types import to_instance, TypeEngine for col, my_type in dtype.items(): if not isinstance(to_instance(my_type), TypeEngine): - raise ValueError('The type of %s is not a SQLAlchemy ' - 'type ' % col) + raise ValueError('The type of {column} is not a ' + 'SQLAlchemy type '.format(column=col)) table = SQLTable(name, self, frame=frame, index=index, if_exists=if_exists, index_label=index_label, @@ -1244,7 +1246,8 @@ def _get_unicode_name(name): try: uname = text_type(name).encode("utf-8", "strict").decode("utf-8") except UnicodeError: - raise ValueError("Cannot convert identifier to UTF-8: '%s'" % name) + raise ValueError( + "Cannot convert identifier to UTF-8: '{name}'".format(name=name)) return uname @@ -1305,8 +1308,9 @@ def insert_statement(self): bracketed_names = [escape(column) for column in names] col_names = ','.join(bracketed_names) wildcards = ','.join([wld] * len(names)) - insert_statement = 'INSERT INTO %s (%s) VALUES (%s)' % ( - escape(self.name), col_names, wildcards) + insert_statement = \ + u'INSERT INTO {table} ({columns}) VALUES ({wld})'.format( + table=escape(self.name), columns=col_names, wld=wildcards) return insert_statement def _execute_insert(self, conn, keys, data_iter): @@ -1429,12 +1433,14 @@ def execute(self, *args, **kwargs): try: self.con.rollback() except Exception: # pragma: no cover - ex = DatabaseError("Execution failed on sql: %s\n%s\nunable" - " to rollback" % (args[0], exc)) + ex = DatabaseError( + "Execution failed on sql: {sql}\n{exc}\nunable " + "to rollback".format(sql=args[0], exc=exc)) raise_with_traceback(ex) ex = DatabaseError( - "Execution failed on sql '%s': %s" % (args[0], exc)) + "Execution failed on sql '{sql}': {exc}".format( + 
sql=args[0], exc=exc)) raise_with_traceback(ex) @staticmethod @@ -1530,8 +1536,8 @@ def to_sql(self, frame, name, if_exists='fail', index=True, if dtype is not None: for col, my_type in dtype.items(): if not isinstance(my_type, str): - raise ValueError('%s (%s) not a string' % ( - col, str(my_type))) + raise ValueError('{column} ({type!s}) not a string'.format( + column=col, type=my_type)) table = SQLiteTable(name, self, frame=frame, index=index, if_exists=if_exists, index_label=index_label, @@ -1546,7 +1552,7 @@ def has_table(self, name, schema=None): wld = '?' query = ("SELECT name FROM sqlite_master " - "WHERE type='table' AND name=%s;") % wld + "WHERE type='table' AND name={wld};").format(wld=wld) return len(self.execute(query, [name, ]).fetchall()) > 0 @@ -1554,7 +1560,8 @@ def get_table(self, table_name, schema=None): return None # not supported in fallback mode def drop_table(self, name, schema=None): - drop_sql = "DROP TABLE %s" % _get_valid_sqlite_name(name) + drop_sql = "DROP TABLE {name}".format( + name=_get_valid_sqlite_name(name)) self.execute(drop_sql) def _create_sql_schema(self, frame, table_name, keys=None, dtype=None): diff --git a/pandas/io/stata.py b/pandas/io/stata.py index b5e7eb24465f5..1b0660171ecac 100644 --- a/pandas/io/stata.py +++ b/pandas/io/stata.py @@ -357,7 +357,7 @@ def convert_delta_safe(base, deltas, unit): month = np.ones_like(dates) conv_dates = convert_year_month_safe(year, month) else: - raise ValueError("Date fmt %s not understood" % fmt) + raise ValueError("Date fmt {fmt} not understood".format(fmt=fmt)) if has_bad_values: # Restore NaT for bad values conv_dates[bad_locs] = NaT @@ -452,7 +452,8 @@ def parse_dates_safe(dates, delta=False, year=False, days=False): d = parse_dates_safe(dates, year=True) conv_dates = d.year else: - raise ValueError("Format %s is not a known Stata date format" % fmt) + raise ValueError( + "Format {fmt} is not a known Stata date format".format(fmt=fmt)) conv_dates = Series(conv_dates, 
dtype=np.float64) missing_value = struct.unpack('<d', b'\x00\x00\x00\x00\x00\x00\xe0\x7f')[0] @@ -813,7 +814,7 @@ def __unicode__(self): def __repr__(self): # not perfect :-/ - return "%s(%s)" % (self.__class__, self) + return "{cls}({obj})".format(cls=self.__class__, obj=self) def __eq__(self, other): return (isinstance(other, self.__class__) and @@ -1771,7 +1772,8 @@ def _set_endianness(endianness): elif endianness.lower() in [">", "big"]: return ">" else: # pragma : no cover - raise ValueError("Endianness %s not understood" % endianness) + raise ValueError( + "Endianness {endian} not understood".format(endian=endianness)) def _pad_bytes(name, length): @@ -1789,7 +1791,8 @@ def _convert_datetime_to_stata_type(fmt): "%tq", "th", "%th", "ty", "%ty"]: return np.float64 # Stata expects doubles for SIFs else: - raise NotImplementedError("Format %s not implemented" % fmt) + raise NotImplementedError( + "Format {fmt} not implemented".format(fmt=fmt)) def _maybe_convert_to_int_keys(convert_dates, varlist): @@ -1840,7 +1843,8 @@ def _dtype_to_stata_type(dtype, column): elif dtype == np.int8: return 251 else: # pragma : no cover - raise NotImplementedError("Data type %s not supported." % dtype) + raise NotImplementedError( + "Data type {dtype} not supported.".format(dtype=dtype)) def _dtype_to_default_stata_fmt(dtype, column, dta_version=114, @@ -1895,7 +1899,8 @@ def _dtype_to_default_stata_fmt(dtype, column, dta_version=114, elif dtype == np.int8 or dtype == np.int16: return "%8.0g" else: # pragma : no cover - raise NotImplementedError("Data type %s not supported." 
% dtype) + raise NotImplementedError( + "Data type {dtype} not supported.".format(dtype=dtype)) class StataWriter(StataParser): @@ -2389,7 +2394,7 @@ def _prepare_data(self): if typ <= self._max_string_length: has_strings = True data[col] = data[col].fillna('').apply(_pad_bytes, args=(typ,)) - stype = 'S%d' % typ + stype = 'S{type}'.format(type=typ) dtypes.append(('c' + str(i), stype)) string = data[col].str.encode(self._encoding) data_cols.append(string.values.astype(stype))
progress towards #16130 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
https://api.github.com/repos/pandas-dev/pandas/pulls/24719
2019-01-11T03:01:53Z
2019-01-11T12:51:28Z
2019-01-11T12:51:28Z
2019-01-11T13:01:05Z
BUG: Fix DataFrame.astype(ExtensionDtype) with duplicate column names
diff --git a/doc/source/whatsnew/v0.24.0.rst b/doc/source/whatsnew/v0.24.0.rst index 960b205c49c61..dd06bade2a203 100644 --- a/doc/source/whatsnew/v0.24.0.rst +++ b/doc/source/whatsnew/v0.24.0.rst @@ -1639,6 +1639,7 @@ Conversion - Bug in :meth:`DataFrame.combine_first` in which column types were unexpectedly converted to float (:issue:`20699`) - Bug in :meth:`DataFrame.clip` in which column types are not preserved and casted to float (:issue:`24162`) - Bug in :meth:`DataFrame.clip` when order of columns of dataframes doesn't match, result observed is wrong in numeric values (:issue:`20911`) +- Bug in :meth:`DataFrame.astype` where converting to an extension dtype when duplicate column names are present causes a ``RecursionError`` (:issue:`24704`) Strings ^^^^^^^ diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 1e6ae71660617..a0ee9cb253fef 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -5670,9 +5670,10 @@ def astype(self, dtype, copy=True, errors='raise', **kwargs): results.append(results.append(col.copy() if copy else col)) elif is_extension_array_dtype(dtype) and self.ndim > 1: - # GH 18099: columnwise conversion to categorical - # and extension dtype - results = (self[col].astype(dtype, copy=copy) for col in self) + # GH 18099/22869: columnwise conversion to extension dtype + # GH 24704: use iloc to handle duplicate column names + results = (self.iloc[:, i].astype(dtype, copy=copy) + for i in range(len(self.columns))) else: # else, only a single dtype is given diff --git a/pandas/tests/frame/test_dtypes.py b/pandas/tests/frame/test_dtypes.py index 70de148dd8fd2..a9f8ab47b16de 100644 --- a/pandas/tests/frame/test_dtypes.py +++ b/pandas/tests/frame/test_dtypes.py @@ -709,6 +709,17 @@ def test_astype_extension_dtypes_1d(self, dtype): tm.assert_frame_equal(df.astype(dtype), expected1) tm.assert_frame_equal(df.astype('int64').astype(dtype), expected1) + @pytest.mark.parametrize("dtype", ['category', 'Int64']) + def 
test_astype_extension_dtypes_duplicate_col(self, dtype): + # GH 24704 + a1 = Series([0, np.nan, 4], name='a') + a2 = Series([np.nan, 3, 5], name='a') + df = concat([a1, a2], axis=1) + + result = df.astype(dtype) + expected = concat([a1.astype(dtype), a2.astype(dtype)], axis=1) + assert_frame_equal(result, expected) + @pytest.mark.parametrize('dtype', [ {100: 'float64', 200: 'uint64'}, 'category', 'float64']) def test_astype_column_metadata(self, dtype):
- [X] xref https://github.com/pandas-dev/pandas/issues/24704#issuecomment-453266869 (this fixes item 1) - [X] tests added / passed - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [X] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/24717
2019-01-11T02:23:13Z
2019-01-11T12:49:27Z
2019-01-11T12:49:27Z
2019-01-11T14:39:25Z
DOC: Implementing redirect system, and adding user_guide redirects
diff --git a/doc/make.py b/doc/make.py index 0b14a9dcd4c34..eb4a33a569c5a 100755 --- a/doc/make.py +++ b/doc/make.py @@ -15,15 +15,18 @@ import sys import os import shutil +import csv import subprocess import argparse import webbrowser +import docutils +import docutils.parsers.rst DOC_PATH = os.path.dirname(os.path.abspath(__file__)) SOURCE_PATH = os.path.join(DOC_PATH, 'source') BUILD_PATH = os.path.join(DOC_PATH, 'build') -BUILD_DIRS = ['doctrees', 'html', 'latex', 'plots', '_static', '_templates'] +REDIRECTS_FILE = os.path.join(DOC_PATH, 'redirects.csv') class DocBuilder: @@ -139,6 +142,77 @@ def _open_browser(self, single_doc_html): single_doc_html) webbrowser.open(url, new=2) + def _get_page_title(self, page): + """ + Open the rst file `page` and extract its title. + """ + fname = os.path.join(SOURCE_PATH, '{}.rst'.format(page)) + option_parser = docutils.frontend.OptionParser( + components=(docutils.parsers.rst.Parser,)) + doc = docutils.utils.new_document( + '<doc>', + option_parser.get_default_values()) + with open(fname) as f: + data = f.read() + + parser = docutils.parsers.rst.Parser() + # do not generate any warning when parsing the rst + with open(os.devnull, 'a') as f: + doc.reporter.stream = f + parser.parse(data, doc) + + section = next(node for node in doc.children + if isinstance(node, docutils.nodes.section)) + title = next(node for node in section.children + if isinstance(node, docutils.nodes.title)) + + return title.astext() + + def _add_redirects(self): + """ + Create in the build directory an html file with a redirect, + for every row in REDIRECTS_FILE. 
+ """ + html = ''' + <html> + <head> + <meta http-equiv="refresh" content="0;URL={url}"/> + </head> + <body> + <p> + The page has been moved to <a href="{url}">{title}</a> + </p> + </body> + <html> + ''' + with open(REDIRECTS_FILE) as mapping_fd: + reader = csv.reader(mapping_fd) + for row in reader: + if not row or row[0].strip().startswith('#'): + continue + + path = os.path.join(BUILD_PATH, + 'html', + *row[0].split('/')) + '.html' + + try: + title = self._get_page_title(row[1]) + except Exception: + # the file can be an ipynb and not an rst, or docutils + # may not be able to read the rst because it has some + # sphinx specific stuff + title = 'this page' + + if os.path.exists(path): + raise RuntimeError(( + 'Redirection would overwrite an existing file: ' + '{}').format(path)) + + with open(path, 'w') as moved_page_fd: + moved_page_fd.write( + html.format(url='{}.html'.format(row[1]), + title=title)) + def html(self): """ Build HTML documentation. @@ -150,6 +224,8 @@ def html(self): if self.single_doc_html is not None: self._open_browser(self.single_doc_html) + else: + self._add_redirects() return ret_code def latex(self, force=False): diff --git a/doc/redirects.csv b/doc/redirects.csv new file mode 100644 index 0000000000000..4f4b3d7fc0780 --- /dev/null +++ b/doc/redirects.csv @@ -0,0 +1,37 @@ +# This file should contain all the redirects in the documentation +# in the format `<old_path>,<new_path>` + +# getting started +10min,getting_started/10min +basics,getting_started/basics +dsintro,getting_started/dsintro +overview,getting_started/overview +tutorials,getting_started/tutorials + +# user guide +advanced,user_guide/advanced +categorical,user_guide/categorical +computation,user_guide/computation +enhancingperf,user_guide/enhancingperf +gotchas,user_guide/gotchas +groupby,user_guide/groupby +indexing,user_guide/indexing +integer_na,user_guide/integer_na +io,user_guide/io +merging,user_guide/merging +missing_data,user_guide/missing_data 
+options,user_guide/options +reshaping,user_guide/reshaping +sparse,user_guide/sparse +style,user_guide/style +text,user_guide/text +timedeltas,user_guide/timedeltas +timeseries,user_guide/timeseries +visualization,user_guide/visualization + +# development +contributing,development/contributing +contributing_docstring,development/contributing_docstring +developer,development/developer +extending,development/extending +internals,development/internals
- [X] closes #24705 - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/24715
2019-01-10T23:35:52Z
2019-01-23T18:30:35Z
2019-01-23T18:30:35Z
2019-01-23T18:58:47Z
ModuleNotFoundError in in_interactive_session
diff --git a/pandas/io/formats/console.py b/pandas/io/formats/console.py index 64168dd7db1b8..d5ef9f61bc132 100644 --- a/pandas/io/formats/console.py +++ b/pandas/io/formats/console.py @@ -95,7 +95,10 @@ def in_interactive_session(): from pandas import get_option def check_main(): - import __main__ as main + try: + import __main__ as main + except ModuleNotFoundError: + return get_option('mode.sim_interactive') return (not hasattr(main, '__file__') or get_option('mode.sim_interactive'))
- [x] closes #24690 - [na] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/24714
2019-01-10T21:26:59Z
2019-01-13T19:17:35Z
2019-01-13T19:17:34Z
2019-01-13T20:59:16Z
implement Tick division, fix Timedelta.__cmp__ tick
diff --git a/doc/source/whatsnew/v0.24.0.rst b/doc/source/whatsnew/v0.24.0.rst index 3950ff3c8863d..6bb27d03835d7 100644 --- a/doc/source/whatsnew/v0.24.0.rst +++ b/doc/source/whatsnew/v0.24.0.rst @@ -1614,6 +1614,7 @@ Timedelta - Bug in :class:`Timedelta` and :func:`to_timedelta()` have inconsistencies in supported unit string (:issue:`21762`) - Bug in :class:`TimedeltaIndex` division where dividing by another :class:`TimedeltaIndex` raised ``TypeError`` instead of returning a :class:`Float64Index` (:issue:`23829`, :issue:`22631`) - Bug in :class:`TimedeltaIndex` comparison operations where comparing against non-``Timedelta``-like objects would raise ``TypeError`` instead of returning all-``False`` for ``__eq__`` and all-``True`` for ``__ne__`` (:issue:`24056`) +- Bug in :class:`Timedelta` comparisons when comparing with a ``Tick`` object incorrectly raising ``TypeError`` (:issue:`24710`) Timezones ^^^^^^^^^ diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx index c2f51436612a4..7097a702227d7 100644 --- a/pandas/_libs/tslibs/offsets.pyx +++ b/pandas/_libs/tslibs/offsets.pyx @@ -5,6 +5,7 @@ import cython import time from cpython.datetime cimport (PyDateTime_IMPORT, PyDateTime_Check, + PyDelta_Check, datetime, timedelta, time as dt_time) PyDateTime_IMPORT @@ -28,6 +29,9 @@ from pandas._libs.tslibs.np_datetime cimport ( npy_datetimestruct, dtstruct_to_dt64, dt64_to_dtstruct) from pandas._libs.tslibs.timezones import UTC + +PY2 = bytes == str + # --------------------------------------------------------------------- # Constants @@ -126,6 +130,26 @@ def apply_index_wraps(func): return wrapper +cdef _wrap_timedelta_result(result): + """ + Tick operations dispatch to their Timedelta counterparts. Wrap the result + of these operations in a Tick if possible. 
+ + Parameters + ---------- + result : object + + Returns + ------- + object + """ + if PyDelta_Check(result): + # convert Timedelta back to a Tick + from pandas.tseries.offsets import _delta_to_tick + return _delta_to_tick(result) + + return result + # --------------------------------------------------------------------- # Business Helpers @@ -508,7 +532,13 @@ class _Tick(object): dummy class to mix into tseries.offsets.Tick so that in tslibs.period we can do isinstance checks on _Tick and avoid importing tseries.offsets """ - pass + + def __truediv__(self, other): + result = self.delta.__truediv__(other) + return _wrap_timedelta_result(result) + + if PY2: + __div__ = __truediv__ # ---------------------------------------------------------------------- diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx index 037e7de27adc3..0476ba1c78efc 100644 --- a/pandas/_libs/tslibs/timedeltas.pyx +++ b/pandas/_libs/tslibs/timedeltas.pyx @@ -36,6 +36,7 @@ from pandas._libs.tslibs.nattype import nat_strings from pandas._libs.tslibs.nattype cimport ( checknull_with_nat, NPY_NAT, c_NaT as NaT) from pandas._libs.tslibs.offsets cimport to_offset +from pandas._libs.tslibs.offsets import _Tick as Tick # ---------------------------------------------------------------------- # Constants @@ -757,7 +758,7 @@ cdef class _Timedelta(timedelta): if isinstance(other, _Timedelta): ots = other - elif PyDelta_Check(other): + elif PyDelta_Check(other) or isinstance(other, Tick): ots = Timedelta(other) else: ndim = getattr(other, "ndim", -1) diff --git a/pandas/tests/arithmetic/test_numeric.py b/pandas/tests/arithmetic/test_numeric.py index 7afb90978131d..6694946902836 100644 --- a/pandas/tests/arithmetic/test_numeric.py +++ b/pandas/tests/arithmetic/test_numeric.py @@ -149,10 +149,6 @@ def test_numeric_arr_mul_tdscalar(self, scalar_td, numeric_idx, box): tm.assert_equal(commute, expected) def test_numeric_arr_rdiv_tdscalar(self, three_days, numeric_idx, box): - - 
if box is not pd.Index and isinstance(three_days, pd.offsets.Tick): - raise pytest.xfail("Tick division not implemented") - index = numeric_idx[1:3] expected = TimedeltaIndex(['3 Days', '36 Hours']) diff --git a/pandas/tests/scalar/timedelta/test_timedelta.py b/pandas/tests/scalar/timedelta/test_timedelta.py index db0c848eaeb4b..bc753c45c803a 100644 --- a/pandas/tests/scalar/timedelta/test_timedelta.py +++ b/pandas/tests/scalar/timedelta/test_timedelta.py @@ -78,6 +78,27 @@ def test_unary_ops(self): class TestTimedeltaComparison(object): + def test_compare_tick(self, tick_classes): + cls = tick_classes + + off = cls(4) + td = off.delta + assert isinstance(td, Timedelta) + + assert td == off + assert not td != off + assert td <= off + assert td >= off + assert not td < off + assert not td > off + + assert not td == 2 * off + assert td != 2 * off + assert td <= 2 * off + assert td < 2 * off + assert not td >= 2 * off + assert not td > 2 * off + def test_comparison_object_array(self): # analogous to GH#15183 td = Timedelta('2 days') diff --git a/pandas/tests/tseries/offsets/test_ticks.py b/pandas/tests/tseries/offsets/test_ticks.py index 27ec7d9d9093a..f4b012ec1897f 100644 --- a/pandas/tests/tseries/offsets/test_ticks.py +++ b/pandas/tests/tseries/offsets/test_ticks.py @@ -2,6 +2,8 @@ """ Tests for offsets.Tick and subclasses """ +from __future__ import division + from datetime import datetime, timedelta from hypothesis import assume, example, given, settings, strategies as st @@ -36,6 +38,10 @@ def test_delta_to_tick(): tick = offsets._delta_to_tick(delta) assert (tick == offsets.Day(3)) + td = Timedelta(nanoseconds=5) + tick = offsets._delta_to_tick(td) + assert tick == Nano(5) + @pytest.mark.parametrize('cls', tick_classes) @settings(deadline=None) # GH 24641 @@ -228,6 +234,34 @@ def test_tick_addition(kls, expected): assert result == expected +@pytest.mark.parametrize('cls', tick_classes) +def test_tick_division(cls): + off = cls(10) + + assert off / cls(5) == 2 + 
assert off / 2 == cls(5) + assert off / 2.0 == cls(5) + + assert off / off.delta == 1 + assert off / off.delta.to_timedelta64() == 1 + + assert off / Nano(1) == off.delta / Nano(1).delta + + if cls is not Nano: + # A case where we end up with a smaller class + result = off / 1000 + assert isinstance(result, offsets.Tick) + assert not isinstance(result, cls) + assert result.delta == off.delta / 1000 + + if cls._inc < Timedelta(seconds=1): + # Case where we end up with a bigger class + result = off / .001 + assert isinstance(result, offsets.Tick) + assert not isinstance(result, cls) + assert result.delta == off.delta / .001 + + @pytest.mark.parametrize('cls1', tick_classes) @pytest.mark.parametrize('cls2', tick_classes) def test_tick_zero(cls1, cls2): diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py index 73f85d954432e..f208ce37a3b14 100644 --- a/pandas/tseries/offsets.py +++ b/pandas/tseries/offsets.py @@ -2343,7 +2343,8 @@ def isAnchored(self): def _delta_to_tick(delta): - if delta.microseconds == 0: + if delta.microseconds == 0 and getattr(delta, "nanoseconds", 0) == 0: + # nanoseconds only for pd.Timedelta if delta.seconds == 0: return Day(delta.days) else:
- [ ] closes #xxxx - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/24710
2019-01-10T17:52:24Z
2019-01-13T18:43:58Z
2019-01-13T18:43:57Z
2019-01-13T23:08:34Z
Revert handling of i8values to DatetimeIndex
diff --git a/doc/source/whatsnew/v0.24.0.rst b/doc/source/whatsnew/v0.24.0.rst index dd06bade2a203..3950ff3c8863d 100644 --- a/doc/source/whatsnew/v0.24.0.rst +++ b/doc/source/whatsnew/v0.24.0.rst @@ -1235,7 +1235,6 @@ Datetimelike API Changes - :class:`PeriodIndex` subtraction of another ``PeriodIndex`` will now return an object-dtype :class:`Index` of :class:`DateOffset` objects instead of raising a ``TypeError`` (:issue:`20049`) - :func:`cut` and :func:`qcut` now returns a :class:`DatetimeIndex` or :class:`TimedeltaIndex` bins when the input is datetime or timedelta dtype respectively and ``retbins=True`` (:issue:`19891`) - :meth:`DatetimeIndex.to_period` and :meth:`Timestamp.to_period` will issue a warning when timezone information will be lost (:issue:`21333`) -- :class:`DatetimeIndex` now accepts :class:`Int64Index` arguments as epoch timestamps (:issue:`20997`) - :meth:`PeriodIndex.tz_convert` and :meth:`PeriodIndex.tz_localize` have been removed (:issue:`21781`) .. _whatsnew_0240.api.other: @@ -1353,6 +1352,52 @@ the object's ``freq`` attribute (:issue:`21939`, :issue:`23878`). dti + pd.Index([1 * dti.freq, 2 * dti.freq]) +.. _whatsnew_0240.deprecations.integer_tz: + +Passing Integer data and a timezone to DatetimeIndex +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The behavior of :class:`DatetimeIndex` when passed integer data and +a timezone is changing in a future version of pandas. Previously, these +were interpreted as wall times in the desired timezone. In the future, +these will be interpreted as wall times in UTC, which are then converted +to the desired timezone (:issue:`24559`). + +The default behavior remains the same, but issues a warning: + +.. code-block:: ipython + + In [3]: pd.DatetimeIndex([946684800000000000], tz="US/Central") + /bin/ipython:1: FutureWarning: + Passing integer-dtype data and a timezone to DatetimeIndex. Integer values + will be interpreted differently in a future version of pandas. 
Previously, + these were viewed as datetime64[ns] values representing the wall time + *in the specified timezone*. In the future, these will be viewed as + datetime64[ns] values representing the wall time *in UTC*. This is similar + to a nanosecond-precision UNIX epoch. To accept the future behavior, use + + pd.to_datetime(integer_data, utc=True).tz_convert(tz) + + To keep the previous behavior, use + + pd.to_datetime(integer_data).tz_localize(tz) + + #!/bin/python3 + Out[3]: DatetimeIndex(['2000-01-01 00:00:00-06:00'], dtype='datetime64[ns, US/Central]', freq=None) + +As the warning message explains, opt in to the future behavior by specifying that +the integer values are UTC, and then converting to the final timezone: + +.. ipython:: python + + pd.to_datetime([946684800000000000], utc=True).tz_convert('US/Central') + +The old behavior can be retained with by localizing directly to the final timezone: + +.. ipython:: python + + pd.to_datetime([946684800000000000]).tz_localize('US/Central') + .. _whatsnew_0240.deprecations.tz_aware_array: Converting Timezone-Aware Series and Index to NumPy Arrays diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index d2d9fcf954fe3..a2d67efbecbba 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -33,6 +33,21 @@ from pandas.tseries.offsets import Day, Tick _midnight = time(0, 0) +# TODO(GH-24559): Remove warning, int_as_wall_time parameter. +_i8_message = """ + Passing integer-dtype data and a timezone to DatetimeIndex. Integer values + will be interpreted differently in a future version of pandas. Previously, + these were viewed as datetime64[ns] values representing the wall time + *in the specified timezone*. In the future, these will be viewed as + datetime64[ns] values representing the wall time *in UTC*. This is similar + to a nanosecond-precision UNIX epoch. 
To accept the future behavior, use + + pd.to_datetime(integer_data, utc=True).tz_convert(tz) + + To keep the previous behavior, use + + pd.to_datetime(integer_data).tz_localize(tz) +""" def tz_to_dtype(tz): @@ -342,13 +357,15 @@ def _simple_new(cls, values, freq=None, dtype=_NS_DTYPE): @classmethod def _from_sequence(cls, data, dtype=None, copy=False, tz=None, freq=None, - dayfirst=False, yearfirst=False, ambiguous='raise'): + dayfirst=False, yearfirst=False, ambiguous='raise', + int_as_wall_time=False): freq, freq_infer = dtl.maybe_infer_freq(freq) subarr, tz, inferred_freq = sequence_to_dt64ns( data, dtype=dtype, copy=copy, tz=tz, - dayfirst=dayfirst, yearfirst=yearfirst, ambiguous=ambiguous) + dayfirst=dayfirst, yearfirst=yearfirst, + ambiguous=ambiguous, int_as_wall_time=int_as_wall_time) freq, freq_infer = dtl.validate_inferred_freq(freq, inferred_freq, freq_infer) @@ -1649,7 +1666,8 @@ def to_julian_date(self): def sequence_to_dt64ns(data, dtype=None, copy=False, tz=None, - dayfirst=False, yearfirst=False, ambiguous='raise'): + dayfirst=False, yearfirst=False, ambiguous='raise', + int_as_wall_time=False): """ Parameters ---------- @@ -1661,6 +1679,13 @@ def sequence_to_dt64ns(data, dtype=None, copy=False, yearfirst : bool, default False ambiguous : str, bool, or arraylike, default 'raise' See pandas._libs.tslibs.conversion.tz_localize_to_utc + int_as_wall_time : bool, default False + Whether to treat ints as wall time in specified timezone, or as + nanosecond-precision UNIX epoch (wall time in UTC). + This is used in DatetimeIndex.__init__ to deprecate the wall-time + behaviour. 
+ + ..versionadded:: 0.24.0 Returns ------- @@ -1717,6 +1742,10 @@ def sequence_to_dt64ns(data, dtype=None, copy=False, data, inferred_tz = objects_to_datetime64ns( data, dayfirst=dayfirst, yearfirst=yearfirst) tz = maybe_infer_tz(tz, inferred_tz) + # When a sequence of timestamp objects is passed, we always + # want to treat the (now i8-valued) data as UTC timestamps, + # not wall times. + int_as_wall_time = False # `data` may have originally been a Categorical[datetime64[ns, tz]], # so we need to handle these types. @@ -1744,8 +1773,16 @@ def sequence_to_dt64ns(data, dtype=None, copy=False, else: # must be integer dtype otherwise # assume this data are epoch timestamps + if tz: + tz = timezones.maybe_get_tz(tz) + if data.dtype != _INT64_DTYPE: data = data.astype(np.int64, copy=False) + if int_as_wall_time and tz is not None and not timezones.is_utc(tz): + warnings.warn(_i8_message, FutureWarning, stacklevel=4) + data = conversion.tz_localize_to_utc(data.view('i8'), tz, + ambiguous=ambiguous) + data = data.view(_NS_DTYPE) result = data.view(_NS_DTYPE) if copy: diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index c853a30c0de79..99114b7dcf34d 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -22,7 +22,8 @@ is_dtype_union_equal, is_extension_array_dtype, is_float, is_float_dtype, is_hashable, is_integer, is_integer_dtype, is_interval_dtype, is_iterator, is_list_like, is_object_dtype, is_period_dtype, is_scalar, - is_signed_integer_dtype, is_timedelta64_dtype, is_unsigned_integer_dtype) + is_signed_integer_dtype, is_timedelta64_dtype, is_unsigned_integer_dtype, + pandas_dtype) import pandas.core.dtypes.concat as _concat from pandas.core.dtypes.generic import ( ABCDataFrame, ABCDateOffset, ABCDatetimeArray, ABCIndexClass, @@ -732,6 +733,13 @@ def astype(self, dtype, copy=True): from .category import CategoricalIndex return CategoricalIndex(self.values, name=self.name, dtype=dtype, copy=copy) + elif 
is_datetime64tz_dtype(dtype): + # TODO(GH-24559): Remove this block, use the following elif. + # avoid FutureWarning from DatetimeIndex constructor. + from pandas import DatetimeIndex + tz = pandas_dtype(dtype).tz + return (DatetimeIndex(np.asarray(self)) + .tz_localize("UTC").tz_convert(tz)) elif is_extension_array_dtype(dtype): return Index(np.asarray(self), dtype=dtype, copy=copy) diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 664ca9c5d2f05..a4e058160e567 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -299,7 +299,8 @@ def __new__(cls, data=None, dtarr = DatetimeArray._from_sequence( data, dtype=dtype, copy=copy, tz=tz, freq=freq, - dayfirst=dayfirst, yearfirst=yearfirst, ambiguous=ambiguous) + dayfirst=dayfirst, yearfirst=yearfirst, ambiguous=ambiguous, + int_as_wall_time=True) subarr = cls._simple_new(dtarr, name=name, freq=dtarr.freq, tz=dtarr.tz) diff --git a/pandas/core/reshape/tile.py b/pandas/core/reshape/tile.py index 15df0ca2442fa..c107ed51226b0 100644 --- a/pandas/core/reshape/tile.py +++ b/pandas/core/reshape/tile.py @@ -449,7 +449,10 @@ def _convert_bin_to_datelike_type(bins, dtype): bins : Array-like of bins, DatetimeIndex or TimedeltaIndex if dtype is datelike """ - if is_datetime64tz_dtype(dtype) or is_datetime_or_timedelta_dtype(dtype): + if is_datetime64tz_dtype(dtype): + bins = to_datetime(bins.astype(np.int64), + utc=True).tz_convert(dtype.tz) + elif is_datetime_or_timedelta_dtype(dtype): bins = Index(bins.astype(np.int64), dtype=dtype) return bins diff --git a/pandas/tests/dtypes/test_common.py b/pandas/tests/dtypes/test_common.py index f0f77b4977610..62e96fd39a759 100644 --- a/pandas/tests/dtypes/test_common.py +++ b/pandas/tests/dtypes/test_common.py @@ -209,8 +209,8 @@ def test_is_datetime64tz_dtype(): assert not com.is_datetime64tz_dtype(object) assert not com.is_datetime64tz_dtype([1, 2, 3]) assert not com.is_datetime64tz_dtype(pd.DatetimeIndex([1, 2, 3])) - 
assert com.is_datetime64tz_dtype(pd.DatetimeIndex( - [1, 2, 3], tz="US/Eastern")) + assert com.is_datetime64tz_dtype(pd.DatetimeIndex(['2000'], + tz="US/Eastern")) def test_is_timedelta64_dtype(): @@ -286,7 +286,7 @@ def test_is_datetimelike(): assert com.is_datetimelike(pd.PeriodIndex([], freq="A")) assert com.is_datetimelike(np.array([], dtype=np.datetime64)) assert com.is_datetimelike(pd.Series([], dtype="timedelta64[ns]")) - assert com.is_datetimelike(pd.DatetimeIndex([1, 2, 3], tz="US/Eastern")) + assert com.is_datetimelike(pd.DatetimeIndex(["2000"], tz="US/Eastern")) dtype = DatetimeTZDtype("ns", tz="US/Eastern") s = pd.Series([], dtype=dtype) @@ -480,7 +480,7 @@ def test_needs_i8_conversion(): assert com.needs_i8_conversion(np.datetime64) assert com.needs_i8_conversion(pd.Series([], dtype="timedelta64[ns]")) assert com.needs_i8_conversion(pd.DatetimeIndex( - [1, 2, 3], tz="US/Eastern")) + ["2000"], tz="US/Eastern")) def test_is_numeric_dtype(): @@ -541,7 +541,7 @@ def test_is_extension_type(check_scipy): assert com.is_extension_type(pd.Series(cat)) assert com.is_extension_type(pd.SparseArray([1, 2, 3])) assert com.is_extension_type(pd.SparseSeries([1, 2, 3])) - assert com.is_extension_type(pd.DatetimeIndex([1, 2, 3], tz="US/Eastern")) + assert com.is_extension_type(pd.DatetimeIndex(['2000'], tz="US/Eastern")) dtype = DatetimeTZDtype("ns", tz="US/Eastern") s = pd.Series([], dtype=dtype) @@ -635,8 +635,8 @@ def test__get_dtype_fails(input_param): (pd.DatetimeIndex([1, 2]), np.datetime64), (pd.DatetimeIndex([1, 2]).dtype, np.datetime64), ('<M8[ns]', np.datetime64), - (pd.DatetimeIndex([1, 2], tz='Europe/London'), pd.Timestamp), - (pd.DatetimeIndex([1, 2], tz='Europe/London').dtype, + (pd.DatetimeIndex(['2000'], tz='Europe/London'), pd.Timestamp), + (pd.DatetimeIndex(['2000'], tz='Europe/London').dtype, pd.Timestamp), ('datetime64[ns, Europe/London]', pd.Timestamp), (pd.SparseSeries([1, 2], dtype='int32'), np.int32), diff --git 
a/pandas/tests/indexes/datetimes/test_astype.py b/pandas/tests/indexes/datetimes/test_astype.py index 784d1ca6fb82c..ddf6a6ded69f8 100644 --- a/pandas/tests/indexes/datetimes/test_astype.py +++ b/pandas/tests/indexes/datetimes/test_astype.py @@ -238,10 +238,10 @@ def _check_rng(rng): ['US/Pacific', 'datetime64[ns, US/Pacific]'], [None, 'datetime64[ns]']]) def test_integer_index_astype_datetime(self, tz, dtype): - # GH 20997, 20964 + # GH 20997, 20964, 24559 val = [pd.Timestamp('2018-01-01', tz=tz).value] result = pd.Index(val).astype(dtype) - expected = pd.DatetimeIndex(['2018-01-01'], tz=tz) + expected = pd.DatetimeIndex(["2018-01-01"], tz=tz) tm.assert_index_equal(result, expected) diff --git a/pandas/tests/indexes/datetimes/test_construction.py b/pandas/tests/indexes/datetimes/test_construction.py index 07c42afe44b33..2768da0316aad 100644 --- a/pandas/tests/indexes/datetimes/test_construction.py +++ b/pandas/tests/indexes/datetimes/test_construction.py @@ -118,8 +118,15 @@ def test_construction_with_alt_tz_localize(self, kwargs, tz_aware_fixture): tz = tz_aware_fixture i = pd.date_range('20130101', periods=5, freq='H', tz=tz) kwargs = {key: attrgetter(val)(i) for key, val in kwargs.items()} - result = DatetimeIndex(i.tz_localize(None).asi8, **kwargs) - expected = i.tz_localize(None).tz_localize('UTC').tz_convert(tz) + + if str(tz) in ('UTC', 'tzutc()'): + warn = None + else: + warn = FutureWarning + + with tm.assert_produces_warning(warn, check_stacklevel=False): + result = DatetimeIndex(i.tz_localize(None).asi8, **kwargs) + expected = DatetimeIndex(i, **kwargs) tm.assert_index_equal(result, expected) # localize into the provided tz @@ -377,6 +384,19 @@ def test_range_kwargs_deprecated(self): with tm.assert_produces_warning(FutureWarning): DatetimeIndex(start='1/1/2000', end='1/10/2000', freq='D') + def test_integer_values_and_tz_deprecated(self): + # GH-24559 + values = np.array([946684800000000000]) + with tm.assert_produces_warning(FutureWarning): + result = 
DatetimeIndex(values, tz='US/Central') + expected = pd.DatetimeIndex(['2000-01-01T00:00:00'], tz="US/Central") + tm.assert_index_equal(result, expected) + + # but UTC is *not* deprecated. + with tm.assert_produces_warning(None): + result = DatetimeIndex(values, tz='UTC') + expected = pd.DatetimeIndex(['2000-01-01T00:00:00'], tz="US/Central") + def test_constructor_coverage(self): rng = date_range('1/1/2000', periods=10.5) exp = date_range('1/1/2000', periods=10) @@ -555,12 +575,17 @@ def test_constructor_timestamp_near_dst(self): ts[1].to_pydatetime()]) tm.assert_index_equal(result, expected) + # TODO(GH-24559): Remove the xfail for the tz-aware case. @pytest.mark.parametrize('klass', [Index, DatetimeIndex]) @pytest.mark.parametrize('box', [ np.array, partial(np.array, dtype=object), list]) @pytest.mark.parametrize('tz, dtype', [ - ['US/Pacific', 'datetime64[ns, US/Pacific]'], - [None, 'datetime64[ns]']]) + pytest.param('US/Pacific', 'datetime64[ns, US/Pacific]', + marks=[pytest.mark.xfail(), + pytest.mark.filterwarnings( + "ignore:\\n Passing:FutureWarning")]), + [None, 'datetime64[ns]'], + ]) def test_constructor_with_int_tz(self, klass, box, tz, dtype): # GH 20997, 20964 ts = Timestamp('2018-01-01', tz=tz) @@ -568,8 +593,12 @@ def test_constructor_with_int_tz(self, klass, box, tz, dtype): expected = klass([ts]) assert result == expected + # This is the desired future behavior + @pytest.mark.xfail(reason="Future behavior", strict=False) + @pytest.mark.filterwarnings("ignore:\\n Passing:FutureWarning") def test_construction_int_rountrip(self, tz_naive_fixture): # GH 12619 + # TODO(GH-24559): Remove xfail tz = tz_naive_fixture result = 1293858000000000000 expected = DatetimeIndex([1293858000000000000], tz=tz).asi8[0] diff --git a/pandas/tests/indexes/multi/test_integrity.py b/pandas/tests/indexes/multi/test_integrity.py index 0efd589902b39..c1638a9cde660 100644 --- a/pandas/tests/indexes/multi/test_integrity.py +++ b/pandas/tests/indexes/multi/test_integrity.py @@ 
-50,7 +50,9 @@ def test_values_multiindex_datetimeindex(): # Test to ensure we hit the boxing / nobox part of MI.values ints = np.arange(10 ** 18, 10 ** 18 + 5) naive = pd.DatetimeIndex(ints) - aware = pd.DatetimeIndex(ints, tz='US/Central') + # TODO(GH-24559): Remove the FutureWarning + with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): + aware = pd.DatetimeIndex(ints, tz='US/Central') idx = pd.MultiIndex.from_arrays([naive, aware]) result = idx.values diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py index 218b1127032f5..d75800b763cb9 100644 --- a/pandas/tests/indexes/test_base.py +++ b/pandas/tests/indexes/test_base.py @@ -4,6 +4,7 @@ from datetime import datetime, timedelta from decimal import Decimal import math +import sys import numpy as np import pytest @@ -401,24 +402,40 @@ def test_constructor_dtypes_datetime(self, tz_naive_fixture, attr, utc, # Test constructing with a datetimetz dtype # .values produces numpy datetimes, so these are considered naive # .asi8 produces integers, so these are considered epoch timestamps + # ^the above will be true in a later version. Right now we `.view` + # the i8 values as NS_DTYPE, effectively treating them as wall times. index = pd.date_range('2011-01-01', periods=5) arg = getattr(index, attr) - if utc: - index = index.tz_localize('UTC').tz_convert(tz_naive_fixture) - else: - index = index.tz_localize(tz_naive_fixture) + index = index.tz_localize(tz_naive_fixture) dtype = index.dtype - result = klass(arg, tz=tz_naive_fixture) + # TODO(GH-24559): Remove the sys.modules and warnings + # not sure what this is from. It's Py2 only. + modules = [sys.modules['pandas.core.indexes.base']] + + if (tz_naive_fixture and attr == "asi8" and + str(tz_naive_fixture) not in ('UTC', 'tzutc()')): + ex_warn = FutureWarning + else: + ex_warn = None + + # stacklevel is checked elsewhere. We don't do it here since + # Index will have an frame, throwing off the expected. 
+ with tm.assert_produces_warning(ex_warn, check_stacklevel=False, + clear=modules): + result = klass(arg, tz=tz_naive_fixture) tm.assert_index_equal(result, index) - result = klass(arg, dtype=dtype) + with tm.assert_produces_warning(ex_warn, check_stacklevel=False): + result = klass(arg, dtype=dtype) tm.assert_index_equal(result, index) - result = klass(list(arg), tz=tz_naive_fixture) + with tm.assert_produces_warning(ex_warn, check_stacklevel=False): + result = klass(list(arg), tz=tz_naive_fixture) tm.assert_index_equal(result, index) - result = klass(list(arg), dtype=dtype) + with tm.assert_produces_warning(ex_warn, check_stacklevel=False): + result = klass(list(arg), dtype=dtype) tm.assert_index_equal(result, index) @pytest.mark.parametrize("attr", ['values', 'asi8']) diff --git a/pandas/tests/resample/test_period_index.py b/pandas/tests/resample/test_period_index.py index 7b57a280c56fc..c2fbb5bbb088c 100644 --- a/pandas/tests/resample/test_period_index.py +++ b/pandas/tests/resample/test_period_index.py @@ -517,8 +517,10 @@ def test_resample_weekly_bug_1726(self): def test_resample_with_dst_time_change(self): # GH 15549 - index = pd.DatetimeIndex([1457537600000000000, 1458059600000000000], - tz='UTC').tz_convert('America/Chicago') + index = ( + pd.DatetimeIndex([1457537600000000000, 1458059600000000000]) + .tz_localize("UTC").tz_convert('America/Chicago') + ) df = pd.DataFrame([1, 2], index=index) result = df.resample('12h', closed='right', label='right').last().ffill() diff --git a/pandas/tests/test_base.py b/pandas/tests/test_base.py index 657f5f193c85e..ac365eb87d1bc 100644 --- a/pandas/tests/test_base.py +++ b/pandas/tests/test_base.py @@ -1037,6 +1037,8 @@ class TestToIterable(object): lambda x: list(x.__iter__()), ], ids=['tolist', 'to_list', 'list', 'iter']) @pytest.mark.parametrize('typ', [Series, Index]) + @pytest.mark.filterwarnings("ignore:\\n Passing:FutureWarning") + # TODO(GH-24559): Remove the filterwarnings def test_iterable(self, typ, method, 
dtype, rdtype): # gh-10904 # gh-13258 @@ -1089,6 +1091,8 @@ def test_iterable_items(self, dtype, rdtype): ('object', (int, long)), ('category', (int, long))]) @pytest.mark.parametrize('typ', [Series, Index]) + @pytest.mark.filterwarnings("ignore:\\n Passing:FutureWarning") + # TODO(GH-24559): Remove the filterwarnings def test_iterable_map(self, typ, dtype, rdtype): # gh-13236 # coerce iteration to underlying python / pandas types
TODO: - [x] decide if we're deprecating anything. - [x] update the message (and update filterwarnings) - [x] whatsnew Questions 1. What's the expected behavior of `pd.Index(i8).dtype(DatetimeTZDtype)`? 2. Do we prefer this `int_as_wall_time` parameter, or @jbrockmendel's `_from_sequence_dti` from https://github.com/jbrockmendel/pandas/commit/635b267cbb7ad63edb86157b35f027898d99542a 3. cc @jbrockmendel @jorisvandenbossche @jreback @mroeschke xref https://github.com/pandas-dev/pandas/issues/24559
https://api.github.com/repos/pandas-dev/pandas/pulls/24708
2019-01-10T15:42:15Z
2019-01-11T14:05:17Z
2019-01-11T14:05:17Z
2019-01-11T14:08:06Z
Updated with match
diff --git a/pandas/tests/io/test_excel.py b/pandas/tests/io/test_excel.py index 84383afed1d03..717e9bc23c6b1 100644 --- a/pandas/tests/io/test_excel.py +++ b/pandas/tests/io/test_excel.py @@ -698,7 +698,7 @@ def test_excel_read_buffer(self, ext): def test_bad_engine_raises(self, ext): bad_engine = 'foo' - with pytest.raises(ValueError, message="Unknown engine: foo"): + with pytest.raises(ValueError, match="Unknown engine: foo"): read_excel('', engine=bad_engine) @tm.network diff --git a/pandas/tests/io/test_pytables.py b/pandas/tests/io/test_pytables.py index 55b738a56f809..517a3e059469c 100644 --- a/pandas/tests/io/test_pytables.py +++ b/pandas/tests/io/test_pytables.py @@ -3923,7 +3923,7 @@ def test_read_column(self): # HDFStore.select_column should raise a KeyError # exception if the key is not a valid store with pytest.raises(KeyError, - message='No object named index in the file'): + match='No object named df in the file'): store.select_column('df', 'index') store.append('df', df) diff --git a/pandas/tests/scalar/timestamp/test_unary_ops.py b/pandas/tests/scalar/timestamp/test_unary_ops.py index 6fc6aa98fe950..3f9a30d254126 100644 --- a/pandas/tests/scalar/timestamp/test_unary_ops.py +++ b/pandas/tests/scalar/timestamp/test_unary_ops.py @@ -171,7 +171,7 @@ def test_round_dst_border_nonexistent(self, method, ts_str, freq): assert result is NaT with pytest.raises(pytz.NonExistentTimeError, - message='2018-03-11 02:00:00'): + match='2018-03-11 02:00:00'): getattr(ts, method)(freq, nonexistent='raise') @pytest.mark.parametrize('timestamp', [ diff --git a/pandas/tests/series/test_datetime_values.py b/pandas/tests/series/test_datetime_values.py index 52b72bcafe555..a916cf300653a 100644 --- a/pandas/tests/series/test_datetime_values.py +++ b/pandas/tests/series/test_datetime_values.py @@ -300,7 +300,7 @@ def test_dt_round_tz_nonexistent(self, method, ts_str, freq): tm.assert_series_equal(result, expected) with pytest.raises(pytz.NonExistentTimeError, - 
message='2018-03-11 02:00:00'): + match='2018-03-11 02:00:00'): getattr(s.dt, method)(freq, nonexistent='raise') def test_dt_namespace_accessor_categorical(self): diff --git a/pandas/tests/sparse/frame/test_frame.py b/pandas/tests/sparse/frame/test_frame.py index f908c7b263dee..bfb5103c97adc 100644 --- a/pandas/tests/sparse/frame/test_frame.py +++ b/pandas/tests/sparse/frame/test_frame.py @@ -216,8 +216,8 @@ def test_constructor_from_unknown_type(self): class Unknown(object): pass with pytest.raises(TypeError, - message='SparseDataFrame called with unknown type ' - '"Unknown" for data argument'): + match=('SparseDataFrame called with unknown type ' + '"Unknown" for data argument')): SparseDataFrame(Unknown()) def test_constructor_preserve_attr(self): diff --git a/pandas/tests/test_config.py b/pandas/tests/test_config.py index 6e47f5543012f..54db3887850ea 100644 --- a/pandas/tests/test_config.py +++ b/pandas/tests/test_config.py @@ -249,7 +249,7 @@ def test_deprecate_option(self): warnings.simplefilter('always') with pytest.raises( KeyError, - message="Nonexistent option didn't raise KeyError"): + match="No such keys.s.: 'foo'"): self.cf.get_option('foo') assert len(w) == 1 # should have raised one warning assert 'deprecated' in str(w[-1]) # we get the default message
- [x] closes #24706 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/24707
2019-01-10T15:23:37Z
2019-01-16T15:18:56Z
2019-01-16T15:18:55Z
2019-01-16T15:20:28Z
Update test_ticks.py
diff --git a/pandas/tests/tseries/offsets/test_ticks.py b/pandas/tests/tseries/offsets/test_ticks.py index dcc7afa797063..27ec7d9d9093a 100644 --- a/pandas/tests/tseries/offsets/test_ticks.py +++ b/pandas/tests/tseries/offsets/test_ticks.py @@ -59,6 +59,7 @@ def test_tick_add_sub(cls, n, m): @pytest.mark.parametrize('cls', tick_classes) +@settings(deadline=None) @example(n=2, m=3) @given(n=st.integers(-999, 999), m=st.integers(-999, 999)) def test_tick_equality(cls, n, m):
- [x] closes #24700 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
https://api.github.com/repos/pandas-dev/pandas/pulls/24701
2019-01-10T13:10:31Z
2019-01-10T14:51:03Z
2019-01-10T14:51:02Z
2019-01-10T14:51:07Z
DEPR/API: disallow lists within list for set_index
diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst index 124ec8f4ab92c..a2537a20058d4 100644 --- a/doc/source/whatsnew/v0.25.0.rst +++ b/doc/source/whatsnew/v0.25.0.rst @@ -93,6 +93,34 @@ Other API Changes Deprecations ~~~~~~~~~~~~ +**Lists as arrays in :meth:`DataFrame.set_index`** + +Currently, :meth:`DataFrame.set_index` accepts lists as meaning two different things - as a list of labels, and as an array-like collection of values. +This ambiguity decides in favor of the list of labels, but nested lists are interpreted as arrays: + +.. ipython:: python + :okwarning: + + df = pd.DataFrame(np.reshape(np.arange(12), (3, 4)), + columns=['a', 'b', 'c', 'd']) + df.set_index(['a', 'b', 'c']) + df.set_index([['a', 'b', 'c']]) + +The latter case has now been deprecated and will be removed in a future version. As a replacement, +it is suggested to wrap the list in a :class:`Series`, :class:`Index`, ``np.array`` or an iterator. + +.. ipython:: python + + df.set_index(pd.Series(['a', 'b', 'c'])) + +It remains possible to use lists as collecting several column keys or arrays to create multiple levels of a :class:`MultiIndex`. + +.. ipython:: ipython + + df.set_index(['a', pd.Series(['a', 'b', 'c'])]) + +**Other deprecations** + - Deprecated the `M (months)` and `Y (year)` `units` parameter of :func: `pandas.to_timedelta`, :func: `pandas.Timedelta` and :func: `pandas.TimedeltaIndex` (:issue:`16344`) .. _whatsnew_0250.prior_deprecations: diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 6b4d95055d06d..e97b347220072 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -4033,6 +4033,8 @@ def set_index(self, keys, drop=True, append=False, inplace=False, arbitrary combination of column keys and arrays. Here, "array" encompasses :class:`Series`, :class:`Index`, ``np.ndarray``, and instances of :class:`abc.Iterator`. 
+ Lists (in the sense of a sequence of values, not column labels) + have been deprecated, and will be removed in a future version. drop : bool, default True Delete columns to be used as the new index. append : bool, default False @@ -4116,13 +4118,16 @@ def set_index(self, keys, drop=True, append=False, inplace=False, 'one-dimensional arrays.') missing = [] + depr_warn = False for col in keys: if isinstance(col, (ABCIndexClass, ABCSeries, np.ndarray, - list, Iterator)): + Iterator)): # arrays are fine as long as they are one-dimensional # iterators get converted to list below if getattr(col, 'ndim', 1) != 1: raise ValueError(err_msg) + elif isinstance(col, list): + depr_warn = True else: # everything else gets tried as a key; see GH 24969 try: @@ -4136,6 +4141,13 @@ def set_index(self, keys, drop=True, append=False, inplace=False, if missing: raise KeyError('None of {} are in the columns'.format(missing)) + if depr_warn: + msg = ('Passing lists within a list to the parameter "keys" is ' + 'deprecated and will be removed in a future version. To ' + 'silence this warning, wrap the lists in a Series / Index ' + 'or np.ndarray. E.g. 
df.set_index(["A", [1, 2, 3]]) should ' + 'be passed as df.set_index(["A", pd.Series([1, 2, 3])]).') + warnings.warn(msg, FutureWarning, stacklevel=2) if inplace: frame = self diff --git a/pandas/tests/frame/test_alter_axes.py b/pandas/tests/frame/test_alter_axes.py index a25e893e08900..9ea2d4258bc9c 100644 --- a/pandas/tests/frame/test_alter_axes.py +++ b/pandas/tests/frame/test_alter_axes.py @@ -115,10 +115,8 @@ def test_set_index_after_mutation(self): tm.assert_frame_equal(result, expected) # MultiIndex constructor does not work directly on Series -> lambda - # Add list-of-list constructor because list is ambiguous -> lambda # also test index name if append=True (name is duplicate here for B) - @pytest.mark.parametrize('box', [Series, Index, np.array, - list, lambda x: [list(x)], + @pytest.mark.parametrize('box', [Series, Index, np.array, list, lambda x: MultiIndex.from_arrays([x])]) @pytest.mark.parametrize('append, index_name', [(True, None), (True, 'B'), (True, 'test'), (False, None)]) @@ -135,7 +133,7 @@ def test_set_index_pass_single_array(self, frame_of_index_cols, with pytest.raises(KeyError, match=msg): df.set_index(key, drop=drop, append=append) else: - # np.array/list-of-list "forget" the name of B + # np.array "forgets" the name of B name_mi = getattr(key, 'names', None) name = [getattr(key, 'name', None)] if name_mi is None else name_mi @@ -163,9 +161,13 @@ def test_set_index_pass_arrays(self, frame_of_index_cols, keys = ['A', box(df['B'])] # np.array/list "forget" the name of B - names = ['A', None if box in [np.array, list, tuple, iter] else 'B'] + names = ['A', None if box in [np.array, list] else 'B'] - result = df.set_index(keys, drop=drop, append=append) + if box == list: + with tm.assert_produces_warning(FutureWarning): + result = df.set_index(keys, drop=drop, append=append) + else: + result = df.set_index(keys, drop=drop, append=append) # only valid column keys are dropped # since B is always passed as array above, only A is dropped, if at 
all @@ -193,7 +195,12 @@ def test_set_index_pass_arrays_duplicate(self, frame_of_index_cols, drop, df.index.name = index_name keys = [box1(df['A']), box2(df['A'])] - result = df.set_index(keys, drop=drop, append=append) + + if box1 == list or box2 == list: + with tm.assert_produces_warning(FutureWarning): + result = df.set_index(keys, drop=drop, append=append) + else: + result = df.set_index(keys, drop=drop, append=append) # if either box is iter, it has been consumed; re-read keys = [box1(df['A']), box2(df['A'])] @@ -206,8 +213,16 @@ def test_set_index_pass_arrays_duplicate(self, frame_of_index_cols, drop, # to test against already-tested behaviour, we add sequentially, # hence second append always True; must wrap keys in list, otherwise # box = list would be interpreted as keys - expected = df.set_index([keys[0]], drop=first_drop, append=append) - expected = expected.set_index([keys[1]], drop=drop, append=True) + if box1 == list or box2 == list: + with tm.assert_produces_warning(FutureWarning): + expected = df.set_index([keys[0]], drop=first_drop, + append=append) + expected = expected.set_index([keys[1]], drop=drop, + append=True) + else: + expected = df.set_index([keys[0]], drop=first_drop, append=append) + expected = expected.set_index([keys[1]], drop=drop, append=True) + tm.assert_frame_equal(result, expected) @pytest.mark.parametrize('append', [True, False])
- [x] one way to close #24046 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry - [x] add deprecation warning & test it I wanted to add this before the RC gets cut for two reasons. On the one hand, the docs have silently deprecated arrays in `df.set_index` compare [master](https://pandas-docs.github.io/pandas-docs-travis/api/generated/pandas.DataFrame.set_index.html): > **keys** : *label or list of label* > > Name or names of the columns that will be used as the index. and [0.23.4](https://pandas.pydata.org/pandas-docs/version/0.23.4/generated/pandas.DataFrame.set_index.html): > **keys** : *column label or list of column labels / arrays* This is IMO a breaking change. I'd wait for the outcome of the discussion of #24046, but I feel that could easily fall under the table before the RC, so I wanted to provide a worked-out implementation. Equally importantly, #22486 added capabilities for lots of list-likes *within* a list to `df.set_index`, and has not seen a release yet. Therefore, deprecation would be much easier now, than after `0.24.0rc`. @jreback @TomAugspurger @jorisvandenbossche @toobaz @WillAyd
https://api.github.com/repos/pandas-dev/pandas/pulls/24697
2019-01-10T08:14:24Z
2019-03-10T15:27:25Z
null
2019-03-10T16:53:50Z
remove unused kwarg
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 70e4f44cb5de8..e7c851c256081 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -534,7 +534,7 @@ def astype(self, dtype, copy=False, errors='raise', values=None, **kwargs): **kwargs) def _astype(self, dtype, copy=False, errors='raise', values=None, - klass=None, **kwargs): + **kwargs): """Coerce to the new type Parameters @@ -599,14 +599,14 @@ def _astype(self, dtype, copy=False, errors='raise', values=None, return self.copy() return self - if klass is None: - if is_sparse(self.values): - # special case sparse, Series[Sparse].astype(object) is sparse - klass = ExtensionBlock - elif is_object_dtype(dtype): - klass = ObjectBlock - elif is_extension_array_dtype(dtype): - klass = ExtensionBlock + klass = None + if is_sparse(self.values): + # special case sparse, Series[Sparse].astype(object) is sparse + klass = ExtensionBlock + elif is_object_dtype(dtype): + klass = ObjectBlock + elif is_extension_array_dtype(dtype): + klass = ExtensionBlock try: # force the copy here
AFAICT `klass` is never passed to `astype`, so the kwarg can be removed. - [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/24695
2019-01-10T04:06:08Z
2019-01-10T16:37:11Z
2019-01-10T16:37:11Z
2020-04-05T17:44:11Z
fix interpreting int as second--> nano
diff --git a/doc/source/whatsnew/v0.24.0.rst b/doc/source/whatsnew/v0.24.0.rst index aee3d78243d2e..960b205c49c61 100644 --- a/doc/source/whatsnew/v0.24.0.rst +++ b/doc/source/whatsnew/v0.24.0.rst @@ -1309,6 +1309,7 @@ Deprecations - In :meth:`Series.where` with Categorical data, providing an ``other`` that is not present in the categories is deprecated. Convert the categorical to a different dtype or add the ``other`` to the categories first (:issue:`24077`). - :meth:`Series.clip_lower`, :meth:`Series.clip_upper`, :meth:`DataFrame.clip_lower` and :meth:`DataFrame.clip_upper` are deprecated and will be removed in a future version. Use ``Series.clip(lower=threshold)``, ``Series.clip(upper=threshold)`` and the equivalent ``DataFrame`` methods (:issue:`24203`) - :meth:`Series.nonzero` is deprecated and will be removed in a future version (:issue:`18262`) +- Passing an integer to :meth:`Series.fillna` and :meth:`DataFrame.fillna` with ``timedelta64[ns]`` dtypes is deprecated, will raise ``TypeError`` in a future version. Use ``obj.fillna(pd.Timedelta(...))` instead (:issue:`24694`) .. _whatsnew_0240.deprecations.datetimelike_int_ops: diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 70e4f44cb5de8..602f4856d5fc0 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -2499,8 +2499,14 @@ def _can_hold_element(self, element): def fillna(self, value, **kwargs): # allow filling with integers to be - # interpreted as seconds + # interpreted as nanoseconds if is_integer(value) and not isinstance(value, np.timedelta64): + # Deprecation GH#24694, GH#19233 + warnings.warn("Passing integers to fillna is deprecated, will " + "raise a TypeError in a future version. 
To retain " + "the old behavior, pass pd.Timedelta(seconds=n) " + "instead.", + FutureWarning, stacklevel=6) value = Timedelta(value, unit='s') return super(TimeDeltaBlock, self).fillna(value, **kwargs) diff --git a/pandas/tests/series/test_missing.py b/pandas/tests/series/test_missing.py index 90ef465c5f239..f4f16ff2d3ac1 100644 --- a/pandas/tests/series/test_missing.py +++ b/pandas/tests/series/test_missing.py @@ -65,14 +65,17 @@ def test_timedelta_fillna(self): td = s.diff() # reg fillna - result = td.fillna(0) + with tm.assert_produces_warning(FutureWarning): + result = td.fillna(0) expected = Series([timedelta(0), timedelta(0), timedelta(1), timedelta(days=1, seconds=9 * 3600 + 60 + 1)]) assert_series_equal(result, expected) - # interprested as seconds - result = td.fillna(1) - expected = Series([timedelta(seconds=1), timedelta(0), timedelta(1), + # interpreted as seconds, deprecated + with tm.assert_produces_warning(FutureWarning): + result = td.fillna(1) + expected = Series([timedelta(seconds=1), + timedelta(0), timedelta(1), timedelta(days=1, seconds=9 * 3600 + 60 + 1)]) assert_series_equal(result, expected) @@ -96,14 +99,16 @@ def test_timedelta_fillna(self): # ffill td[2] = np.nan result = td.ffill() - expected = td.fillna(0) + with tm.assert_produces_warning(FutureWarning): + expected = td.fillna(0) expected[0] = np.nan assert_series_equal(result, expected) # bfill td[2] = np.nan result = td.bfill() - expected = td.fillna(0) + with tm.assert_produces_warning(FutureWarning): + expected = td.fillna(0) expected[2] = timedelta(days=1, seconds=9 * 3600 + 60 + 1) assert_series_equal(result, expected)
The existing behavior was put in place in #4684, looks like wrestling with numpy 1.6 and 1.7 was rough. Not sure what this merits in the whatsnew, pls advise. - [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/24694
2019-01-10T03:58:08Z
2019-01-10T17:42:47Z
2019-01-10T17:42:47Z
2019-01-10T20:50:22Z
CLN: Removing find_undoc_args.py, validate_docstrings.py implements the same
diff --git a/scripts/find_undoc_args.py b/scripts/find_undoc_args.py deleted file mode 100755 index ea9541bfaed3a..0000000000000 --- a/scripts/find_undoc_args.py +++ /dev/null @@ -1,135 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -""" -Script that compares the signature arguments with the ones in the docsting -and returns the differences in plain text or GitHub task list format. - -Usage:: - $ ./find_undoc_args.py (see arguments below) -""" -from __future__ import print_function -import sys -from collections import namedtuple -import types -import os -import re -import argparse -import inspect - - -parser = argparse.ArgumentParser(description='Program description.') -parser.add_argument('-p', '--path', metavar='PATH', type=str, required=False, - default=None, action='store', - help='full path relative to which paths wills be reported') -parser.add_argument('-m', '--module', metavar='MODULE', type=str, - required=True, action='store', - help='name of package to import and examine') -parser.add_argument('-G', '--github_repo', metavar='REPO', type=str, - required=False, default=None, action='store', - help='github project where the code lives, ' - 'e.g. 
"pandas-dev/pandas"') -args = parser.parse_args() - -Entry = namedtuple('Entry', - 'func path lnum undoc_names missing_args ' - 'nsig_names ndoc_names') - - -def entry_gen(root_ns, module_name): - """Walk and yield all methods and functions in the module root_ns and - submodules.""" - q = [root_ns] - seen = set() - while q: - ns = q.pop() - for x in dir(ns): - cand = getattr(ns, x) - if (isinstance(cand, types.ModuleType) and - cand.__name__ not in seen and - cand.__name__.startswith(module_name)): - seen.add(cand.__name__) - q.insert(0, cand) - elif (isinstance(cand, (types.MethodType, types.FunctionType)) and - cand not in seen and cand.__doc__): - seen.add(cand) - yield cand - - -def cmp_docstring_sig(f): - """Return an `Entry` object describing the differences between the - arguments in the signature and the documented ones.""" - def build_loc(f): - path = f.__code__.co_filename.split(args.path, 1)[-1][1:] - return dict(path=path, lnum=f.__code__.co_firstlineno) - - sig_names = set(inspect.getargspec(f).args) - # XXX numpydoc can be used to get the list of parameters - doc = f.__doc__.lower() - doc = re.split(r'^\s*parameters\s*', doc, 1, re.M)[-1] - doc = re.split(r'^\s*returns*', doc, 1, re.M)[0] - doc_names = {x.split(":")[0].strip() for x in doc.split('\n') - if re.match(r'\s+[\w_]+\s*:', x)} - sig_names.discard('self') - doc_names.discard('kwds') - doc_names.discard('kwargs') - doc_names.discard('args') - return Entry(func=f, path=build_loc(f)['path'], lnum=build_loc(f)['lnum'], - undoc_names=sig_names.difference(doc_names), - missing_args=doc_names.difference(sig_names), - nsig_names=len(sig_names), ndoc_names=len(doc_names)) - - -def format_id(i): - return i - - -def format_item_as_github_task_list(i, item, repo): - tmpl = ('- [ ] {id_}) [{fname}:{lnum} ({func_name}())]({link}) - ' - '__Missing__[{nmissing}/{total_args}]: {undoc_names}') - link_tmpl = "https://github.com/{repo}/blob/master/{file}#L{lnum}" - link = link_tmpl.format(repo=repo, 
file=item.path, lnum=item.lnum) - s = tmpl.format(id_=i, fname=item.path, lnum=item.lnum, - func_name=item.func.__name__, link=link, - nmissing=len(item.undoc_names), - total_args=item.nsig_names, - undoc_names=list(item.undoc_names)) - if item.missing_args: - s += ' __Extra__(?): %s' % list(item.missing_args) - return s - - -def format_item_as_plain(i, item): - tmpl = ('+{lnum} {path} {func_name}(): ' - 'Missing[{nmissing}/{total_args}]={undoc_names}') - s = tmpl.format(path=item.path, lnum=item.lnum, - func_name=item.func.__name__, - nmissing=len(item.undoc_names), - total_args=item.nsig_names, - undoc_names=list(item.undoc_names)) - if item.missing_args: - s += ' Extra(?)=%s' % list(item.missing_args) - return s - - -def main(): - module = __import__(args.module) - if not args.path: - args.path = os.path.dirname(module.__file__) - collect = [cmp_docstring_sig(e) - for e in entry_gen(module, module.__name__)] - # only include if there are missing arguments in the docstring - # (fewer false positives) and there are at least some documented arguments - collect = [e for e in collect - if e.undoc_names and len(e.undoc_names) != e.nsig_names] - collect.sort(key=lambda x: x.path) - - if args.github_repo: - for i, item in enumerate(collect, 1): - print(format_item_as_github_task_list(i, item, args.github_repo)) - else: - for i, item in enumerate(collect, 1): - print(format_item_as_plain(i, item)) - - -if __name__ == '__main__': - sys.exit(main())
The script `find_undoc_args.py` was used to detect functions that the documented parameters did not match the signature ones. `validate_docstrings.py` implements the same validation, among many more. So the script is not needed anymore.
https://api.github.com/repos/pandas-dev/pandas/pulls/24693
2019-01-10T01:30:53Z
2019-01-10T11:00:45Z
2019-01-10T11:00:45Z
2019-01-10T11:00:45Z
DOC: Updated sample CI image in contributing docs
diff --git a/doc/source/_static/ci.png b/doc/source/_static/ci.png index 4570ed2155586..3a4225e3ce1eb 100644 Binary files a/doc/source/_static/ci.png and b/doc/source/_static/ci.png differ
The image of a green CI in https://pandas-docs.github.io/pandas-docs-travis/contributing.html#testing-with-continuous-integration became outdated (contains circleci and appveyor). Replaced by a current one with only travis and azure. ![pandas_ci_doc_screenshot](https://user-images.githubusercontent.com/10058240/50937863-8d87f880-146d-11e9-8a3b-6c32bcd25e36.png)
https://api.github.com/repos/pandas-dev/pandas/pulls/24692
2019-01-10T00:20:47Z
2019-01-10T00:57:14Z
2019-01-10T00:57:14Z
2019-01-10T00:59:11Z
DOC: Creating top-level development section, and moving pages inside
diff --git a/doc/source/contributing.rst b/doc/source/development/contributing.rst similarity index 99% rename from doc/source/contributing.rst rename to doc/source/development/contributing.rst index a68e5c70087e9..c9d6845107dfc 100644 --- a/doc/source/contributing.rst +++ b/doc/source/development/contributing.rst @@ -698,7 +698,7 @@ A pull-request will be considered for merging when you have an all 'green' build then you will get a red 'X', where you can click through to see the individual failed tests. This is an example of a green build. -.. image:: _static/ci.png +.. image:: ../_static/ci.png .. note:: diff --git a/doc/source/contributing_docstring.rst b/doc/source/development/contributing_docstring.rst similarity index 100% rename from doc/source/contributing_docstring.rst rename to doc/source/development/contributing_docstring.rst diff --git a/doc/source/developer.rst b/doc/source/development/developer.rst similarity index 100% rename from doc/source/developer.rst rename to doc/source/development/developer.rst diff --git a/doc/source/extending.rst b/doc/source/development/extending.rst similarity index 100% rename from doc/source/extending.rst rename to doc/source/development/extending.rst diff --git a/doc/source/development/index.rst b/doc/source/development/index.rst new file mode 100644 index 0000000000000..d67a6c3a2ca04 --- /dev/null +++ b/doc/source/development/index.rst @@ -0,0 +1,15 @@ +{{ header }} + +.. _development: + +=========== +Development +=========== + +.. 
toctree:: + :maxdepth: 2 + + contributing + internals + extending + developer diff --git a/doc/source/internals.rst b/doc/source/development/internals.rst similarity index 100% rename from doc/source/internals.rst rename to doc/source/development/internals.rst diff --git a/doc/source/index.rst.template b/doc/source/index.rst.template index b85150c3444b7..b40c0c18976ec 100644 --- a/doc/source/index.rst.template +++ b/doc/source/index.rst.template @@ -124,7 +124,6 @@ See the package overview for more detail about what's in the library. {% if not single_doc -%} What's New <whatsnew/v0.24.0> install - contributing overview 10min tutorials @@ -161,8 +160,6 @@ See the package overview for more detail about what's in the library. api/index {% endif -%} {% if not single_doc -%} - developer - internals - extending + development/index whatsnew/index {% endif -%} diff --git a/setup.cfg b/setup.cfg index 95c71826a80d4..6143cb8446216 100644 --- a/setup.cfg +++ b/setup.cfg @@ -47,7 +47,7 @@ ignore = E402, # module level import not at top of file exclude = doc/source/basics.rst - doc/source/contributing_docstring.rst + doc/source/development/contributing_docstring.rst [yapf]
- [X] xref #24499 - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/24691
2019-01-10T00:01:59Z
2019-01-23T16:23:19Z
2019-01-23T16:23:19Z
2019-01-23T16:23:19Z
do freq validation in DTA.__init__
diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index cfb697b3c357a..73e799f9e0a36 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -606,7 +606,7 @@ def _concat_same_type(cls, to_concat): def copy(self, deep=False): values = self.asi8.copy() - return type(self)(values, dtype=self.dtype, freq=self.freq) + return type(self)._simple_new(values, dtype=self.dtype, freq=self.freq) def _values_for_factorize(self): return self.asi8, iNaT diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index efa1757a989fc..d2d9fcf954fe3 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -258,6 +258,8 @@ def __init__(self, values, dtype=_NS_DTYPE, freq=None, copy=False): if isinstance(values, (ABCSeries, ABCIndexClass)): values = values._values + inferred_freq = getattr(values, "_freq", None) + if isinstance(values, type(self)): # validation dtz = getattr(dtype, 'tz', None) @@ -322,9 +324,20 @@ def __init__(self, values, dtype=_NS_DTYPE, freq=None, copy=False): self._dtype = dtype self._freq = freq + if inferred_freq is None and freq is not None: + type(self)._validate_frequency(self, freq) + @classmethod - def _simple_new(cls, values, freq=None, dtype=None): - return cls(values, freq=freq, dtype=dtype) + def _simple_new(cls, values, freq=None, dtype=_NS_DTYPE): + assert isinstance(values, np.ndarray) + if values.dtype == 'i8': + values = values.view(_NS_DTYPE) + + result = object.__new__(cls) + result._data = values + result._freq = freq + result._dtype = dtype + return result @classmethod def _from_sequence(cls, data, dtype=None, copy=False, diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 70e4f44cb5de8..15e8321c0c7f8 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -3078,7 +3078,7 @@ def make_block(values, placement, klass=None, ndim=None, dtype=None, elif klass is 
DatetimeTZBlock and not is_datetime64tz_dtype(values): # TODO: This is no longer hit internally; does it need to be retained # for e.g. pyarrow? - values = DatetimeArray(values, dtype) + values = DatetimeArray._simple_new(values, dtype=dtype) return klass(values, ndim=ndim, placement=placement) diff --git a/pandas/tests/arrays/test_datetimes.py b/pandas/tests/arrays/test_datetimes.py index 8228ed7652fea..60caf61782bbf 100644 --- a/pandas/tests/arrays/test_datetimes.py +++ b/pandas/tests/arrays/test_datetimes.py @@ -16,6 +16,16 @@ class TestDatetimeArrayConstructor(object): + def test_freq_validation(self): + # GH#24623 check that invalid instances cannot be created with the + # public constructor + arr = np.arange(5, dtype=np.int64) * 3600 * 10**9 + + msg = ("Inferred frequency H from passed values does not " + "conform to passed frequency W-SUN") + with pytest.raises(ValueError, match=msg): + DatetimeArray(arr, freq="W") + @pytest.mark.parametrize('meth', [DatetimeArray._from_sequence, sequence_to_dt64ns, pd.to_datetime,
- [ ] closes #xxxx - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/24686
2019-01-09T18:34:18Z
2019-01-10T21:27:35Z
2019-01-10T21:27:35Z
2019-01-10T23:04:08Z
BUG: Fix segfault in Categorical.set_categories
diff --git a/doc/source/whatsnew/v0.24.0.rst b/doc/source/whatsnew/v0.24.0.rst index 21712b1bf5b6c..8d635e2f9fba3 100644 --- a/doc/source/whatsnew/v0.24.0.rst +++ b/doc/source/whatsnew/v0.24.0.rst @@ -1501,6 +1501,7 @@ Categorical - Bug in :meth:`Series.where` losing the categorical dtype for categorical data (:issue:`24077`) - Bug in :meth:`Categorical.apply` where ``NaN`` values could be handled unpredictably. They now remain unchanged (:issue:`24241`) - Bug in :class:`Categorical` comparison methods incorrectly raising ``ValueError`` when operating against a :class:`DataFrame` (:issue:`24630`) +- Bug in :meth:`Categorical.set_categories` where setting fewer new categories with ``rename=True`` caused a segmentation fault (:issue:`24675`) Datetimelike ^^^^^^^^^^^^ diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index f88249d0fa6b2..1368232470402 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -852,9 +852,9 @@ def set_categories(self, new_categories, ordered=None, rename=False, if (cat.dtype.categories is not None and len(new_dtype.categories) < len(cat.dtype.categories)): # remove all _codes which are larger and set to -1/NaN - self._codes[self._codes >= len(new_dtype.categories)] = -1 + cat._codes[cat._codes >= len(new_dtype.categories)] = -1 else: - codes = _recode_for_categories(self.codes, self.categories, + codes = _recode_for_categories(cat.codes, cat.categories, new_dtype.categories) cat._codes = codes cat._dtype = new_dtype diff --git a/pandas/tests/arrays/categorical/test_api.py b/pandas/tests/arrays/categorical/test_api.py index 348bb947efef7..86dbc5ebf9fe1 100644 --- a/pandas/tests/arrays/categorical/test_api.py +++ b/pandas/tests/arrays/categorical/test_api.py @@ -310,6 +310,13 @@ def test_set_categories_many(self, values, categories, new_categories, result = c.set_categories(new_categories, ordered=ordered) tm.assert_categorical_equal(result, expected) + def 
test_set_categories_rename_less(self): + # GH 24675 + cat = Categorical(['A', 'B']) + result = cat.set_categories(['A'], rename=True) + expected = Categorical(['A', np.nan]) + tm.assert_categorical_equal(result, expected) + def test_set_categories_private(self): cat = Categorical(['a', 'b', 'c'], categories=['a', 'b', 'c', 'd']) cat._set_categories(['a', 'c', 'd', 'e'])
- [X] closes #24675 - [X] tests added / passed - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [X] whatsnew entry --- The fix basically amounts to changing `self._codes` --> `cat._codes` in the code block below: https://github.com/pandas-dev/pandas/blob/caf462c2a7699daf4b149d49f5aeaff822700113/pandas/core/arrays/categorical.py#L850-L855 Since the new object is being referred to as `cat` instead of `self`, the existing version didn't actually change the `_codes` of the resulting object. The segfault would occur when try to view the resulting `Categorical`, as you'd have `take_1d` trying to take out of bounds `_codes` here: https://github.com/pandas-dev/pandas/blob/caf462c2a7699daf4b149d49f5aeaff822700113/pandas/core/arrays/categorical.py#L1297
https://api.github.com/repos/pandas-dev/pandas/pulls/24680
2019-01-09T02:29:03Z
2019-01-09T12:11:18Z
2019-01-09T12:11:17Z
2019-01-09T15:47:35Z