Dataset columns (all string-valued; the `stringlengths` stats give the minimum and maximum length per column, and βŒ€ marks a nullable column):

| column     | min length | max length | nullable |
|------------|------------|------------|----------|
| title      | 1          | 185        | no       |
| diff       | 0          | 32.2M      | no       |
| body       | 0          | 123k       | yes      |
| url        | 57         | 58         | no       |
| created_at | 20         | 20         | no       |
| closed_at  | 20         | 20         | no       |
| merged_at  | 20         | 20         | yes      |
| updated_at | 20         | 20         | no       |
DOC: Implementing single doc building for api docs
diff --git a/doc/make.py b/doc/make.py index e3cb29aa3e086..3d4c0b4f4fbc7 100755 --- a/doc/make.py +++ b/doc/make.py @@ -14,10 +14,12 @@ import sys import os import shutil -import subprocess +# import subprocess import argparse from contextlib import contextmanager +import webbrowser import jinja2 +import pandas DOC_PATH = os.path.dirname(os.path.abspath(__file__)) @@ -26,28 +28,6 @@ BUILD_DIRS = ['doctrees', 'html', 'latex', 'plots', '_static', '_templates'] -def _generate_index(include_api, single_doc=None): - """Create index.rst file with the specified sections. - - Parameters - ---------- - include_api : bool - Whether API documentation will be built. - single_doc : str or None - If provided, this single documentation page will be generated. - """ - if single_doc is not None: - single_doc = os.path.splitext(os.path.basename(single_doc))[0] - include_api = False - - with open(os.path.join(SOURCE_PATH, 'index.rst.template')) as f: - t = jinja2.Template(f.read()) - - with open(os.path.join(SOURCE_PATH, 'index.rst'), 'w') as f: - f.write(t.render(include_api=include_api, - single_doc=single_doc)) - - @contextmanager def _maybe_exclude_notebooks(): """Skip building the notebooks if pandoc is not installed. @@ -58,6 +38,7 @@ def _maybe_exclude_notebooks(): 1. nbconvert isn't installed, or 2. nbconvert is installed, but pandoc isn't """ + # TODO move to exclude_pattern base = os.path.dirname(__file__) notebooks = [os.path.join(base, 'source', nb) for nb in ['style.ipynb']] @@ -96,8 +77,73 @@ class DocBuilder: All public methods of this class can be called as parameters of the script. """ - def __init__(self, num_jobs=1): + def __init__(self, num_jobs=1, include_api=True, single_doc=None): self.num_jobs = num_jobs + self.include_api = include_api + self.single_doc = single_doc + self.single_doc_type = self._single_doc_type + self.exclude_patterns = self._exclude_patterns + + self._generate_index() + if self.single_doc_type == 'api': + self._run_os('sphinx-autogen', '-o', + 'source/generated_single', 'source/index.rst') + + @property + def _exclude_patterns(self): + """Docs source files that will be excluded from building.""" + # TODO move maybe_exclude_notebooks here + if self.single_doc is not None: + rst_files = [f for f in os.listdir(SOURCE_PATH) + if ((f.endswith('.rst') or f.endswith('.ipynb')) + and (f != 'index.rst') + and (f != self.single_doc))] + rst_files += ['generated/*.rst'] + elif not self.include_api: + rst_files = ['api.rst', 'generated/*.rst'] + else: + rst_files = ['generated_single/*.rst'] + + exclude_patterns = ','.join( + '{!r}'.format(i) for i in ['**.ipynb_checkpoints'] + rst_files) + + return exclude_patterns + + @property + def _single_doc_type(self): + if self.single_doc: + if os.path.exists(os.path.join(SOURCE_PATH, self.single_doc)): + return 'rst' + try: + obj = pandas + for name in self.single_doc.split('.'): + obj = getattr(obj, name) + except AttributeError: + raise ValueError('Single document not understood, it should ' + 'be a file in doc/source/*.rst (e.g. ' + '"contributing.rst" or a pandas function or ' + 'method (e.g. 
"pandas.DataFrame.head")') + else: + return 'api' + + def _generate_index(self): + """Create index.rst file with the specified sections.""" + if self.single_doc_type == 'rst': + single_doc = os.path.splitext(os.path.basename(self.single_doc))[0] + self.include_api = False + elif self.single_doc_type == 'api' and \ + self.single_doc.startswith('pandas.'): + single_doc = self.single_doc[len('pandas.'):] + else: + single_doc = self.single_doc + + with open(os.path.join(SOURCE_PATH, 'index.rst.template')) as f: + t = jinja2.Template(f.read()) + + with open(os.path.join(SOURCE_PATH, 'index.rst'), 'w') as f: + f.write(t.render(include_api=self.include_api, + single_doc=single_doc, + single_doc_type=self.single_doc_type)) @staticmethod def _create_build_structure(): @@ -121,7 +167,10 @@ def _run_os(*args): -------- >>> DocBuilder()._run_os('python', '--version') """ - subprocess.check_call(args, stderr=subprocess.STDOUT) + # TODO check_call should be more safe, but it fails with + # exclude patterns, needs investigation + # subprocess.check_call(args, stderr=subprocess.STDOUT) + os.system(' '.join(args)) def _sphinx_build(self, kind): """Call sphinx to build documentation. @@ -142,11 +191,17 @@ def _sphinx_build(self, kind): self._run_os('sphinx-build', '-j{}'.format(self.num_jobs), '-b{}'.format(kind), - '-d{}'.format(os.path.join(BUILD_PATH, - 'doctrees')), + '-d{}'.format(os.path.join(BUILD_PATH, 'doctrees')), + '-Dexclude_patterns={}'.format(self.exclude_patterns), SOURCE_PATH, os.path.join(BUILD_PATH, kind)) + def _open_browser(self): + url = os.path.join( + 'file://', DOC_PATH, 'build', 'html', + 'generated_single', '{}.html'.format(self.single_doc)) + webbrowser.open(url, new=2) + def html(self): """Build HTML documentation.""" self._create_build_structure() @@ -156,6 +211,9 @@ def html(self): if os.path.exists(zip_fname): os.remove(zip_fname) + if self.single_doc is not None: + self._open_browser() + def latex(self, force=False): """Build PDF documentation.""" self._create_build_structure() @@ -228,6 +286,13 @@ def main(): type=str, default=os.path.join(DOC_PATH, '..'), help='path') + argparser.add_argument('--docstring', + metavar='FILENAME', + type=str, + nargs='*', + default=None, + help=('method or function name to compile, ' + 'e.g. "DataFrame.join"')) args = argparser.parse_args() if args.command not in cmds: @@ -235,8 +300,10 @@ def main(): args.command, ', '.join(cmds))) os.environ['PYTHONPATH'] = args.python_path - _generate_index(not args.no_api, args.single) - getattr(DocBuilder(args.num_jobs), args.command)() + + getattr(DocBuilder(args.num_jobs, + args.no_api, + args.single), args.command)() if __name__ == '__main__': diff --git a/doc/source/conf.py b/doc/source/conf.py index b5fbf096f2626..cf4ac6952dc30 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -86,38 +86,6 @@ if any(re.match("\s*api\s*", l) for l in index_rst_lines): autosummary_generate = True -files_to_delete = [] -for f in os.listdir(os.path.dirname(__file__)): - if (not f.endswith(('.ipynb', '.rst')) or - f.startswith('.') or os.path.basename(f) == 'index.rst'): - continue - - _file_basename = os.path.splitext(f)[0] - _regex_to_match = "\s*{}\s*$".format(_file_basename) - if not any(re.match(_regex_to_match, line) for line in index_rst_lines): - files_to_delete.append(f) - -if files_to_delete: - print("I'm about to DELETE the following:\n{}\n".format( - list(sorted(files_to_delete)))) - sys.stdout.write("WARNING: I'd like to delete those " - "to speed up processing (yes/no)? 
") - if PY3: - answer = input() - else: - answer = raw_input() - - if answer.lower().strip() in ('y', 'yes'): - for f in files_to_delete: - f = os.path.join(os.path.join(os.path.dirname(__file__), f)) - f = os.path.abspath(f) - try: - print("Deleting {}".format(f)) - os.unlink(f) - except: - print("Error deleting {}".format(f)) - pass - # Add any paths that contain templates here, relative to this directory. templates_path = ['../_templates'] diff --git a/doc/source/index.rst.template b/doc/source/index.rst.template index eff1227e98994..7d11218564e32 100644 --- a/doc/source/index.rst.template +++ b/doc/source/index.rst.template @@ -106,8 +106,13 @@ Some other notes See the package overview for more detail about what's in the library. +{% if single_doc_type == 'api' -%} +.. autosummary:: + :toctree: generated_single/ +{% else -%} .. toctree:: :maxdepth: 4 +{% endif %} {% if single_doc -%} {{ single_doc }}
Same as #19840, following @jorisvandenbossche's approach, but using the `--single` argument instead of separate functionality for docstrings.
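A minimal usage sketch (not part of the PR) of how the new single-doc path is driven, mirroring the `DocBuilder` signature and the `main()` call in the diff. Importing `DocBuilder` directly from `doc/make.py` is an assumption for illustration; the script is normally invoked from the command line.

```python
# Hypothetical sketch: drive the single-doc build programmatically,
# mirroring what main() in doc/make.py does with the parsed arguments.
from make import DocBuilder  # assumes doc/make.py is importable

# 'pandas.DataFrame.head' is resolved by getattr-walking the pandas
# namespace, so _single_doc_type returns 'api' and __init__ runs
# sphinx-autogen into source/generated_single.
builder = DocBuilder(num_jobs=1, include_api=True,
                     single_doc='pandas.DataFrame.head')
builder.html()  # builds the page and opens it in a browser
```

From the shell this presumably corresponds to `python doc/make.py html --single pandas.DataFrame.head`, reusing the script's existing `--single` option.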
https://api.github.com/repos/pandas-dev/pandas/pulls/19864
2018-02-23T12:16:35Z
2018-02-26T20:52:13Z
null
2023-05-11T01:17:27Z
REF: Base class for all extension tests
diff --git a/ci/lint.sh b/ci/lint.sh index fcd65fc5aba5e..545ac9c90c5c1 100755 --- a/ci/lint.sh +++ b/ci/lint.sh @@ -111,6 +111,15 @@ if [ "$LINT" ]; then RET=1 fi + # Check for the following code in the extension array base tests + # tm.assert_frame_equal + # tm.assert_series_equal + grep -r -E --include '*.py' --exclude base.py 'tm.assert_(series|frame)_equal' pandas/tests/extension/base + + if [ $? = "0" ]; then + RET=1 + fi + echo "Check for invalid testing DONE" # Check for imports from pandas.core.common instead diff --git a/pandas/tests/extension/base/__init__.py b/pandas/tests/extension/base/__init__.py index 2273ef1f3e110..27c106efd0524 100644 --- a/pandas/tests/extension/base/__init__.py +++ b/pandas/tests/extension/base/__init__.py @@ -31,6 +31,14 @@ class TestMyDtype(BaseDtypeTests): Your class ``TestDtype`` will inherit all the tests defined on ``BaseDtypeTests``. pytest's fixture discover will supply your ``dtype`` wherever the test requires it. You're free to implement additional tests. + +All the tests in these modules use ``self.assert_frame_equal`` or +``self.assert_series_equal`` for dataframe or series comparisons. By default, +they use the usual ``pandas.testing.assert_frame_equal`` and +``pandas.testing.assert_series_equal``. You can override the checks used +by defining the staticmethods ``assert_frame_equal`` and +``assert_series_equal`` on your base test class. + """ from .casting import BaseCastingTests # noqa from .constructors import BaseConstructorsTests # noqa diff --git a/pandas/tests/extension/base/base.py b/pandas/tests/extension/base/base.py new file mode 100644 index 0000000000000..d29587e635ebd --- /dev/null +++ b/pandas/tests/extension/base/base.py @@ -0,0 +1,6 @@ +import pandas.util.testing as tm + + +class BaseExtensionTests(object): + assert_series_equal = staticmethod(tm.assert_series_equal) + assert_frame_equal = staticmethod(tm.assert_frame_equal) diff --git a/pandas/tests/extension/base/casting.py b/pandas/tests/extension/base/casting.py index bcfbf0a247269..adc690939b36c 100644 --- a/pandas/tests/extension/base/casting.py +++ b/pandas/tests/extension/base/casting.py @@ -1,8 +1,10 @@ import pandas as pd from pandas.core.internals import ObjectBlock +from .base import BaseExtensionTests -class BaseCastingTests(object): + +class BaseCastingTests(BaseExtensionTests): """Casting to and from ExtensionDtypes""" def test_astype_object_series(self, all_data): diff --git a/pandas/tests/extension/base/constructors.py b/pandas/tests/extension/base/constructors.py index 7ad100e6289e9..2d5d747aec5a7 100644 --- a/pandas/tests/extension/base/constructors.py +++ b/pandas/tests/extension/base/constructors.py @@ -4,8 +4,10 @@ import pandas.util.testing as tm from pandas.core.internals import ExtensionBlock +from .base import BaseExtensionTests -class BaseConstructorsTests(object): + +class BaseConstructorsTests(BaseExtensionTests): def test_series_constructor(self, data): result = pd.Series(data) diff --git a/pandas/tests/extension/base/dtype.py b/pandas/tests/extension/base/dtype.py index f5015bd469f13..63d3d807c270c 100644 --- a/pandas/tests/extension/base/dtype.py +++ b/pandas/tests/extension/base/dtype.py @@ -1,8 +1,10 @@ import numpy as np import pandas as pd +from .base import BaseExtensionTests -class BaseDtypeTests(object): + +class BaseDtypeTests(BaseExtensionTests): """Base class for ExtensionDtype classes""" def test_name(self, dtype): diff --git a/pandas/tests/extension/base/getitem.py b/pandas/tests/extension/base/getitem.py index 
f43971e928cac..31ed8b9e01225 100644 --- a/pandas/tests/extension/base/getitem.py +++ b/pandas/tests/extension/base/getitem.py @@ -1,20 +1,21 @@ import numpy as np import pandas as pd -import pandas.util.testing as tm +from .base import BaseExtensionTests -class BaseGetitemTests(object): + +class BaseGetitemTests(BaseExtensionTests): """Tests for ExtensionArray.__getitem__.""" def test_iloc_series(self, data): ser = pd.Series(data) result = ser.iloc[:4] expected = pd.Series(data[:4]) - tm.assert_series_equal(result, expected) + self.assert_series_equal(result, expected) result = ser.iloc[[0, 1, 2, 3]] - tm.assert_series_equal(result, expected) + self.assert_series_equal(result, expected) def test_iloc_frame(self, data): df = pd.DataFrame({"A": data, 'B': np.arange(len(data))}) @@ -22,30 +23,30 @@ def test_iloc_frame(self, data): # slice -> frame result = df.iloc[:4, [0]] - tm.assert_frame_equal(result, expected) + self.assert_frame_equal(result, expected) # sequence -> frame result = df.iloc[[0, 1, 2, 3], [0]] - tm.assert_frame_equal(result, expected) + self.assert_frame_equal(result, expected) expected = pd.Series(data[:4], name='A') # slice -> series result = df.iloc[:4, 0] - tm.assert_series_equal(result, expected) + self.assert_series_equal(result, expected) # sequence -> series result = df.iloc[:4, 0] - tm.assert_series_equal(result, expected) + self.assert_series_equal(result, expected) def test_loc_series(self, data): ser = pd.Series(data) result = ser.loc[:3] expected = pd.Series(data[:4]) - tm.assert_series_equal(result, expected) + self.assert_series_equal(result, expected) result = ser.loc[[0, 1, 2, 3]] - tm.assert_series_equal(result, expected) + self.assert_series_equal(result, expected) def test_loc_frame(self, data): df = pd.DataFrame({"A": data, 'B': np.arange(len(data))}) @@ -53,21 +54,21 @@ def test_loc_frame(self, data): # slice -> frame result = df.loc[:3, ['A']] - tm.assert_frame_equal(result, expected) + self.assert_frame_equal(result, expected) # sequence -> frame result = df.loc[[0, 1, 2, 3], ['A']] - tm.assert_frame_equal(result, expected) + self.assert_frame_equal(result, expected) expected = pd.Series(data[:4], name='A') # slice -> series result = df.loc[:3, 'A'] - tm.assert_series_equal(result, expected) + self.assert_series_equal(result, expected) # sequence -> series result = df.loc[:3, 'A'] - tm.assert_series_equal(result, expected) + self.assert_series_equal(result, expected) def test_getitem_scalar(self, data): result = data[0] diff --git a/pandas/tests/extension/base/interface.py b/pandas/tests/extension/base/interface.py index 8f17131a9482b..e1596f0675f32 100644 --- a/pandas/tests/extension/base/interface.py +++ b/pandas/tests/extension/base/interface.py @@ -5,8 +5,10 @@ from pandas.core.dtypes.common import is_extension_array_dtype from pandas.core.dtypes.dtypes import ExtensionDtype +from .base import BaseExtensionTests -class BaseInterfaceTests(object): + +class BaseInterfaceTests(BaseExtensionTests): """Tests that the basic interface is satisfied.""" # ------------------------------------------------------------------------ # Interface diff --git a/pandas/tests/extension/base/methods.py b/pandas/tests/extension/base/methods.py index c77811ca63926..74e5d180b1aa3 100644 --- a/pandas/tests/extension/base/methods.py +++ b/pandas/tests/extension/base/methods.py @@ -2,10 +2,11 @@ import numpy as np import pandas as pd -import pandas.util.testing as tm +from .base import BaseExtensionTests -class BaseMethodsTests(object): + +class 
BaseMethodsTests(BaseExtensionTests): """Various Series and DataFrame methods.""" @pytest.mark.parametrize('dropna', [True, False]) @@ -19,13 +20,13 @@ def test_value_counts(self, all_data, dropna): result = pd.Series(all_data).value_counts(dropna=dropna).sort_index() expected = pd.Series(other).value_counts(dropna=dropna).sort_index() - tm.assert_series_equal(result, expected) + self.assert_series_equal(result, expected) def test_count(self, data_missing): df = pd.DataFrame({"A": data_missing}) result = df.count(axis='columns') expected = pd.Series([0, 1]) - tm.assert_series_equal(result, expected) + self.assert_series_equal(result, expected) def test_apply_simple_series(self, data): result = pd.Series(data).apply(id) diff --git a/pandas/tests/extension/base/missing.py b/pandas/tests/extension/base/missing.py index 1d6f2eea1f1f9..3ae82fa1ca432 100644 --- a/pandas/tests/extension/base/missing.py +++ b/pandas/tests/extension/base/missing.py @@ -3,8 +3,10 @@ import pandas as pd import pandas.util.testing as tm +from .base import BaseExtensionTests -class BaseMissingTests(object): + +class BaseMissingTests(BaseExtensionTests): def test_isna(self, data_missing): if data_missing._can_hold_na: expected = np.array([True, False]) @@ -16,13 +18,13 @@ def test_isna(self, data_missing): result = pd.Series(data_missing).isna() expected = pd.Series(expected) - tm.assert_series_equal(result, expected) + self.assert_series_equal(result, expected) def test_dropna_series(self, data_missing): ser = pd.Series(data_missing) result = ser.dropna() expected = ser.iloc[[1]] - tm.assert_series_equal(result, expected) + self.assert_series_equal(result, expected) def test_dropna_frame(self, data_missing): df = pd.DataFrame({"A": data_missing}) @@ -30,16 +32,16 @@ def test_dropna_frame(self, data_missing): # defaults result = df.dropna() expected = df.iloc[[1]] - tm.assert_frame_equal(result, expected) + self.assert_frame_equal(result, expected) # axis = 1 result = df.dropna(axis='columns') expected = pd.DataFrame(index=[0, 1]) - tm.assert_frame_equal(result, expected) + self.assert_frame_equal(result, expected) # multiple df = pd.DataFrame({"A": data_missing, "B": [1, np.nan]}) result = df.dropna() expected = df.iloc[:0] - tm.assert_frame_equal(result, expected) + self.assert_frame_equal(result, expected) diff --git a/pandas/tests/extension/base/reshaping.py b/pandas/tests/extension/base/reshaping.py index d8f577c6fa50d..cfb70f2291555 100644 --- a/pandas/tests/extension/base/reshaping.py +++ b/pandas/tests/extension/base/reshaping.py @@ -1,11 +1,12 @@ import pytest import pandas as pd -import pandas.util.testing as tm from pandas.core.internals import ExtensionBlock +from .base import BaseExtensionTests -class BaseReshapingTests(object): + +class BaseReshapingTests(BaseExtensionTests): """Tests for reshaping and concatenation.""" @pytest.mark.parametrize('in_frame', [True, False]) def test_concat(self, data, in_frame): @@ -32,8 +33,8 @@ def test_align(self, data, na_value): # Assumes that the ctor can take a list of scalars of the type e1 = pd.Series(type(data)(list(a) + [na_value])) e2 = pd.Series(type(data)([na_value] + list(b))) - tm.assert_series_equal(r1, e1) - tm.assert_series_equal(r2, e2) + self.assert_series_equal(r1, e1) + self.assert_series_equal(r2, e2) def test_align_frame(self, data, na_value): a = data[:3] @@ -45,17 +46,17 @@ def test_align_frame(self, data, na_value): # Assumes that the ctor can take a list of scalars of the type e1 = pd.DataFrame({'A': type(data)(list(a) + [na_value])}) e2 = 
pd.DataFrame({'A': type(data)([na_value] + list(b))}) - tm.assert_frame_equal(r1, e1) - tm.assert_frame_equal(r2, e2) + self.assert_frame_equal(r1, e1) + self.assert_frame_equal(r2, e2) def test_set_frame_expand_regular_with_extension(self, data): df = pd.DataFrame({"A": [1] * len(data)}) df['B'] = data expected = pd.DataFrame({"A": [1] * len(data), "B": data}) - tm.assert_frame_equal(df, expected) + self.assert_frame_equal(df, expected) def test_set_frame_expand_extension_with_regular(self, data): df = pd.DataFrame({'A': data}) df['B'] = [1] * len(data) expected = pd.DataFrame({"A": data, "B": [1] * len(data)}) - tm.assert_frame_equal(df, expected) + self.assert_frame_equal(df, expected)
Added the staticmethods

- `assert_series_equal`
- `assert_frame_equal`

to the base class. Useful for test cases like `DecimalArray` with NaNs, since our `assert_*_equal` methods don't properly handle Decimal NaNs.

- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
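A minimal sketch (not from the PR) of the override hook this adds: a downstream suite can shadow the staticmethods on its own mixin. The `Decimal`-aware comparison below is an assumption for illustration, since `Decimal('NaN')` compares unequal to itself and trips up the default checks.

```python
import pandas.util.testing as tm
from pandas.tests.extension import base


class DecimalComparisons(object):
    # Shadow the staticmethod from BaseExtensionTests (illustrative).
    @staticmethod
    def assert_series_equal(left, right, *args, **kwargs):
        # Decimal('NaN') != Decimal('NaN'), so compare the NaN masks
        # first, then compare only the non-missing values.
        left_na = left.apply(lambda x: x != x)
        right_na = right.apply(lambda x: x != x)
        tm.assert_series_equal(left_na, right_na)
        tm.assert_series_equal(left[~left_na], right[~right_na],
                               *args, **kwargs)


class TestDecimalMissing(DecimalComparisons, base.BaseMissingTests):
    """Inherits the missing-value tests, but with NaN-tolerant checks."""
```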
https://api.github.com/repos/pandas-dev/pandas/pulls/19863
2018-02-23T12:09:25Z
2018-02-24T15:19:19Z
2018-02-24T15:19:19Z
2018-05-02T13:10:24Z
ENH: Let initialisation from dicts use insertion order for python >= 3.6 (part II)
diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index 4cf7c8013aa2b..129ac6b06205c 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -99,9 +99,9 @@ def max_value(group): applied = df.groupby('A').apply(max_value) result = applied.get_dtype_counts().sort_values() - expected = Series({'object': 2, - 'float64': 2, - 'int64': 1}).sort_values() + expected = Series({'float64': 2, + 'int64': 1, + 'object': 2}).sort_values() assert_series_equal(result, expected) def test_groupby_return_type(self): @@ -244,7 +244,7 @@ def func_with_no_date(batch): return pd.Series({'c': 2}) def func_with_date(batch): - return pd.Series({'c': 2, 'b': datetime(2015, 1, 1)}) + return pd.Series({'b': datetime(2015, 1, 1), 'c': 2}) dfg_no_conversion = df.groupby(by=['a']).apply(func_with_no_date) dfg_no_conversion_expected = pd.DataFrame({'c': 2}, index=[1]) @@ -1628,8 +1628,8 @@ def f(g): def test_apply_with_mixed_dtype(self): # GH3480, apply with mixed dtype on axis=1 breaks in 0.11 - df = DataFrame({'foo1': ['one', 'two', 'two', 'three', 'one', 'two'], - 'foo2': np.random.randn(6)}) + df = DataFrame({'foo1': np.random.randn(6), + 'foo2': ['one', 'two', 'two', 'three', 'one', 'two']}) result = df.apply(lambda x: x, axis=1) assert_series_equal(df.get_dtype_counts(), result.get_dtype_counts()) @@ -2113,10 +2113,10 @@ def test_multifunc_sum_bug(self): def test_handle_dict_return_value(self): def f(group): - return {'min': group.min(), 'max': group.max()} + return {'max': group.max(), 'min': group.min()} def g(group): - return Series({'min': group.min(), 'max': group.max()}) + return Series({'max': group.max(), 'min': group.min()}) result = self.df.groupby('A')['C'].apply(f) expected = self.df.groupby('A')['C'].apply(g) diff --git a/pandas/tests/groupby/test_transform.py b/pandas/tests/groupby/test_transform.py index 4159d0f709a13..1be7dfdcc64e6 100644 --- a/pandas/tests/groupby/test_transform.py +++ b/pandas/tests/groupby/test_transform.py @@ -519,7 +519,9 @@ def test_cython_transform_frame(self, op, args, targop): 'timedelta': pd.timedelta_range(1, freq='s', periods=1000), 'string': strings * 50, - 'string_missing': strings_missing * 50}) + 'string_missing': strings_missing * 50}, + columns=['float', 'float_missing', 'int', 'datetime', + 'timedelta', 'string', 'string_missing']) df['cat'] = df['string'].astype('category') df2 = df.copy() @@ -552,7 +554,9 @@ def test_cython_transform_frame(self, op, args, targop): tm.assert_frame_equal(expected, gb.transform(op, *args).sort_index( axis=1)) - tm.assert_frame_equal(expected, getattr(gb, op)(*args)) + tm.assert_frame_equal( + expected, + getattr(gb, op)(*args).sort_index(axis=1)) # individual columns for c in df: if c not in ['float', 'int', 'float_missing' diff --git a/pandas/tests/indexing/test_ix.py b/pandas/tests/indexing/test_ix.py index 3f71e673a4ffe..c84576c984525 100644 --- a/pandas/tests/indexing/test_ix.py +++ b/pandas/tests/indexing/test_ix.py @@ -53,13 +53,15 @@ def test_ix_loc_setitem_consistency(self): # GH 8607 # ix setitem consistency - df = DataFrame({'timestamp': [1413840976, 1413842580, 1413760580], - 'delta': [1174, 904, 161], - 'elapsed': [7673, 9277, 1470]}) - expected = DataFrame({'timestamp': pd.to_datetime( - [1413840976, 1413842580, 1413760580], unit='s'), - 'delta': [1174, 904, 161], - 'elapsed': [7673, 9277, 1470]}) + df = DataFrame({'delta': [1174, 904, 161], + 'elapsed': [7673, 9277, 1470], + 'timestamp': [1413840976, 1413842580, 1413760580]}) + expected = 
DataFrame({'delta': [1174, 904, 161], + 'elapsed': [7673, 9277, 1470], + 'timestamp': pd.to_datetime( + [1413840976, 1413842580, 1413760580], + unit='s') + }) df2 = df.copy() df2['timestamp'] = pd.to_datetime(df['timestamp'], unit='s') diff --git a/pandas/tests/io/formats/test_format.py b/pandas/tests/io/formats/test_format.py index dddba5b425c3b..03c071dbe4bc5 100644 --- a/pandas/tests/io/formats/test_format.py +++ b/pandas/tests/io/formats/test_format.py @@ -539,8 +539,8 @@ def test_east_asian_unicode_frame(self): assert _rep(df) == expected # column name - df = DataFrame({u'あああああ': [1, 222, 33333, 4], - 'b': [u'あ', u'いいい', u'う', u'ええええええ']}, + df = DataFrame({'b': [u'あ', u'いいい', u'う', u'ええええええ'], + u'あああああ': [1, 222, 33333, 4]}, index=['a', 'bb', 'c', 'ddd']) expected = (u" b あああああ\na あ 1\n" u"bb いいい 222\nc う 33333\n" @@ -647,8 +647,8 @@ def test_east_asian_unicode_frame(self): assert _rep(df) == expected # column name - df = DataFrame({u'あああああ': [1, 222, 33333, 4], - 'b': [u'あ', u'いいい', u'う', u'ええええええ']}, + df = DataFrame({'b': [u'あ', u'いいい', u'う', u'ええええええ'], + u'あああああ': [1, 222, 33333, 4]}, index=['a', 'bb', 'c', 'ddd']) expected = (u" b あああああ\n" u"a あ 1\n" @@ -733,8 +733,8 @@ def test_east_asian_unicode_frame(self): assert _rep(df) == expected # ambiguous unicode - df = DataFrame({u'あああああ': [1, 222, 33333, 4], - 'b': [u'あ', u'いいい', u'‘‘', u'ええええええ']}, + df = DataFrame({'b': [u'あ', u'いいい', u'‘‘', u'ええええええ'], + u'あああああ': [1, 222, 33333, 4]}, index=['a', 'bb', 'c', '‘‘‘']) expected = (u" b あああああ\n" u"a あ 1\n" diff --git a/pandas/tests/io/formats/test_to_latex.py b/pandas/tests/io/formats/test_to_latex.py index f266a8b3a3268..5ebf196be094e 100644 --- a/pandas/tests/io/formats/test_to_latex.py +++ b/pandas/tests/io/formats/test_to_latex.py @@ -115,17 +115,18 @@ def test_to_latex_empty(self): assert result == expected def test_to_latex_with_formatters(self): - df = DataFrame({'int': [1, 2, 3], + df = DataFrame({'datetime64': [datetime(2016, 1, 1), + datetime(2016, 2, 5), + datetime(2016, 3, 3)], 'float': [1.0, 2.0, 3.0], + 'int': [1, 2, 3], 'object': [(1, 2), True, False], - 'datetime64': [datetime(2016, 1, 1), - datetime(2016, 2, 5), - datetime(2016, 3, 3)]}) + }) - formatters = {'int': lambda x: '0x{x:x}'.format(x=x), + formatters = {'datetime64': lambda x: x.strftime('%Y-%m'), 'float': lambda x: '[{x: 4.1f}]'.format(x=x), + 'int': lambda x: '0x{x:x}'.format(x=x), 'object': lambda x: '-{x!s}-'.format(x=x), - 'datetime64': lambda x: x.strftime('%Y-%m'), '__index__': lambda x: 'index: {x}'.format(x=x)} result = df.to_latex(formatters=dict(formatters)) @@ -347,10 +348,10 @@ def test_to_latex_escape(self): a = 'a' b = 'b' - test_dict = {u('co^l1'): {a: "a", - b: "b"}, - u('co$e^x$'): {a: "a", - b: "b"}} + test_dict = {u('co$e^x$'): {a: "a", + b: "b"}, + u('co^l1'): {a: "a", + b: "b"}} unescaped_result = DataFrame(test_dict).to_latex(escape=False) escaped_result = DataFrame(test_dict).to_latex( diff --git a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py index a72744e08fa7c..7e497c395266f 100644 --- a/pandas/tests/io/json/test_pandas.py +++ b/pandas/tests/io/json/test_pandas.py @@ -553,7 +553,7 @@ def __str__(self): def test_label_overflow(self): # GH14256: buffer length not checked when writing label - df = pd.DataFrame({'foo': [1337], 'bar' * 100000: [1]}) + df = pd.DataFrame({'bar' * 100000: [1], 'foo': [1337]}) assert df.to_json() == \ '{{"{bar}":{{"0":1}},"foo":{{"0":1337}}}}'.format( bar=('bar' * 100000)) diff --git a/pandas/tests/reshape/merge/test_merge.py 
b/pandas/tests/reshape/merge/test_merge.py index 101d34ebdb89f..5dca45c8dd8bb 100644 --- a/pandas/tests/reshape/merge/test_merge.py +++ b/pandas/tests/reshape/merge/test_merge.py @@ -588,18 +588,18 @@ def test_merge_on_datetime64tz(self): result = pd.merge(left, right, on='key', how='outer') assert_frame_equal(result, expected) - left = pd.DataFrame({'value': pd.date_range('20151010', periods=2, - tz='US/Eastern'), - 'key': [1, 2]}) - right = pd.DataFrame({'value': pd.date_range('20151011', periods=2, - tz='US/Eastern'), - 'key': [2, 3]}) + left = pd.DataFrame({'key': [1, 2], + 'value': pd.date_range('20151010', periods=2, + tz='US/Eastern')}) + right = pd.DataFrame({'key': [2, 3], + 'value': pd.date_range('20151011', periods=2, + tz='US/Eastern')}) expected = DataFrame({ + 'key': [1, 2, 3], 'value_x': list(pd.date_range('20151010', periods=2, tz='US/Eastern')) + [pd.NaT], 'value_y': [pd.NaT] + list(pd.date_range('20151011', periods=2, - tz='US/Eastern')), - 'key': [1, 2, 3]}) + tz='US/Eastern'))}) result = pd.merge(left, right, on='key', how='outer') assert_frame_equal(result, expected) assert result['value_x'].dtype == 'datetime64[ns, US/Eastern]' @@ -632,18 +632,18 @@ def test_merge_on_periods(self): result = pd.merge(left, right, on='key', how='outer') assert_frame_equal(result, expected) - left = pd.DataFrame({'value': pd.period_range('20151010', periods=2, - freq='D'), - 'key': [1, 2]}) - right = pd.DataFrame({'value': pd.period_range('20151011', periods=2, - freq='D'), - 'key': [2, 3]}) + left = pd.DataFrame({'key': [1, 2], + 'value': pd.period_range('20151010', periods=2, + freq='D')}) + right = pd.DataFrame({'key': [2, 3], + 'value': pd.period_range('20151011', periods=2, + freq='D')}) exp_x = pd.period_range('20151010', periods=2, freq='D') exp_y = pd.period_range('20151011', periods=2, freq='D') - expected = DataFrame({'value_x': list(exp_x) + [pd.NaT], - 'value_y': [pd.NaT] + list(exp_y), - 'key': [1, 2, 3]}) + expected = DataFrame({'key': [1, 2, 3], + 'value_x': list(exp_x) + [pd.NaT], + 'value_y': [pd.NaT] + list(exp_y)}) result = pd.merge(left, right, on='key', how='outer') assert_frame_equal(result, expected) assert result['value_x'].dtype == 'object' @@ -651,12 +651,13 @@ def test_merge_on_periods(self): def test_indicator(self): # PR #10054. xref #7412 and closes #8790. 
- df1 = DataFrame({'col1': [0, 1], 'col_left': [ - 'a', 'b'], 'col_conflict': [1, 2]}) + df1 = DataFrame({'col1': [0, 1], 'col_conflict': [1, 2], + 'col_left': ['a', 'b']}) df1_copy = df1.copy() - df2 = DataFrame({'col1': [1, 2, 3, 4, 5], 'col_right': [2, 2, 2, 2, 2], - 'col_conflict': [1, 2, 3, 4, 5]}) + df2 = DataFrame({'col1': [1, 2, 3, 4, 5], + 'col_conflict': [1, 2, 3, 4, 5], + 'col_right': [2, 2, 2, 2, 2]}) df2_copy = df2.copy() df_result = DataFrame({ diff --git a/pandas/tests/reshape/merge/test_merge_ordered.py b/pandas/tests/reshape/merge/test_merge_ordered.py index 31c484a483d18..42d8eb7273ee1 100644 --- a/pandas/tests/reshape/merge/test_merge_ordered.py +++ b/pandas/tests/reshape/merge/test_merge_ordered.py @@ -83,9 +83,10 @@ def test_empty_sequence_concat(self): pd.concat([pd.DataFrame(), None]) def test_doc_example(self): - left = DataFrame({'key': ['a', 'c', 'e', 'a', 'c', 'e'], + left = DataFrame({'group': list('aaabbb'), + 'key': ['a', 'c', 'e', 'a', 'c', 'e'], 'lvalue': [1, 2, 3] * 2, - 'group': list('aaabbb')}) + }) right = DataFrame({'key': ['b', 'c', 'd'], 'rvalue': [1, 2, 3]}) diff --git a/pandas/tests/reshape/test_concat.py b/pandas/tests/reshape/test_concat.py index 7e126dd56775b..11c3b733422cf 100644 --- a/pandas/tests/reshape/test_concat.py +++ b/pandas/tests/reshape/test_concat.py @@ -1542,10 +1542,10 @@ def test_concat_bug_2972(self): def test_concat_bug_3602(self): # GH 3602, duplicate columns - df1 = DataFrame({'firmNo': [0, 0, 0, 0], 'stringvar': [ - 'rrr', 'rrr', 'rrr', 'rrr'], 'prc': [6, 6, 6, 6]}) - df2 = DataFrame({'misc': [1, 2, 3, 4], 'prc': [ - 6, 6, 6, 6], 'C': [9, 10, 11, 12]}) + df1 = DataFrame({'firmNo': [0, 0, 0, 0], 'prc': [6, 6, 6, 6], + 'stringvar': ['rrr', 'rrr', 'rrr', 'rrr']}) + df2 = DataFrame({'C': [9, 10, 11, 12], 'misc': [1, 2, 3, 4], + 'prc': [6, 6, 6, 6]}) expected = DataFrame([[0, 6, 'rrr', 9, 1, 6], [0, 6, 'rrr', 10, 2, 6], [0, 6, 'rrr', 11, 3, 6], diff --git a/pandas/tests/reshape/test_melt.py b/pandas/tests/reshape/test_melt.py index b7422dfd7e911..000b22d4fdd36 100644 --- a/pandas/tests/reshape/test_melt.py +++ b/pandas/tests/reshape/test_melt.py @@ -589,11 +589,11 @@ def test_nonnumeric_suffix(self): def test_mixed_type_suffix(self): df = pd.DataFrame({ - 'treatment_1': [1.0, 2.0], - 'treatment_foo': [3.0, 4.0], - 'result_foo': [5.0, 6.0], + 'A': ['X1', 'X2'], 'result_1': [0, 9], - 'A': ['X1', 'X2']}) + 'result_foo': [5.0, 6.0], + 'treatment_1': [1.0, 2.0], + 'treatment_foo': [3.0, 4.0]}) expected = pd.DataFrame({ 'A': ['X1', 'X2', 'X1', 'X2'], 'colname': ['1', '1', 'foo', 'foo'], diff --git a/pandas/tests/reshape/test_reshape.py b/pandas/tests/reshape/test_reshape.py index a57c3c41b3637..c4d925b83585b 100644 --- a/pandas/tests/reshape/test_reshape.py +++ b/pandas/tests/reshape/test_reshape.py @@ -100,8 +100,8 @@ def test_basic_types(self, sparse, dtype): expected_counts = {'int64': 1, 'object': 1} expected_counts[dtype_name] = 3 + expected_counts.get(dtype_name, 0) - expected = Series(expected_counts).sort_values() - tm.assert_series_equal(result.get_dtype_counts().sort_values(), + expected = Series(expected_counts).sort_index() + tm.assert_series_equal(result.get_dtype_counts().sort_index(), expected) def test_just_na(self, sparse): @@ -212,10 +212,10 @@ def test_dataframe_dummies_prefix_str(self, df, sparse): def test_dataframe_dummies_subset(self, df, sparse): result = get_dummies(df, prefix=['from_A'], columns=['A'], sparse=sparse) - expected = DataFrame({'from_A_a': [1, 0, 1], - 'from_A_b': [0, 1, 0], - 'B': ['b', 'b', 'c'], 
- 'C': [1, 2, 3]}, dtype=np.uint8) + expected = DataFrame({'B': ['b', 'b', 'c'], + 'C': [1, 2, 3], + 'from_A_a': [1, 0, 1], + 'from_A_b': [0, 1, 0]}, dtype=np.uint8) expected[['C']] = df[['C']] assert_frame_equal(result, expected) @@ -249,16 +249,16 @@ def test_dataframe_dummies_prefix_sep_bad_length(self, df, sparse): def test_dataframe_dummies_prefix_dict(self, sparse): prefixes = {'A': 'from_A', 'B': 'from_B'} - df = DataFrame({'A': ['a', 'b', 'a'], - 'B': ['b', 'b', 'c'], - 'C': [1, 2, 3]}) + df = DataFrame({'C': [1, 2, 3], + 'A': ['a', 'b', 'a'], + 'B': ['b', 'b', 'c']}) result = get_dummies(df, prefix=prefixes, sparse=sparse) - expected = DataFrame({'from_A_a': [1, 0, 1], + expected = DataFrame({'C': [1, 2, 3], + 'from_A_a': [1, 0, 1], 'from_A_b': [0, 1, 0], 'from_B_b': [1, 1, 0], - 'from_B_c': [0, 0, 1], - 'C': [1, 2, 3]}) + 'from_B_c': [0, 0, 1]}) columns = ['from_A_a', 'from_A_b', 'from_B_b', 'from_B_c'] expected[columns] = expected[columns].astype(np.uint8)
- [x] xref #19018 and #19830
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`

This PR is the second part of the preparation for letting initialisation from dicts use insertion order on Python >= 3.6; see the cross-referenced issues for details. After this PR, all tests pass independently of the dict initialisation rule, and `core/frame.py` and `core/series.py` will then be changed in a follow-up PR to use insertion order on Python >= 3.6.
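A small illustration (not part of the diff) of the order sensitivity these test rewrites remove. Once `DataFrame` construction honours dict insertion order on Python >= 3.6, any assertion that bakes in sorted column order breaks, so the tests either write dict keys in sorted order or sort explicitly:

```python
import pandas as pd

# Historically dict keys were sorted at construction time; with
# insertion order honoured, columns follow the literal instead.
df = pd.DataFrame({'b': [1.0, 2.0], 'a': ['x', 'y']})

# Order-dependent check -- valid under one construction rule only:
# assert list(df.columns) == ['a', 'b']

# Order-independent checks, in the spirit of this PR's rewrites:
assert sorted(df.columns) == ['a', 'b']
counts = df.get_dtype_counts().sort_index()  # as done in test_basic_types
```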
https://api.github.com/repos/pandas-dev/pandas/pulls/19859
2018-02-23T08:34:05Z
2018-02-24T14:55:36Z
2018-02-24T14:55:35Z
2018-02-24T17:37:09Z
DOC: Updated links to 2 tutorials in tutorials.rst
diff --git a/doc/source/tutorials.rst b/doc/source/tutorials.rst index db9385519bff2..0398e2892cef5 100644 --- a/doc/source/tutorials.rst +++ b/doc/source/tutorials.rst @@ -26,32 +26,34 @@ repository <http://github.com/jvns/pandas-cookbook>`_. To run the examples in th clone the GitHub repository and get IPython Notebook running. See `How to use this cookbook <https://github.com/jvns/pandas-cookbook#how-to-use-this-cookbook>`_. -- `A quick tour of the IPython Notebook: <http://nbviewer.ipython.org/github/jvns/pandas-cookbook/blob/v0.2/cookbook/A%20quick%20tour%20of%20IPython%20Notebook.ipynb>`_ +- `A quick tour of the IPython Notebook: <http://nbviewer.jupyter.org/github/jvns/pandas-cookbook/blob/v0.2/cookbook/A%20quick%20tour%20of%20IPython%20Notebook.ipynb>`_ Shows off IPython's awesome tab completion and magic functions. -- `Chapter 1: <http://nbviewer.ipython.org/github/jvns/pandas-cookbook/blob/v0.2/cookbook/Chapter%201%20-%20Reading%20from%20a%20CSV.ipynb>`_ +- `Chapter 1: <http://nbviewer.jupyter.org/github/jvns/pandas-cookbook/blob/v0.2/cookbook/Chapter%201%20-%20Reading%20from%20a%20CSV.ipynb>`_ Reading your data into pandas is pretty much the easiest thing. Even when the encoding is wrong! -- `Chapter 2: <http://nbviewer.ipython.org/github/jvns/pandas-cookbook/blob/v0.2/cookbook/Chapter%202%20-%20Selecting%20data%20&%20finding%20the%20most%20common%20complaint%20type.ipynb>`_ +- `Chapter 2: <http://nbviewer.jupyter.org/github/jvns/pandas-cookbook/blob/v0.2/cookbook/Chapter%202%20-%20Selecting%20data%20%26%20finding%20the%20most%20common%20complaint%20type.ipynb>`_ It's not totally obvious how to select data from a pandas dataframe. Here we explain the basics (how to take slices and get columns) -- `Chapter 3: <http://nbviewer.ipython.org/github/jvns/pandas-cookbook/blob/v0.2/cookbook/Chapter%203%20-%20Which%20borough%20has%20the%20most%20noise%20complaints%3F%20%28or%2C%20more%20selecting%20data%29.ipynb>`_ +- `Chapter 3: <http://nbviewer.jupyter.org/github/jvns/pandas-cookbook/blob/v0.2/cookbook/Chapter%203%20-%20Which%20borough%20has%20the%20most%20noise%20complaints%20%28or%2C%20more%20selecting%20data%29.ipynb>`_ Here we get into serious slicing and dicing and learn how to filter dataframes in complicated ways, really fast. -- `Chapter 4: <http://nbviewer.ipython.org/github/jvns/pandas-cookbook/blob/v0.2/cookbook/Chapter%204%20-%20Find%20out%20on%20which%20weekday%20people%20bike%20the%20most%20with%20groupby%20and%20aggregate.ipynb>`_ +- `Chapter 4: <http://nbviewer.jupyter.org/github/jvns/pandas-cookbook/blob/v0.2/cookbook/Chapter%204%20-%20Find%20out%20on%20which%20weekday%20people%20bike%20the%20most%20with%20groupby%20and%20aggregate.ipynb>`_ Groupby/aggregate is seriously my favorite thing about pandas and I use it all the time. You should probably read this. -- `Chapter 5: <http://nbviewer.ipython.org/github/jvns/pandas-cookbook/blob/v0.2/cookbook/Chapter%205%20-%20Combining%20dataframes%20and%20scraping%20Canadian%20weather%20data.ipynb>`_ +- `Chapter 5: <http://nbviewer.jupyter.org/github/jvns/pandas-cookbook/blob/v0.2/cookbook/Chapter%205%20-%20Combining%20dataframes%20and%20scraping%20Canadian%20weather%20data.ipynb>`_ Here you get to find out if it's cold in Montreal in the winter (spoiler: yes). Web scraping with pandas is fun! Here we combine dataframes. 
-- `Chapter 6: <http://nbviewer.ipython.org/github/jvns/pandas-cookbook/blob/v0.2/cookbook/Chapter%206%20-%20String%20operations%21%20Which%20month%20was%20the%20snowiest%3F.ipynb>`_ +- `Chapter 6: <http://nbviewer.jupyter.org/github/jvns/pandas-cookbook/blob/v0.2/cookbook/Chapter%206%20-%20String%20Operations-%20Which%20month%20was%20the%20snowiest.ipynb>`_ Strings with pandas are great. It has all these vectorized string operations and they're the best. We will turn a bunch of strings containing "Snow" into vectors of numbers in a trice. -- `Chapter 7: <http://nbviewer.ipython.org/github/jvns/pandas-cookbook/blob/v0.2/cookbook/Chapter%207%20-%20Cleaning%20up%20messy%20data.ipynb>`_ +- `Chapter 7: <http://nbviewer.jupyter.org/github/jvns/pandas-cookbook/blob/v0.2/cookbook/Chapter%207%20-%20Cleaning%20up%20messy%20data.ipynb>`_ Cleaning up messy data is never a joy, but with pandas it's easier. -- `Chapter 8: <http://nbviewer.ipython.org/github/jvns/pandas-cookbook/blob/v0.2/cookbook/Chapter%208%20-%20How%20to%20deal%20with%20timestamps.ipynb>`_ +- `Chapter 8: <http://nbviewer.jupyter.org/github/jvns/pandas-cookbook/blob/v0.2/cookbook/Chapter%208%20-%20How%20to%20deal%20with%20timestamps.ipynb>`_ Parsing Unix timestamps is confusing at first but it turns out to be really easy. +- `Chapter 9: <http://nbviewer.jupyter.org/github/jvns/pandas-cookbook/blob/v0.2/cookbook/Chapter%209%20-%20Loading%20data%20from%20SQL%20databases.ipynb>`_ + Reading data from SQL databases. Lessons for new pandas users
Two of the links to the [pandas-cookbook](https://github.com/jvns/pandas-cookbook) did not work for version `v0.2` of the notebooks. Now all links work: where a `v0.2` version of a notebook exists, the link points to that version; in the remaining two cases it now points to version `v0.1` instead of raising a `404: Page not found` error.
https://api.github.com/repos/pandas-dev/pandas/pulls/19857
2018-02-23T06:36:49Z
2018-02-24T16:31:13Z
2018-02-24T16:31:13Z
2018-02-24T16:31:18Z
ENH: Add option to disable MathJax (#19824).
diff --git a/doc/source/options.rst b/doc/source/options.rst index cce16a5396377..a82be4d84bf3f 100644 --- a/doc/source/options.rst +++ b/doc/source/options.rst @@ -402,6 +402,10 @@ display.html.table_schema False Whether to publish a Table display.html.border 1 A ``border=value`` attribute is inserted in the ``<table>`` tag for the DataFrame HTML repr. +display.html.use_mathjax True When True, Jupyter notebook will process + table contents using MathJax, rendering + mathematical expressions enclosed by the + dollar symbol. io.excel.xls.writer xlwt The default Excel writer engine for 'xls' files. io.excel.xlsm.writer openpyxl The default Excel writer engine for diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 08363cd54c606..f2c96ba3f53a8 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -337,6 +337,7 @@ Other Enhancements - Added :func:`SeriesGroupBy.is_monotonic_increasing` and :func:`SeriesGroupBy.is_monotonic_decreasing` (:issue:`17015`) - For subclassed ``DataFrames``, :func:`DataFrame.apply` will now preserve the ``Series`` subclass (if defined) when passing the data to the applied function (:issue:`19822`) - :func:`DataFrame.from_dict` now accepts a ``columns`` argument that can be used to specify the column names when ``orient='index'`` is used (:issue:`18529`) +- Added option ``display.html.use_mathjax`` so `MathJax <https://www.mathjax.org/>`_ can be disabled when rendering tables in ``Jupyter`` notebooks (:issue:`19856`, :issue:`19824`) .. _whatsnew_0230.api_breaking: diff --git a/pandas/core/config_init.py b/pandas/core/config_init.py index da42cdbf10233..0edbf892172a9 100644 --- a/pandas/core/config_init.py +++ b/pandas/core/config_init.py @@ -207,6 +207,12 @@ def use_numexpr_cb(key): (currently both are identical) """ +pc_html_use_mathjax_doc = """\ +: boolean + When True, Jupyter notebook will process table contents using MathJax, + rendering mathematical expressions enclosed by the dollar symbol. + (default: True) +""" pc_width_doc = """ : int @@ -358,6 +364,8 @@ def table_schema_cb(key): validator=is_bool, cb=table_schema_cb) cf.register_option('html.border', 1, pc_html_border_doc, validator=is_int) + cf.register_option('html.use_mathjax', True, pc_html_use_mathjax_doc, + validator=is_bool) with cf.config_prefix('html'): cf.register_option('border', 1, pc_html_border_doc, diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py index 621641747f376..50b4f11634b78 100644 --- a/pandas/io/formats/format.py +++ b/pandas/io/formats/format.py @@ -1209,6 +1209,9 @@ def write_result(self, buf): frame = self.frame _classes = ['dataframe'] # Default class. 
+ use_mathjax = get_option("display.html.use_mathjax") + if not use_mathjax: + _classes.append('tex2jax_ignore') if self.classes is not None: if isinstance(self.classes, str): self.classes = self.classes.split() diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py index 525f487d8aa39..f876ceb8a26bf 100644 --- a/pandas/io/formats/style.py +++ b/pandas/io/formats/style.py @@ -325,9 +325,19 @@ def format_attr(pair): .format(row=r, col=c)}) body.append(row_es) + table_attr = self.table_attributes + use_mathjax = get_option("display.html.use_mathjax") + if not use_mathjax: + table_attr = table_attr or '' + if 'class="' in table_attr: + table_attr = table_attr.replace('class="', + 'class="tex2jax_ignore ') + else: + table_attr += ' class="tex2jax_ignore"' + return dict(head=head, cellstyle=cellstyle, body=body, uuid=uuid, precision=precision, table_styles=table_styles, - caption=caption, table_attributes=self.table_attributes) + caption=caption, table_attributes=table_attr) def format(self, formatter, subset=None): """ diff --git a/pandas/tests/io/formats/test_format.py b/pandas/tests/io/formats/test_format.py index 03c071dbe4bc5..6c3b75cdfa6df 100644 --- a/pandas/tests/io/formats/test_format.py +++ b/pandas/tests/io/formats/test_format.py @@ -1434,6 +1434,13 @@ def test_repr_html(self): tm.reset_display_options() + def test_repr_html_mathjax(self): + df = DataFrame([[1, 2], [3, 4]]) + assert 'tex2jax_ignore' not in df._repr_html_() + + with pd.option_context('display.html.use_mathjax', False): + assert 'tex2jax_ignore' in df._repr_html_() + def test_repr_html_wide(self): max_cols = get_option('display.max_columns') df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1))) diff --git a/pandas/tests/io/formats/test_style.py b/pandas/tests/io/formats/test_style.py index adf8e14b756c2..c1ab9cd184340 100644 --- a/pandas/tests/io/formats/test_style.py +++ b/pandas/tests/io/formats/test_style.py @@ -46,6 +46,13 @@ def test_init_series(self): def test_repr_html_ok(self): self.styler._repr_html_() + def test_repr_html_mathjax(self): + # gh-19824 + assert 'tex2jax_ignore' not in self.styler._repr_html_() + + with pd.option_context('display.html.use_mathjax', False): + assert 'tex2jax_ignore' in self.styler._repr_html_() + def test_update_ctx(self): self.styler._update_ctx(self.attrs) expected = {(0, 0): ['color: red'],
This prevents MathJax from processing table contents:

<img width="599" alt="screen shot 2018-02-24 at 7 36 50 am" src="https://user-images.githubusercontent.com/1804856/36632244-0c07bdac-1939-11e8-85eb-0c22d42991d1.png">

I'm hoping for a code review to find out if this might be accepted. If so, I'll add the tests/docs.

- [x] closes #19824
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
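A short usage sketch, adapted from the tests added in this diff (the frame contents are illustrative):

```python
import pandas as pd

df = pd.DataFrame({'A': ['$x^2$', r'$\alpha$']})

# With the option disabled, the HTML repr carries a 'tex2jax_ignore'
# CSS class, which tells Jupyter's MathJax to skip the table contents.
with pd.option_context('display.html.use_mathjax', False):
    assert 'tex2jax_ignore' in df._repr_html_()

# The default is unchanged: MathJax still processes the table.
assert 'tex2jax_ignore' not in df._repr_html_()
```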
https://api.github.com/repos/pandas-dev/pandas/pulls/19856
2018-02-23T05:35:34Z
2018-03-01T23:06:21Z
2018-03-01T23:06:20Z
2018-07-24T21:40:59Z
BUG: names on union and intersection for Index were inconsistent (GH9943 GH9862)
diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt index 6ace245a4bae1..f42cfc4d4c27f 100644 --- a/doc/source/whatsnew/v0.24.0.txt +++ b/doc/source/whatsnew/v0.24.0.txt @@ -1215,6 +1215,8 @@ Indexing - Bug in `scalar in Index` if scalar is a float while the ``Index`` is of integer dtype (:issue:`22085`) - Bug in `MultiIndex.set_levels` when levels value is not subscriptable (:issue:`23273`) - Bug where setting a timedelta column by ``Index`` causes it to be casted to double, and therefore lose precision (:issue:`23511`) +- Bug in :func:`Index.union` and :func:`Index.intersection` where name of the ``Index`` of the result was not computed correctly for certain cases (:issue:`9943`, :issue:`9862`) + Missing ^^^^^^^ diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index ae64179b36485..7434a02043d65 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -61,7 +61,7 @@ import pandas.core.sorting as sorting from pandas.io.formats.printing import ( pprint_thing, default_pprint, format_object_summary, format_object_attrs) -from pandas.core.ops import make_invalid_op +from pandas.core.ops import make_invalid_op, get_op_result_name from pandas.core.strings import StringMethods __all__ = ['Index'] @@ -1253,7 +1253,7 @@ def _convert_can_do_setop(self, other): other = Index(other, name=self.name) result_name = self.name else: - result_name = self.name if self.name == other.name else None + result_name = get_op_result_name(self, other) return other, result_name def _convert_for_op(self, value): @@ -2745,19 +2745,15 @@ def __or__(self, other): def __xor__(self, other): return self.symmetric_difference(other) - def _get_consensus_name(self, other): + def _get_reconciled_name_object(self, other): """ - Given 2 indexes, give a consensus name meaning - we take the not None one, or None if the names differ. - Return a new object if we are resetting the name + If the result of a set operation will be self, + return self, unless the name changes, in which + case make a shallow copy of self. """ - if self.name != other.name: - if self.name is None or other.name is None: - name = self.name or other.name - else: - name = None - if self.name != name: - return self._shallow_copy(name=name) + name = get_op_result_name(self, other) + if self.name != name: + return self._shallow_copy(name=name) return self def union(self, other): @@ -2785,10 +2781,10 @@ def union(self, other): other = ensure_index(other) if len(other) == 0 or self.equals(other): - return self._get_consensus_name(other) + return self._get_reconciled_name_object(other) if len(self) == 0: - return other._get_consensus_name(self) + return other._get_reconciled_name_object(self) # TODO: is_dtype_union_equal is a hack around # 1. 
buggy set ops with duplicates (GH #13432) @@ -2851,11 +2847,10 @@ def union(self, other): stacklevel=3) # for subclasses - return self._wrap_union_result(other, result) + return self._wrap_setop_result(other, result) - def _wrap_union_result(self, other, result): - name = self.name if self.name == other.name else None - return self.__class__(result, name=name) + def _wrap_setop_result(self, other, result): + return self._constructor(result, name=get_op_result_name(self, other)) def intersection(self, other): """ @@ -2885,7 +2880,7 @@ def intersection(self, other): other = ensure_index(other) if self.equals(other): - return self._get_consensus_name(other) + return self._get_reconciled_name_object(other) if not is_dtype_equal(self.dtype, other.dtype): this = self.astype('O') @@ -2905,7 +2900,7 @@ def intersection(self, other): if self.is_monotonic and other.is_monotonic: try: result = self._inner_indexer(lvals, rvals)[0] - return self._wrap_union_result(other, result) + return self._wrap_setop_result(other, result) except TypeError: pass @@ -4175,7 +4170,7 @@ def _join_monotonic(self, other, how='left', return_indexers=False): return join_index def _wrap_joined_index(self, joined, other): - name = self.name if self.name == other.name else None + name = get_op_result_name(self, other) return Index(joined, name=name) def _get_string_slice(self, key, use_lhs=True, use_rhs=True): diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index 278e395d65014..6e2f0b00fcd6e 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -25,6 +25,7 @@ import pandas.core.common as com import pandas.core.missing as missing import pandas.core.indexes.base as ibase +from pandas.core.ops import get_op_result_name from pandas.core.arrays.categorical import Categorical, contains _index_doc_kwargs = dict(ibase._index_doc_kwargs) @@ -324,6 +325,10 @@ def itemsize(self): # Size of the items in categories, not codes. 
return self.values.itemsize + def _wrap_setop_result(self, other, result): + name = get_op_result_name(self, other) + return self._shallow_copy(result, name=name) + def get_values(self): """ return the underlying data as an ndarray """ return self._data.get_values() diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index bd6f0c68a9aa5..3a2f9986760d3 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -34,6 +34,7 @@ from pandas.core.indexes.base import Index, _index_shared_docs from pandas.core.indexes.numeric import Int64Index +from pandas.core.ops import get_op_result_name import pandas.compat as compat from pandas.tseries.frequencies import to_offset, Resolution from pandas.core.indexes.datetimelike import ( @@ -592,6 +593,10 @@ def union(self, other): y : Index or DatetimeIndex """ self._assert_can_do_setop(other) + + if len(other) == 0 or self.equals(other) or len(self) == 0: + return super(DatetimeIndex, self).union(other) + if not isinstance(other, DatetimeIndex): try: other = DatetimeIndex(other) @@ -674,7 +679,7 @@ def _maybe_utc_convert(self, other): return this, other def _wrap_joined_index(self, joined, other): - name = self.name if self.name == other.name else None + name = get_op_result_name(self, other) if (isinstance(other, DatetimeIndex) and self.freq == other.freq and self._can_fast_union(other)): @@ -745,11 +750,11 @@ def _fast_union(self, other): else: return left - def _wrap_union_result(self, other, result): - name = self.name if self.name == other.name else None + def _wrap_setop_result(self, other, result): + name = get_op_result_name(self, other) if not timezones.tz_compare(self.tz, other.tz): raise ValueError('Passed item and index have different timezone') - return self._simple_new(result, name=name, freq=None, tz=self.tz) + return self._shallow_copy(result, name=name, freq=None, tz=self.tz) def intersection(self, other): """ @@ -765,6 +770,10 @@ def intersection(self, other): y : Index or DatetimeIndex """ self._assert_can_do_setop(other) + + if self.equals(other): + return self._get_reconciled_name_object(other) + if not isinstance(other, DatetimeIndex): try: other = DatetimeIndex(other) diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index b86921b5579ed..79239ec90ac80 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -25,6 +25,7 @@ from pandas.core.indexes.base import ( Index, ensure_index, default_pprint, _index_shared_docs) +from pandas.core.ops import get_op_result_name from pandas._libs import Timestamp, Timedelta from pandas._libs.interval import ( @@ -1048,7 +1049,7 @@ def func(self, other): raise TypeError(msg.format(op=op_name)) result = getattr(self._multiindex, op_name)(other._multiindex) - result_name = self.name if self.name == other.name else None + result_name = get_op_result_name(self, other) # GH 19101: ensure empty results have correct dtype if result.empty: diff --git a/pandas/core/indexes/numeric.py b/pandas/core/indexes/numeric.py index 420b862ae16a4..795ffeefa1794 100644 --- a/pandas/core/indexes/numeric.py +++ b/pandas/core/indexes/numeric.py @@ -21,7 +21,7 @@ from pandas.util._decorators import Appender, cache_readonly import pandas.core.dtypes.concat as _concat import pandas.core.indexes.base as ibase - +from pandas.core.ops import get_op_result_name _num_index_shared_docs = dict() @@ -215,7 +215,7 @@ def _convert_scalar_indexer(self, key, kind=None): ._convert_scalar_indexer(key, kind=kind)) def 
_wrap_joined_index(self, joined, other): - name = self.name if self.name == other.name else None + name = get_op_result_name(self, other) return Int64Index(joined, name=name) @classmethod @@ -288,7 +288,7 @@ def _convert_index_indexer(self, keyarr): return keyarr def _wrap_joined_index(self, joined, other): - name = self.name if self.name == other.name else None + name = get_op_result_name(self, other) return UInt64Index(joined, name=name) @classmethod diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index 7e11ca5dbfcef..92ffaea521d7f 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -12,9 +12,9 @@ is_integer_dtype, is_datetime64_any_dtype, is_bool_dtype, - pandas_dtype, + pandas_dtype ) - +from pandas.core.ops import get_op_result_name from pandas.core.accessor import PandasDelegate, delegate_names from pandas.core.indexes.datetimes import DatetimeIndex, Int64Index, Index from pandas.core.indexes.datetimelike import ( @@ -848,8 +848,8 @@ def _assert_can_do_setop(self, other): msg = DIFFERENT_FREQ_INDEX.format(self.freqstr, other.freqstr) raise IncompatibleFrequency(msg) - def _wrap_union_result(self, other, result): - name = self.name if self.name == other.name else None + def _wrap_setop_result(self, other, result): + name = get_op_result_name(self, other) result = self._apply_meta(result) result.name = name return result diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py index 673ab9f2118a4..d1b5645928921 100644 --- a/pandas/core/indexes/range.py +++ b/pandas/core/indexes/range.py @@ -5,7 +5,7 @@ import numpy as np -from pandas._libs import index as libindex +from pandas._libs import index as libindex, lib import pandas.compat as compat from pandas.compat import get_range_parameters, lrange, range from pandas.compat.numpy import function as nv @@ -263,8 +263,9 @@ def tolist(self): @Appender(_index_shared_docs['_shallow_copy']) def _shallow_copy(self, values=None, **kwargs): if values is None: + name = kwargs.get("name", self.name) return RangeIndex._simple_new( - name=self.name, **dict(self._get_data_as_items())) + name=name, **dict(self._get_data_as_items())) else: kwargs.setdefault('name', self.name) return self._int64index._shallow_copy(values, **kwargs) @@ -344,6 +345,10 @@ def intersection(self, other): ------- intersection : Index """ + + if self.equals(other): + return self._get_reconciled_name_object(other) + if not isinstance(other, RangeIndex): return super(RangeIndex, self).intersection(other) @@ -424,10 +429,9 @@ def union(self, other): union : Index """ self._assert_can_do_setop(other) - if len(other) == 0 or self.equals(other): - return self - if len(self) == 0: - return other + if len(other) == 0 or self.equals(other) or len(self) == 0: + return super(RangeIndex, self).union(other) + if isinstance(other, RangeIndex): start_s, step_s = self._start, self._step end_s = self._start + self._step * (len(self) - 1) @@ -498,7 +502,12 @@ def __getitem__(self, key): super_getitem = super(RangeIndex, self).__getitem__ if is_scalar(key): - n = int(key) + if not lib.is_integer(key): + raise IndexError("only integers, slices (`:`), " + "ellipsis (`...`), numpy.newaxis (`None`) " + "and integer or boolean " + "arrays are valid indices") + n = com.cast_scalar_indexer(key) if n != key: return super_getitem(key) if n < 0: @@ -649,7 +658,8 @@ def _evaluate_numeric_binop(self, other): return op(self._int64index, other) # TODO: Do attrs get handled reliably? 
- return _evaluate_numeric_binop + name = '__{name}__'.format(name=op.__name__) + return compat.set_function_name(_evaluate_numeric_binop, name, cls) cls.__add__ = _make_evaluate_binop(operator.add) cls.__radd__ = _make_evaluate_binop(ops.radd) diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py index 33361c851a4c5..5b077a6984114 100644 --- a/pandas/core/indexes/timedeltas.py +++ b/pandas/core/indexes/timedeltas.py @@ -26,6 +26,7 @@ from pandas.core.base import _shared_docs from pandas.core.indexes.base import _index_shared_docs import pandas.core.common as com +from pandas.core.ops import get_op_result_name import pandas.core.dtypes.concat as _concat from pandas.util._decorators import Appender, Substitution from pandas.core.indexes.datetimelike import ( @@ -281,6 +282,10 @@ def union(self, other): y : Index or TimedeltaIndex """ self._assert_can_do_setop(other) + + if len(other) == 0 or self.equals(other) or len(self) == 0: + return super(TimedeltaIndex, self).union(other) + if not isinstance(other, TimedeltaIndex): try: other = TimedeltaIndex(other) @@ -313,7 +318,7 @@ def join(self, other, how='left', level=None, return_indexers=False, sort=sort) def _wrap_joined_index(self, joined, other): - name = self.name if self.name == other.name else None + name = get_op_result_name(self, other) if (isinstance(other, TimedeltaIndex) and self.freq == other.freq and self._can_fast_union(other)): joined = self._shallow_copy(joined, name=name) @@ -373,10 +378,6 @@ def _fast_union(self, other): else: return left - def _wrap_union_result(self, other, result): - name = self.name if self.name == other.name else None - return self._simple_new(result, name=name, freq=None) - def intersection(self, other): """ Specialized intersection for TimedeltaIndex objects. 
May be much faster @@ -391,6 +392,10 @@ def intersection(self, other): y : Index or TimedeltaIndex """ self._assert_can_do_setop(other) + + if self.equals(other): + return self._get_reconciled_name_object(other) + if not isinstance(other, TimedeltaIndex): try: other = TimedeltaIndex(other) diff --git a/pandas/tests/indexes/common.py b/pandas/tests/indexes/common.py index 468b1610a9142..c5cbaea23df76 100644 --- a/pandas/tests/indexes/common.py +++ b/pandas/tests/indexes/common.py @@ -360,10 +360,10 @@ def test_has_duplicates(self, indices): def test_duplicated(self, indices, keep): if type(indices) is not self._holder: pytest.skip('Can only check if we know the index type') - if not len(indices) or isinstance(indices, MultiIndex): + if not len(indices) or isinstance(indices, (MultiIndex, RangeIndex)): # MultiIndex tested separately in: # tests/indexes/multi/test_unique_and_duplicates - pytest.skip('Skip check for empty Index and MultiIndex') + pytest.skip('Skip check for empty Index, MultiIndex, RangeIndex') idx = self._holder(indices) if idx.has_duplicates: diff --git a/pandas/tests/indexes/conftest.py b/pandas/tests/indexes/conftest.py index 8cfed33a96ac5..e82cce873e75c 100644 --- a/pandas/tests/indexes/conftest.py +++ b/pandas/tests/indexes/conftest.py @@ -15,6 +15,7 @@ tm.makeTimedeltaIndex(100), tm.makeIntIndex(100), tm.makeUIntIndex(100), + tm.makeRangeIndex(100), tm.makeFloatIndex(100), Index([True, False]), tm.makeCategoricalIndex(100), diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py index daebc6e95aac4..724dffc49dd3b 100644 --- a/pandas/tests/indexes/test_base.py +++ b/pandas/tests/indexes/test_base.py @@ -786,6 +786,67 @@ def test_intersect_str_dates(self): assert len(result) == 0 + @pytest.mark.parametrize( + 'fname, sname, expected_name', + [ + ('A', 'A', 'A'), + ('A', 'B', None), + ('A', None, None), + (None, 'B', None), + (None, None, None), + ]) + def test_corner_union(self, indices, fname, sname, expected_name): + # GH 9943 9862 + # Test unions with various name combinations + # Do not test MultiIndex or repeats + + if isinstance(indices, MultiIndex) or not indices.is_unique: + pytest.skip("Not for MultiIndex or repeated indices") + + # Test copy.union(copy) + first = indices.copy().set_names(fname) + second = indices.copy().set_names(sname) + union = first.union(second) + expected = indices.copy().set_names(expected_name) + tm.assert_index_equal(union, expected) + + # Test copy.union(empty) + first = indices.copy().set_names(fname) + second = indices.drop(indices).set_names(sname) + union = first.union(second) + expected = indices.copy().set_names(expected_name) + tm.assert_index_equal(union, expected) + + # Test empty.union(copy) + first = indices.drop(indices).set_names(fname) + second = indices.copy().set_names(sname) + union = first.union(second) + expected = indices.copy().set_names(expected_name) + tm.assert_index_equal(union, expected) + + # Test empty.union(empty) + first = indices.drop(indices).set_names(fname) + second = indices.drop(indices).set_names(sname) + union = first.union(second) + expected = indices.drop(indices).set_names(expected_name) + tm.assert_index_equal(union, expected) + + def test_chained_union(self): + # Chained unions handles names correctly + i1 = Index([1, 2], name='i1') + i2 = Index([3, 4], name='i2') + i3 = Index([5, 6], name='i3') + union = i1.union(i2.union(i3)) + expected = i1.union(i2).union(i3) + tm.assert_index_equal(union, expected) + + j1 = Index([1, 2], name='j1') + j2 = Index([], name='j2') + 
j3 = Index([], name='j3') + union = j1.union(j2.union(j3)) + expected = j1.union(j2).union(j3) + tm.assert_index_equal(union, expected) + def test_union(self): # TODO: Replace with fixturesult first = self.strIndex[5:20] @@ -824,7 +885,7 @@ def test_union_identity(self): @pytest.mark.parametrize("first_list", [list('ab'), list()]) @pytest.mark.parametrize("second_list", [list('ab'), list()]) @pytest.mark.parametrize("first_name, second_name, expected_name", [ - ('A', 'B', None), (None, 'B', 'B'), ('A', None, 'A')]) + ('A', 'B', None), (None, 'B', None), ('A', None, None)]) def test_union_name_preservation(self, first_list, second_list, first_name, second_name, expected_name): first = Index(first_list, name=first_name) diff --git a/pandas/tests/reshape/test_concat.py b/pandas/tests/reshape/test_concat.py index 23bf8896409c9..c632a9fe31faa 100644 --- a/pandas/tests/reshape/test_concat.py +++ b/pandas/tests/reshape/test_concat.py @@ -2290,10 +2290,10 @@ def test_concat_categoricalindex(self): result = pd.concat([a, b, c], axis=1) - exp_idx = pd.CategoricalIndex([0, 1, 2, 9]) - exp = pd.DataFrame({0: [1, np.nan, np.nan, 1], - 1: [2, 2, np.nan, np.nan], - 2: [np.nan, 3, 3, np.nan]}, + exp_idx = pd.CategoricalIndex([9, 0, 1, 2], categories=categories) + exp = pd.DataFrame({0: [1, 1, np.nan, np.nan], + 1: [np.nan, 2, 2, np.nan], + 2: [np.nan, np.nan, 3, 3]}, columns=[0, 1, 2], index=exp_idx) tm.assert_frame_equal(result, exp)
xref #9943
closes #9862

- [x] tests added / passed
  - Added tests checking the result name of unions for all name combinations (matching, differing, and unnamed operands)
  - Fixed tests for unions of ranges
  - Added union/difference corner-case tests for all index types
  - Added tests so that `index.intersection(empty_index) == index.difference(index)`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry

Also fixes a bug in `Index.difference` when computing `index.difference(index)`.
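For quick reference, here is a minimal sketch (not part of the diff) of the name-propagation rules the new tests pin down; the commented results assume the post-change behavior:

```python
import pandas as pd

left = pd.Index([1, 2], name='A')

# Names agree -> the union keeps the shared name
print(left.union(pd.Index([2, 3], name='A')).name)  # 'A'

# Names differ (including named vs. unnamed) -> the result name is None
print(left.union(pd.Index([2, 3], name='B')).name)  # None
print(left.union(pd.Index([2, 3])).name)            # None

# An empty operand no longer short-circuits the name logic
print(left.union(pd.Index([], dtype='int64', name='B')).name)  # None, not 'A'
```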
https://api.github.com/repos/pandas-dev/pandas/pulls/19849
2018-02-22T22:05:05Z
2018-11-06T18:21:48Z
2018-11-06T18:21:48Z
2019-07-12T17:21:39Z
Separate TimedeltaIndex mul/div tests
diff --git a/pandas/tests/indexes/datetimes/test_datetime.py b/pandas/tests/indexes/datetimes/test_datetime.py index 2cf33644377ab..b685584a29fb9 100644 --- a/pandas/tests/indexes/datetimes/test_datetime.py +++ b/pandas/tests/indexes/datetimes/test_datetime.py @@ -2,7 +2,7 @@ import pytest import numpy as np -from datetime import date, timedelta, time +from datetime import date import dateutil import pandas as pd @@ -18,112 +18,6 @@ class TestDatetimeIndex(object): - def test_get_loc(self): - idx = pd.date_range('2000-01-01', periods=3) - - for method in [None, 'pad', 'backfill', 'nearest']: - assert idx.get_loc(idx[1], method) == 1 - assert idx.get_loc(idx[1].to_pydatetime(), method) == 1 - assert idx.get_loc(str(idx[1]), method) == 1 - - if method is not None: - assert idx.get_loc(idx[1], method, - tolerance=pd.Timedelta('0 days')) == 1 - - assert idx.get_loc('2000-01-01', method='nearest') == 0 - assert idx.get_loc('2000-01-01T12', method='nearest') == 1 - - assert idx.get_loc('2000-01-01T12', method='nearest', - tolerance='1 day') == 1 - assert idx.get_loc('2000-01-01T12', method='nearest', - tolerance=pd.Timedelta('1D')) == 1 - assert idx.get_loc('2000-01-01T12', method='nearest', - tolerance=np.timedelta64(1, 'D')) == 1 - assert idx.get_loc('2000-01-01T12', method='nearest', - tolerance=timedelta(1)) == 1 - with tm.assert_raises_regex(ValueError, - 'unit abbreviation w/o a number'): - idx.get_loc('2000-01-01T12', method='nearest', tolerance='foo') - with pytest.raises(KeyError): - idx.get_loc('2000-01-01T03', method='nearest', tolerance='2 hours') - with pytest.raises( - ValueError, - match='tolerance size must match target index size'): - idx.get_loc('2000-01-01', method='nearest', - tolerance=[pd.Timedelta('1day').to_timedelta64(), - pd.Timedelta('1day').to_timedelta64()]) - - assert idx.get_loc('2000', method='nearest') == slice(0, 3) - assert idx.get_loc('2000-01', method='nearest') == slice(0, 3) - - assert idx.get_loc('1999', method='nearest') == 0 - assert idx.get_loc('2001', method='nearest') == 2 - - with pytest.raises(KeyError): - idx.get_loc('1999', method='pad') - with pytest.raises(KeyError): - idx.get_loc('2001', method='backfill') - - with pytest.raises(KeyError): - idx.get_loc('foobar') - with pytest.raises(TypeError): - idx.get_loc(slice(2)) - - idx = pd.to_datetime(['2000-01-01', '2000-01-04']) - assert idx.get_loc('2000-01-02', method='nearest') == 0 - assert idx.get_loc('2000-01-03', method='nearest') == 1 - assert idx.get_loc('2000-01', method='nearest') == slice(0, 2) - - # time indexing - idx = pd.date_range('2000-01-01', periods=24, freq='H') - tm.assert_numpy_array_equal(idx.get_loc(time(12)), - np.array([12]), check_dtype=False) - tm.assert_numpy_array_equal(idx.get_loc(time(12, 30)), - np.array([]), check_dtype=False) - with pytest.raises(NotImplementedError): - idx.get_loc(time(12, 30), method='pad') - - def test_get_indexer(self): - idx = pd.date_range('2000-01-01', periods=3) - exp = np.array([0, 1, 2], dtype=np.intp) - tm.assert_numpy_array_equal(idx.get_indexer(idx), exp) - - target = idx[0] + pd.to_timedelta(['-1 hour', '12 hours', - '1 day 1 hour']) - tm.assert_numpy_array_equal(idx.get_indexer(target, 'pad'), - np.array([-1, 0, 1], dtype=np.intp)) - tm.assert_numpy_array_equal(idx.get_indexer(target, 'backfill'), - np.array([0, 1, 2], dtype=np.intp)) - tm.assert_numpy_array_equal(idx.get_indexer(target, 'nearest'), - np.array([0, 1, 1], dtype=np.intp)) - tm.assert_numpy_array_equal( - idx.get_indexer(target, 'nearest', - tolerance=pd.Timedelta('1 
hour')), - np.array([0, -1, 1], dtype=np.intp)) - tol_raw = [pd.Timedelta('1 hour'), - pd.Timedelta('1 hour'), - pd.Timedelta('1 hour').to_timedelta64(), ] - tm.assert_numpy_array_equal( - idx.get_indexer(target, 'nearest', - tolerance=[np.timedelta64(x) for x in tol_raw]), - np.array([0, -1, 1], dtype=np.intp)) - tol_bad = [pd.Timedelta('2 hour').to_timedelta64(), - pd.Timedelta('1 hour').to_timedelta64(), - 'foo', ] - with pytest.raises( - ValueError, match='abbreviation w/o a number'): - idx.get_indexer(target, 'nearest', tolerance=tol_bad) - with pytest.raises(ValueError): - idx.get_indexer(idx[[0]], method='nearest', tolerance='foo') - - def test_reasonable_keyerror(self): - # GH #1062 - index = DatetimeIndex(['1/3/2000']) - try: - index.get_loc('1/1/2000') - except KeyError as e: - assert '2000' in str(e) - def test_roundtrip_pickle_with_tz(self): # GH 8367 diff --git a/pandas/tests/indexes/datetimes/test_indexing.py b/pandas/tests/indexes/datetimes/test_indexing.py index a9f1a5e608ac7..af65a8618d30f 100644 --- a/pandas/tests/indexes/datetimes/test_indexing.py +++ b/pandas/tests/indexes/datetimes/test_indexing.py @@ -1,4 +1,4 @@ -from datetime import datetime +from datetime import datetime, timedelta, time import pytest import pytz @@ -12,10 +12,93 @@ START, END = datetime(2009, 1, 1), datetime(2010, 1, 1) -class TestDatetimeIndex(object): +class TestGetItem(object): + def test_getitem(self): + idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx') + idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D', + tz='Asia/Tokyo', name='idx') - def test_where_other(self): + for idx in [idx1, idx2]: + result = idx[0] + assert result == Timestamp('2011-01-01', tz=idx.tz) + + result = idx[0:5] + expected = pd.date_range('2011-01-01', '2011-01-05', freq='D', + tz=idx.tz, name='idx') + tm.assert_index_equal(result, expected) + assert result.freq == expected.freq + + result = idx[0:10:2] + expected = pd.date_range('2011-01-01', '2011-01-09', freq='2D', + tz=idx.tz, name='idx') + tm.assert_index_equal(result, expected) + assert result.freq == expected.freq + + result = idx[-20:-5:3] + expected = pd.date_range('2011-01-12', '2011-01-24', freq='3D', + tz=idx.tz, name='idx') + tm.assert_index_equal(result, expected) + assert result.freq == expected.freq + + result = idx[4::-1] + expected = DatetimeIndex(['2011-01-05', '2011-01-04', '2011-01-03', + '2011-01-02', '2011-01-01'], + freq='-1D', tz=idx.tz, name='idx') + tm.assert_index_equal(result, expected) + assert result.freq == expected.freq + + def test_dti_business_getitem(self): + rng = pd.bdate_range(START, END) + smaller = rng[:5] + exp = DatetimeIndex(rng.view(np.ndarray)[:5]) + tm.assert_index_equal(smaller, exp) + + assert smaller.offset == rng.offset + + sliced = rng[::5] + assert sliced.offset == BDay() * 5 + fancy_indexed = rng[[4, 3, 2, 1, 0]] + assert len(fancy_indexed) == 5 + assert isinstance(fancy_indexed, DatetimeIndex) + assert fancy_indexed.freq is None + + # 32-bit vs. 
64-bit platforms + assert rng[4] == rng[np.int_(4)] + + def test_dti_business_getitem_matplotlib_hackaround(self): + rng = pd.bdate_range(START, END) + values = rng[:, None] + expected = rng.values[:, None] + tm.assert_numpy_array_equal(values, expected) + + def test_dti_custom_getitem(self): + rng = pd.bdate_range(START, END, freq='C') + smaller = rng[:5] + exp = DatetimeIndex(rng.view(np.ndarray)[:5]) + tm.assert_index_equal(smaller, exp) + assert smaller.offset == rng.offset + + sliced = rng[::5] + assert sliced.offset == CDay() * 5 + + fancy_indexed = rng[[4, 3, 2, 1, 0]] + assert len(fancy_indexed) == 5 + assert isinstance(fancy_indexed, DatetimeIndex) + assert fancy_indexed.freq is None + + # 32-bit vs. 64-bit platforms + assert rng[4] == rng[np.int_(4)] + + def test_dti_custom_getitem_matplotlib_hackaround(self): + rng = pd.bdate_range(START, END, freq='C') + values = rng[:, None] + expected = rng.values[:, None] + tm.assert_numpy_array_equal(values, expected) + + +class TestWhere(object): + def test_where_other(self): # other is ndarray or Index i = pd.date_range('20130101', periods=3, tz='US/Eastern') @@ -46,6 +129,152 @@ def test_where_tz(self): expected = i2 tm.assert_index_equal(result, expected) + +class TestTake(object): + def test_take(self): + # GH#10295 + idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx') + idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D', + tz='Asia/Tokyo', name='idx') + + for idx in [idx1, idx2]: + result = idx.take([0]) + assert result == Timestamp('2011-01-01', tz=idx.tz) + + result = idx.take([0, 1, 2]) + expected = pd.date_range('2011-01-01', '2011-01-03', freq='D', + tz=idx.tz, name='idx') + tm.assert_index_equal(result, expected) + assert result.freq == expected.freq + + result = idx.take([0, 2, 4]) + expected = pd.date_range('2011-01-01', '2011-01-05', freq='2D', + tz=idx.tz, name='idx') + tm.assert_index_equal(result, expected) + assert result.freq == expected.freq + + result = idx.take([7, 4, 1]) + expected = pd.date_range('2011-01-08', '2011-01-02', freq='-3D', + tz=idx.tz, name='idx') + tm.assert_index_equal(result, expected) + assert result.freq == expected.freq + + result = idx.take([3, 2, 5]) + expected = DatetimeIndex(['2011-01-04', '2011-01-03', + '2011-01-06'], + freq=None, tz=idx.tz, name='idx') + tm.assert_index_equal(result, expected) + assert result.freq is None + + result = idx.take([-3, 2, 5]) + expected = DatetimeIndex(['2011-01-29', '2011-01-03', + '2011-01-06'], + freq=None, tz=idx.tz, name='idx') + tm.assert_index_equal(result, expected) + assert result.freq is None + + def test_take_invalid_kwargs(self): + idx = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx') + indices = [1, 6, 5, 9, 10, 13, 15, 3] + + msg = r"take\(\) got an unexpected keyword argument 'foo'" + tm.assert_raises_regex(TypeError, msg, idx.take, + indices, foo=2) + + msg = "the 'out' parameter is not supported" + tm.assert_raises_regex(ValueError, msg, idx.take, + indices, out=indices) + + msg = "the 'mode' parameter is not supported" + tm.assert_raises_regex(ValueError, msg, idx.take, + indices, mode='clip') + + # TODO: This method came from test_datetime; de-dup with version above + @pytest.mark.parametrize('tz', [None, 'US/Eastern', 'Asia/Tokyo']) + def test_take2(self, tz): + dates = [datetime(2010, 1, 1, 14), datetime(2010, 1, 1, 15), + datetime(2010, 1, 1, 17), datetime(2010, 1, 1, 21)] + + idx = DatetimeIndex(start='2010-01-01 09:00', + end='2010-02-01 09:00', freq='H', tz=tz, + name='idx') + expected = 
DatetimeIndex(dates, freq=None, name='idx', tz=tz) + + taken1 = idx.take([5, 6, 8, 12]) + taken2 = idx[[5, 6, 8, 12]] + + for taken in [taken1, taken2]: + tm.assert_index_equal(taken, expected) + assert isinstance(taken, DatetimeIndex) + assert taken.freq is None + assert taken.tz == expected.tz + assert taken.name == expected.name + + def test_take_fill_value(self): + # GH#12631 + idx = pd.DatetimeIndex(['2011-01-01', '2011-02-01', '2011-03-01'], + name='xxx') + result = idx.take(np.array([1, 0, -1])) + expected = pd.DatetimeIndex(['2011-02-01', '2011-01-01', '2011-03-01'], + name='xxx') + tm.assert_index_equal(result, expected) + + # fill_value + result = idx.take(np.array([1, 0, -1]), fill_value=True) + expected = pd.DatetimeIndex(['2011-02-01', '2011-01-01', 'NaT'], + name='xxx') + tm.assert_index_equal(result, expected) + + # allow_fill=False + result = idx.take(np.array([1, 0, -1]), allow_fill=False, + fill_value=True) + expected = pd.DatetimeIndex(['2011-02-01', '2011-01-01', '2011-03-01'], + name='xxx') + tm.assert_index_equal(result, expected) + + msg = ('When allow_fill=True and fill_value is not None, ' + 'all indices must be >= -1') + with tm.assert_raises_regex(ValueError, msg): + idx.take(np.array([1, 0, -2]), fill_value=True) + with tm.assert_raises_regex(ValueError, msg): + idx.take(np.array([1, 0, -5]), fill_value=True) + + with pytest.raises(IndexError): + idx.take(np.array([1, -5])) + + def test_take_fill_value_with_timezone(self): + idx = pd.DatetimeIndex(['2011-01-01', '2011-02-01', '2011-03-01'], + name='xxx', tz='US/Eastern') + result = idx.take(np.array([1, 0, -1])) + expected = pd.DatetimeIndex(['2011-02-01', '2011-01-01', '2011-03-01'], + name='xxx', tz='US/Eastern') + tm.assert_index_equal(result, expected) + + # fill_value + result = idx.take(np.array([1, 0, -1]), fill_value=True) + expected = pd.DatetimeIndex(['2011-02-01', '2011-01-01', 'NaT'], + name='xxx', tz='US/Eastern') + tm.assert_index_equal(result, expected) + + # allow_fill=False + result = idx.take(np.array([1, 0, -1]), allow_fill=False, + fill_value=True) + expected = pd.DatetimeIndex(['2011-02-01', '2011-01-01', '2011-03-01'], + name='xxx', tz='US/Eastern') + tm.assert_index_equal(result, expected) + + msg = ('When allow_fill=True and fill_value is not None, ' + 'all indices must be >= -1') + with tm.assert_raises_regex(ValueError, msg): + idx.take(np.array([1, 0, -2]), fill_value=True) + with tm.assert_raises_regex(ValueError, msg): + idx.take(np.array([1, 0, -5]), fill_value=True) + + with pytest.raises(IndexError): + idx.take(np.array([1, -5])) + + +class TestDatetimeIndex(object): @pytest.mark.parametrize('null', [None, np.nan, pd.NaT]) @pytest.mark.parametrize('tz', [None, 'UTC', 'US/Eastern']) def test_insert_nat(self, tz, null): @@ -253,233 +482,108 @@ def test_delete_slice(self): assert result.freq == expected.freq assert result.tz == expected.tz - def test_getitem(self): - idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx') - idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D', - tz='Asia/Tokyo', name='idx') - - for idx in [idx1, idx2]: - result = idx[0] - assert result == Timestamp('2011-01-01', tz=idx.tz) - - result = idx[0:5] - expected = pd.date_range('2011-01-01', '2011-01-05', freq='D', - tz=idx.tz, name='idx') - tm.assert_index_equal(result, expected) - assert result.freq == expected.freq - - result = idx[0:10:2] - expected = pd.date_range('2011-01-01', '2011-01-09', freq='2D', - tz=idx.tz, name='idx') - tm.assert_index_equal(result, expected) - assert 
result.freq == expected.freq - - result = idx[-20:-5:3] - expected = pd.date_range('2011-01-12', '2011-01-24', freq='3D', - tz=idx.tz, name='idx') - tm.assert_index_equal(result, expected) - assert result.freq == expected.freq - - result = idx[4::-1] - expected = DatetimeIndex(['2011-01-05', '2011-01-04', '2011-01-03', - '2011-01-02', '2011-01-01'], - freq='-1D', tz=idx.tz, name='idx') - tm.assert_index_equal(result, expected) - assert result.freq == expected.freq - - def test_take(self): - # GH 10295 - idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx') - idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D', - tz='Asia/Tokyo', name='idx') - - for idx in [idx1, idx2]: - result = idx.take([0]) - assert result == Timestamp('2011-01-01', tz=idx.tz) - - result = idx.take([0, 1, 2]) - expected = pd.date_range('2011-01-01', '2011-01-03', freq='D', - tz=idx.tz, name='idx') - tm.assert_index_equal(result, expected) - assert result.freq == expected.freq - - result = idx.take([0, 2, 4]) - expected = pd.date_range('2011-01-01', '2011-01-05', freq='2D', - tz=idx.tz, name='idx') - tm.assert_index_equal(result, expected) - assert result.freq == expected.freq - - result = idx.take([7, 4, 1]) - expected = pd.date_range('2011-01-08', '2011-01-02', freq='-3D', - tz=idx.tz, name='idx') - tm.assert_index_equal(result, expected) - assert result.freq == expected.freq - - result = idx.take([3, 2, 5]) - expected = DatetimeIndex(['2011-01-04', '2011-01-03', - '2011-01-06'], - freq=None, tz=idx.tz, name='idx') - tm.assert_index_equal(result, expected) - assert result.freq is None - - result = idx.take([-3, 2, 5]) - expected = DatetimeIndex(['2011-01-29', '2011-01-03', - '2011-01-06'], - freq=None, tz=idx.tz, name='idx') - tm.assert_index_equal(result, expected) - assert result.freq is None - - def test_take_invalid_kwargs(self): - idx = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx') - indices = [1, 6, 5, 9, 10, 13, 15, 3] - - msg = r"take\(\) got an unexpected keyword argument 'foo'" - tm.assert_raises_regex(TypeError, msg, idx.take, - indices, foo=2) - - msg = "the 'out' parameter is not supported" - tm.assert_raises_regex(ValueError, msg, idx.take, - indices, out=indices) - - msg = "the 'mode' parameter is not supported" - tm.assert_raises_regex(ValueError, msg, idx.take, - indices, mode='clip') - - # TODO: This method came from test_datetime; de-dup with version above - @pytest.mark.parametrize('tz', [None, 'US/Eastern', 'Asia/Tokyo']) - def test_take2(self, tz): - dates = [datetime(2010, 1, 1, 14), datetime(2010, 1, 1, 15), - datetime(2010, 1, 1, 17), datetime(2010, 1, 1, 21)] - - idx = DatetimeIndex(start='2010-01-01 09:00', - end='2010-02-01 09:00', freq='H', tz=tz, - name='idx') - expected = DatetimeIndex(dates, freq=None, name='idx', tz=tz) - - taken1 = idx.take([5, 6, 8, 12]) - taken2 = idx[[5, 6, 8, 12]] - - for taken in [taken1, taken2]: - tm.assert_index_equal(taken, expected) - assert isinstance(taken, DatetimeIndex) - assert taken.freq is None - assert taken.tz == expected.tz - assert taken.name == expected.name - - def test_take_fill_value(self): - # GH 12631 - idx = pd.DatetimeIndex(['2011-01-01', '2011-02-01', '2011-03-01'], - name='xxx') - result = idx.take(np.array([1, 0, -1])) - expected = pd.DatetimeIndex(['2011-02-01', '2011-01-01', '2011-03-01'], - name='xxx') - tm.assert_index_equal(result, expected) - - # fill_value - result = idx.take(np.array([1, 0, -1]), fill_value=True) - expected = pd.DatetimeIndex(['2011-02-01', '2011-01-01', 'NaT'], - 
name='xxx') - tm.assert_index_equal(result, expected) - - # allow_fill=False - result = idx.take(np.array([1, 0, -1]), allow_fill=False, - fill_value=True) - expected = pd.DatetimeIndex(['2011-02-01', '2011-01-01', '2011-03-01'], - name='xxx') - tm.assert_index_equal(result, expected) - - msg = ('When allow_fill=True and fill_value is not None, ' - 'all indices must be >= -1') - with tm.assert_raises_regex(ValueError, msg): - idx.take(np.array([1, 0, -2]), fill_value=True) - with tm.assert_raises_regex(ValueError, msg): - idx.take(np.array([1, 0, -5]), fill_value=True) - - with pytest.raises(IndexError): - idx.take(np.array([1, -5])) - - def test_take_fill_value_with_timezone(self): - idx = pd.DatetimeIndex(['2011-01-01', '2011-02-01', '2011-03-01'], - name='xxx', tz='US/Eastern') - result = idx.take(np.array([1, 0, -1])) - expected = pd.DatetimeIndex(['2011-02-01', '2011-01-01', '2011-03-01'], - name='xxx', tz='US/Eastern') - tm.assert_index_equal(result, expected) - - # fill_value - result = idx.take(np.array([1, 0, -1]), fill_value=True) - expected = pd.DatetimeIndex(['2011-02-01', '2011-01-01', 'NaT'], - name='xxx', tz='US/Eastern') - tm.assert_index_equal(result, expected) - - # allow_fill=False - result = idx.take(np.array([1, 0, -1]), allow_fill=False, - fill_value=True) - expected = pd.DatetimeIndex(['2011-02-01', '2011-01-01', '2011-03-01'], - name='xxx', tz='US/Eastern') - tm.assert_index_equal(result, expected) - - msg = ('When allow_fill=True and fill_value is not None, ' - 'all indices must be >= -1') - with tm.assert_raises_regex(ValueError, msg): - idx.take(np.array([1, 0, -2]), fill_value=True) - with tm.assert_raises_regex(ValueError, msg): - idx.take(np.array([1, 0, -5]), fill_value=True) - - with pytest.raises(IndexError): - idx.take(np.array([1, -5])) - - -class TestBusinessDatetimeIndexIndexing(object): - def setup_method(self, method): - self.rng = pd.bdate_range(START, END) - - def test_getitem(self): - smaller = self.rng[:5] - exp = DatetimeIndex(self.rng.view(np.ndarray)[:5]) - tm.assert_index_equal(smaller, exp) - - assert smaller.offset == self.rng.offset - - sliced = self.rng[::5] - assert sliced.offset == BDay() * 5 - - fancy_indexed = self.rng[[4, 3, 2, 1, 0]] - assert len(fancy_indexed) == 5 - assert isinstance(fancy_indexed, DatetimeIndex) - assert fancy_indexed.freq is None - - # 32-bit vs. 64-bit platforms - assert self.rng[4] == self.rng[np.int_(4)] - - def test_getitem_matplotlib_hackaround(self): - values = self.rng[:, None] - expected = self.rng.values[:, None] - tm.assert_numpy_array_equal(values, expected) - - -class TestCustomDatetimeIndexIndexing(object): - def setup_method(self, method): - self.rng = pd.bdate_range(START, END, freq='C') - - def test_getitem(self): - smaller = self.rng[:5] - exp = DatetimeIndex(self.rng.view(np.ndarray)[:5]) - tm.assert_index_equal(smaller, exp) - assert smaller.offset == self.rng.offset - - sliced = self.rng[::5] - assert sliced.offset == CDay() * 5 - - fancy_indexed = self.rng[[4, 3, 2, 1, 0]] - assert len(fancy_indexed) == 5 - assert isinstance(fancy_indexed, DatetimeIndex) - assert fancy_indexed.freq is None - - # 32-bit vs. 
64-bit platforms - assert self.rng[4] == self.rng[np.int_(4)] - - def test_getitem_matplotlib_hackaround(self): - values = self.rng[:, None] - expected = self.rng.values[:, None] - tm.assert_numpy_array_equal(values, expected) + def test_get_loc(self): + idx = pd.date_range('2000-01-01', periods=3) + + for method in [None, 'pad', 'backfill', 'nearest']: + assert idx.get_loc(idx[1], method) == 1 + assert idx.get_loc(idx[1].to_pydatetime(), method) == 1 + assert idx.get_loc(str(idx[1]), method) == 1 + + if method is not None: + assert idx.get_loc(idx[1], method, + tolerance=pd.Timedelta('0 days')) == 1 + + assert idx.get_loc('2000-01-01', method='nearest') == 0 + assert idx.get_loc('2000-01-01T12', method='nearest') == 1 + + assert idx.get_loc('2000-01-01T12', method='nearest', + tolerance='1 day') == 1 + assert idx.get_loc('2000-01-01T12', method='nearest', + tolerance=pd.Timedelta('1D')) == 1 + assert idx.get_loc('2000-01-01T12', method='nearest', + tolerance=np.timedelta64(1, 'D')) == 1 + assert idx.get_loc('2000-01-01T12', method='nearest', + tolerance=timedelta(1)) == 1 + with tm.assert_raises_regex(ValueError, + 'unit abbreviation w/o a number'): + idx.get_loc('2000-01-01T12', method='nearest', tolerance='foo') + with pytest.raises(KeyError): + idx.get_loc('2000-01-01T03', method='nearest', tolerance='2 hours') + with pytest.raises( + ValueError, + match='tolerance size must match target index size'): + idx.get_loc('2000-01-01', method='nearest', + tolerance=[pd.Timedelta('1day').to_timedelta64(), + pd.Timedelta('1day').to_timedelta64()]) + + assert idx.get_loc('2000', method='nearest') == slice(0, 3) + assert idx.get_loc('2000-01', method='nearest') == slice(0, 3) + + assert idx.get_loc('1999', method='nearest') == 0 + assert idx.get_loc('2001', method='nearest') == 2 + + with pytest.raises(KeyError): + idx.get_loc('1999', method='pad') + with pytest.raises(KeyError): + idx.get_loc('2001', method='backfill') + + with pytest.raises(KeyError): + idx.get_loc('foobar') + with pytest.raises(TypeError): + idx.get_loc(slice(2)) + + idx = pd.to_datetime(['2000-01-01', '2000-01-04']) + assert idx.get_loc('2000-01-02', method='nearest') == 0 + assert idx.get_loc('2000-01-03', method='nearest') == 1 + assert idx.get_loc('2000-01', method='nearest') == slice(0, 2) + + # time indexing + idx = pd.date_range('2000-01-01', periods=24, freq='H') + tm.assert_numpy_array_equal(idx.get_loc(time(12)), + np.array([12]), check_dtype=False) + tm.assert_numpy_array_equal(idx.get_loc(time(12, 30)), + np.array([]), check_dtype=False) + with pytest.raises(NotImplementedError): + idx.get_loc(time(12, 30), method='pad') + + def test_get_indexer(self): + idx = pd.date_range('2000-01-01', periods=3) + exp = np.array([0, 1, 2], dtype=np.intp) + tm.assert_numpy_array_equal(idx.get_indexer(idx), exp) + + target = idx[0] + pd.to_timedelta(['-1 hour', '12 hours', + '1 day 1 hour']) + tm.assert_numpy_array_equal(idx.get_indexer(target, 'pad'), + np.array([-1, 0, 1], dtype=np.intp)) + tm.assert_numpy_array_equal(idx.get_indexer(target, 'backfill'), + np.array([0, 1, 2], dtype=np.intp)) + tm.assert_numpy_array_equal(idx.get_indexer(target, 'nearest'), + np.array([0, 1, 1], dtype=np.intp)) + tm.assert_numpy_array_equal( + idx.get_indexer(target, 'nearest', + tolerance=pd.Timedelta('1 hour')), + np.array([0, -1, 1], dtype=np.intp)) + tol_raw = [pd.Timedelta('1 hour'), + pd.Timedelta('1 hour'), + pd.Timedelta('1 hour').to_timedelta64(), ] + tm.assert_numpy_array_equal( + idx.get_indexer(target, 'nearest', + 
tolerance=[np.timedelta64(x) for x in tol_raw]), + np.array([0, -1, 1], dtype=np.intp)) + tol_bad = [pd.Timedelta('2 hour').to_timedelta64(), + pd.Timedelta('1 hour').to_timedelta64(), + 'foo', ] + with pytest.raises( + ValueError, match='abbreviation w/o a number'): + idx.get_indexer(target, 'nearest', tolerance=tol_bad) + with pytest.raises(ValueError): + idx.get_indexer(idx[[0]], method='nearest', tolerance='foo') + + def test_reasonable_keyerror(self): + # GH#1062 + index = DatetimeIndex(['1/3/2000']) + try: + index.get_loc('1/1/2000') + except KeyError as e: + assert '2000' in str(e) diff --git a/pandas/tests/indexes/period/test_indexing.py b/pandas/tests/indexes/period/test_indexing.py index b913934195260..6b8e2203e83fd 100644 --- a/pandas/tests/indexes/period/test_indexing.py +++ b/pandas/tests/indexes/period/test_indexing.py @@ -6,9 +6,9 @@ import pandas as pd from pandas.util import testing as tm from pandas.compat import lrange -from pandas._libs import tslib, tslibs +from pandas._libs import tslibs from pandas import (PeriodIndex, Series, DatetimeIndex, - period_range, Period) + period_range, Period, notna) from pandas._libs.tslibs import period as libperiod @@ -119,7 +119,7 @@ def test_getitem_datetime(self): def test_getitem_nat(self): idx = pd.PeriodIndex(['2011-01', 'NaT', '2011-02'], freq='M') assert idx[0] == pd.Period('2011-01', freq='M') - assert idx[1] is tslib.NaT + assert idx[1] is pd.NaT s = pd.Series([0, 1, 2], index=idx) assert s[pd.NaT] == 1 @@ -127,7 +127,7 @@ def test_getitem_nat(self): s = pd.Series(idx, index=idx) assert (s[pd.Period('2011-01', freq='M')] == pd.Period('2011-01', freq='M')) - assert s[pd.NaT] is tslib.NaT + assert s[pd.NaT] is pd.NaT def test_getitem_list_periods(self): # GH 7710 @@ -190,31 +190,43 @@ def test_getitem_day(self): s[v] -class TestIndexing(object): +class TestWhere(object): + @pytest.mark.parametrize('klass', [list, tuple, np.array, Series]) + def test_where(self, klass): + i = period_range('20130101', periods=5, freq='D') + cond = [True] * len(i) + expected = i + result = i.where(klass(cond)) + tm.assert_index_equal(result, expected) - def test_get_loc_msg(self): - idx = period_range('2000-1-1', freq='A', periods=10) - bad_period = Period('2012', 'A') - pytest.raises(KeyError, idx.get_loc, bad_period) + cond = [False] + [True] * (len(i) - 1) + expected = PeriodIndex([pd.NaT] + i[1:].tolist(), freq='D') + result = i.where(klass(cond)) + tm.assert_index_equal(result, expected) - try: - idx.get_loc(bad_period) - except KeyError as inst: - assert inst.args[0] == bad_period + def test_where_other(self): + i = period_range('20130101', periods=5, freq='D') + for arr in [np.nan, pd.NaT]: + result = i.where(notna(i), other=np.nan) + expected = i + tm.assert_index_equal(result, expected) - def test_get_loc_nat(self): - didx = DatetimeIndex(['2011-01-01', 'NaT', '2011-01-03']) - pidx = PeriodIndex(['2011-01-01', 'NaT', '2011-01-03'], freq='M') + i2 = i.copy() + i2 = pd.PeriodIndex([pd.NaT, pd.NaT] + i[2:].tolist(), + freq='D') + result = i.where(notna(i2), i2) + tm.assert_index_equal(result, i2) - # check DatetimeIndex compat - for idx in [didx, pidx]: - assert idx.get_loc(pd.NaT) == 1 - assert idx.get_loc(None) == 1 - assert idx.get_loc(float('nan')) == 1 - assert idx.get_loc(np.nan) == 1 + i2 = i.copy() + i2 = pd.PeriodIndex([pd.NaT, pd.NaT] + i[2:].tolist(), + freq='D') + result = i.where(notna(i2), i2.values) + tm.assert_index_equal(result, i2) + +class TestTake(object): def test_take(self): - # GH 10295 + # GH#10295 idx1 = 
pd.period_range('2011-01-01', '2011-01-31', freq='D', name='idx') @@ -278,7 +290,7 @@ def test_take_misc(self): assert taken.name == expected.name def test_take_fill_value(self): - # GH 12631 + # GH#12631 idx = pd.PeriodIndex(['2011-01-01', '2011-02-01', '2011-03-01'], name='xxx', freq='D') result = idx.take(np.array([1, 0, -1])) @@ -309,6 +321,30 @@ def test_take_fill_value(self): with pytest.raises(IndexError): idx.take(np.array([1, -5])) + +class TestIndexing(object): + + def test_get_loc_msg(self): + idx = period_range('2000-1-1', freq='A', periods=10) + bad_period = Period('2012', 'A') + pytest.raises(KeyError, idx.get_loc, bad_period) + + try: + idx.get_loc(bad_period) + except KeyError as inst: + assert inst.args[0] == bad_period + + def test_get_loc_nat(self): + didx = DatetimeIndex(['2011-01-01', 'NaT', '2011-01-03']) + pidx = PeriodIndex(['2011-01-01', 'NaT', '2011-01-03'], freq='M') + + # check DatetimeIndex compat + for idx in [didx, pidx]: + assert idx.get_loc(pd.NaT) == 1 + assert idx.get_loc(None) == 1 + assert idx.get_loc(float('nan')) == 1 + assert idx.get_loc(np.nan) == 1 + def test_get_loc(self): # GH 17717 p0 = pd.Period('2017-09-01') diff --git a/pandas/tests/indexes/period/test_period.py b/pandas/tests/indexes/period/test_period.py index dd437363cfc1d..4548d7fa1a468 100644 --- a/pandas/tests/indexes/period/test_period.py +++ b/pandas/tests/indexes/period/test_period.py @@ -5,7 +5,7 @@ import pandas as pd import pandas.util._test_decorators as td from pandas.util import testing as tm -from pandas import (PeriodIndex, period_range, notna, DatetimeIndex, NaT, +from pandas import (PeriodIndex, period_range, DatetimeIndex, NaT, Index, Period, Series, DataFrame, date_range, offsets) @@ -33,38 +33,9 @@ def test_pickle_round_trip(self, freq): result = tm.round_trip_pickle(idx) tm.assert_index_equal(result, idx) - @pytest.mark.parametrize('klass', [list, tuple, np.array, Series]) - def test_where(self, klass): - i = self.create_index() - cond = [True] * len(i) - expected = i - result = i.where(klass(cond)) - tm.assert_index_equal(result, expected) - - cond = [False] + [True] * (len(i) - 1) - expected = PeriodIndex([NaT] + i[1:].tolist(), freq='D') - result = i.where(klass(cond)) - tm.assert_index_equal(result, expected) - - def test_where_other(self): - - i = self.create_index() - for arr in [np.nan, pd.NaT]: - result = i.where(notna(i), other=np.nan) - expected = i - tm.assert_index_equal(result, expected) - - i2 = i.copy() - i2 = pd.PeriodIndex([pd.NaT, pd.NaT] + i[2:].tolist(), - freq='D') - result = i.where(notna(i2), i2) - tm.assert_index_equal(result, i2) - - i2 = i.copy() - i2 = pd.PeriodIndex([pd.NaT, pd.NaT] + i[2:].tolist(), - freq='D') - result = i.where(notna(i2), i2.values) - tm.assert_index_equal(result, i2) + def test_where(self): + # This is handled in test_indexing + pass def test_repeat(self): # GH10183 diff --git a/pandas/tests/indexes/timedeltas/test_arithmetic.py b/pandas/tests/indexes/timedeltas/test_arithmetic.py index 24341b3419859..282501860f7e5 100644 --- a/pandas/tests/indexes/timedeltas/test_arithmetic.py +++ b/pandas/tests/indexes/timedeltas/test_arithmetic.py @@ -123,8 +123,149 @@ def test_comparisons_nat(self): tm.assert_numpy_array_equal(result, expected) +class TestTimedeltaIndexMultiplicationDivision(object): + # __mul__, __rmul__, + # __div__, __rdiv__, __floordiv__, __rfloordiv__, + # __mod__, __rmod__, __divmod__, __rdivmod__ + + # ------------------------------------------------------------- + # Multiplication + # organized with scalar 
others first, then array-like + + def test_tdi_mul_int(self): + idx = TimedeltaIndex(np.arange(5, dtype='int64')) + result = idx * 1 + tm.assert_index_equal(result, idx) + + def test_tdi_rmul_int(self): + idx = TimedeltaIndex(np.arange(5, dtype='int64')) + result = 1 * idx + tm.assert_index_equal(result, idx) + + def test_tdi_mul_tdlike_scalar_raises(self, delta): + rng = timedelta_range('1 days', '10 days', name='foo') + with pytest.raises(TypeError): + rng * delta + + def test_tdi_mul_int_array_zerodim(self): + rng5 = np.arange(5, dtype='int64') + idx = TimedeltaIndex(rng5) + expected = TimedeltaIndex(rng5 * 5) + result = idx * np.array(5, dtype='int64') + tm.assert_index_equal(result, expected) + + def test_tdi_mul_int_array(self): + rng5 = np.arange(5, dtype='int64') + idx = TimedeltaIndex(rng5) + didx = TimedeltaIndex(rng5 ** 2) + + result = idx * rng5 + tm.assert_index_equal(result, didx) + + def test_tdi_mul_dti_raises(self): + idx = TimedeltaIndex(np.arange(5, dtype='int64')) + with pytest.raises(TypeError): + idx * idx + + def test_tdi_mul_too_short_raises(self): + idx = TimedeltaIndex(np.arange(5, dtype='int64')) + with pytest.raises(TypeError): + idx * TimedeltaIndex(np.arange(3)) + with pytest.raises(ValueError): + idx * np.array([1, 2]) + + def test_tdi_mul_int_series(self): + idx = TimedeltaIndex(np.arange(5, dtype='int64')) + didx = TimedeltaIndex(np.arange(5, dtype='int64') ** 2) + + result = idx * Series(np.arange(5, dtype='int64')) + + tm.assert_series_equal(result, Series(didx)) + + def test_tdi_mul_float_series(self): + idx = TimedeltaIndex(np.arange(5, dtype='int64')) + + rng5f = np.arange(5, dtype='float64') + result = idx * Series(rng5f + 0.1) + expected = Series(TimedeltaIndex(rng5f * (rng5f + 0.1))) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize('other', [np.arange(1, 11), + pd.Int64Index(range(1, 11)), + pd.UInt64Index(range(1, 11)), + pd.Float64Index(range(1, 11)), + pd.RangeIndex(1, 11)]) + def test_tdi_rmul_arraylike(self, other): + tdi = TimedeltaIndex(['1 Day'] * 10) + expected = timedelta_range('1 days', '10 days') + + result = other * tdi + tm.assert_index_equal(result, expected) + commute = tdi * other + tm.assert_index_equal(commute, expected) + + # ------------------------------------------------------------- + # TimedeltaIndex.__div__ + + def test_tdi_div_int(self): + idx = TimedeltaIndex(np.arange(5, dtype='int64')) + result = idx / 1 + tm.assert_index_equal(result, idx) + + def test_tdi_div_tdlike_scalar(self, delta): + rng = timedelta_range('1 days', '10 days', name='foo') + expected = Int64Index((np.arange(10) + 1) * 12, name='foo') + + result = rng / delta + tm.assert_index_equal(result, expected, exact=False) + + def test_tdi_div_tdlike_scalar_with_nat(self, delta): + rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo') + expected = Float64Index([12, np.nan, 24], name='foo') + result = rng / delta + tm.assert_index_equal(result, expected) + + def test_tdi_div_nat_raises(self): + # don't allow division by NaT (make could in the future) + rng = timedelta_range('1 days', '10 days', name='foo') + with pytest.raises(TypeError): + rng / pd.NaT + + # ------------------------------------------------------------- + # TimedeltaIndex.__floordiv__ + + def test_tdi_floordiv_int(self): + idx = TimedeltaIndex(np.arange(5, dtype='int64')) + result = idx // 1 + tm.assert_index_equal(result, idx) + + def test_tdi_floordiv_tdlike_scalar(self, delta): + tdi = timedelta_range('1 days', '10 days', name='foo') + expected = 
Int64Index((np.arange(10) + 1) * 12, name='foo') + + result = tdi // delta + tm.assert_index_equal(result, expected, exact=False) + + @pytest.mark.parametrize('scalar_td', [ + timedelta(minutes=10, seconds=7), + Timedelta('10m7s'), + Timedelta('10m7s').to_timedelta64()]) + def test_tdi_floordiv_timedelta_scalar(self, scalar_td): + # GH#19125 + tdi = TimedeltaIndex(['00:05:03', '00:05:03', pd.NaT], freq=None) + expected = pd.Index([2.0, 2.0, np.nan]) + + res = tdi.__rfloordiv__(scalar_td) + tm.assert_index_equal(res, expected) + + expected = pd.Index([0.0, 0.0, np.nan]) + + res = tdi // (scalar_td) + tm.assert_index_equal(res, expected) + + class TestTimedeltaIndexArithmetic(object): - _holder = TimedeltaIndex + # Addition and Subtraction Operations # ------------------------------------------------------------- # Invalid Operations @@ -138,6 +279,20 @@ def test_tdi_add_str_invalid(self): with pytest.raises(TypeError): 'a' + tdi + @pytest.mark.parametrize('freq', [None, 'H']) + def test_tdi_sub_period(self, freq): + # GH#13078 + # not supported, check TypeError + p = pd.Period('2011-01-01', freq='D') + + idx = pd.TimedeltaIndex(['1 hours', '2 hours'], freq=freq) + + with pytest.raises(TypeError): + idx - p + + with pytest.raises(TypeError): + p - idx + # ------------------------------------------------------------- # TimedeltaIndex.shift is used by __add__/__sub__ @@ -310,69 +465,6 @@ def test_tdi_add_sub_anchored_offset_arraylike(self, box): with tm.assert_produces_warning(PerformanceWarning): anchored - tdi - def test_mul_int(self): - idx = self._holder(np.arange(5, dtype='int64')) - result = idx * 1 - tm.assert_index_equal(result, idx) - - def test_rmul_int(self): - idx = self._holder(np.arange(5, dtype='int64')) - result = 1 * idx - tm.assert_index_equal(result, idx) - - def test_div_int(self): - idx = self._holder(np.arange(5, dtype='int64')) - result = idx / 1 - tm.assert_index_equal(result, idx) - - def test_floordiv_int(self): - idx = self._holder(np.arange(5, dtype='int64')) - result = idx // 1 - tm.assert_index_equal(result, idx) - - def test_mul_int_array_zerodim(self): - rng5 = np.arange(5, dtype='int64') - idx = self._holder(rng5) - expected = self._holder(rng5 * 5) - result = idx * np.array(5, dtype='int64') - tm.assert_index_equal(result, expected) - - def test_mul_int_array(self): - rng5 = np.arange(5, dtype='int64') - idx = self._holder(rng5) - didx = self._holder(rng5 ** 2) - - result = idx * rng5 - tm.assert_index_equal(result, didx) - - def test_mul_int_series(self): - idx = self._holder(np.arange(5, dtype='int64')) - didx = self._holder(np.arange(5, dtype='int64') ** 2) - - result = idx * Series(np.arange(5, dtype='int64')) - - tm.assert_series_equal(result, Series(didx)) - - def test_mul_float_series(self): - idx = self._holder(np.arange(5, dtype='int64')) - - rng5f = np.arange(5, dtype='float64') - result = idx * Series(rng5f + 0.1) - expected = Series(self._holder(rng5f * (rng5f + 0.1))) - tm.assert_series_equal(result, expected) - - def test_dti_mul_dti_raises(self): - idx = self._holder(np.arange(5, dtype='int64')) - with pytest.raises(TypeError): - idx * idx - - def test_dti_mul_too_short_raises(self): - idx = self._holder(np.arange(5, dtype='int64')) - with pytest.raises(TypeError): - idx * self._holder(np.arange(3)) - with pytest.raises(ValueError): - idx * np.array([1, 2]) - def test_ufunc_coercions(self): # normal ops are also tested in tseries/test_timedeltas.py idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'], @@ -496,68 +588,6 @@ def 
test_tdi_radd_timestamp(self): # ------------------------------------------------------------- - @pytest.mark.parametrize('scalar_td', [ - timedelta(minutes=10, seconds=7), - Timedelta('10m7s'), - Timedelta('10m7s').to_timedelta64()]) - def test_tdi_floordiv_timedelta_scalar(self, scalar_td): - # GH#19125 - tdi = TimedeltaIndex(['00:05:03', '00:05:03', pd.NaT], freq=None) - expected = pd.Index([2.0, 2.0, np.nan]) - - res = tdi.__rfloordiv__(scalar_td) - tm.assert_index_equal(res, expected) - - expected = pd.Index([0.0, 0.0, np.nan]) - - res = tdi // (scalar_td) - tm.assert_index_equal(res, expected) - - def test_tdi_floordiv_tdlike_scalar(self, delta): - tdi = timedelta_range('1 days', '10 days', name='foo') - expected = Int64Index((np.arange(10) + 1) * 12, name='foo') - - result = tdi // delta - tm.assert_index_equal(result, expected, exact=False) - - def test_tdi_mul_tdlike_scalar_raises(self, delta): - rng = timedelta_range('1 days', '10 days', name='foo') - with pytest.raises(TypeError): - rng * delta - - def test_tdi_div_nat_raises(self): - # don't allow division by NaT (make could in the future) - rng = timedelta_range('1 days', '10 days', name='foo') - with pytest.raises(TypeError): - rng / pd.NaT - - def test_tdi_div_tdlike_scalar(self, delta): - rng = timedelta_range('1 days', '10 days', name='foo') - expected = Int64Index((np.arange(10) + 1) * 12, name='foo') - - result = rng / delta - tm.assert_index_equal(result, expected, exact=False) - - def test_tdi_div_tdlike_scalar_with_nat(self, delta): - rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo') - expected = Float64Index([12, np.nan, 24], name='foo') - result = rng / delta - tm.assert_index_equal(result, expected) - - @pytest.mark.parametrize('other', [np.arange(1, 11), - pd.Int64Index(range(1, 11)), - pd.UInt64Index(range(1, 11)), - pd.Float64Index(range(1, 11)), - pd.RangeIndex(1, 11)]) - def test_tdi_rmul_arraylike(self, other): - tdi = TimedeltaIndex(['1 Day'] * 10) - expected = timedelta_range('1 days', '10 days') - - result = other * tdi - tm.assert_index_equal(result, expected) - commute = tdi * other - tm.assert_index_equal(commute, expected) - def test_subtraction_ops(self): # with datetimes/timedelta and tdi/dti tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo') @@ -685,20 +715,6 @@ def test_dti_tdi_numeric_ops(self): expected = DatetimeIndex(['20121231', pd.NaT, '20130101']) tm.assert_index_equal(result, expected) - @pytest.mark.parametrize('freq', [None, 'H']) - def test_sub_period(self, freq): - # GH 13078 - # not supported, check TypeError - p = pd.Period('2011-01-01', freq='D') - - idx = pd.TimedeltaIndex(['1 hours', '2 hours'], freq=freq) - - with pytest.raises(TypeError): - idx - p - - with pytest.raises(TypeError): - p - idx - def test_addition_ops(self): # with datetimes/timedelta and tdi/dti tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo') diff --git a/pandas/tests/indexes/timedeltas/test_indexing.py b/pandas/tests/indexes/timedeltas/test_indexing.py index 59e38c2e738b0..08992188265bd 100644 --- a/pandas/tests/indexes/timedeltas/test_indexing.py +++ b/pandas/tests/indexes/timedeltas/test_indexing.py @@ -8,116 +8,7 @@ from pandas import TimedeltaIndex, timedelta_range, compat, Index, Timedelta -class TestTimedeltaIndex(object): - - def test_insert(self): - - idx = TimedeltaIndex(['4day', '1day', '2day'], name='idx') - - result = idx.insert(2, timedelta(days=5)) - exp = TimedeltaIndex(['4day', '1day', '5day', '2day'], name='idx') - tm.assert_index_equal(result, exp) - - # 
insertion of non-datetime should coerce to object index - result = idx.insert(1, 'inserted') - expected = Index([Timedelta('4day'), 'inserted', Timedelta('1day'), - Timedelta('2day')], name='idx') - assert not isinstance(result, TimedeltaIndex) - tm.assert_index_equal(result, expected) - assert result.name == expected.name - - idx = timedelta_range('1day 00:00:01', periods=3, freq='s', name='idx') - - # preserve freq - expected_0 = TimedeltaIndex(['1day', '1day 00:00:01', '1day 00:00:02', - '1day 00:00:03'], - name='idx', freq='s') - expected_3 = TimedeltaIndex(['1day 00:00:01', '1day 00:00:02', - '1day 00:00:03', '1day 00:00:04'], - name='idx', freq='s') - - # reset freq to None - expected_1_nofreq = TimedeltaIndex(['1day 00:00:01', '1day 00:00:01', - '1day 00:00:02', '1day 00:00:03'], - name='idx', freq=None) - expected_3_nofreq = TimedeltaIndex(['1day 00:00:01', '1day 00:00:02', - '1day 00:00:03', '1day 00:00:05'], - name='idx', freq=None) - - cases = [(0, Timedelta('1day'), expected_0), - (-3, Timedelta('1day'), expected_0), - (3, Timedelta('1day 00:00:04'), expected_3), - (1, Timedelta('1day 00:00:01'), expected_1_nofreq), - (3, Timedelta('1day 00:00:05'), expected_3_nofreq)] - - for n, d, expected in cases: - result = idx.insert(n, d) - tm.assert_index_equal(result, expected) - assert result.name == expected.name - assert result.freq == expected.freq - - # GH 18295 (test missing) - expected = TimedeltaIndex(['1day', pd.NaT, '2day', '3day']) - for na in (np.nan, pd.NaT, None): - result = timedelta_range('1day', '3day').insert(1, na) - tm.assert_index_equal(result, expected) - - def test_delete(self): - idx = timedelta_range(start='1 Days', periods=5, freq='D', name='idx') - - # prserve freq - expected_0 = timedelta_range(start='2 Days', periods=4, freq='D', - name='idx') - expected_4 = timedelta_range(start='1 Days', periods=4, freq='D', - name='idx') - - # reset freq to None - expected_1 = TimedeltaIndex( - ['1 day', '3 day', '4 day', '5 day'], freq=None, name='idx') - - cases = {0: expected_0, - -5: expected_0, - -1: expected_4, - 4: expected_4, - 1: expected_1} - for n, expected in compat.iteritems(cases): - result = idx.delete(n) - tm.assert_index_equal(result, expected) - assert result.name == expected.name - assert result.freq == expected.freq - - with pytest.raises((IndexError, ValueError)): - # either depeidnig on numpy version - result = idx.delete(5) - - def test_delete_slice(self): - idx = timedelta_range(start='1 days', periods=10, freq='D', name='idx') - - # prserve freq - expected_0_2 = timedelta_range(start='4 days', periods=7, freq='D', - name='idx') - expected_7_9 = timedelta_range(start='1 days', periods=7, freq='D', - name='idx') - - # reset freq to None - expected_3_5 = TimedeltaIndex(['1 d', '2 d', '3 d', - '7 d', '8 d', '9 d', '10d'], - freq=None, name='idx') - - cases = {(0, 1, 2): expected_0_2, - (7, 8, 9): expected_7_9, - (3, 4, 5): expected_3_5} - for n, expected in compat.iteritems(cases): - result = idx.delete(n) - tm.assert_index_equal(result, expected) - assert result.name == expected.name - assert result.freq == expected.freq - - result = idx.delete(slice(n[0], n[-1] + 1)) - tm.assert_index_equal(result, expected) - assert result.name == expected.name - assert result.freq == expected.freq - +class TestGetItem(object): def test_getitem(self): idx1 = timedelta_range('1 day', '31 day', freq='D', name='idx') @@ -150,6 +41,13 @@ def test_getitem(self): tm.assert_index_equal(result, expected) assert result.freq == expected.freq + +class TestWhere(object): + # 
placeholder for symmetry with DatetimeIndex and PeriodIndex tests + pass + + +class TestTake(object): def test_take(self): # GH 10295 idx1 = timedelta_range('1 day', '31 day', freq='D', name='idx') @@ -252,6 +150,117 @@ def test_take_fill_value(self): with pytest.raises(IndexError): idx.take(np.array([1, -5])) + +class TestTimedeltaIndex(object): + + def test_insert(self): + + idx = TimedeltaIndex(['4day', '1day', '2day'], name='idx') + + result = idx.insert(2, timedelta(days=5)) + exp = TimedeltaIndex(['4day', '1day', '5day', '2day'], name='idx') + tm.assert_index_equal(result, exp) + + # insertion of non-datetime should coerce to object index + result = idx.insert(1, 'inserted') + expected = Index([Timedelta('4day'), 'inserted', Timedelta('1day'), + Timedelta('2day')], name='idx') + assert not isinstance(result, TimedeltaIndex) + tm.assert_index_equal(result, expected) + assert result.name == expected.name + + idx = timedelta_range('1day 00:00:01', periods=3, freq='s', name='idx') + + # preserve freq + expected_0 = TimedeltaIndex(['1day', '1day 00:00:01', '1day 00:00:02', + '1day 00:00:03'], + name='idx', freq='s') + expected_3 = TimedeltaIndex(['1day 00:00:01', '1day 00:00:02', + '1day 00:00:03', '1day 00:00:04'], + name='idx', freq='s') + + # reset freq to None + expected_1_nofreq = TimedeltaIndex(['1day 00:00:01', '1day 00:00:01', + '1day 00:00:02', '1day 00:00:03'], + name='idx', freq=None) + expected_3_nofreq = TimedeltaIndex(['1day 00:00:01', '1day 00:00:02', + '1day 00:00:03', '1day 00:00:05'], + name='idx', freq=None) + + cases = [(0, Timedelta('1day'), expected_0), + (-3, Timedelta('1day'), expected_0), + (3, Timedelta('1day 00:00:04'), expected_3), + (1, Timedelta('1day 00:00:01'), expected_1_nofreq), + (3, Timedelta('1day 00:00:05'), expected_3_nofreq)] + + for n, d, expected in cases: + result = idx.insert(n, d) + tm.assert_index_equal(result, expected) + assert result.name == expected.name + assert result.freq == expected.freq + + # GH 18295 (test missing) + expected = TimedeltaIndex(['1day', pd.NaT, '2day', '3day']) + for na in (np.nan, pd.NaT, None): + result = timedelta_range('1day', '3day').insert(1, na) + tm.assert_index_equal(result, expected) + + def test_delete(self): + idx = timedelta_range(start='1 Days', periods=5, freq='D', name='idx') + + # prserve freq + expected_0 = timedelta_range(start='2 Days', periods=4, freq='D', + name='idx') + expected_4 = timedelta_range(start='1 Days', periods=4, freq='D', + name='idx') + + # reset freq to None + expected_1 = TimedeltaIndex( + ['1 day', '3 day', '4 day', '5 day'], freq=None, name='idx') + + cases = {0: expected_0, + -5: expected_0, + -1: expected_4, + 4: expected_4, + 1: expected_1} + for n, expected in compat.iteritems(cases): + result = idx.delete(n) + tm.assert_index_equal(result, expected) + assert result.name == expected.name + assert result.freq == expected.freq + + with pytest.raises((IndexError, ValueError)): + # either depeidnig on numpy version + result = idx.delete(5) + + def test_delete_slice(self): + idx = timedelta_range(start='1 days', periods=10, freq='D', name='idx') + + # prserve freq + expected_0_2 = timedelta_range(start='4 days', periods=7, freq='D', + name='idx') + expected_7_9 = timedelta_range(start='1 days', periods=7, freq='D', + name='idx') + + # reset freq to None + expected_3_5 = TimedeltaIndex(['1 d', '2 d', '3 d', + '7 d', '8 d', '9 d', '10d'], + freq=None, name='idx') + + cases = {(0, 1, 2): expected_0_2, + (7, 8, 9): expected_7_9, + (3, 4, 5): expected_3_5} + for n, expected in 
compat.iteritems(cases): + result = idx.delete(n) + tm.assert_index_equal(result, expected) + assert result.name == expected.name + assert result.freq == expected.freq + + result = idx.delete(slice(n[0], n[-1] + 1)) + tm.assert_index_equal(result, expected) + assert result.name == expected.name + assert result.freq == expected.freq + def test_get_loc(self): idx = pd.to_timedelta(['0 days', '1 days', '2 days'])
Collect DTI/TDI/PI indexing tests ~symmetrically, so the datetimes/period/timedeltas test modules share the same TestGetItem / TestWhere / TestTake class layout.
https://api.github.com/repos/pandas-dev/pandas/pulls/19848
2018-02-22T20:44:09Z
2018-02-23T12:35:07Z
2018-02-23T12:35:07Z
2018-02-23T15:23:28Z
Fix+test DTI/TDI/PI add/sub with ndarray[datetime64/timedelta64]
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index fd3c3a5a7a301..b0e4ae02ff8ee 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -755,6 +755,7 @@ Datetimelike - Bug in :func:`Timestamp.floor` :func:`DatetimeIndex.floor` where time stamps far in the future and past were not rounded correctly (:issue:`19206`) - Bug in :func:`to_datetime` where passing an out-of-bounds datetime with ``errors='coerce'`` and ``utc=True`` would raise ``OutOfBoundsDatetime`` instead of parsing to ``NaT`` (:issue:`19612`) - Bug in :class:`DatetimeIndex` and :class:`TimedeltaIndex` addition and subtraction where name of the returned object was not always set consistently. (:issue:`19744`) +- Bug in :class:`DatetimeIndex` and :class:`TimedeltaIndex` addition and subtraction where operations with numpy arrays raised ``TypeError`` (:issue:`19847`) - Timedelta diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index a68d883f04380..9411428b2e68d 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -31,6 +31,7 @@ is_integer_dtype, is_object_dtype, is_string_dtype, + is_datetime64_dtype, is_period_dtype, is_timedelta64_dtype) from pandas.core.dtypes.generic import ( @@ -676,9 +677,7 @@ def _add_datetimelike_methods(cls): """ def __add__(self, other): - from pandas.core.index import Index - from pandas.core.indexes.timedeltas import TimedeltaIndex - from pandas.tseries.offsets import DateOffset + from pandas import Index, DatetimeIndex, TimedeltaIndex, DateOffset other = lib.item_from_zerodim(other) if isinstance(other, ABCSeries): @@ -710,6 +709,9 @@ def __add__(self, other): .format(typ=type(other))) elif isinstance(other, Index): result = self._add_datelike(other) + elif is_datetime64_dtype(other): + # ndarray[datetime64]; note DatetimeIndex is caught above + return self + DatetimeIndex(other) elif is_integer_dtype(other) and self.freq is None: # GH#19123 raise NullFrequencyError("Cannot shift with no freq") @@ -729,10 +731,7 @@ def __radd__(self, other): cls.__radd__ = __radd__ def __sub__(self, other): - from pandas.core.index import Index - from pandas.core.indexes.datetimes import DatetimeIndex - from pandas.core.indexes.timedeltas import TimedeltaIndex - from pandas.tseries.offsets import DateOffset + from pandas import Index, DatetimeIndex, TimedeltaIndex, DateOffset other = lib.item_from_zerodim(other) if isinstance(other, ABCSeries): @@ -764,6 +763,9 @@ def __sub__(self, other): .format(typ=type(other).__name__)) elif isinstance(other, DatetimeIndex): result = self._sub_datelike(other) + elif is_datetime64_dtype(other): + # ndarray[datetime64]; note we caught DatetimeIndex earlier + return self - DatetimeIndex(other) elif isinstance(other, Index): raise TypeError("cannot subtract {typ1} and {typ2}" .format(typ1=type(self).__name__, @@ -782,6 +784,11 @@ def __sub__(self, other): cls.__sub__ = __sub__ def __rsub__(self, other): + if is_datetime64_dtype(other) and is_timedelta64_dtype(self): + # ndarray[datetime64] cannot be subtracted from self, so + # we need to wrap in DatetimeIndex and flip the operation + from pandas import DatetimeIndex + return DatetimeIndex(other) - self return -(self - other) cls.__rsub__ = __rsub__ diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py index 219adfdb66c82..6f80962eab079 100644 --- a/pandas/core/indexes/timedeltas.py +++ b/pandas/core/indexes/timedeltas.py @@ -377,6 +377,10 @@ def _add_delta(self, 
delta): new_values = self._add_delta_td(delta) elif isinstance(delta, TimedeltaIndex): new_values = self._add_delta_tdi(delta) + elif is_timedelta64_dtype(delta): + # ndarray[timedelta64] --> wrap in TimedeltaIndex + delta = TimedeltaIndex(delta) + new_values = self._add_delta_tdi(delta) else: raise TypeError("cannot add the type {0} to a TimedeltaIndex" .format(type(delta))) diff --git a/pandas/tests/indexes/datetimes/test_arithmetic.py b/pandas/tests/indexes/datetimes/test_arithmetic.py index 7900c983b6c77..5a7ea44f3698c 100644 --- a/pandas/tests/indexes/datetimes/test_arithmetic.py +++ b/pandas/tests/indexes/datetimes/test_arithmetic.py @@ -571,6 +571,62 @@ def test_add_datetimelike_and_dti_tz(self, addend): with tm.assert_raises_regex(TypeError, msg): addend + dti_tz + # ------------------------------------------------------------- + # __add__/__sub__ with ndarray[datetime64] and ndarray[timedelta64] + + def test_dti_add_dt64_array_raises(self, tz): + dti = pd.date_range('2016-01-01', periods=3, tz=tz) + dtarr = dti.values + + with pytest.raises(TypeError): + dti + dtarr + with pytest.raises(TypeError): + dtarr + dti + + def test_dti_sub_dt64_array_naive(self): + dti = pd.date_range('2016-01-01', periods=3, tz=None) + dtarr = dti.values + + expected = dti - dti + result = dti - dtarr + tm.assert_index_equal(result, expected) + result = dtarr - dti + tm.assert_index_equal(result, expected) + + def test_dti_sub_dt64_array_aware_raises(self, tz): + if tz is None: + return + dti = pd.date_range('2016-01-01', periods=3, tz=tz) + dtarr = dti.values + + with pytest.raises(TypeError): + dti - dtarr + with pytest.raises(TypeError): + dtarr - dti + + def test_dti_add_td64_array(self, tz): + dti = pd.date_range('2016-01-01', periods=3, tz=tz) + tdi = dti - dti.shift(1) + tdarr = tdi.values + + expected = dti + tdi + result = dti + tdarr + tm.assert_index_equal(result, expected) + result = tdarr + dti + tm.assert_index_equal(result, expected) + + def test_dti_sub_td64_array(self, tz): + dti = pd.date_range('2016-01-01', periods=3, tz=tz) + tdi = dti - dti.shift(1) + tdarr = tdi.values + + expected = dti - tdi + result = dti - tdarr + tm.assert_index_equal(result, expected) + + with pytest.raises(TypeError): + tdarr - dti + # ------------------------------------------------------------- def test_sub_dti_dti(self): diff --git a/pandas/tests/indexes/period/test_arithmetic.py b/pandas/tests/indexes/period/test_arithmetic.py index 0c06e6a4963b4..d7bf1e0210f62 100644 --- a/pandas/tests/indexes/period/test_arithmetic.py +++ b/pandas/tests/indexes/period/test_arithmetic.py @@ -255,6 +255,64 @@ def test_comp_nat(self, dtype): class TestPeriodIndexArithmetic(object): + + # ----------------------------------------------------------------- + # __add__/__sub__ with ndarray[datetime64] and ndarray[timedelta64] + + def test_pi_add_sub_dt64_array_raises(self): + rng = pd.period_range('1/1/2000', freq='D', periods=3) + dti = pd.date_range('2016-01-01', periods=3) + dtarr = dti.values + + with pytest.raises(TypeError): + rng + dtarr + with pytest.raises(TypeError): + dtarr + rng + + with pytest.raises(TypeError): + rng - dtarr + with pytest.raises(TypeError): + dtarr - rng + + def test_pi_add_sub_td64_array_non_tick_raises(self): + rng = pd.period_range('1/1/2000', freq='Q', periods=3) + dti = pd.date_range('2016-01-01', periods=3) + tdi = dti - dti.shift(1) + tdarr = tdi.values + + with pytest.raises(period.IncompatibleFrequency): + rng + tdarr + with pytest.raises(period.IncompatibleFrequency): + tdarr + rng + + 
with pytest.raises(period.IncompatibleFrequency): + rng - tdarr + with pytest.raises(period.IncompatibleFrequency): + tdarr - rng + + @pytest.mark.xfail(reason='op with TimedeltaIndex raises, with ndarray OK') + def test_pi_add_sub_td64_array_tick(self): + rng = pd.period_range('1/1/2000', freq='Q', periods=3) + dti = pd.date_range('2016-01-01', periods=3) + tdi = dti - dti.shift(1) + tdarr = tdi.values + + expected = rng + tdi + result = rng + tdarr + tm.assert_index_equal(result, expected) + result = tdarr + rng + tm.assert_index_equal(result, expected) + + expected = rng - tdi + result = rng - tdarr + tm.assert_index_equal(result, expected) + + with pytest.raises(TypeError): + tdarr - rng + + # ----------------------------------------------------------------- + # operations with array/Index of DateOffset objects + @pytest.mark.parametrize('box', [np.array, pd.Index]) def test_pi_add_offset_array(self, box): # GH#18849 diff --git a/pandas/tests/indexes/timedeltas/test_arithmetic.py b/pandas/tests/indexes/timedeltas/test_arithmetic.py index 282501860f7e5..6a80b995b6ee9 100644 --- a/pandas/tests/indexes/timedeltas/test_arithmetic.py +++ b/pandas/tests/indexes/timedeltas/test_arithmetic.py @@ -586,6 +586,55 @@ def test_tdi_radd_timestamp(self): expected = DatetimeIndex(['2011-01-02', '2011-01-03']) tm.assert_index_equal(result, expected) + # ------------------------------------------------------------- + # __add__/__sub__ with ndarray[datetime64] and ndarray[timedelta64] + + def test_tdi_sub_dt64_array(self): + dti = pd.date_range('2016-01-01', periods=3) + tdi = dti - dti.shift(1) + dtarr = dti.values + + with pytest.raises(TypeError): + tdi - dtarr + + # TimedeltaIndex.__rsub__ + expected = pd.DatetimeIndex(dtarr) - tdi + result = dtarr - tdi + tm.assert_index_equal(result, expected) + + def test_tdi_add_dt64_array(self): + dti = pd.date_range('2016-01-01', periods=3) + tdi = dti - dti.shift(1) + dtarr = dti.values + + expected = pd.DatetimeIndex(dtarr) + tdi + result = tdi + dtarr + tm.assert_index_equal(result, expected) + result = dtarr + tdi + tm.assert_index_equal(result, expected) + + def test_tdi_add_td64_array(self): + dti = pd.date_range('2016-01-01', periods=3) + tdi = dti - dti.shift(1) + tdarr = tdi.values + + expected = 2 * tdi + result = tdi + tdarr + tm.assert_index_equal(result, expected) + result = tdarr + tdi + tm.assert_index_equal(result, expected) + + def test_tdi_sub_td64_array(self): + dti = pd.date_range('2016-01-01', periods=3) + tdi = dti - dti.shift(1) + tdarr = tdi.values + + expected = 0 * tdi + result = tdi - tdarr + tm.assert_index_equal(result, expected) + result = tdarr - tdi + tm.assert_index_equal(result, expected) + # ------------------------------------------------------------- def test_subtraction_ops(self):
Two notes for upcoming passes: - the tests are pretty redundant and can be parametrized more tightly once they are all passing (see the sketch below). - catching ndarray[datetime64] separately from DatetimeIndex is kind of weird, but necessary for the moment. Once #19835 goes through and re-orders the catching, we'll be able to fix this.
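As a rough illustration of the parametrization mentioned above, a minimal sketch (hypothetical test name, not part of this PR) that collapses the paired TimedeltaIndex add/sub tests into one parametrized case:

```python
import operator

import pytest
import pandas as pd
import pandas.util.testing as tm


# Hypothetical tightening of the redundant TimedeltaIndex tests: one
# parametrized test covers both __add__ and __sub__ with a
# timedelta64 ndarray operand.
@pytest.mark.parametrize('op', [operator.add, operator.sub])
def test_tdi_td64_array(op):
    dti = pd.date_range('2016-01-01', periods=3)
    tdi = dti - dti.shift(1)
    tdarr = tdi.values

    # with this PR, op(tdi, tdarr) should match op(tdi, tdi)
    expected = op(tdi, tdi)
    result = op(tdi, tdarr)
    tm.assert_index_equal(result, expected)
```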
https://api.github.com/repos/pandas-dev/pandas/pulls/19847
2018-02-22T20:08:11Z
2018-02-25T16:11:42Z
2018-02-25T16:11:42Z
2018-06-22T03:28:41Z
DOC: Make API reference intro section concise
diff --git a/doc/source/api.rst b/doc/source/api.rst index b8aad67e147ba..0e47499a03f3a 100644 --- a/doc/source/api.rst +++ b/doc/source/api.rst @@ -6,19 +6,18 @@ API Reference ************* This page gives an overview of all public pandas objects, functions and -methods. In general, all classes and functions exposed in the top-level -``pandas.*`` namespace are regarded as public. +methods. All classes and functions exposed in ``pandas.*`` namespace are public. -Further some of the subpackages are public, including ``pandas.errors``, -``pandas.plotting``, and ``pandas.testing``. Certain functions in the -``pandas.io`` and ``pandas.tseries`` submodules are public as well (those -mentioned in the documentation). Further, the ``pandas.api.types`` subpackage -holds some public functions related to data types in pandas. +Some subpackages are public which include ``pandas.errors``, +``pandas.plotting``, and ``pandas.testing``. Public functions in +``pandas.io`` and ``pandas.tseries`` submodules are mentioned in +the documentation. ``pandas.api.types`` subpackage holds some +public functions related to data types in pandas. .. warning:: - The ``pandas.core``, ``pandas.compat``, and ``pandas.util`` top-level modules are considered to be PRIVATE. Stability of functionality in those modules in not guaranteed. + The ``pandas.core``, ``pandas.compat``, and ``pandas.util`` top-level modules are PRIVATE. Stable functionality in such modules is not guaranteed. .. _api.functions:
Also fixes the typo "in not guaranteed" to "is not guaranteed".
https://api.github.com/repos/pandas-dev/pandas/pulls/19846
2018-02-22T20:06:18Z
2018-02-26T08:43:00Z
2018-02-26T08:43:00Z
2018-02-26T08:43:06Z
CI: Align pep8speaks config with setup.cfg
diff --git a/.pep8speaks.yml b/.pep8speaks.yml index 299b76c8922cc..fda26d87bf7f6 100644 --- a/.pep8speaks.yml +++ b/.pep8speaks.yml @@ -6,5 +6,7 @@ scanner: pycodestyle: max-line-length: 79 ignore: # Errors and warnings to ignore - - E731 - - E402 + - E402, # module level import not at top of file + - E731, # do not assign a lambda expression, use a def + - E741, # do not use variables named 'l', 'O', or 'I' + - W503 # line break before binary operator
I copy-pasted the ignored flake8 codes from setup.cfg, so that the online pep8speaks check does the same as our linting on CI.
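For what it's worth, keeping the two files aligned could be scripted; a sketch (not part of this PR), assuming setup.cfg carries the ignore list under a `[flake8]` section as the description implies and that PyYAML is available:

```python
import configparser

import yaml  # PyYAML, assumed installed

# Parse both files and check that every code ignored by pep8speaks is
# also ignored by flake8 in setup.cfg (RawConfigParser avoids '%'
# interpolation issues that can occur in setup.cfg values).
cfg = configparser.RawConfigParser()
cfg.read('setup.cfg')
flake8_ignores = {c.strip() for c in cfg.get('flake8', 'ignore').split(',')}

with open('.pep8speaks.yml') as f:
    speaks = yaml.safe_load(f)
# entries like 'E402,' keep a trailing comma after YAML strips comments
speaks_ignores = {str(c).rstrip(',').strip()
                  for c in speaks['pycodestyle']['ignore']}

assert speaks_ignores <= flake8_ignores, speaks_ignores - flake8_ignores
```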
https://api.github.com/repos/pandas-dev/pandas/pulls/19841
2018-02-22T12:49:13Z
2018-02-22T13:55:03Z
2018-02-22T13:55:03Z
2018-02-22T13:55:03Z
DOC: script to build single docstring page
diff --git a/doc/make.py b/doc/make.py index e3cb29aa3e086..2819a62347627 100755 --- a/doc/make.py +++ b/doc/make.py @@ -14,11 +14,14 @@ import sys import os import shutil -import subprocess +# import subprocess import argparse from contextlib import contextmanager +import webbrowser import jinja2 +import pandas + DOC_PATH = os.path.dirname(os.path.abspath(__file__)) SOURCE_PATH = os.path.join(DOC_PATH, 'source') @@ -26,28 +29,6 @@ BUILD_DIRS = ['doctrees', 'html', 'latex', 'plots', '_static', '_templates'] -def _generate_index(include_api, single_doc=None): - """Create index.rst file with the specified sections. - - Parameters - ---------- - include_api : bool - Whether API documentation will be built. - single_doc : str or None - If provided, this single documentation page will be generated. - """ - if single_doc is not None: - single_doc = os.path.splitext(os.path.basename(single_doc))[0] - include_api = False - - with open(os.path.join(SOURCE_PATH, 'index.rst.template')) as f: - t = jinja2.Template(f.read()) - - with open(os.path.join(SOURCE_PATH, 'index.rst'), 'w') as f: - f.write(t.render(include_api=include_api, - single_doc=single_doc)) - - @contextmanager def _maybe_exclude_notebooks(): """Skip building the notebooks if pandoc is not installed. @@ -58,6 +39,7 @@ def _maybe_exclude_notebooks(): 1. nbconvert isn't installed, or 2. nbconvert is installed, but pandoc isn't """ + # TODO move to exclude_pattern base = os.path.dirname(__file__) notebooks = [os.path.join(base, 'source', nb) for nb in ['style.ipynb']] @@ -96,8 +78,110 @@ class DocBuilder: All public methods of this class can be called as parameters of the script. """ - def __init__(self, num_jobs=1): + def __init__(self, num_jobs=1, include_api=True, single_doc=None): self.num_jobs = num_jobs + self.include_api = include_api + self.single_doc = None + self.single_doc_type = None + if single_doc is not None: + self._process_single_doc(single_doc) + self.exclude_patterns = self._exclude_patterns + + self._generate_index() + if self.single_doc_type == 'docstring': + self._run_os('sphinx-autogen', '-o', + 'source/generated_single', 'source/index.rst') + + @property + def _exclude_patterns(self): + """Docs source files that will be excluded from building.""" + # TODO move maybe_exclude_notebooks here + if self.single_doc is not None: + rst_files = [f for f in os.listdir(SOURCE_PATH) + if ((f.endswith('.rst') or f.endswith('.ipynb')) + and (f != 'index.rst') + and (f != '{0}.rst'.format(self.single_doc)))] + if self.single_doc_type != 'api': + rst_files += ['generated/*.rst'] + elif not self.include_api: + rst_files = ['api.rst', 'generated/*.rst'] + else: + rst_files = ['generated_single/*.rst'] + + exclude_patterns = ','.join( + '{!r}'.format(i) for i in ['**.ipynb_checkpoints'] + rst_files) + + return exclude_patterns + + def _process_single_doc(self, single_doc): + """Extract self.single_doc (base name) and self.single_doc_type from + passed single_doc kwarg. 
+ + """ + self.include_api = False + + if single_doc == 'api.rst': + self.single_doc_type = 'api' + self.single_doc = 'api' + elif os.path.exists(os.path.join(SOURCE_PATH, single_doc)): + self.single_doc_type = 'rst' + self.single_doc = os.path.splitext(os.path.basename(single_doc))[0] + elif os.path.exists( + os.path.join(SOURCE_PATH, '{}.rst'.format(single_doc))): + self.single_doc_type = 'rst' + self.single_doc = single_doc + elif single_doc is not None: + try: + obj = pandas + for name in single_doc.split('.'): + obj = getattr(obj, name) + except AttributeError: + raise ValueError('Single document not understood, it should ' + 'be a file in doc/source/*.rst (e.g. ' + '"contributing.rst" or a pandas function or ' + 'method (e.g. "pandas.DataFrame.head")') + else: + self.single_doc_type = 'docstring' + if single_doc.startswith('pandas.'): + self.single_doc = single_doc[len('pandas.'):] + else: + self.single_doc = single_doc + + def _copy_generated_docstring(self): + """Copy existing generated (from api.rst) docstring page because + this is more correct in certain cases (where a custom autodoc + template is used). + + """ + fname = os.path.join(SOURCE_PATH, 'generated', + 'pandas.{}.rst'.format(self.single_doc)) + temp_dir = os.path.join(SOURCE_PATH, 'generated_single') + + try: + os.makedirs(temp_dir) + except OSError: + pass + + if os.path.exists(fname): + try: + # copying to make sure sphinx always thinks it is new + # and needs to be re-generated (to pick source code changes) + shutil.copy(fname, temp_dir) + except: # noqa + pass + + def _generate_index(self): + """Create index.rst file with the specified sections.""" + if self.single_doc_type == 'docstring': + self._copy_generated_docstring() + + with open(os.path.join(SOURCE_PATH, 'index.rst.template')) as f: + t = jinja2.Template(f.read()) + + with open(os.path.join(SOURCE_PATH, 'index.rst'), 'w') as f: + f.write(t.render(include_api=self.include_api, + single_doc=self.single_doc, + single_doc_type=self.single_doc_type)) @staticmethod def _create_build_structure(): @@ -121,7 +205,10 @@ def _run_os(*args): -------- >>> DocBuilder()._run_os('python', '--version') """ - subprocess.check_call(args, stderr=subprocess.STDOUT) + # TODO check_call should be more safe, but it fails with + # exclude patterns, needs investigation + # subprocess.check_call(args, stderr=subprocess.STDOUT) + os.system(' '.join(args)) def _sphinx_build(self, kind): """Call sphinx to build documentation. 
@@ -142,11 +229,21 @@ def _sphinx_build(self, kind): self._run_os('sphinx-build', '-j{}'.format(self.num_jobs), '-b{}'.format(kind), - '-d{}'.format(os.path.join(BUILD_PATH, - 'doctrees')), + '-d{}'.format(os.path.join(BUILD_PATH, 'doctrees')), + '-Dexclude_patterns={}'.format(self.exclude_patterns), SOURCE_PATH, os.path.join(BUILD_PATH, kind)) + def _open_browser(self): + base_url = os.path.join('file://', DOC_PATH, 'build', 'html') + if self.single_doc_type == 'docstring': + url = os.path.join( + base_url, + 'generated_single', 'pandas.{}.html'.format(self.single_doc)) + else: + url = os.path.join(base_url, '{}.html'.format(self.single_doc)) + webbrowser.open(url, new=2) + def html(self): """Build HTML documentation.""" self._create_build_structure() @@ -156,6 +253,11 @@ def html(self): if os.path.exists(zip_fname): os.remove(zip_fname) + if self.single_doc is not None: + self._open_browser() + shutil.rmtree(os.path.join(SOURCE_PATH, 'generated_single'), + ignore_errors=True) + def latex(self, force=False): """Build PDF documentation.""" self._create_build_structure() @@ -222,8 +324,8 @@ def main(): metavar='FILENAME', type=str, default=None, - help=('filename of section to compile, ' - 'e.g. "indexing"')) + help=('filename of section or method name to ' + 'compile, e.g. "indexing", "DataFrame.join"')) argparser.add_argument('--python-path', type=str, default=os.path.join(DOC_PATH, '..'), @@ -235,8 +337,10 @@ def main(): args.command, ', '.join(cmds))) os.environ['PYTHONPATH'] = args.python_path - _generate_index(not args.no_api, args.single) - getattr(DocBuilder(args.num_jobs), args.command)() + + getattr(DocBuilder(args.num_jobs, + not args.no_api, + args.single), args.command)() if __name__ == '__main__': diff --git a/doc/source/conf.py b/doc/source/conf.py index b5fbf096f2626..835127e5094e4 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -18,7 +18,6 @@ import importlib import warnings -from pandas.compat import u, PY3 try: raw_input # Python 2 @@ -86,38 +85,6 @@ if any(re.match("\s*api\s*", l) for l in index_rst_lines): autosummary_generate = True -files_to_delete = [] -for f in os.listdir(os.path.dirname(__file__)): - if (not f.endswith(('.ipynb', '.rst')) or - f.startswith('.') or os.path.basename(f) == 'index.rst'): - continue - - _file_basename = os.path.splitext(f)[0] - _regex_to_match = "\s*{}\s*$".format(_file_basename) - if not any(re.match(_regex_to_match, line) for line in index_rst_lines): - files_to_delete.append(f) - -if files_to_delete: - print("I'm about to DELETE the following:\n{}\n".format( - list(sorted(files_to_delete)))) - sys.stdout.write("WARNING: I'd like to delete those " - "to speed up processing (yes/no)? ") - if PY3: - answer = input() - else: - answer = raw_input() - - if answer.lower().strip() in ('y', 'yes'): - for f in files_to_delete: - f = os.path.join(os.path.join(os.path.dirname(__file__), f)) - f = os.path.abspath(f) - try: - print("Deleting {}".format(f)) - os.unlink(f) - except: - print("Error deleting {}".format(f)) - pass - # Add any paths that contain templates here, relative to this directory. templates_path = ['../_templates'] @@ -131,8 +98,8 @@ master_doc = 'index' # General information about the project. 
-project = u('pandas') -copyright = u('2008-2014, the pandas development team') +project = u'pandas' +copyright = u'2008-2014, the pandas development team' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the @@ -343,8 +310,8 @@ # file, target name, title, author, documentclass [howto/manual]). latex_documents = [ ('index', 'pandas.tex', - u('pandas: powerful Python data analysis toolkit'), - u('Wes McKinney\n\& PyData Development Team'), 'manual'), + u'pandas: powerful Python data analysis toolkit', + u'Wes McKinney\n\& PyData Development Team', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of diff --git a/doc/source/contributing.rst b/doc/source/contributing.rst index 258ab874cafcf..e159af9958fde 100644 --- a/doc/source/contributing.rst +++ b/doc/source/contributing.rst @@ -171,7 +171,7 @@ We'll now kick off a three-step process: # Create and activate the build environment conda env create -f ci/environment-dev.yaml conda activate pandas-dev - + # or with older versions of Anaconda: source activate pandas-dev @@ -388,14 +388,11 @@ If you want to do a full clean build, do:: python make.py html You can tell ``make.py`` to compile only a single section of the docs, greatly -reducing the turn-around time for checking your changes. You will be prompted to -delete ``.rst`` files that aren't required. This is okay because the prior -versions of these files can be checked out from git. However, you must make sure -not to commit the file deletions to your Git repository! +reducing the turn-around time for checking your changes. :: - #omit autosummary and API section + # omit autosummary and API section python make.py clean python make.py --no-api @@ -404,10 +401,20 @@ not to commit the file deletions to your Git repository! python make.py clean python make.py --single indexing -For comparison, a full documentation build may take 10 minutes, a ``-no-api`` build -may take 3 minutes and a single section may take 15 seconds. Subsequent builds, which -only process portions you have changed, will be faster. Open the following file in a web -browser to see the full documentation you just built:: + # compile the reference docs for a single function + python make.py clean + python make.py --single DataFrame.join + +For comparison, a full documentation build may take 15 minutes, but a single +section may take 15 seconds. Subsequent builds, which only process portions +you have changed, will be faster. + +You can also specify to use multiple cores to speed up the documentation build:: + + python make.py html --num-jobs 4 + +Open the following file in a web browser to see the full documentation you +just built:: pandas/docs/build/html/index.html diff --git a/doc/source/index.rst.template b/doc/source/index.rst.template index eff1227e98994..cb6cce5edaf79 100644 --- a/doc/source/index.rst.template +++ b/doc/source/index.rst.template @@ -106,8 +106,13 @@ Some other notes See the package overview for more detail about what's in the library. +{% if single_doc_type == 'docstring' -%} +.. autosummary:: + :toctree: generated_single/ +{% else -%} .. toctree:: :maxdepth: 4 +{% endif %} {% if single_doc -%} {{ single_doc }}
Add a way to build a single docstring page without needing to build the full docs.
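The core of the change is how the `--single` argument is classified. A simplified standalone sketch of `_process_single_doc` from this diff (paths assumed relative to the repo root, error handling trimmed):

```python
import os

import pandas

SOURCE_PATH = 'doc/source'  # assumed repo-relative layout


def resolve_single_doc(single_doc):
    """Classify a --single argument the way _process_single_doc does."""
    if single_doc == 'api.rst':
        return 'api', 'api'
    elif os.path.exists(os.path.join(SOURCE_PATH, single_doc)):
        # e.g. --single indexing.rst
        return 'rst', os.path.splitext(os.path.basename(single_doc))[0]
    elif os.path.exists(os.path.join(SOURCE_PATH, single_doc + '.rst')):
        # e.g. --single indexing
        return 'rst', single_doc
    # otherwise it must name a pandas object, e.g. DataFrame.join
    obj = pandas
    for name in single_doc.split('.'):
        obj = getattr(obj, name)  # AttributeError -> not a valid target
    if single_doc.startswith('pandas.'):
        single_doc = single_doc[len('pandas.'):]
    return 'docstring', single_doc
```

With a `'docstring'` result, the builder runs sphinx-autogen on the generated index and opens the resulting page under `generated_single/` in a browser.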
https://api.github.com/repos/pandas-dev/pandas/pulls/19840
2018-02-22T12:43:33Z
2018-03-01T09:35:05Z
2018-03-01T09:35:05Z
2018-03-01T09:35:09Z
DOC: Making doc/source/conf.py pass PEP-8, and added to lint
diff --git a/ci/lint.sh b/ci/lint.sh index b862a3bfcf29e..e3a39668885f0 100755 --- a/ci/lint.sh +++ b/ci/lint.sh @@ -37,12 +37,12 @@ if [ "$LINT" ]; then fi echo "Linting scripts/*.py DONE" - echo "Linting doc script" - flake8 doc/make.py + echo "Linting doc scripts" + flake8 doc/make.py doc/source/conf.py if [ $? -ne "0" ]; then RET=1 fi - echo "Linting doc script DONE" + echo "Linting doc scripts DONE" echo "Linting *.pyx" flake8 pandas --filename=*.pyx --select=E501,E302,E203,E111,E114,E221,E303,E128,E231,E126,E265,E305,E301,E127,E261,E271,E129,W291,E222,E241,E123,F403 diff --git a/doc/source/conf.py b/doc/source/conf.py index 7c4edd0486636..b5fbf096f2626 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -2,7 +2,8 @@ # # pandas documentation build configuration file, created by # -# This file is execfile()d with the current directory set to its containing dir. +# This file is execfile()d with the current directory set to its containing +# dir. # # Note that not all possible configuration values are present in this # autogenerated file. @@ -49,8 +50,9 @@ # -- General configuration ----------------------------------------------- -# Add any Sphinx extension module names here, as strings. They can be extensions -# coming with Sphinx (named 'sphinx.ext.*') or your custom ones. sphinxext. +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. +# sphinxext. extensions = ['sphinx.ext.autodoc', 'sphinx.ext.autosummary', @@ -60,7 +62,8 @@ 'numpydoc', 'ipython_sphinxext.ipython_directive', 'ipython_sphinxext.ipython_console_highlighting', - 'IPython.sphinxext.ipython_console_highlighting', # lowercase didn't work + # lowercase didn't work + 'IPython.sphinxext.ipython_console_highlighting', 'sphinx.ext.intersphinx', 'sphinx.ext.coverage', 'sphinx.ext.mathjax', @@ -95,22 +98,24 @@ files_to_delete.append(f) if files_to_delete: - print("I'm about to DELETE the following:\n%s\n" % list(sorted(files_to_delete))) - sys.stdout.write("WARNING: I'd like to delete those to speed up processing (yes/no)? ") + print("I'm about to DELETE the following:\n{}\n".format( + list(sorted(files_to_delete)))) + sys.stdout.write("WARNING: I'd like to delete those " + "to speed up processing (yes/no)? ") if PY3: answer = input() else: answer = raw_input() - if answer.lower().strip() in ('y','yes'): + if answer.lower().strip() in ('y', 'yes'): for f in files_to_delete: - f = os.path.join(os.path.join(os.path.dirname(__file__),f)) - f= os.path.abspath(f) + f = os.path.join(os.path.join(os.path.dirname(__file__), f)) + f = os.path.abspath(f) try: - print("Deleting %s" % f) + print("Deleting {}".format(f)) os.unlink(f) except: - print("Error deleting %s" % f) + print("Error deleting {}".format(f)) pass # Add any paths that contain templates here, relative to this directory. @@ -137,7 +142,7 @@ import pandas # version = '%s r%s' % (pandas.__version__, svn_version()) -version = '%s' % (pandas.__version__) +version = str(pandas.__version__) # The full version, including alpha/beta/rc tags. release = version @@ -159,8 +164,8 @@ # for source files. exclude_trees = [] -# The reST default role (used for this markup: `text`) to use for all documents. -# default_role = None +# The reST default role (used for this markup: `text`) to use for all +# documents. default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. 
# add_function_parentheses = True @@ -334,8 +339,8 @@ # The font size ('10pt', '11pt' or '12pt'). # latex_font_size = '10pt' -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, author, documentclass [howto/manual]). +# Grouping the document tree into LaTeX files. List of tuples (source start +# file, target name, title, author, documentclass [howto/manual]). latex_documents = [ ('index', 'pandas.tex', u('pandas: powerful Python data analysis toolkit'), @@ -392,7 +397,7 @@ # wherever the docs are built. The docs' target is the browser, not # the console, so this is fine. 'pd.options.display.encoding="utf8"' - ] +] # Add custom Documenter to handle attributes/methods of an AccessorProperty @@ -400,7 +405,8 @@ import sphinx from sphinx.util import rpartition -from sphinx.ext.autodoc import Documenter, MethodDocumenter, AttributeDocumenter +from sphinx.ext.autodoc import ( + Documenter, MethodDocumenter, AttributeDocumenter) from sphinx.ext.autosummary import Autosummary @@ -408,7 +414,6 @@ class AccessorDocumenter(MethodDocumenter): """ Specialized Documenter subclass for accessors. """ - objtype = 'accessor' directivetype = 'method' @@ -426,7 +431,6 @@ class AccessorLevelDocumenter(Documenter): Specialized Documenter subclass for objects on accessor level (methods, attributes). """ - # This is the simple straightforward version # modname is None, base the last elements (eg 'hour') # and path the part before (eg 'Series.dt') @@ -436,7 +440,6 @@ class AccessorLevelDocumenter(Documenter): # mod_cls = mod_cls.split('.') # # return modname, mod_cls + [base] - def resolve_name(self, modname, parents, path, base): if modname is None: if path: @@ -471,16 +474,17 @@ def resolve_name(self, modname, parents, path, base): return modname, parents + [base] -class AccessorAttributeDocumenter(AccessorLevelDocumenter, AttributeDocumenter): - +class AccessorAttributeDocumenter(AccessorLevelDocumenter, + AttributeDocumenter): objtype = 'accessorattribute' directivetype = 'attribute' - # lower than AttributeDocumenter so this is not chosen for normal attributes + # lower than AttributeDocumenter so this is not chosen for normal + # attributes priority = 0.6 -class AccessorMethodDocumenter(AccessorLevelDocumenter, MethodDocumenter): +class AccessorMethodDocumenter(AccessorLevelDocumenter, MethodDocumenter): objtype = 'accessormethod' directivetype = 'method' @@ -508,7 +512,6 @@ class PandasAutosummary(Autosummary): This alternative autosummary class lets us override the table summary for Series.plot and DataFrame.plot in the API docs. 
""" - def _replace_pandas_items(self, display_name, sig, summary, real_name): # this a hack: ideally we should extract the signature from the # .__call__ method instead of hard coding this @@ -561,18 +564,18 @@ def linkcode_resolve(domain, info): lineno = None if lineno: - linespec = "#L%d-L%d" % (lineno, lineno + len(source) - 1) + linespec = "#L{:d}-L{:d}".format(lineno, lineno + len(source) - 1) else: linespec = "" fn = os.path.relpath(fn, start=os.path.dirname(pandas.__file__)) if '+' in pandas.__version__: - return "http://github.com/pandas-dev/pandas/blob/master/pandas/%s%s" % ( - fn, linespec) + return ("http://github.com/pandas-dev/pandas/blob/master/pandas/" + "{}{}".format(fn, linespec)) else: - return "http://github.com/pandas-dev/pandas/blob/v%s/pandas/%s%s" % ( - pandas.__version__, fn, linespec) + return ("http://github.com/pandas-dev/pandas/blob/" + "v{}/pandas/{}{}".format(pandas.__version__, fn, linespec)) # remove the docstring of the flags attribute (inherited from numpy ndarray)
- [X] closes https://github.com/pandas-dev/pandas/issues/19838 - [ ] tests added / passed - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/19839
2018-02-22T11:48:28Z
2018-02-22T14:06:09Z
2018-02-22T14:06:08Z
2018-02-22T14:06:18Z
DOC: remove deprecated from_items from dsintro docs
diff --git a/doc/source/dsintro.rst b/doc/source/dsintro.rst index e8f73a9ec2e8a..1ba00b8fb6f23 100644 --- a/doc/source/dsintro.rst +++ b/doc/source/dsintro.rst @@ -364,6 +364,19 @@ and returns a DataFrame. It operates like the ``DataFrame`` constructor except for the ``orient`` parameter which is ``'columns'`` by default, but which can be set to ``'index'`` in order to use the dict keys as row labels. + +.. ipython:: python + + pd.DataFrame.from_dict(dict([('A', [1, 2, 3]), ('B', [4, 5, 6])])) + +If you pass ``orient='index'``, the keys will be the row labels. In this +case, you can also pass the desired column names: + +.. ipython:: python + + pd.DataFrame.from_dict(dict([('A', [1, 2, 3]), ('B', [4, 5, 6])]), + orient='index', columns=['one', 'two', 'three']) + .. _basics.dataframe.from_records: **DataFrame.from_records** @@ -378,28 +391,6 @@ dtype. For example: data pd.DataFrame.from_records(data, index='C') -.. _basics.dataframe.from_items: - -**DataFrame.from_items** - -``DataFrame.from_items`` works analogously to the form of the ``dict`` -constructor that takes a sequence of ``(key, value)`` pairs, where the keys are -column (or row, in the case of ``orient='index'``) names, and the value are the -column values (or row values). This can be useful for constructing a DataFrame -with the columns in a particular order without having to pass an explicit list -of columns: - -.. ipython:: python - - pd.DataFrame.from_items([('A', [1, 2, 3]), ('B', [4, 5, 6])]) - -If you pass ``orient='index'``, the keys will be the row labels. But in this -case you must also pass the desired column names: - -.. ipython:: python - - pd.DataFrame.from_items([('A', [1, 2, 3]), ('B', [4, 5, 6])], - orient='index', columns=['one', 'two', 'three']) Column selection, addition, deletion ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 1c5cf87d6b39b..061b69f25e7ac 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -1255,12 +1255,14 @@ def to_records(self, index=True, convert_datetime64=True): @classmethod def from_items(cls, items, columns=None, orient='columns'): - """ + """Construct a dataframe from a list of tuples + .. deprecated:: 0.23.0 - from_items is deprecated and will be removed in a - future version. Use :meth:`DataFrame.from_dict(dict())` - instead. :meth:`DataFrame.from_dict(OrderedDict(...))` may be used - to preserve the key order. + `from_items` is deprecated and will be removed in a future version. + Use :meth:`DataFrame.from_dict(dict(items)) <DataFrame.from_dict>` + instead. + :meth:`DataFrame.from_dict(OrderedDict(items)) <DataFrame.from_dict>` + may be used to preserve the key order. Convert (key, value) pairs to DataFrame. The keys will be the axis index (usually the columns, but depends on the specified @@ -1284,8 +1286,8 @@ def from_items(cls, items, columns=None, orient='columns'): """ warnings.warn("from_items is deprecated. Please use " - "DataFrame.from_dict(dict()) instead. " - "DataFrame.from_dict(OrderedDict()) may be used to " + "DataFrame.from_dict(dict(items), ...) instead. " + "DataFrame.from_dict(OrderedDict(items)) may be used to " "preserve the key order.", FutureWarning, stacklevel=2)
Follow-up on https://github.com/pandas-dev/pandas/pull/19802 and https://github.com/pandas-dev/pandas/pull/18529
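For reference, the replacement patterns documented in the updated dsintro.rst (the `columns` argument of `from_dict` with `orient='index'` is new in 0.23):

```python
import pandas as pd

# dict built from a list of tuples replaces from_items; use
# OrderedDict instead of dict to guarantee key order on older Pythons.
pd.DataFrame.from_dict(dict([('A', [1, 2, 3]), ('B', [4, 5, 6])]))

# with orient='index' the keys become row labels, and the desired
# column names can be passed explicitly:
pd.DataFrame.from_dict(dict([('A', [1, 2, 3]), ('B', [4, 5, 6])]),
                       orient='index', columns=['one', 'two', 'three'])
```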
https://api.github.com/repos/pandas-dev/pandas/pulls/19837
2018-02-22T10:40:54Z
2018-02-24T14:39:08Z
2018-02-24T14:39:08Z
2018-02-24T14:39:11Z
DOC: correct min_count param docstring
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 8034cf89cf8b7..85e2ce475ffa2 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -7855,7 +7855,7 @@ def _doc_parms(cls): >>> pd.Series([np.nan]).prod() 1.0 ->>> pd.Series([np.nan]).sum(min_count=1) +>>> pd.Series([np.nan]).prod(min_count=1) nan """ @@ -7867,8 +7867,9 @@ def _doc_parms(cls): .. versionadded :: 0.22.0 - Added with the default being 1. This means the sum or product - of an all-NA or empty series is ``NaN``. + Added with the default being 0. This means the sum of an all-NA + or empty Series is 0, and the product of an all-NA or empty + Series is 1. """
Corrects the `min_count` parameter docstring for `Series.prod`, `Series.sum`, `DataFrame.prod`, and `DataFrame.sum`. - [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
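The corrected behavior, for comparison (values per the 0.22 defaults the docstring now describes):

```python
import numpy as np
import pandas as pd

pd.Series([np.nan]).sum()              # 0.0 -- all-NA/empty sum defaults to 0
pd.Series([np.nan]).prod()             # 1.0 -- all-NA/empty product defaults to 1
pd.Series([np.nan]).prod(min_count=1)  # nan -- fewer than min_count valid values
```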
https://api.github.com/repos/pandas-dev/pandas/pulls/19836
2018-02-22T06:28:32Z
2018-02-23T11:24:24Z
2018-02-23T11:24:24Z
2018-02-23T11:44:20Z
De-duplicate add_offset_array methods
diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index ac75e5ae5e2a0..a68d883f04380 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -2,7 +2,7 @@ Base and utility classes for tseries type pandas objects. """ import warnings - +import operator from datetime import datetime, timedelta from pandas import compat @@ -10,6 +10,12 @@ from pandas.core.tools.timedeltas import to_timedelta import numpy as np + +from pandas._libs import lib, iNaT, NaT +from pandas._libs.tslibs.period import Period +from pandas._libs.tslibs.timedeltas import delta_to_nanoseconds +from pandas._libs.tslibs.timestamps import round_ns + from pandas.core.dtypes.common import ( _ensure_int64, is_dtype_equal, @@ -25,18 +31,15 @@ is_integer_dtype, is_object_dtype, is_string_dtype, + is_period_dtype, is_timedelta64_dtype) from pandas.core.dtypes.generic import ( ABCIndex, ABCSeries, ABCPeriodIndex, ABCIndexClass) from pandas.core.dtypes.missing import isna from pandas.core import common as com, algorithms, ops from pandas.core.algorithms import checked_add_with_arr -from pandas.errors import NullFrequencyError +from pandas.errors import NullFrequencyError, PerformanceWarning import pandas.io.formats.printing as printing -from pandas._libs import lib, iNaT, NaT -from pandas._libs.tslibs.period import Period -from pandas._libs.tslibs.timedeltas import delta_to_nanoseconds -from pandas._libs.tslibs.timestamps import round_ns from pandas.core.indexes.base import Index, _index_shared_docs from pandas.util._decorators import Appender, cache_readonly @@ -637,13 +640,33 @@ def _sub_datelike(self, other): def _sub_period(self, other): return NotImplemented - def _add_offset_array(self, other): - # Array/Index of DateOffset objects - return NotImplemented + def _addsub_offset_array(self, other, op): + """ + Add or subtract array-like of DateOffset objects - def _sub_offset_array(self, other): - # Array/Index of DateOffset objects - return NotImplemented + Parameters + ---------- + other : Index, np.ndarray + object-dtype containing pd.DateOffset objects + op : {operator.add, operator.sub} + + Returns + ------- + result : same class as self + """ + assert op in [operator.add, operator.sub] + if len(other) == 1: + return op(self, other[0]) + + warnings.warn("Adding/subtracting array of DateOffsets to " + "{cls} not vectorized" + .format(cls=type(self).__name__), PerformanceWarning) + + res_values = op(self.astype('O').values, np.array(other)) + kwargs = {} + if not is_period_dtype(self): + kwargs['freq'] = 'infer' + return self._constructor(res_values, **kwargs) @classmethod def _add_datetimelike_methods(cls): @@ -660,13 +683,24 @@ def __add__(self, other): other = lib.item_from_zerodim(other) if isinstance(other, ABCSeries): return NotImplemented - elif is_timedelta64_dtype(other): + + # scalar others + elif isinstance(other, (DateOffset, timedelta, np.timedelta64)): result = self._add_delta(other) - elif isinstance(other, (DateOffset, timedelta)): + elif isinstance(other, (datetime, np.datetime64)): + result = self._add_datelike(other) + elif is_integer(other): + # This check must come after the check for np.timedelta64 + # as is_integer returns True for these + result = self.shift(other) + + # array-like others + elif is_timedelta64_dtype(other): + # TimedeltaIndex, ndarray[timedelta64] result = self._add_delta(other) elif is_offsetlike(other): # Array/Index of DateOffset objects - result = self._add_offset_array(other) + result = 
self._addsub_offset_array(other, operator.add) elif isinstance(self, TimedeltaIndex) and isinstance(other, Index): if hasattr(other, '_add_delta'): # i.e. DatetimeIndex, TimedeltaIndex, or PeriodIndex @@ -674,12 +708,6 @@ def __add__(self, other): else: raise TypeError("cannot add TimedeltaIndex and {typ}" .format(typ=type(other))) - elif is_integer(other): - # This check must come after the check for timedelta64_dtype - # or else it will incorrectly catch np.timedelta64 objects - result = self.shift(other) - elif isinstance(other, (datetime, np.datetime64)): - result = self._add_datelike(other) elif isinstance(other, Index): result = self._add_datelike(other) elif is_integer_dtype(other) and self.freq is None: @@ -709,13 +737,26 @@ def __sub__(self, other): other = lib.item_from_zerodim(other) if isinstance(other, ABCSeries): return NotImplemented - elif is_timedelta64_dtype(other): + + # scalar others + elif isinstance(other, (DateOffset, timedelta, np.timedelta64)): result = self._add_delta(-other) - elif isinstance(other, (DateOffset, timedelta)): + elif isinstance(other, (datetime, np.datetime64)): + result = self._sub_datelike(other) + elif is_integer(other): + # This check must come after the check for np.timedelta64 + # as is_integer returns True for these + result = self.shift(-other) + elif isinstance(other, Period): + result = self._sub_period(other) + + # array-like others + elif is_timedelta64_dtype(other): + # TimedeltaIndex, ndarray[timedelta64] result = self._add_delta(-other) elif is_offsetlike(other): # Array/Index of DateOffset objects - result = self._sub_offset_array(other) + result = self._addsub_offset_array(other, operator.sub) elif isinstance(self, TimedeltaIndex) and isinstance(other, Index): # We checked above for timedelta64_dtype(other) so this # must be invalid. @@ -723,14 +764,6 @@ def __sub__(self, other): .format(typ=type(other).__name__)) elif isinstance(other, DatetimeIndex): result = self._sub_datelike(other) - elif is_integer(other): - # This check must come after the check for timedelta64_dtype - # or else it will incorrectly catch np.timedelta64 objects - result = self.shift(-other) - elif isinstance(other, (datetime, np.datetime64)): - result = self._sub_datelike(other) - elif isinstance(other, Period): - result = self._sub_period(other) elif isinstance(other, Index): raise TypeError("cannot subtract {typ1} and {typ2}" .format(typ1=type(self).__name__, diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 17f92339e4205..36ea2bffb9531 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -964,29 +964,6 @@ def _add_offset(self, offset): "or DatetimeIndex", PerformanceWarning) return self.astype('O') + offset - def _add_offset_array(self, other): - # Array/Index of DateOffset objects - if len(other) == 1: - return self + other[0] - else: - warnings.warn("Adding/subtracting array of DateOffsets to " - "{} not vectorized".format(type(self)), - PerformanceWarning) - return self.astype('O') + np.array(other) - # TODO: pass freq='infer' like we do in _sub_offset_array? 
- # TODO: This works for __add__ but loses dtype in __sub__ - - def _sub_offset_array(self, other): - # Array/Index of DateOffset objects - if len(other) == 1: - return self - other[0] - else: - warnings.warn("Adding/subtracting array of DateOffsets to " - "{} not vectorized".format(type(self)), - PerformanceWarning) - res_values = self.astype('O').values - np.array(other) - return self.__class__(res_values, freq='infer') - def _format_native_types(self, na_rep='NaT', date_format=None, **kwargs): from pandas.io.formats.format import _get_format_datetime64_from_values format = _get_format_datetime64_from_values(self, date_format) diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index 4c14cbffcd813..f0567c9c963af 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -44,7 +44,6 @@ from pandas.util._decorators import (Appender, Substitution, cache_readonly, deprecate_kwarg) from pandas.compat import zip, u -from pandas.errors import PerformanceWarning import pandas.core.indexes.base as ibase _index_doc_kwargs = dict(ibase._index_doc_kwargs) @@ -745,28 +744,6 @@ def _sub_period(self, other): # result must be Int64Index or Float64Index return Index(new_data) - def _add_offset_array(self, other): - # Array/Index of DateOffset objects - if len(other) == 1: - return self + other[0] - else: - warnings.warn("Adding/subtracting array of DateOffsets to " - "{cls} not vectorized" - .format(cls=type(self).__name__), PerformanceWarning) - res_values = self.astype('O').values + np.array(other) - return self.__class__(res_values) - - def _sub_offset_array(self, other): - # Array/Index of DateOffset objects - if len(other) == 1: - return self - other[0] - else: - warnings.warn("Adding/subtracting array of DateOffsets to " - "{cls} not vectorized" - .format(cls=type(self).__name__), PerformanceWarning) - res_values = self.astype('O').values - np.array(other) - return self.__class__(res_values) - def shift(self, n): """ Specialized shift which produces an PeriodIndex diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py index 3542a24290f89..219adfdb66c82 100644 --- a/pandas/core/indexes/timedeltas.py +++ b/pandas/core/indexes/timedeltas.py @@ -1,7 +1,6 @@ """ implement the TimedeltaIndex """ from datetime import timedelta -import warnings import numpy as np from pandas.core.dtypes.common import ( @@ -433,43 +432,16 @@ def _sub_datelike(self, other): else: raise TypeError("cannot subtract a datelike from a TimedeltaIndex") - def _add_offset_array(self, other): - # Array/Index of DateOffset objects + def _addsub_offset_array(self, other, op): + # Add or subtract Array-like of DateOffset objects try: # TimedeltaIndex can only operate with a subset of DateOffset # subclasses. Incompatible classes will raise AttributeError, # which we re-raise as TypeError - if len(other) == 1: - return self + other[0] - else: - from pandas.errors import PerformanceWarning - warnings.warn("Adding/subtracting array of DateOffsets to " - "{} not vectorized".format(type(self)), - PerformanceWarning) - return self.astype('O') + np.array(other) - # TODO: pass freq='infer' like we do in _sub_offset_array? - # TODO: This works for __add__ but loses dtype in __sub__ - except AttributeError: - raise TypeError("Cannot add non-tick DateOffset to TimedeltaIndex") - - def _sub_offset_array(self, other): - # Array/Index of DateOffset objects - try: - # TimedeltaIndex can only operate with a subset of DateOffset - # subclasses. 
Incompatible classes will raise AttributeError, - # which we re-raise as TypeError - if len(other) == 1: - return self - other[0] - else: - from pandas.errors import PerformanceWarning - warnings.warn("Adding/subtracting array of DateOffsets to " - "{} not vectorized".format(type(self)), - PerformanceWarning) - res_values = self.astype('O').values - np.array(other) - return self.__class__(res_values, freq='infer') + return DatetimeIndexOpsMixin._addsub_offset_array(self, other, op) except AttributeError: - raise TypeError("Cannot subtrack non-tick DateOffset from" - " TimedeltaIndex") + raise TypeError("Cannot add/subtract non-tick DateOffset to {cls}" + .format(cls=type(self).__name__)) def _format_native_types(self, na_rep=u('NaT'), date_format=None, **kwargs):
Combine the DTI/TDI/PI `_add_offset_array` and `_sub_offset_array` methods. Re-order `__add__` and `__sub__` if/else branches to put scalars up front. This was going to include fixes for DTI/TDI with ndarray[timedelta64]/ndarray[datetime64], but the diff grew large enough that those fixes were split off into the next PR.
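A small sketch of the behavior the unified `_addsub_offset_array` preserves (offset values chosen arbitrarily): applying an object array of more than one `DateOffset` takes the element-wise slow path and warns:

```python
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas.errors import PerformanceWarning

dti = pd.date_range('2017-01-01', periods=2)
offsets = np.array([pd.offsets.Day(1), pd.offsets.MonthEnd()])

# more than one offset -> element-wise object-dtype fallback + warning
with tm.assert_produces_warning(PerformanceWarning):
    result = dti + offsets
# result: DatetimeIndex(['2017-01-02', '2017-01-31'], freq=None)
```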
https://api.github.com/repos/pandas-dev/pandas/pulls/19835
2018-02-22T05:15:07Z
2018-02-24T14:43:35Z
2018-02-24T14:43:35Z
2018-06-22T03:28:35Z
Continue porting period_helper; fix leftover asfreq bug
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 4c1e98b236db7..c1ba35dff90ed 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -763,7 +763,7 @@ Timedelta - Bug in :class:`TimedeltaIndex` where division by a ``Series`` would return a ``TimedeltaIndex`` instead of a ``Series`` (:issue:`19042`) - Bug in :func:`Timedelta.__add__`, :func:`Timedelta.__sub__` where adding or subtracting a ``np.timedelta64`` object would return another ``np.timedelta64`` instead of a ``Timedelta`` (:issue:`19738`) - Bug in :func:`Timedelta.__floordiv__`, :func:`Timedelta.__rfloordiv__` where operating with a ``Tick`` object would raise a ``TypeError`` instead of returning a numeric value (:issue:`19738`) -- Bug in :func:`Period.asfreq` where periods near ``datetime(1, 1, 1)`` could be converted incorrectly (:issue:`19643`) +- Bug in :func:`Period.asfreq` where periods near ``datetime(1, 1, 1)`` could be converted incorrectly (:issue:`19643`, :issue:`19834`) - Bug in :func:`Timedelta.total_seconds()` causing precision errors i.e. ``Timedelta('30S').total_seconds()==30.000000000000004`` (:issue:`19458`) - diff --git a/pandas/_libs/src/period_helper.c b/pandas/_libs/src/period_helper.c index a812ed2e7e2b3..e3d250aa44f17 100644 --- a/pandas/_libs/src/period_helper.c +++ b/pandas/_libs/src/period_helper.c @@ -42,10 +42,10 @@ static int floordiv(int x, int divisor) { static int monthToQuarter(int month) { return ((month - 1) / 3) + 1; } -/* Find the absdate (days elapsed since datetime(1, 1, 1) +/* Find the unix_date (days elapsed since datetime(1970, 1, 1) * for the given year/month/day. * Assumes GREGORIAN_CALENDAR */ -npy_int64 absdate_from_ymd(int year, int month, int day) { +npy_int64 unix_date_from_ymd(int year, int month, int day) { /* Calculate the absolute date */ pandas_datetimestruct dts; npy_int64 unix_date; @@ -55,16 +55,16 @@ npy_int64 absdate_from_ymd(int year, int month, int day) { dts.month = month; dts.day = day; unix_date = pandas_datetimestruct_to_datetime(PANDAS_FR_D, &dts); - return ORD_OFFSET + unix_date; + return unix_date; } /* Sets the date part of the date_info struct Assumes GREGORIAN_CALENDAR */ static int dInfoCalc_SetFromAbsDate(register struct date_info *dinfo, - npy_int64 absdate) { + npy_int64 unix_date) { pandas_datetimestruct dts; - pandas_datetime_to_datetimestruct(absdate - ORD_OFFSET, PANDAS_FR_D, &dts); + pandas_datetime_to_datetimestruct(unix_date, PANDAS_FR_D, &dts); dinfo->year = dts.year; dinfo->month = dts.month; dinfo->day = dts.day; @@ -137,26 +137,26 @@ PANDAS_INLINE npy_int64 transform_via_day(npy_int64 ordinal, return result; } -static npy_int64 DtoB_weekday(npy_int64 absdate) { - return floordiv(absdate, 7) * 5 + mod_compat(absdate, 7) - BDAY_OFFSET; +static npy_int64 DtoB_weekday(npy_int64 unix_date) { + return floordiv(unix_date + 4, 7) * 5 + mod_compat(unix_date + 4, 7) - 4; } static npy_int64 DtoB(struct date_info *dinfo, - int roll_back, npy_int64 absdate) { + int roll_back, npy_int64 unix_date) { int day_of_week = dayofweek(dinfo->year, dinfo->month, dinfo->day); if (roll_back == 1) { if (day_of_week > 4) { // change to friday before weekend - absdate -= (day_of_week - 4); + unix_date -= (day_of_week - 4); } } else { if (day_of_week > 4) { // change to Monday after weekend - absdate += (7 - day_of_week); + unix_date += (7 - day_of_week); } } - return DtoB_weekday(absdate); + return DtoB_weekday(unix_date); } @@ -165,18 +165,19 @@ static npy_int64 DtoB(struct date_info *dinfo, static npy_int64 
asfreq_DTtoA(npy_int64 ordinal, asfreq_info *af_info) { struct date_info dinfo; ordinal = downsample_daytime(ordinal, af_info); - dInfoCalc_SetFromAbsDate(&dinfo, ordinal + ORD_OFFSET); + dInfoCalc_SetFromAbsDate(&dinfo, ordinal); if (dinfo.month > af_info->to_a_year_end) { - return (npy_int64)(dinfo.year + 1 - BASE_YEAR); + return (npy_int64)(dinfo.year + 1 - 1970); } else { - return (npy_int64)(dinfo.year - BASE_YEAR); + return (npy_int64)(dinfo.year - 1970); } } -static npy_int64 DtoQ_yq(npy_int64 ordinal, asfreq_info *af_info, int *year, - int *quarter) { +static int DtoQ_yq(npy_int64 ordinal, asfreq_info *af_info, int *year) { struct date_info dinfo; - dInfoCalc_SetFromAbsDate(&dinfo, ordinal + ORD_OFFSET); + int quarter; + + dInfoCalc_SetFromAbsDate(&dinfo, ordinal); if (af_info->to_q_year_end != 12) { dinfo.month -= af_info->to_q_year_end; if (dinfo.month <= 0) { @@ -187,9 +188,8 @@ static npy_int64 DtoQ_yq(npy_int64 ordinal, asfreq_info *af_info, int *year, } *year = dinfo.year; - *quarter = monthToQuarter(dinfo.month); - - return 0; + quarter = monthToQuarter(dinfo.month); + return quarter; } static npy_int64 asfreq_DTtoQ(npy_int64 ordinal, asfreq_info *af_info) { @@ -197,8 +197,8 @@ static npy_int64 asfreq_DTtoQ(npy_int64 ordinal, asfreq_info *af_info) { ordinal = downsample_daytime(ordinal, af_info); - DtoQ_yq(ordinal, af_info, &year, &quarter); - return (npy_int64)((year - BASE_YEAR) * 4 + quarter - 1); + quarter = DtoQ_yq(ordinal, af_info, &year); + return (npy_int64)((year - 1970) * 4 + quarter - 1); } static npy_int64 asfreq_DTtoM(npy_int64 ordinal, asfreq_info *af_info) { @@ -206,28 +206,25 @@ static npy_int64 asfreq_DTtoM(npy_int64 ordinal, asfreq_info *af_info) { ordinal = downsample_daytime(ordinal, af_info); - dInfoCalc_SetFromAbsDate(&dinfo, ordinal + ORD_OFFSET); - return (npy_int64)((dinfo.year - BASE_YEAR) * 12 + dinfo.month - 1); + dInfoCalc_SetFromAbsDate(&dinfo, ordinal); + return (npy_int64)((dinfo.year - 1970) * 12 + dinfo.month - 1); } static npy_int64 asfreq_DTtoW(npy_int64 ordinal, asfreq_info *af_info) { ordinal = downsample_daytime(ordinal, af_info); - return (ordinal + ORD_OFFSET - (1 + af_info->to_week_end)) / 7 + 1 - - WEEK_OFFSET; + return floordiv(ordinal + 3 - af_info->to_week_end, 7) + 1; } static npy_int64 asfreq_DTtoB(npy_int64 ordinal, asfreq_info *af_info) { struct date_info dinfo; - npy_int64 absdate; int roll_back; ordinal = downsample_daytime(ordinal, af_info); - absdate = ordinal + ORD_OFFSET; - dInfoCalc_SetFromAbsDate(&dinfo, absdate); + dInfoCalc_SetFromAbsDate(&dinfo, ordinal); // This usage defines roll_back the opposite way from the others roll_back = 1 - af_info->is_end; - return DtoB(&dinfo, roll_back, absdate); + return DtoB(&dinfo, roll_back, ordinal); } // all intra day calculations are now done within one function @@ -243,10 +240,7 @@ static npy_int64 asfreq_UpsampleWithinDay(npy_int64 ordinal, //************ FROM BUSINESS *************** static npy_int64 asfreq_BtoDT(npy_int64 ordinal, asfreq_info *af_info) { - ordinal += BDAY_OFFSET; - ordinal = - (floordiv(ordinal - 1, 5) * 7 + mod_compat(ordinal - 1, 5) + 1 - - ORD_OFFSET); + ordinal = floordiv(ordinal + 3, 5) * 7 + mod_compat(ordinal + 3, 5) - 3; return upsample_daytime(ordinal, af_info); } @@ -270,8 +264,7 @@ static npy_int64 asfreq_BtoW(npy_int64 ordinal, asfreq_info *af_info) { //************ FROM WEEKLY *************** static npy_int64 asfreq_WtoDT(npy_int64 ordinal, asfreq_info *af_info) { - ordinal = (ordinal + WEEK_OFFSET) * 7 + - af_info->from_week_end - ORD_OFFSET + 
+ ordinal = ordinal * 7 + af_info->from_week_end - 4 + (7 - 1) * (af_info->is_end - 1); return upsample_daytime(ordinal, af_info); } @@ -294,30 +287,29 @@ static npy_int64 asfreq_WtoW(npy_int64 ordinal, asfreq_info *af_info) { static npy_int64 asfreq_WtoB(npy_int64 ordinal, asfreq_info *af_info) { struct date_info dinfo; - npy_int64 absdate = asfreq_WtoDT(ordinal, af_info) + ORD_OFFSET; + npy_int64 unix_date = asfreq_WtoDT(ordinal, af_info); int roll_back = af_info->is_end; - dInfoCalc_SetFromAbsDate(&dinfo, absdate); + dInfoCalc_SetFromAbsDate(&dinfo, unix_date); - return DtoB(&dinfo, roll_back, absdate); + return DtoB(&dinfo, roll_back, unix_date); } //************ FROM MONTHLY *************** static void MtoD_ym(npy_int64 ordinal, int *y, int *m) { - *y = floordiv(ordinal, 12) + BASE_YEAR; + *y = floordiv(ordinal, 12) + 1970; *m = mod_compat(ordinal, 12) + 1; } static npy_int64 asfreq_MtoDT(npy_int64 ordinal, asfreq_info *af_info) { - npy_int64 absdate; + npy_int64 unix_date; int y, m; ordinal += af_info->is_end; MtoD_ym(ordinal, &y, &m); - absdate = absdate_from_ymd(y, m, 1); - ordinal = absdate - ORD_OFFSET; + unix_date = unix_date_from_ymd(y, m, 1); - ordinal -= af_info->is_end; - return upsample_daytime(ordinal, af_info); + unix_date -= af_info->is_end; + return upsample_daytime(unix_date, af_info); } static npy_int64 asfreq_MtoA(npy_int64 ordinal, asfreq_info *af_info) { @@ -334,18 +326,18 @@ static npy_int64 asfreq_MtoW(npy_int64 ordinal, asfreq_info *af_info) { static npy_int64 asfreq_MtoB(npy_int64 ordinal, asfreq_info *af_info) { struct date_info dinfo; - npy_int64 absdate = asfreq_MtoDT(ordinal, af_info) + ORD_OFFSET; + npy_int64 unix_date = asfreq_MtoDT(ordinal, af_info); int roll_back = af_info->is_end; - dInfoCalc_SetFromAbsDate(&dinfo, absdate); + dInfoCalc_SetFromAbsDate(&dinfo, unix_date); - return DtoB(&dinfo, roll_back, absdate); + return DtoB(&dinfo, roll_back, unix_date); } //************ FROM QUARTERLY *************** static void QtoD_ym(npy_int64 ordinal, int *y, int *m, asfreq_info *af_info) { - *y = floordiv(ordinal, 4) + BASE_YEAR; + *y = floordiv(ordinal, 4) + 1970; *m = mod_compat(ordinal, 4) * 3 + 1; if (af_info->from_q_year_end != 12) { @@ -359,16 +351,16 @@ static void QtoD_ym(npy_int64 ordinal, int *y, int *m, asfreq_info *af_info) { } static npy_int64 asfreq_QtoDT(npy_int64 ordinal, asfreq_info *af_info) { - npy_int64 absdate; + npy_int64 unix_date; int y, m; ordinal += af_info->is_end; QtoD_ym(ordinal, &y, &m, af_info); - absdate = absdate_from_ymd(y, m, 1); + unix_date = unix_date_from_ymd(y, m, 1); - absdate -= af_info->is_end; - return upsample_daytime(absdate - ORD_OFFSET, af_info); + unix_date -= af_info->is_end; + return upsample_daytime(unix_date, af_info); } static npy_int64 asfreq_QtoQ(npy_int64 ordinal, asfreq_info *af_info) { @@ -389,21 +381,21 @@ static npy_int64 asfreq_QtoW(npy_int64 ordinal, asfreq_info *af_info) { static npy_int64 asfreq_QtoB(npy_int64 ordinal, asfreq_info *af_info) { struct date_info dinfo; - npy_int64 absdate = asfreq_QtoDT(ordinal, af_info) + ORD_OFFSET; + npy_int64 unix_date = asfreq_QtoDT(ordinal, af_info); int roll_back = af_info->is_end; - dInfoCalc_SetFromAbsDate(&dinfo, absdate); + dInfoCalc_SetFromAbsDate(&dinfo, unix_date); - return DtoB(&dinfo, roll_back, absdate); + return DtoB(&dinfo, roll_back, unix_date); } //************ FROM ANNUAL *************** static npy_int64 asfreq_AtoDT(npy_int64 ordinal, asfreq_info *af_info) { - npy_int64 absdate; + npy_int64 unix_date; // start from 1970 - npy_int64 year = 
ordinal + BASE_YEAR; + npy_int64 year = ordinal + 1970; int month = (af_info->from_a_year_end % 12) + 1; if (af_info->from_a_year_end != 12) { @@ -411,10 +403,10 @@ static npy_int64 asfreq_AtoDT(npy_int64 ordinal, asfreq_info *af_info) { } year += af_info->is_end; - absdate = absdate_from_ymd(year, month, 1); + unix_date = unix_date_from_ymd(year, month, 1); - absdate -= af_info->is_end; - return upsample_daytime(absdate - ORD_OFFSET, af_info); + unix_date -= af_info->is_end; + return upsample_daytime(unix_date, af_info); } static npy_int64 asfreq_AtoA(npy_int64 ordinal, asfreq_info *af_info) { @@ -435,11 +427,11 @@ static npy_int64 asfreq_AtoW(npy_int64 ordinal, asfreq_info *af_info) { static npy_int64 asfreq_AtoB(npy_int64 ordinal, asfreq_info *af_info) { struct date_info dinfo; - npy_int64 absdate = asfreq_AtoDT(ordinal, af_info) + ORD_OFFSET; + npy_int64 unix_date = asfreq_AtoDT(ordinal, af_info); int roll_back = af_info->is_end; - dInfoCalc_SetFromAbsDate(&dinfo, absdate); + dInfoCalc_SetFromAbsDate(&dinfo, unix_date); - return DtoB(&dinfo, roll_back, absdate); + return DtoB(&dinfo, roll_back, unix_date); } static npy_int64 nofunc(npy_int64 ordinal, asfreq_info *af_info) { diff --git a/pandas/_libs/src/period_helper.h b/pandas/_libs/src/period_helper.h index 1573b1eeec74b..7163dc960d152 100644 --- a/pandas/_libs/src/period_helper.h +++ b/pandas/_libs/src/period_helper.h @@ -20,32 +20,8 @@ frequency conversion routines. #include "limits.h" #include "numpy/ndarraytypes.h" -/* - * declarations from period here - */ - -#define Py_Error(errortype, errorstr) \ - { \ - PyErr_SetString(errortype, errorstr); \ - goto onError; \ - } - /*** FREQUENCY CONSTANTS ***/ -// HIGHFREQ_ORIG is the datetime ordinal from which to begin the second -// frequency ordinal sequence - -// #define HIGHFREQ_ORIG 62135683200LL -#define BASE_YEAR 1970 -#define ORD_OFFSET 719163LL // days until 1970-01-01 -#define BDAY_OFFSET 513689LL // days until 1970-01-01 -#define WEEK_OFFSET 102737LL -#define BASE_WEEK_TO_DAY_OFFSET \ - 1 // difference between day 0 and end of week in days -#define DAYS_PER_WEEK 7 -#define BUSINESS_DAYS_PER_WEEK 5 -#define HIGHFREQ_ORIG 0 // ORD_OFFSET * 86400LL // days until 1970-01-01 - #define FR_ANN 1000 /* Annual */ #define FR_ANNDEC FR_ANN /* Annual - December year end*/ #define FR_ANNJAN 1001 /* Annual - January year end*/ diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx index e1c783ac9fa54..f1a193706144f 100644 --- a/pandas/_libs/tslibs/period.pyx +++ b/pandas/_libs/tslibs/period.pyx @@ -75,10 +75,6 @@ cdef extern from "period_helper.h": int FR_BUS int FR_UND - int ORD_OFFSET - int WEEK_OFFSET - int BDAY_OFFSET - ctypedef struct date_info: double second int minute @@ -181,7 +177,7 @@ cdef int64_t get_period_ordinal(int year, int month, int day, period_ordinal : int64_t """ cdef: - int64_t absdays, unix_date, seconds, delta + int64_t unix_date, seconds, delta int64_t weeks int64_t day_adj int freq_group, fmonth, mdiff @@ -215,8 +211,7 @@ cdef int64_t get_period_ordinal(int year, int month, int day, elif freq == FR_MTH: return (year - 1970) * 12 + month - 1 - absdays = absdate_from_ymd(year, month, day) - unix_date = absdays - ORD_OFFSET + unix_date = unix_date_from_ymd(year, month, day) if freq >= FR_SEC: seconds = unix_date * 86400 + hour * 3600 + minute * 60 + second @@ -247,48 +242,48 @@ cdef int64_t get_period_ordinal(int year, int month, int day, return unix_date elif freq == FR_BUS: - # calculate the current week assuming sunday as last day of a week - # 
Jan 1 0001 is a Monday, so subtract 1 to get to end-of-week - weeks = (unix_date + ORD_OFFSET - 1) // 7 + # calculate the current week (counting from 1970-01-01) treating + # sunday as last day of a week + weeks = (unix_date + 3) // 7 # calculate the current weekday (in range 1 .. 7) - delta = (unix_date + ORD_OFFSET - 1) % 7 + 1 + delta = (unix_date + 3) % 7 + 1 # return the number of business days in full weeks plus the business # days in the last - possible partial - week if delta <= 5: - return (weeks * 5) + delta - BDAY_OFFSET + return (5 * weeks) + delta - 4 else: - return (weeks * 5) + (5 + 1) - BDAY_OFFSET + return (5 * weeks) + (5 + 1) - 4 elif freq_group == FR_WK: day_adj = freq - FR_WK - return (unix_date + ORD_OFFSET - (1 + day_adj)) // 7 + 1 - WEEK_OFFSET + return (unix_date + 3 - day_adj) // 7 + 1 # raise ValueError cdef void get_date_info(int64_t ordinal, int freq, date_info *dinfo) nogil: cdef: - int64_t absdate + int64_t unix_date double abstime - absdate = get_python_ordinal(ordinal, freq); - abstime = get_abs_time(freq, absdate - ORD_OFFSET, ordinal) + unix_date = get_unix_date(ordinal, freq) + abstime = get_abs_time(freq, unix_date, ordinal) while abstime < 0: abstime += 86400 - absdate -= 1 + unix_date -= 1 while abstime >= 86400: abstime -= 86400 - absdate += 1 + unix_date += 1 - dInfoCalc_SetFromAbsDateTime(dinfo, absdate, abstime) + date_info_from_days_and_time(dinfo, unix_date, abstime) -cdef int64_t get_python_ordinal(int64_t period_ordinal, int freq) nogil: +cdef int64_t get_unix_date(int64_t period_ordinal, int freq) nogil: """ Returns the proleptic Gregorian ordinal of the date, as an integer. - This corresponds to the number of days since Jan., 1st, 1AD. + This corresponds to the number of days since Jan., 1st, 1970 AD. When the instance has a frequency less than daily, the proleptic date is calculated for the last day of the period. @@ -299,92 +294,56 @@ cdef int64_t get_python_ordinal(int64_t period_ordinal, int freq) nogil: Returns ------- - absdate : int64_t number of days since datetime(1, 1, 1) + unix_date : int64_t number of days since datetime(1970, 1, 1) """ cdef: asfreq_info af_info freq_conv_func toDaily = NULL if freq == FR_DAY: - return period_ordinal + ORD_OFFSET + return period_ordinal toDaily = get_asfreq_func(freq, FR_DAY) get_asfreq_info(freq, FR_DAY, 'E', &af_info) - return toDaily(period_ordinal, &af_info) + ORD_OFFSET + return toDaily(period_ordinal, &af_info) -cdef void dInfoCalc_SetFromAbsDateTime(date_info *dinfo, - int64_t absdate, double abstime) nogil: +@cython.cdivision +cdef void date_info_from_days_and_time(date_info *dinfo, + int64_t unix_date, + double abstime) nogil: """ Set the instance's value using the given date and time. - Assumes GREGORIAN_CALENDAR. 
Parameters ---------- dinfo : date_info* - absdate : int64_t - days elapsed since datetime(1, 1, 1) + unix_date : int64_t + days elapsed since datetime(1970, 1, 1) abstime : double - seconds elapsed since beginning of day described by absdate + seconds elapsed since beginning of day described by unix_date Notes ----- Updates dinfo inplace """ + cdef: + pandas_datetimestruct dts + int inttime + int hour, minute + double second + # Bounds check # The calling function is responsible for ensuring that # abstime >= 0.0 and abstime <= 86400 # Calculate the date - dInfoCalc_SetFromAbsDate(dinfo, absdate) - - # Calculate the time - dInfoCalc_SetFromAbsTime(dinfo, abstime) - - -cdef void dInfoCalc_SetFromAbsDate(date_info *dinfo, int64_t absdate) nogil: - """ - Sets the date part of the date_info struct - Assumes GREGORIAN_CALENDAR - - Parameters - ---------- - dinfo : date_info* - unix_date : int64_t - - Notes - ----- - Updates dinfo inplace - """ - cdef: - pandas_datetimestruct dts - - pandas_datetime_to_datetimestruct(absdate - ORD_OFFSET, PANDAS_FR_D, &dts) + pandas_datetime_to_datetimestruct(unix_date, PANDAS_FR_D, &dts) dinfo.year = dts.year dinfo.month = dts.month dinfo.day = dts.day - -@cython.cdivision -cdef void dInfoCalc_SetFromAbsTime(date_info *dinfo, double abstime) nogil: - """ - Sets the time part of the DateTime object. - - Parameters - ---------- - dinfo : date_info* - abstime : double - seconds elapsed since beginning of day described by absdate - - Notes - ----- - Updates dinfo inplace - """ - cdef: - int inttime - int hour, minute - double second - + # Calculate the time inttime = <int>abstime hour = inttime / 3600 minute = (inttime % 3600) / 60 @@ -396,8 +355,7 @@ cdef void dInfoCalc_SetFromAbsTime(date_info *dinfo, double abstime) nogil: @cython.cdivision -cdef double get_abs_time(int freq, int64_t date_ordinal, - int64_t ordinal) nogil: +cdef double get_abs_time(int freq, int64_t unix_date, int64_t ordinal) nogil: cdef: int freq_index, day_index, base_index int64_t per_day, start_ord @@ -416,16 +374,15 @@ cdef double get_abs_time(int freq, int64_t date_ordinal, if base_index < freq_index: unit = 1 / unit - start_ord = date_ordinal * per_day + start_ord = unix_date * per_day result = <double>(unit * (ordinal - start_ord)) return result -cdef int64_t absdate_from_ymd(int year, int month, int day) nogil: +cdef int64_t unix_date_from_ymd(int year, int month, int day) nogil: """ - Find the absdate (days elapsed since datetime(1, 1, 1) + Find the unix_date (days elapsed since datetime(1970, 1, 1) for the given year/month/day. 
- Assumes GREGORIAN_CALENDAR Parameters ---------- @@ -435,11 +392,9 @@ cdef int64_t absdate_from_ymd(int year, int month, int day) nogil: Returns ------- - absdate : int - days elapsed since datetime(1, 1, 1) + unix_date : int + days elapsed since datetime(1970, 1, 1) """ - - # /* Calculate the absolute date cdef: pandas_datetimestruct dts int64_t unix_date @@ -449,7 +404,7 @@ cdef int64_t absdate_from_ymd(int year, int month, int day) nogil: dts.month = month dts.day = day unix_date = pandas_datetimestruct_to_datetime(PANDAS_FR_D, &dts) - return ORD_OFFSET + unix_date + return unix_date cdef int get_yq(int64_t ordinal, int freq, int *quarter, int *year): @@ -475,9 +430,9 @@ cdef int get_yq(int64_t ordinal, int freq, int *quarter, int *year): cdef: asfreq_info af_info int qtr_freq - int64_t daily_ord + int64_t unix_date - daily_ord = get_python_ordinal(ordinal, freq) - ORD_OFFSET + unix_date = get_unix_date(ordinal, freq) if get_freq_group(freq) == FR_QTR: qtr_freq = freq @@ -486,16 +441,16 @@ cdef int get_yq(int64_t ordinal, int freq, int *quarter, int *year): get_asfreq_info(FR_DAY, qtr_freq, 'E', &af_info) - DtoQ_yq(daily_ord, &af_info, year, quarter) + quarter[0] = DtoQ_yq(unix_date, &af_info, year) return qtr_freq -cdef void DtoQ_yq(int64_t ordinal, asfreq_info *af_info, - int *year, int *quarter): +cdef int DtoQ_yq(int64_t unix_date, asfreq_info *af_info, int *year): cdef: date_info dinfo + int quarter - dInfoCalc_SetFromAbsDate(&dinfo, ordinal + ORD_OFFSET) + date_info_from_days_and_time(&dinfo, unix_date, 0) if af_info.to_q_year_end != 12: dinfo.month -= af_info.to_q_year_end @@ -505,10 +460,11 @@ cdef void DtoQ_yq(int64_t ordinal, asfreq_info *af_info, dinfo.year += 1 year[0] = dinfo.year - quarter[0] = monthToQuarter(dinfo.month) + quarter = month_to_quarter(dinfo.month) + return quarter -cdef inline int monthToQuarter(int month): +cdef inline int month_to_quarter(int month): return (month - 1) // 3 + 1 @@ -678,7 +634,7 @@ def period_format(int64_t value, int freq, object fmt=None): return repr(NaT) if fmt is None: - freq_group = (freq // 1000) * 1000 + freq_group = get_freq_group(freq) if freq_group == 1000: # FR_ANN fmt = b'%Y' elif freq_group == 2000: # FR_QTR @@ -1620,8 +1576,8 @@ class Period(_Period): return cls._from_ordinal(ordinal, freq) -def _ordinal_from_fields(year, month, quarter, day, - hour, minute, second, freq): +cdef int64_t _ordinal_from_fields(year, month, quarter, day, + hour, minute, second, freq): base, mult = get_freq_code(freq) if quarter is not None: year, month = _quarter_to_myear(year, quarter, freq) diff --git a/pandas/tests/scalar/period/test_period.py b/pandas/tests/scalar/period/test_period.py index dff5433adcf79..f43ab0704f0f4 100644 --- a/pandas/tests/scalar/period/test_period.py +++ b/pandas/tests/scalar/period/test_period.py @@ -1440,3 +1440,10 @@ def test_period_immutable(): freq = per.freq with pytest.raises(AttributeError): per.freq = 2 * freq + + +@pytest.mark.xfail(reason='GH#19834 Period parsing error') +def test_small_year_parsing(): + per1 = Period('0001-01-07', 'D') + assert per1.year == 1 + assert per1.day == 7 diff --git a/pandas/tests/scalar/period/test_period_asfreq.py b/pandas/tests/scalar/period/test_period_asfreq.py index 9f8b2562e9e20..474d19809b03c 100644 --- a/pandas/tests/scalar/period/test_period_asfreq.py +++ b/pandas/tests/scalar/period/test_period_asfreq.py @@ -21,6 +21,16 @@ def test_asfreq_near_zero(self, freq): tup2 = (prev.year, prev.month, prev.day) assert tup2 < tup1 + def test_asfreq_near_zero_weekly(self): + # 
GH#19834 + per1 = Period('0001-01-01', 'D') + 6 + per2 = Period('0001-01-01', 'D') - 6 + week1 = per1.asfreq('W') + week2 = per2.asfreq('W') + assert week1 != week2 + assert week1.asfreq('D', 'E') >= per1 + assert week2.asfreq('D', 'S') <= per2 + @pytest.mark.xfail(reason='GH#19643 period_helper asfreq functions fail ' 'to check for overflows') def test_to_timestamp_out_of_bounds(self):
In #19650 we fixed a bug with asfreq but missed a weekly-frequency case. This fixes and tests that case. I'll see if I can cook up a more thorough set of tests to make sure nothing else slips through the cracks.

Other than that, the main thing this does is substitute in `ORD_OFFSET = WEEK_OFFSET * 7 + 4` and `BDAY_OFFSET = 5 * WEEK_OFFSET + 4` and then clean up some algebra (which is now OK because we are using non-cdivision floordiv and mod). As a result we get rid of a bunch of calls that add ORD_OFFSET just to subtract it a few lines later.

This also gets rid of CamelCase in tslibs.period and moves towards using pandas_datetimestruct directly, getting rid of the redundant `date_info`. It catches a RuntimeWarning in the tests; closes #19767.
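For reference, the offset identities the algebra relies on are easy to sanity-check with a throwaway Python snippet (not part of the PR; the constants are the ones removed from period_helper.h):

```python
# Throwaway sanity check of the offset identities used in this refactor.
from datetime import date

ORD_OFFSET = 719163    # days from 0001-01-01 to 1970-01-01
WEEK_OFFSET = 102737
BDAY_OFFSET = 513689

assert date(1970, 1, 1).toordinal() == ORD_OFFSET
assert ORD_OFFSET == 7 * WEEK_OFFSET + 4
assert BDAY_OFFSET == 5 * WEEK_OFFSET + 4

# With Python-style (non-cdivision) floordiv/mod, the old and new
# business-day ordinal formulas agree for any unix_date:
for unix_date in range(-10000, 10000):
    old_weeks = (unix_date + ORD_OFFSET - 1) // 7
    old_delta = (unix_date + ORD_OFFSET - 1) % 7 + 1
    old = old_weeks * 5 + min(old_delta, 6) - BDAY_OFFSET

    new_weeks = (unix_date + 3) // 7
    new_delta = (unix_date + 3) % 7 + 1
    new = 5 * new_weeks + min(new_delta, 6) - 4

    assert old == new
```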
https://api.github.com/repos/pandas-dev/pandas/pulls/19834
2018-02-22T02:27:13Z
2018-02-23T11:35:01Z
2018-02-23T11:35:01Z
2018-02-23T15:27:43Z
BUG: Fix qcut with NaT present
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 4c1e98b236db7..86661eb5e4414 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -896,6 +896,7 @@ Reshaping - Bug in :func:`DataFrame.join` which does an ``outer`` instead of a ``left`` join when being called with multiple DataFrames and some have non-unique indices (:issue:`19624`) - :func:`Series.rename` now accepts ``axis`` as a kwarg (:issue:`18589`) - Comparisons between :class:`Series` and :class:`Index` would return a ``Series`` with an incorrect name, ignoring the ``Index``'s name attribute (:issue:`19582`) +- Bug in :func:`qcut` where datetime and timedelta data with ``NaT`` present raised a ``ValueError`` (:issue:`19768`) Other ^^^^^ diff --git a/pandas/core/reshape/tile.py b/pandas/core/reshape/tile.py index 777f08bd9db2b..359c030157bd3 100644 --- a/pandas/core/reshape/tile.py +++ b/pandas/core/reshape/tile.py @@ -279,18 +279,22 @@ def _trim_zeros(x): def _coerce_to_type(x): """ if the passed data is of datetime/timedelta type, - this method converts it to integer so that cut method can + this method converts it to numeric so that cut method can handle it """ dtype = None if is_timedelta64_dtype(x): - x = to_timedelta(x).view(np.int64) + x = to_timedelta(x) dtype = np.timedelta64 elif is_datetime64_dtype(x): - x = to_datetime(x).view(np.int64) + x = to_datetime(x) dtype = np.datetime64 + if dtype is not None: + # GH 19768: force NaT to NaN during integer conversion + x = np.where(x.notna(), x.view(np.int64), np.nan) + return x, dtype diff --git a/pandas/tests/reshape/test_tile.py b/pandas/tests/reshape/test_tile.py index f7262a2f0da63..ff914273d47b1 100644 --- a/pandas/tests/reshape/test_tile.py +++ b/pandas/tests/reshape/test_tile.py @@ -6,7 +6,8 @@ from pandas import (Series, isna, to_datetime, DatetimeIndex, Timestamp, Interval, IntervalIndex, Categorical, - cut, qcut, date_range) + cut, qcut, date_range, NaT, TimedeltaIndex) +from pandas.tseries.offsets import Nano, Day import pandas.util.testing as tm from pandas.api.types import CategoricalDtype as CDT @@ -250,6 +251,18 @@ def test_qcut_nas(self): result = qcut(arr, 4) assert isna(result[:20]).all() + @pytest.mark.parametrize('s', [ + Series(DatetimeIndex(['20180101', NaT, '20180103'])), + Series(TimedeltaIndex(['0 days', NaT, '2 days']))], + ids=lambda x: str(x.dtype)) + def test_qcut_nat(self, s): + # GH 19768 + intervals = IntervalIndex.from_tuples( + [(s[0] - Nano(), s[2] - Day()), np.nan, (s[2] - Day(), s[2])]) + expected = Series(Categorical(intervals, ordered=True)) + result = qcut(s, 2) + tm.assert_series_equal(result, expected) + def test_qcut_index(self): result = qcut([0, 2], 2) intervals = [Interval(-0.001, 1), Interval(1, 2)]
- [X] closes #19768
- [X] tests added / passed
- [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [X] whatsnew entry
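A minimal reproduction (hypothetical session; before this patch the `qcut` call raised a `ValueError` because `NaT` was carried through the integer conversion as its raw `iNaT` value):

```python
import numpy as np
import pandas as pd

s = pd.Series(pd.DatetimeIndex(['20180101', pd.NaT, '20180103']))
pd.qcut(s, 2)  # raised ValueError before this fix; now bins the two valid dates

# The core of the fix: view the values as int64 for binning, but force the
# NaT slots to NaN first so the quantile computation skips them.
x = np.where(s.notna(), s.view('i8'), np.nan)
```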
https://api.github.com/repos/pandas-dev/pandas/pulls/19833
2018-02-22T01:05:37Z
2018-02-22T11:39:39Z
2018-02-22T11:39:39Z
2018-02-22T17:58:05Z
DOC: correct Series.reset_index example
diff --git a/pandas/core/series.py b/pandas/core/series.py index 90dc14836ab55..b682d88ec2ce7 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -1015,7 +1015,7 @@ def reset_index(self, level=None, drop=False, name=None, inplace=False): >>> s = pd.Series([1, 2, 3, 4], index=pd.Index(['a', 'b', 'c', 'd'], ... name = 'idx')) >>> s.reset_index() - index 0 + idx 0 0 0 1 1 1 2 2 2 3
- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
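A quick check of the corrected docstring example (throwaway snippet, not part of the PR):

```python
import pandas as pd

s = pd.Series([1, 2, 3, 4],
              index=pd.Index(['a', 'b', 'c', 'd'], name='idx'))
# The named index becomes a column called 'idx', not 'index':
print(s.reset_index().columns.tolist())  # ['idx', 0]
```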
https://api.github.com/repos/pandas-dev/pandas/pulls/19832
2018-02-21T23:52:45Z
2018-02-22T02:23:12Z
2018-02-22T02:23:12Z
2018-02-22T02:23:14Z
Fix to_csv of DataFrame with single level multiindex.
diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt index de111072bef02..7a5d455ff853f 100644 --- a/doc/source/whatsnew/v0.24.0.txt +++ b/doc/source/whatsnew/v0.24.0.txt @@ -188,7 +188,7 @@ Renaming names in a MultiIndex :func:`DataFrame.rename_axis` now supports ``index`` and ``columns`` arguments and :func:`Series.rename_axis` supports ``index`` argument (:issue:`19978`) -This change allows a dictionary to be passed so that some of the names +This change allows a dictionary to be passed so that some of the names of a ``MultiIndex`` can be changed. Example: @@ -1216,6 +1216,7 @@ Notice how we now instead output ``np.nan`` itself instead of a stringified form - :func:`read_sas()` will correctly parse sas7bdat files with data page types having also bit 7 set (so page type is 128 + 256 = 384) (:issue:`16615`) - Bug in :meth:`detect_client_encoding` where potential ``IOError`` goes unhandled when importing in a mod_wsgi process due to restricted access to stdout. (:issue:`21552`) - Bug in :func:`to_string()` that broke column alignment when ``index=False`` and width of first column's values is greater than the width of first column's header (:issue:`16839`, :issue:`13032`) +- Bug in :func:`DataFrame.to_csv` where a single level MultiIndex incorrectly wrote a tuple. Now just the value of the index is written (:issue:`19589`). Plotting ^^^^^^^^ diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index 24e7d9bebae3e..c694289efc493 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -732,11 +732,14 @@ def _format_native_types(self, na_rep='nan', **kwargs): new_levels.append(level) new_labels.append(label) - # reconstruct the multi-index - mi = MultiIndex(levels=new_levels, labels=new_labels, names=self.names, - sortorder=self.sortorder, verify_integrity=False) - - return mi.values + if len(new_levels) == 1: + return Index(new_levels[0])._format_native_types() + else: + # reconstruct the multi-index + mi = MultiIndex(levels=new_levels, labels=new_labels, + names=self.names, sortorder=self.sortorder, + verify_integrity=False) + return mi.values @Appender(_index_shared_docs['_get_grouper_for_level']) def _get_grouper_for_level(self, mapper, level): diff --git a/pandas/tests/io/formats/test_to_csv.py b/pandas/tests/io/formats/test_to_csv.py index 7042cae526207..228373a7bf545 100644 --- a/pandas/tests/io/formats/test_to_csv.py +++ b/pandas/tests/io/formats/test_to_csv.py @@ -325,6 +325,25 @@ def test_to_csv_multi_index(self): exp = tm.convert_rows_list_to_csv_str(exp_rows) assert df.to_csv(index=False) == exp + @pytest.mark.parametrize("ind,expected", [ + (pd.MultiIndex(levels=[[1.0]], + labels=[[0]], + names=["x"]), + "x,data\n1.0,1\n"), + (pd.MultiIndex(levels=[[1.], [2.]], + labels=[[0], [0]], + names=["x", "y"]), + "x,y,data\n1.0,2.0,1\n") + ]) + @pytest.mark.parametrize("klass", [ + pd.DataFrame, pd.Series + ]) + def test_to_csv_single_level_multi_index(self, ind, expected, klass): + # see gh-19589 + result = klass(pd.Series([1], ind, name="data")).to_csv( + line_terminator="\n", header=True) + assert result == expected + def test_to_csv_string_array_ascii(self): # GH 10813 str_array = [{'names': ['foo', 'bar']}, {'names': ['baz', 'qux']}]
### What's this PR do?

Fixes the output of `to_csv` to show the value instead of the tuple when a ``DataFrame`` or ``Series`` is constructed using a ``MultiIndex`` with one level. That said, I'm not sure whether it would be more desirable to simply require a MultiIndex to be instantiated with more than one level in the future.

- [X] closes #19589
- [X] tests added / passed
- [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [X] whatsnew entry
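To make the behaviour change concrete, a small sketch (the "before" row is the stringified tuple reported in #19589):

```python
import pandas as pd

mi = pd.MultiIndex(levels=[[1.0]], labels=[[0]], names=['x'])
df = pd.DataFrame({'data': [1]}, index=mi)
print(df.to_csv())
# before: the single-level index value was written as a tuple, e.g. "(1.0,)",1
# after:  x,data
#         1.0,1
```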
https://api.github.com/repos/pandas-dev/pandas/pulls/19831
2018-02-21T22:38:08Z
2018-11-01T00:38:03Z
2018-11-01T00:38:03Z
2018-11-01T00:38:26Z
ENH: Let initialisation from dicts use insertion order for python >= 3.6 (part I)
diff --git a/pandas/tests/frame/test_apply.py b/pandas/tests/frame/test_apply.py index d1ad9f71e6350..a057ca0879cac 100644 --- a/pandas/tests/frame/test_apply.py +++ b/pandas/tests/frame/test_apply.py @@ -646,7 +646,7 @@ def test_infer_output_shape_columns(self): 'datetime': [pd.Timestamp('2017-11-29 03:30:00'), pd.Timestamp('2017-11-29 03:45:00')]}) result = df.apply(lambda row: (row.number, row.string), axis=1) - expected = Series([t[2:] for t in df.itertuples()]) + expected = Series([(t.number, t.string) for t in df.itertuples()]) assert_series_equal(result, expected) def test_infer_output_shape_listlike_columns(self): diff --git a/pandas/tests/frame/test_block_internals.py b/pandas/tests/frame/test_block_internals.py index 8b1fd7d50cb4d..8e012922d25f1 100644 --- a/pandas/tests/frame/test_block_internals.py +++ b/pandas/tests/frame/test_block_internals.py @@ -405,8 +405,8 @@ def test_get_numeric_data(self): result = df.get_dtype_counts() expected = Series({'int64': 1, 'float64': 1, datetime64name: 1, objectname: 1}) - result.sort_index() - expected.sort_index() + result = result.sort_index() + expected = expected.sort_index() assert_series_equal(result, expected) df = DataFrame({'a': 1., 'b': 2, 'c': 'foo', diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py index 8abd88d8a379c..511b7d17ce79a 100644 --- a/pandas/tests/frame/test_constructors.py +++ b/pandas/tests/frame/test_constructors.py @@ -1362,9 +1362,8 @@ def test_constructor_with_datetimes(self): expected['float64'] = 1 expected[floatname] = 1 - result.sort_index() - expected = Series(expected) - expected.sort_index() + result = result.sort_index() + expected = Series(expected).sort_index() tm.assert_series_equal(result, expected) # check with ndarray construction ndim>0 @@ -1373,7 +1372,7 @@ def test_constructor_with_datetimes(self): intname: np.array([1] * 10, dtype=intname)}, index=np.arange(10)) result = df.get_dtype_counts() - result.sort_index() + result = result.sort_index() tm.assert_series_equal(result, expected) # GH 2809 @@ -1384,8 +1383,8 @@ def test_constructor_with_datetimes(self): df = DataFrame({'datetime_s': datetime_s}) result = df.get_dtype_counts() expected = Series({datetime64name: 1}) - result.sort_index() - expected.sort_index() + result = result.sort_index() + expected = expected.sort_index() tm.assert_series_equal(result, expected) # GH 2810 @@ -1395,8 +1394,8 @@ def test_constructor_with_datetimes(self): df = DataFrame({'datetimes': datetimes, 'dates': dates}) result = df.get_dtype_counts() expected = Series({datetime64name: 1, objectname: 1}) - result.sort_index() - expected.sort_index() + result = result.sort_index() + expected = expected.sort_index() tm.assert_series_equal(result, expected) # GH 7594 @@ -1519,8 +1518,8 @@ def test_constructor_for_list_with_dtypes(self): result = df.get_dtype_counts() expected = Series( {'int64': 1, 'float64': 2, datetime64name: 1, objectname: 1}) - result.sort_index() - expected.sort_index() + result = result.sort_index() + expected = expected.sort_index() tm.assert_series_equal(result, expected) def test_constructor_frame_copy(self): @@ -1832,7 +1831,7 @@ def test_from_records_misc_brokenness(self): rows.append([datetime(2010, 1, 1), 1]) rows.append([datetime(2010, 1, 2), 1]) df2_obj = DataFrame.from_records(rows, columns=['date', 'test']) - results = df2_obj.get_dtype_counts() + results = df2_obj.get_dtype_counts().sort_index() expected = Series({'datetime64[ns]': 1, 'int64': 1}) tm.assert_series_equal(results, expected) 
diff --git a/pandas/tests/frame/test_dtypes.py b/pandas/tests/frame/test_dtypes.py index 38bdecc9eb88f..e9e5b2a447a4a 100644 --- a/pandas/tests/frame/test_dtypes.py +++ b/pandas/tests/frame/test_dtypes.py @@ -725,9 +725,9 @@ def test_timedeltas(self): df = DataFrame(dict(A=Series(date_range('2012-1-1', periods=3, freq='D')), B=Series([timedelta(days=i) for i in range(3)]))) - result = df.get_dtype_counts().sort_values() + result = df.get_dtype_counts().sort_index() expected = Series( - {'datetime64[ns]': 1, 'timedelta64[ns]': 1}).sort_values() + {'datetime64[ns]': 1, 'timedelta64[ns]': 1}).sort_index() assert_series_equal(result, expected) df['C'] = df['A'] + df['B'] diff --git a/pandas/tests/frame/test_indexing.py b/pandas/tests/frame/test_indexing.py index 882fa634d167d..a8b81b1b03552 100644 --- a/pandas/tests/frame/test_indexing.py +++ b/pandas/tests/frame/test_indexing.py @@ -2430,8 +2430,8 @@ def _check_get(df, cond, check_dtypes=True): # upcasting case (GH # 2794) df = DataFrame(dict((c, Series([1] * 3, dtype=c)) - for c in ['int64', 'int32', - 'float32', 'float64'])) + for c in ['float32', 'float64', + 'int32', 'int64'])) df.iloc[1, :] = 0 result = df.where(df >= 0).get_dtype_counts() diff --git a/pandas/tests/frame/test_mutate_columns.py b/pandas/tests/frame/test_mutate_columns.py index 8236a41d00243..4c560129bfa45 100644 --- a/pandas/tests/frame/test_mutate_columns.py +++ b/pandas/tests/frame/test_mutate_columns.py @@ -166,17 +166,17 @@ def test_insert(self): # new item df['x'] = df['a'].astype('float32') - result = Series(dict(float64=5, float32=1)) - assert (df.get_dtype_counts() == result).all() + result = Series(dict(float32=1, float64=5)) + assert (df.get_dtype_counts().sort_index() == result).all() # replacing current (in different block) df['a'] = df['a'].astype('float32') - result = Series(dict(float64=4, float32=2)) - assert (df.get_dtype_counts() == result).all() + result = Series(dict(float32=2, float64=4)) + assert (df.get_dtype_counts().sort_index() == result).all() df['y'] = df['a'].astype('int32') - result = Series(dict(float64=4, float32=2, int32=1)) - assert (df.get_dtype_counts() == result).all() + result = Series(dict(float32=2, float64=4, int32=1)) + assert (df.get_dtype_counts().sort_index() == result).all() with tm.assert_raises_regex(ValueError, 'already exists'): df.insert(1, 'a', df['b']) diff --git a/pandas/tests/frame/test_nonunique_indexes.py b/pandas/tests/frame/test_nonunique_indexes.py index 36465db78361f..0b32ec89d3909 100644 --- a/pandas/tests/frame/test_nonunique_indexes.py +++ b/pandas/tests/frame/test_nonunique_indexes.py @@ -155,14 +155,14 @@ def check(result, expected=None): # rename, GH 4403 df4 = DataFrame( - {'TClose': [22.02], - 'RT': [0.0454], + {'RT': [0.0454], + 'TClose': [22.02], 'TExg': [0.0422]}, index=MultiIndex.from_tuples([(600809, 20130331)], names=['STK_ID', 'RPT_Date'])) - df5 = DataFrame({'STK_ID': [600809] * 3, - 'RPT_Date': [20120930, 20121231, 20130331], + df5 = DataFrame({'RPT_Date': [20120930, 20121231, 20130331], + 'STK_ID': [600809] * 3, 'STK_Name': [u('ι₯‘驦'), u('ι₯‘驦'), u('ι₯‘驦')], 'TClose': [38.05, 41.66, 30.01]}, index=MultiIndex.from_tuples( diff --git a/pandas/tests/frame/test_reshape.py b/pandas/tests/frame/test_reshape.py index 7907486c7c98d..68df0982a1e3e 100644 --- a/pandas/tests/frame/test_reshape.py +++ b/pandas/tests/frame/test_reshape.py @@ -719,9 +719,10 @@ def verify(df): assert_frame_equal(left, right) # GH7401 - df = pd.DataFrame({'A': list('aaaaabbbbb'), 'C': np.arange(10), + df = pd.DataFrame({'A': 
list('aaaaabbbbb'), 'B': (date_range('2012-01-01', periods=5) - .tolist() * 2)}) + .tolist() * 2), + 'C': np.arange(10)}) df.iloc[3, 1] = np.NaN left = df.set_index(['A', 'B']).unstack() diff --git a/pandas/tests/frame/test_to_csv.py b/pandas/tests/frame/test_to_csv.py index a3ba34ae92283..dda5cdea52cac 100644 --- a/pandas/tests/frame/test_to_csv.py +++ b/pandas/tests/frame/test_to_csv.py @@ -1054,10 +1054,10 @@ def test_to_csv_with_dst_transitions(self): def test_to_csv_quoting(self): df = DataFrame({ - 'c_string': ['a', 'b,c'], - 'c_int': [42, np.nan], - 'c_float': [1.0, 3.2], 'c_bool': [True, False], + 'c_float': [1.0, 3.2], + 'c_int': [42, np.nan], + 'c_string': ['a', 'b,c'], }) expected = """\
- [x] xref #19018
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`

This PR prepares pandas to allow initialisation from dicts using insertion order for python >= 3.6. E.g. ``Series({'B': 1, 'A': 2})`` will then retain order and not convert to ``Series({'A': 2, 'B': 1})``.

Letting dataframe/series initialisation from dicts preserve order for python >= 3.6 is a relatively simple change: just change a line each in core/frame.py and core/series.py. However, this makes some 36 tests fail, as the tests have been set up to depend on conversion to alphabetical order. This PR makes all tests in ``pandas/tests/frame`` pass whether dict initialisation uses insertion order or alphabetical order. No test functionality is changed in this PR.

After this PR I will do one or two more similar PRs, so that all tests pass independently of the dict initialisation rule, and then core/frame.py and core/series.py will be changed to use insertion order for python >= 3.6.
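As a sketch of the end state this series of PRs is working towards (this particular PR only makes the tests order-agnostic; the constructor change itself comes in a later part):

```python
import sys
import pandas as pd

s = pd.Series({'B': 1, 'A': 2})
if sys.version_info >= (3, 6):
    # target behaviour: insertion order is preserved
    expected = ['B', 'A']
else:
    # older pythons keep the current alphabetical fallback
    expected = ['A', 'B']
assert list(s.index) == expected
```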
https://api.github.com/repos/pandas-dev/pandas/pulls/19830
2018-02-21T22:04:58Z
2018-02-23T01:22:35Z
2018-02-23T01:22:35Z
2018-02-23T07:26:46Z
Test Decorators and Better Pytest Integration in 'test_excel'
diff --git a/pandas/compat/__init__.py b/pandas/compat/__init__.py index 80a2c05d86971..78aaf4596c8b7 100644 --- a/pandas/compat/__init__.py +++ b/pandas/compat/__init__.py @@ -365,6 +365,20 @@ def callable(obj): return any("__call__" in klass.__dict__ for klass in type(obj).__mro__) +if sys.version_info[0] < 3: + # In PY2 functools.wraps doesn't provide metadata pytest needs to generate + # decorated tests using parametrization. See pytest GH issue #2782 + def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS, + updated=functools.WRAPPER_UPDATES): + def wrapper(f): + f = functools.wraps(wrapped, assigned, updated)(f) + f.__wrapped__ = wrapped + return f + return wrapper +else: + wraps = functools.wraps + + def add_metaclass(metaclass): """Class decorator for creating a class with a metaclass.""" def wrapper(cls): diff --git a/pandas/tests/io/test_excel.py b/pandas/tests/io/test_excel.py index 86cee54665781..fdf9954285db8 100644 --- a/pandas/tests/io/test_excel.py +++ b/pandas/tests/io/test_excel.py @@ -28,43 +28,6 @@ from pandas.util.testing import ensure_clean, makeCustomDataframe as mkdf -def _skip_if_no_xlrd(): - try: - import xlrd - ver = tuple(map(int, xlrd.__VERSION__.split(".")[:2])) - if ver < (0, 9): - pytest.skip('xlrd < 0.9, skipping') - except ImportError: - pytest.skip('xlrd not installed, skipping') - - -def _skip_if_no_xlwt(): - try: - import xlwt # NOQA - except ImportError: - pytest.skip('xlwt not installed, skipping') - - -def _skip_if_no_openpyxl(): - try: - import openpyxl # NOQA - except ImportError: - pytest.skip('openpyxl not installed, skipping') - - -def _skip_if_no_xlsxwriter(): - try: - import xlsxwriter # NOQA - except ImportError: - pytest.skip('xlsxwriter not installed, skipping') - - -def _skip_if_no_excelsuite(): - _skip_if_no_xlrd() - _skip_if_no_xlwt() - _skip_if_no_openpyxl() - - _seriesd = tm.getSeriesData() _tsd = tm.getTimeSeriesData() _frame = DataFrame(_seriesd)[:10] @@ -74,6 +37,7 @@ def _skip_if_no_excelsuite(): _mixed_frame['foo'] = 'bar' +@td.skip_if_no('xlrd', '0.9') class SharedItems(object): def setup_method(self, method): @@ -103,7 +67,7 @@ def get_csv_refdf(self, basename): dfref = read_csv(pref, index_col=0, parse_dates=True, engine='python') return dfref - def get_excelfile(self, basename): + def get_excelfile(self, basename, ext): """ Return test data ExcelFile instance. Test data path is defined by pandas.util.testing.get_data_path() @@ -119,9 +83,9 @@ def get_excelfile(self, basename): excel : io.excel.ExcelFile """ - return ExcelFile(os.path.join(self.dirpath, basename + self.ext)) + return ExcelFile(os.path.join(self.dirpath, basename + ext)) - def get_exceldf(self, basename, *args, **kwds): + def get_exceldf(self, basename, ext, *args, **kwds): """ Return test data DataFrame. Test data path is defined by pandas.util.testing.get_data_path() @@ -137,36 +101,23 @@ def get_exceldf(self, basename, *args, **kwds): df : DataFrame """ - pth = os.path.join(self.dirpath, basename + self.ext) + pth = os.path.join(self.dirpath, basename + ext) return read_excel(pth, *args, **kwds) class ReadingTestsBase(SharedItems): # This is based on ExcelWriterBase - # - # Base class for test cases to run with different Excel readers. - # To add a reader test, define the following: - # 1. A check_skip function that skips your tests if your reader isn't - # installed. - # 2. Add a property ext, which is the file extension that your reader - # reades from. (needs to start with '.' so it's a valid path) - # 3. 
Add a property engine_name, which is the name of the reader class. - # For the reader this is not used for anything at the moment. - def setup_method(self, method): - self.check_skip() - super(ReadingTestsBase, self).setup_method(method) - - def test_usecols_int(self): + def test_usecols_int(self, ext): dfref = self.get_csv_refdf('test1') dfref = dfref.reindex(columns=['A', 'B', 'C']) - df1 = self.get_exceldf('test1', 'Sheet1', index_col=0, usecols=3) - df2 = self.get_exceldf('test1', 'Sheet2', skiprows=[1], index_col=0, - usecols=3) + df1 = self.get_exceldf('test1', ext, 'Sheet1', index_col=0, usecols=3) + df2 = self.get_exceldf('test1', ext, 'Sheet2', skiprows=[1], + index_col=0, usecols=3) with tm.assert_produces_warning(FutureWarning): - df3 = self.get_exceldf('test1', 'Sheet2', skiprows=[1], + df3 = self.get_exceldf('test1', ext, 'Sheet2', skiprows=[1], index_col=0, parse_cols=3) # TODO add index to xls file) @@ -174,17 +125,17 @@ def test_usecols_int(self): tm.assert_frame_equal(df2, dfref, check_names=False) tm.assert_frame_equal(df3, dfref, check_names=False) - def test_usecols_list(self): + def test_usecols_list(self, ext): dfref = self.get_csv_refdf('test1') dfref = dfref.reindex(columns=['B', 'C']) - df1 = self.get_exceldf('test1', 'Sheet1', index_col=0, - usecols=[0, 2, 3]) - df2 = self.get_exceldf('test1', 'Sheet2', skiprows=[1], index_col=0, + df1 = self.get_exceldf('test1', ext, 'Sheet1', index_col=0, usecols=[0, 2, 3]) + df2 = self.get_exceldf('test1', ext, 'Sheet2', skiprows=[1], + index_col=0, usecols=[0, 2, 3]) with tm.assert_produces_warning(FutureWarning): - df3 = self.get_exceldf('test1', 'Sheet2', skiprows=[1], + df3 = self.get_exceldf('test1', ext, 'Sheet2', skiprows=[1], index_col=0, parse_cols=[0, 2, 3]) # TODO add index to xls file) @@ -192,18 +143,18 @@ def test_usecols_list(self): tm.assert_frame_equal(df2, dfref, check_names=False) tm.assert_frame_equal(df3, dfref, check_names=False) - def test_usecols_str(self): + def test_usecols_str(self, ext): dfref = self.get_csv_refdf('test1') df1 = dfref.reindex(columns=['A', 'B', 'C']) - df2 = self.get_exceldf('test1', 'Sheet1', index_col=0, - usecols='A:D') - df3 = self.get_exceldf('test1', 'Sheet2', skiprows=[1], index_col=0, + df2 = self.get_exceldf('test1', ext, 'Sheet1', index_col=0, usecols='A:D') + df3 = self.get_exceldf('test1', ext, 'Sheet2', skiprows=[1], + index_col=0, usecols='A:D') with tm.assert_produces_warning(FutureWarning): - df4 = self.get_exceldf('test1', 'Sheet2', skiprows=[1], + df4 = self.get_exceldf('test1', ext, 'Sheet2', skiprows=[1], index_col=0, parse_cols='A:D') # TODO add index to xls, read xls ignores index name ? 
@@ -212,37 +163,37 @@ def test_usecols_str(self): tm.assert_frame_equal(df4, df1, check_names=False) df1 = dfref.reindex(columns=['B', 'C']) - df2 = self.get_exceldf('test1', 'Sheet1', index_col=0, - usecols='A,C,D') - df3 = self.get_exceldf('test1', 'Sheet2', skiprows=[1], index_col=0, + df2 = self.get_exceldf('test1', ext, 'Sheet1', index_col=0, usecols='A,C,D') + df3 = self.get_exceldf('test1', ext, 'Sheet2', skiprows=[1], + index_col=0, usecols='A,C,D') # TODO add index to xls file tm.assert_frame_equal(df2, df1, check_names=False) tm.assert_frame_equal(df3, df1, check_names=False) df1 = dfref.reindex(columns=['B', 'C']) - df2 = self.get_exceldf('test1', 'Sheet1', index_col=0, - usecols='A,C:D') - df3 = self.get_exceldf('test1', 'Sheet2', skiprows=[1], index_col=0, + df2 = self.get_exceldf('test1', ext, 'Sheet1', index_col=0, usecols='A,C:D') + df3 = self.get_exceldf('test1', ext, 'Sheet2', skiprows=[1], + index_col=0, usecols='A,C:D') tm.assert_frame_equal(df2, df1, check_names=False) tm.assert_frame_equal(df3, df1, check_names=False) - def test_excel_stop_iterator(self): + def test_excel_stop_iterator(self, ext): - parsed = self.get_exceldf('test2', 'Sheet1') + parsed = self.get_exceldf('test2', ext, 'Sheet1') expected = DataFrame([['aaaa', 'bbbbb']], columns=['Test', 'Test1']) tm.assert_frame_equal(parsed, expected) - def test_excel_cell_error_na(self): + def test_excel_cell_error_na(self, ext): - parsed = self.get_exceldf('test3', 'Sheet1') + parsed = self.get_exceldf('test3', ext, 'Sheet1') expected = DataFrame([[np.nan]], columns=['Test']) tm.assert_frame_equal(parsed, expected) - def test_excel_passes_na(self): + def test_excel_passes_na(self, ext): - excel = self.get_excelfile('test4') + excel = self.get_excelfile('test4', ext) parsed = read_excel(excel, 'Sheet1', keep_default_na=False, na_values=['apple']) @@ -257,7 +208,7 @@ def test_excel_passes_na(self): tm.assert_frame_equal(parsed, expected) # 13967 - excel = self.get_excelfile('test5') + excel = self.get_excelfile('test5', ext) parsed = read_excel(excel, 'Sheet1', keep_default_na=False, na_values=['apple']) @@ -271,9 +222,9 @@ def test_excel_passes_na(self): columns=['Test']) tm.assert_frame_equal(parsed, expected) - def test_excel_table_sheet_by_index(self): + def test_excel_table_sheet_by_index(self, ext): - excel = self.get_excelfile('test1') + excel = self.get_excelfile('test1', ext) dfref = self.get_csv_refdf('test1') df1 = read_excel(excel, 0, index_col=0) @@ -300,21 +251,22 @@ def test_excel_table_sheet_by_index(self): with pytest.raises(xlrd.XLRDError): read_excel(excel, 'asdf') - def test_excel_table(self): + def test_excel_table(self, ext): dfref = self.get_csv_refdf('test1') - df1 = self.get_exceldf('test1', 'Sheet1', index_col=0) - df2 = self.get_exceldf('test1', 'Sheet2', skiprows=[1], index_col=0) + df1 = self.get_exceldf('test1', ext, 'Sheet1', index_col=0) + df2 = self.get_exceldf('test1', ext, 'Sheet2', skiprows=[1], + index_col=0) # TODO add index to file tm.assert_frame_equal(df1, dfref, check_names=False) tm.assert_frame_equal(df2, dfref, check_names=False) - df3 = self.get_exceldf('test1', 'Sheet1', index_col=0, + df3 = self.get_exceldf('test1', ext, 'Sheet1', index_col=0, skipfooter=1) tm.assert_frame_equal(df3, df1.iloc[:-1]) - def test_reader_special_dtypes(self): + def test_reader_special_dtypes(self, ext): expected = DataFrame.from_dict(OrderedDict([ ("IntCol", [1, 2, -3, 4, 0]), @@ -330,36 +282,36 @@ def test_reader_special_dtypes(self): basename = 'test_types' # should read in correctly and 
infer types - actual = self.get_exceldf(basename, 'Sheet1') + actual = self.get_exceldf(basename, ext, 'Sheet1') tm.assert_frame_equal(actual, expected) # if not coercing number, then int comes in as float float_expected = expected.copy() float_expected["IntCol"] = float_expected["IntCol"].astype(float) float_expected.loc[float_expected.index[1], "Str2Col"] = 3.0 - actual = self.get_exceldf(basename, 'Sheet1', convert_float=False) + actual = self.get_exceldf(basename, ext, 'Sheet1', convert_float=False) tm.assert_frame_equal(actual, float_expected) # check setting Index (assuming xls and xlsx are the same here) for icol, name in enumerate(expected.columns): - actual = self.get_exceldf(basename, 'Sheet1', index_col=icol) + actual = self.get_exceldf(basename, ext, 'Sheet1', index_col=icol) exp = expected.set_index(name) tm.assert_frame_equal(actual, exp) # convert_float and converters should be different but both accepted expected["StrCol"] = expected["StrCol"].apply(str) actual = self.get_exceldf( - basename, 'Sheet1', converters={"StrCol": str}) + basename, ext, 'Sheet1', converters={"StrCol": str}) tm.assert_frame_equal(actual, expected) no_convert_float = float_expected.copy() no_convert_float["StrCol"] = no_convert_float["StrCol"].apply(str) - actual = self.get_exceldf(basename, 'Sheet1', convert_float=False, + actual = self.get_exceldf(basename, ext, 'Sheet1', convert_float=False, converters={"StrCol": str}) tm.assert_frame_equal(actual, no_convert_float) # GH8212 - support for converters and missing values - def test_reader_converters(self): + def test_reader_converters(self, ext): basename = 'test_converters' @@ -378,13 +330,14 @@ def test_reader_converters(self): # should read in correctly and set types of single cells (not array # dtypes) - actual = self.get_exceldf(basename, 'Sheet1', converters=converters) + actual = self.get_exceldf(basename, ext, 'Sheet1', + converters=converters) tm.assert_frame_equal(actual, expected) - def test_reader_dtype(self): + def test_reader_dtype(self, ext): # GH 8212 basename = 'testdtype' - actual = self.get_exceldf(basename) + actual = self.get_exceldf(basename, ext) expected = DataFrame({ 'a': [1, 2, 3, 4], @@ -395,7 +348,7 @@ def test_reader_dtype(self): tm.assert_frame_equal(actual, expected) - actual = self.get_exceldf(basename, + actual = self.get_exceldf(basename, ext, dtype={'a': 'float64', 'b': 'float32', 'c': str}) @@ -406,14 +359,14 @@ def test_reader_dtype(self): tm.assert_frame_equal(actual, expected) with pytest.raises(ValueError): - actual = self.get_exceldf(basename, dtype={'d': 'int64'}) + actual = self.get_exceldf(basename, ext, dtype={'d': 'int64'}) - def test_reading_all_sheets(self): + def test_reading_all_sheets(self, ext): # Test reading all sheetnames by setting sheetname to None, # Ensure a dict is returned. # See PR #9450 basename = 'test_multisheet' - dfs = self.get_exceldf(basename, sheet_name=None) + dfs = self.get_exceldf(basename, ext, sheet_name=None) # ensure this is not alphabetical to test order preservation expected_keys = ['Charlie', 'Alpha', 'Beta'] tm.assert_contains_all(expected_keys, dfs.keys()) @@ -421,7 +374,7 @@ def test_reading_all_sheets(self): # Ensure sheet order is preserved assert expected_keys == list(dfs.keys()) - def test_reading_multiple_specific_sheets(self): + def test_reading_multiple_specific_sheets(self, ext): # Test reading specific sheetnames by specifying a mixed list # of integers and strings, and confirm that duplicated sheet # references (positions/names) are removed properly. 
@@ -430,42 +383,41 @@ def test_reading_multiple_specific_sheets(self): basename = 'test_multisheet' # Explicitly request duplicates. Only the set should be returned. expected_keys = [2, 'Charlie', 'Charlie'] - dfs = self.get_exceldf(basename, sheet_name=expected_keys) + dfs = self.get_exceldf(basename, ext, sheet_name=expected_keys) expected_keys = list(set(expected_keys)) tm.assert_contains_all(expected_keys, dfs.keys()) assert len(expected_keys) == len(dfs.keys()) - def test_reading_all_sheets_with_blank(self): + def test_reading_all_sheets_with_blank(self, ext): # Test reading all sheetnames by setting sheetname to None, # In the case where some sheets are blank. # Issue #11711 basename = 'blank_with_header' - dfs = self.get_exceldf(basename, sheet_name=None) + dfs = self.get_exceldf(basename, ext, sheet_name=None) expected_keys = ['Sheet1', 'Sheet2', 'Sheet3'] tm.assert_contains_all(expected_keys, dfs.keys()) # GH6403 - def test_read_excel_blank(self): - actual = self.get_exceldf('blank', 'Sheet1') + def test_read_excel_blank(self, ext): + actual = self.get_exceldf('blank', ext, 'Sheet1') tm.assert_frame_equal(actual, DataFrame()) - def test_read_excel_blank_with_header(self): + def test_read_excel_blank_with_header(self, ext): expected = DataFrame(columns=['col_1', 'col_2']) - actual = self.get_exceldf('blank_with_header', 'Sheet1') + actual = self.get_exceldf('blank_with_header', ext, 'Sheet1') tm.assert_frame_equal(actual, expected) + @td.skip_if_no('openpyxl') + @td.skip_if_no('xlwt') # GH 12292 : error when read one empty column from excel file - def test_read_one_empty_col_no_header(self): - _skip_if_no_xlwt() - _skip_if_no_openpyxl() - + def test_read_one_empty_col_no_header(self, ext): df = pd.DataFrame( [["", 1, 100], ["", 2, 200], ["", 3, 300], ["", 4, 400]] ) - with ensure_clean(self.ext) as path: + with ensure_clean(ext) as path: df.to_excel(path, 'no_header', index=False, header=False) actual_header_none = read_excel( path, @@ -484,17 +436,16 @@ def test_read_one_empty_col_no_header(self): tm.assert_frame_equal(actual_header_none, expected) tm.assert_frame_equal(actual_header_zero, expected) - def test_read_one_empty_col_with_header(self): - _skip_if_no_xlwt() - _skip_if_no_openpyxl() - + @td.skip_if_no('openpyxl') + @td.skip_if_no('xlwt') + def test_read_one_empty_col_with_header(self, ext): df = pd.DataFrame( [["", 1, 100], ["", 2, 200], ["", 3, 300], ["", 4, 400]] ) - with ensure_clean(self.ext) as path: + with ensure_clean(ext) as path: df.to_excel(path, 'with_header', index=False, header=True) actual_header_none = read_excel( path, @@ -514,16 +465,15 @@ def test_read_one_empty_col_with_header(self): expected_header_zero = DataFrame(columns=[0], dtype='int64') tm.assert_frame_equal(actual_header_zero, expected_header_zero) - def test_set_column_names_in_parameter(self): - _skip_if_no_xlwt() - _skip_if_no_openpyxl() - + @td.skip_if_no('openpyxl') + @td.skip_if_no('xlwt') + def test_set_column_names_in_parameter(self, ext): # GH 12870 : pass down column names associated with # keyword argument names refdf = pd.DataFrame([[1, 'foo'], [2, 'bar'], [3, 'baz']], columns=['a', 'b']) - with ensure_clean(self.ext) as pth: + with ensure_clean(ext) as pth: with ExcelWriter(pth) as writer: refdf.to_excel(writer, 'Data_no_head', header=False, index=False) @@ -540,42 +490,45 @@ def test_set_column_names_in_parameter(self): tm.assert_frame_equal(xlsdf_no_head, refdf) tm.assert_frame_equal(xlsdf_with_head, refdf) - def test_date_conversion_overflow(self): + def 
test_date_conversion_overflow(self, ext): # GH 10001 : pandas.ExcelFile ignore parse_dates=False expected = pd.DataFrame([[pd.Timestamp('2016-03-12'), 'Marc Johnson'], [pd.Timestamp('2016-03-16'), 'Jack Black'], [1e+20, 'Timothy Brown']], columns=['DateColWithBigInt', 'StringCol']) - result = self.get_exceldf('testdateoverflow') + result = self.get_exceldf('testdateoverflow', ext) tm.assert_frame_equal(result, expected) - def test_sheet_name_and_sheetname(self): + def test_sheet_name_and_sheetname(self, ext): # GH10559: Minor improvement: Change "sheet_name" to "sheetname" # GH10969: DOC: Consistent var names (sheetname vs sheet_name) # GH12604: CLN GH10559 Rename sheetname variable to sheet_name dfref = self.get_csv_refdf('test1') - df1 = self.get_exceldf('test1', sheet_name='Sheet1') # doc + df1 = self.get_exceldf('test1', ext, sheet_name='Sheet1') # doc with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): - df2 = self.get_exceldf('test1', sheetname='Sheet1') # bkwrd compat + df2 = self.get_exceldf('test1', ext, + sheetname='Sheet1') # bkwrd compat tm.assert_frame_equal(df1, dfref, check_names=False) tm.assert_frame_equal(df2, dfref, check_names=False) - def test_sheet_name_both_raises(self): + def test_sheet_name_both_raises(self, ext): with tm.assert_raises_regex(TypeError, "Cannot specify both"): - self.get_exceldf('test1', sheetname='Sheet1', sheet_name='Sheet1') + self.get_exceldf('test1', ext, sheetname='Sheet1', + sheet_name='Sheet1') -class XlrdTests(ReadingTestsBase): +@pytest.mark.parametrize("ext", ['.xls', '.xlsx', '.xlsm']) +class TestXlrdReader(ReadingTestsBase): """ This is the base class for the xlrd tests, and 3 different file formats are supported: xls, xlsx, xlsm """ - def test_excel_read_buffer(self): + def test_excel_read_buffer(self, ext): - pth = os.path.join(self.dirpath, 'test1' + self.ext) + pth = os.path.join(self.dirpath, 'test1' + ext) expected = read_excel(pth, 'Sheet1', index_col=0) with open(pth, 'rb') as f: actual = read_excel(f, 'Sheet1', index_col=0) @@ -586,10 +539,10 @@ def test_excel_read_buffer(self): actual = read_excel(xls, 'Sheet1', index_col=0) tm.assert_frame_equal(expected, actual) - def test_read_xlrd_Book(self): - _skip_if_no_xlwt() - + @td.skip_if_no('xlwt') + def test_read_xlrd_Book(self, ext): import xlrd + df = self.frame with ensure_clean('.xls') as pth: df.to_excel(pth, "SheetA") @@ -603,39 +556,39 @@ def test_read_xlrd_Book(self): tm.assert_frame_equal(df, result) @tm.network - def test_read_from_http_url(self): + def test_read_from_http_url(self, ext): url = ('https://raw.github.com/pandas-dev/pandas/master/' - 'pandas/tests/io/data/test1' + self.ext) + 'pandas/tests/io/data/test1' + ext) url_table = read_excel(url) - local_table = self.get_exceldf('test1') + local_table = self.get_exceldf('test1', ext) tm.assert_frame_equal(url_table, local_table) - def test_read_from_s3_url(self): + @td.skip_if_no('s3fs') + def test_read_from_s3_url(self, ext): boto3 = pytest.importorskip('boto3') - pytest.importorskip('s3fs') moto = pytest.importorskip('moto') with moto.mock_s3(): conn = boto3.resource("s3", region_name="us-east-1") conn.create_bucket(Bucket="pandas-test") - file_name = os.path.join(self.dirpath, 'test1' + self.ext) + file_name = os.path.join(self.dirpath, 'test1' + ext) with open(file_name, 'rb') as f: - conn.Bucket("pandas-test").put_object(Key="test1" + self.ext, + conn.Bucket("pandas-test").put_object(Key="test1" + ext, Body=f) - url = ('s3://pandas-test/test1' + self.ext) + url = ('s3://pandas-test/test1' + 
ext) url_table = read_excel(url) - local_table = self.get_exceldf('test1') + local_table = self.get_exceldf('test1', ext) tm.assert_frame_equal(url_table, local_table) @pytest.mark.slow - def test_read_from_file_url(self): + def test_read_from_file_url(self, ext): # FILE if sys.version_info[:2] < (2, 6): pytest.skip("file:// not supported with Python < 2.6") - localtable = os.path.join(self.dirpath, 'test1' + self.ext) + localtable = os.path.join(self.dirpath, 'test1' + ext) local_table = read_excel(localtable) try: @@ -649,37 +602,37 @@ def test_read_from_file_url(self): tm.assert_frame_equal(url_table, local_table) @td.skip_if_no('pathlib') - def test_read_from_pathlib_path(self): + def test_read_from_pathlib_path(self, ext): # GH12655 from pathlib import Path - str_path = os.path.join(self.dirpath, 'test1' + self.ext) + str_path = os.path.join(self.dirpath, 'test1' + ext) expected = read_excel(str_path, 'Sheet1', index_col=0) - path_obj = Path(self.dirpath, 'test1' + self.ext) + path_obj = Path(self.dirpath, 'test1' + ext) actual = read_excel(path_obj, 'Sheet1', index_col=0) tm.assert_frame_equal(expected, actual) @td.skip_if_no('py.path') - def test_read_from_py_localpath(self): + def test_read_from_py_localpath(self, ext): # GH12655 from py.path import local as LocalPath - str_path = os.path.join(self.dirpath, 'test1' + self.ext) + str_path = os.path.join(self.dirpath, 'test1' + ext) expected = read_excel(str_path, 'Sheet1', index_col=0) abs_dir = os.path.abspath(self.dirpath) - path_obj = LocalPath(abs_dir).join('test1' + self.ext) + path_obj = LocalPath(abs_dir).join('test1' + ext) actual = read_excel(path_obj, 'Sheet1', index_col=0) tm.assert_frame_equal(expected, actual) - def test_reader_closes_file(self): + def test_reader_closes_file(self, ext): - pth = os.path.join(self.dirpath, 'test1' + self.ext) + pth = os.path.join(self.dirpath, 'test1' + ext) f = open(pth, 'rb') with ExcelFile(f) as xlsx: # parses okay @@ -687,14 +640,12 @@ def test_reader_closes_file(self): assert f.closed - def test_creating_and_reading_multiple_sheets(self): + @td.skip_if_no('openpyxl') + @td.skip_if_no('xlwt') + def test_creating_and_reading_multiple_sheets(self, ext): # Test reading multiple sheets, from a runtime created excel file # with multiple sheets. # See PR #9450 - - _skip_if_no_xlwt() - _skip_if_no_openpyxl() - def tdf(sheetname): d, i = [11, 22, 33], [1, 2, 3] return DataFrame(d, i, columns=[sheetname]) @@ -704,7 +655,7 @@ def tdf(sheetname): dfs = [tdf(s) for s in sheets] dfs = dict(zip(sheets, dfs)) - with ensure_clean(self.ext) as pth: + with ensure_clean(ext) as pth: with ExcelWriter(pth) as ew: for sheetname, df in iteritems(dfs): df.to_excel(ew, sheetname) @@ -712,10 +663,10 @@ def tdf(sheetname): for s in sheets: tm.assert_frame_equal(dfs[s], dfs_returned[s]) - def test_reader_seconds(self): - # Test reading times with and without milliseconds. GH5945. + def test_reader_seconds(self, ext): import xlrd + # Test reading times with and without milliseconds. GH5945. if LooseVersion(xlrd.__VERSION__) >= LooseVersion("0.9.3"): # Xlrd >= 0.9.3 can handle Excel milliseconds. 
expected = DataFrame.from_dict({"Time": [time(1, 2, 3), @@ -743,16 +694,16 @@ def test_reader_seconds(self): time(16, 37, 1), time(18, 20, 54)]}) - actual = self.get_exceldf('times_1900', 'Sheet1') + actual = self.get_exceldf('times_1900', ext, 'Sheet1') tm.assert_frame_equal(actual, expected) - actual = self.get_exceldf('times_1904', 'Sheet1') + actual = self.get_exceldf('times_1904', ext, 'Sheet1') tm.assert_frame_equal(actual, expected) - def test_read_excel_multiindex(self): + def test_read_excel_multiindex(self, ext): # GH 4679 mi = MultiIndex.from_product([['foo', 'bar'], ['a', 'b']]) - mi_file = os.path.join(self.dirpath, 'testmultiindex' + self.ext) + mi_file = os.path.join(self.dirpath, 'testmultiindex' + ext) expected = DataFrame([[1, 2.5, pd.Timestamp('2015-01-01'), True], [2, 3.5, pd.Timestamp('2015-01-02'), False], @@ -806,9 +757,9 @@ def test_read_excel_multiindex(self): header=[0, 1], skiprows=2) tm.assert_frame_equal(actual, expected) - def test_read_excel_multiindex_empty_level(self): + @td.skip_if_no('xlsxwriter') + def test_read_excel_multiindex_empty_level(self, ext): # GH 12453 - _skip_if_no_xlsxwriter() with ensure_clean('.xlsx') as path: df = DataFrame({ ('Zero', ''): {0: 0}, @@ -846,9 +797,9 @@ def test_read_excel_multiindex_empty_level(self): actual = pd.read_excel(path, header=[0, 1]) tm.assert_frame_equal(actual, expected) - def test_excel_multindex_roundtrip(self): + @td.skip_if_no('xlsxwriter') + def test_excel_multindex_roundtrip(self, ext): # GH 4679 - _skip_if_no_xlsxwriter() with ensure_clean('.xlsx') as pth: for c_idx_names in [True, False]: for r_idx_names in [True, False]: @@ -891,9 +842,9 @@ def test_excel_multindex_roundtrip(self): tm.assert_frame_equal( df, act, check_names=check_names) - def test_excel_old_index_format(self): + def test_excel_old_index_format(self, ext): # see gh-4679 - filename = 'test_index_name_pre17' + self.ext + filename = 'test_index_name_pre17' + ext in_file = os.path.join(self.dirpath, filename) # We detect headers to determine if index names exist, so @@ -952,31 +903,30 @@ def test_excel_old_index_format(self): actual = pd.read_excel(in_file, 'multi_no_names', index_col=[0, 1]) tm.assert_frame_equal(actual, expected, check_names=False) - def test_read_excel_bool_header_arg(self): + def test_read_excel_bool_header_arg(self, ext): # GH 6114 for arg in [True, False]: with pytest.raises(TypeError): - pd.read_excel(os.path.join(self.dirpath, 'test1' + self.ext), + pd.read_excel(os.path.join(self.dirpath, 'test1' + ext), header=arg) - def test_read_excel_chunksize(self): + def test_read_excel_chunksize(self, ext): # GH 8011 with pytest.raises(NotImplementedError): - pd.read_excel(os.path.join(self.dirpath, 'test1' + self.ext), + pd.read_excel(os.path.join(self.dirpath, 'test1' + ext), chunksize=100) - def test_read_excel_parse_dates(self): + @td.skip_if_no('openpyxl') + @td.skip_if_no('xlwt') + def test_read_excel_parse_dates(self, ext): # GH 11544, 12051 - _skip_if_no_openpyxl() - _skip_if_no_xlwt() # for df2.to_excel - df = DataFrame( {'col': [1, 2, 3], 'date_strings': pd.date_range('2012-01-01', periods=3)}) df2 = df.copy() df2['date_strings'] = df2['date_strings'].dt.strftime('%m/%d/%Y') - with ensure_clean(self.ext) as pth: + with ensure_clean(ext) as pth: df2.to_excel(pth) res = read_excel(pth) @@ -995,10 +945,10 @@ def test_read_excel_parse_dates(self): date_parser=dateparser, index_col=0) tm.assert_frame_equal(df, res) - def test_read_excel_skiprows_list(self): + def test_read_excel_skiprows_list(self, ext): # GH 4903 
actual = pd.read_excel(os.path.join(self.dirpath, - 'testskiprows' + self.ext), + 'testskiprows' + ext), 'skiprows_list', skiprows=[0, 2]) expected = DataFrame([[1, 2.5, pd.Timestamp('2015-01-01'), True], [2, 3.5, pd.Timestamp('2015-01-02'), False], @@ -1008,40 +958,40 @@ def test_read_excel_skiprows_list(self): tm.assert_frame_equal(actual, expected) actual = pd.read_excel(os.path.join(self.dirpath, - 'testskiprows' + self.ext), + 'testskiprows' + ext), 'skiprows_list', skiprows=np.array([0, 2])) tm.assert_frame_equal(actual, expected) - def test_read_excel_nrows(self): + def test_read_excel_nrows(self, ext): # GH 16645 num_rows_to_pull = 5 - actual = pd.read_excel(os.path.join(self.dirpath, 'test1' + self.ext), + actual = pd.read_excel(os.path.join(self.dirpath, 'test1' + ext), nrows=num_rows_to_pull) expected = pd.read_excel(os.path.join(self.dirpath, - 'test1' + self.ext)) + 'test1' + ext)) expected = expected[:num_rows_to_pull] tm.assert_frame_equal(actual, expected) - def test_read_excel_nrows_greater_than_nrows_in_file(self): + def test_read_excel_nrows_greater_than_nrows_in_file(self, ext): # GH 16645 expected = pd.read_excel(os.path.join(self.dirpath, - 'test1' + self.ext)) + 'test1' + ext)) num_records_in_file = len(expected) num_rows_to_pull = num_records_in_file + 10 - actual = pd.read_excel(os.path.join(self.dirpath, 'test1' + self.ext), + actual = pd.read_excel(os.path.join(self.dirpath, 'test1' + ext), nrows=num_rows_to_pull) tm.assert_frame_equal(actual, expected) - def test_read_excel_nrows_non_integer_parameter(self): + def test_read_excel_nrows_non_integer_parameter(self, ext): # GH 16645 msg = "'nrows' must be an integer >=0" with tm.assert_raises_regex(ValueError, msg): - pd.read_excel(os.path.join(self.dirpath, 'test1' + self.ext), + pd.read_excel(os.path.join(self.dirpath, 'test1' + ext), nrows='5') - def test_read_excel_squeeze(self): + def test_read_excel_squeeze(self, ext): # GH 12157 - f = os.path.join(self.dirpath, 'test_squeeze' + self.ext) + f = os.path.join(self.dirpath, 'test_squeeze' + ext) actual = pd.read_excel(f, 'two_columns', index_col=0, squeeze=True) expected = pd.Series([2, 3, 4], [4, 5, 6], name='b') @@ -1058,351 +1008,308 @@ def test_read_excel_squeeze(self): tm.assert_series_equal(actual, expected) -class TestXlsReaderTests(XlrdTests): - ext = '.xls' - engine_name = 'xlrd' - check_skip = staticmethod(_skip_if_no_xlrd) - - -class TestXlsxReaderTests(XlrdTests): - ext = '.xlsx' - engine_name = 'xlrd' - check_skip = staticmethod(_skip_if_no_xlrd) +class _WriterBase(SharedItems): + @pytest.fixture(autouse=True) + def set_engine_and_path(self, request, merge_cells, engine, ext): + """Fixture to set engine and open file for use in each test case -class TestXlsmReaderTests(XlrdTests): - ext = '.xlsm' - engine_name = 'xlrd' - check_skip = staticmethod(_skip_if_no_xlrd) + Rather than requiring `engine=...` to be provided explictly as an + argument in each test, this fixture sets a global option to dictate + which engine should be used to write Excel files. After executing + the test it rolls back said change to the global option. 
+ It also uses a context manager to open a temporary excel file for + the function to write to, accessible via `self.path` -class ExcelWriterBase(SharedItems): + Notes + ----- + This fixture will run as part of each test method defined in the + class and any subclasses, on account of the `autouse=True` + argument + """ + option_name = 'io.excel.{ext}.writer'.format(ext=ext.strip('.')) + prev_engine = get_option(option_name) + set_option(option_name, engine) + with ensure_clean(ext) as path: + self.path = path + yield + set_option(option_name, prev_engine) # Roll back option change + + +@pytest.mark.parametrize("merge_cells", [True, False]) +@pytest.mark.parametrize("engine,ext", [ + pytest.param('openpyxl', '.xlsx', marks=pytest.mark.skipif( + not td.safe_import('openpyxl'), reason='No openpyxl')), + pytest.param('openpyxl', '.xlsm', marks=pytest.mark.skipif( + not td.safe_import('openpyxl'), reason='No openpyxl')), + pytest.param('xlwt', '.xls', marks=pytest.mark.skipif( + not td.safe_import('xlwt'), reason='No xlwt')), + pytest.param('xlsxwriter', '.xlsx', marks=pytest.mark.skipif( + not td.safe_import('xlsxwriter'), reason='No xlsxwriter')) +]) +class TestExcelWriter(_WriterBase): # Base class for test cases to run with different Excel writers. - # To add a writer test, define the following: - # 1. A check_skip function that skips your tests if your writer isn't - # installed. - # 2. Add a property ext, which is the file extension that your writer - # writes to. (needs to start with '.' so it's a valid path) - # 3. Add a property engine_name, which is the name of the writer class. - - # Test with MultiIndex and Hierarchical Rows as merged cells. - merge_cells = True - - def setup_method(self, method): - self.check_skip() - super(ExcelWriterBase, self).setup_method(method) - self.option_name = 'io.excel.%s.writer' % self.ext.strip('.') - self.prev_engine = get_option(self.option_name) - set_option(self.option_name, self.engine_name) - - def teardown_method(self, method): - set_option(self.option_name, self.prev_engine) - def test_excel_sheet_by_name_raise(self): - _skip_if_no_xlrd() + def test_excel_sheet_by_name_raise(self, merge_cells, engine, ext): import xlrd - with ensure_clean(self.ext) as pth: - gt = DataFrame(np.random.randn(10, 2)) - gt.to_excel(pth) - xl = ExcelFile(pth) - df = read_excel(xl, 0) - tm.assert_frame_equal(gt, df) - - with pytest.raises(xlrd.XLRDError): - read_excel(xl, '0') - - def test_excelwriter_contextmanager(self): - _skip_if_no_xlrd() - - with ensure_clean(self.ext) as pth: - with ExcelWriter(pth) as writer: - self.frame.to_excel(writer, 'Data1') - self.frame2.to_excel(writer, 'Data2') - - with ExcelFile(pth) as reader: - found_df = read_excel(reader, 'Data1') - found_df2 = read_excel(reader, 'Data2') - tm.assert_frame_equal(found_df, self.frame) - tm.assert_frame_equal(found_df2, self.frame2) - - def test_roundtrip(self): - _skip_if_no_xlrd() - - with ensure_clean(self.ext) as path: - self.frame['A'][:5] = nan - - self.frame.to_excel(path, 'test1') - self.frame.to_excel(path, 'test1', columns=['A', 'B']) - self.frame.to_excel(path, 'test1', header=False) - self.frame.to_excel(path, 'test1', index=False) - - # test roundtrip - self.frame.to_excel(path, 'test1') - recons = read_excel(path, 'test1', index_col=0) - tm.assert_frame_equal(self.frame, recons) - - self.frame.to_excel(path, 'test1', index=False) - recons = read_excel(path, 'test1', index_col=None) - recons.index = self.frame.index - tm.assert_frame_equal(self.frame, recons) - - 
self.frame.to_excel(path, 'test1', na_rep='NA') - recons = read_excel(path, 'test1', index_col=0, na_values=['NA']) - tm.assert_frame_equal(self.frame, recons) - - # GH 3611 - self.frame.to_excel(path, 'test1', na_rep='88') - recons = read_excel(path, 'test1', index_col=0, na_values=['88']) - tm.assert_frame_equal(self.frame, recons) - - self.frame.to_excel(path, 'test1', na_rep='88') - recons = read_excel(path, 'test1', index_col=0, - na_values=[88, 88.0]) - tm.assert_frame_equal(self.frame, recons) - - # GH 6573 - self.frame.to_excel(path, 'Sheet1') - recons = read_excel(path, index_col=0) - tm.assert_frame_equal(self.frame, recons) - - self.frame.to_excel(path, '0') - recons = read_excel(path, index_col=0) - tm.assert_frame_equal(self.frame, recons) - - # GH 8825 Pandas Series should provide to_excel method - s = self.frame["A"] - s.to_excel(path) - recons = read_excel(path, index_col=0) - tm.assert_frame_equal(s.to_frame(), recons) - - def test_mixed(self): - _skip_if_no_xlrd() - - with ensure_clean(self.ext) as path: - self.mixed_frame.to_excel(path, 'test1') - reader = ExcelFile(path) - recons = read_excel(reader, 'test1', index_col=0) - tm.assert_frame_equal(self.mixed_frame, recons) - - def test_tsframe(self): - _skip_if_no_xlrd() + gt = DataFrame(np.random.randn(10, 2)) + gt.to_excel(self.path) + xl = ExcelFile(self.path) + df = read_excel(xl, 0) + tm.assert_frame_equal(gt, df) + with pytest.raises(xlrd.XLRDError): + read_excel(xl, '0') + + def test_excelwriter_contextmanager(self, merge_cells, engine, ext): + with ExcelWriter(self.path) as writer: + self.frame.to_excel(writer, 'Data1') + self.frame2.to_excel(writer, 'Data2') + + with ExcelFile(self.path) as reader: + found_df = read_excel(reader, 'Data1') + found_df2 = read_excel(reader, 'Data2') + tm.assert_frame_equal(found_df, self.frame) + tm.assert_frame_equal(found_df2, self.frame2) + + def test_roundtrip(self, merge_cells, engine, ext): + self.frame['A'][:5] = nan + + self.frame.to_excel(self.path, 'test1') + self.frame.to_excel(self.path, 'test1', columns=['A', 'B']) + self.frame.to_excel(self.path, 'test1', header=False) + self.frame.to_excel(self.path, 'test1', index=False) + + # test roundtrip + self.frame.to_excel(self.path, 'test1') + recons = read_excel(self.path, 'test1', index_col=0) + tm.assert_frame_equal(self.frame, recons) + + self.frame.to_excel(self.path, 'test1', index=False) + recons = read_excel(self.path, 'test1', index_col=None) + recons.index = self.frame.index + tm.assert_frame_equal(self.frame, recons) + + self.frame.to_excel(self.path, 'test1', na_rep='NA') + recons = read_excel(self.path, 'test1', index_col=0, na_values=['NA']) + tm.assert_frame_equal(self.frame, recons) + + # GH 3611 + self.frame.to_excel(self.path, 'test1', na_rep='88') + recons = read_excel(self.path, 'test1', index_col=0, na_values=['88']) + tm.assert_frame_equal(self.frame, recons) + + self.frame.to_excel(self.path, 'test1', na_rep='88') + recons = read_excel(self.path, 'test1', index_col=0, + na_values=[88, 88.0]) + tm.assert_frame_equal(self.frame, recons) + + # GH 6573 + self.frame.to_excel(self.path, 'Sheet1') + recons = read_excel(self.path, index_col=0) + tm.assert_frame_equal(self.frame, recons) + + self.frame.to_excel(self.path, '0') + recons = read_excel(self.path, index_col=0) + tm.assert_frame_equal(self.frame, recons) + + # GH 8825 Pandas Series should provide to_excel method + s = self.frame["A"] + s.to_excel(self.path) + recons = read_excel(self.path, index_col=0) + tm.assert_frame_equal(s.to_frame(), recons) + + 
def test_mixed(self, merge_cells, engine, ext): + self.mixed_frame.to_excel(self.path, 'test1') + reader = ExcelFile(self.path) + recons = read_excel(reader, 'test1', index_col=0) + tm.assert_frame_equal(self.mixed_frame, recons) + + def test_tsframe(self, merge_cells, engine, ext): df = tm.makeTimeDataFrame()[:5] - with ensure_clean(self.ext) as path: - df.to_excel(path, 'test1') - reader = ExcelFile(path) - recons = read_excel(reader, 'test1') - tm.assert_frame_equal(df, recons) - - def test_basics_with_nan(self): - _skip_if_no_xlrd() - with ensure_clean(self.ext) as path: - self.frame['A'][:5] = nan - self.frame.to_excel(path, 'test1') - self.frame.to_excel(path, 'test1', columns=['A', 'B']) - self.frame.to_excel(path, 'test1', header=False) - self.frame.to_excel(path, 'test1', index=False) - - def test_int_types(self): - _skip_if_no_xlrd() - - for np_type in (np.int8, np.int16, np.int32, np.int64): - - with ensure_clean(self.ext) as path: - # Test np.int values read come back as int (rather than float - # which is Excel's format). - frame = DataFrame(np.random.randint(-10, 10, size=(10, 2)), - dtype=np_type) - frame.to_excel(path, 'test1') - reader = ExcelFile(path) - recons = read_excel(reader, 'test1') - int_frame = frame.astype(np.int64) - tm.assert_frame_equal(int_frame, recons) - recons2 = read_excel(path, 'test1') - tm.assert_frame_equal(int_frame, recons2) - - # test with convert_float=False comes back as float - float_frame = frame.astype(float) - recons = read_excel(path, 'test1', convert_float=False) - tm.assert_frame_equal(recons, float_frame, - check_index_type=False, - check_column_type=False) - - def test_float_types(self): - _skip_if_no_xlrd() - - for np_type in (np.float16, np.float32, np.float64): - with ensure_clean(self.ext) as path: - # Test np.float values read come back as float. - frame = DataFrame(np.random.random_sample(10), dtype=np_type) - frame.to_excel(path, 'test1') - reader = ExcelFile(path) - recons = read_excel(reader, 'test1').astype(np_type) - tm.assert_frame_equal(frame, recons, check_dtype=False) - - def test_bool_types(self): - _skip_if_no_xlrd() - - for np_type in (np.bool8, np.bool_): - with ensure_clean(self.ext) as path: - # Test np.bool values read come back as float. - frame = (DataFrame([1, 0, True, False], dtype=np_type)) - frame.to_excel(path, 'test1') - reader = ExcelFile(path) - recons = read_excel(reader, 'test1').astype(np_type) - tm.assert_frame_equal(frame, recons) - - def test_inf_roundtrip(self): - _skip_if_no_xlrd() - + df.to_excel(self.path, 'test1') + reader = ExcelFile(self.path) + recons = read_excel(reader, 'test1') + tm.assert_frame_equal(df, recons) + + def test_basics_with_nan(self, merge_cells, engine, ext): + self.frame['A'][:5] = nan + self.frame.to_excel(self.path, 'test1') + self.frame.to_excel(self.path, 'test1', columns=['A', 'B']) + self.frame.to_excel(self.path, 'test1', header=False) + self.frame.to_excel(self.path, 'test1', index=False) + + @pytest.mark.parametrize("np_type", [ + np.int8, np.int16, np.int32, np.int64]) + def test_int_types(self, merge_cells, engine, ext, np_type): + # Test np.int values read come back as int (rather than float + # which is Excel's format). 
+ frame = DataFrame(np.random.randint(-10, 10, size=(10, 2)), + dtype=np_type) + frame.to_excel(self.path, 'test1') + reader = ExcelFile(self.path) + recons = read_excel(reader, 'test1') + int_frame = frame.astype(np.int64) + tm.assert_frame_equal(int_frame, recons) + recons2 = read_excel(self.path, 'test1') + tm.assert_frame_equal(int_frame, recons2) + + # test with convert_float=False comes back as float + float_frame = frame.astype(float) + recons = read_excel(self.path, 'test1', convert_float=False) + tm.assert_frame_equal(recons, float_frame, + check_index_type=False, + check_column_type=False) + + @pytest.mark.parametrize("np_type", [ + np.float16, np.float32, np.float64]) + def test_float_types(self, merge_cells, engine, ext, np_type): + # Test np.float values read come back as float. + frame = DataFrame(np.random.random_sample(10), dtype=np_type) + frame.to_excel(self.path, 'test1') + reader = ExcelFile(self.path) + recons = read_excel(reader, 'test1').astype(np_type) + tm.assert_frame_equal(frame, recons, check_dtype=False) + + @pytest.mark.parametrize("np_type", [np.bool8, np.bool_]) + def test_bool_types(self, merge_cells, engine, ext, np_type): + # Test np.bool values read come back as float. + frame = (DataFrame([1, 0, True, False], dtype=np_type)) + frame.to_excel(self.path, 'test1') + reader = ExcelFile(self.path) + recons = read_excel(reader, 'test1').astype(np_type) + tm.assert_frame_equal(frame, recons) + + def test_inf_roundtrip(self, merge_cells, engine, ext): frame = DataFrame([(1, np.inf), (2, 3), (5, -np.inf)]) - with ensure_clean(self.ext) as path: - frame.to_excel(path, 'test1') - reader = ExcelFile(path) - recons = read_excel(reader, 'test1') - tm.assert_frame_equal(frame, recons) - - def test_sheets(self): - _skip_if_no_xlrd() - - with ensure_clean(self.ext) as path: - self.frame['A'][:5] = nan - - self.frame.to_excel(path, 'test1') - self.frame.to_excel(path, 'test1', columns=['A', 'B']) - self.frame.to_excel(path, 'test1', header=False) - self.frame.to_excel(path, 'test1', index=False) - - # Test writing to separate sheets - writer = ExcelWriter(path) - self.frame.to_excel(writer, 'test1') - self.tsframe.to_excel(writer, 'test2') - writer.save() - reader = ExcelFile(path) - recons = read_excel(reader, 'test1', index_col=0) - tm.assert_frame_equal(self.frame, recons) - recons = read_excel(reader, 'test2', index_col=0) - tm.assert_frame_equal(self.tsframe, recons) - assert 2 == len(reader.sheet_names) - assert 'test1' == reader.sheet_names[0] - assert 'test2' == reader.sheet_names[1] - - def test_colaliases(self): - _skip_if_no_xlrd() - - with ensure_clean(self.ext) as path: - self.frame['A'][:5] = nan - - self.frame.to_excel(path, 'test1') - self.frame.to_excel(path, 'test1', columns=['A', 'B']) - self.frame.to_excel(path, 'test1', header=False) - self.frame.to_excel(path, 'test1', index=False) - - # column aliases - col_aliases = Index(['AA', 'X', 'Y', 'Z']) - self.frame2.to_excel(path, 'test1', header=col_aliases) - reader = ExcelFile(path) - rs = read_excel(reader, 'test1', index_col=0) - xp = self.frame2.copy() - xp.columns = col_aliases - tm.assert_frame_equal(xp, rs) - - def test_roundtrip_indexlabels(self): - _skip_if_no_xlrd() - - with ensure_clean(self.ext) as path: - - self.frame['A'][:5] = nan - - self.frame.to_excel(path, 'test1') - self.frame.to_excel(path, 'test1', columns=['A', 'B']) - self.frame.to_excel(path, 'test1', header=False) - self.frame.to_excel(path, 'test1', index=False) - - # test index_label - frame = (DataFrame(np.random.randn(10, 
2)) >= 0) - frame.to_excel(path, 'test1', - index_label=['test'], - merge_cells=self.merge_cells) - reader = ExcelFile(path) - recons = read_excel(reader, 'test1', - index_col=0, - ).astype(np.int64) - frame.index.names = ['test'] - assert frame.index.names == recons.index.names - - frame = (DataFrame(np.random.randn(10, 2)) >= 0) - frame.to_excel(path, - 'test1', - index_label=['test', 'dummy', 'dummy2'], - merge_cells=self.merge_cells) - reader = ExcelFile(path) - recons = read_excel(reader, 'test1', - index_col=0, - ).astype(np.int64) - frame.index.names = ['test'] - assert frame.index.names == recons.index.names - - frame = (DataFrame(np.random.randn(10, 2)) >= 0) - frame.to_excel(path, - 'test1', - index_label='test', - merge_cells=self.merge_cells) - reader = ExcelFile(path) - recons = read_excel(reader, 'test1', - index_col=0, - ).astype(np.int64) - frame.index.names = ['test'] - tm.assert_frame_equal(frame, recons.astype(bool)) - - with ensure_clean(self.ext) as path: - - self.frame.to_excel(path, - 'test1', - columns=['A', 'B', 'C', 'D'], - index=False, merge_cells=self.merge_cells) - # take 'A' and 'B' as indexes (same row as cols 'C', 'D') - df = self.frame.copy() - df = df.set_index(['A', 'B']) - - reader = ExcelFile(path) - recons = read_excel(reader, 'test1', index_col=[0, 1]) - tm.assert_frame_equal(df, recons, check_less_precise=True) - - def test_excel_roundtrip_indexname(self): - _skip_if_no_xlrd() - + frame.to_excel(self.path, 'test1') + reader = ExcelFile(self.path) + recons = read_excel(reader, 'test1') + tm.assert_frame_equal(frame, recons) + + def test_sheets(self, merge_cells, engine, ext): + self.frame['A'][:5] = nan + + self.frame.to_excel(self.path, 'test1') + self.frame.to_excel(self.path, 'test1', columns=['A', 'B']) + self.frame.to_excel(self.path, 'test1', header=False) + self.frame.to_excel(self.path, 'test1', index=False) + + # Test writing to separate sheets + writer = ExcelWriter(self.path) + self.frame.to_excel(writer, 'test1') + self.tsframe.to_excel(writer, 'test2') + writer.save() + reader = ExcelFile(self.path) + recons = read_excel(reader, 'test1', index_col=0) + tm.assert_frame_equal(self.frame, recons) + recons = read_excel(reader, 'test2', index_col=0) + tm.assert_frame_equal(self.tsframe, recons) + assert 2 == len(reader.sheet_names) + assert 'test1' == reader.sheet_names[0] + assert 'test2' == reader.sheet_names[1] + + def test_colaliases(self, merge_cells, engine, ext): + self.frame['A'][:5] = nan + + self.frame.to_excel(self.path, 'test1') + self.frame.to_excel(self.path, 'test1', columns=['A', 'B']) + self.frame.to_excel(self.path, 'test1', header=False) + self.frame.to_excel(self.path, 'test1', index=False) + + # column aliases + col_aliases = Index(['AA', 'X', 'Y', 'Z']) + self.frame2.to_excel(self.path, 'test1', header=col_aliases) + reader = ExcelFile(self.path) + rs = read_excel(reader, 'test1', index_col=0) + xp = self.frame2.copy() + xp.columns = col_aliases + tm.assert_frame_equal(xp, rs) + + def test_roundtrip_indexlabels(self, merge_cells, engine, ext): + self.frame['A'][:5] = nan + + self.frame.to_excel(self.path, 'test1') + self.frame.to_excel(self.path, 'test1', columns=['A', 'B']) + self.frame.to_excel(self.path, 'test1', header=False) + self.frame.to_excel(self.path, 'test1', index=False) + + # test index_label + frame = (DataFrame(np.random.randn(10, 2)) >= 0) + frame.to_excel(self.path, 'test1', + index_label=['test'], + merge_cells=merge_cells) + reader = ExcelFile(self.path) + recons = read_excel(reader, 'test1', + 
index_col=0, + ).astype(np.int64) + frame.index.names = ['test'] + assert frame.index.names == recons.index.names + + frame = (DataFrame(np.random.randn(10, 2)) >= 0) + frame.to_excel(self.path, + 'test1', + index_label=['test', 'dummy', 'dummy2'], + merge_cells=merge_cells) + reader = ExcelFile(self.path) + recons = read_excel(reader, 'test1', + index_col=0, + ).astype(np.int64) + frame.index.names = ['test'] + assert frame.index.names == recons.index.names + + frame = (DataFrame(np.random.randn(10, 2)) >= 0) + frame.to_excel(self.path, + 'test1', + index_label='test', + merge_cells=merge_cells) + reader = ExcelFile(self.path) + recons = read_excel(reader, 'test1', + index_col=0, + ).astype(np.int64) + frame.index.names = ['test'] + tm.assert_frame_equal(frame, recons.astype(bool)) + + self.frame.to_excel(self.path, + 'test1', + columns=['A', 'B', 'C', 'D'], + index=False, merge_cells=merge_cells) + # take 'A' and 'B' as indexes (same row as cols 'C', 'D') + df = self.frame.copy() + df = df.set_index(['A', 'B']) + + reader = ExcelFile(self.path) + recons = read_excel(reader, 'test1', index_col=[0, 1]) + tm.assert_frame_equal(df, recons, check_less_precise=True) + + def test_excel_roundtrip_indexname(self, merge_cells, engine, ext): df = DataFrame(np.random.randn(10, 4)) df.index.name = 'foo' - with ensure_clean(self.ext) as path: - df.to_excel(path, merge_cells=self.merge_cells) + df.to_excel(self.path, merge_cells=merge_cells) - xf = ExcelFile(path) - result = read_excel(xf, xf.sheet_names[0], - index_col=0) + xf = ExcelFile(self.path) + result = read_excel(xf, xf.sheet_names[0], + index_col=0) - tm.assert_frame_equal(result, df) - assert result.index.name == 'foo' - - def test_excel_roundtrip_datetime(self): - _skip_if_no_xlrd() + tm.assert_frame_equal(result, df) + assert result.index.name == 'foo' + def test_excel_roundtrip_datetime(self, merge_cells, engine, ext): # datetime.date, not sure what to test here exactly tsf = self.tsframe.copy() - with ensure_clean(self.ext) as path: - tsf.index = [x.date() for x in self.tsframe.index] - tsf.to_excel(path, 'test1', merge_cells=self.merge_cells) - reader = ExcelFile(path) - recons = read_excel(reader, 'test1') - tm.assert_frame_equal(self.tsframe, recons) + tsf.index = [x.date() for x in self.tsframe.index] + tsf.to_excel(self.path, 'test1', merge_cells=merge_cells) + reader = ExcelFile(self.path) + recons = read_excel(reader, 'test1') + tm.assert_frame_equal(self.tsframe, recons) # GH4133 - excel output format strings - def test_excel_date_datetime_format(self): - _skip_if_no_xlrd() + def test_excel_date_datetime_format(self, merge_cells, engine, ext): df = DataFrame([[date(2014, 1, 31), date(1999, 9, 24)], [datetime(1998, 5, 26, 23, 33, 4), @@ -1414,133 +1321,117 @@ def test_excel_date_datetime_format(self): datetime(2014, 2, 28, 13, 5, 13)]], index=['DATE', 'DATETIME'], columns=['X', 'Y']) - with ensure_clean(self.ext) as filename1: - with ensure_clean(self.ext) as filename2: - writer1 = ExcelWriter(filename1) - writer2 = ExcelWriter(filename2, - date_format='DD.MM.YYYY', - datetime_format='DD.MM.YYYY HH-MM-SS') + with ensure_clean(ext) as filename2: + writer1 = ExcelWriter(self.path) + writer2 = ExcelWriter(filename2, + date_format='DD.MM.YYYY', + datetime_format='DD.MM.YYYY HH-MM-SS') - df.to_excel(writer1, 'test1') - df.to_excel(writer2, 'test1') + df.to_excel(writer1, 'test1') + df.to_excel(writer2, 'test1') - writer1.close() - writer2.close() + writer1.close() + writer2.close() - reader1 = ExcelFile(filename1) - reader2 = 
ExcelFile(filename2) + reader1 = ExcelFile(self.path) + reader2 = ExcelFile(filename2) - rs1 = read_excel(reader1, 'test1', index_col=None) - rs2 = read_excel(reader2, 'test1', index_col=None) + rs1 = read_excel(reader1, 'test1', index_col=None) + rs2 = read_excel(reader2, 'test1', index_col=None) - tm.assert_frame_equal(rs1, rs2) + tm.assert_frame_equal(rs1, rs2) - # since the reader returns a datetime object for dates, we need - # to use df_expected to check the result - tm.assert_frame_equal(rs2, df_expected) + # since the reader returns a datetime object for dates, we need + # to use df_expected to check the result + tm.assert_frame_equal(rs2, df_expected) - def test_to_excel_interval_no_labels(self): + def test_to_excel_interval_no_labels(self, merge_cells, engine, ext): # GH19242 - test writing Interval without labels - _skip_if_no_xlrd() - - with ensure_clean(self.ext) as path: - frame = DataFrame(np.random.randint(-10, 10, size=(20, 1)), - dtype=np.int64) - expected = frame.copy() - frame['new'] = pd.cut(frame[0], 10) - expected['new'] = pd.cut(expected[0], 10).astype(str) - frame.to_excel(path, 'test1') - reader = ExcelFile(path) - recons = read_excel(reader, 'test1') - tm.assert_frame_equal(expected, recons) - - def test_to_excel_interval_labels(self): + frame = DataFrame(np.random.randint(-10, 10, size=(20, 1)), + dtype=np.int64) + expected = frame.copy() + frame['new'] = pd.cut(frame[0], 10) + expected['new'] = pd.cut(expected[0], 10).astype(str) + frame.to_excel(self.path, 'test1') + reader = ExcelFile(self.path) + recons = read_excel(reader, 'test1') + tm.assert_frame_equal(expected, recons) + + def test_to_excel_interval_labels(self, merge_cells, engine, ext): # GH19242 - test writing Interval with labels - _skip_if_no_xlrd() - - with ensure_clean(self.ext) as path: - frame = DataFrame(np.random.randint(-10, 10, size=(20, 1)), - dtype=np.int64) - expected = frame.copy() - intervals = pd.cut(frame[0], 10, labels=['A', 'B', 'C', 'D', 'E', - 'F', 'G', 'H', 'I', 'J']) - frame['new'] = intervals - expected['new'] = pd.Series(list(intervals)) - frame.to_excel(path, 'test1') - reader = ExcelFile(path) - recons = read_excel(reader, 'test1') - tm.assert_frame_equal(expected, recons) - - def test_to_excel_timedelta(self): + frame = DataFrame(np.random.randint(-10, 10, size=(20, 1)), + dtype=np.int64) + expected = frame.copy() + intervals = pd.cut(frame[0], 10, labels=['A', 'B', 'C', 'D', 'E', + 'F', 'G', 'H', 'I', 'J']) + frame['new'] = intervals + expected['new'] = pd.Series(list(intervals)) + frame.to_excel(self.path, 'test1') + reader = ExcelFile(self.path) + recons = read_excel(reader, 'test1') + tm.assert_frame_equal(expected, recons) + + def test_to_excel_timedelta(self, merge_cells, engine, ext): # GH 19242, GH9155 - test writing timedelta to xls - _skip_if_no_xlrd() - - with ensure_clean('.xls') as path: - frame = DataFrame(np.random.randint(-10, 10, size=(20, 1)), - columns=['A'], - dtype=np.int64 - ) - expected = frame.copy() - frame['new'] = frame['A'].apply(lambda x: timedelta(seconds=x)) - expected['new'] = expected['A'].apply( - lambda x: timedelta(seconds=x).total_seconds() / float(86400)) - frame.to_excel(path, 'test1') - reader = ExcelFile(path) - recons = read_excel(reader, 'test1') - tm.assert_frame_equal(expected, recons) - - def test_to_excel_periodindex(self): - _skip_if_no_xlrd() - + if engine == 'openpyxl': + pytest.xfail('Timedelta roundtrip broken with openpyxl') + if engine == 'xlsxwriter' and (sys.version_info[0] == 2 and + 
sys.platform.startswith('linux')): + pytest.xfail('Not working on linux with Py2 and xlsxwriter') + frame = DataFrame(np.random.randint(-10, 10, size=(20, 1)), + columns=['A'], + dtype=np.int64 + ) + expected = frame.copy() + frame['new'] = frame['A'].apply(lambda x: timedelta(seconds=x)) + expected['new'] = expected['A'].apply( + lambda x: timedelta(seconds=x).total_seconds() / float(86400)) + frame.to_excel(self.path, 'test1') + reader = ExcelFile(self.path) + recons = read_excel(reader, 'test1') + tm.assert_frame_equal(expected, recons) + + def test_to_excel_periodindex(self, merge_cells, engine, ext): frame = self.tsframe xp = frame.resample('M', kind='period').mean() - with ensure_clean(self.ext) as path: - xp.to_excel(path, 'sht1') - - reader = ExcelFile(path) - rs = read_excel(reader, 'sht1', index_col=0) - tm.assert_frame_equal(xp, rs.to_period('M')) + xp.to_excel(self.path, 'sht1') - def test_to_excel_multiindex(self): - _skip_if_no_xlrd() + reader = ExcelFile(self.path) + rs = read_excel(reader, 'sht1', index_col=0) + tm.assert_frame_equal(xp, rs.to_period('M')) + def test_to_excel_multiindex(self, merge_cells, engine, ext): frame = self.frame arrays = np.arange(len(frame.index) * 2).reshape(2, -1) new_index = MultiIndex.from_arrays(arrays, names=['first', 'second']) frame.index = new_index - with ensure_clean(self.ext) as path: - frame.to_excel(path, 'test1', header=False) - frame.to_excel(path, 'test1', columns=['A', 'B']) + frame.to_excel(self.path, 'test1', header=False) + frame.to_excel(self.path, 'test1', columns=['A', 'B']) - # round trip - frame.to_excel(path, 'test1', merge_cells=self.merge_cells) - reader = ExcelFile(path) - df = read_excel(reader, 'test1', index_col=[0, 1]) - tm.assert_frame_equal(frame, df) + # round trip + frame.to_excel(self.path, 'test1', merge_cells=merge_cells) + reader = ExcelFile(self.path) + df = read_excel(reader, 'test1', index_col=[0, 1]) + tm.assert_frame_equal(frame, df) # GH13511 - def test_to_excel_multiindex_nan_label(self): - _skip_if_no_xlrd() - + def test_to_excel_multiindex_nan_label(self, merge_cells, engine, ext): frame = pd.DataFrame({'A': [None, 2, 3], 'B': [10, 20, 30], 'C': np.random.sample(3)}) frame = frame.set_index(['A', 'B']) - with ensure_clean(self.ext) as path: - frame.to_excel(path, merge_cells=self.merge_cells) - df = read_excel(path, index_col=[0, 1]) - tm.assert_frame_equal(frame, df) + frame.to_excel(self.path, merge_cells=merge_cells) + df = read_excel(self.path, index_col=[0, 1]) + tm.assert_frame_equal(frame, df) # Test for Issue 11328. 
If column indices are integers, make
     # sure they are handled correctly for either setting of
     # merge_cells
-    def test_to_excel_multiindex_cols(self):
-        _skip_if_no_xlrd()
-
+    def test_to_excel_multiindex_cols(self, merge_cells, engine, ext):
         frame = self.frame
         arrays = np.arange(len(frame.index) * 2).reshape(2, -1)
         new_index = MultiIndex.from_arrays(arrays,
@@ -1551,42 +1442,37 @@ def test_to_excel_multiindex_cols(self):
                                            (50, 1), (50, 2)])
         frame.columns = new_cols_index
         header = [0, 1]
-        if not self.merge_cells:
+        if not merge_cells:
             header = 0

-        with ensure_clean(self.ext) as path:
-            # round trip
-            frame.to_excel(path, 'test1', merge_cells=self.merge_cells)
-            reader = ExcelFile(path)
-            df = read_excel(reader, 'test1', header=header,
-                            index_col=[0, 1])
-            if not self.merge_cells:
-                fm = frame.columns.format(sparsify=False,
-                                          adjoin=False, names=False)
-                frame.columns = [".".join(map(str, q)) for q in zip(*fm)]
-            tm.assert_frame_equal(frame, df)
-
-    def test_to_excel_multiindex_dates(self):
-        _skip_if_no_xlrd()
-
+        # round trip
+        frame.to_excel(self.path, 'test1', merge_cells=merge_cells)
+        reader = ExcelFile(self.path)
+        df = read_excel(reader, 'test1', header=header,
+                        index_col=[0, 1])
+        if not merge_cells:
+            fm = frame.columns.format(sparsify=False,
+                                      adjoin=False, names=False)
+            frame.columns = [".".join(map(str, q)) for q in zip(*fm)]
+        tm.assert_frame_equal(frame, df)
+
+    def test_to_excel_multiindex_dates(self, merge_cells, engine, ext):
         # try multiindex with dates
         tsframe = self.tsframe.copy()
         new_index = [tsframe.index, np.arange(len(tsframe.index))]
         tsframe.index = MultiIndex.from_arrays(new_index)

-        with ensure_clean(self.ext) as path:
-            tsframe.index.names = ['time', 'foo']
-            tsframe.to_excel(path, 'test1', merge_cells=self.merge_cells)
-            reader = ExcelFile(path)
-            recons = read_excel(reader, 'test1',
-                                index_col=[0, 1])
-
-            tm.assert_frame_equal(tsframe, recons)
-            assert recons.index.names == ('time', 'foo')
+        tsframe.index.names = ['time', 'foo']
+        tsframe.to_excel(self.path, 'test1', merge_cells=merge_cells)
+        reader = ExcelFile(self.path)
+        recons = read_excel(reader, 'test1',
+                            index_col=[0, 1])

-    def test_to_excel_multiindex_no_write_index(self):
-        _skip_if_no_xlrd()
+        tm.assert_frame_equal(tsframe, recons)
+        assert recons.index.names == ('time', 'foo')

+    def test_to_excel_multiindex_no_write_index(self, merge_cells, engine,
+                                                ext):
         # Test writing and re-reading a MI without the index. GH 5616.

         # Initial non-MI frame.
@@ -1597,53 +1483,44 @@ def test_to_excel_multiindex_no_write_index(self):
         multi_index = MultiIndex.from_tuples([(70, 80), (90, 100)])
         frame2.index = multi_index

-        with ensure_clean(self.ext) as path:
+        # Write out to Excel without the index.
+        frame2.to_excel(self.path, 'test1', index=False)

-            # Write out to Excel without the index.
-            frame2.to_excel(path, 'test1', index=False)
+        # Read it back in.
+        reader = ExcelFile(self.path)
+        frame3 = read_excel(reader, 'test1')

-            # Read it back in.
-            reader = ExcelFile(path)
-            frame3 = read_excel(reader, 'test1')
-
-            # Test that it is the same as the initial frame.
-            tm.assert_frame_equal(frame1, frame3)
-
-    def test_to_excel_float_format(self):
-        _skip_if_no_xlrd()
+        # Test that it is the same as the initial frame.
+ tm.assert_frame_equal(frame1, frame3) + def test_to_excel_float_format(self, merge_cells, engine, ext): df = DataFrame([[0.123456, 0.234567, 0.567567], [12.32112, 123123.2, 321321.2]], index=['A', 'B'], columns=['X', 'Y', 'Z']) - with ensure_clean(self.ext) as filename: - df.to_excel(filename, 'test1', float_format='%.2f') + df.to_excel(self.path, 'test1', float_format='%.2f') - reader = ExcelFile(filename) - rs = read_excel(reader, 'test1', index_col=None) - xp = DataFrame([[0.12, 0.23, 0.57], - [12.32, 123123.20, 321321.20]], - index=['A', 'B'], columns=['X', 'Y', 'Z']) - tm.assert_frame_equal(rs, xp) - - def test_to_excel_output_encoding(self): - _skip_if_no_xlrd() + reader = ExcelFile(self.path) + rs = read_excel(reader, 'test1', index_col=None) + xp = DataFrame([[0.12, 0.23, 0.57], + [12.32, 123123.20, 321321.20]], + index=['A', 'B'], columns=['X', 'Y', 'Z']) + tm.assert_frame_equal(rs, xp) + def test_to_excel_output_encoding(self, merge_cells, engine, ext): # avoid mixed inferred_type df = DataFrame([[u'\u0192', u'\u0193', u'\u0194'], [u'\u0195', u'\u0196', u'\u0197']], index=[u'A\u0192', u'B'], columns=[u'X\u0193', u'Y', u'Z']) - with ensure_clean('__tmp_to_excel_float_format__.' + self.ext)\ - as filename: + with ensure_clean('__tmp_to_excel_float_format__.' + ext) as filename: df.to_excel(filename, sheet_name='TestSheet', encoding='utf8') result = read_excel(filename, 'TestSheet', encoding='utf8') tm.assert_frame_equal(result, df) - def test_to_excel_unicode_filename(self): - _skip_if_no_xlrd() - with ensure_clean(u('\u0192u.') + self.ext) as filename: + def test_to_excel_unicode_filename(self, merge_cells, engine, ext): + with ensure_clean(u('\u0192u.') + ext) as filename: try: f = open(filename, 'wb') except UnicodeEncodeError: @@ -1664,7 +1541,7 @@ def test_to_excel_unicode_filename(self): index=['A', 'B'], columns=['X', 'Y', 'Z']) tm.assert_frame_equal(rs, xp) - # def test_to_excel_header_styling_xls(self): + # def test_to_excel_header_styling_xls(self, merge_cells, engine, ext): # import StringIO # s = StringIO( @@ -1711,7 +1588,7 @@ def test_to_excel_unicode_filename(self): # assert 1 == cell_xf.border.left_line_style # assert 2 == cell_xf.alignment.hor_align # os.remove(filename) - # def test_to_excel_header_styling_xlsx(self): + # def test_to_excel_header_styling_xlsx(self, merge_cells, engine, ext): # import StringIO # s = StringIO( # """Date,ticker,type,value @@ -1764,10 +1641,8 @@ def test_to_excel_unicode_filename(self): # assert ws.cell(maddr).merged # os.remove(filename) - def test_excel_010_hemstring(self): - _skip_if_no_xlrd() - - if self.merge_cells: + def test_excel_010_hemstring(self, merge_cells, engine, ext): + if merge_cells: pytest.skip('Skip tests for merged MI format.') from pandas.util.testing import makeCustomDataframe as mkdf @@ -1776,12 +1651,11 @@ def test_excel_010_hemstring(self): def roundtrip(df, header=True, parser_hdr=0, index=True): - with ensure_clean(self.ext) as path: - df.to_excel(path, header=header, - merge_cells=self.merge_cells, index=index) - xf = ExcelFile(path) - res = read_excel(xf, xf.sheet_names[0], header=parser_hdr) - return res + df.to_excel(self.path, header=header, + merge_cells=merge_cells, index=index) + xf = ExcelFile(self.path) + res = read_excel(xf, xf.sheet_names[0], header=parser_hdr) + return res nrows = 5 ncols = 3 @@ -1817,12 +1691,11 @@ def roundtrip(df, header=True, parser_hdr=0, index=True): assert res.shape == (1, 2) assert res.iloc[0, 0] is not np.nan - def 
test_excel_010_hemstring_raises_NotImplementedError(self): + def test_excel_010_hemstring_raises_NotImplementedError(self, merge_cells, + engine, ext): # This test was failing only for j>1 and header=False, # So I reproduced a simple test. - _skip_if_no_xlrd() - - if self.merge_cells: + if merge_cells: pytest.skip('Skip tests for merged MI format.') from pandas.util.testing import makeCustomDataframe as mkdf @@ -1831,12 +1704,11 @@ def test_excel_010_hemstring_raises_NotImplementedError(self): def roundtrip2(df, header=True, parser_hdr=0, index=True): - with ensure_clean(self.ext) as path: - df.to_excel(path, header=header, - merge_cells=self.merge_cells, index=index) - xf = ExcelFile(path) - res = read_excel(xf, xf.sheet_names[0], header=parser_hdr) - return res + df.to_excel(self.path, header=header, + merge_cells=merge_cells, index=index) + xf = ExcelFile(self.path) + res = read_excel(xf, xf.sheet_names[0], header=parser_hdr) + return res nrows = 5 ncols = 3 @@ -1846,134 +1718,119 @@ def roundtrip2(df, header=True, parser_hdr=0, index=True): with pytest.raises(NotImplementedError): roundtrip2(df, header=False, index=False) - def test_duplicated_columns(self): + def test_duplicated_columns(self, merge_cells, engine, ext): # Test for issue #5235 - _skip_if_no_xlrd() - - with ensure_clean(self.ext) as path: - write_frame = DataFrame([[1, 2, 3], [1, 2, 3], [1, 2, 3]]) - colnames = ['A', 'B', 'B'] - - write_frame.columns = colnames - write_frame.to_excel(path, 'test1') - - read_frame = read_excel(path, 'test1') - read_frame.columns = colnames - tm.assert_frame_equal(write_frame, read_frame) - - # 11007 / #10970 - write_frame = DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]], - columns=['A', 'B', 'A', 'B']) - write_frame.to_excel(path, 'test1') - read_frame = read_excel(path, 'test1') - read_frame.columns = ['A', 'B', 'A', 'B'] - tm.assert_frame_equal(write_frame, read_frame) - - # 10982 - write_frame.to_excel(path, 'test1', index=False, header=False) - read_frame = read_excel(path, 'test1', header=None) - write_frame.columns = [0, 1, 2, 3] - tm.assert_frame_equal(write_frame, read_frame) - - def test_swapped_columns(self): - # Test for issue #5427. - _skip_if_no_xlrd() + write_frame = DataFrame([[1, 2, 3], [1, 2, 3], [1, 2, 3]]) + colnames = ['A', 'B', 'B'] - with ensure_clean(self.ext) as path: - write_frame = DataFrame({'A': [1, 1, 1], - 'B': [2, 2, 2]}) - write_frame.to_excel(path, 'test1', columns=['B', 'A']) + write_frame.columns = colnames + write_frame.to_excel(self.path, 'test1') - read_frame = read_excel(path, 'test1', header=0) + read_frame = read_excel(self.path, 'test1') + read_frame.columns = colnames + tm.assert_frame_equal(write_frame, read_frame) - tm.assert_series_equal(write_frame['A'], read_frame['A']) - tm.assert_series_equal(write_frame['B'], read_frame['B']) + # 11007 / #10970 + write_frame = DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]], + columns=['A', 'B', 'A', 'B']) + write_frame.to_excel(self.path, 'test1') + read_frame = read_excel(self.path, 'test1') + read_frame.columns = ['A', 'B', 'A', 'B'] + tm.assert_frame_equal(write_frame, read_frame) - def test_invalid_columns(self): # 10982 - _skip_if_no_xlrd() + write_frame.to_excel(self.path, 'test1', index=False, header=False) + read_frame = read_excel(self.path, 'test1', header=None) + write_frame.columns = [0, 1, 2, 3] + tm.assert_frame_equal(write_frame, read_frame) + + def test_swapped_columns(self, merge_cells, engine, ext): + # Test for issue #5427. 
+ write_frame = DataFrame({'A': [1, 1, 1], + 'B': [2, 2, 2]}) + write_frame.to_excel(self.path, 'test1', columns=['B', 'A']) + + read_frame = read_excel(self.path, 'test1', header=0) + + tm.assert_series_equal(write_frame['A'], read_frame['A']) + tm.assert_series_equal(write_frame['B'], read_frame['B']) - with ensure_clean(self.ext) as path: - write_frame = DataFrame({'A': [1, 1, 1], - 'B': [2, 2, 2]}) + def test_invalid_columns(self, merge_cells, engine, ext): + # 10982 + write_frame = DataFrame({'A': [1, 1, 1], + 'B': [2, 2, 2]}) - with tm.assert_produces_warning(FutureWarning, - check_stacklevel=False): - write_frame.to_excel(path, 'test1', columns=['B', 'C']) - expected = write_frame.reindex(columns=['B', 'C']) - read_frame = read_excel(path, 'test1') - tm.assert_frame_equal(expected, read_frame) + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): + write_frame.to_excel(self.path, 'test1', columns=['B', 'C']) + expected = write_frame.reindex(columns=['B', 'C']) + read_frame = read_excel(self.path, 'test1') + tm.assert_frame_equal(expected, read_frame) - with pytest.raises(KeyError): - write_frame.to_excel(path, 'test1', columns=['C', 'D']) + with pytest.raises(KeyError): + write_frame.to_excel(self.path, 'test1', columns=['C', 'D']) - def test_comment_arg(self): + def test_comment_arg(self, merge_cells, engine, ext): # Re issue #18735 # Test the comment argument functionality to read_excel - with ensure_clean(self.ext) as path: - - # Create file to read in - df = DataFrame({'A': ['one', '#one', 'one'], - 'B': ['two', 'two', '#two']}) - df.to_excel(path, 'test_c') - - # Read file without comment arg - result1 = read_excel(path, 'test_c') - result1.iloc[1, 0] = None - result1.iloc[1, 1] = None - result1.iloc[2, 1] = None - result2 = read_excel(path, 'test_c', comment='#') - tm.assert_frame_equal(result1, result2) - - def test_comment_default(self): + + # Create file to read in + df = DataFrame({'A': ['one', '#one', 'one'], + 'B': ['two', 'two', '#two']}) + df.to_excel(self.path, 'test_c') + + # Read file without comment arg + result1 = read_excel(self.path, 'test_c') + result1.iloc[1, 0] = None + result1.iloc[1, 1] = None + result1.iloc[2, 1] = None + result2 = read_excel(self.path, 'test_c', comment='#') + tm.assert_frame_equal(result1, result2) + + def test_comment_default(self, merge_cells, engine, ext): # Re issue #18735 # Test the comment argument default to read_excel - with ensure_clean(self.ext) as path: - # Create file to read in - df = DataFrame({'A': ['one', '#one', 'one'], - 'B': ['two', 'two', '#two']}) - df.to_excel(path, 'test_c') + # Create file to read in + df = DataFrame({'A': ['one', '#one', 'one'], + 'B': ['two', 'two', '#two']}) + df.to_excel(self.path, 'test_c') - # Read file with default and explicit comment=None - result1 = read_excel(path, 'test_c') - result2 = read_excel(path, 'test_c', comment=None) - tm.assert_frame_equal(result1, result2) + # Read file with default and explicit comment=None + result1 = read_excel(self.path, 'test_c') + result2 = read_excel(self.path, 'test_c', comment=None) + tm.assert_frame_equal(result1, result2) - def test_comment_used(self): + def test_comment_used(self, merge_cells, engine, ext): # Re issue #18735 # Test the comment argument is working as expected when used - with ensure_clean(self.ext) as path: - # Create file to read in - df = DataFrame({'A': ['one', '#one', 'one'], - 'B': ['two', 'two', '#two']}) - df.to_excel(path, 'test_c') + # Create file to read in + df = DataFrame({'A': ['one', '#one', 
'one'], + 'B': ['two', 'two', '#two']}) + df.to_excel(self.path, 'test_c') - # Test read_frame_comment against manually produced expected output - expected = DataFrame({'A': ['one', None, 'one'], - 'B': ['two', None, None]}) - result = read_excel(path, 'test_c', comment='#') - tm.assert_frame_equal(result, expected) + # Test read_frame_comment against manually produced expected output + expected = DataFrame({'A': ['one', None, 'one'], + 'B': ['two', None, None]}) + result = read_excel(self.path, 'test_c', comment='#') + tm.assert_frame_equal(result, expected) - def test_comment_emptyline(self): + def test_comment_emptyline(self, merge_cells, engine, ext): # Re issue #18735 # Test that read_excel ignores commented lines at the end of file - with ensure_clean(self.ext) as path: - df = DataFrame({'a': ['1', '#2'], 'b': ['2', '3']}) - df.to_excel(path, index=False) + df = DataFrame({'a': ['1', '#2'], 'b': ['2', '3']}) + df.to_excel(self.path, index=False) - # Test that all-comment lines at EoF are ignored - expected = DataFrame({'a': [1], 'b': [2]}) - result = read_excel(path, comment='#') - tm.assert_frame_equal(result, expected) + # Test that all-comment lines at EoF are ignored + expected = DataFrame({'a': [1], 'b': [2]}) + result = read_excel(self.path, comment='#') + tm.assert_frame_equal(result, expected) - def test_datetimes(self): + def test_datetimes(self, merge_cells, engine, ext): # Test writing and reading datetimes. For issue #9139. (xref #9185) - _skip_if_no_xlrd() - datetimes = [datetime(2013, 1, 13, 1, 2, 3), datetime(2013, 1, 13, 2, 45, 56), datetime(2013, 1, 13, 4, 29, 49), @@ -1986,21 +1843,18 @@ def test_datetimes(self): datetime(2013, 1, 13, 16, 37, 0), datetime(2013, 1, 13, 18, 20, 52)] - with ensure_clean(self.ext) as path: - write_frame = DataFrame({'A': datetimes}) - write_frame.to_excel(path, 'Sheet1') - read_frame = read_excel(path, 'Sheet1', header=0) + write_frame = DataFrame({'A': datetimes}) + write_frame.to_excel(self.path, 'Sheet1') + read_frame = read_excel(self.path, 'Sheet1', header=0) - tm.assert_series_equal(write_frame['A'], read_frame['A']) + tm.assert_series_equal(write_frame['A'], read_frame['A']) # GH7074 - def test_bytes_io(self): - _skip_if_no_xlrd() - + def test_bytes_io(self, merge_cells, engine, ext): bio = BytesIO() df = DataFrame(np.random.randn(10, 2)) # pass engine explicitly as there is no file path to infer from - writer = ExcelWriter(bio, engine=self.engine_name) + writer = ExcelWriter(bio, engine=engine) df.to_excel(writer) writer.save() bio.seek(0) @@ -2008,62 +1862,59 @@ def test_bytes_io(self): tm.assert_frame_equal(df, reread_df) # GH8188 - def test_write_lists_dict(self): - _skip_if_no_xlrd() - + def test_write_lists_dict(self, merge_cells, engine, ext): df = DataFrame({'mixed': ['a', ['b', 'c'], {'d': 'e', 'f': 2}], 'numeric': [1, 2, 3.0], 'str': ['apple', 'banana', 'cherry']}) expected = df.copy() expected.mixed = expected.mixed.apply(str) expected.numeric = expected.numeric.astype('int64') - with ensure_clean(self.ext) as path: - df.to_excel(path, 'Sheet1') - read = read_excel(path, 'Sheet1', header=0) - tm.assert_frame_equal(read, expected) + + df.to_excel(self.path, 'Sheet1') + read = read_excel(self.path, 'Sheet1', header=0) + tm.assert_frame_equal(read, expected) # GH13347 - def test_true_and_false_value_options(self): + def test_true_and_false_value_options(self, merge_cells, engine, ext): df = pd.DataFrame([['foo', 'bar']], columns=['col1', 'col2']) expected = df.replace({'foo': True, 'bar': False}) - with 
ensure_clean(self.ext) as path: - df.to_excel(path) - read_frame = read_excel(path, true_values=['foo'], - false_values=['bar']) - tm.assert_frame_equal(read_frame, expected) - def test_freeze_panes(self): + df.to_excel(self.path) + read_frame = read_excel(self.path, true_values=['foo'], + false_values=['bar']) + tm.assert_frame_equal(read_frame, expected) + + def test_freeze_panes(self, merge_cells, engine, ext): # GH15160 expected = DataFrame([[1, 2], [3, 4]], columns=['col1', 'col2']) - with ensure_clean(self.ext) as path: - expected.to_excel(path, "Sheet1", freeze_panes=(1, 1)) - result = read_excel(path) - tm.assert_frame_equal(expected, result) + expected.to_excel(self.path, "Sheet1", freeze_panes=(1, 1)) + result = read_excel(self.path) + tm.assert_frame_equal(expected, result) - def test_path_pathlib(self): + def test_path_pathlib(self, merge_cells, engine, ext): df = tm.makeDataFrame() - writer = partial(df.to_excel, engine=self.engine_name) + writer = partial(df.to_excel, engine=engine) reader = partial(pd.read_excel) result = tm.round_trip_pathlib(writer, reader, - path="foo.{}".format(self.ext)) + path="foo.{}".format(ext)) tm.assert_frame_equal(df, result) - def test_path_localpath(self): + def test_path_localpath(self, merge_cells, engine, ext): df = tm.makeDataFrame() - writer = partial(df.to_excel, engine=self.engine_name) + writer = partial(df.to_excel, engine=engine) reader = partial(pd.read_excel) result = tm.round_trip_pathlib(writer, reader, - path="foo.{}".format(self.ext)) + path="foo.{}".format(ext)) tm.assert_frame_equal(df, result) -class TestOpenpyxlTests(ExcelWriterBase): - engine_name = 'openpyxl' - ext = '.xlsx' - check_skip = staticmethod(_skip_if_no_openpyxl) +@td.skip_if_no('openpyxl') +@pytest.mark.parametrize("merge_cells,ext,engine", [ + (None, '.xlsx', 'openpyxl')]) +class TestOpenpyxlTests(_WriterBase): - def test_to_excel_styleconverter(self): + def test_to_excel_styleconverter(self, merge_cells, ext, engine): from openpyxl import styles hstyle = { @@ -2117,7 +1968,7 @@ def test_to_excel_styleconverter(self): assert kw['number_format'] == number_format assert kw['protection'] == protection - def test_write_cells_merge_styled(self): + def test_write_cells_merge_styled(self, merge_cells, ext, engine): from pandas.io.formats.excel import ExcelCell sheet_name = 'merge_styled' @@ -2138,7 +1989,7 @@ def test_write_cells_merge_styled(self): mergestart=1, mergeend=1, style=sty_merged), ] - with ensure_clean('.xlsx') as path: + with ensure_clean(ext) as path: writer = _OpenpyxlWriter(path) writer.write_cells(initial_cells, sheet_name=sheet_name) writer.write_cells(merge_cells, sheet_name=sheet_name) @@ -2150,44 +2001,41 @@ def test_write_cells_merge_styled(self): assert xcell_a2.font == openpyxl_sty_merged -class TestXlwtTests(ExcelWriterBase): - ext = '.xls' - engine_name = 'xlwt' - check_skip = staticmethod(_skip_if_no_xlwt) +@td.skip_if_no('xlwt') +@pytest.mark.parametrize("merge_cells,ext,engine", [ + (None, '.xls', 'xlwt')]) +class TestXlwtTests(_WriterBase): - def test_excel_raise_error_on_multiindex_columns_and_no_index(self): - _skip_if_no_xlwt() + def test_excel_raise_error_on_multiindex_columns_and_no_index( + self, merge_cells, ext, engine): # MultiIndex as columns is not yet implemented 9794 cols = MultiIndex.from_tuples([('site', ''), ('2014', 'height'), ('2014', 'weight')]) df = DataFrame(np.random.randn(10, 3), columns=cols) with pytest.raises(NotImplementedError): - with ensure_clean(self.ext) as path: + with ensure_clean(ext) as path: 
df.to_excel(path, index=False) - def test_excel_multiindex_columns_and_index_true(self): - _skip_if_no_xlwt() + def test_excel_multiindex_columns_and_index_true(self, merge_cells, ext, + engine): cols = MultiIndex.from_tuples([('site', ''), ('2014', 'height'), ('2014', 'weight')]) df = pd.DataFrame(np.random.randn(10, 3), columns=cols) - with ensure_clean(self.ext) as path: + with ensure_clean(ext) as path: df.to_excel(path, index=True) - def test_excel_multiindex_index(self): - _skip_if_no_xlwt() + def test_excel_multiindex_index(self, merge_cells, ext, engine): # MultiIndex as index works so assert no error #9794 cols = MultiIndex.from_tuples([('site', ''), ('2014', 'height'), ('2014', 'weight')]) df = DataFrame(np.random.randn(3, 10), index=cols) - with ensure_clean(self.ext) as path: + with ensure_clean(ext) as path: df.to_excel(path, index=False) - def test_to_excel_styleconverter(self): - _skip_if_no_xlwt() - + def test_to_excel_styleconverter(self, merge_cells, ext, engine): import xlwt hstyle = {"font": {"bold": True}, @@ -2207,23 +2055,21 @@ def test_to_excel_styleconverter(self): assert xlwt.Alignment.VERT_TOP == xls_style.alignment.vert -class TestXlsxWriterTests(ExcelWriterBase): - ext = '.xlsx' - engine_name = 'xlsxwriter' - check_skip = staticmethod(_skip_if_no_xlsxwriter) +@td.skip_if_no('xlsxwriter') +@pytest.mark.parametrize("merge_cells,ext,engine", [ + (None, '.xlsx', 'xlsxwriter')]) +class TestXlsxWriterTests(_WriterBase): - def test_column_format(self): + @td.skip_if_no('openpyxl') + def test_column_format(self, merge_cells, ext, engine): # Test that column formats are applied to cells. Test for issue #9167. # Applicable to xlsxwriter only. - _skip_if_no_xlsxwriter() - with warnings.catch_warnings(): # Ignore the openpyxl lxml warning. warnings.simplefilter("ignore") - _skip_if_no_openpyxl() import openpyxl - with ensure_clean(self.ext) as path: + with ensure_clean(ext) as path: frame = DataFrame({'A': [123456, 123456], 'B': [123456, 123456]}) @@ -2260,54 +2106,28 @@ def test_column_format(self): assert read_num_format == num_format -class TestOpenpyxlTests_NoMerge(ExcelWriterBase): - ext = '.xlsx' - engine_name = 'openpyxl' - check_skip = staticmethod(_skip_if_no_openpyxl) - - # Test < 0.13 non-merge behaviour for MultiIndex and Hierarchical Rows. - merge_cells = False - - -class TestXlwtTests_NoMerge(ExcelWriterBase): - ext = '.xls' - engine_name = 'xlwt' - check_skip = staticmethod(_skip_if_no_xlwt) - - # Test < 0.13 non-merge behaviour for MultiIndex and Hierarchical Rows. - merge_cells = False - - -class TestXlsxWriterTests_NoMerge(ExcelWriterBase): - ext = '.xlsx' - engine_name = 'xlsxwriter' - check_skip = staticmethod(_skip_if_no_xlsxwriter) - - # Test < 0.13 non-merge behaviour for MultiIndex and Hierarchical Rows. 
- merge_cells = False - - class TestExcelWriterEngineTests(object): - def test_ExcelWriter_dispatch(self): - with tm.assert_raises_regex(ValueError, 'No engine'): - ExcelWriter('nothing') - - try: - import xlsxwriter # noqa - writer_klass = _XlsxWriter - except ImportError: - _skip_if_no_openpyxl() - writer_klass = _OpenpyxlWriter - - with ensure_clean('.xlsx') as path: + @pytest.mark.parametrize('klass,ext', [ + pytest.param(_XlsxWriter, '.xlsx', marks=pytest.mark.skipif( + not td.safe_import('xlsxwriter'), reason='No xlsxwriter')), + pytest.param(_OpenpyxlWriter, '.xlsx', marks=pytest.mark.skipif( + not td.safe_import('openpyxl'), reason='No openpyxl')), + pytest.param(_XlwtWriter, '.xls', marks=pytest.mark.skipif( + not td.safe_import('xlwt'), reason='No xlwt')) + ]) + def test_ExcelWriter_dispatch(self, klass, ext): + with ensure_clean(ext) as path: writer = ExcelWriter(path) - assert isinstance(writer, writer_klass) + if ext == '.xlsx' and td.safe_import('xlsxwriter'): + # xlsxwriter has preference over openpyxl if both installed + assert isinstance(writer, _XlsxWriter) + else: + assert isinstance(writer, klass) - _skip_if_no_xlwt() - with ensure_clean('.xls') as path: - writer = ExcelWriter(path) - assert isinstance(writer, _XlwtWriter) + def test_ExcelWriter_dispatch_raises(self): + with tm.assert_raises_regex(ValueError, 'No engine'): + ExcelWriter('nothing') def test_register_writer(self): # some awkward mocking to test out dispatch and such actually works @@ -2498,11 +2318,11 @@ def custom_converter(css): assert n_cells == (10 + 1) * (3 + 1) +@td.skip_if_no('openpyxl') class TestFSPath(object): @pytest.mark.skipif(sys.version_info < (3, 6), reason='requires fspath') def test_excelfile_fspath(self): - _skip_if_no_openpyxl() with tm.ensure_clean('foo.xlsx') as path: df = DataFrame({"A": [1, 2]}) df.to_excel(path) @@ -2513,7 +2333,6 @@ def test_excelfile_fspath(self): @pytest.mark.skipif(sys.version_info < (3, 6), reason='requires fspath') # @pytest.mark.xfail def test_excelwriter_fspath(self): - _skip_if_no_openpyxl() with tm.ensure_clean('foo.xlsx') as path: writer = ExcelWriter(path) assert os.fspath(writer) == str(path) diff --git a/pandas/util/_test_decorators.py b/pandas/util/_test_decorators.py index 0fd5648739e5c..b2745ab5eec77 100644 --- a/pandas/util/_test_decorators.py +++ b/pandas/util/_test_decorators.py @@ -57,7 +57,11 @@ def safe_import(mod_name, min_version=None): return mod else: import sys - version = getattr(sys.modules[mod_name], '__version__') + try: + version = getattr(sys.modules[mod_name], '__version__') + except AttributeError: + # xlrd uses a capitalized attribute name + version = getattr(sys.modules[mod_name], '__VERSION__') if version: from distutils.version import LooseVersion if LooseVersion(version) >= LooseVersion(min_version): diff --git a/pandas/util/testing.py b/pandas/util/testing.py index 0009e26f8b100..942416408e4f0 100644 --- a/pandas/util/testing.py +++ b/pandas/util/testing.py @@ -2165,7 +2165,7 @@ def network(t, url="http://www.google.com", from pytest import skip t.network = True - @wraps(t) + @compat.wraps(t) def wrapper(*args, **kwargs): if check_before_test and not raise_on_error: if not can_connect(url, error_classes):
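The `_test_decorators.py` hunk above makes `safe_import` tolerant of modules that spell their version attribute in capitals, as xlrd does. A minimal sketch of that lookup, with `_FakeXlrd` as a hypothetical stand-in for the imported module (not part of the diff):

```python
def get_module_version(mod):
    # Prefer the conventional attribute, then fall back to the
    # capitalized spelling that xlrd uses.
    try:
        return getattr(mod, '__version__')
    except AttributeError:
        return getattr(mod, '__VERSION__')


class _FakeXlrd(object):
    # Hypothetical stand-in for the real xlrd module.
    __VERSION__ = '1.1.0'


assert get_module_version(_FakeXlrd) == '1.1.0'
```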
- [x] closes #19816
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry

Once I started looking at it, it made sense to make the module more "pytest compatible". A lot of the skipping was being done imperatively inside the test bodies, which is better suited to class-level decorators such as `td.skip_if_no`. While moving that around, I also replaced the setup/teardown mechanism of the "Excel Writing" tests with a pytest fixture (sketched below).
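Here is a minimal, self-contained sketch of that pattern, assuming only pytest; the option store, `has_module` helper, and test names are illustrative stand-ins, not part of the pandas diff. The class-level `parametrize` carries `skipif` marks for missing engines (playing the role of `td.safe_import`), and the `autouse` fixture wraps every test: set the option, yield to the test body, roll the option back.

```python
import pytest

_options = {'io.excel.xlsx.writer': 'auto'}  # toy stand-in for pandas config


def get_option(name):
    return _options[name]


def set_option(name, value):
    _options[name] = value


def has_module(name):
    # Plays the role td.safe_import plays in the diff.
    try:
        __import__(name)
        return True
    except ImportError:
        return False


@pytest.mark.parametrize('engine,ext', [
    pytest.param('openpyxl', '.xlsx', marks=pytest.mark.skipif(
        not has_module('openpyxl'), reason='No openpyxl')),
])
class TestWriterPattern(object):

    @pytest.fixture(autouse=True)
    def set_engine(self, engine, ext):
        # Runs around every test in the class: set the option, hand
        # control to the test body, then roll the option back (the
        # part that used to live in teardown_method).
        name = 'io.excel.{ext}.writer'.format(ext=ext.strip('.'))
        prev = get_option(name)
        set_option(name, engine)
        yield
        set_option(name, prev)

    def test_engine_option_is_set(self, engine, ext):
        name = 'io.excel.{ext}.writer'.format(ext=ext.strip('.'))
        assert get_option(name) == engine
```

Because the fixture is `autouse`, subclasses inherit the behaviour without any explicit setup/teardown methods, which is what lets a base class like `_WriterBase` stay so small.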
https://api.github.com/repos/pandas-dev/pandas/pulls/19829
2018-02-21T21:33:39Z
2018-02-26T12:21:07Z
2018-02-26T12:21:07Z
2018-02-26T16:15:04Z
make ops.add_foo take just class
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 061b69f25e7ac..e4ef1b97882d9 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -6131,8 +6131,8 @@ def isin(self, values): DataFrame._add_numeric_operations() DataFrame._add_series_or_dataframe_operations() -ops.add_flex_arithmetic_methods(DataFrame, **ops.frame_flex_funcs) -ops.add_special_arithmetic_methods(DataFrame, **ops.frame_special_funcs) +ops.add_flex_arithmetic_methods(DataFrame) +ops.add_special_arithmetic_methods(DataFrame) def _arrays_to_mgr(arrays, arr_names, index, columns, dtype=None): diff --git a/pandas/core/ops.py b/pandas/core/ops.py index b20f208d14dc5..7bdbac66b4f31 100644 --- a/pandas/core/ops.py +++ b/pandas/core/ops.py @@ -37,7 +37,7 @@ construct_1d_object_array_from_listlike) from pandas.core.dtypes.generic import ( ABCSeries, - ABCDataFrame, + ABCDataFrame, ABCPanel, ABCIndex, ABCSparseSeries, ABCSparseArray) @@ -711,6 +711,64 @@ def mask_cmp_op(x, y, op, allowed_types): # Functions that add arithmetic methods to objects, given arithmetic factory # methods +def _get_method_wrappers(cls): + """ + Find the appropriate operation-wrappers to use when defining flex/special + arithmetic, boolean, and comparison operations with the given class. + + Parameters + ---------- + cls : class + + Returns + ------- + arith_flex : function or None + comp_flex : function or None + arith_special : function + comp_special : function + bool_special : function + + Notes + ----- + None is only returned for SparseArray + """ + if issubclass(cls, ABCSparseSeries): + # Be sure to catch this before ABCSeries and ABCSparseArray, + # as they will both come see SparseSeries as a subclass + arith_flex = _flex_method_SERIES + comp_flex = _flex_method_SERIES + arith_special = _arith_method_SPARSE_SERIES + comp_special = _arith_method_SPARSE_SERIES + bool_special = _bool_method_SERIES + # TODO: I don't think the functions defined by bool_method are tested + elif issubclass(cls, ABCSeries): + # Just Series; SparseSeries is caught above + arith_flex = _flex_method_SERIES + comp_flex = _flex_method_SERIES + arith_special = _arith_method_SERIES + comp_special = _comp_method_SERIES + bool_special = _bool_method_SERIES + elif issubclass(cls, ABCSparseArray): + arith_flex = None + comp_flex = None + arith_special = _arith_method_SPARSE_ARRAY + comp_special = _arith_method_SPARSE_ARRAY + bool_special = _arith_method_SPARSE_ARRAY + elif issubclass(cls, ABCPanel): + arith_flex = _flex_method_PANEL + comp_flex = _comp_method_PANEL + arith_special = _arith_method_PANEL + comp_special = _comp_method_PANEL + bool_special = _arith_method_PANEL + elif issubclass(cls, ABCDataFrame): + # Same for DataFrame and SparseDataFrame + arith_flex = _arith_method_FRAME + comp_flex = _flex_comp_method_FRAME + arith_special = _arith_method_FRAME + comp_special = _comp_method_FRAME + bool_special = _arith_method_FRAME + return arith_flex, comp_flex, arith_special, comp_special, bool_special + def _create_methods(cls, arith_method, comp_method, bool_method, special=False): @@ -743,16 +801,18 @@ def _create_methods(cls, arith_method, comp_method, bool_method, # yapf: enable new_methods['div'] = new_methods['truediv'] new_methods['rdiv'] = new_methods['rtruediv'] + if have_divmod: + # divmod doesn't have an op that is supported by numexpr + new_methods['divmod'] = arith_method(cls, divmod, special) + + new_methods.update(dict( + eq=comp_method(cls, operator.eq, special), + ne=comp_method(cls, operator.ne, special), + lt=comp_method(cls, operator.lt, 
special), + gt=comp_method(cls, operator.gt, special), + le=comp_method(cls, operator.le, special), + ge=comp_method(cls, operator.ge, special))) - # Comp methods never had a default axis set - if comp_method: - new_methods.update(dict( - eq=comp_method(cls, operator.eq, special), - ne=comp_method(cls, operator.ne, special), - lt=comp_method(cls, operator.lt, special), - gt=comp_method(cls, operator.gt, special), - le=comp_method(cls, operator.le, special), - ge=comp_method(cls, operator.ge, special))) if bool_method: new_methods.update( dict(and_=bool_method(cls, operator.and_, special), @@ -762,9 +822,6 @@ def _create_methods(cls, arith_method, comp_method, bool_method, rand_=bool_method(cls, rand_, special), ror_=bool_method(cls, ror_, special), rxor=bool_method(cls, rxor, special))) - if have_divmod: - # divmod doesn't have an op that is supported by numexpr - new_methods['divmod'] = arith_method(cls, divmod, special) if special: dunderize = lambda x: '__{name}__'.format(name=x.strip('_')) @@ -788,22 +845,17 @@ def add_methods(cls, new_methods): # ---------------------------------------------------------------------- # Arithmetic -def add_special_arithmetic_methods(cls, arith_method=None, - comp_method=None, bool_method=None): +def add_special_arithmetic_methods(cls): """ Adds the full suite of special arithmetic methods (``__add__``, ``__sub__``, etc.) to the class. Parameters ---------- - arith_method : function (optional) - factory for special arithmetic methods: - f(cls, op, special) - comp_method : function (optional) - factory for rich comparison - signature: f(cls, op, special) - bool_method : function (optional) - factory for boolean methods - signature: f(cls, op, special) + cls : class + special methods will be defined and pinned to this class """ + _, _, arith_method, comp_method, bool_method = _get_method_wrappers(cls) new_methods = _create_methods(cls, arith_method, comp_method, bool_method, special=True) # inplace operators (I feel like these should get passed an `inplace=True` @@ -836,28 +888,26 @@ def f(self, other): __ipow__=_wrap_inplace_method(new_methods["__pow__"]))) if not compat.PY3: new_methods["__idiv__"] = _wrap_inplace_method(new_methods["__div__"]) - if bool_method: - new_methods.update( - dict(__iand__=_wrap_inplace_method(new_methods["__and__"]), - __ior__=_wrap_inplace_method(new_methods["__or__"]), - __ixor__=_wrap_inplace_method(new_methods["__xor__"]))) + + new_methods.update( + dict(__iand__=_wrap_inplace_method(new_methods["__and__"]), + __ior__=_wrap_inplace_method(new_methods["__or__"]), + __ixor__=_wrap_inplace_method(new_methods["__xor__"]))) add_methods(cls, new_methods=new_methods) -def add_flex_arithmetic_methods(cls, flex_arith_method, flex_comp_method=None): +def add_flex_arithmetic_methods(cls): """ Adds the full suite of flex arithmetic methods (``pow``, ``mul``, ``add``) to the class. 
Parameters ---------- - flex_arith_method : function - factory for flex arithmetic methods: - f(cls, op, special) - flex_comp_method : function, optional, - factory for rich comparison - signature: f(cls, op, special) + cls : class + flex methods will be defined and pinned to this class """ + flex_arith_method, flex_comp_method, _, _, _ = _get_method_wrappers(cls) new_methods = _create_methods(cls, flex_arith_method, flex_comp_method, bool_method=None, special=False) @@ -1284,14 +1334,6 @@ def flex_wrapper(self, other, level=None, fill_value=None, axis=0): return flex_wrapper -series_flex_funcs = dict(flex_arith_method=_flex_method_SERIES, - flex_comp_method=_flex_method_SERIES) - -series_special_funcs = dict(arith_method=_arith_method_SERIES, - comp_method=_comp_method_SERIES, - bool_method=_bool_method_SERIES) - - # ----------------------------------------------------------------------------- # DataFrame @@ -1533,14 +1575,6 @@ def f(self, other): return f -frame_flex_funcs = dict(flex_arith_method=_arith_method_FRAME, - flex_comp_method=_flex_comp_method_FRAME) - -frame_special_funcs = dict(arith_method=_arith_method_FRAME, - comp_method=_comp_method_FRAME, - bool_method=_arith_method_FRAME) - - # ----------------------------------------------------------------------------- # Panel @@ -1629,16 +1663,38 @@ def f(self, other, axis=0): return f -panel_special_funcs = dict(arith_method=_arith_method_PANEL, - comp_method=_comp_method_PANEL, - bool_method=_arith_method_PANEL) - -panel_flex_funcs = dict(flex_arith_method=_flex_method_PANEL, - flex_comp_method=_comp_method_PANEL) - # ----------------------------------------------------------------------------- # Sparse +def _cast_sparse_series_op(left, right, opname): + """ + For SparseSeries operation, coerce to float64 if the result is expected + to have NaN or inf values + + Parameters + ---------- + left : SparseArray + right : SparseArray + opname : str + + Returns + ------- + left : SparseArray + right : SparseArray + """ + opname = opname.strip('_') + + if is_integer_dtype(left) and is_integer_dtype(right): + # series coerces to float64 if result should have NaN/inf + if opname in ('floordiv', 'mod') and (right.values == 0).any(): + left = left.astype(np.float64) + right = right.astype(np.float64) + elif opname in ('rfloordiv', 'rmod') and (left.values == 0).any(): + left = left.astype(np.float64) + right = right.astype(np.float64) + + return left, right + def _arith_method_SPARSE_SERIES(cls, op, special): """ @@ -1674,8 +1730,8 @@ def _sparse_series_op(left, right, op, name): new_name = get_op_result_name(left, right) from pandas.core.sparse.array import _sparse_array_op - result = _sparse_array_op(left.values, right.values, op, name, - series=True) + lvalues, rvalues = _cast_sparse_series_op(left.values, right.values, name) + result = _sparse_array_op(lvalues, rvalues, op, name) return left._constructor(result, index=new_index, name=new_name) @@ -1697,7 +1753,7 @@ def wrapper(self, other): dtype = getattr(other, 'dtype', None) other = SparseArray(other, fill_value=self.fill_value, dtype=dtype) - return _sparse_array_op(self, other, op, name, series=False) + return _sparse_array_op(self, other, op, name) elif is_scalar(other): with np.errstate(all='ignore'): fill = op(_get_fill(self), np.asarray(other)) @@ -1710,13 +1766,3 @@ def wrapper(self, other): wrapper.__name__ = name return wrapper - - -sparse_array_special_funcs = dict(arith_method=_arith_method_SPARSE_ARRAY, - comp_method=_arith_method_SPARSE_ARRAY, - 
bool_method=_arith_method_SPARSE_ARRAY) - -sparse_series_special_funcs = dict(arith_method=_arith_method_SPARSE_SERIES, - comp_method=_arith_method_SPARSE_SERIES, - bool_method=_bool_method_SERIES) -# TODO: I don't think the functions defined by bool_method are tested diff --git a/pandas/core/panel.py b/pandas/core/panel.py index 3be1e3ef8734d..fc7fad861df44 100644 --- a/pandas/core/panel.py +++ b/pandas/core/panel.py @@ -1527,8 +1527,8 @@ def _extract_axis(self, data, axis=0, intersect=False): slicers={'major_axis': 'index', 'minor_axis': 'columns'}) -ops.add_special_arithmetic_methods(Panel, **ops.panel_special_funcs) -ops.add_flex_arithmetic_methods(Panel, **ops.panel_flex_funcs) +ops.add_special_arithmetic_methods(Panel) +ops.add_flex_arithmetic_methods(Panel) Panel._add_numeric_operations() diff --git a/pandas/core/series.py b/pandas/core/series.py index b42e02bc99237..26b7fd552b062 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -3100,8 +3100,8 @@ def to_period(self, freq=None, copy=True): Series._add_series_or_dataframe_operations() # Add arithmetic! -ops.add_flex_arithmetic_methods(Series, **ops.series_flex_funcs) -ops.add_special_arithmetic_methods(Series, **ops.series_special_funcs) +ops.add_flex_arithmetic_methods(Series) +ops.add_special_arithmetic_methods(Series) # ----------------------------------------------------------------------------- diff --git a/pandas/core/sparse/array.py b/pandas/core/sparse/array.py index 92c4fe932f066..5532d7522cd2d 100644 --- a/pandas/core/sparse/array.py +++ b/pandas/core/sparse/array.py @@ -53,20 +53,11 @@ def _get_fill(arr): return np.asarray(arr.fill_value) -def _sparse_array_op(left, right, op, name, series=False): +def _sparse_array_op(left, right, op, name): if name.startswith('__'): # For lookups in _libs.sparse we need non-dunder op name name = name[2:-2] - if series and is_integer_dtype(left) and is_integer_dtype(right): - # series coerces to float64 if result should have NaN/inf - if name in ('floordiv', 'mod') and (right.values == 0).any(): - left = left.astype(np.float64) - right = right.astype(np.float64) - elif name in ('rfloordiv', 'rmod') and (left.values == 0).any(): - left = left.astype(np.float64) - right = right.astype(np.float64) - # dtype used to find corresponding sparse method if not is_dtype_equal(left.dtype, right.dtype): dtype = find_common_type([left.dtype, right.dtype]) @@ -850,5 +841,4 @@ def _make_index(length, indices, kind): return index -ops.add_special_arithmetic_methods(SparseArray, - **ops.sparse_array_special_funcs) +ops.add_special_arithmetic_methods(SparseArray) diff --git a/pandas/core/sparse/frame.py b/pandas/core/sparse/frame.py index 872a17d8dbabe..d89b1d681c478 100644 --- a/pandas/core/sparse/frame.py +++ b/pandas/core/sparse/frame.py @@ -1014,5 +1014,5 @@ def homogenize(series_dict): # use unaccelerated ops for sparse objects -ops.add_flex_arithmetic_methods(SparseDataFrame, **ops.frame_flex_funcs) -ops.add_special_arithmetic_methods(SparseDataFrame, **ops.frame_special_funcs) +ops.add_flex_arithmetic_methods(SparseDataFrame) +ops.add_special_arithmetic_methods(SparseDataFrame) diff --git a/pandas/core/sparse/series.py b/pandas/core/sparse/series.py index 26cf9dbadbbf2..7a1496bf11117 100644 --- a/pandas/core/sparse/series.py +++ b/pandas/core/sparse/series.py @@ -812,6 +812,5 @@ def from_coo(cls, A, dense_index=False): # overwrite series methods with unaccelerated Sparse-specific versions -ops.add_flex_arithmetic_methods(SparseSeries, **ops.series_flex_funcs) 
-ops.add_special_arithmetic_methods(SparseSeries, - **ops.sparse_series_special_funcs) +ops.add_flex_arithmetic_methods(SparseSeries) +ops.add_special_arithmetic_methods(SparseSeries)
As discussed. Also includes a small cleanup for Sparse ops.
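The pattern is easiest to see in miniature. Below is a toy, self-contained sketch (not pandas code; all names are illustrative) of the dispatch-by-class idea this PR adopts: the per-class `*_funcs` dicts disappear, and the wrapper factories are resolved from the class itself, with the more specific subclass checked first:

```python
class Base:
    pass

class Series(Base):
    pass

class SparseSeries(Series):
    pass

def _series_wrapper(op_name):
    def method(self, other):
        return "dense " + op_name
    return method

def _sparse_wrapper(op_name):
    def method(self, other):
        return "sparse " + op_name
    return method

def _get_method_wrappers(cls):
    # Check SparseSeries before Series: a SparseSeries *is* a Series,
    # so the more specific class must win, mirroring the ordering
    # in the real _get_method_wrappers.
    if issubclass(cls, SparseSeries):
        return _sparse_wrapper
    elif issubclass(cls, Series):
        return _series_wrapper
    raise TypeError(cls)

def add_special_arithmetic_methods(cls):
    # Callers now pass only the class; no **special_funcs kwargs needed.
    factory = _get_method_wrappers(cls)
    cls.__add__ = factory("add")

add_special_arithmetic_methods(SparseSeries)
print(SparseSeries() + None)  # -> "sparse add"
```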
https://api.github.com/repos/pandas-dev/pandas/pulls/19828
2018-02-21T21:33:21Z
2018-02-26T11:37:50Z
2018-02-26T11:37:50Z
2018-06-22T03:21:52Z
Implement add_offset_array for PeriodIndex
diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index 8f2d7d382a16e..e410c50fb006b 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -44,6 +44,7 @@ from pandas.util._decorators import (Appender, Substitution, cache_readonly, deprecate_kwarg) from pandas.compat import zip, u +from pandas.errors import PerformanceWarning import pandas.core.indexes.base as ibase _index_doc_kwargs = dict(ibase._index_doc_kwargs) @@ -746,6 +747,28 @@ def _sub_period(self, other): # result must be Int64Index or Float64Index return Index(new_data, name=self.name) + def _add_offset_array(self, other): + # Array/Index of DateOffset objects + if len(other) == 1: + return self + other[0] + else: + warnings.warn("Adding/subtracting array of DateOffsets to " + "{cls} not vectorized" + .format(cls=type(self).__name__), PerformanceWarning) + res_values = self.astype('O').values + np.array(other) + return self.__class__(res_values) + + def _sub_offset_array(self, other): + # Array/Index of DateOffset objects + if len(other) == 1: + return self - other[0] + else: + warnings.warn("Adding/subtracting array of DateOffsets to " + "{cls} not vectorized" + .format(cls=type(self).__name__), PerformanceWarning) + res_values = self.astype('O').values - np.array(other) + return self.__class__(res_values) + def shift(self, n): """ Specialized shift which produces an PeriodIndex diff --git a/pandas/tests/indexes/period/test_arithmetic.py b/pandas/tests/indexes/period/test_arithmetic.py index 5f8f9533e9c44..c64ed96eb87c4 100644 --- a/pandas/tests/indexes/period/test_arithmetic.py +++ b/pandas/tests/indexes/period/test_arithmetic.py @@ -9,6 +9,7 @@ period_range, Period, PeriodIndex, _np_version_under1p10) import pandas.core.indexes.period as period +from pandas.errors import PerformanceWarning _common_mismatch = [pd.offsets.YearBegin(2), @@ -254,32 +255,57 @@ def test_comp_nat(self, dtype): class TestPeriodIndexArithmetic(object): - def test_pi_add_offset_array(self): + @pytest.mark.parametrize('box', [np.array, pd.Index]) + def test_pi_add_offset_array(self, box): # GH#18849 pi = pd.PeriodIndex([pd.Period('2015Q1'), pd.Period('2016Q2')]) - offs = np.array([pd.offsets.QuarterEnd(n=1, startingMonth=12), - pd.offsets.QuarterEnd(n=-2, startingMonth=12)]) - res = pi + offs + offs = box([pd.offsets.QuarterEnd(n=1, startingMonth=12), + pd.offsets.QuarterEnd(n=-2, startingMonth=12)]) expected = pd.PeriodIndex([pd.Period('2015Q2'), pd.Period('2015Q4')]) + + with tm.assert_produces_warning(PerformanceWarning): + res = pi + offs tm.assert_index_equal(res, expected) + with tm.assert_produces_warning(PerformanceWarning): + res2 = offs + pi + tm.assert_index_equal(res2, expected) + unanchored = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)]) + # addition/subtraction ops with incompatible offsets should issue + # a PerformanceWarning and _then_ raise a TypeError. 
with pytest.raises(period.IncompatibleFrequency): - pi + unanchored - with pytest.raises(TypeError): - unanchored + pi + with tm.assert_produces_warning(PerformanceWarning): + pi + unanchored + with pytest.raises(period.IncompatibleFrequency): + with tm.assert_produces_warning(PerformanceWarning): + unanchored + pi - @pytest.mark.xfail(reason='GH#18824 radd doesnt implement this case') - def test_pi_radd_offset_array(self): - # GH#18849 + @pytest.mark.parametrize('box', [np.array, pd.Index]) + def test_pi_sub_offset_array(self, box): + # GH#18824 pi = pd.PeriodIndex([pd.Period('2015Q1'), pd.Period('2016Q2')]) - offs = np.array([pd.offsets.QuarterEnd(n=1, startingMonth=12), - pd.offsets.QuarterEnd(n=-2, startingMonth=12)]) - res = offs + pi - expected = pd.PeriodIndex([pd.Period('2015Q2'), pd.Period('2015Q4')]) + other = box([pd.offsets.QuarterEnd(n=1, startingMonth=12), + pd.offsets.QuarterEnd(n=-2, startingMonth=12)]) + + expected = PeriodIndex([pi[n] - other[n] for n in range(len(pi))]) + + with tm.assert_produces_warning(PerformanceWarning): + res = pi - other tm.assert_index_equal(res, expected) + anchored = box([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)]) + + # addition/subtraction ops with anchored offsets should issue + # a PerformanceWarning and _then_ raise a TypeError. + with pytest.raises(period.IncompatibleFrequency): + with tm.assert_produces_warning(PerformanceWarning): + pi - anchored + with pytest.raises(period.IncompatibleFrequency): + with tm.assert_produces_warning(PerformanceWarning): + anchored - pi + def test_pi_add_iadd_pi_raises(self): rng = pd.period_range('1/1/2000', freq='D', periods=5) other = pd.period_range('1/6/2000', freq='D', periods=5)
This already works for DTI/TDI; this PR just rounds it out. There will be some code de-duplication we can do if/when #19744 goes through.
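A hedged usage sketch of the behavior this rounds out, based on the added tests (exact reprs may vary by version): adding an array of anchored `DateOffset`s to a `PeriodIndex` now works element-wise, with a `PerformanceWarning` because it falls back to object dtype:

```python
import warnings
import numpy as np
import pandas as pd

pi = pd.PeriodIndex([pd.Period('2015Q1'), pd.Period('2016Q2')])
offs = np.array([pd.offsets.QuarterEnd(n=1, startingMonth=12),
                 pd.offsets.QuarterEnd(n=-2, startingMonth=12)])

with warnings.catch_warnings():
    # The object-dtype fallback issues a PerformanceWarning, as tested above.
    warnings.simplefilter('ignore')
    res = pi + offs

print(res)  # PeriodIndex(['2015Q2', '2015Q4'], dtype='period[Q-DEC]')
```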
https://api.github.com/repos/pandas-dev/pandas/pulls/19826
2018-02-21T20:52:33Z
2018-02-22T03:08:50Z
2018-02-22T03:08:50Z
2018-02-22T05:45:04Z
Keep subclassing in apply
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index ed93503388893..9dbbe3f9e2b77 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -295,8 +295,10 @@ Other Enhancements - ``IntervalIndex.astype`` now supports conversions between subtypes when passed an ``IntervalDtype`` (:issue:`19197`) - :class:`IntervalIndex` and its associated constructor methods (``from_arrays``, ``from_breaks``, ``from_tuples``) have gained a ``dtype`` parameter (:issue:`19262`) - Added :func:`SeriesGroupBy.is_monotonic_increasing` and :func:`SeriesGroupBy.is_monotonic_decreasing` (:issue:`17015`) +- For subclassed ``DataFrames``, :func:`DataFrame.apply` will now preserve the ``Series`` subclass (if defined) when passing the data to the applied function (:issue:`19822`) - :func:`DataFrame.from_dict` now accepts a ``columns`` argument that can be used to specify the column names when ``orient='index'`` is used (:issue:`18529`) + .. _whatsnew_0230.api_breaking: Backwards incompatible API changes diff --git a/pandas/core/apply.py b/pandas/core/apply.py index c65943fbbb201..9056f78ee02ed 100644 --- a/pandas/core/apply.py +++ b/pandas/core/apply.py @@ -162,7 +162,7 @@ def apply_empty_result(self): pass if reduce: - return Series(np.nan, index=self.agg_axis) + return self.obj._constructor_sliced(np.nan, index=self.agg_axis) else: return self.obj.copy() @@ -175,11 +175,13 @@ def apply_raw(self): result = np.apply_along_axis(self.f, self.axis, self.values) # TODO: mixed type case - from pandas import DataFrame, Series if result.ndim == 2: - return DataFrame(result, index=self.index, columns=self.columns) + return self.obj._constructor(result, + index=self.index, + columns=self.columns) else: - return Series(result, index=self.agg_axis) + return self.obj._constructor_sliced(result, + index=self.agg_axis) def apply_broadcast(self, target): result_values = np.empty_like(target.values) @@ -232,7 +234,7 @@ def apply_standard(self): axis=self.axis, dummy=dummy, labels=labels) - return Series(result, index=labels) + return self.obj._constructor_sliced(result, index=labels) except Exception: pass @@ -291,8 +293,7 @@ def wrap_results(self): return self.wrap_results_for_axis() # dict of scalars - from pandas import Series - result = Series(results) + result = self.obj._constructor_sliced(results) result.index = self.res_index return result @@ -379,7 +380,6 @@ def wrap_results_for_axis(self): # we have a non-series and don't want inference elif not isinstance(results[0], ABCSeries): from pandas import Series - result = Series(results) result.index = self.res_index diff --git a/pandas/tests/frame/test_subclass.py b/pandas/tests/frame/test_subclass.py index c52b512c2930a..caaa311e9ee96 100644 --- a/pandas/tests/frame/test_subclass.py +++ b/pandas/tests/frame/test_subclass.py @@ -514,3 +514,59 @@ def test_subclassed_wide_to_long(self): long_frame = pd.wide_to_long(df, ["A", "B"], i="id", j="year") tm.assert_frame_equal(long_frame, expected) + + def test_subclassed_apply(self): + # GH 19822 + + def check_row_subclass(row): + assert isinstance(row, tm.SubclassedSeries) + + def strech(row): + if row["variable"] == "height": + row["value"] += 0.5 + return row + + df = tm.SubclassedDataFrame([ + ['John', 'Doe', 'height', 5.5], + ['Mary', 'Bo', 'height', 6.0], + ['John', 'Doe', 'weight', 130], + ['Mary', 'Bo', 'weight', 150]], + columns=['first', 'last', 'variable', 'value']) + + df.apply(lambda x: check_row_subclass(x)) + df.apply(lambda x: check_row_subclass(x), axis=1) + + 
expected = tm.SubclassedDataFrame([ + ['John', 'Doe', 'height', 6.0], + ['Mary', 'Bo', 'height', 6.5], + ['John', 'Doe', 'weight', 130], + ['Mary', 'Bo', 'weight', 150]], + columns=['first', 'last', 'variable', 'value']) + + result = df.apply(lambda x: strech(x), axis=1) + assert isinstance(result, tm.SubclassedDataFrame) + tm.assert_frame_equal(result, expected) + + expected = tm.SubclassedDataFrame([ + [1, 2, 3], + [1, 2, 3], + [1, 2, 3], + [1, 2, 3]]) + + result = df.apply(lambda x: tm.SubclassedSeries([1, 2, 3]), axis=1) + assert isinstance(result, tm.SubclassedDataFrame) + tm.assert_frame_equal(result, expected) + + result = df.apply(lambda x: [1, 2, 3], axis=1, result_type="expand") + assert isinstance(result, tm.SubclassedDataFrame) + tm.assert_frame_equal(result, expected) + + expected = tm.SubclassedSeries([ + [1, 2, 3], + [1, 2, 3], + [1, 2, 3], + [1, 2, 3]]) + + result = df.apply(lambda x: [1, 2, 3], axis=1) + assert not isinstance(result, tm.SubclassedDataFrame) + tm.assert_series_equal(result, expected)
When generating new objects in `apply`, it now calls `self.obj._constructor` and `self.obj._constructor_sliced` instead of `DataFrame` and `Series`, keeping the subclass. - [x] closes #19822 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] keeps subclass inside `DataFrame.apply`
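A minimal illustration of the fixed behavior, assuming a subclass pair wired together the same way as the `tm.SubclassedDataFrame`/`tm.SubclassedSeries` test helpers (the class names here are made up):

```python
import pandas as pd

class MySeries(pd.Series):
    @property
    def _constructor(self):
        return MySeries

class MyFrame(pd.DataFrame):
    @property
    def _constructor(self):
        return MyFrame

    @property
    def _constructor_sliced(self):
        # Rows/columns sliced from a MyFrame become MySeries
        return MySeries

df = MyFrame({'a': [1, 2], 'b': [3, 4]})
# After this PR, each row handed to the applied function is a MySeries,
# not a plain pandas.Series.
df.apply(lambda row: print(type(row).__name__), axis=1)
```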
https://api.github.com/repos/pandas-dev/pandas/pulls/19823
2018-02-21T19:14:17Z
2018-02-24T15:10:35Z
2018-02-24T15:10:35Z
2018-02-24T15:10:37Z
Fix rfloordiv return type, un-xfail Timedelta tests
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 76c4fa08fca4d..4290ae68ce785 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -765,6 +765,7 @@ Timedelta - Bug in :func:`Timedelta.__floordiv__`, :func:`Timedelta.__rfloordiv__` where operating with a ``Tick`` object would raise a ``TypeError`` instead of returning a numeric value (:issue:`19738`) - Bug in :func:`Period.asfreq` where periods near ``datetime(1, 1, 1)`` could be converted incorrectly (:issue:`19643`) - Bug in :func:`Timedelta.total_seconds()` causing precision errors i.e. ``Timedelta('30S').total_seconds()==30.000000000000004`` (:issue:`19458`) +- Bug in :func: `Timedelta.__rmod__` where operating with a ``numpy.timedelta64`` returned a ``timedelta64`` object instead of a ``Timedelta`` (:issue:`19820`) - Multiplication of :class:`TimedeltaIndex` by ``TimedeltaIndex`` will now raise ``TypeError`` instead of raising ``ValueError`` in cases of length mis-match (:issue`19333`) - diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx index 78fdeb988e0f2..15d6252f93592 100644 --- a/pandas/_libs/tslibs/timedeltas.pyx +++ b/pandas/_libs/tslibs/timedeltas.pyx @@ -1109,7 +1109,11 @@ class Timedelta(_Timedelta): return self // other.delta return NotImplemented - if hasattr(other, 'dtype'): + elif is_timedelta64_object(other): + # convert to Timedelta below + pass + + elif hasattr(other, 'dtype'): if other.dtype.kind == 'm': # also timedelta-like return _broadcast_floordiv_td64(self.value, other, _floordiv) @@ -1144,7 +1148,11 @@ class Timedelta(_Timedelta): return other.delta // self return NotImplemented - if hasattr(other, 'dtype'): + elif is_timedelta64_object(other): + # convert to Timedelta below + pass + + elif hasattr(other, 'dtype'): if other.dtype.kind == 'm': # also timedelta-like return _broadcast_floordiv_td64(self.value, other, _rfloordiv) diff --git a/pandas/tests/scalar/timedelta/test_arithmetic.py b/pandas/tests/scalar/timedelta/test_arithmetic.py index 48da23f3575ab..90f745c2a641c 100644 --- a/pandas/tests/scalar/timedelta/test_arithmetic.py +++ b/pandas/tests/scalar/timedelta/test_arithmetic.py @@ -441,7 +441,6 @@ def test_mod_timedeltalike(self): result = td % NaT assert result is NaT - @pytest.mark.xfail(reason='GH#19378 floordiv td64 returns td64') def test_mod_timedelta64_nat(self): # GH#19365 td = Timedelta(hours=37) @@ -449,7 +448,6 @@ def test_mod_timedelta64_nat(self): result = td % np.timedelta64('NaT', 'ns') assert result is NaT - @pytest.mark.xfail(reason='GH#19378 floordiv td64 returns td64') def test_mod_timedelta64(self): # GH#19365 td = Timedelta(hours=37) @@ -458,7 +456,6 @@ def test_mod_timedelta64(self): assert isinstance(result, Timedelta) assert result == Timedelta(hours=1) - @pytest.mark.xfail(reason='GH#19378 floordiv by Tick not implemented') def test_mod_offset(self): # GH#19365 td = Timedelta(hours=37) @@ -505,7 +502,6 @@ def test_rmod_pytimedelta(self): assert isinstance(result, Timedelta) assert result == Timedelta(minutes=1) - @pytest.mark.xfail(reason='GH#19378 floordiv by Tick not implemented') def test_rmod_timedelta64(self): # GH#19365 td = Timedelta(minutes=3) @@ -564,7 +560,6 @@ def test_divmod(self): assert np.isnan(result[0]) assert result[1] is pd.NaT - @pytest.mark.xfail(reason='GH#19378 floordiv by Tick not implemented') def test_divmod_offset(self): # GH#19365 td = Timedelta(days=2, hours=6) @@ -588,7 +583,6 @@ def test_rdivmod_pytimedelta(self): assert isinstance(result[1], 
Timedelta) assert result[1] == Timedelta(hours=6) - @pytest.mark.xfail(reason='GH#19378 floordiv by Tick not implemented') def test_rdivmod_offset(self): result = divmod(pd.offsets.Hour(54), Timedelta(hours=-4)) assert result[0] == -14
- [ ] closes #xxxx - [x] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
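A quick behavior check distilled from the un-xfailed tests (a sketch, not part of the PR): `%` between a `Timedelta` and a raw `numpy.timedelta64` now returns a pandas `Timedelta` instead of a numpy scalar:

```python
import numpy as np
import pandas as pd

td = pd.Timedelta(hours=37)
result = td % np.timedelta64(2, 'h')
# 37h mod 2h -> 1h, returned as a Timedelta rather than a timedelta64
print(type(result).__name__, result)  # Timedelta 0 days 01:00:00
```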
https://api.github.com/repos/pandas-dev/pandas/pulls/19820
2018-02-21T18:16:05Z
2018-02-22T11:15:41Z
2018-02-22T11:15:41Z
2018-06-22T03:22:39Z
fix Timedelta.__mul__(NaT)
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index a4b943f995a33..a24eabc219764 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -735,6 +735,7 @@ Datetimelike Timedelta ^^^^^^^^^ +- Bug in :func:`Timedelta.__mul__` where multiplying by ``NaT`` returned ``NaT`` instead of raising a ``TypeError`` (:issue:`19819`) - Bug in :class:`Series` with ``dtype='timedelta64[ns]`` where addition or subtraction of ``TimedeltaIndex`` had results cast to ``dtype='int64'`` (:issue:`17250`) - Bug in :class:`Series` with ``dtype='timedelta64[ns]`` where addition or subtraction of ``TimedeltaIndex`` could return a ``Series`` with an incorrect name (:issue:`19043`) - Bug in :func:`Timedelta.__floordiv__` and :func:`Timedelta.__rfloordiv__` dividing by many incompatible numpy objects was incorrectly allowed (:issue:`18846`) diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx index 78fdeb988e0f2..1285cbb9ff62b 100644 --- a/pandas/_libs/tslibs/timedeltas.pyx +++ b/pandas/_libs/tslibs/timedeltas.pyx @@ -1057,7 +1057,7 @@ class Timedelta(_Timedelta): return other * self.to_timedelta64() elif other is NaT: - return NaT + raise TypeError('Cannot multiply Timedelta with NaT') elif not (is_integer_object(other) or is_float_object(other)): # only integers and floats allowed diff --git a/pandas/tests/scalar/timedelta/test_arithmetic.py b/pandas/tests/scalar/timedelta/test_arithmetic.py index 48da23f3575ab..8460633febba9 100644 --- a/pandas/tests/scalar/timedelta/test_arithmetic.py +++ b/pandas/tests/scalar/timedelta/test_arithmetic.py @@ -216,6 +216,16 @@ class TestTimedeltaMultiplicationDivision(object): # --------------------------------------------------------------- # Timedelta.__mul__, __rmul__ + @pytest.mark.parametrize('td_nat', [pd.NaT, + np.timedelta64('NaT', 'ns'), + np.timedelta64('NaT')]) + @pytest.mark.parametrize('op', [operator.mul, ops.rmul]) + def test_td_mul_nat(self, op, td_nat): + # GH#19819 + td = Timedelta(10, unit='d') + with pytest.raises(TypeError): + op(td, td_nat) + @pytest.mark.parametrize('op', [operator.mul, ops.rmul]) def test_td_mul_scalar(self, op): # GH#19738 diff --git a/pandas/tests/scalar/timedelta/test_timedelta.py b/pandas/tests/scalar/timedelta/test_timedelta.py index 4257c610fb960..a80c5d6611b8a 100644 --- a/pandas/tests/scalar/timedelta/test_timedelta.py +++ b/pandas/tests/scalar/timedelta/test_timedelta.py @@ -62,11 +62,6 @@ def test_unary_ops(self): assert abs(-td) == td assert abs(-td) == Timedelta('10d') - def test_binary_ops_nat(self): - td = Timedelta(10, unit='d') - # FIXME: The next test is wrong: td * NaT should raise - assert (td * pd.NaT) is pd.NaT - class TestTimedeltaComparison(object): def test_comparison_object_array(self):
- [ ] closes #xxxx - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
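A short check of the new behavior (sketch only): multiplying a `Timedelta` by `NaT` now raises instead of silently returning `NaT`:

```python
import pandas as pd

td = pd.Timedelta(10, unit='d')
try:
    td * pd.NaT
except TypeError as err:
    # Raises with "Cannot multiply Timedelta with NaT"
    print(err)
```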
https://api.github.com/repos/pandas-dev/pandas/pulls/19819
2018-02-21T17:49:17Z
2018-02-22T11:12:52Z
2018-02-22T11:12:52Z
2018-06-22T03:22:56Z
Update df.to_stata() docstring
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index d81d22173bfbd..e34aaaae9b887 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -1612,7 +1612,7 @@ def to_stata(self, fname, convert_dates=None, write_index=True, time_stamp : datetime A datetime to use as file creation date. Default is the current time. - dataset_label : str + data_label : str A label for the data set. Must be 80 characters or smaller. variable_labels : dict Dictionary containing columns as keys and variable labels as @@ -1635,10 +1635,18 @@ def to_stata(self, fname, convert_dates=None, write_index=True, Examples -------- + >>> data.to_stata('./data_file.dta') + + Or with dates + + >>> data.to_stata('./date_data_file.dta', {2 : 'tw'}) + + Alternatively you can create an instance of the StataWriter class + >>> writer = StataWriter('./data_file.dta', data) >>> writer.write_file() - Or with dates + With dates: >>> writer = StataWriter('./date_data_file.dta', data, {2 : 'tw'}) >>> writer.write_file()
Updates the docstring for the `to_stata()` method to fix the `dataset_label` parameter. See #19817. Also changes the examples; it's a little weird for the examples for `to_stata()` to detail the `StataWriter` class. - [ ] closes #19817 - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
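For reference, a sketch of the simplified usage the updated docstring now leads with (file paths and column contents here are placeholders):

```python
import pandas as pd

data = pd.DataFrame({'v1': [1.0, 2.0],
                     'date': pd.to_datetime(['2018-01-01', '2018-02-01'])})
data.to_stata('./data_file.dta')

# With the 'date' column written in Stata weekly format ('tw'):
data.to_stata('./date_data_file.dta', convert_dates={'date': 'tw'})
```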
https://api.github.com/repos/pandas-dev/pandas/pulls/19818
2018-02-21T17:04:17Z
2018-02-22T02:20:06Z
2018-02-22T02:20:06Z
2018-02-22T02:20:09Z
ASV: fix occasional failure in the excel benchmark
diff --git a/asv_bench/benchmarks/io/excel.py b/asv_bench/benchmarks/io/excel.py index a7c6c43d15026..58ab6bb8046c5 100644 --- a/asv_bench/benchmarks/io/excel.py +++ b/asv_bench/benchmarks/io/excel.py @@ -25,13 +25,12 @@ def setup(self, engine): self.writer_read.save() self.bio_read.seek(0) - self.bio_write = BytesIO() - self.bio_write.seek(0) - self.writer_write = ExcelWriter(self.bio_write, engine=engine) - def time_read_excel(self, engine): read_excel(self.bio_read) def time_write_excel(self, engine): - self.df.to_excel(self.writer_write, sheet_name='Sheet1') - self.writer_write.save() + bio_write = BytesIO() + bio_write.seek(0) + writer_write = ExcelWriter(bio_write, engine=engine) + self.df.to_excel(writer_write, sheet_name='Sheet1') + writer_write.save()
closes #19779
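The gist of the fix, as a sketch: the `BytesIO` buffer and `ExcelWriter` are created inside the timed method, since asv calls it repeatedly and reusing a writer that `save()` has already closed was the likely source of the intermittent failures:

```python
from io import BytesIO
from pandas import DataFrame, ExcelWriter

def time_write_excel(df, engine):
    # Fresh buffer and writer on every timed call
    bio = BytesIO()
    writer = ExcelWriter(bio, engine=engine)
    df.to_excel(writer, sheet_name='Sheet1')
    writer.save()

time_write_excel(DataFrame({'a': range(100)}), engine='openpyxl')
```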
https://api.github.com/repos/pandas-dev/pandas/pulls/19811
2018-02-21T11:05:51Z
2018-02-21T11:53:21Z
2018-02-21T11:53:21Z
2018-02-21T11:53:21Z
BF: Skip test_read_excel_parse_dates when xlwt (used by to_excel) is not installed
diff --git a/pandas/tests/io/test_excel.py b/pandas/tests/io/test_excel.py index ebb8424b78ed4..4c790a0f0f64a 100644 --- a/pandas/tests/io/test_excel.py +++ b/pandas/tests/io/test_excel.py @@ -968,6 +968,7 @@ def test_read_excel_chunksize(self): def test_read_excel_parse_dates(self): # GH 11544, 12051 _skip_if_no_openpyxl() + _skip_if_no_xlwt() # for df2.to_excel df = DataFrame( {'col': [1, 2, 3],
https://api.github.com/repos/pandas-dev/pandas/pulls/19803
2018-02-20T22:37:24Z
2018-02-21T11:30:59Z
2018-02-21T11:30:59Z
2018-02-21T17:49:01Z
ENH: Add columns parameter to from_dict
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index a4b943f995a33..f3dca23d1039f 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -295,6 +295,7 @@ Other Enhancements - ``IntervalIndex.astype`` now supports conversions between subtypes when passed an ``IntervalDtype`` (:issue:`19197`) - :class:`IntervalIndex` and its associated constructor methods (``from_arrays``, ``from_breaks``, ``from_tuples``) have gained a ``dtype`` parameter (:issue:`19262`) - Added :func:`SeriesGroupBy.is_monotonic_increasing` and :func:`SeriesGroupBy.is_monotonic_decreasing` (:issue:`17015`) +- :func:`DataFrame.from_dict` now accepts a ``columns`` argument that can be used to specify the column names when ``orient='index'`` is used (:issue:`18529`) .. _whatsnew_0230.api_breaking: diff --git a/pandas/core/frame.py b/pandas/core/frame.py index d81d22173bfbd..d006fd755cd35 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -876,7 +876,7 @@ def dot(self, other): # IO methods (to / from other formats) @classmethod - def from_dict(cls, data, orient='columns', dtype=None): + def from_dict(cls, data, orient='columns', dtype=None, columns=None): """ Construct DataFrame from dict of array-like or dicts @@ -890,12 +890,17 @@ def from_dict(cls, data, orient='columns', dtype=None): (default). Otherwise if the keys should be rows, pass 'index'. dtype : dtype, default None Data type to force, otherwise infer + columns: list, default None + Column labels to use when orient='index'. Raises a ValueError + if used with orient='columns' + + .. versionadded:: 0.23.0 Returns ------- DataFrame """ - index, columns = None, None + index = None orient = orient.lower() if orient == 'index': if len(data) > 0: @@ -904,7 +909,11 @@ def from_dict(cls, data, orient='columns', dtype=None): data = _from_nested_dict(data) else: data, index = list(data.values()), list(data.keys()) - elif orient != 'columns': # pragma: no cover + elif orient == 'columns': + if columns is not None: + raise ValueError("cannot use columns parameter with " + "orient='columns'") + else: # pragma: no cover raise ValueError('only recognize index or columns for orient') return cls(data, index=index, columns=columns, dtype=dtype) diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py index 8abd88d8a379c..394997201f320 100644 --- a/pandas/tests/frame/test_constructors.py +++ b/pandas/tests/frame/test_constructors.py @@ -1091,6 +1091,25 @@ def test_constructor_orient(self): xp = DataFrame.from_dict(a).T.reindex(list(a.keys())) tm.assert_frame_equal(rs, xp) + def test_from_dict_columns_parameter(self): + # GH 18529 + # Test new columns parameter for from_dict that was added to make + # from_items(..., orient='index', columns=[...]) easier to replicate + result = DataFrame.from_dict(OrderedDict([('A', [1, 2]), + ('B', [4, 5])]), + orient='index', columns=['one', 'two']) + expected = DataFrame([[1, 2], [4, 5]], index=['A', 'B'], + columns=['one', 'two']) + tm.assert_frame_equal(result, expected) + + msg = "cannot use columns parameter with orient='columns'" + with tm.assert_raises_regex(ValueError, msg): + DataFrame.from_dict(dict([('A', [1, 2]), ('B', [4, 5])]), + orient='columns', columns=['one', 'two']) + with tm.assert_raises_regex(ValueError, msg): + DataFrame.from_dict(dict([('A', [1, 2]), ('B', [4, 5])]), + columns=['one', 'two']) + def test_constructor_Series_named(self): a = Series([1, 2, 3], index=['a', 'b', 'c'], name='x') df = DataFrame(a)
- [x] xref #18529 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry @jorisvandenbossche it seems to have been quite straightforward to implement this but let me know if I missed anything.
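The new keyword in action, mirroring the added test:

```python
from collections import OrderedDict
import pandas as pd

data = OrderedDict([('A', [1, 2]), ('B', [4, 5])])
df = pd.DataFrame.from_dict(data, orient='index', columns=['one', 'two'])
print(df)
#    one  two
# A    1    2
# B    4    5
```

Passing `columns` together with the default `orient='columns'` raises a `ValueError`.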
https://api.github.com/repos/pandas-dev/pandas/pulls/19802
2018-02-20T21:56:46Z
2018-02-22T10:34:47Z
2018-02-22T10:34:47Z
2018-02-22T10:47:52Z
dispatch Series[datetime64] comparison ops to DatetimeIndex
diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index eb8133a1bbf97..c9b446b97e956 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -138,11 +138,9 @@ def wrapper(self, other): result = func(np.asarray(other)) result = com._values_from_object(result) - if isinstance(other, Index): - o_mask = other.values.view('i8') == libts.iNaT - else: - o_mask = other.view('i8') == libts.iNaT - + # Make sure to pass an array to result[...]; indexing with + # Series breaks with older version of numpy + o_mask = np.array(isna(other)) if o_mask.any(): result[o_mask] = nat_result diff --git a/pandas/core/ops.py b/pandas/core/ops.py index 83f17a332f4be..931e91b941a7e 100644 --- a/pandas/core/ops.py +++ b/pandas/core/ops.py @@ -10,8 +10,7 @@ import numpy as np import pandas as pd -from pandas._libs import (lib, index as libindex, - algos as libalgos, ops as libops) +from pandas._libs import algos as libalgos, ops as libops from pandas import compat from pandas.util._decorators import Appender @@ -1127,24 +1126,20 @@ def na_op(x, y): # integer comparisons # we have a datetime/timedelta and may need to convert + assert not needs_i8_conversion(x) mask = None - if (needs_i8_conversion(x) or - (not is_scalar(y) and needs_i8_conversion(y))): - - if is_scalar(y): - mask = isna(x) - y = libindex.convert_scalar(x, com._values_from_object(y)) - else: - mask = isna(x) | isna(y) - y = y.view('i8') + if not is_scalar(y) and needs_i8_conversion(y): + mask = isna(x) | isna(y) + y = y.view('i8') x = x.view('i8') - try: + method = getattr(x, name, None) + if method is not None: with np.errstate(all='ignore'): - result = getattr(x, name)(y) + result = method(y) if result is NotImplemented: raise TypeError("invalid type comparison") - except AttributeError: + else: result = op(x, y) if mask is not None and mask.any(): @@ -1174,6 +1169,14 @@ def wrapper(self, other, axis=None): return self._constructor(res_values, index=self.index, name=res_name) + if is_datetime64_dtype(self) or is_datetime64tz_dtype(self): + # Dispatch to DatetimeIndex to ensure identical + # Series/Index behavior + res_values = dispatch_to_index_op(op, self, other, + pd.DatetimeIndex) + return self._constructor(res_values, index=self.index, + name=res_name) + elif is_timedelta64_dtype(self): res_values = dispatch_to_index_op(op, self, other, pd.TimedeltaIndex) @@ -1191,8 +1194,7 @@ def wrapper(self, other, axis=None): elif isinstance(other, (np.ndarray, pd.Index)): # do not check length of zerodim array # as it will broadcast - if (not is_scalar(lib.item_from_zerodim(other)) and - len(self) != len(other)): + if other.ndim != 0 and len(self) != len(other): raise ValueError('Lengths must match to compare') res_values = na_op(self.values, np.asarray(other)) diff --git a/pandas/tests/indexes/datetimes/test_partial_slicing.py b/pandas/tests/indexes/datetimes/test_partial_slicing.py index 6bb4229883525..f263ac78cd343 100644 --- a/pandas/tests/indexes/datetimes/test_partial_slicing.py +++ b/pandas/tests/indexes/datetimes/test_partial_slicing.py @@ -2,7 +2,7 @@ import pytest -from datetime import datetime, date +from datetime import datetime import numpy as np import pandas as pd import operator as op @@ -349,7 +349,7 @@ def test_loc_datetime_length_one(self): @pytest.mark.parametrize('datetimelike', [ Timestamp('20130101'), datetime(2013, 1, 1), - date(2013, 1, 1), np.datetime64('2013-01-01T00:00', 'ns')]) + np.datetime64('2013-01-01T00:00', 'ns')]) @pytest.mark.parametrize('op,expected', [ 
(op.lt, [True, False, False, False]), (op.le, [True, True, False, False]), diff --git a/pandas/tests/series/test_arithmetic.py b/pandas/tests/series/test_arithmetic.py index 5b8d9cfab3e0d..ec0d7296e540e 100644 --- a/pandas/tests/series/test_arithmetic.py +++ b/pandas/tests/series/test_arithmetic.py @@ -88,6 +88,23 @@ def test_ser_cmp_result_names(self, names, op): class TestTimestampSeriesComparison(object): + def test_dt64ser_cmp_date_invalid(self): + # GH#19800 datetime.date comparison raises to + # match DatetimeIndex/Timestamp. This also matches the behavior + # of stdlib datetime.datetime + ser = pd.Series(pd.date_range('20010101', periods=10), name='dates') + date = ser.iloc[0].to_pydatetime().date() + assert not (ser == date).any() + assert (ser != date).all() + with pytest.raises(TypeError): + ser > date + with pytest.raises(TypeError): + ser < date + with pytest.raises(TypeError): + ser >= date + with pytest.raises(TypeError): + ser <= date + def test_dt64ser_cmp_period_scalar(self): ser = Series(pd.period_range('2000-01-01', periods=10, freq='D')) val = Period('2000-01-04', freq='D') diff --git a/pandas/tests/test_base.py b/pandas/tests/test_base.py index 4b5ad336139b0..6247079e4ac3a 100644 --- a/pandas/tests/test_base.py +++ b/pandas/tests/test_base.py @@ -10,7 +10,7 @@ import pandas as pd import pandas.compat as compat from pandas.core.dtypes.common import ( - is_object_dtype, is_datetimetz, + is_object_dtype, is_datetimetz, is_datetime64_dtype, needs_i8_conversion) import pandas.util.testing as tm from pandas import (Series, Index, DatetimeIndex, TimedeltaIndex, @@ -296,14 +296,21 @@ def test_none_comparison(self): # result = None != o # noqa # assert result.iat[0] # assert result.iat[1] + if (is_datetime64_dtype(o) or is_datetimetz(o)): + # Following DatetimeIndex (and Timestamp) convention, + # inequality comparisons with Series[datetime64] raise + with pytest.raises(TypeError): + None > o + with pytest.raises(TypeError): + o > None + else: + result = None > o + assert not result.iat[0] + assert not result.iat[1] - result = None > o - assert not result.iat[0] - assert not result.iat[1] - - result = o < None - assert not result.iat[0] - assert not result.iat[1] + result = o < None + assert not result.iat[0] + assert not result.iat[1] def test_ndarray_compat_properties(self):
When we tried this a few weeks ago (#19524), there were idiosyncratic CI errors. Hopefully the smaller scope here will avoid that problem. If this works, we'll be very nearly ready to move these ops to the Block level and ensure consistency with DataFrame in addition to Index/Series.
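A behavior check distilled from the added test (a sketch): a datetime64 `Series` compared against a `datetime.date` now matches `DatetimeIndex`/`Timestamp` semantics, so equality returns all-False and ordering comparisons raise:

```python
import pandas as pd

ser = pd.Series(pd.date_range('20010101', periods=3), name='dates')
d = ser.iloc[0].to_pydatetime().date()

print((ser == d).any())  # False
try:
    ser > d
except TypeError:
    print('ordering comparisons with datetime.date raise')
```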
https://api.github.com/repos/pandas-dev/pandas/pulls/19800
2018-02-20T21:16:13Z
2018-03-01T22:54:43Z
2018-03-01T22:54:43Z
2018-06-22T03:24:44Z
DOC: Edit installation instructions for clarity.
diff --git a/doc/source/install.rst b/doc/source/install.rst index c4e331d64e721..4ff63d59024b2 100644 --- a/doc/source/install.rst +++ b/doc/source/install.rst @@ -6,7 +6,7 @@ Installation ============ -The easiest way for the majority of users to install pandas is to install it +The easiest way to install pandas is to install it as part of the `Anaconda <http://docs.continuum.io/anaconda/>`__ distribution, a cross platform distribution for data analysis and scientific computing. This is the recommended installation method for most users. @@ -40,7 +40,7 @@ packages that make up the `SciPy <http://www.scipy.org/>`__ stack (Linux, Mac OS X, Windows) Python distribution for data analytics and scientific computing. -After running a simple installer, the user will have access to pandas and the +After running the installer, the user will have access to pandas and the rest of the `SciPy <http://www.scipy.org/>`__ stack without needing to install anything else, and without needing to wait for any software to be compiled. @@ -51,9 +51,9 @@ A full list of the packages available as part of the `Anaconda <http://docs.continuum.io/anaconda/>`__ distribution `can be found here <http://docs.continuum.io/anaconda/pkg-docs.html>`__. -An additional advantage of installing with Anaconda is that you don't require -admin rights to install it, it will install in the user's home directory, and -this also makes it trivial to delete Anaconda at a later date (just delete +Another advantage to installing Anaconda is that you don't need +admin rights to install it. Anaconda can install in the user's home directory, +which makes it trivial to delete Anaconda if you decide (just delete that folder). .. _install.miniconda: @@ -85,9 +85,9 @@ downloading and running the `Miniconda will do this for you. The installer `can be found here <http://conda.pydata.org/miniconda.html>`__ -The next step is to create a new conda environment (these are analogous to a -virtualenv but they also allow you to specify precisely which Python version -to install also). Run the following commands from a terminal window:: +The next step is to create a new conda environment. A conda environment is like a +virtualenv that allows you to specify a specific version of Python and set of libraries. +Run the following commands from a terminal window:: conda create -n name_of_my_env python @@ -118,8 +118,8 @@ distribution:: conda install anaconda -If you require any packages that are available to pip but not conda, simply -install pip, and use pip to install these packages:: +If you need packages that are available to pip but not conda, then +install pip, and then use pip to install those packages:: conda install pip pip install django @@ -134,15 +134,12 @@ pandas can be installed via pip from pip install pandas -This will likely require the installation of a number of dependencies, -including NumPy, will require a compiler to compile required bits of code, -and can take a few minutes to complete. Installing using your Linux distribution's package manager. ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The commands in this table will install pandas for Python 3 from your distribution. -To install pandas for Python 2 you may need to use the package ``python-pandas``. +To install pandas for Python 2, you may need to use the ``python-pandas`` package. .. 
csv-table:: :header: "Distribution", "Status", "Download / Repository Link", "Install method" @@ -169,9 +166,9 @@ See the :ref:`contributing documentation <contributing>` for complete instructio Running the test suite ~~~~~~~~~~~~~~~~~~~~~~ -pandas is equipped with an exhaustive set of unit tests covering about 97% of +pandas is equipped with an exhaustive set of unit tests, covering about 97% of the codebase as of this writing. To run it on your machine to verify that -everything is working (and you have all of the dependencies, soft and hard, +everything is working (and that you have all of the dependencies, soft and hard, installed), make sure you have `pytest <http://doc.pytest.org/en/latest/>`__ and run: @@ -214,8 +211,8 @@ Recommended Dependencies .. note:: - You are highly encouraged to install these libraries, as they provide large speedups, especially - if working with large data sets. + You are highly encouraged to install these libraries, as they provide speed improvements, especially + when working with large data sets. .. _install.optional_dependencies:
Rewrote some lines to ease reading. Didn't change substance.
https://api.github.com/repos/pandas-dev/pandas/pulls/19798
2018-02-20T20:21:41Z
2018-02-21T10:56:22Z
2018-02-21T10:56:22Z
2018-02-21T10:56:32Z
Simplify bool ops closures, improve tests
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index fb19fd81fe7c7..8045f152f8dd5 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -651,6 +651,7 @@ Other API Changes - :func:`Series.to_csv` now accepts a ``compression`` argument that works in the same way as the ``compression`` argument in :func:`DataFrame.to_csv` (:issue:`18958`) - Set operations (union, difference...) on :class:`IntervalIndex` with incompatible index types will now raise a ``TypeError`` rather than a ``ValueError`` (:issue:`19329`) - :class:`DateOffset` objects render more simply, e.g. ``<DateOffset: days=1>`` instead of ``<DateOffset: kwds={'days': 1}>`` (:issue:`19403`) +- Boolean operations ``&, |, ^`` between a ``Series`` and an ``Index`` will no longer raise ``TypeError`` (:issue:`19792`) - ``Categorical.fillna`` now validates its ``value`` and ``method`` keyword arguments. It now raises when both or none are specified, matching the behavior of :meth:`Series.fillna` (:issue:`19682`) - :func:`Series.str.replace` now takes an optional `regex` keyword which, when set to ``False``, uses literal string replacement rather than regex replacement (:issue:`16808`) diff --git a/pandas/core/ops.py b/pandas/core/ops.py index 83f17a332f4be..c7039e5e1d9a9 100644 --- a/pandas/core/ops.py +++ b/pandas/core/ops.py @@ -1241,23 +1241,22 @@ def _bool_method_SERIES(cls, op, special): Wrapper function for Series arithmetic operations, to avoid code duplication. """ + name = _get_op_name(op, special) def na_op(x, y): try: result = op(x, y) except TypeError: - if isinstance(y, list): - y = construct_1d_object_array_from_listlike(y) - - if isinstance(y, (np.ndarray, ABCSeries)): - if (is_bool_dtype(x.dtype) and is_bool_dtype(y.dtype)): - result = op(x, y) # when would this be hit? - else: - x = _ensure_object(x) - y = _ensure_object(y) - result = libops.vec_binop(x, y, op) + assert not isinstance(y, (list, ABCSeries, pd.Index)) + if isinstance(y, np.ndarray): + # bool-bool dtype operations should be OK, should not get here + assert not (is_bool_dtype(x) and is_bool_dtype(y)) + x = _ensure_object(x) + y = _ensure_object(y) + result = libops.vec_binop(x, y, op) else: # let null fall thru + assert is_scalar(y) if not isna(y): y = bool(y) try: @@ -1278,32 +1277,39 @@ def wrapper(self, other): self, other = _align_method_SERIES(self, other, align_asobject=True) + res_name = get_op_result_name(self, other) + if isinstance(other, ABCDataFrame): # Defer to DataFrame implementation; fail early return NotImplemented - elif isinstance(other, ABCSeries): - name = get_op_result_name(self, other) + elif isinstance(other, (ABCSeries, pd.Index)): is_other_int_dtype = is_integer_dtype(other.dtype) other = fill_int(other) if is_other_int_dtype else fill_bool(other) - - filler = (fill_int if is_self_int_dtype and is_other_int_dtype - else fill_bool) - - res_values = na_op(self.values, other.values) - unfilled = self._constructor(res_values, - index=self.index, name=name) - return filler(unfilled) + ovalues = other.values else: # scalars, list, tuple, np.array - filler = (fill_int if is_self_int_dtype and - is_integer_dtype(np.asarray(other)) else fill_bool) - - res_values = na_op(self.values, other) - unfilled = self._constructor(res_values, index=self.index) - return filler(unfilled).__finalize__(self) + is_other_int_dtype = is_integer_dtype(np.asarray(other)) + if is_list_like(other) and not isinstance(other, np.ndarray): + # TODO: Can we do this before the is_integer_dtype check? 
+ # could the is_integer_dtype check be checking the wrong + # thing? e.g. other = [[0, 1], [2, 3], [4, 5]]? + other = construct_1d_object_array_from_listlike(other) + ovalues = other + + filler = (fill_int if is_self_int_dtype and is_other_int_dtype + else fill_bool) + + res_values = na_op(self.values, ovalues) + unfilled = self._constructor(res_values, index=self.index, + name=res_name) + result = filler(unfilled) + if not isinstance(other, ABCSeries): + result = result.__finalize__(self) + return result + wrapper.__name__ = name return wrapper diff --git a/pandas/tests/series/test_operators.py b/pandas/tests/series/test_operators.py index f90fcce973f00..834ee75443c52 100644 --- a/pandas/tests/series/test_operators.py +++ b/pandas/tests/series/test_operators.py @@ -178,46 +178,47 @@ def test_comparison_tuples(self): expected = Series([True, False]) assert_series_equal(result, expected) - def test_comparison_operators_with_nas(self): + @pytest.mark.parametrize('op', ['lt', 'le', 'gt', 'ge', 'eq', 'ne']) + def test_comparison_operators_with_nas(self, op): ser = Series(bdate_range('1/1/2000', periods=10), dtype=object) ser[::2] = np.nan # test that comparisons work - ops = ['lt', 'le', 'gt', 'ge', 'eq', 'ne'] - for op in ops: - val = ser[5] + val = ser[5] - f = getattr(operator, op) - result = f(ser, val) + f = getattr(operator, op) + result = f(ser, val) - expected = f(ser.dropna(), val).reindex(ser.index) + expected = f(ser.dropna(), val).reindex(ser.index) - if op == 'ne': - expected = expected.fillna(True).astype(bool) - else: - expected = expected.fillna(False).astype(bool) + if op == 'ne': + expected = expected.fillna(True).astype(bool) + else: + expected = expected.fillna(False).astype(bool) - assert_series_equal(result, expected) + assert_series_equal(result, expected) - # fffffffuuuuuuuuuuuu - # result = f(val, s) - # expected = f(val, s.dropna()).reindex(s.index) - # assert_series_equal(result, expected) + # fffffffuuuuuuuuuuuu + # result = f(val, s) + # expected = f(val, s.dropna()).reindex(s.index) + # assert_series_equal(result, expected) - # boolean &, |, ^ should work with object arrays and propagate NAs + @pytest.mark.parametrize('bool_op', ['and_', 'or_', 'xor']) + def test_bool_operators_with_nas(self, bool_op): + # boolean &, |, ^ should work with object arrays and propagate NAs - ops = ['and_', 'or_', 'xor'] - mask = ser.isna() - for bool_op in ops: - func = getattr(operator, bool_op) + ser = Series(bdate_range('1/1/2000', periods=10), dtype=object) + ser[::2] = np.nan - filled = ser.fillna(ser[0]) + func = getattr(operator, bool_op) + result = func(ser < ser[9], ser > ser[3]) - result = func(ser < ser[9], ser > ser[3]) + mask = ser.isna() + filled = ser.fillna(ser[0]) + expected = func(filled < filled[9], filled > filled[3]) + expected[mask] = False - expected = func(filled < filled[9], filled > filled[3]) - expected[mask] = False - assert_series_equal(result, expected) + assert_series_equal(result, expected) def test_comparison_object_numeric_nas(self): ser = Series(np.random.randn(10), dtype=object) @@ -371,10 +372,10 @@ def test_comparison_different_length(self): b = Series([2, 3, 4]) pytest.raises(ValueError, a.__eq__, b) - def test_comparison_label_based(self): + def test_bool_opslabel_based(self): # GH 4947 - # comparisons should be label based + # boolean ops should be label based a = Series([True, False, True], list('bca')) b = Series([False, True, False], list('abc')) @@ -1398,45 +1399,73 @@ def test_ops_datetimelike_align(self): def 
test_operators_bitwise(self): # GH 9016: support bitwise op for integer types - index = list('bca') + s_0123 = Series(range(4), dtype='int64') + s_1111 = Series([1] * 4, dtype='int8') - s_tft = Series([True, False, True], index=index) - s_fff = Series([False, False, False], index=index) - s_tff = Series([True, False, False], index=index) - s_empty = Series([]) + # We cannot wrap s111.astype(np.int32) with pd.Index because it + # will case to int64 + res = s_0123.astype(np.int16) | s_1111.astype(np.int32) + expected = Series([1, 1, 3, 3], dtype='int32') + assert_series_equal(res, expected) - # TODO: unused - # s_0101 = Series([0, 1, 0, 1]) + def test_operators_bitwise_invalid(self): + # GH#9016: support bitwise op for integer types + s_0123 = Series(range(4), dtype='int64') + s_1111 = Series([1] * 4, dtype='int8') + + with pytest.raises(TypeError): + s_1111 & 'a' + with pytest.raises(TypeError): + s_1111 & ['a', 'b', 'c', 'd'] + with pytest.raises(TypeError): + s_0123 & 3.14 + with pytest.raises(TypeError): + s_0123 & [0.1, 4, 3.14, 2] + def test_operators_bitwise_int_series_with_float_series(self): + # GH#9016: support bitwise op for integer types + s_0123 = Series(range(4), dtype='int64') + s_ftft = Series([False, True, False, True]) + result = s_0123 & Series([0.1, 4, -3.14, 2]) + assert_series_equal(result, s_ftft) + + @pytest.mark.parametrize('box', [np.array, pd.Index, pd.Series]) + def test_operators_bitwise_with_int_arraylike(self, box): + # GH#9016: support bitwise op for integer types + # GH#19795 allow for operating with Index s_0123 = Series(range(4), dtype='int64') s_3333 = Series([3] * 4) s_4444 = Series([4] * 4) - res = s_tft & s_empty - expected = s_fff + res = s_0123 & box(s_3333) + expected = Series(range(4), dtype='int64') assert_series_equal(res, expected) - res = s_tft | s_empty - expected = s_tft + res = s_0123 | box(s_4444) + expected = Series(range(4, 8), dtype='int64') assert_series_equal(res, expected) - res = s_0123 & s_3333 - expected = Series(range(4), dtype='int64') + s_1111 = Series([1] * 4, dtype='int8') + res = s_0123 & box(s_1111) + expected = Series([0, 1, 0, 1], dtype='int64') assert_series_equal(res, expected) - res = s_0123 | s_4444 - expected = Series(range(4, 8), dtype='int64') - assert_series_equal(res, expected) + def test_operators_bitwise_with_bool(self): + # GH#9016: support bitwise op for integer types + s_0123 = Series(range(4), dtype='int64') - s_a0b1c0 = Series([1], list('b')) + assert_series_equal(s_0123 & False, Series([False] * 4)) + assert_series_equal(s_0123 ^ False, Series([False, True, True, True])) + assert_series_equal(s_0123 & [False], Series([False] * 4)) + assert_series_equal(s_0123 & (False), Series([False] * 4)) - res = s_tft & s_a0b1c0 - expected = s_tff.reindex(list('abc')) - assert_series_equal(res, expected) + def test_operators_bitwise_with_integers(self): + # GH#9016: support bitwise op for integer types + index = list('bca') - res = s_tft | s_a0b1c0 - expected = s_tft.reindex(list('abc')) - assert_series_equal(res, expected) + s_tft = Series([True, False, True], index=index) + s_fff = Series([False, False, False], index=index) + s_0123 = Series(range(4), dtype='int64') n0 = 0 res = s_tft & n0 @@ -1456,20 +1485,34 @@ def test_operators_bitwise(self): expected = Series([0, 1, 0, 1]) assert_series_equal(res, expected) - s_1111 = Series([1] * 4, dtype='int8') - res = s_0123 & s_1111 - expected = Series([0, 1, 0, 1], dtype='int64') + def test_operators_bitwise_with_reindexing(self): + # ops where reindeing needs to be done, so 
operating with a Series + # makes sense but with an Index or array does not + # GH#9016: support bitwise op for integer types + index = list('bca') + + s_tft = Series([True, False, True], index=index) + s_fff = Series([False, False, False], index=index) + s_tff = Series([True, False, False], index=index) + s_empty = Series([]) + s_a0b1c0 = Series([1], list('b')) + s_0123 = Series(range(4), dtype='int64') + + res = s_tft & s_empty + expected = s_fff assert_series_equal(res, expected) - res = s_0123.astype(np.int16) | s_1111.astype(np.int32) - expected = Series([1, 1, 3, 3], dtype='int32') + res = s_tft | s_empty + expected = s_tft assert_series_equal(res, expected) - pytest.raises(TypeError, lambda: s_1111 & 'a') - pytest.raises(TypeError, lambda: s_1111 & ['a', 'b', 'c', 'd']) - pytest.raises(TypeError, lambda: s_0123 & np.NaN) - pytest.raises(TypeError, lambda: s_0123 & 3.14) - pytest.raises(TypeError, lambda: s_0123 & [0.1, 4, 3.14, 2]) + res = s_tft & s_a0b1c0 + expected = s_tff.reindex(list('abc')) + assert_series_equal(res, expected) + + res = s_tft | s_a0b1c0 + expected = s_tft.reindex(list('abc')) + assert_series_equal(res, expected) # s_0123 will be all false now because of reindexing like s_tft if compat.PY3: @@ -1491,16 +1534,15 @@ def test_operators_bitwise(self): exp = Series([False] * 7, index=[0, 1, 2, 3, 'a', 'b', 'c']) assert_series_equal(s_0123 & s_tft, exp) - assert_series_equal(s_0123 & False, Series([False] * 4)) - assert_series_equal(s_0123 ^ False, Series([False, True, True, True])) - assert_series_equal(s_0123 & [False], Series([False] * 4)) - assert_series_equal(s_0123 & (False), Series([False] * 4)) - assert_series_equal(s_0123 & Series([False, np.NaN, False, False]), - Series([False] * 4)) + def test_operators_bitwise_with_nans(self): + # with NaNs present op(series, series) works but op(series, index) + # is not implemented + # GH#9016: support bitwise op for integer types + s_0123 = Series(range(4), dtype='int64') + result = s_0123 & Series([False, np.NaN, False, False]) + assert_series_equal(result, Series([False] * 4)) s_ftft = Series([False, True, False, True]) - assert_series_equal(s_0123 & Series([0.1, 4, -3.14, 2]), s_ftft) - s_abNd = Series(['a', 'b', np.NaN, 'd']) res = s_0123 & s_abNd expected = s_ftft @@ -1509,17 +1551,16 @@ def test_operators_bitwise(self): def test_scalar_na_cmp_corners(self): s = Series([2, 3, 4, 5, 6, 7, 8, 9, 10]) - def tester(a, b): - return a & b - - pytest.raises(TypeError, tester, s, datetime(2005, 1, 1)) + with pytest.raises(TypeError): + s & datetime(2005, 1, 1) s = Series([2, 3, 4, 5, 6, 7, 8, 9, datetime(2005, 1, 1)]) s[::2] = np.nan expected = Series(True, index=s.index) expected[::2] = False - assert_series_equal(tester(s, list(s)), expected) + result = s & list(s) + assert_series_equal(result, expected) d = DataFrame({'A': s}) # TODO: Fix this exception - needs to be fixed! (see GH5035) @@ -1530,7 +1571,8 @@ def tester(a, b): # https://github.com/pandas-dev/pandas/issues/5284 pytest.raises(ValueError, lambda: d.__and__(s, axis='columns')) - pytest.raises(ValueError, tester, s, d) + with pytest.raises(ValueError): + s & d # this is wrong as its not a boolean result # result = d.__and__(s,axis='index')
Series bool ops are not going to be possible to dispatch to their Index counterparts, so this one I'm _mostly_ going to leave alone. This PR breaks up the tests, adds an xfail for op(Series, Categorical), and fixes and tests op(Series, Index). It makes some headway in simplifying the closure so that the possible paths through the code are more obvious. There is a TODO comment on line 1113 where I think there is a potential bug, but I need someone who understands the _intended_ behavior better to weigh in.
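Roughly, the op(Series, Index) behavior this patch fixes and tests looks like the sketch below (assuming a build with this patch applied; `s_0123` and the `box` idea follow the test's own naming):

```python
import numpy as np
import pandas as pd

s_0123 = pd.Series(range(4), dtype='int64')

# after this change, int-dtype bitwise ops accept ndarray, Index and Series alike
for box in (np.array, pd.Index, pd.Series):
    res = s_0123 & box([3, 3, 3, 3])
    # 0&3=0, 1&3=1, 2&3=2, 3&3=3 -> the low two bits are kept
    assert list(res) == [0, 1, 2, 3]
```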
https://api.github.com/repos/pandas-dev/pandas/pulls/19795
2018-02-20T18:59:45Z
2018-03-04T21:40:24Z
null
2020-04-05T17:40:57Z
TST: move more series tests to test_arithmetic
diff --git a/pandas/tests/series/test_arithmetic.py b/pandas/tests/series/test_arithmetic.py index f727edf8fb7d8..5b8d9cfab3e0d 100644 --- a/pandas/tests/series/test_arithmetic.py +++ b/pandas/tests/series/test_arithmetic.py @@ -1,17 +1,26 @@ # -*- coding: utf-8 -*- from datetime import datetime, timedelta import operator +from decimal import Decimal import numpy as np import pytest -from pandas import Series, Timestamp, Period +from pandas import Series, Timestamp, Timedelta, Period, NaT from pandas._libs.tslibs.period import IncompatibleFrequency import pandas as pd import pandas.util.testing as tm +@pytest.fixture +def tdser(): + """ + Return a Series with dtype='timedelta64[ns]', including a NaT. + """ + return Series(['59 Days', '59 Days', 'NaT'], dtype='timedelta64[ns]') + + # ------------------------------------------------------------------ # Comparisons @@ -262,6 +271,97 @@ def test_cmp_series_period_series_mixed_freq(self): # ------------------------------------------------------------------ # Arithmetic +class TestSeriesDivision(object): + # __div__, __rdiv__, __floordiv__, __rfloordiv__ + # for non-timestamp/timedelta/period dtypes + + def test_divide_decimal(self): + # resolves issue GH#9787 + expected = Series([Decimal(5)]) + + ser = Series([Decimal(10)]) + result = ser / Decimal(2) + + tm.assert_series_equal(result, expected) + + ser = Series([Decimal(10)]) + result = ser // Decimal(2) + + tm.assert_series_equal(result, expected) + + def test_div_equiv_binop(self): + # Test Series.div as well as Series.__div__ + # float/integer issue + # GH#7785 + first = Series([1, 0], name='first') + second = Series([-0.01, -0.02], name='second') + expected = Series([-0.01, -np.inf]) + + result = second.div(first) + tm.assert_series_equal(result, expected, check_names=False) + + result = second / first + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize('dtype2', [ + np.int64, np.int32, np.int16, np.int8, + np.float64, np.float32, np.float16, + np.uint64, np.uint32, np.uint16, np.uint8]) + @pytest.mark.parametrize('dtype1', [np.int64, np.float64, np.uint64]) + def test_ser_div_ser(self, dtype1, dtype2): + # no longer do integer div for any ops, but deal with the 0's + first = Series([3, 4, 5, 8], name='first').astype(dtype1) + second = Series([0, 0, 0, 3], name='second').astype(dtype2) + + with np.errstate(all='ignore'): + expected = Series(first.values.astype(np.float64) / second.values, + dtype='float64', name=None) + expected.iloc[0:3] = np.inf + + result = first / second + tm.assert_series_equal(result, expected) + assert not result.equals(second / first) + + def test_rdiv_zero_compat(self): + # GH#8674 + zero_array = np.array([0] * 5) + data = np.random.randn(5) + expected = Series([0.] 
* 5) + + result = zero_array / Series(data) + tm.assert_series_equal(result, expected) + + result = Series(zero_array) / data + tm.assert_series_equal(result, expected) + + result = Series(zero_array) / Series(data) + tm.assert_series_equal(result, expected) + + def test_div_zero_inf_signs(self): + # GH#9144, inf signing + ser = Series([-1, 0, 1], name='first') + expected = Series([-np.inf, np.nan, np.inf], name='first') + + result = ser / 0 + tm.assert_series_equal(result, expected) + + def test_rdiv_zero(self): + # GH#9144 + ser = Series([-1, 0, 1], name='first') + expected = Series([0.0, np.nan, 0.0], name='first') + + result = 0 / ser + tm.assert_series_equal(result, expected) + + def test_floordiv_div(self): + # GH#9144 + ser = Series([-1, 0, 1], name='first') + + result = ser // 0 + expected = Series([-np.inf, np.nan, np.inf], name='first') + tm.assert_series_equal(result, expected) + + class TestSeriesArithmetic(object): # Standard, numeric, or otherwise not-Timestamp/Timedelta/Period dtypes @pytest.mark.parametrize('data', [ @@ -316,6 +416,20 @@ def test_series_radd_str(self): tm.assert_series_equal('a' + ser, pd.Series(['ax', np.nan, 'ax'])) tm.assert_series_equal(ser + 'a', pd.Series(['xa', np.nan, 'xa'])) + @pytest.mark.parametrize('dtype', [None, object]) + def test_series_with_dtype_radd_timedelta(self, dtype): + # note this test is _not_ aimed at timedelta64-dtyped Series + ser = pd.Series([pd.Timedelta('1 days'), pd.Timedelta('2 days'), + pd.Timedelta('3 days')], dtype=dtype) + expected = pd.Series([pd.Timedelta('4 days'), pd.Timedelta('5 days'), + pd.Timedelta('6 days')]) + + result = pd.Timedelta('3 days') + ser + tm.assert_series_equal(result, expected) + + result = ser + pd.Timedelta('3 days') + tm.assert_series_equal(result, expected) + class TestPeriodSeriesArithmetic(object): def test_ops_series_timedelta(self): @@ -377,3 +491,361 @@ def test_dt64ser_sub_datetime_dtype(self): ser = Series([ts]) result = pd.to_timedelta(np.abs(ser - dt)) assert result.dtype == 'timedelta64[ns]' + + +class TestTimedeltaSeriesAdditionSubtraction(object): + # Tests for Series[timedelta64[ns]] __add__, __sub__, __radd__, __rsub__ + + # ------------------------------------------------------------------ + # Operations with int-like others + + def test_td64series_add_int_series_invalid(self, tdser): + with pytest.raises(TypeError): + tdser + Series([2, 3, 4]) + + @pytest.mark.xfail(reason='GH#19123 integer interpreted as nanoseconds') + def test_td64series_radd_int_series_invalid(self, tdser): + with pytest.raises(TypeError): + Series([2, 3, 4]) + tdser + + def test_td64series_sub_int_series_invalid(self, tdser): + with pytest.raises(TypeError): + tdser - Series([2, 3, 4]) + + @pytest.mark.xfail(reason='GH#19123 integer interpreted as nanoseconds') + def test_td64series_rsub_int_series_invalid(self, tdser): + with pytest.raises(TypeError): + Series([2, 3, 4]) - tdser + + def test_td64_series_add_intlike(self): + # GH#19123 + tdi = pd.TimedeltaIndex(['59 days', '59 days', 'NaT']) + ser = Series(tdi) + + other = Series([20, 30, 40], dtype='uint8') + + pytest.raises(TypeError, ser.__add__, 1) + pytest.raises(TypeError, ser.__sub__, 1) + + pytest.raises(TypeError, ser.__add__, other) + pytest.raises(TypeError, ser.__sub__, other) + + pytest.raises(TypeError, ser.__add__, other.values) + pytest.raises(TypeError, ser.__sub__, other.values) + + pytest.raises(TypeError, ser.__add__, pd.Index(other)) + pytest.raises(TypeError, ser.__sub__, pd.Index(other)) + + @pytest.mark.parametrize('scalar', [1, 
1.5, np.array(2)]) + def test_td64series_add_sub_numeric_scalar_invalid(self, scalar, tdser): + with pytest.raises(TypeError): + tdser + scalar + with pytest.raises(TypeError): + scalar + tdser + with pytest.raises(TypeError): + tdser - scalar + with pytest.raises(TypeError): + scalar - tdser + + @pytest.mark.parametrize('dtype', ['int64', 'int32', 'int16', + 'uint64', 'uint32', 'uint16', 'uint8', + 'float64', 'float32', 'float16']) + @pytest.mark.parametrize('vector', [ + np.array([1, 2, 3]), + pd.Index([1, 2, 3]), + pytest.param(Series([1, 2, 3]), + marks=pytest.mark.xfail(reason='GH#19123 integer ' + 'interpreted as nanos')) + ]) + def test_td64series_add_sub_numeric_array_invalid(self, vector, + dtype, tdser): + vector = vector.astype(dtype) + with pytest.raises(TypeError): + tdser + vector + with pytest.raises(TypeError): + vector + tdser + with pytest.raises(TypeError): + tdser - vector + with pytest.raises(TypeError): + vector - tdser + + # ------------------------------------------------------------------ + # Operations with datetime-like others + + def test_td64series_add_sub_timestamp(self): + # GH#11925 + tdser = Series(pd.timedelta_range('1 day', periods=3)) + ts = Timestamp('2012-01-01') + expected = Series(pd.date_range('2012-01-02', periods=3)) + tm.assert_series_equal(ts + tdser, expected) + tm.assert_series_equal(tdser + ts, expected) + + expected2 = Series(pd.date_range('2011-12-31', periods=3, freq='-1D')) + tm.assert_series_equal(ts - tdser, expected2) + tm.assert_series_equal(ts + (-tdser), expected2) + + with pytest.raises(TypeError): + tdser - ts + + # ------------------------------------------------------------------ + # Operations with timedelta-like others (including DateOffsets) + + @pytest.mark.parametrize('names', [(None, None, None), + ('Egon', 'Venkman', None), + ('NCC1701D', 'NCC1701D', 'NCC1701D')]) + def test_td64_series_with_tdi(self, names): + # GH#17250 make sure result dtype is correct + # GH#19043 make sure names are propagated correctly + tdi = pd.TimedeltaIndex(['0 days', '1 day'], name=names[0]) + ser = Series([Timedelta(hours=3), Timedelta(hours=4)], name=names[1]) + expected = Series([Timedelta(hours=3), Timedelta(days=1, hours=4)], + name=names[2]) + + result = tdi + ser + tm.assert_series_equal(result, expected) + assert result.dtype == 'timedelta64[ns]' + + result = ser + tdi + tm.assert_series_equal(result, expected) + assert result.dtype == 'timedelta64[ns]' + + expected = Series([Timedelta(hours=-3), Timedelta(days=1, hours=-4)], + name=names[2]) + + result = tdi - ser + tm.assert_series_equal(result, expected) + assert result.dtype == 'timedelta64[ns]' + + result = ser - tdi + tm.assert_series_equal(result, -expected) + assert result.dtype == 'timedelta64[ns]' + + def test_td64_sub_NaT(self): + # GH#18808 + ser = Series([NaT, Timedelta('1s')]) + res = ser - NaT + expected = Series([NaT, NaT], dtype='timedelta64[ns]') + tm.assert_series_equal(res, expected) + + +class TestTimedeltaSeriesMultiplicationDivision(object): + # Tests for Series[timedelta64[ns]] + # __mul__, __rmul__, __div__, __rdiv__, __floordiv__, __rfloordiv__ + + # ------------------------------------------------------------------ + # __floordiv__, __rfloordiv__ + + @pytest.mark.parametrize('scalar_td', [ + timedelta(minutes=5, seconds=4), + Timedelta('5m4s'), + Timedelta('5m4s').to_timedelta64()]) + def test_timedelta_floordiv(self, scalar_td): + # GH#18831 + td1 = Series([timedelta(minutes=5, seconds=3)] * 3) + td1.iloc[2] = np.nan + + result = td1 // scalar_td + expected = 
Series([0, 0, np.nan]) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize('scalar_td', [ + timedelta(minutes=5, seconds=4), + Timedelta('5m4s'), + Timedelta('5m4s').to_timedelta64()]) + def test_timedelta_rfloordiv(self, scalar_td): + # GH#18831 + td1 = Series([timedelta(minutes=5, seconds=3)] * 3) + td1.iloc[2] = np.nan + result = scalar_td // td1 + expected = Series([1, 1, np.nan]) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize('scalar_td', [ + timedelta(minutes=5, seconds=4), + Timedelta('5m4s'), + Timedelta('5m4s').to_timedelta64()]) + def test_timedelta_rfloordiv_explicit(self, scalar_td): + # GH#18831 + td1 = Series([timedelta(minutes=5, seconds=3)] * 3) + td1.iloc[2] = np.nan + + # We can test __rfloordiv__ using this syntax, + # see `test_timedelta_rfloordiv` + result = td1.__rfloordiv__(scalar_td) + expected = Series([1, 1, np.nan]) + tm.assert_series_equal(result, expected) + + # ------------------------------------------------------------------ + # Operations with int-like others + + @pytest.mark.parametrize('dtype', ['int64', 'int32', 'int16', + 'uint64', 'uint32', 'uint16', 'uint8', + 'float64', 'float32', 'float16']) + @pytest.mark.parametrize('vector', [np.array([20, 30, 40]), + pd.Index([20, 30, 40]), + Series([20, 30, 40])]) + def test_td64series_div_numeric_array(self, vector, dtype, tdser): + # GH#4521 + # divide/multiply by integers + vector = vector.astype(dtype) + expected = Series(['2.95D', '1D 23H 12m', 'NaT'], + dtype='timedelta64[ns]') + + result = tdser / vector + tm.assert_series_equal(result, expected) + + with pytest.raises(TypeError): + vector / tdser + + @pytest.mark.parametrize('dtype', ['int64', 'int32', 'int16', + 'uint64', 'uint32', 'uint16', 'uint8', + 'float64', 'float32', 'float16']) + @pytest.mark.parametrize('vector', [np.array([20, 30, 40]), + pd.Index([20, 30, 40]), + Series([20, 30, 40])]) + def test_td64series_mul_numeric_array(self, vector, dtype, tdser): + # GH#4521 + # divide/multiply by integers + vector = vector.astype(dtype) + + expected = Series(['1180 Days', '1770 Days', 'NaT'], + dtype='timedelta64[ns]') + + result = tdser * vector + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize('dtype', ['int64', 'int32', 'int16', + 'uint64', 'uint32', 'uint16', 'uint8', + 'float64', 'float32', 'float16']) + @pytest.mark.parametrize('vector', [ + np.array([20, 30, 40]), + pytest.param(pd.Index([20, 30, 40]), + marks=pytest.mark.xfail(reason='__mul__ raises ' + 'instead of returning ' + 'NotImplemented')), + Series([20, 30, 40]) + ]) + def test_td64series_rmul_numeric_array(self, vector, dtype, tdser): + # GH#4521 + # divide/multiply by integers + vector = vector.astype(dtype) + + expected = Series(['1180 Days', '1770 Days', 'NaT'], + dtype='timedelta64[ns]') + + result = vector * tdser + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize('one', [1, np.array(1), 1.0, np.array(1.0)]) + def test_td64series_mul_numeric_scalar(self, one, tdser): + # GH#4521 + # divide/multiply by integers + expected = Series(['-59 Days', '-59 Days', 'NaT'], + dtype='timedelta64[ns]') + + result = tdser * (-one) + tm.assert_series_equal(result, expected) + result = (-one) * tdser + tm.assert_series_equal(result, expected) + + expected = Series(['118 Days', '118 Days', 'NaT'], + dtype='timedelta64[ns]') + + result = tdser * (2 * one) + tm.assert_series_equal(result, expected) + result = (2 * one) * tdser + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize('two', [ + 2, 
2.0, + pytest.param(np.array(2), + marks=pytest.mark.xfail(reason='GH#19011 is_list_like ' + 'incorrectly True.')), + pytest.param(np.array(2.0), + marks=pytest.mark.xfail(reason='GH#19011 is_list_like ' + 'incorrectly True.')), + ]) + def test_td64series_div_numeric_scalar(self, two, tdser): + # GH#4521 + # divide/multiply by integers + expected = Series(['29.5D', '29.5D', 'NaT'], dtype='timedelta64[ns]') + + result = tdser / two + tm.assert_series_equal(result, expected) + + # ------------------------------------------------------------------ + # Operations with timedelta-like others + + @pytest.mark.parametrize('names', [(None, None, None), + ('Egon', 'Venkman', None), + ('NCC1701D', 'NCC1701D', 'NCC1701D')]) + def test_tdi_mul_int_series(self, names): + # GH#19042 + tdi = pd.TimedeltaIndex(['0days', '1day', '2days', '3days', '4days'], + name=names[0]) + ser = Series([0, 1, 2, 3, 4], dtype=np.int64, name=names[1]) + + expected = Series(['0days', '1day', '4days', '9days', '16days'], + dtype='timedelta64[ns]', + name=names[2]) + + result = ser * tdi + tm.assert_series_equal(result, expected) + + # The direct operation tdi * ser still needs to be fixed. + result = ser.__rmul__(tdi) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize('names', [(None, None, None), + ('Egon', 'Venkman', None), + ('NCC1701D', 'NCC1701D', 'NCC1701D')]) + def test_float_series_rdiv_tdi(self, names): + # GH#19042 + # TODO: the direct operation TimedeltaIndex / Series still + # needs to be fixed. + tdi = pd.TimedeltaIndex(['0days', '1day', '2days', '3days', '4days'], + name=names[0]) + ser = Series([1.5, 3, 4.5, 6, 7.5], dtype=np.float64, name=names[1]) + + expected = Series([tdi[n] / ser[n] for n in range(len(ser))], + dtype='timedelta64[ns]', + name=names[2]) + + result = ser.__rdiv__(tdi) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize('scalar_td', [ + timedelta(minutes=5, seconds=4), + Timedelta('5m4s'), + Timedelta('5m4s').to_timedelta64()]) + def test_td64series_mul_timedeltalike_invalid(self, scalar_td): + td1 = Series([timedelta(minutes=5, seconds=3)] * 3) + td1.iloc[2] = np.nan + + # check that we are getting a TypeError + # with 'operate' (from core/ops.py) for the ops that are not + # defined + pattern = 'operate|unsupported|cannot|not supported' + with tm.assert_raises_regex(TypeError, pattern): + td1 * scalar_td + with tm.assert_raises_regex(TypeError, pattern): + scalar_td * td1 + + +class TestTimedeltaSeriesInvalidArithmeticOps(object): + @pytest.mark.parametrize('scalar_td', [ + timedelta(minutes=5, seconds=4), + Timedelta('5m4s'), + Timedelta('5m4s').to_timedelta64()]) + def test_td64series_pow_invalid(self, scalar_td): + td1 = Series([timedelta(minutes=5, seconds=3)] * 3) + td1.iloc[2] = np.nan + + # check that we are getting a TypeError + # with 'operate' (from core/ops.py) for the ops that are not + # defined + pattern = 'operate|unsupported|cannot|not supported' + with tm.assert_raises_regex(TypeError, pattern): + scalar_td ** td1 + with tm.assert_raises_regex(TypeError, pattern): + td1 ** scalar_td diff --git a/pandas/tests/series/test_operators.py b/pandas/tests/series/test_operators.py index 554b3e15d8f10..f90fcce973f00 100644 --- a/pandas/tests/series/test_operators.py +++ b/pandas/tests/series/test_operators.py @@ -9,7 +9,7 @@ import operator from itertools import product, starmap -from numpy import nan, inf +from numpy import nan import numpy as np import pandas as pd @@ -29,11 +29,6 @@ from .common import TestData -@pytest.fixture -def 
tdser(): - return Series(['59 Days', '59 Days', 'NaT'], dtype='timedelta64[ns]') - - class TestSeriesComparisons(object): def test_series_comparison_scalars(self): series = Series(date_range('1/1/2000', periods=10)) @@ -579,291 +574,7 @@ def test_comp_ops_df_compat(self): left.to_frame() < right.to_frame() -class TestSeriesArithmetic(object): - def test_divide_decimal(self): - """ resolves issue #9787 """ - from decimal import Decimal - - expected = Series([Decimal(5)]) - - s = Series([Decimal(10)]) - s = s / Decimal(2) - - assert_series_equal(expected, s) - - s = Series([Decimal(10)]) - s = s // Decimal(2) - - assert_series_equal(expected, s) - - @pytest.mark.parametrize( - 'dtype2', - [ - np.int64, np.int32, np.int16, np.int8, - np.float64, np.float32, np.float16, - np.uint64, np.uint32, - np.uint16, np.uint8 - ]) - @pytest.mark.parametrize('dtype1', [np.int64, np.float64, np.uint64]) - def test_ser_div_ser(self, dtype1, dtype2): - # no longer do integer div for any ops, but deal with the 0's - first = Series([3, 4, 5, 8], name='first').astype(dtype1) - second = Series([0, 0, 0, 3], name='second').astype(dtype2) - - with np.errstate(all='ignore'): - expected = Series(first.values.astype(np.float64) / second.values, - dtype='float64', name=None) - expected.iloc[0:3] = np.inf - - result = first / second - assert_series_equal(result, expected) - assert not result.equals(second / first) - - def test_div_equiv_binop(self): - # Test Series.div as well as Series.__div__ - # float/integer issue - # GH#7785 - first = pd.Series([1, 0], name='first') - second = pd.Series([-0.01, -0.02], name='second') - expected = Series([-0.01, -np.inf]) - - result = second.div(first) - assert_series_equal(result, expected, check_names=False) - - result = second / first - assert_series_equal(result, expected) - - def test_rdiv_zero_compat(self): - # GH#8674 - zero_array = np.array([0] * 5) - data = np.random.randn(5) - expected = pd.Series([0.] 
* 5) - - result = zero_array / pd.Series(data) - assert_series_equal(result, expected) - - result = pd.Series(zero_array) / data - assert_series_equal(result, expected) - - result = pd.Series(zero_array) / pd.Series(data) - assert_series_equal(result, expected) - - def test_div_zero_inf_signs(self): - # GH#9144, inf signing - ser = Series([-1, 0, 1], name='first') - expected = Series([-np.inf, np.nan, np.inf], name='first') - - result = ser / 0 - assert_series_equal(result, expected) - - def test_rdiv_zero(self): - # GH#9144 - ser = Series([-1, 0, 1], name='first') - expected = Series([0.0, np.nan, 0.0], name='first') - - result = 0 / ser - assert_series_equal(result, expected) - - def test_floordiv_div(self): - # GH#9144 - ser = Series([-1, 0, 1], name='first') - - result = ser // 0 - expected = Series([-inf, nan, inf], name='first') - assert_series_equal(result, expected) - - -class TestTimedeltaSeriesArithmeticWithIntegers(object): - # Tests for Series with dtype 'timedelta64[ns]' arithmetic operations - # with integer and int-like others - - # ------------------------------------------------------------------ - # Addition and Subtraction - - def test_td64series_add_int_series_invalid(self, tdser): - with pytest.raises(TypeError): - tdser + Series([2, 3, 4]) - - @pytest.mark.xfail(reason='GH#19123 integer interpreted as nanoseconds') - def test_td64series_radd_int_series_invalid(self, tdser): - with pytest.raises(TypeError): - Series([2, 3, 4]) + tdser - - def test_td64series_sub_int_series_invalid(self, tdser): - with pytest.raises(TypeError): - tdser - Series([2, 3, 4]) - - @pytest.mark.xfail(reason='GH#19123 integer interpreted as nanoseconds') - def test_td64series_rsub_int_series_invalid(self, tdser): - with pytest.raises(TypeError): - Series([2, 3, 4]) - tdser - - def test_td64_series_add_intlike(self): - # GH#19123 - tdi = pd.TimedeltaIndex(['59 days', '59 days', 'NaT']) - ser = Series(tdi) - - other = Series([20, 30, 40], dtype='uint8') - - pytest.raises(TypeError, ser.__add__, 1) - pytest.raises(TypeError, ser.__sub__, 1) - - pytest.raises(TypeError, ser.__add__, other) - pytest.raises(TypeError, ser.__sub__, other) - - pytest.raises(TypeError, ser.__add__, other.values) - pytest.raises(TypeError, ser.__sub__, other.values) - - pytest.raises(TypeError, ser.__add__, pd.Index(other)) - pytest.raises(TypeError, ser.__sub__, pd.Index(other)) - - @pytest.mark.parametrize('scalar', [1, 1.5, np.array(2)]) - def test_td64series_add_sub_numeric_scalar_invalid(self, scalar, tdser): - with pytest.raises(TypeError): - tdser + scalar - with pytest.raises(TypeError): - scalar + tdser - with pytest.raises(TypeError): - tdser - scalar - with pytest.raises(TypeError): - scalar - tdser - - @pytest.mark.parametrize('dtype', ['int64', 'int32', 'int16', - 'uint64', 'uint32', 'uint16', 'uint8', - 'float64', 'float32', 'float16']) - @pytest.mark.parametrize('vector', [ - np.array([1, 2, 3]), - pd.Index([1, 2, 3]), - pytest.param(Series([1, 2, 3]), - marks=pytest.mark.xfail(reason='GH#19123 integer ' - 'interpreted as nanos')) - ]) - def test_td64series_add_sub_numeric_array_invalid(self, vector, - dtype, tdser): - vector = vector.astype(dtype) - with pytest.raises(TypeError): - tdser + vector - with pytest.raises(TypeError): - vector + tdser - with pytest.raises(TypeError): - tdser - vector - with pytest.raises(TypeError): - vector - tdser - - # ------------------------------------------------------------------ - # Multiplicaton and Division - - @pytest.mark.parametrize('dtype', ['int64', 'int32', 
'int16', - 'uint64', 'uint32', 'uint16', 'uint8', - 'float64', 'float32', 'float16']) - @pytest.mark.parametrize('vector', [np.array([20, 30, 40]), - pd.Index([20, 30, 40]), - Series([20, 30, 40])]) - def test_td64series_div_numeric_array(self, vector, dtype, tdser): - # GH 4521 - # divide/multiply by integers - vector = vector.astype(dtype) - expected = Series(['2.95D', '1D 23H 12m', 'NaT'], - dtype='timedelta64[ns]') - - result = tdser / vector - assert_series_equal(result, expected) - - with pytest.raises(TypeError): - vector / tdser - - @pytest.mark.parametrize('dtype', ['int64', 'int32', 'int16', - 'uint64', 'uint32', 'uint16', 'uint8', - 'float64', 'float32', 'float16']) - @pytest.mark.parametrize('vector', [np.array([20, 30, 40]), - pd.Index([20, 30, 40]), - Series([20, 30, 40])]) - def test_td64series_mul_numeric_array(self, vector, dtype, tdser): - # GH 4521 - # divide/multiply by integers - vector = vector.astype(dtype) - - expected = Series(['1180 Days', '1770 Days', 'NaT'], - dtype='timedelta64[ns]') - - result = tdser * vector - assert_series_equal(result, expected) - - @pytest.mark.parametrize('dtype', ['int64', 'int32', 'int16', - 'uint64', 'uint32', 'uint16', 'uint8', - 'float64', 'float32', 'float16']) - @pytest.mark.parametrize('vector', [ - np.array([20, 30, 40]), - pytest.param(pd.Index([20, 30, 40]), - marks=pytest.mark.xfail(reason='__mul__ raises ' - 'instead of returning ' - 'NotImplemented')), - Series([20, 30, 40]) - ]) - def test_td64series_rmul_numeric_array(self, vector, dtype, tdser): - # GH 4521 - # divide/multiply by integers - vector = vector.astype(dtype) - - expected = Series(['1180 Days', '1770 Days', 'NaT'], - dtype='timedelta64[ns]') - - result = vector * tdser - assert_series_equal(result, expected) - - @pytest.mark.parametrize('one', [1, np.array(1), 1.0, np.array(1.0)]) - def test_td64series_mul_numeric_scalar(self, one, tdser): - # GH 4521 - # divide/multiply by integers - expected = Series(['-59 Days', '-59 Days', 'NaT'], - dtype='timedelta64[ns]') - - result = tdser * (-one) - assert_series_equal(result, expected) - result = (-one) * tdser - assert_series_equal(result, expected) - - expected = Series(['118 Days', '118 Days', 'NaT'], - dtype='timedelta64[ns]') - - result = tdser * (2 * one) - assert_series_equal(result, expected) - result = (2 * one) * tdser - assert_series_equal(result, expected) - - @pytest.mark.parametrize('two', [ - 2, 2.0, - pytest.param(np.array(2), - marks=pytest.mark.xfail(reason='GH#19011 is_list_like ' - 'incorrectly True.')), - pytest.param(np.array(2.0), - marks=pytest.mark.xfail(reason='GH#19011 is_list_like ' - 'incorrectly True.')), - ]) - def test_td64series_div_numeric_scalar(self, two, tdser): - # GH 4521 - # divide/multiply by integers - expected = Series(['29.5D', '29.5D', 'NaT'], dtype='timedelta64[ns]') - - result = tdser / two - assert_series_equal(result, expected) - - class TestTimedeltaSeriesArithmetic(object): - def test_td64series_add_sub_timestamp(self): - # GH11925 - tdser = Series(timedelta_range('1 day', periods=3)) - ts = Timestamp('2012-01-01') - expected = Series(date_range('2012-01-02', periods=3)) - assert_series_equal(ts + tdser, expected) - assert_series_equal(tdser + ts, expected) - - expected2 = Series(date_range('2011-12-31', periods=3, freq='-1D')) - assert_series_equal(ts - tdser, expected2) - assert_series_equal(ts + (-tdser), expected2) - - with pytest.raises(TypeError): - tdser - ts def test_timedelta64_operations_with_DateOffset(self): # GH 10699 @@ -1081,13 +792,6 @@ def 
test_timedelta64_ops_nat(self): assert_series_equal(timedelta_series / nan, nat_series_dtype_timedelta) - def test_td64_sub_NaT(self): - # GH#18808 - ser = Series([NaT, Timedelta('1s')]) - res = ser - NaT - expected = Series([NaT, NaT], dtype='timedelta64[ns]') - tm.assert_series_equal(res, expected) - @pytest.mark.parametrize('scalar_td', [timedelta(minutes=5, seconds=4), Timedelta(minutes=5, seconds=4), Timedelta('5m4s').to_timedelta64()]) @@ -1103,135 +807,6 @@ def test_operators_timedelta64_with_timedelta(self, scalar_td): td1 / scalar_td scalar_td / td1 - @pytest.mark.parametrize('scalar_td', [ - timedelta(minutes=5, seconds=4), - Timedelta('5m4s'), - Timedelta('5m4s').to_timedelta64()]) - def test_operators_timedelta64_with_timedelta_invalid(self, scalar_td): - td1 = Series([timedelta(minutes=5, seconds=3)] * 3) - td1.iloc[2] = np.nan - - # check that we are getting a TypeError - # with 'operate' (from core/ops.py) for the ops that are not - # defined - pattern = 'operate|unsupported|cannot|not supported' - with tm.assert_raises_regex(TypeError, pattern): - td1 * scalar_td - with tm.assert_raises_regex(TypeError, pattern): - scalar_td * td1 - with tm.assert_raises_regex(TypeError, pattern): - scalar_td ** td1 - with tm.assert_raises_regex(TypeError, pattern): - td1 ** scalar_td - - @pytest.mark.parametrize('scalar_td', [ - timedelta(minutes=5, seconds=4), - Timedelta('5m4s'), - Timedelta('5m4s').to_timedelta64()]) - def test_timedelta_rfloordiv(self, scalar_td): - # GH#18831 - td1 = Series([timedelta(minutes=5, seconds=3)] * 3) - td1.iloc[2] = np.nan - result = scalar_td // td1 - expected = Series([1, 1, np.nan]) - tm.assert_series_equal(result, expected) - - @pytest.mark.parametrize('scalar_td', [ - timedelta(minutes=5, seconds=4), - Timedelta('5m4s'), - Timedelta('5m4s').to_timedelta64()]) - def test_timedelta_rfloordiv_explicit(self, scalar_td): - # GH#18831 - td1 = Series([timedelta(minutes=5, seconds=3)] * 3) - td1.iloc[2] = np.nan - - # We can test __rfloordiv__ using this syntax, - # see `test_timedelta_rfloordiv` - result = td1.__rfloordiv__(scalar_td) - expected = Series([1, 1, np.nan]) - tm.assert_series_equal(result, expected) - - @pytest.mark.parametrize('scalar_td', [ - timedelta(minutes=5, seconds=4), - Timedelta('5m4s'), - Timedelta('5m4s').to_timedelta64()]) - def test_timedelta_floordiv(self, scalar_td): - # GH#18831 - td1 = Series([timedelta(minutes=5, seconds=3)] * 3) - td1.iloc[2] = np.nan - - result = td1 // scalar_td - expected = Series([0, 0, np.nan]) - tm.assert_series_equal(result, expected) - - @pytest.mark.parametrize('names', [(None, None, None), - ('Egon', 'Venkman', None), - ('NCC1701D', 'NCC1701D', 'NCC1701D')]) - def test_td64_series_with_tdi(self, names): - # GH#17250 make sure result dtype is correct - # GH#19043 make sure names are propagated correctly - tdi = pd.TimedeltaIndex(['0 days', '1 day'], name=names[0]) - ser = Series([Timedelta(hours=3), Timedelta(hours=4)], name=names[1]) - expected = Series([Timedelta(hours=3), Timedelta(days=1, hours=4)], - name=names[2]) - - result = tdi + ser - tm.assert_series_equal(result, expected) - assert result.dtype == 'timedelta64[ns]' - - result = ser + tdi - tm.assert_series_equal(result, expected) - assert result.dtype == 'timedelta64[ns]' - - expected = Series([Timedelta(hours=-3), Timedelta(days=1, hours=-4)], - name=names[2]) - - result = tdi - ser - tm.assert_series_equal(result, expected) - assert result.dtype == 'timedelta64[ns]' - - result = ser - tdi - tm.assert_series_equal(result, -expected) - 
assert result.dtype == 'timedelta64[ns]' - - @pytest.mark.parametrize('names', [(None, None, None), - ('Egon', 'Venkman', None), - ('NCC1701D', 'NCC1701D', 'NCC1701D')]) - def test_tdi_mul_int_series(self, names): - # GH#19042 - tdi = pd.TimedeltaIndex(['0days', '1day', '2days', '3days', '4days'], - name=names[0]) - ser = Series([0, 1, 2, 3, 4], dtype=np.int64, name=names[1]) - - expected = Series(['0days', '1day', '4days', '9days', '16days'], - dtype='timedelta64[ns]', - name=names[2]) - - result = ser * tdi - tm.assert_series_equal(result, expected) - - # The direct operation tdi * ser still needs to be fixed. - result = ser.__rmul__(tdi) - tm.assert_series_equal(result, expected) - - @pytest.mark.parametrize('names', [(None, None, None), - ('Egon', 'Venkman', None), - ('NCC1701D', 'NCC1701D', 'NCC1701D')]) - def test_float_series_rdiv_tdi(self, names): - # GH#19042 - # TODO: the direct operation TimedeltaIndex / Series still - # needs to be fixed. - tdi = pd.TimedeltaIndex(['0days', '1day', '2days', '3days', '4days'], - name=names[0]) - ser = Series([1.5, 3, 4.5, 6, 7.5], dtype=np.float64, name=names[1]) - - expected = Series([tdi[n] / ser[n] for n in range(len(ser))], - dtype='timedelta64[ns]', - name=names[2]) - - result = ser.__rdiv__(tdi) - tm.assert_series_equal(result, expected) - class TestDatetimeSeriesArithmetic(object): @pytest.mark.parametrize( @@ -1994,138 +1569,6 @@ def test_operators_reverse_object(self, op): expected = op(1., arr.astype(float)) assert_series_equal(result.astype(float), expected) - def test_arith_ops_df_compat(self): - # GH 1134 - s1 = pd.Series([1, 2, 3], index=list('ABC'), name='x') - s2 = pd.Series([2, 2, 2], index=list('ABD'), name='x') - - exp = pd.Series([3.0, 4.0, np.nan, np.nan], - index=list('ABCD'), name='x') - assert_series_equal(s1 + s2, exp) - assert_series_equal(s2 + s1, exp) - - exp = pd.DataFrame({'x': [3.0, 4.0, np.nan, np.nan]}, - index=list('ABCD')) - assert_frame_equal(s1.to_frame() + s2.to_frame(), exp) - assert_frame_equal(s2.to_frame() + s1.to_frame(), exp) - - # different length - s3 = pd.Series([1, 2, 3], index=list('ABC'), name='x') - s4 = pd.Series([2, 2, 2, 2], index=list('ABCD'), name='x') - - exp = pd.Series([3, 4, 5, np.nan], - index=list('ABCD'), name='x') - assert_series_equal(s3 + s4, exp) - assert_series_equal(s4 + s3, exp) - - exp = pd.DataFrame({'x': [3, 4, 5, np.nan]}, - index=list('ABCD')) - assert_frame_equal(s3.to_frame() + s4.to_frame(), exp) - assert_frame_equal(s4.to_frame() + s3.to_frame(), exp) - - def test_bool_ops_df_compat(self): - # GH 1134 - s1 = pd.Series([True, False, True], index=list('ABC'), name='x') - s2 = pd.Series([True, True, False], index=list('ABD'), name='x') - - exp = pd.Series([True, False, False, False], - index=list('ABCD'), name='x') - assert_series_equal(s1 & s2, exp) - assert_series_equal(s2 & s1, exp) - - # True | np.nan => True - exp = pd.Series([True, True, True, False], - index=list('ABCD'), name='x') - assert_series_equal(s1 | s2, exp) - # np.nan | True => np.nan, filled with False - exp = pd.Series([True, True, False, False], - index=list('ABCD'), name='x') - assert_series_equal(s2 | s1, exp) - - # DataFrame doesn't fill nan with False - exp = pd.DataFrame({'x': [True, False, np.nan, np.nan]}, - index=list('ABCD')) - assert_frame_equal(s1.to_frame() & s2.to_frame(), exp) - assert_frame_equal(s2.to_frame() & s1.to_frame(), exp) - - exp = pd.DataFrame({'x': [True, True, np.nan, np.nan]}, - index=list('ABCD')) - assert_frame_equal(s1.to_frame() | s2.to_frame(), exp) - 
assert_frame_equal(s2.to_frame() | s1.to_frame(), exp) - - # different length - s3 = pd.Series([True, False, True], index=list('ABC'), name='x') - s4 = pd.Series([True, True, True, True], index=list('ABCD'), name='x') - - exp = pd.Series([True, False, True, False], - index=list('ABCD'), name='x') - assert_series_equal(s3 & s4, exp) - assert_series_equal(s4 & s3, exp) - - # np.nan | True => np.nan, filled with False - exp = pd.Series([True, True, True, False], - index=list('ABCD'), name='x') - assert_series_equal(s3 | s4, exp) - # True | np.nan => True - exp = pd.Series([True, True, True, True], - index=list('ABCD'), name='x') - assert_series_equal(s4 | s3, exp) - - exp = pd.DataFrame({'x': [True, False, True, np.nan]}, - index=list('ABCD')) - assert_frame_equal(s3.to_frame() & s4.to_frame(), exp) - assert_frame_equal(s4.to_frame() & s3.to_frame(), exp) - - exp = pd.DataFrame({'x': [True, True, True, np.nan]}, - index=list('ABCD')) - assert_frame_equal(s3.to_frame() | s4.to_frame(), exp) - assert_frame_equal(s4.to_frame() | s3.to_frame(), exp) - - def test_series_frame_radd_bug(self): - # GH 353 - vals = Series(tm.rands_array(5, 10)) - result = 'foo_' + vals - expected = vals.map(lambda x: 'foo_' + x) - assert_series_equal(result, expected) - - frame = DataFrame({'vals': vals}) - result = 'foo_' + frame - expected = DataFrame({'vals': vals.map(lambda x: 'foo_' + x)}) - assert_frame_equal(result, expected) - - # really raise this time - with pytest.raises(TypeError): - datetime.now() + self.ts - - with pytest.raises(TypeError): - self.ts + datetime.now() - - @pytest.mark.parametrize('dtype', [None, object]) - def test_series_with_dtype_radd_timedelta(self, dtype): - ser = pd.Series([pd.Timedelta('1 days'), pd.Timedelta('2 days'), - pd.Timedelta('3 days')], dtype=dtype) - expected = pd.Series([pd.Timedelta('4 days'), pd.Timedelta('5 days'), - pd.Timedelta('6 days')]) - - result = pd.Timedelta('3 days') + ser - assert_series_equal(result, expected) - - result = ser + pd.Timedelta('3 days') - assert_series_equal(result, expected) - - def test_operators_frame(self): - # rpow does not work with DataFrame - df = DataFrame({'A': self.ts}) - - assert_series_equal(self.ts + self.ts, self.ts + df['A'], - check_names=False) - assert_series_equal(self.ts ** self.ts, self.ts ** df['A'], - check_names=False) - assert_series_equal(self.ts < self.ts, self.ts < df['A'], - check_names=False) - assert_series_equal(self.ts / self.ts, self.ts / df['A'], - check_names=False) - def test_operators_combine(self): def _check_fill(meth, op, a, b, fill_value=0): exp_index = a.index.union(b.index) @@ -2231,15 +1674,6 @@ def test_datetime64_with_index(self): df['result'] = df['date'] - df.index assert_series_equal(df['result'], df['expected'], check_names=False) - def test_dti_tz_convert_to_utc(self): - base = pd.DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], - tz='UTC') - idx1 = base.tz_convert('Asia/Tokyo')[:2] - idx2 = base.tz_convert('US/Eastern')[1:] - - res = Series([1, 2], index=idx1) + Series([1, 1], index=idx2) - assert_series_equal(res, Series([np.nan, 3, np.nan], index=base)) - def test_op_duplicate_index(self): # GH14227 s1 = Series([1, 2], index=[1, 1]) @@ -2294,3 +1728,130 @@ def test_idxminmax_with_inf(self): assert np.isnan(s.idxmin(skipna=False)) assert s.idxmax() == 0 np.isnan(s.idxmax(skipna=False)) + + +class TestSeriesOperationsDataFrameCompat(object): + def test_operators_frame(self): + # rpow does not work with DataFrame + ts = tm.makeTimeSeries() + ts.name = 'ts' + + df = 
DataFrame({'A': ts}) + + assert_series_equal(ts + ts, ts + df['A'], + check_names=False) + assert_series_equal(ts ** ts, ts ** df['A'], + check_names=False) + assert_series_equal(ts < ts, ts < df['A'], + check_names=False) + assert_series_equal(ts / ts, ts / df['A'], + check_names=False) + + def test_series_frame_radd_bug(self): + # GH#353 + vals = Series(tm.rands_array(5, 10)) + result = 'foo_' + vals + expected = vals.map(lambda x: 'foo_' + x) + assert_series_equal(result, expected) + + frame = DataFrame({'vals': vals}) + result = 'foo_' + frame + expected = DataFrame({'vals': vals.map(lambda x: 'foo_' + x)}) + assert_frame_equal(result, expected) + + ts = tm.makeTimeSeries() + ts.name = 'ts' + + # really raise this time + with pytest.raises(TypeError): + datetime.now() + ts + + with pytest.raises(TypeError): + ts + datetime.now() + + def test_bool_ops_df_compat(self): + # GH 1134 + s1 = pd.Series([True, False, True], index=list('ABC'), name='x') + s2 = pd.Series([True, True, False], index=list('ABD'), name='x') + + exp = pd.Series([True, False, False, False], + index=list('ABCD'), name='x') + assert_series_equal(s1 & s2, exp) + assert_series_equal(s2 & s1, exp) + + # True | np.nan => True + exp = pd.Series([True, True, True, False], + index=list('ABCD'), name='x') + assert_series_equal(s1 | s2, exp) + # np.nan | True => np.nan, filled with False + exp = pd.Series([True, True, False, False], + index=list('ABCD'), name='x') + assert_series_equal(s2 | s1, exp) + + # DataFrame doesn't fill nan with False + exp = pd.DataFrame({'x': [True, False, np.nan, np.nan]}, + index=list('ABCD')) + assert_frame_equal(s1.to_frame() & s2.to_frame(), exp) + assert_frame_equal(s2.to_frame() & s1.to_frame(), exp) + + exp = pd.DataFrame({'x': [True, True, np.nan, np.nan]}, + index=list('ABCD')) + assert_frame_equal(s1.to_frame() | s2.to_frame(), exp) + assert_frame_equal(s2.to_frame() | s1.to_frame(), exp) + + # different length + s3 = pd.Series([True, False, True], index=list('ABC'), name='x') + s4 = pd.Series([True, True, True, True], index=list('ABCD'), name='x') + + exp = pd.Series([True, False, True, False], + index=list('ABCD'), name='x') + assert_series_equal(s3 & s4, exp) + assert_series_equal(s4 & s3, exp) + + # np.nan | True => np.nan, filled with False + exp = pd.Series([True, True, True, False], + index=list('ABCD'), name='x') + assert_series_equal(s3 | s4, exp) + # True | np.nan => True + exp = pd.Series([True, True, True, True], + index=list('ABCD'), name='x') + assert_series_equal(s4 | s3, exp) + + exp = pd.DataFrame({'x': [True, False, True, np.nan]}, + index=list('ABCD')) + assert_frame_equal(s3.to_frame() & s4.to_frame(), exp) + assert_frame_equal(s4.to_frame() & s3.to_frame(), exp) + + exp = pd.DataFrame({'x': [True, True, True, np.nan]}, + index=list('ABCD')) + assert_frame_equal(s3.to_frame() | s4.to_frame(), exp) + assert_frame_equal(s4.to_frame() | s3.to_frame(), exp) + + def test_arith_ops_df_compat(self): + # GH#1134 + s1 = pd.Series([1, 2, 3], index=list('ABC'), name='x') + s2 = pd.Series([2, 2, 2], index=list('ABD'), name='x') + + exp = pd.Series([3.0, 4.0, np.nan, np.nan], + index=list('ABCD'), name='x') + assert_series_equal(s1 + s2, exp) + assert_series_equal(s2 + s1, exp) + + exp = pd.DataFrame({'x': [3.0, 4.0, np.nan, np.nan]}, + index=list('ABCD')) + assert_frame_equal(s1.to_frame() + s2.to_frame(), exp) + assert_frame_equal(s2.to_frame() + s1.to_frame(), exp) + + # different length + s3 = pd.Series([1, 2, 3], index=list('ABC'), name='x') + s4 = pd.Series([2, 2, 2, 2], 
index=list('ABCD'), name='x') + + exp = pd.Series([3, 4, 5, np.nan], + index=list('ABCD'), name='x') + assert_series_equal(s3 + s4, exp) + assert_series_equal(s4 + s3, exp) + + exp = pd.DataFrame({'x': [3, 4, 5, np.nan]}, + index=list('ABCD')) + assert_frame_equal(s3.to_frame() + s4.to_frame(), exp) + assert_frame_equal(s4.to_frame() + s3.to_frame(), exp) diff --git a/pandas/tests/series/test_timezones.py b/pandas/tests/series/test_timezones.py index 2e15c964e4e93..b54645d04bd1a 100644 --- a/pandas/tests/series/test_timezones.py +++ b/pandas/tests/series/test_timezones.py @@ -88,6 +88,15 @@ def test_series_tz_convert(self): tm.assert_raises_regex(TypeError, "Cannot convert tz-naive", ts.tz_convert, 'US/Eastern') + def test_series_tz_convert_to_utc(self): + base = DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], + tz='UTC') + idx1 = base.tz_convert('Asia/Tokyo')[:2] + idx2 = base.tz_convert('US/Eastern')[1:] + + res = Series([1, 2], index=idx1) + Series([1, 1], index=idx2) + tm.assert_series_equal(res, Series([np.nan, 3, np.nan], index=base)) + # ----------------------------------------------------------------- # Series.append
Doesn't overlap with other outstanding PRs. Per usual, there is more that _could_ be done, but we're doing this in pieces.

- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
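For illustration, a condensed sketch of the fixture-sharing pattern this move sets up in `test_arithmetic.py` (the test body is adapted from `test_td64series_mul_numeric_scalar` in the diff above, not a verbatim copy):

```python
import pandas as pd
import pandas.util.testing as tm
import pytest


@pytest.fixture
def tdser():
    """Return a Series with dtype='timedelta64[ns]', including a NaT."""
    return pd.Series(['59 Days', '59 Days', 'NaT'], dtype='timedelta64[ns]')


def test_td64series_mul_two(tdser):
    # scalar multiplication keeps the timedelta64[ns] dtype and propagates NaT
    result = tdser * 2
    expected = pd.Series(['118 Days', '118 Days', 'NaT'],
                         dtype='timedelta64[ns]')
    tm.assert_series_equal(result, expected)
```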
https://api.github.com/repos/pandas-dev/pandas/pulls/19794
2018-02-20T18:08:31Z
2018-02-21T23:50:05Z
2018-02-21T23:50:05Z
2018-06-22T03:24:31Z
Raise OptionError instead of KeyError in __getattr__. Fixes #19789.
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index ca5749afd11bc..1a8fa5a89468a 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -908,3 +908,4 @@ Other ^^^^^ - Improved error message when attempting to use a Python keyword as an identifier in a ``numexpr`` backed query (:issue:`18221`) +- Bug in accessing :func:`pandas.get_option`, which raised ``KeyError`` rather than ``OptionError`` when looking up a non-existent option key in some cases (:issue:`19789`) diff --git a/pandas/core/config.py b/pandas/core/config.py index 692aed178719d..369e0568346ef 100644 --- a/pandas/core/config.py +++ b/pandas/core/config.py @@ -196,7 +196,10 @@ def __getattr__(self, key): if prefix: prefix += "." prefix += key - v = object.__getattribute__(self, "d")[key] + try: + v = object.__getattribute__(self, "d")[key] + except KeyError: + raise OptionError("No such option") if isinstance(v, dict): return DictWrapper(v, prefix) else: diff --git a/pandas/tests/test_config.py b/pandas/tests/test_config.py index 8d6f36ac6a798..91ce65dcce9b2 100644 --- a/pandas/tests/test_config.py +++ b/pandas/tests/test_config.py @@ -428,3 +428,9 @@ def test_option_context_scope(self): # Ensure the current context is reset assert self.cf.get_option(option_name) == original_value + + def test_dictwrapper_getattr(self): + options = self.cf.options + # GH 19789 + pytest.raises(self.cf.OptionError, getattr, options, 'bananas') + assert not hasattr(options, 'bananas')
- [x] closes #19789
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
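A quick sketch of the user-visible change (the `OptionError` import path is the `pandas.core.config` location this patch touches; it moved in later versions):

```python
import pandas as pd
from pandas.core.config import OptionError  # location patched above

try:
    pd.options.bananas  # no such option key
except OptionError:
    # previously this escaped DictWrapper.__getattr__ as a bare KeyError
    print('OptionError raised')

# OptionError subclasses AttributeError, so hasattr() still answers False,
# matching the new test's `assert not hasattr(options, 'bananas')`
assert not hasattr(pd.options, 'bananas')
```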
https://api.github.com/repos/pandas-dev/pandas/pulls/19790
2018-02-20T14:51:37Z
2018-02-24T15:08:17Z
2018-02-24T15:08:17Z
2018-02-24T15:08:19Z
parametrize a whole mess of tests
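Condensed illustration of the conversion this patch applies throughout (a sketch based on the first hunk of the diff below, not part of the patch): instead of looping over cases inside one test body, each case becomes its own independently reported test:

```python
import pandas as pd
import pytest


@pytest.mark.parametrize('freq', [None, 'D'])
def test_sub_period_raises(freq):
    # GH#13078: DatetimeIndex - Period is not supported, for either freq
    p = pd.Period('2011-01-01', freq='D')
    idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], freq=freq)
    with pytest.raises(TypeError):
        idx - p
```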
diff --git a/pandas/tests/indexes/datetimes/test_arithmetic.py b/pandas/tests/indexes/datetimes/test_arithmetic.py index ddc97636ae0a8..9ce34e9b22efb 100644 --- a/pandas/tests/indexes/datetimes/test_arithmetic.py +++ b/pandas/tests/indexes/datetimes/test_arithmetic.py @@ -614,19 +614,19 @@ def test_sub_dti_dti(self): result = dti2 - dti1 tm.assert_index_equal(result, expected) - def test_sub_period(self): - # GH 13078 + @pytest.mark.parametrize('freq', [None, 'D']) + def test_sub_period(self, freq): + # GH#13078 # not supported, check TypeError p = pd.Period('2011-01-01', freq='D') - for freq in [None, 'D']: - idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], freq=freq) + idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], freq=freq) - with pytest.raises(TypeError): - idx - p + with pytest.raises(TypeError): + idx - p - with pytest.raises(TypeError): - p - idx + with pytest.raises(TypeError): + p - idx def test_ufunc_coercions(self): idx = date_range('2011-01-01', periods=3, freq='2D', name='x') diff --git a/pandas/tests/indexes/datetimes/test_construction.py b/pandas/tests/indexes/datetimes/test_construction.py index 197a42bdaacbb..176f5bd0c1a2a 100644 --- a/pandas/tests/indexes/datetimes/test_construction.py +++ b/pandas/tests/indexes/datetimes/test_construction.py @@ -351,52 +351,51 @@ def test_constructor_coverage(self): freq='B') pytest.raises(ValueError, DatetimeIndex, periods=10, freq='D') - def test_constructor_datetime64_tzformat(self): - # see gh-6572: ISO 8601 format results in pytz.FixedOffset - for freq in ['AS', 'W-SUN']: - idx = date_range('2013-01-01T00:00:00-05:00', - '2016-01-01T23:59:59-05:00', freq=freq) - expected = date_range('2013-01-01T00:00:00', '2016-01-01T23:59:59', - freq=freq, tz=pytz.FixedOffset(-300)) - tm.assert_index_equal(idx, expected) - # Unable to use `US/Eastern` because of DST - expected_i8 = date_range('2013-01-01T00:00:00', - '2016-01-01T23:59:59', freq=freq, - tz='America/Lima') - tm.assert_numpy_array_equal(idx.asi8, expected_i8.asi8) - - idx = date_range('2013-01-01T00:00:00+09:00', - '2016-01-01T23:59:59+09:00', freq=freq) - expected = date_range('2013-01-01T00:00:00', '2016-01-01T23:59:59', - freq=freq, tz=pytz.FixedOffset(540)) - tm.assert_index_equal(idx, expected) - expected_i8 = date_range('2013-01-01T00:00:00', - '2016-01-01T23:59:59', freq=freq, - tz='Asia/Tokyo') - tm.assert_numpy_array_equal(idx.asi8, expected_i8.asi8) + @pytest.mark.parametrize('freq', ['AS', 'W-SUN']) + def test_constructor_datetime64_tzformat(self, freq): + # see GH#6572: ISO 8601 format results in pytz.FixedOffset + idx = date_range('2013-01-01T00:00:00-05:00', + '2016-01-01T23:59:59-05:00', freq=freq) + expected = date_range('2013-01-01T00:00:00', '2016-01-01T23:59:59', + freq=freq, tz=pytz.FixedOffset(-300)) + tm.assert_index_equal(idx, expected) + # Unable to use `US/Eastern` because of DST + expected_i8 = date_range('2013-01-01T00:00:00', + '2016-01-01T23:59:59', freq=freq, + tz='America/Lima') + tm.assert_numpy_array_equal(idx.asi8, expected_i8.asi8) + + idx = date_range('2013-01-01T00:00:00+09:00', + '2016-01-01T23:59:59+09:00', freq=freq) + expected = date_range('2013-01-01T00:00:00', '2016-01-01T23:59:59', + freq=freq, tz=pytz.FixedOffset(540)) + tm.assert_index_equal(idx, expected) + expected_i8 = date_range('2013-01-01T00:00:00', + '2016-01-01T23:59:59', freq=freq, + tz='Asia/Tokyo') + tm.assert_numpy_array_equal(idx.asi8, expected_i8.asi8) # Non ISO 8601 format results in dateutil.tz.tzoffset - for freq in ['AS', 'W-SUN']: - idx = date_range('2013/1/1 
0:00:00-5:00', '2016/1/1 23:59:59-5:00', - freq=freq) - expected = date_range('2013-01-01T00:00:00', '2016-01-01T23:59:59', - freq=freq, tz=pytz.FixedOffset(-300)) - tm.assert_index_equal(idx, expected) - # Unable to use `US/Eastern` because of DST - expected_i8 = date_range('2013-01-01T00:00:00', - '2016-01-01T23:59:59', freq=freq, - tz='America/Lima') - tm.assert_numpy_array_equal(idx.asi8, expected_i8.asi8) - - idx = date_range('2013/1/1 0:00:00+9:00', - '2016/1/1 23:59:59+09:00', freq=freq) - expected = date_range('2013-01-01T00:00:00', '2016-01-01T23:59:59', - freq=freq, tz=pytz.FixedOffset(540)) - tm.assert_index_equal(idx, expected) - expected_i8 = date_range('2013-01-01T00:00:00', - '2016-01-01T23:59:59', freq=freq, - tz='Asia/Tokyo') - tm.assert_numpy_array_equal(idx.asi8, expected_i8.asi8) + idx = date_range('2013/1/1 0:00:00-5:00', '2016/1/1 23:59:59-5:00', + freq=freq) + expected = date_range('2013-01-01T00:00:00', '2016-01-01T23:59:59', + freq=freq, tz=pytz.FixedOffset(-300)) + tm.assert_index_equal(idx, expected) + # Unable to use `US/Eastern` because of DST + expected_i8 = date_range('2013-01-01T00:00:00', + '2016-01-01T23:59:59', freq=freq, + tz='America/Lima') + tm.assert_numpy_array_equal(idx.asi8, expected_i8.asi8) + + idx = date_range('2013/1/1 0:00:00+9:00', + '2016/1/1 23:59:59+09:00', freq=freq) + expected = date_range('2013-01-01T00:00:00', '2016-01-01T23:59:59', + freq=freq, tz=pytz.FixedOffset(540)) + tm.assert_index_equal(idx, expected) + expected_i8 = date_range('2013-01-01T00:00:00', + '2016-01-01T23:59:59', freq=freq, + tz='Asia/Tokyo') + tm.assert_numpy_array_equal(idx.asi8, expected_i8.asi8) def test_constructor_dtype(self): @@ -451,36 +450,35 @@ def test_dti_constructor_preserve_dti_freq(self): rng2 = DatetimeIndex(rng) assert rng.freq == rng2.freq - def test_dti_constructor_years_only(self): + @pytest.mark.parametrize('tz', [None, 'UTC', 'Asia/Tokyo', + 'dateutil/US/Pacific']) + def test_dti_constructor_years_only(self, tz): # GH 6961 - for tz in [None, 'UTC', 'Asia/Tokyo', 'dateutil/US/Pacific']: - rng1 = date_range('2014', '2015', freq='M', tz=tz) - expected1 = date_range('2014-01-31', '2014-12-31', freq='M', tz=tz) + rng1 = date_range('2014', '2015', freq='M', tz=tz) + expected1 = date_range('2014-01-31', '2014-12-31', freq='M', tz=tz) - rng2 = date_range('2014', '2015', freq='MS', tz=tz) - expected2 = date_range('2014-01-01', '2015-01-01', freq='MS', - tz=tz) + rng2 = date_range('2014', '2015', freq='MS', tz=tz) + expected2 = date_range('2014-01-01', '2015-01-01', freq='MS', tz=tz) - rng3 = date_range('2014', '2020', freq='A', tz=tz) - expected3 = date_range('2014-12-31', '2019-12-31', freq='A', tz=tz) + rng3 = date_range('2014', '2020', freq='A', tz=tz) + expected3 = date_range('2014-12-31', '2019-12-31', freq='A', tz=tz) - rng4 = date_range('2014', '2020', freq='AS', tz=tz) - expected4 = date_range('2014-01-01', '2020-01-01', freq='AS', - tz=tz) + rng4 = date_range('2014', '2020', freq='AS', tz=tz) + expected4 = date_range('2014-01-01', '2020-01-01', freq='AS', tz=tz) - for rng, expected in [(rng1, expected1), (rng2, expected2), - (rng3, expected3), (rng4, expected4)]: - tm.assert_index_equal(rng, expected) + for rng, expected in [(rng1, expected1), (rng2, expected2), + (rng3, expected3), (rng4, expected4)]: + tm.assert_index_equal(rng, expected) - def test_dti_constructor_small_int(self): + @pytest.mark.parametrize('dtype', [np.int64, np.int32, np.int16, np.int8]) + def test_dti_constructor_small_int(self, dtype): # GH 13721 exp = 
DatetimeIndex(['1970-01-01 00:00:00.00000000', '1970-01-01 00:00:00.00000001', '1970-01-01 00:00:00.00000002']) - for dtype in [np.int64, np.int32, np.int16, np.int8]: - arr = np.array([0, 10, 20], dtype=dtype) - tm.assert_index_equal(DatetimeIndex(arr), exp) + arr = np.array([0, 10, 20], dtype=dtype) + tm.assert_index_equal(DatetimeIndex(arr), exp) def test_ctor_str_intraday(self): rng = DatetimeIndex(['1-1-2000 00:00:01']) @@ -499,7 +497,7 @@ def test_index_cast_datetime64_other_units(self): assert (idx.values == conversion.ensure_datetime64ns(arr)).all() def test_constructor_int64_nocopy(self): - # #1624 + # GH#1624 arr = np.arange(1000, dtype=np.int64) index = DatetimeIndex(arr) @@ -512,19 +510,17 @@ def test_constructor_int64_nocopy(self): arr[50:100] = -1 assert (index.asi8[50:100] != -1).all() - def test_from_freq_recreate_from_data(self): - freqs = ['M', 'Q', 'A', 'D', 'B', 'BH', 'T', 'S', 'L', 'U', 'H', 'N', - 'C'] - - for f in freqs: - org = DatetimeIndex(start='2001/02/01 09:00', freq=f, periods=1) - idx = DatetimeIndex(org, freq=f) - tm.assert_index_equal(idx, org) - - org = DatetimeIndex(start='2001/02/01 09:00', freq=f, - tz='US/Pacific', periods=1) - idx = DatetimeIndex(org, freq=f, tz='US/Pacific') - tm.assert_index_equal(idx, org) + @pytest.mark.parametrize('freq', ['M', 'Q', 'A', 'D', 'B', 'BH', + 'T', 'S', 'L', 'U', 'H', 'N', 'C']) + def test_from_freq_recreate_from_data(self, freq): + org = DatetimeIndex(start='2001/02/01 09:00', freq=freq, periods=1) + idx = DatetimeIndex(org, freq=freq) + tm.assert_index_equal(idx, org) + + org = DatetimeIndex(start='2001/02/01 09:00', freq=freq, + tz='US/Pacific', periods=1) + idx = DatetimeIndex(org, freq=freq, tz='US/Pacific') + tm.assert_index_equal(idx, org) def test_datetimeindex_constructor_misc(self): arr = ['1/1/2005', '1/2/2005', 'Jn 3, 2005', '2005-01-04'] diff --git a/pandas/tests/indexes/datetimes/test_date_range.py b/pandas/tests/indexes/datetimes/test_date_range.py index 3738398d017f8..d2ec465468dfb 100644 --- a/pandas/tests/indexes/datetimes/test_date_range.py +++ b/pandas/tests/indexes/datetimes/test_date_range.py @@ -222,16 +222,13 @@ def test_range_misspecified(self): with tm.assert_raises_regex(ValueError, msg): date_range() - def test_compat_replace(self): + @pytest.mark.parametrize('f', [compat.long, int]) + def test_compat_replace(self, f): # https://github.com/statsmodels/statsmodels/issues/3349 # replace should take ints/longs for compat - - for f in [compat.long, int]: - result = date_range(Timestamp('1960-04-01 00:00:00', - freq='QS-JAN'), - periods=f(76), - freq='QS-JAN') - assert len(result) == 76 + result = date_range(Timestamp('1960-04-01 00:00:00', freq='QS-JAN'), + periods=f(76), freq='QS-JAN') + assert len(result) == 76 def test_catch_infinite_loop(self): offset = offsets.DateOffset(minute=5) @@ -484,24 +481,24 @@ def test_range_tz_dateutil(self): assert dr[0] == start assert dr[2] == end - def test_range_closed(self): + @pytest.mark.parametrize('freq', ["1D", "3D", "2M", "7W", "3H", "A"]) + def test_range_closed(self, freq): begin = datetime(2011, 1, 1) end = datetime(2014, 1, 1) - for freq in ["1D", "3D", "2M", "7W", "3H", "A"]: - closed = date_range(begin, end, closed=None, freq=freq) - left = date_range(begin, end, closed="left", freq=freq) - right = date_range(begin, end, closed="right", freq=freq) - expected_left = left - expected_right = right + closed = date_range(begin, end, closed=None, freq=freq) + left = date_range(begin, end, closed="left", freq=freq) + right = date_range(begin, end, 
closed="right", freq=freq) + expected_left = left + expected_right = right - if end == closed[-1]: - expected_left = closed[:-1] - if begin == closed[0]: - expected_right = closed[1:] + if end == closed[-1]: + expected_left = closed[:-1] + if begin == closed[0]: + expected_right = closed[1:] - tm.assert_index_equal(expected_left, left) - tm.assert_index_equal(expected_right, right) + tm.assert_index_equal(expected_left, left) + tm.assert_index_equal(expected_right, right) def test_range_closed_with_tz_aware_start_end(self): # GH12409, GH12684 @@ -546,28 +543,28 @@ def test_range_closed_with_tz_aware_start_end(self): tm.assert_index_equal(expected_left, left) tm.assert_index_equal(expected_right, right) - def test_range_closed_boundary(self): - # GH 11804 - for closed in ['right', 'left', None]: - right_boundary = date_range('2015-09-12', '2015-12-01', - freq='QS-MAR', closed=closed) - left_boundary = date_range('2015-09-01', '2015-09-12', - freq='QS-MAR', closed=closed) - both_boundary = date_range('2015-09-01', '2015-12-01', - freq='QS-MAR', closed=closed) - expected_right = expected_left = expected_both = both_boundary - - if closed == 'right': - expected_left = both_boundary[1:] - if closed == 'left': - expected_right = both_boundary[:-1] - if closed is None: - expected_right = both_boundary[1:] - expected_left = both_boundary[:-1] - - tm.assert_index_equal(right_boundary, expected_right) - tm.assert_index_equal(left_boundary, expected_left) - tm.assert_index_equal(both_boundary, expected_both) + @pytest.mark.parametrize('closed', ['right', 'left', None]) + def test_range_closed_boundary(self, closed): + # GH#11804 + right_boundary = date_range('2015-09-12', '2015-12-01', + freq='QS-MAR', closed=closed) + left_boundary = date_range('2015-09-01', '2015-09-12', + freq='QS-MAR', closed=closed) + both_boundary = date_range('2015-09-01', '2015-12-01', + freq='QS-MAR', closed=closed) + expected_right = expected_left = expected_both = both_boundary + + if closed == 'right': + expected_left = both_boundary[1:] + if closed == 'left': + expected_right = both_boundary[:-1] + if closed is None: + expected_right = both_boundary[1:] + expected_left = both_boundary[:-1] + + tm.assert_index_equal(right_boundary, expected_right) + tm.assert_index_equal(left_boundary, expected_left) + tm.assert_index_equal(both_boundary, expected_both) def test_years_only(self): # GH 6961 diff --git a/pandas/tests/indexes/datetimes/test_datetime.py b/pandas/tests/indexes/datetimes/test_datetime.py index 05678b0c8dd45..2cf33644377ab 100644 --- a/pandas/tests/indexes/datetimes/test_datetime.py +++ b/pandas/tests/indexes/datetimes/test_datetime.py @@ -356,12 +356,11 @@ def test_does_not_convert_mixed_integer(self): assert cols.dtype == joined.dtype tm.assert_numpy_array_equal(cols.values, joined.values) - def test_join_self(self): + @pytest.mark.parametrize('how', ['outer', 'inner', 'left', 'right']) + def test_join_self(self, how): index = date_range('1/1/2000', periods=10) - kinds = 'outer', 'inner', 'left', 'right' - for kind in kinds: - joined = index.join(index, how=kind) - assert index is joined + joined = index.join(index, how=how) + assert index is joined def assert_index_parameters(self, index): assert index.freq == '40960N' @@ -381,18 +380,17 @@ def test_ns_index(self): freq=index.freq) self.assert_index_parameters(new_index) - def test_join_with_period_index(self): + @pytest.mark.parametrize('how', ['left', 'right', 'inner', 'outer']) + def test_join_with_period_index(self, how): df = tm.makeCustomDataframe( 10, 
10, data_gen_f=lambda *args: np.random.randint(2), c_idx_type='p', r_idx_type='dt') s = df.iloc[:5, 0] - joins = 'left', 'right', 'inner', 'outer' - for join in joins: - with tm.assert_raises_regex(ValueError, - 'can only call with other ' - 'PeriodIndex-ed objects'): - df.columns.join(s.index, how=join) + with tm.assert_raises_regex(ValueError, + 'can only call with other ' + 'PeriodIndex-ed objects'): + df.columns.join(s.index, how=how) def test_factorize(self): idx1 = DatetimeIndex(['2014-01', '2014-01', '2014-02', '2014-02', @@ -439,18 +437,18 @@ def test_factorize(self): tm.assert_numpy_array_equal(arr, exp_arr) tm.assert_index_equal(idx, idx3) - def test_factorize_tz(self): - # GH 13750 - for tz in [None, 'UTC', 'US/Eastern', 'Asia/Tokyo']: - base = pd.date_range('2016-11-05', freq='H', periods=100, tz=tz) - idx = base.repeat(5) + @pytest.mark.parametrize('tz', [None, 'UTC', 'US/Eastern', 'Asia/Tokyo']) + def test_factorize_tz(self, tz): + # GH#13750 + base = pd.date_range('2016-11-05', freq='H', periods=100, tz=tz) + idx = base.repeat(5) - exp_arr = np.arange(100, dtype=np.intp).repeat(5) + exp_arr = np.arange(100, dtype=np.intp).repeat(5) - for obj in [idx, pd.Series(idx)]: - arr, res = obj.factorize() - tm.assert_numpy_array_equal(arr, exp_arr) - tm.assert_index_equal(res, base) + for obj in [idx, pd.Series(idx)]: + arr, res = obj.factorize() + tm.assert_numpy_array_equal(arr, exp_arr) + tm.assert_index_equal(res, base) def test_factorize_dst(self): # GH 13750 diff --git a/pandas/tests/indexes/datetimes/test_indexing.py b/pandas/tests/indexes/datetimes/test_indexing.py index 48ceefd6368c0..a9f1a5e608ac7 100644 --- a/pandas/tests/indexes/datetimes/test_indexing.py +++ b/pandas/tests/indexes/datetimes/test_indexing.py @@ -346,25 +346,25 @@ def test_take_invalid_kwargs(self): indices, mode='clip') # TODO: This method came from test_datetime; de-dup with version above - def test_take2(self): + @pytest.mark.parametrize('tz', [None, 'US/Eastern', 'Asia/Tokyo']) + def test_take2(self, tz): dates = [datetime(2010, 1, 1, 14), datetime(2010, 1, 1, 15), datetime(2010, 1, 1, 17), datetime(2010, 1, 1, 21)] - for tz in [None, 'US/Eastern', 'Asia/Tokyo']: - idx = DatetimeIndex(start='2010-01-01 09:00', - end='2010-02-01 09:00', freq='H', tz=tz, - name='idx') - expected = DatetimeIndex(dates, freq=None, name='idx', tz=tz) + idx = DatetimeIndex(start='2010-01-01 09:00', + end='2010-02-01 09:00', freq='H', tz=tz, + name='idx') + expected = DatetimeIndex(dates, freq=None, name='idx', tz=tz) - taken1 = idx.take([5, 6, 8, 12]) - taken2 = idx[[5, 6, 8, 12]] + taken1 = idx.take([5, 6, 8, 12]) + taken2 = idx[[5, 6, 8, 12]] - for taken in [taken1, taken2]: - tm.assert_index_equal(taken, expected) - assert isinstance(taken, DatetimeIndex) - assert taken.freq is None - assert taken.tz == expected.tz - assert taken.name == expected.name + for taken in [taken1, taken2]: + tm.assert_index_equal(taken, expected) + assert isinstance(taken, DatetimeIndex) + assert taken.freq is None + assert taken.tz == expected.tz + assert taken.name == expected.name def test_take_fill_value(self): # GH 12631 diff --git a/pandas/tests/indexes/datetimes/test_missing.py b/pandas/tests/indexes/datetimes/test_missing.py index adc0b7b3d81e8..c8d47caa7e947 100644 --- a/pandas/tests/indexes/datetimes/test_missing.py +++ b/pandas/tests/indexes/datetimes/test_missing.py @@ -1,50 +1,52 @@ +import pytest + import pandas as pd import pandas.util.testing as tm class TestDatetimeIndex(object): - def test_fillna_datetime64(self): + 
@pytest.mark.parametrize('tz', ['US/Eastern', 'Asia/Tokyo']) + def test_fillna_datetime64(self, tz): # GH 11343 - for tz in ['US/Eastern', 'Asia/Tokyo']: - idx = pd.DatetimeIndex(['2011-01-01 09:00', pd.NaT, - '2011-01-01 11:00']) - - exp = pd.DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', - '2011-01-01 11:00']) - tm.assert_index_equal( - idx.fillna(pd.Timestamp('2011-01-01 10:00')), exp) - - # tz mismatch - exp = pd.Index([pd.Timestamp('2011-01-01 09:00'), - pd.Timestamp('2011-01-01 10:00', tz=tz), - pd.Timestamp('2011-01-01 11:00')], dtype=object) - tm.assert_index_equal( - idx.fillna(pd.Timestamp('2011-01-01 10:00', tz=tz)), exp) - - # object - exp = pd.Index([pd.Timestamp('2011-01-01 09:00'), 'x', - pd.Timestamp('2011-01-01 11:00')], dtype=object) - tm.assert_index_equal(idx.fillna('x'), exp) - - idx = pd.DatetimeIndex(['2011-01-01 09:00', pd.NaT, - '2011-01-01 11:00'], tz=tz) - - exp = pd.DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', - '2011-01-01 11:00'], tz=tz) - tm.assert_index_equal( - idx.fillna(pd.Timestamp('2011-01-01 10:00', tz=tz)), exp) - - exp = pd.Index([pd.Timestamp('2011-01-01 09:00', tz=tz), - pd.Timestamp('2011-01-01 10:00'), - pd.Timestamp('2011-01-01 11:00', tz=tz)], - dtype=object) - tm.assert_index_equal( - idx.fillna(pd.Timestamp('2011-01-01 10:00')), exp) - - # object - exp = pd.Index([pd.Timestamp('2011-01-01 09:00', tz=tz), - 'x', - pd.Timestamp('2011-01-01 11:00', tz=tz)], - dtype=object) - tm.assert_index_equal(idx.fillna('x'), exp) + idx = pd.DatetimeIndex(['2011-01-01 09:00', pd.NaT, + '2011-01-01 11:00']) + + exp = pd.DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', + '2011-01-01 11:00']) + tm.assert_index_equal( + idx.fillna(pd.Timestamp('2011-01-01 10:00')), exp) + + # tz mismatch + exp = pd.Index([pd.Timestamp('2011-01-01 09:00'), + pd.Timestamp('2011-01-01 10:00', tz=tz), + pd.Timestamp('2011-01-01 11:00')], dtype=object) + tm.assert_index_equal( + idx.fillna(pd.Timestamp('2011-01-01 10:00', tz=tz)), exp) + + # object + exp = pd.Index([pd.Timestamp('2011-01-01 09:00'), 'x', + pd.Timestamp('2011-01-01 11:00')], dtype=object) + tm.assert_index_equal(idx.fillna('x'), exp) + + idx = pd.DatetimeIndex(['2011-01-01 09:00', pd.NaT, + '2011-01-01 11:00'], tz=tz) + + exp = pd.DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', + '2011-01-01 11:00'], tz=tz) + tm.assert_index_equal( + idx.fillna(pd.Timestamp('2011-01-01 10:00', tz=tz)), exp) + + exp = pd.Index([pd.Timestamp('2011-01-01 09:00', tz=tz), + pd.Timestamp('2011-01-01 10:00'), + pd.Timestamp('2011-01-01 11:00', tz=tz)], + dtype=object) + tm.assert_index_equal( + idx.fillna(pd.Timestamp('2011-01-01 10:00')), exp) + + # object + exp = pd.Index([pd.Timestamp('2011-01-01 09:00', tz=tz), + 'x', + pd.Timestamp('2011-01-01 11:00', tz=tz)], + dtype=object) + tm.assert_index_equal(idx.fillna('x'), exp) diff --git a/pandas/tests/indexes/datetimes/test_ops.py b/pandas/tests/indexes/datetimes/test_ops.py index b42cd454803b8..ed7e425924097 100644 --- a/pandas/tests/indexes/datetimes/test_ops.py +++ b/pandas/tests/indexes/datetimes/test_ops.py @@ -96,111 +96,111 @@ def test_numpy_minmax(self): tm.assert_raises_regex( ValueError, errmsg, np.argmax, dr, out=0) - def test_repeat_range(self): + @pytest.mark.parametrize('tz', tz) + def test_repeat_range(self, tz): rng = date_range('1/1/2000', '1/1/2001') result = rng.repeat(5) assert result.freq is None assert len(result) == 5 * len(rng) - for tz in self.tz: - index = pd.date_range('2001-01-01', periods=2, freq='D', tz=tz) - exp = 
pd.DatetimeIndex(['2001-01-01', '2001-01-01', - '2001-01-02', '2001-01-02'], tz=tz) - for res in [index.repeat(2), np.repeat(index, 2)]: - tm.assert_index_equal(res, exp) - assert res.freq is None - - index = pd.date_range('2001-01-01', periods=2, freq='2D', tz=tz) - exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01', - '2001-01-03', '2001-01-03'], tz=tz) - for res in [index.repeat(2), np.repeat(index, 2)]: - tm.assert_index_equal(res, exp) - assert res.freq is None - - index = pd.DatetimeIndex(['2001-01-01', 'NaT', '2003-01-01'], - tz=tz) - exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01', '2001-01-01', - 'NaT', 'NaT', 'NaT', - '2003-01-01', '2003-01-01', '2003-01-01'], - tz=tz) - for res in [index.repeat(3), np.repeat(index, 3)]: - tm.assert_index_equal(res, exp) - assert res.freq is None - - def test_repeat(self): + index = pd.date_range('2001-01-01', periods=2, freq='D', tz=tz) + exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01', + '2001-01-02', '2001-01-02'], tz=tz) + for res in [index.repeat(2), np.repeat(index, 2)]: + tm.assert_index_equal(res, exp) + assert res.freq is None + + index = pd.date_range('2001-01-01', periods=2, freq='2D', tz=tz) + exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01', + '2001-01-03', '2001-01-03'], tz=tz) + for res in [index.repeat(2), np.repeat(index, 2)]: + tm.assert_index_equal(res, exp) + assert res.freq is None + + index = pd.DatetimeIndex(['2001-01-01', 'NaT', '2003-01-01'], + tz=tz) + exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01', '2001-01-01', + 'NaT', 'NaT', 'NaT', + '2003-01-01', '2003-01-01', '2003-01-01'], + tz=tz) + for res in [index.repeat(3), np.repeat(index, 3)]: + tm.assert_index_equal(res, exp) + assert res.freq is None + + @pytest.mark.parametrize('tz', tz) + def test_repeat(self, tz): reps = 2 msg = "the 'axis' parameter is not supported" - for tz in self.tz: - rng = pd.date_range(start='2016-01-01', periods=2, - freq='30Min', tz=tz) - - expected_rng = DatetimeIndex([ - Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'), - Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'), - Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'), - Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'), - ]) - - res = rng.repeat(reps) - tm.assert_index_equal(res, expected_rng) - assert res.freq is None + rng = pd.date_range(start='2016-01-01', periods=2, + freq='30Min', tz=tz) - tm.assert_index_equal(np.repeat(rng, reps), expected_rng) - tm.assert_raises_regex(ValueError, msg, np.repeat, - rng, reps, axis=1) + expected_rng = DatetimeIndex([ + Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'), + Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'), + Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'), + Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'), + ]) - def test_resolution(self): + res = rng.repeat(reps) + tm.assert_index_equal(res, expected_rng) + assert res.freq is None + + tm.assert_index_equal(np.repeat(rng, reps), expected_rng) + tm.assert_raises_regex(ValueError, msg, np.repeat, + rng, reps, axis=1) + + @pytest.mark.parametrize('tz', tz) + def test_resolution(self, tz): for freq, expected in zip(['A', 'Q', 'M', 'D', 'H', 'T', 'S', 'L', 'U'], ['day', 'day', 'day', 'day', 'hour', 'minute', 'second', 'millisecond', 'microsecond']): - for tz in self.tz: - idx = pd.date_range(start='2013-04-01', periods=30, freq=freq, - tz=tz) - assert idx.resolution == expected + idx = pd.date_range(start='2013-04-01', periods=30, freq=freq, + tz=tz) + assert idx.resolution == expected - def test_value_counts_unique(self): + 
@pytest.mark.parametrize('tz', tz) + def test_value_counts_unique(self, tz): # GH 7735 - for tz in self.tz: - idx = pd.date_range('2011-01-01 09:00', freq='H', periods=10) - # create repeated values, 'n'th element is repeated by n+1 times - idx = DatetimeIndex(np.repeat(idx.values, range(1, len(idx) + 1)), - tz=tz) + idx = pd.date_range('2011-01-01 09:00', freq='H', periods=10) + # create repeated values, 'n'th element is repeated by n+1 times + idx = DatetimeIndex(np.repeat(idx.values, range(1, len(idx) + 1)), + tz=tz) - exp_idx = pd.date_range('2011-01-01 18:00', freq='-1H', periods=10, - tz=tz) - expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64') + exp_idx = pd.date_range('2011-01-01 18:00', freq='-1H', periods=10, + tz=tz) + expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64') - for obj in [idx, Series(idx)]: - tm.assert_series_equal(obj.value_counts(), expected) + for obj in [idx, Series(idx)]: + tm.assert_series_equal(obj.value_counts(), expected) - expected = pd.date_range('2011-01-01 09:00', freq='H', periods=10, - tz=tz) - tm.assert_index_equal(idx.unique(), expected) + expected = pd.date_range('2011-01-01 09:00', freq='H', periods=10, + tz=tz) + tm.assert_index_equal(idx.unique(), expected) - idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 09:00', - '2013-01-01 09:00', '2013-01-01 08:00', - '2013-01-01 08:00', pd.NaT], tz=tz) + idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 09:00', + '2013-01-01 09:00', '2013-01-01 08:00', + '2013-01-01 08:00', pd.NaT], tz=tz) - exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00'], - tz=tz) - expected = Series([3, 2], index=exp_idx) + exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00'], + tz=tz) + expected = Series([3, 2], index=exp_idx) - for obj in [idx, Series(idx)]: - tm.assert_series_equal(obj.value_counts(), expected) + for obj in [idx, Series(idx)]: + tm.assert_series_equal(obj.value_counts(), expected) - exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00', - pd.NaT], tz=tz) - expected = Series([3, 2, 1], index=exp_idx) + exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00', + pd.NaT], tz=tz) + expected = Series([3, 2, 1], index=exp_idx) - for obj in [idx, Series(idx)]: - tm.assert_series_equal(obj.value_counts(dropna=False), - expected) + for obj in [idx, Series(idx)]: + tm.assert_series_equal(obj.value_counts(dropna=False), + expected) - tm.assert_index_equal(idx.unique(), exp_idx) + tm.assert_index_equal(idx.unique(), exp_idx) def test_nonunique_contains(self): # GH 9512 @@ -324,15 +324,16 @@ def test_drop_duplicates(self): res = Series(idx).drop_duplicates(keep=False) tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31))) - def test_infer_freq(self): + @pytest.mark.parametrize('freq', [ + 'A', '2A', '-2A', 'Q', '-1Q', 'M', '-1M', 'D', '3D', + '-3D', 'W', '-1W', 'H', '2H', '-2H', 'T', '2T', 'S', + '-3S']) + def test_infer_freq(self, freq): # GH 11018 - for freq in ['A', '2A', '-2A', 'Q', '-1Q', 'M', '-1M', 'D', '3D', - '-3D', 'W', '-1W', 'H', '2H', '-2H', 'T', '2T', 'S', - '-3S']: - idx = pd.date_range('2011-01-01 09:00:00', freq=freq, periods=10) - result = pd.DatetimeIndex(idx.asi8, freq='infer') - tm.assert_index_equal(idx, result) - assert result.freq == freq + idx = pd.date_range('2011-01-01 09:00:00', freq=freq, periods=10) + result = pd.DatetimeIndex(idx.asi8, freq='infer') + tm.assert_index_equal(idx, result) + assert result.freq == freq def test_nat_new(self): idx = pd.date_range('2011-01-01', freq='D', periods=5, name='x') @@ 
-344,57 +345,57 @@ def test_nat_new(self): exp = np.array([tslib.iNaT] * 5, dtype=np.int64) tm.assert_numpy_array_equal(result, exp) - def test_nat(self): + @pytest.mark.parametrize('tz', [None, 'US/Eastern', 'UTC']) + def test_nat(self, tz): assert pd.DatetimeIndex._na_value is pd.NaT assert pd.DatetimeIndex([])._na_value is pd.NaT - for tz in [None, 'US/Eastern', 'UTC']: - idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], tz=tz) - assert idx._can_hold_na + idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], tz=tz) + assert idx._can_hold_na - tm.assert_numpy_array_equal(idx._isnan, np.array([False, False])) - assert not idx.hasnans - tm.assert_numpy_array_equal(idx._nan_idxs, - np.array([], dtype=np.intp)) + tm.assert_numpy_array_equal(idx._isnan, np.array([False, False])) + assert not idx.hasnans + tm.assert_numpy_array_equal(idx._nan_idxs, + np.array([], dtype=np.intp)) - idx = pd.DatetimeIndex(['2011-01-01', 'NaT'], tz=tz) - assert idx._can_hold_na + idx = pd.DatetimeIndex(['2011-01-01', 'NaT'], tz=tz) + assert idx._can_hold_na - tm.assert_numpy_array_equal(idx._isnan, np.array([False, True])) - assert idx.hasnans - tm.assert_numpy_array_equal(idx._nan_idxs, - np.array([1], dtype=np.intp)) + tm.assert_numpy_array_equal(idx._isnan, np.array([False, True])) + assert idx.hasnans + tm.assert_numpy_array_equal(idx._nan_idxs, + np.array([1], dtype=np.intp)) - def test_equals(self): + @pytest.mark.parametrize('tz', [None, 'UTC', 'US/Eastern', 'Asia/Tokyo']) + def test_equals(self, tz): # GH 13107 - for tz in [None, 'UTC', 'US/Eastern', 'Asia/Tokyo']: - idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02', 'NaT']) - assert idx.equals(idx) - assert idx.equals(idx.copy()) - assert idx.equals(idx.astype(object)) - assert idx.astype(object).equals(idx) - assert idx.astype(object).equals(idx.astype(object)) - assert not idx.equals(list(idx)) - assert not idx.equals(pd.Series(idx)) - - idx2 = pd.DatetimeIndex(['2011-01-01', '2011-01-02', 'NaT'], - tz='US/Pacific') - assert not idx.equals(idx2) - assert not idx.equals(idx2.copy()) - assert not idx.equals(idx2.astype(object)) - assert not idx.astype(object).equals(idx2) - assert not idx.equals(list(idx2)) - assert not idx.equals(pd.Series(idx2)) - - # same internal, different tz - idx3 = pd.DatetimeIndex._simple_new(idx.asi8, tz='US/Pacific') - tm.assert_numpy_array_equal(idx.asi8, idx3.asi8) - assert not idx.equals(idx3) - assert not idx.equals(idx3.copy()) - assert not idx.equals(idx3.astype(object)) - assert not idx.astype(object).equals(idx3) - assert not idx.equals(list(idx3)) - assert not idx.equals(pd.Series(idx3)) + idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02', 'NaT']) + assert idx.equals(idx) + assert idx.equals(idx.copy()) + assert idx.equals(idx.astype(object)) + assert idx.astype(object).equals(idx) + assert idx.astype(object).equals(idx.astype(object)) + assert not idx.equals(list(idx)) + assert not idx.equals(pd.Series(idx)) + + idx2 = pd.DatetimeIndex(['2011-01-01', '2011-01-02', 'NaT'], + tz='US/Pacific') + assert not idx.equals(idx2) + assert not idx.equals(idx2.copy()) + assert not idx.equals(idx2.astype(object)) + assert not idx.astype(object).equals(idx2) + assert not idx.equals(list(idx2)) + assert not idx.equals(pd.Series(idx2)) + + # same internal, different tz + idx3 = pd.DatetimeIndex._simple_new(idx.asi8, tz='US/Pacific') + tm.assert_numpy_array_equal(idx.asi8, idx3.asi8) + assert not idx.equals(idx3) + assert not idx.equals(idx3.copy()) + assert not idx.equals(idx3.astype(object)) + assert not 
idx.astype(object).equals(idx3) + assert not idx.equals(list(idx3)) + assert not idx.equals(pd.Series(idx3)) class TestBusinessDatetimeIndex(object): diff --git a/pandas/tests/indexes/datetimes/test_scalar_compat.py b/pandas/tests/indexes/datetimes/test_scalar_compat.py index 83e7a0cd68d63..6f0756949edc6 100644 --- a/pandas/tests/indexes/datetimes/test_scalar_compat.py +++ b/pandas/tests/indexes/datetimes/test_scalar_compat.py @@ -38,18 +38,21 @@ def test_dti_date_out_of_range(self): pytest.raises(ValueError, DatetimeIndex, ['1400-01-01']) pytest.raises(ValueError, DatetimeIndex, [datetime(1400, 1, 1)]) - def test_dti_timestamp_fields(self): + @pytest.mark.parametrize('field', [ + 'dayofweek', 'dayofyear', 'week', 'weekofyear', 'quarter', + 'days_in_month', 'is_month_start', 'is_month_end', + 'is_quarter_start', 'is_quarter_end', 'is_year_start', + 'is_year_end', 'weekday_name']) + def test_dti_timestamp_fields(self, field): # extra fields from DatetimeIndex like quarter and week idx = tm.makeDateIndex(100) + expected = getattr(idx, field)[-1] + result = getattr(Timestamp(idx[-1]), field) + assert result == expected - fields = ['dayofweek', 'dayofyear', 'week', 'weekofyear', 'quarter', - 'days_in_month', 'is_month_start', 'is_month_end', - 'is_quarter_start', 'is_quarter_end', 'is_year_start', - 'is_year_end', 'weekday_name'] - for f in fields: - expected = getattr(idx, f)[-1] - result = getattr(Timestamp(idx[-1]), f) - assert result == expected + def test_dti_timestamp_freq_fields(self): + # extra fields from DatetimeIndex like quarter and week + idx = tm.makeDateIndex(100) assert idx.freq == Timestamp(idx[-1], idx.freq).freq assert idx.freqstr == Timestamp(idx[-1], idx.freq).freqstr diff --git a/pandas/tests/indexes/datetimes/test_timezones.py b/pandas/tests/indexes/datetimes/test_timezones.py index 62854676d43be..217610b76cf0f 100644 --- a/pandas/tests/indexes/datetimes/test_timezones.py +++ b/pandas/tests/indexes/datetimes/test_timezones.py @@ -170,17 +170,17 @@ def test_dti_tz_convert_hour_overflow_dst_timestamps(self, tz): expected = Index([9, 9, 9]) tm.assert_index_equal(ut.hour, expected) - def test_dti_tz_convert_trans_pos_plus_1__bug(self): + @pytest.mark.parametrize('freq, n', [('H', 1), ('T', 60), ('S', 3600)]) + def test_dti_tz_convert_trans_pos_plus_1__bug(self, freq, n): # Regression test for tslib.tz_convert(vals, tz1, tz2). # See https://github.com/pandas-dev/pandas/issues/4496 for details. 
- for freq, n in [('H', 1), ('T', 60), ('S', 3600)]: - idx = date_range(datetime(2011, 3, 26, 23), - datetime(2011, 3, 27, 1), freq=freq) - idx = idx.tz_localize('UTC') - idx = idx.tz_convert('Europe/Moscow') + idx = date_range(datetime(2011, 3, 26, 23), + datetime(2011, 3, 27, 1), freq=freq) + idx = idx.tz_localize('UTC') + idx = idx.tz_convert('Europe/Moscow') - expected = np.repeat(np.array([3, 4, 5]), np.array([n, n, 1])) - tm.assert_index_equal(idx.hour, Index(expected)) + expected = np.repeat(np.array([3, 4, 5]), np.array([n, n, 1])) + tm.assert_index_equal(idx.hour, Index(expected)) def test_dti_tz_convert_dst(self): for freq, n in [('H', 1), ('T', 60), ('S', 3600)]: @@ -700,20 +700,20 @@ def test_dti_tz_constructors(self, tzstr): # ------------------------------------------------------------- # Unsorted - def test_join_utc_convert(self): + @pytest.mark.parametrize('how', ['inner', 'outer', 'left', 'right']) + def test_join_utc_convert(self, how): rng = date_range('1/1/2011', periods=100, freq='H', tz='utc') left = rng.tz_convert('US/Eastern') right = rng.tz_convert('Europe/Berlin') - for how in ['inner', 'outer', 'left', 'right']: - result = left.join(left[:-5], how=how) - assert isinstance(result, DatetimeIndex) - assert result.tz == left.tz + result = left.join(left[:-5], how=how) + assert isinstance(result, DatetimeIndex) + assert result.tz == left.tz - result = left.join(right[:-5], how=how) - assert isinstance(result, DatetimeIndex) - assert result.tz.zone == 'UTC' + result = left.join(right[:-5], how=how) + assert isinstance(result, DatetimeIndex) + assert result.tz.zone == 'UTC' def test_dti_drop_dont_lose_tz(self): # GH#2621 diff --git a/pandas/tests/indexes/datetimes/test_tools.py b/pandas/tests/indexes/datetimes/test_tools.py index b5926933544e8..fbf0977a04d82 100644 --- a/pandas/tests/indexes/datetimes/test_tools.py +++ b/pandas/tests/indexes/datetimes/test_tools.py @@ -1013,18 +1013,20 @@ def test_string_na_nat_conversion(self, cache): assert_series_equal(dresult, expected, check_names=False) assert dresult.name == 'foo' + @pytest.mark.parametrize('dtype', [ + 'datetime64[h]', 'datetime64[m]', + 'datetime64[s]', 'datetime64[ms]', + 'datetime64[us]', 'datetime64[ns]']) @pytest.mark.parametrize('cache', [True, False]) - def test_dti_constructor_numpy_timeunits(self, cache): + def test_dti_constructor_numpy_timeunits(self, cache, dtype): # GH 9114 base = pd.to_datetime(['2000-01-01T00:00', '2000-01-02T00:00', 'NaT'], cache=cache) - for dtype in ['datetime64[h]', 'datetime64[m]', 'datetime64[s]', - 'datetime64[ms]', 'datetime64[us]', 'datetime64[ns]']: - values = base.values.astype(dtype) + values = base.values.astype(dtype) - tm.assert_index_equal(DatetimeIndex(values), base) - tm.assert_index_equal(to_datetime(values, cache=cache), base) + tm.assert_index_equal(DatetimeIndex(values), base) + tm.assert_index_equal(to_datetime(values, cache=cache), base) @pytest.mark.parametrize('cache', [True, False]) def test_dayfirst(self, cache): diff --git a/pandas/tests/indexes/period/test_arithmetic.py b/pandas/tests/indexes/period/test_arithmetic.py index 5f8f9533e9c44..e16d346542b9e 100644 --- a/pandas/tests/indexes/period/test_arithmetic.py +++ b/pandas/tests/indexes/period/test_arithmetic.py @@ -613,7 +613,8 @@ def test_pi_ops(self): exp = pd.Index([0, -1, -2, -3], name='idx') tm.assert_index_equal(result, exp) - def test_pi_ops_errors(self): + @pytest.mark.parametrize('ng', ["str", 1.5]) + def test_pi_ops_errors(self, ng): idx = PeriodIndex(['2011-01', '2011-02', '2011-03', 
'2011-04'], freq='M', name='idx') ser = pd.Series(idx) @@ -621,34 +622,33 @@ def test_pi_ops_errors(self): msg = r"unsupported operand type\(s\)" for obj in [idx, ser]: - for ng in ["str", 1.5]: - with tm.assert_raises_regex(TypeError, msg): - obj + ng + with tm.assert_raises_regex(TypeError, msg): + obj + ng - with pytest.raises(TypeError): - # error message differs between PY2 and 3 - ng + obj + with pytest.raises(TypeError): + # error message differs between PY2 and 3 + ng + obj + + with tm.assert_raises_regex(TypeError, msg): + obj - ng - with tm.assert_raises_regex(TypeError, msg): - obj - ng + with pytest.raises(TypeError): + np.add(obj, ng) + if _np_version_under1p10: + assert np.add(ng, obj) is NotImplemented + else: with pytest.raises(TypeError): - np.add(obj, ng) + np.add(ng, obj) - if _np_version_under1p10: - assert np.add(ng, obj) is NotImplemented - else: - with pytest.raises(TypeError): - np.add(ng, obj) + with pytest.raises(TypeError): + np.subtract(obj, ng) + if _np_version_under1p10: + assert np.subtract(ng, obj) is NotImplemented + else: with pytest.raises(TypeError): - np.subtract(obj, ng) - - if _np_version_under1p10: - assert np.subtract(ng, obj) is NotImplemented - else: - with pytest.raises(TypeError): - np.subtract(ng, obj) + np.subtract(ng, obj) def test_pi_ops_nat(self): idx = PeriodIndex(['2011-01', '2011-02', 'NaT', '2011-04'], diff --git a/pandas/tests/indexes/period/test_asfreq.py b/pandas/tests/indexes/period/test_asfreq.py index c8724b2a3bc91..ea59a57069faa 100644 --- a/pandas/tests/indexes/period/test_asfreq.py +++ b/pandas/tests/indexes/period/test_asfreq.py @@ -8,9 +8,6 @@ class TestPeriodIndex(object): - def setup_method(self, method): - pass - def test_asfreq(self): pi1 = PeriodIndex(freq='A', start='1/1/2001', end='1/1/2001') pi2 = PeriodIndex(freq='Q', start='1/1/2001', end='1/1/2001') @@ -85,21 +82,21 @@ def test_asfreq_nat(self): expected = PeriodIndex(['2011Q1', '2011Q1', 'NaT', '2011Q2'], freq='Q') tm.assert_index_equal(result, expected) - def test_asfreq_mult_pi(self): + @pytest.mark.parametrize('freq', ['D', '3D']) + def test_asfreq_mult_pi(self, freq): pi = PeriodIndex(['2001-01', '2001-02', 'NaT', '2001-03'], freq='2M') - for freq in ['D', '3D']: - result = pi.asfreq(freq) - exp = PeriodIndex(['2001-02-28', '2001-03-31', 'NaT', - '2001-04-30'], freq=freq) - tm.assert_index_equal(result, exp) - assert result.freq == exp.freq - - result = pi.asfreq(freq, how='S') - exp = PeriodIndex(['2001-01-01', '2001-02-01', 'NaT', - '2001-03-01'], freq=freq) - tm.assert_index_equal(result, exp) - assert result.freq == exp.freq + result = pi.asfreq(freq) + exp = PeriodIndex(['2001-02-28', '2001-03-31', 'NaT', + '2001-04-30'], freq=freq) + tm.assert_index_equal(result, exp) + assert result.freq == exp.freq + + result = pi.asfreq(freq, how='S') + exp = PeriodIndex(['2001-01-01', '2001-02-01', 'NaT', + '2001-03-01'], freq=freq) + tm.assert_index_equal(result, exp) + assert result.freq == exp.freq def test_asfreq_combined_pi(self): pi = pd.PeriodIndex(['2001-01-01 00:00', '2001-01-02 02:00', 'NaT'], diff --git a/pandas/tests/indexes/period/test_construction.py b/pandas/tests/indexes/period/test_construction.py index eca80d17b1dc3..be741592ec7a2 100644 --- a/pandas/tests/indexes/period/test_construction.py +++ b/pandas/tests/indexes/period/test_construction.py @@ -286,14 +286,14 @@ def test_constructor_simple_new_empty(self): result = idx._simple_new(idx, name='p', freq='M') tm.assert_index_equal(result, idx) - def test_constructor_floats(self): - # GH13079 - for 
floats in [[1.1, 2.1], np.array([1.1, 2.1])]: - with pytest.raises(TypeError): - pd.PeriodIndex._simple_new(floats, freq='M') + @pytest.mark.parametrize('floats', [[1.1, 2.1], np.array([1.1, 2.1])]) + def test_constructor_floats(self, floats): + # GH#13079 + with pytest.raises(TypeError): + pd.PeriodIndex._simple_new(floats, freq='M') - with pytest.raises(TypeError): - pd.PeriodIndex(floats, freq='M') + with pytest.raises(TypeError): + pd.PeriodIndex(floats, freq='M') def test_constructor_nat(self): pytest.raises(ValueError, period_range, start='NaT', @@ -343,16 +343,14 @@ def test_constructor_freq_mult(self): with tm.assert_raises_regex(ValueError, msg): period_range('2011-01', periods=3, freq='0M') - def test_constructor_freq_mult_dti_compat(self): - import itertools - mults = [1, 2, 3, 4, 5] - freqs = ['A', 'M', 'D', 'T', 'S'] - for mult, freq in itertools.product(mults, freqs): - freqstr = str(mult) + freq - pidx = PeriodIndex(start='2014-04-01', freq=freqstr, periods=10) - expected = date_range(start='2014-04-01', freq=freqstr, - periods=10).to_period(freqstr) - tm.assert_index_equal(pidx, expected) + @pytest.mark.parametrize('freq', ['A', 'M', 'D', 'T', 'S']) + @pytest.mark.parametrize('mult', [1, 2, 3, 4, 5]) + def test_constructor_freq_mult_dti_compat(self, mult, freq): + freqstr = str(mult) + freq + pidx = PeriodIndex(start='2014-04-01', freq=freqstr, periods=10) + expected = date_range(start='2014-04-01', freq=freqstr, + periods=10).to_period(freqstr) + tm.assert_index_equal(pidx, expected) def test_constructor_freq_combined(self): for freq in ['1D1H', '1H1D']: @@ -445,11 +443,12 @@ def test_constructor_error(self): with tm.assert_raises_regex(ValueError, msg): PeriodIndex(start=start) - def test_recreate_from_data(self): - for o in ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'N', 'H']: - org = PeriodIndex(start='2001/04/01', freq=o, periods=1) - idx = PeriodIndex(org.values, freq=o) - tm.assert_index_equal(idx, org) + @pytest.mark.parametrize('freq', ['M', 'Q', 'A', 'D', 'B', + 'T', 'S', 'L', 'U', 'N', 'H']) + def test_recreate_from_data(self, freq): + org = PeriodIndex(start='2001/04/01', freq=freq, periods=1) + idx = PeriodIndex(org.values, freq=freq) + tm.assert_index_equal(idx, org) def test_map_with_string_constructor(self): raw = [2005, 2007, 2009] diff --git a/pandas/tests/indexes/period/test_ops.py b/pandas/tests/indexes/period/test_ops.py index 3b6641bc7ad5c..7d117b0b626cf 100644 --- a/pandas/tests/indexes/period/test_ops.py +++ b/pandas/tests/indexes/period/test_ops.py @@ -1,5 +1,6 @@ import numpy as np +import pytest import pandas as pd import pandas._libs.tslib as tslib @@ -368,37 +369,37 @@ def test_nat(self): tm.assert_numpy_array_equal(idx._nan_idxs, np.array([1], dtype=np.intp)) - def test_equals(self): - # GH 13107 - for freq in ['D', 'M']: - idx = pd.PeriodIndex(['2011-01-01', '2011-01-02', 'NaT'], - freq=freq) - assert idx.equals(idx) - assert idx.equals(idx.copy()) - assert idx.equals(idx.astype(object)) - assert idx.astype(object).equals(idx) - assert idx.astype(object).equals(idx.astype(object)) - assert not idx.equals(list(idx)) - assert not idx.equals(pd.Series(idx)) - - idx2 = pd.PeriodIndex(['2011-01-01', '2011-01-02', 'NaT'], - freq='H') - assert not idx.equals(idx2) - assert not idx.equals(idx2.copy()) - assert not idx.equals(idx2.astype(object)) - assert not idx.astype(object).equals(idx2) - assert not idx.equals(list(idx2)) - assert not idx.equals(pd.Series(idx2)) - - # same internal, different tz - idx3 = pd.PeriodIndex._simple_new(idx.asi8, 
freq='H') - tm.assert_numpy_array_equal(idx.asi8, idx3.asi8) - assert not idx.equals(idx3) - assert not idx.equals(idx3.copy()) - assert not idx.equals(idx3.astype(object)) - assert not idx.astype(object).equals(idx3) - assert not idx.equals(list(idx3)) - assert not idx.equals(pd.Series(idx3)) + @pytest.mark.parametrize('freq', ['D', 'M']) + def test_equals(self, freq): + # GH#13107 + idx = pd.PeriodIndex(['2011-01-01', '2011-01-02', 'NaT'], + freq=freq) + assert idx.equals(idx) + assert idx.equals(idx.copy()) + assert idx.equals(idx.astype(object)) + assert idx.astype(object).equals(idx) + assert idx.astype(object).equals(idx.astype(object)) + assert not idx.equals(list(idx)) + assert not idx.equals(pd.Series(idx)) + + idx2 = pd.PeriodIndex(['2011-01-01', '2011-01-02', 'NaT'], + freq='H') + assert not idx.equals(idx2) + assert not idx.equals(idx2.copy()) + assert not idx.equals(idx2.astype(object)) + assert not idx.astype(object).equals(idx2) + assert not idx.equals(list(idx2)) + assert not idx.equals(pd.Series(idx2)) + + # same internal, different tz + idx3 = pd.PeriodIndex._simple_new(idx.asi8, freq='H') + tm.assert_numpy_array_equal(idx.asi8, idx3.asi8) + assert not idx.equals(idx3) + assert not idx.equals(idx3.copy()) + assert not idx.equals(idx3.astype(object)) + assert not idx.astype(object).equals(idx3) + assert not idx.equals(list(idx3)) + assert not idx.equals(pd.Series(idx3)) class TestPeriodIndexSeriesMethods(object): diff --git a/pandas/tests/indexes/period/test_period.py b/pandas/tests/indexes/period/test_period.py index 4c0c865928031..dd437363cfc1d 100644 --- a/pandas/tests/indexes/period/test_period.py +++ b/pandas/tests/indexes/period/test_period.py @@ -27,11 +27,11 @@ def create_index(self): def test_pickle_compat_construction(self): pass - def test_pickle_round_trip(self): - for freq in ['D', 'M', 'A']: - idx = PeriodIndex(['2016-05-16', 'NaT', NaT, np.NaN], freq=freq) - result = tm.round_trip_pickle(idx) - tm.assert_index_equal(result, idx) + @pytest.mark.parametrize('freq', ['D', 'M', 'A']) + def test_pickle_round_trip(self, freq): + idx = PeriodIndex(['2016-05-16', 'NaT', NaT, np.NaN], freq=freq) + result = tm.round_trip_pickle(idx) + tm.assert_index_equal(result, idx) @pytest.mark.parametrize('klass', [list, tuple, np.array, Series]) def test_where(self, klass): diff --git a/pandas/tests/indexes/period/test_setops.py b/pandas/tests/indexes/period/test_setops.py index 1ac05f9fa94b7..ec0836dfa174b 100644 --- a/pandas/tests/indexes/period/test_setops.py +++ b/pandas/tests/indexes/period/test_setops.py @@ -14,24 +14,21 @@ def _permute(obj): class TestPeriodIndex(object): - def setup_method(self, method): - pass - - def test_joins(self): + @pytest.mark.parametrize('kind', ['inner', 'outer', 'left', 'right']) + def test_joins(self, kind): index = period_range('1/1/2000', '1/20/2000', freq='D') - for kind in ['inner', 'outer', 'left', 'right']: - joined = index.join(index[:-5], how=kind) + joined = index.join(index[:-5], how=kind) - assert isinstance(joined, PeriodIndex) - assert joined.freq == index.freq + assert isinstance(joined, PeriodIndex) + assert joined.freq == index.freq - def test_join_self(self): + @pytest.mark.parametrize('kind', ['inner', 'outer', 'left', 'right']) + def test_join_self(self, kind): index = period_range('1/1/2000', '1/20/2000', freq='D') - for kind in ['inner', 'outer', 'left', 'right']: - res = index.join(index, how=kind) - assert index is res + res = index.join(index, how=kind) + assert index is res def test_join_does_not_recur(self): df = 
tm.makeCustomDataframe( diff --git a/pandas/tests/indexes/period/test_tools.py b/pandas/tests/indexes/period/test_tools.py index 97500f2f5ed95..38c6f257b2206 100644 --- a/pandas/tests/indexes/period/test_tools.py +++ b/pandas/tests/indexes/period/test_tools.py @@ -1,5 +1,6 @@ import numpy as np from datetime import datetime, timedelta +import pytest import pandas as pd import pandas.util.testing as tm @@ -29,32 +30,10 @@ def test_annual(self): def test_monthly(self): self._check_freq('M', '1970-01') - def test_weekly(self): - self._check_freq('W-THU', '1970-01-01') - - def test_daily(self): - self._check_freq('D', '1970-01-01') - - def test_business_daily(self): - self._check_freq('B', '1970-01-01') - - def test_hourly(self): - self._check_freq('H', '1970-01-01') - - def test_minutely(self): - self._check_freq('T', '1970-01-01') - - def test_secondly(self): - self._check_freq('S', '1970-01-01') - - def test_millisecondly(self): - self._check_freq('L', '1970-01-01') - - def test_microsecondly(self): - self._check_freq('U', '1970-01-01') - - def test_nanosecondly(self): - self._check_freq('N', '1970-01-01') + @pytest.mark.parametrize('freq', ['W-THU', 'D', 'B', 'H', 'T', + 'S', 'L', 'U', 'N']) + def test_freq(self, freq): + self._check_freq(freq, '1970-01-01') def test_negone_ordinals(self): freqs = ['A', 'M', 'Q', 'D', 'H', 'T', 'S'] @@ -75,19 +54,6 @@ def test_negone_ordinals(self): class TestPeriodIndex(object): - - def setup_method(self, method): - pass - - def test_tolist(self): - index = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009') - rs = index.tolist() - for x in rs: - assert isinstance(x, Period) - - recon = PeriodIndex(rs) - tm.assert_index_equal(index, recon) - def test_to_timestamp(self): index = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009') series = Series(1, index=index, name='foo') @@ -129,24 +95,6 @@ def _get_with_delta(delta, freq='A-DEC'): tm.assert_index_equal(result.index, exp_index) assert result.name == 'foo' - def test_to_timestamp_quarterly_bug(self): - years = np.arange(1960, 2000).repeat(4) - quarters = np.tile(lrange(1, 5), 40) - - pindex = PeriodIndex(year=years, quarter=quarters) - - stamps = pindex.to_timestamp('D', 'end') - expected = DatetimeIndex([x.to_timestamp('D', 'end') for x in pindex]) - tm.assert_index_equal(stamps, expected) - - def test_to_timestamp_preserve_name(self): - index = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009', - name='foo') - assert index.name == 'foo' - - conv = index.to_timestamp('D') - assert conv.name == 'foo' - def test_to_timestamp_repr_is_code(self): zs = [Timestamp('99-04-17 00:00:00', tz='UTC'), Timestamp('2001-04-17 00:00:00', tz='UTC'), @@ -155,57 +103,6 @@ def test_to_timestamp_repr_is_code(self): for z in zs: assert eval(repr(z)) == z - def test_to_timestamp_pi_nat(self): - # GH 7228 - index = PeriodIndex(['NaT', '2011-01', '2011-02'], freq='M', - name='idx') - - result = index.to_timestamp('D') - expected = DatetimeIndex([pd.NaT, datetime(2011, 1, 1), - datetime(2011, 2, 1)], name='idx') - tm.assert_index_equal(result, expected) - assert result.name == 'idx' - - result2 = result.to_period(freq='M') - tm.assert_index_equal(result2, index) - assert result2.name == 'idx' - - result3 = result.to_period(freq='3M') - exp = PeriodIndex(['NaT', '2011-01', '2011-02'], freq='3M', name='idx') - tm.assert_index_equal(result3, exp) - assert result3.freqstr == '3M' - - msg = ('Frequency must be positive, because it' - ' represents span: -2A') - with tm.assert_raises_regex(ValueError, msg): - 
result.to_period(freq='-2A') - - def test_to_timestamp_pi_mult(self): - idx = PeriodIndex(['2011-01', 'NaT', '2011-02'], freq='2M', name='idx') - result = idx.to_timestamp() - expected = DatetimeIndex( - ['2011-01-01', 'NaT', '2011-02-01'], name='idx') - tm.assert_index_equal(result, expected) - result = idx.to_timestamp(how='E') - expected = DatetimeIndex( - ['2011-02-28', 'NaT', '2011-03-31'], name='idx') - tm.assert_index_equal(result, expected) - - def test_to_timestamp_pi_combined(self): - idx = PeriodIndex(start='2011', periods=2, freq='1D1H', name='idx') - result = idx.to_timestamp() - expected = DatetimeIndex( - ['2011-01-01 00:00', '2011-01-02 01:00'], name='idx') - tm.assert_index_equal(result, expected) - result = idx.to_timestamp(how='E') - expected = DatetimeIndex( - ['2011-01-02 00:59:59', '2011-01-03 01:59:59'], name='idx') - tm.assert_index_equal(result, expected) - result = idx.to_timestamp(how='E', freq='H') - expected = DatetimeIndex( - ['2011-01-02 00:00', '2011-01-03 01:00'], name='idx') - tm.assert_index_equal(result, expected) - def test_to_timestamp_to_period_astype(self): idx = DatetimeIndex([pd.NaT, '2011-01-01', '2011-02-01'], name='idx') @@ -238,47 +135,26 @@ def test_dti_to_period(self): tm.assert_index_equal(pi3, period_range('1/1/2005', '11/1/2005', freq='M').asfreq('3D')) - def test_period_astype_to_timestamp(self): - pi = pd.PeriodIndex(['2011-01', '2011-02', '2011-03'], freq='M') - - exp = pd.DatetimeIndex(['2011-01-01', '2011-02-01', '2011-03-01']) - tm.assert_index_equal(pi.astype('datetime64[ns]'), exp) - - exp = pd.DatetimeIndex(['2011-01-31', '2011-02-28', '2011-03-31']) - tm.assert_index_equal(pi.astype('datetime64[ns]', how='end'), exp) - - exp = pd.DatetimeIndex(['2011-01-01', '2011-02-01', '2011-03-01'], - tz='US/Eastern') - res = pi.astype('datetime64[ns, US/Eastern]') - tm.assert_index_equal(pi.astype('datetime64[ns, US/Eastern]'), exp) - - exp = pd.DatetimeIndex(['2011-01-31', '2011-02-28', '2011-03-31'], - tz='US/Eastern') - res = pi.astype('datetime64[ns, US/Eastern]', how='end') - tm.assert_index_equal(res, exp) - - def test_to_period_quarterly(self): + @pytest.mark.parametrize('month', MONTHS) + def test_to_period_quarterly(self, month): # make sure we can make the round trip - for month in MONTHS: - freq = 'Q-%s' % month - rng = period_range('1989Q3', '1991Q3', freq=freq) - stamps = rng.to_timestamp() - result = stamps.to_period(freq) - tm.assert_index_equal(rng, result) - - def test_to_period_quarterlyish(self): - offsets = ['BQ', 'QS', 'BQS'] - for off in offsets: - rng = date_range('01-Jan-2012', periods=8, freq=off) - prng = rng.to_period() - assert prng.freq == 'Q-DEC' + freq = 'Q-%s' % month + rng = period_range('1989Q3', '1991Q3', freq=freq) + stamps = rng.to_timestamp() + result = stamps.to_period(freq) + tm.assert_index_equal(rng, result) + + @pytest.mark.parametrize('off', ['BQ', 'QS', 'BQS']) + def test_to_period_quarterlyish(self, off): + rng = date_range('01-Jan-2012', periods=8, freq=off) + prng = rng.to_period() + assert prng.freq == 'Q-DEC' - def test_to_period_annualish(self): - offsets = ['BA', 'AS', 'BAS'] - for off in offsets: - rng = date_range('01-Jan-2012', periods=8, freq=off) - prng = rng.to_period() - assert prng.freq == 'A-DEC' + @pytest.mark.parametrize('off', ['BA', 'AS', 'BAS']) + def test_to_period_annualish(self, off): + rng = date_range('01-Jan-2012', periods=8, freq=off) + prng = rng.to_period() + assert prng.freq == 'A-DEC' def test_to_period_monthish(self): offsets = ['MS', 'BM'] @@ -304,12 +180,6 @@ def 
test_period_dt64_round_trip(self): pi = dti.to_period(freq='H') tm.assert_index_equal(pi.to_timestamp(), dti) - def test_to_timestamp_1703(self): - index = period_range('1/1/2012', periods=4, freq='D') - - result = index.to_timestamp() - assert result[0] == Timestamp('1/1/2012') - def test_combine_first(self): # GH 3367 didx = pd.DatetimeIndex(start='1950-01-31', end='1950-07-31', freq='M') @@ -325,26 +195,137 @@ def test_combine_first(self): dtype=np.float64) tm.assert_series_equal(result, expected) - def test_searchsorted(self): - for freq in ['D', '2D']: - pidx = pd.PeriodIndex(['2014-01-01', '2014-01-02', '2014-01-03', - '2014-01-04', '2014-01-05'], freq=freq) + @pytest.mark.parametrize('freq', ['D', '2D']) + def test_searchsorted(self, freq): + pidx = pd.PeriodIndex(['2014-01-01', '2014-01-02', '2014-01-03', + '2014-01-04', '2014-01-05'], freq=freq) + + p1 = pd.Period('2014-01-01', freq=freq) + assert pidx.searchsorted(p1) == 0 + + p2 = pd.Period('2014-01-04', freq=freq) + assert pidx.searchsorted(p2) == 3 + + msg = "Input has different freq=H from PeriodIndex" + with tm.assert_raises_regex(period.IncompatibleFrequency, msg): + pidx.searchsorted(pd.Period('2014-01-01', freq='H')) + + msg = "Input has different freq=5D from PeriodIndex" + with tm.assert_raises_regex(period.IncompatibleFrequency, msg): + pidx.searchsorted(pd.Period('2014-01-01', freq='5D')) + + with tm.assert_produces_warning(FutureWarning): + pidx.searchsorted(key=p2) + + +class TestPeriodIndexConversion(object): + def test_tolist(self): + index = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009') + rs = index.tolist() + for x in rs: + assert isinstance(x, Period) + + recon = PeriodIndex(rs) + tm.assert_index_equal(index, recon) + + def test_to_timestamp_pi_nat(self): + # GH#7228 + index = PeriodIndex(['NaT', '2011-01', '2011-02'], freq='M', + name='idx') + + result = index.to_timestamp('D') + expected = DatetimeIndex([pd.NaT, datetime(2011, 1, 1), + datetime(2011, 2, 1)], name='idx') + tm.assert_index_equal(result, expected) + assert result.name == 'idx' + + result2 = result.to_period(freq='M') + tm.assert_index_equal(result2, index) + assert result2.name == 'idx' + + result3 = result.to_period(freq='3M') + exp = PeriodIndex(['NaT', '2011-01', '2011-02'], + freq='3M', name='idx') + tm.assert_index_equal(result3, exp) + assert result3.freqstr == '3M' + + msg = ('Frequency must be positive, because it' + ' represents span: -2A') + with tm.assert_raises_regex(ValueError, msg): + result.to_period(freq='-2A') + + def test_to_timestamp_preserve_name(self): + index = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009', + name='foo') + assert index.name == 'foo' + + conv = index.to_timestamp('D') + assert conv.name == 'foo' + + def test_to_timestamp_quarterly_bug(self): + years = np.arange(1960, 2000).repeat(4) + quarters = np.tile(lrange(1, 5), 40) + + pindex = PeriodIndex(year=years, quarter=quarters) + + stamps = pindex.to_timestamp('D', 'end') + expected = DatetimeIndex([x.to_timestamp('D', 'end') for x in pindex]) + tm.assert_index_equal(stamps, expected) + + def test_to_timestamp_pi_mult(self): + idx = PeriodIndex(['2011-01', 'NaT', '2011-02'], + freq='2M', name='idx') + + result = idx.to_timestamp() + expected = DatetimeIndex(['2011-01-01', 'NaT', '2011-02-01'], + name='idx') + tm.assert_index_equal(result, expected) - p1 = pd.Period('2014-01-01', freq=freq) - assert pidx.searchsorted(p1) == 0 + result = idx.to_timestamp(how='E') + expected = DatetimeIndex(['2011-02-28', 'NaT', '2011-03-31'], + name='idx') + 
tm.assert_index_equal(result, expected) - p2 = pd.Period('2014-01-04', freq=freq) - assert pidx.searchsorted(p2) == 3 + def test_to_timestamp_pi_combined(self): + idx = PeriodIndex(start='2011', periods=2, freq='1D1H', name='idx') - msg = "Input has different freq=H from PeriodIndex" - with tm.assert_raises_regex( - period.IncompatibleFrequency, msg): - pidx.searchsorted(pd.Period('2014-01-01', freq='H')) + result = idx.to_timestamp() + expected = DatetimeIndex(['2011-01-01 00:00', '2011-01-02 01:00'], + name='idx') + tm.assert_index_equal(result, expected) - msg = "Input has different freq=5D from PeriodIndex" - with tm.assert_raises_regex( - period.IncompatibleFrequency, msg): - pidx.searchsorted(pd.Period('2014-01-01', freq='5D')) + result = idx.to_timestamp(how='E') + expected = DatetimeIndex(['2011-01-02 00:59:59', + '2011-01-03 01:59:59'], + name='idx') + tm.assert_index_equal(result, expected) - with tm.assert_produces_warning(FutureWarning): - pidx.searchsorted(key=p2) + result = idx.to_timestamp(how='E', freq='H') + expected = DatetimeIndex(['2011-01-02 00:00', '2011-01-03 01:00'], + name='idx') + tm.assert_index_equal(result, expected) + + def test_period_astype_to_timestamp(self): + pi = pd.PeriodIndex(['2011-01', '2011-02', '2011-03'], freq='M') + + exp = pd.DatetimeIndex(['2011-01-01', '2011-02-01', '2011-03-01']) + tm.assert_index_equal(pi.astype('datetime64[ns]'), exp) + + exp = pd.DatetimeIndex(['2011-01-31', '2011-02-28', '2011-03-31']) + tm.assert_index_equal(pi.astype('datetime64[ns]', how='end'), exp) + + exp = pd.DatetimeIndex(['2011-01-01', '2011-02-01', '2011-03-01'], + tz='US/Eastern') + res = pi.astype('datetime64[ns, US/Eastern]') + tm.assert_index_equal(pi.astype('datetime64[ns, US/Eastern]'), exp) + + exp = pd.DatetimeIndex(['2011-01-31', '2011-02-28', '2011-03-31'], + tz='US/Eastern') + res = pi.astype('datetime64[ns, US/Eastern]', how='end') + tm.assert_index_equal(res, exp) + + def test_to_timestamp_1703(self): + index = period_range('1/1/2012', periods=4, freq='D') + + result = index.to_timestamp() + assert result[0] == Timestamp('1/1/2012') diff --git a/pandas/tests/indexes/timedeltas/test_arithmetic.py b/pandas/tests/indexes/timedeltas/test_arithmetic.py index 3dc60ed33b958..19d1a2ec98209 100644 --- a/pandas/tests/indexes/timedeltas/test_arithmetic.py +++ b/pandas/tests/indexes/timedeltas/test_arithmetic.py @@ -654,19 +654,19 @@ def test_dti_tdi_numeric_ops(self): expected = DatetimeIndex(['20121231', pd.NaT, '20130101']) tm.assert_index_equal(result, expected) - def test_sub_period(self): + @pytest.mark.parametrize('freq', [None, 'H']) + def test_sub_period(self, freq): # GH 13078 # not supported, check TypeError p = pd.Period('2011-01-01', freq='D') - for freq in [None, 'H']: - idx = pd.TimedeltaIndex(['1 hours', '2 hours'], freq=freq) + idx = pd.TimedeltaIndex(['1 hours', '2 hours'], freq=freq) - with pytest.raises(TypeError): - idx - p + with pytest.raises(TypeError): + idx - p - with pytest.raises(TypeError): - p - idx + with pytest.raises(TypeError): + p - idx def test_addition_ops(self): # with datetimes/timedelta and tdi/dti diff --git a/pandas/tests/indexes/timedeltas/test_ops.py b/pandas/tests/indexes/timedeltas/test_ops.py index 690ba66b6f5ef..49737e5359c2f 100644 --- a/pandas/tests/indexes/timedeltas/test_ops.py +++ b/pandas/tests/indexes/timedeltas/test_ops.py @@ -227,14 +227,15 @@ def test_drop_duplicates(self): res = Series(idx).drop_duplicates(keep=False) tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31))) - def 
test_infer_freq(self): - # GH 11018 - for freq in ['D', '3D', '-3D', 'H', '2H', '-2H', 'T', '2T', 'S', '-3S' - ]: - idx = pd.timedelta_range('1', freq=freq, periods=10) - result = pd.TimedeltaIndex(idx.asi8, freq='infer') - tm.assert_index_equal(idx, result) - assert result.freq == freq + @pytest.mark.parametrize('freq', ['D', '3D', '-3D', + 'H', '2H', '-2H', + 'T', '2T', 'S', '-3S']) + def test_infer_freq(self, freq): + # GH#11018 + idx = pd.timedelta_range('1', freq=freq, periods=10) + result = pd.TimedeltaIndex(idx.asi8, freq='infer') + tm.assert_index_equal(idx, result) + assert result.freq == freq def test_nat_new(self): diff --git a/pandas/tests/indexes/timedeltas/test_timedelta.py b/pandas/tests/indexes/timedeltas/test_timedelta.py index ce0f3b89b753e..37db9d704aa1f 100644 --- a/pandas/tests/indexes/timedeltas/test_timedelta.py +++ b/pandas/tests/indexes/timedeltas/test_timedelta.py @@ -102,13 +102,11 @@ def test_factorize(self): tm.assert_numpy_array_equal(arr, exp_arr) tm.assert_index_equal(idx, idx3) - def test_join_self(self): - + @pytest.mark.parametrize('kind', ['outer', 'inner', 'left', 'right']) + def test_join_self(self, kind): index = timedelta_range('1 day', periods=10) - kinds = 'outer', 'inner', 'left', 'right' - for kind in kinds: - joined = index.join(index, how=kind) - tm.assert_index_equal(index, joined) + joined = index.join(index, how=kind) + tm.assert_index_equal(index, joined) def test_does_not_convert_mixed_integer(self): df = tm.makeCustomDataframe(10, 10, diff --git a/pandas/tests/scalar/period/test_period.py b/pandas/tests/scalar/period/test_period.py index 41b3bb55bfff1..dff5433adcf79 100644 --- a/pandas/tests/scalar/period/test_period.py +++ b/pandas/tests/scalar/period/test_period.py @@ -20,21 +20,21 @@ class TestPeriodProperties(object): "Test properties such as year, month, weekday, etc...." 
- def test_is_leap_year(self): + @pytest.mark.parametrize('freq', ['A', 'M', 'D', 'H']) + def test_is_leap_year(self, freq): # GH 13727 - for freq in ['A', 'M', 'D', 'H']: - p = Period('2000-01-01 00:00:00', freq=freq) - assert p.is_leap_year - assert isinstance(p.is_leap_year, bool) + p = Period('2000-01-01 00:00:00', freq=freq) + assert p.is_leap_year + assert isinstance(p.is_leap_year, bool) - p = Period('1999-01-01 00:00:00', freq=freq) - assert not p.is_leap_year + p = Period('1999-01-01 00:00:00', freq=freq) + assert not p.is_leap_year - p = Period('2004-01-01 00:00:00', freq=freq) - assert p.is_leap_year + p = Period('2004-01-01 00:00:00', freq=freq) + assert p.is_leap_year - p = Period('2100-01-01 00:00:00', freq=freq) - assert not p.is_leap_year + p = Period('2100-01-01 00:00:00', freq=freq) + assert not p.is_leap_year def test_quarterly_negative_ordinals(self): p = Period(ordinal=-1, freq='Q-DEC') @@ -52,40 +52,40 @@ def test_quarterly_negative_ordinals(self): assert p.month == 11 assert isinstance(p, Period) - def test_period_cons_quarterly(self): + @pytest.mark.parametrize('month', MONTHS) + def test_period_cons_quarterly(self, month): # bugs in scikits.timeseries - for month in MONTHS: - freq = 'Q-%s' % month - exp = Period('1989Q3', freq=freq) - assert '1989Q3' in str(exp) - stamp = exp.to_timestamp('D', how='end') - p = Period(stamp, freq=freq) - assert p == exp - - stamp = exp.to_timestamp('3D', how='end') - p = Period(stamp, freq=freq) - assert p == exp - - def test_period_cons_annual(self): + freq = 'Q-%s' % month + exp = Period('1989Q3', freq=freq) + assert '1989Q3' in str(exp) + stamp = exp.to_timestamp('D', how='end') + p = Period(stamp, freq=freq) + assert p == exp + + stamp = exp.to_timestamp('3D', how='end') + p = Period(stamp, freq=freq) + assert p == exp + + @pytest.mark.parametrize('month', MONTHS) + def test_period_cons_annual(self, month): # bugs in scikits.timeseries - for month in MONTHS: - freq = 'A-%s' % month - exp = Period('1989', freq=freq) - stamp = exp.to_timestamp('D', how='end') + timedelta(days=30) - p = Period(stamp, freq=freq) - assert p == exp + 1 - assert isinstance(p, Period) - - def test_period_cons_weekly(self): - for num in range(10, 17): - daystr = '2011-02-%d' % num - for day in DAYS: - freq = 'W-%s' % day - - result = Period(daystr, freq=freq) - expected = Period(daystr, freq='D').asfreq(freq) - assert result == expected - assert isinstance(result, Period) + freq = 'A-%s' % month + exp = Period('1989', freq=freq) + stamp = exp.to_timestamp('D', how='end') + timedelta(days=30) + p = Period(stamp, freq=freq) + assert p == exp + 1 + assert isinstance(p, Period) + + @pytest.mark.parametrize('day', DAYS) + @pytest.mark.parametrize('num', range(10, 17)) + def test_period_cons_weekly(self, num, day): + daystr = '2011-02-%d' % num + freq = 'W-%s' % day + + result = Period(daystr, freq=freq) + expected = Period(daystr, freq='D').asfreq(freq) + assert result == expected + assert isinstance(result, Period) def test_period_from_ordinal(self): p = pd.Period('2011-01', freq='M') @@ -212,58 +212,59 @@ def test_period_cons_combined(self): with tm.assert_raises_regex(ValueError, msg): Period('2011-01', freq='1D1W') - def test_timestamp_tz_arg(self): - for case in ['Europe/Brussels', 'Asia/Tokyo', 'US/Pacific']: - p = Period('1/1/2005', freq='M').to_timestamp(tz=case) - exp = Timestamp('1/1/2005', tz='UTC').tz_convert(case) - exp_zone = pytz.timezone(case).normalize(p) - - assert p == exp - assert p.tz == exp_zone.tzinfo - assert p.tz == exp.tz - - p = 
Period('1/1/2005', freq='3H').to_timestamp(tz=case) - exp = Timestamp('1/1/2005', tz='UTC').tz_convert(case) - exp_zone = pytz.timezone(case).normalize(p) - - assert p == exp - assert p.tz == exp_zone.tzinfo - assert p.tz == exp.tz - - p = Period('1/1/2005', freq='A').to_timestamp(freq='A', tz=case) - exp = Timestamp('31/12/2005', tz='UTC').tz_convert(case) - exp_zone = pytz.timezone(case).normalize(p) - - assert p == exp - assert p.tz == exp_zone.tzinfo - assert p.tz == exp.tz - - p = Period('1/1/2005', freq='A').to_timestamp(freq='3H', tz=case) - exp = Timestamp('1/1/2005', tz='UTC').tz_convert(case) - exp_zone = pytz.timezone(case).normalize(p) - - assert p == exp - assert p.tz == exp_zone.tzinfo - assert p.tz == exp.tz - - def test_timestamp_tz_arg_dateutil(self): + @pytest.mark.parametrize('tzstr', ['Europe/Brussels', + 'Asia/Tokyo', 'US/Pacific']) + def test_timestamp_tz_arg(self, tzstr): + p = Period('1/1/2005', freq='M').to_timestamp(tz=tzstr) + exp = Timestamp('1/1/2005', tz='UTC').tz_convert(tzstr) + exp_zone = pytz.timezone(tzstr).normalize(p) + + assert p == exp + assert p.tz == exp_zone.tzinfo + assert p.tz == exp.tz + + p = Period('1/1/2005', freq='3H').to_timestamp(tz=tzstr) + exp = Timestamp('1/1/2005', tz='UTC').tz_convert(tzstr) + exp_zone = pytz.timezone(tzstr).normalize(p) + + assert p == exp + assert p.tz == exp_zone.tzinfo + assert p.tz == exp.tz + + p = Period('1/1/2005', freq='A').to_timestamp(freq='A', tz=tzstr) + exp = Timestamp('31/12/2005', tz='UTC').tz_convert(tzstr) + exp_zone = pytz.timezone(tzstr).normalize(p) + + assert p == exp + assert p.tz == exp_zone.tzinfo + assert p.tz == exp.tz + + p = Period('1/1/2005', freq='A').to_timestamp(freq='3H', tz=tzstr) + exp = Timestamp('1/1/2005', tz='UTC').tz_convert(tzstr) + exp_zone = pytz.timezone(tzstr).normalize(p) + + assert p == exp + assert p.tz == exp_zone.tzinfo + assert p.tz == exp.tz + + @pytest.mark.parametrize('tzstr', ['dateutil/Europe/Brussels', + 'dateutil/Asia/Tokyo', + 'dateutil/US/Pacific']) + def test_timestamp_tz_arg_dateutil(self, tzstr): from pandas._libs.tslibs.timezones import dateutil_gettz from pandas._libs.tslibs.timezones import maybe_get_tz - for case in ['dateutil/Europe/Brussels', 'dateutil/Asia/Tokyo', - 'dateutil/US/Pacific']: - p = Period('1/1/2005', freq='M').to_timestamp( - tz=maybe_get_tz(case)) - exp = Timestamp('1/1/2005', tz='UTC').tz_convert(case) - assert p == exp - assert p.tz == dateutil_gettz(case.split('/', 1)[1]) - assert p.tz == exp.tz - - p = Period('1/1/2005', - freq='M').to_timestamp(freq='3H', tz=maybe_get_tz(case)) - exp = Timestamp('1/1/2005', tz='UTC').tz_convert(case) - assert p == exp - assert p.tz == dateutil_gettz(case.split('/', 1)[1]) - assert p.tz == exp.tz + tz = maybe_get_tz(tzstr) + p = Period('1/1/2005', freq='M').to_timestamp(tz=tz) + exp = Timestamp('1/1/2005', tz='UTC').tz_convert(tzstr) + assert p == exp + assert p.tz == dateutil_gettz(tzstr.split('/', 1)[1]) + assert p.tz == exp.tz + + p = Period('1/1/2005', freq='M').to_timestamp(freq='3H', tz=tz) + exp = Timestamp('1/1/2005', tz='UTC').tz_convert(tzstr) + assert p == exp + assert p.tz == dateutil_gettz(tzstr.split('/', 1)[1]) + assert p.tz == exp.tz def test_timestamp_tz_arg_dateutil_from_string(self): from pandas._libs.tslibs.timezones import dateutil_gettz @@ -1403,14 +1404,14 @@ def test_sub_offset_nat(self): timedelta(hours=23, minutes=30)]: assert p - o is tslib.NaT - def test_nat_ops(self): - for freq in ['M', '2M', '3M']: - p = Period('NaT', freq=freq) - assert p + 1 is tslib.NaT - assert 
1 + p is tslib.NaT - assert p - 1 is tslib.NaT - assert p - Period('2011-01', freq=freq) is tslib.NaT - assert Period('2011-01', freq=freq) - p is tslib.NaT + @pytest.mark.parametrize('freq', ['M', '2M', '3M']) + def test_nat_ops(self, freq): + p = Period('NaT', freq=freq) + assert p + 1 is tslib.NaT + assert 1 + p is tslib.NaT + assert p - 1 is tslib.NaT + assert p - Period('2011-01', freq=freq) is tslib.NaT + assert Period('2011-01', freq=freq) - p is tslib.NaT def test_period_ops_offset(self): p = Period('2011-04-01', freq='D')
What it says on the tin. Because this is a big diff, nothing is moved or changed other than parametrizing tests.
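For readers unfamiliar with the pattern, here is a minimal sketch (not part of the PR) of the loop-to-parametrize conversion applied throughout this diff; each parametrized case is collected and reported by pytest as its own test:

```
import pytest
from pandas import Period, NaT

# Before: one test body loops over cases, so the first failing
# freq masks the remaining ones.
def test_nat_ops_loop():
    for freq in ['M', '2M', '3M']:
        p = Period('NaT', freq=freq)
        assert p + 1 is NaT

# After: pytest generates one independently-reported test per freq.
@pytest.mark.parametrize('freq', ['M', '2M', '3M'])
def test_nat_ops(freq):
    p = Period('NaT', freq=freq)
    assert p + 1 is NaT
```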
https://api.github.com/repos/pandas-dev/pandas/pulls/19785
2018-02-20T06:06:59Z
2018-02-22T00:11:32Z
2018-02-22T00:11:32Z
2018-06-22T03:23:57Z
DOC: correct Series.searchsorted example
diff --git a/pandas/core/base.py b/pandas/core/base.py index 0ca029ffd4c25..ebd69a5f9aac1 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -1165,21 +1165,16 @@ def factorize(self, sort=False, na_sentinel=-1): >>> x.searchsorted([1, 3], side='right') array([1, 3]) - >>> x = pd.Categorical(['apple', 'bread', 'bread', 'cheese', 'milk' ]) + >>> x = pd.Categorical(['apple', 'bread', 'bread', + 'cheese', 'milk'], ordered=True) [apple, bread, bread, cheese, milk] Categories (4, object): [apple < bread < cheese < milk] >>> x.searchsorted('bread') array([1]) # Note: an array, not a scalar - >>> x.searchsorted(['bread']) - array([1]) - - >>> x.searchsorted(['bread', 'eggs']) - array([1, 4]) - - >>> x.searchsorted(['bread', 'eggs'], side='right') - array([3, 4]) # eggs before milk + >>> x.searchsorted(['bread'], side='right') + array([3]) """) @Substitution(klass='IndexOpsMixin')
two major changes: - categories must be ordered to use `Series.searchsorted`. - delete the last two examples because they raise `ValueError: Value(s) to be inserted must be in categories`. - [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
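A minimal sketch of the corrected example (assuming a pandas of this era; the exact error message may vary by version):

```
import pandas as pd

# searchsorted needs a total order on the categories, hence ordered=True.
x = pd.Categorical(['apple', 'bread', 'bread', 'cheese', 'milk'],
                   ordered=True)
x.searchsorted(['bread'], side='right')  # array([3])

# Looking up a value that is not among the categories is what raised
# the ValueError quoted above, so those examples were removed:
x.searchsorted(['bread', 'eggs'])  # ValueError: Value(s) to be inserted
                                   # must be in categories.
```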
https://api.github.com/repos/pandas-dev/pandas/pulls/19784
2018-02-20T05:04:00Z
2018-02-21T10:38:42Z
2018-02-21T10:38:42Z
2018-02-21T14:04:57Z
BUG: GH19458 fixes precision issue in TimeDelta.total_seconds()
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index f31d0a5a0667c..349d7607559c5 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -733,6 +733,7 @@ Datetimelike - Bug in :func:`to_datetime` where passing an out-of-bounds datetime with ``errors='coerce'`` and ``utc=True`` would raise ``OutOfBoundsDatetime`` instead of parsing to ``NaT`` (:issue:`19612`) - Bug in :func:`Timedelta.__add__`, :func:`Timedelta.__sub__` where adding or subtracting a ``np.timedelta64`` object would return another ``np.timedelta64`` instead of a ``Timedelta`` (:issue:`19738`) - Bug in :func:`Timedelta.__floordiv__`, :func:`Timedelta.__rfloordiv__` where operating with a ``Tick`` object would raise a ``TypeError`` instead of returning a numeric value (:issue:`19738`) +- Bug in :func:`Timedelta.total_seconds()` causing precision errors i.e. `Timedelta('30S').total_seconds()==30.000000000000004` (:issue:`19458`) Timezones diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx index 4483225e1801d..78fdeb988e0f2 100644 --- a/pandas/_libs/tslibs/timedeltas.pyx +++ b/pandas/_libs/tslibs/timedeltas.pyx @@ -739,7 +739,7 @@ cdef class _Timedelta(timedelta): """ Total duration of timedelta in seconds (to ns precision) """ - return 1e-9 * self.value + return self.value / 1e9 def view(self, dtype): """ array view compat """ diff --git a/pandas/tests/scalar/timedelta/test_timedelta.py b/pandas/tests/scalar/timedelta/test_timedelta.py index 0f7fb84c6520b..4257c610fb960 100644 --- a/pandas/tests/scalar/timedelta/test_timedelta.py +++ b/pandas/tests/scalar/timedelta/test_timedelta.py @@ -512,6 +512,15 @@ def test_implementation_limits(self): with pytest.raises(OverflowError): Timedelta(max_td.value + 1, 'ns') + def test_total_seconds_precision(self): + # GH 19458 + assert Timedelta('30S').total_seconds() == 30.0 + assert Timedelta('0').total_seconds() == 0.0 + assert Timedelta('-2S').total_seconds() == -2.0 + assert Timedelta('5.324S').total_seconds() == 5.324 + assert (Timedelta('30S').total_seconds() - 30.0) < 1e-20 + assert (30.0 - Timedelta('30S').total_seconds()) < 1e-20 + def test_timedelta_arithmetic(self): data = pd.Series(['nat', '32 days'], dtype='timedelta64[ns]') deltas = [timedelta(days=1), Timedelta(1, unit='D')]
- [X] closes #19458 - [X] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [X] whatsnew entry
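The root cause is floating point: `1e9` is an integer that doubles represent exactly, while `1e-9` has no exact binary representation, so multiplying by the approximate reciprocal picks up rounding error. A minimal sketch of the difference:

```
value = 30 * 10**9    # Timedelta('30S').value: 30 seconds in nanoseconds

print(value * 1e-9)   # 30.000000000000004  (old code path, the bug)
print(value / 1e9)    # 30.0                (new code path, the fix)
```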
https://api.github.com/repos/pandas-dev/pandas/pulls/19783
2018-02-20T04:01:37Z
2018-02-20T11:20:47Z
2018-02-20T11:20:46Z
2018-02-20T12:10:09Z
Sparse Ops Cleanup
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index b96af6af3707f..1aeda86c55eff 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -3995,7 +3995,7 @@ def _combine_const(self, other, func, errors='raise', try_cast=True): try_cast=try_cast) return self._constructor(new_data) - def _compare_frame(self, other, func, str_rep, try_cast=True): + def _compare_frame(self, other, func, str_rep): # compare_frame assumes self._indexed_same(other) import pandas.core.computation.expressions as expressions diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py index 4b543262fc485..41e499da8e008 100644 --- a/pandas/core/indexes/timedeltas.py +++ b/pandas/core/indexes/timedeltas.py @@ -928,7 +928,7 @@ def insert(self, loc, item): def delete(self, loc): """ - Make a new DatetimeIndex with passed location(s) deleted. + Make a new TimedeltaIndex with passed location(s) deleted. Parameters ---------- diff --git a/pandas/core/ops.py b/pandas/core/ops.py index da65f1f31ed2a..ad6102eb6ad0f 100644 --- a/pandas/core/ops.py +++ b/pandas/core/ops.py @@ -721,9 +721,7 @@ def add_flex_arithmetic_methods(cls, flex_arith_method, flex_comp_method=None): subtract=new_methods['sub'], divide=new_methods['div'])) # opt out of bool flex methods for now - for k in ('ror_', 'rxor', 'rand_'): - if k in new_methods: - new_methods.pop(k) + assert not any(kname in new_methods for kname in ('ror_', 'rxor', 'rand_')) add_methods(cls, new_methods=new_methods) @@ -1080,19 +1078,19 @@ def na_op(x, y): try: result = lib.scalar_binop(x, y, op) except: - msg = ("cannot compare a dtyped [{dtype}] array " - "with a scalar of type [{type}]" - ).format(dtype=x.dtype, type=type(y).__name__) - raise TypeError(msg) + raise TypeError("cannot compare a dtyped [{dtype}] array " + "with a scalar of type [{typ}]" + .format(dtype=x.dtype, + typ=type(y).__name__)) return result + fill_int = lambda x: x.fillna(0) + fill_bool = lambda x: x.fillna(False).astype(bool) + def wrapper(self, other): is_self_int_dtype = is_integer_dtype(self.dtype) - fill_int = lambda x: x.fillna(0) - fill_bool = lambda x: x.fillna(False).astype(bool) - self, other = _align_method_SERIES(self, other, align_asobject=True) if isinstance(other, ABCDataFrame): @@ -1232,10 +1230,10 @@ def to_series(right): elif right.ndim == 2: if left.shape != right.shape: - msg = ("Unable to coerce to DataFrame, shape " - "must be {req_shape}: given {given_shape}" - ).format(req_shape=left.shape, given_shape=right.shape) - raise ValueError(msg) + raise ValueError("Unable to coerce to DataFrame, shape " + "must be {req_shape}: given {given_shape}" + .format(req_shape=left.shape, + given_shape=right.shape)) right = left._constructor(right, index=left.index, columns=left.columns) @@ -1293,8 +1291,8 @@ def na_op(x, y): result[mask] = op(xrav, y) else: raise TypeError("cannot perform operation {op} between " - "objects of type {x} and {y}".format( - op=name, x=type(x), y=type(y))) + "objects of type {x} and {y}" + .format(op=name, x=type(x), y=type(y))) result, changed = maybe_upcast_putmask(result, ~mask, np.nan) result = result.reshape(x.shape) @@ -1355,7 +1353,7 @@ def f(self, other, axis=default_axis, level=None): if not self._indexed_same(other): self, other = self.align(other, 'outer', level=level, copy=False) - return self._compare_frame(other, na_op, str_rep, try_cast=False) + return self._compare_frame(other, na_op, str_rep) elif isinstance(other, ABCSeries): return _combine_series_frame(self, other, na_op, @@ -1380,7 +1378,7 @@ def f(self, 
other): if not self._indexed_same(other): raise ValueError('Can only compare identically-labeled ' 'DataFrame objects') - return self._compare_frame(other, func, str_rep, try_cast=True) + return self._compare_frame(other, func, str_rep) elif isinstance(other, ABCSeries): return _combine_series_frame(self, other, func, @@ -1532,10 +1530,6 @@ def wrapper(self, other): .format(other=type(other))) wrapper.__name__ = name - if name.startswith("__"): - # strip special method names, e.g. `__add__` needs to be `add` when - # passed to _sparse_series_op - name = name[2:-2] return wrapper @@ -1568,7 +1562,7 @@ def wrapper(self, other): dtype = getattr(other, 'dtype', None) other = SparseArray(other, fill_value=self.fill_value, dtype=dtype) - return _sparse_array_op(self, other, op, name) + return _sparse_array_op(self, other, op, name, series=False) elif is_scalar(other): with np.errstate(all='ignore'): fill = op(_get_fill(self), np.asarray(other)) @@ -1579,8 +1573,6 @@ def wrapper(self, other): raise TypeError('operation with {other} not supported' .format(other=type(other))) - if name.startswith("__"): - name = name[2:-2] wrapper.__name__ = name return wrapper @@ -1591,4 +1583,5 @@ def wrapper(self, other): sparse_series_special_funcs = dict(arith_method=_arith_method_SPARSE_SERIES, comp_method=_arith_method_SPARSE_SERIES, - bool_method=None) + bool_method=_bool_method_SERIES) +# TODO: I don't think the functions defined by bool_method are tested diff --git a/pandas/core/sparse/array.py b/pandas/core/sparse/array.py index 4f7152666f7bf..92c4fe932f066 100644 --- a/pandas/core/sparse/array.py +++ b/pandas/core/sparse/array.py @@ -54,6 +54,9 @@ def _get_fill(arr): def _sparse_array_op(left, right, op, name, series=False): + if name.startswith('__'): + # For lookups in _libs.sparse we need non-dunder op name + name = name[2:-2] if series and is_integer_dtype(left) and is_integer_dtype(right): # series coerces to float64 if result should have NaN/inf @@ -119,6 +122,10 @@ def _sparse_array_op(left, right, op, name, series=False): def _wrap_result(name, data, sparse_index, fill_value, dtype=None): """ wrap op result to have correct dtype """ + if name.startswith('__'): + # e.g. __eq__ --> eq + name = name[2:-2] + if name in ('eq', 'ne', 'lt', 'gt', 'le', 'ge'): dtype = np.bool diff --git a/pandas/core/sparse/frame.py b/pandas/core/sparse/frame.py index 19b126216db81..872a17d8dbabe 100644 --- a/pandas/core/sparse/frame.py +++ b/pandas/core/sparse/frame.py @@ -551,7 +551,6 @@ def _combine_frame(self, other, func, fill_value=None, level=None): return self._constructor(index=new_index).__finalize__(self) new_data = {} - new_fill_value = None if fill_value is not None: # TODO: be a bit more intelligent here for col in new_columns: @@ -568,6 +567,7 @@ def _combine_frame(self, other, func, fill_value=None, level=None): new_data[col] = func(this[col], other[col]) # if the fill values are the same use them? 
or use a valid one + new_fill_value = None other_fill_value = getattr(other, 'default_fill_value', np.nan) if self.default_fill_value == other_fill_value: new_fill_value = self.default_fill_value diff --git a/pandas/core/sparse/series.py b/pandas/core/sparse/series.py index 335a4c80adc63..26cf9dbadbbf2 100644 --- a/pandas/core/sparse/series.py +++ b/pandas/core/sparse/series.py @@ -811,10 +811,7 @@ def from_coo(cls, A, dense_index=False): return _coo_to_sparse_series(A, dense_index=dense_index) -# overwrite series methods with unaccelerated versions -ops.add_special_arithmetic_methods(SparseSeries, **ops.series_special_funcs) +# overwrite series methods with unaccelerated Sparse-specific versions ops.add_flex_arithmetic_methods(SparseSeries, **ops.series_flex_funcs) -# overwrite basic arithmetic to use SparseSeries version -# force methods to overwrite previous definitions. ops.add_special_arithmetic_methods(SparseSeries, **ops.sparse_series_special_funcs)
This is in preparation for implementing the direct-pinning discussed [here](https://github.com/pandas-dev/pandas/pull/19649#discussion_r167527010). The main roadblock to that is SparseSeries which calls ops.add_special_arithmetic_methods twice. This PR gets rid of the double-call, so the follow-up will be straightforward. A handful of cleanups are done here too: - unrelated docstring fix for TimedeltaIndex.delete - minor formatting improvements - remove an unused kwarg for DataFrame._compare_frame - remove a loop in add_flex_arithmetic_methods, replace with an assertion that it is unnecessary - fix some SparseArray method names, e.g. ATM `SparseArray.__eq__.__name__ == 'eq'`
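A sketch (with a hypothetical helper name) of the dunder-stripping this PR centralizes in `_sparse_array_op`: it previously happened in the method wrappers themselves, which left e.g. `SparseArray.__eq__.__name__` set to `'eq'`:

```
def strip_dunder(name):
    # '__eq__' -> 'eq'; non-dunder names pass through untouched.
    # The short name is only needed for lookups in pandas._libs.sparse.
    if name.startswith('__'):
        return name[2:-2]
    return name

assert strip_dunder('__eq__') == 'eq'
assert strip_dunder('add') == 'add'
```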
https://api.github.com/repos/pandas-dev/pandas/pulls/19782
2018-02-20T03:14:27Z
2018-02-21T00:15:57Z
2018-02-21T00:15:57Z
2018-02-21T00:18:17Z
DOC: RangeIndex as default index
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index b96af6af3707f..b25dbe4fac6da 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -251,11 +251,11 @@ class DataFrame(NDFrame): data : numpy ndarray (structured or homogeneous), dict, or DataFrame Dict can contain Series, arrays, constants, or list-like objects index : Index or array-like - Index to use for resulting frame. Will default to np.arange(n) if + Index to use for resulting frame. Will default to RangeIndex if no indexing information part of input data and no index provided columns : Index or array-like Column labels to use for resulting frame. Will default to - np.arange(n) if no column labels are provided + RangeIndex (0, 1, 2, ..., n) if no column labels are provided dtype : dtype, default None Data type to force. Only a single dtype is allowed. If None, infer copy : boolean, default False diff --git a/pandas/core/series.py b/pandas/core/series.py index 90dc14836ab55..b54b95d986b97 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -131,7 +131,7 @@ class Series(base.IndexOpsMixin, generic.NDFrame): index : array-like or Index (1d) Values must be hashable and have the same length as `data`. Non-unique index values are allowed. Will default to - RangeIndex(len(data)) if not provided. If both a dict and index + RangeIndex (0, 1, 2, ..., n) if not provided. If both a dict and index sequence are used, the index will override the keys found in the dict. dtype : numpy.dtype or None
Small correction to DataFrame docstring
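For illustration, the default indexes the corrected docstrings describe:

```
import pandas as pd

# With no index or columns given, both axes default to a RangeIndex,
# not an np.arange(n) ndarray.
df = pd.DataFrame([[1, 2], [3, 4]])
print(df.index)    # RangeIndex(start=0, stop=2, step=1)
print(df.columns)  # RangeIndex(start=0, stop=2, step=1)
```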
https://api.github.com/repos/pandas-dev/pandas/pulls/19781
2018-02-20T03:04:38Z
2018-02-22T02:17:54Z
2018-02-22T02:17:54Z
2018-02-22T10:35:50Z
DOC: added plotting module to the api reference docs
diff --git a/doc/source/api.rst b/doc/source/api.rst index 103b0fe9ff019..46c6fb46b9d41 100644 --- a/doc/source/api.rst +++ b/doc/source/api.rst @@ -2378,15 +2378,23 @@ Style Export and Import Styler.to_excel Plotting -~~~~~~~~ +-------- -.. currentmodule:: pandas +.. currentmodule:: pandas.plotting + +The following functions are contained in the `pandas.plotting` module. .. autosummary:: :toctree: generated/ - plotting.register_matplotlib_converters - plotting.deregister_matplotlib_converters + andrews_curves + bootstrap_plot + deregister_matplotlib_converters + lag_plot + parallel_coordinates + radviz + register_matplotlib_converters + scatter_matrix .. currentmodule:: pandas
- [x] closes #19777 - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/19780
2018-02-20T02:46:01Z
2018-02-22T00:18:04Z
2018-02-22T00:18:04Z
2018-02-22T00:18:40Z
CI: remove PIP & old conda build in favor of pandas-ci builds
diff --git a/.travis.yml b/.travis.yml index b1168f18315c3..22ef6c819c6d4 100644 --- a/.travis.yml +++ b/.travis.yml @@ -50,9 +50,6 @@ matrix: packages: - python-gtk2 # In allow_failures - - dist: trusty - env: - - JOB="3.5" TEST_ARGS="--skip-slow --skip-network" - dist: trusty env: - JOB="3.6" TEST_ARGS="--skip-slow --skip-network" PANDAS_TESTING_MODE="deprecate" CONDA_FORGE=true COVERAGE=true @@ -63,36 +60,26 @@ matrix: # In allow_failures - dist: trusty env: - - JOB="3.6_PIP_BUILD_TEST" TEST_ARGS="--skip-slow" PIP_BUILD_TEST=true + - JOB="3.6_NUMPY_DEV" TEST_ARGS="--skip-slow --skip-network" PANDAS_TESTING_MODE="deprecate" addons: apt: packages: - xsel # In allow_failures - - dist: trusty - env: - - JOB="3.6_NUMPY_DEV" TEST_ARGS="--skip-slow --skip-network" PANDAS_TESTING_MODE="deprecate" - # In allow_failures - dist: trusty env: - JOB="3.6_DOC" DOC=true allow_failures: - - dist: trusty - env: - - JOB="3.5" TEST_ARGS="--skip-slow --skip-network" - dist: trusty env: - JOB="2.7_SLOW" SLOW=true - dist: trusty env: - - JOB="3.6_PIP_BUILD_TEST" TEST_ARGS="--skip-slow" PIP_BUILD_TEST=true + - JOB="3.6_NUMPY_DEV" TEST_ARGS="--skip-slow --skip-network" PANDAS_TESTING_MODE="deprecate" addons: apt: packages: - xsel - - dist: trusty - env: - - JOB="3.6_NUMPY_DEV" TEST_ARGS="--skip-slow --skip-network" PANDAS_TESTING_MODE="deprecate" - dist: trusty env: - JOB="3.6_DOC" DOC=true diff --git a/ci/install_travis.sh b/ci/install_travis.sh index 458ff083b65eb..9ccb4baf25505 100755 --- a/ci/install_travis.sh +++ b/ci/install_travis.sh @@ -115,15 +115,6 @@ if [ "$COVERAGE" ]; then pip install coverage pytest-cov fi -echo -if [ -z "$PIP_BUILD_TEST" ] ; then - - # build but don't install - echo "[build em]" - time python setup.py build_ext --inplace || exit 1 - -fi - # we may have run installations echo echo "[conda installs]" @@ -161,23 +152,8 @@ conda list pandas pip list --format columns |grep pandas # build and install -echo - -if [ "$PIP_BUILD_TEST" ]; then - - # build & install testing - echo "[building release]" - time bash scripts/build_dist_for_release.sh || exit 1 - conda uninstall -y cython - time pip install dist/*tar.gz || exit 1 - -else - - # install our pandas - echo "[running setup.py develop]" - python setup.py develop || exit 1 - -fi +echo "[running setup.py develop]" +python setup.py develop || exit 1 echo echo "[show pandas]" diff --git a/ci/requirements-3.6_PIP_BUILD_TEST.build b/ci/requirements-3.6_PIP_BUILD_TEST.build deleted file mode 100644 index 1c4b46aea3865..0000000000000 --- a/ci/requirements-3.6_PIP_BUILD_TEST.build +++ /dev/null @@ -1,6 +0,0 @@ -python=3.6* -python-dateutil -pytz -nomkl -numpy -cython diff --git a/ci/requirements-3.6_PIP_BUILD_TEST.pip b/ci/requirements-3.6_PIP_BUILD_TEST.pip deleted file mode 100644 index f4617133cad5b..0000000000000 --- a/ci/requirements-3.6_PIP_BUILD_TEST.pip +++ /dev/null @@ -1,6 +0,0 @@ -xarray -geopandas -seaborn -pandas_datareader -statsmodels -scikit-learn diff --git a/ci/requirements-3.6_PIP_BUILD_TEST.sh b/ci/requirements-3.6_PIP_BUILD_TEST.sh deleted file mode 100644 index 3a8cf673b32f2..0000000000000 --- a/ci/requirements-3.6_PIP_BUILD_TEST.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash - -source activate pandas - -echo "install 36 PIP_BUILD_TEST" - -conda install -n pandas -c conda-forge pyarrow dask pyqt qtpy diff --git a/ci/script_multi.sh b/ci/script_multi.sh index 45c61ee3172fe..2b2d4d5488b91 100755 --- a/ci/script_multi.sh +++ b/ci/script_multi.sh @@ -23,23 +23,7 @@ fi export PYTHONHASHSEED=$(python -c 'import random; 
print(random.randint(1, 4294967295))') echo PYTHONHASHSEED=$PYTHONHASHSEED -if [ "$PIP_BUILD_TEST" ] ; then - echo "[build-test]" - - echo "[env]" - pip list --format columns |grep pandas - - echo "[running]" - cd /tmp - unset PYTHONPATH - - echo "[build-test: single]" - python -c 'import pandas; pandas.test(["--skip-slow", "--skip-network", "-r xX", "-m single"])' - - echo "[build-test: not single]" - python -c 'import pandas; pandas.test(["-n 2", "--skip-slow", "--skip-network", "-r xX", "-m not single"])' - -elif [ "$DOC" ]; then +if [ "$DOC" ]; then echo "We are not running pytest as this is a doc-build" elif [ "$COVERAGE" ]; then diff --git a/ci/script_single.sh b/ci/script_single.sh index 021a5a7714fb5..f376c920ac71b 100755 --- a/ci/script_single.sh +++ b/ci/script_single.sh @@ -21,10 +21,7 @@ if echo "$TEST_ARGS" | grep -e --skip-network -q; then export http_proxy=http://1.2.3.4 https_proxy=http://1.2.3.4; fi -if [ "$PIP_BUILD_TEST" ]; then - echo "We are not running pytest as this is a build test." - -elif [ "$DOC" ]; then +if [ "$DOC" ]; then echo "We are not running pytest as this is a doc-build" elif [ "$COVERAGE" ]; then
xref https://github.com/pandas-dev/pandas-ci/pull/3
https://api.github.com/repos/pandas-dev/pandas/pulls/19775
2018-02-19T20:02:37Z
2018-02-19T23:05:15Z
2018-02-19T23:05:15Z
2018-02-19T23:05:15Z
BUG: DataFrame.diff(axis=0) with DatetimeTZ data
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 90ce6b47728fb..ac527387ae6a1 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -833,6 +833,7 @@ Timezones - Bug in the :class:`DataFrame` constructor, where tz-aware Datetimeindex and a given column name will result in an empty ``DataFrame`` (:issue:`19157`) - Bug in :func:`Timestamp.tz_localize` where localizing a timestamp near the minimum or maximum valid values could overflow and return a timestamp with an incorrect nanosecond value (:issue:`12677`) - Bug when iterating over :class:`DatetimeIndex` that was localized with fixed timezone offset that rounded nanosecond precision to microseconds (:issue:`19603`) +- Bug in :func:`DataFrame.diff` that raised an ``IndexError`` with tz-aware values (:issue:`18578`) Offsets ^^^^^^^ diff --git a/pandas/core/internals.py b/pandas/core/internals.py index 00ef8f9cef598..240c9b1f3377c 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -2905,6 +2905,35 @@ def shift(self, periods, axis=0, mgr=None): return [self.make_block_same_class(new_values, placement=self.mgr_locs)] + def diff(self, n, axis=0, mgr=None): + """1st discrete difference + + Parameters + ---------- + n : int, number of periods to diff + axis : int, axis to diff upon. default 0 + mgr : default None + + Return + ------ + A list with a new TimeDeltaBlock. + + Note + ---- + The arguments here are mimicking shift so they are called correctly + by apply. + """ + if axis == 0: + # Cannot currently calculate diff across multiple blocks since this + # function is invoked via apply + raise NotImplementedError + new_values = (self.values - self.shift(n, axis=axis)[0].values).asi8 + + # Reshape the new_values like how algos.diff does for timedelta data + new_values = new_values.reshape(1, len(new_values)) + new_values = new_values.astype('timedelta64[ns]') + return [TimeDeltaBlock(new_values, placement=self.mgr_locs.indexer)] + def concat_same_type(self, to_concat, placement=None): """ Concatenate list of single blocks of the same type. diff --git a/pandas/tests/frame/test_timeseries.py b/pandas/tests/frame/test_timeseries.py index e1bc310e1e934..ceb6c942c81b1 100644 --- a/pandas/tests/frame/test_timeseries.py +++ b/pandas/tests/frame/test_timeseries.py @@ -57,6 +57,32 @@ def test_diff(self): 1), 'z': pd.Series(1)}).astype('float64') assert_frame_equal(result, expected) + @pytest.mark.parametrize('tz', [None, 'UTC']) + def test_diff_datetime_axis0(self, tz): + # GH 18578 + df = DataFrame({0: date_range('2010', freq='D', periods=2, tz=tz), + 1: date_range('2010', freq='D', periods=2, tz=tz)}) + + result = df.diff(axis=0) + expected = DataFrame({0: pd.TimedeltaIndex(['NaT', '1 days']), + 1: pd.TimedeltaIndex(['NaT', '1 days'])}) + assert_frame_equal(result, expected) + + @pytest.mark.parametrize('tz', [None, 'UTC']) + def test_diff_datetime_axis1(self, tz): + # GH 18578 + df = DataFrame({0: date_range('2010', freq='D', periods=2, tz=tz), + 1: date_range('2010', freq='D', periods=2, tz=tz)}) + if tz is None: + result = df.diff(axis=1) + expected = DataFrame({0: pd.TimedeltaIndex(['NaT', 'NaT']), + 1: pd.TimedeltaIndex(['0 days', + '0 days'])}) + assert_frame_equal(result, expected) + else: + with pytest.raises(NotImplementedError): + result = df.diff(axis=1) + def test_diff_timedelta(self): # GH 4533 df = DataFrame(dict(time=[Timestamp('20130101 9:01'),
- [x] closes #18578 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry Unfortunately, `DataFrame.diff` doesn't work correctly with axis=1; it returns values like so after this patch: ``` In [2]: df = DataFrame({0: date_range('2010', freq='D', periods=2, tz='utc'), ...: 1: date_range('2010', freq='D', periods=2, tz='utc')}) ...: In [3]: df.diff(axis=1) Out[3]: 0 1 0 NaT NaT 1 NaT NaT ``` I believe this is because `diff` is implemented by iterating over the 2 DatetimeTZBlocks (via `BlockManager.apply`), but `diff` needs data from both blocks at once. (Is it possible to consolidate 2 DatetimeTZBlocks?) I made this raise a `NotImplementedError` for now.
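For reference, a sketch of the axis=0 behavior this patch enables, mirroring the new tests:

```
import pandas as pd
from pandas import DataFrame, date_range

# tz-aware columns now diff along axis=0 into timedeltas instead of
# raising IndexError.
df = DataFrame({0: date_range('2010', freq='D', periods=2, tz='UTC'),
                1: date_range('2010', freq='D', periods=2, tz='UTC')})

print(df.diff(axis=0))
#        0      1
# 0    NaT    NaT
# 1 1 days 1 days
```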
https://api.github.com/repos/pandas-dev/pandas/pulls/19773
2018-02-19T18:42:01Z
2018-03-01T22:50:35Z
2018-03-01T22:50:35Z
2018-03-01T23:48:14Z
BUG: Fix MultiIndex .loc with all numpy arrays
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 76c4fa08fca4d..e6ed71c719c9a 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -833,6 +833,7 @@ MultiIndex - Bug in :func:`MultiIndex.get_loc` which would cast boolean to integer labels (:issue:`19086`) - Bug in :func:`MultiIndex.get_loc` which would fail to locate keys containing ``NaN`` (:issue:`18485`) - Bug in :func:`MultiIndex.get_loc` in large :class:`MultiIndex`, would fail when levels had different dtypes (:issue:`18520`) +- Bug in indexing where nested indexers having only numpy arrays are handled incorrectly (:issue:`19686`) I/O diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index 352ce921d1d44..eb3aeda7902fc 100755 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -2107,10 +2107,9 @@ def is_nested_tuple(tup, labels): if not isinstance(tup, tuple): return False - # are we nested tuple of: tuple,list,slice for i, k in enumerate(tup): - if isinstance(k, (tuple, list, slice)): + if is_list_like(k) or isinstance(k, slice): return isinstance(labels, MultiIndex) return False diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py index 433b0d87ac005..86a5a82441ee8 100644 --- a/pandas/tests/indexing/test_loc.py +++ b/pandas/tests/indexing/test_loc.py @@ -8,7 +8,7 @@ import pandas as pd from pandas.compat import lrange, StringIO -from pandas import Series, DataFrame, Timestamp, date_range, MultiIndex +from pandas import Series, DataFrame, Timestamp, date_range, MultiIndex, Index from pandas.util import testing as tm from pandas.tests.indexing.common import Base @@ -711,3 +711,44 @@ def test_identity_slice_returns_new_object(self): original_series[:3] = [7, 8, 9] assert all(sliced_series[:3] == [7, 8, 9]) + + @pytest.mark.parametrize( + 'indexer_type_1', + (list, tuple, set, slice, np.ndarray, Series, Index)) + @pytest.mark.parametrize( + 'indexer_type_2', + (list, tuple, set, slice, np.ndarray, Series, Index)) + def test_loc_getitem_nested_indexer(self, indexer_type_1, indexer_type_2): + # GH #19686 + # .loc should work with nested indexers which can be + # any list-like objects (see `pandas.api.types.is_list_like`) or slices + + def convert_nested_indexer(indexer_type, keys): + if indexer_type == np.ndarray: + return np.array(keys) + if indexer_type == slice: + return slice(*keys) + return indexer_type(keys) + + a = [10, 20, 30] + b = [1, 2, 3] + index = pd.MultiIndex.from_product([a, b]) + df = pd.DataFrame( + np.arange(len(index), dtype='int64'), + index=index, columns=['Data']) + + keys = ([10, 20], [2, 3]) + types = (indexer_type_1, indexer_type_2) + + # check indexers with all the combinations of nested objects + # of all the valid types + indexer = tuple( + convert_nested_indexer(indexer_type, k) + for indexer_type, k in zip(types, keys)) + + result = df.loc[indexer, 'Data'] + expected = pd.Series( + [1, 2, 4, 5], name='Data', + index=pd.MultiIndex.from_product(keys)) + + tm.assert_series_equal(result, expected)
- [x] closes #19686 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
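A minimal sketch of the fixed behavior, mirroring the new test: nested indexers built purely from numpy arrays now behave the same as lists or tuples:

```
import numpy as np
import pandas as pd

index = pd.MultiIndex.from_product([[10, 20, 30], [1, 2, 3]])
df = pd.DataFrame(np.arange(len(index), dtype='int64'),
                  index=index, columns=['Data'])

# Previously mishandled because both nested indexers are ndarrays.
print(df.loc[(np.array([10, 20]), np.array([2, 3])), 'Data'])
# 10  2    1
#     3    2
# 20  2    4
#     3    5
# Name: Data, dtype: int64
```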
https://api.github.com/repos/pandas-dev/pandas/pulls/19772
2018-02-19T18:12:17Z
2018-02-23T01:40:20Z
2018-02-23T01:40:20Z
2018-02-23T03:50:50Z
Fix Timedelta floordiv, rfloordiv with offset, fix td64 return types
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 11c49995372f5..56c30615a20fa 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -716,6 +716,8 @@ Datetimelike - Bug in :class:`Timestamp` and :func:`to_datetime` where a string representing a barely out-of-bounds timestamp would be incorrectly rounded down instead of raising ``OutOfBoundsDatetime`` (:issue:`19382`) - Bug in :func:`Timestamp.floor` :func:`DatetimeIndex.floor` where time stamps far in the future and past were not rounded correctly (:issue:`19206`) - Bug in :func:`to_datetime` where passing an out-of-bounds datetime with ``errors='coerce'`` and ``utc=True`` would raise ``OutOfBoundsDatetime`` instead of parsing to ``NaT`` (:issue:`19612`) +- Bug in :func:`Timedelta.__add__`, :func:`Timedelta.__sub__` where adding or subtracting a ``np.timedelta64`` object would return another ``np.timedelta64`` instead of a ``Timedelta`` (:issue:`19738`) +- Bug in :func:`Timedelta.__floordiv__`, :func:`Timedelta.__rfloordiv__` where operating with a ``Tick`` object would raise a ``TypeError`` instead of returning a numeric value (:issue:`19738`) - Timezones diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx index 37693068e0974..bd1fefa7403a8 100644 --- a/pandas/_libs/tslibs/timedeltas.pyx +++ b/pandas/_libs/tslibs/timedeltas.pyx @@ -478,11 +478,16 @@ def _binary_op_method_timedeltalike(op, name): elif other is NaT: return NaT + elif is_timedelta64_object(other): + # convert to Timedelta below; avoid catching this in + # has-dtype check before then + pass + elif is_datetime64_object(other) or PyDateTime_CheckExact(other): # the PyDateTime_CheckExact case is for a datetime object that # is specifically *not* a Timestamp, as the Timestamp case will be # handled after `_validate_ops_compat` returns False below - from ..tslib import Timestamp + from timestamps import Timestamp return op(self, Timestamp(other)) # We are implicitly requiring the canonical behavior to be # defined by Timestamp methods. @@ -503,6 +508,9 @@ def _binary_op_method_timedeltalike(op, name): # failed to parse as timedelta return NotImplemented + if other is NaT: + # e.g. if original other was timedelta64('NaT') + return NaT return Timedelta(op(self.value, other.value), unit='ns') f.__name__ = name @@ -1096,6 +1104,9 @@ class Timedelta(_Timedelta): # just defer if hasattr(other, '_typ'): # Series, DataFrame, ... + if other._typ == 'dateoffset' and hasattr(other, 'delta'): + # Tick offset + return self // other.delta return NotImplemented if hasattr(other, 'dtype'): @@ -1128,6 +1139,9 @@ class Timedelta(_Timedelta): # just defer if hasattr(other, '_typ'): # Series, DataFrame, ... 
+ if other._typ == 'dateoffset' and hasattr(other, 'delta'): + # Tick offset + return other.delta // self return NotImplemented if hasattr(other, 'dtype'): diff --git a/pandas/tests/scalar/timedelta/test_arithmetic.py b/pandas/tests/scalar/timedelta/test_arithmetic.py index 90c911c24f6a9..c6f1a6a1032cd 100644 --- a/pandas/tests/scalar/timedelta/test_arithmetic.py +++ b/pandas/tests/scalar/timedelta/test_arithmetic.py @@ -100,7 +100,6 @@ def test_td_add_pytimedelta(self, op): assert isinstance(result, Timedelta) assert result == Timedelta(days=19) - @pytest.mark.xfail(reason='GH#19738 argument not converted to Timedelta') @pytest.mark.parametrize('op', [operator.add, ops.radd]) def test_td_add_timedelta64(self, op): td = Timedelta(10, unit='d') @@ -130,13 +129,11 @@ def test_td_sub_pytimedelta(self): assert isinstance(result, Timedelta) assert result == expected - @pytest.mark.xfail(reason='GH#19738 argument not converted to Timedelta') def test_td_sub_timedelta64(self): td = Timedelta(10, unit='d') expected = Timedelta(0, unit='ns') result = td - td.to_timedelta64() assert isinstance(result, Timedelta) - # comparison fails even if we comment out the isinstance assertion assert result == expected def test_td_sub_nat(self): @@ -144,7 +141,6 @@ def test_td_sub_nat(self): result = td - NaT assert result is NaT - @pytest.mark.xfail(reason='GH#19738 argument not converted to Timedelta') def test_td_sub_td64_nat(self): td = Timedelta(10, unit='d') result = td - np.timedelta64('NaT') @@ -171,7 +167,6 @@ def test_td_rsub_pytimedelta(self): assert isinstance(result, Timedelta) assert result == expected - @pytest.mark.xfail(reason='GH#19738 argument not converted to Timedelta') def test_td_rsub_timedelta64(self): td = Timedelta(10, unit='d') expected = Timedelta(0, unit='ns') @@ -188,7 +183,6 @@ def test_td_rsub_nat(self): result = np.datetime64('NaT') - td assert result is NaT - @pytest.mark.xfail(reason='GH#19738 argument not converted to Timedelta') def test_td_rsub_td64_nat(self): td = Timedelta(10, unit='d') result = np.timedelta64('NaT') - td @@ -304,6 +298,12 @@ def test_td_floordiv_null_scalar(self): assert np.isnan(td // NaT) assert np.isnan(td // np.timedelta64('NaT')) + def test_td_floordiv_offsets(self): + # GH#19738 + td = Timedelta(hours=3, minutes=4) + assert td // pd.offsets.Hour(1) == 3 + assert td // pd.offsets.Minute(2) == 92 + def test_td_floordiv_invalid_scalar(self): # GH#18846 td = Timedelta(hours=3, minutes=4) @@ -322,7 +322,7 @@ def test_td_floordiv_numeric_scalar(self): assert td // np.int32(2.0) == expected assert td // np.uint8(2.0) == expected - def test_floordiv_timedeltalike_array(self): + def test_td_floordiv_timedeltalike_array(self): # GH#18846 td = Timedelta(hours=3, minutes=4) scalar = Timedelta(hours=3, minutes=3) @@ -371,6 +371,10 @@ def test_td_rfloordiv_null_scalar(self): assert np.isnan(td.__rfloordiv__(NaT)) assert np.isnan(td.__rfloordiv__(np.timedelta64('NaT'))) + def test_td_rfloordiv_offsets(self): + # GH#19738 + assert pd.offsets.Hour(1) // Timedelta(minutes=25) == 2 + def test_td_rfloordiv_invalid_scalar(self): # GH#18846 td = Timedelta(hours=3, minutes=3)
Fixes xfails introduced in #19752.
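A sketch of the newly working operations, taken from the tests added here:

```
import pandas as pd
from pandas import Timedelta

# Tick offsets now floordiv against Timedelta (in either order)
# instead of raising TypeError.
td = Timedelta(hours=3, minutes=4)
assert td // pd.offsets.Hour(1) == 3
assert td // pd.offsets.Minute(2) == 92
assert pd.offsets.Hour(1) // Timedelta(minutes=25) == 2
```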
https://api.github.com/repos/pandas-dev/pandas/pulls/19770
2018-02-19T17:45:25Z
2018-02-19T23:16:37Z
2018-02-19T23:16:37Z
2018-06-22T03:23:40Z
DEPR: remove pandas.core.common is_*
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 2f820043d7b6f..5520e3af8b0ed 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -388,6 +388,23 @@ Convert to an xarray DataArray p.to_xarray() + + +.. _whatsnew_0230.api_breaking.core_common: + +pandas.core.common removals +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The following error & warning messages are removed from ``pandas.core.common`` (:issue:`13634`, :issue:`19769`): + +- ``PerformanceWarning`` +- ``UnsupportedFunctionCall`` +- ``UnsortedIndexError`` +- ``AbstractMethodError`` + +These are available from import from ``pandas.errors`` (since 0.19.0). + + .. _whatsnew_0230.api_breaking.apply: Changes to make output of ``DataFrame.apply`` consistent @@ -638,6 +655,7 @@ Removal of prior version deprecations/changes - The modules `pandas.tools.hashing` and `pandas.util.hashing` have been removed (:issue:`16223`) - The top-level functions ``pd.rolling_*``, ``pd.expanding_*`` and ``pd.ewm*`` have been removed (Deprecated since v0.18). Instead, use the DataFrame/Series methods :attr:`~DataFrame.rolling`, :attr:`~DataFrame.expanding` and :attr:`~DataFrame.ewm` (:issue:`18723`) +- Imports from ``pandas.core.common`` for functions such as ``is_datetime64_dtype`` are now removed. These are located in ``pandas.api.types``. (:issue:`13634`, :issue:`19769`) .. _whatsnew_0230.performance: diff --git a/pandas/core/base.py b/pandas/core/base.py index 0ca029ffd4c25..d061c4144cff9 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -17,7 +17,7 @@ is_extension_array_dtype) from pandas.util._validators import validate_bool_kwarg - +from pandas.errors import AbstractMethodError from pandas.core import common as com, algorithms import pandas.core.nanops as nanops import pandas._libs.lib as lib @@ -46,7 +46,7 @@ class StringMixin(object): # Formatting def __unicode__(self): - raise com.AbstractMethodError(self) + raise AbstractMethodError(self) def __str__(self): """ @@ -278,10 +278,10 @@ def _gotitem(self, key, ndim, subset=None): subset to act on """ - raise com.AbstractMethodError(self) + raise AbstractMethodError(self) def aggregate(self, func, *args, **kwargs): - raise com.AbstractMethodError(self) + raise AbstractMethodError(self) agg = aggregate @@ -1252,4 +1252,4 @@ def duplicated(self, keep='first'): # abstracts def _update_inplace(self, result, **kwargs): - raise com.AbstractMethodError(self) + raise AbstractMethodError(self) diff --git a/pandas/core/common.py b/pandas/core/common.py index 6748db825acf0..278c07b544011 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -2,8 +2,6 @@ Misc tools for implementing data structures """ -import sys -import warnings from datetime import datetime, timedelta from functools import partial import inspect @@ -20,66 +18,8 @@ from pandas.core.dtypes.inference import _iterable_not_string from pandas.core.dtypes.missing import isna, isnull, notnull # noqa from pandas.api import types -from pandas.core.dtypes import common from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike -# compat -from pandas.errors import ( # noqa - PerformanceWarning, UnsupportedFunctionCall, UnsortedIndexError, - AbstractMethodError) - -# back-compat of public API -# deprecate these functions -m = sys.modules['pandas.core.common'] -for t in [t for t in dir(types) if not t.startswith('_')]: - - def outer(t=t): - - def wrapper(*args, **kwargs): - warnings.warn("pandas.core.common.{t} is deprecated. 
" - "import from the public API: " - "pandas.api.types.{t} instead".format(t=t), - DeprecationWarning, stacklevel=3) - return getattr(types, t)(*args, **kwargs) - return wrapper - - setattr(m, t, outer(t)) - -# back-compat for non-public functions -# deprecate these functions -for t in ['is_datetime_arraylike', - 'is_datetime_or_timedelta_dtype', - 'is_datetimelike', - 'is_datetimelike_v_numeric', - 'is_datetimelike_v_object', - 'is_datetimetz', - 'is_int_or_datetime_dtype', - 'is_period_arraylike', - 'is_string_like', - 'is_string_like_dtype']: - - def outer(t=t): - - def wrapper(*args, **kwargs): - warnings.warn("pandas.core.common.{t} is deprecated. " - "These are not longer public API functions, " - "but can be imported from " - "pandas.api.types.{t} instead".format(t=t), - DeprecationWarning, stacklevel=3) - return getattr(common, t)(*args, **kwargs) - return wrapper - - setattr(m, t, outer(t)) - - -# deprecate array_equivalent - -def array_equivalent(*args, **kwargs): - warnings.warn("'pandas.core.common.array_equivalent' is deprecated and " - "is no longer public API", DeprecationWarning, stacklevel=2) - from pandas.core.dtypes import missing - return missing.array_equivalent(*args, **kwargs) - class SettingWithCopyError(ValueError): pass diff --git a/pandas/core/resample.py b/pandas/core/resample.py index df656092f476e..772568ee84737 100644 --- a/pandas/core/resample.py +++ b/pandas/core/resample.py @@ -16,7 +16,7 @@ from pandas.core.indexes.timedeltas import TimedeltaIndex from pandas.tseries.offsets import DateOffset, Tick, Day, delta_to_nanoseconds from pandas.core.indexes.period import PeriodIndex -import pandas.core.common as com +from pandas.errors import AbstractMethodError import pandas.core.algorithms as algos from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries @@ -205,10 +205,10 @@ def __setattr__(self, attr, value): def __getitem__(self, key): try: return super(Resampler, self).__getitem__(key) - except (KeyError, com.AbstractMethodError): + except (KeyError, AbstractMethodError): # compat for deprecated - if isinstance(self.obj, com.ABCSeries): + if isinstance(self.obj, ABCSeries): return self._deprecated('__getitem__')[key] raise @@ -233,7 +233,7 @@ def _convert_obj(self, obj): return obj def _get_binner_for_time(self): - raise com.AbstractMethodError(self) + raise AbstractMethodError(self) def _set_binner(self): """ @@ -372,10 +372,10 @@ def transform(self, arg, *args, **kwargs): arg, *args, **kwargs) def _downsample(self, f): - raise com.AbstractMethodError(self) + raise AbstractMethodError(self) def _upsample(self, f, limit=None, fill_value=None): - raise com.AbstractMethodError(self) + raise AbstractMethodError(self) def _gotitem(self, key, ndim, subset=None): """ @@ -464,7 +464,7 @@ def _get_resampler_for_grouping(self, groupby, **kwargs): def _wrap_result(self, result): """ potentially wrap any results """ - if isinstance(result, com.ABCSeries) and self._selection is not None: + if isinstance(result, ABCSeries) and self._selection is not None: result.name = self._selection if isinstance(result, ABCSeries) and result.empty: diff --git a/pandas/tests/api/test_types.py b/pandas/tests/api/test_types.py index 7e6430accc546..bd4891326c751 100644 --- a/pandas/tests/api/test_types.py +++ b/pandas/tests/api/test_types.py @@ -3,10 +3,8 @@ import pytest from warnings import catch_warnings -import numpy as np import pandas -from pandas.core import common as com from pandas.api import types from pandas.util import testing as tm @@ -52,42 +50,6 @@ def 
check_deprecation(self, fold, fnew): except AttributeError: pytest.raises(AttributeError, lambda: fnew('foo')) - def test_deprecation_core_common(self): - - # test that we are in fact deprecating - # the pandas.core.common introspectors - for t in self.allowed: - self.check_deprecation(getattr(com, t), getattr(types, t)) - - def test_deprecation_core_common_array_equivalent(self): - - with tm.assert_produces_warning(DeprecationWarning): - com.array_equivalent(np.array([1, 2]), np.array([1, 2])) - - def test_deprecation_core_common_moved(self): - - # these are in pandas.core.dtypes.common - l = ['is_datetime_arraylike', - 'is_datetime_or_timedelta_dtype', - 'is_datetimelike', - 'is_datetimelike_v_numeric', - 'is_datetimelike_v_object', - 'is_datetimetz', - 'is_int_or_datetime_dtype', - 'is_period_arraylike', - 'is_string_like', - 'is_string_like_dtype'] - - from pandas.core.dtypes import common as c - for t in l: - self.check_deprecation(getattr(com, t), getattr(c, t)) - - def test_removed_from_core_common(self): - - for t in ['is_null_datelike_scalar', - 'ensure_float']: - pytest.raises(AttributeError, lambda: getattr(com, t)) - def test_deprecated_from_api_types(self): for t in self.deprecated:
xref #13990 xref #19304
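The canonical replacements for the removed `pandas.core.common` imports, per the whatsnew entry above:

```
# Exceptions/warnings moved to pandas.errors (available since 0.19.0):
from pandas.errors import (AbstractMethodError, PerformanceWarning,
                           UnsortedIndexError, UnsupportedFunctionCall)

# Introspection helpers live in the public pandas.api.types namespace:
from pandas.api.types import is_datetime64_dtype
```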
https://api.github.com/repos/pandas-dev/pandas/pulls/19769
2018-02-19T17:30:28Z
2018-02-22T00:12:22Z
2018-02-22T00:12:22Z
2018-02-22T00:12:51Z
DOC: correct Panel.apply example
diff --git a/pandas/core/panel.py b/pandas/core/panel.py index 2cb80e938afb9..7f973992fb07f 100644 --- a/pandas/core/panel.py +++ b/pandas/core/panel.py @@ -1020,7 +1020,7 @@ def apply(self, func, axis='major', **kwargs): Equivalent to previous: - >>> p.apply(lambda x: x.sum(), axis='minor') + >>> p.apply(lambda x: x.sum(), axis='major') Return the shapes of each DataFrame over axis 2 (i.e the shapes of items x major), as a Series
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/19766
2018-02-19T16:02:22Z
2018-02-19T16:10:32Z
2018-02-19T16:10:32Z
2018-02-20T01:45:57Z
DOC/BLD: update vendored IPython.sphinxext version
diff --git a/doc/sphinxext/ipython_sphinxext/ipython_console_highlighting.py b/doc/sphinxext/ipython_sphinxext/ipython_console_highlighting.py index c5ec26aefd442..b93a151fb3cb0 100644 --- a/doc/sphinxext/ipython_sphinxext/ipython_console_highlighting.py +++ b/doc/sphinxext/ipython_sphinxext/ipython_console_highlighting.py @@ -1,116 +1,28 @@ -"""reST directive for syntax-highlighting ipython interactive sessions. - -XXX - See what improvements can be made based on the new (as of Sept 2009) -'pycon' lexer for the python console. At the very least it will give better -highlighted tracebacks. """ +reST directive for syntax-highlighting ipython interactive sessions. -#----------------------------------------------------------------------------- -# Needed modules - -# Standard library -import re - -# Third party -from pygments.lexer import Lexer, do_insertions -from pygments.lexers.agile import (PythonConsoleLexer, PythonLexer, - PythonTracebackLexer) -from pygments.token import Comment, Generic +""" from sphinx import highlighting - -#----------------------------------------------------------------------------- -# Global constants -line_re = re.compile('.*?\n') - -#----------------------------------------------------------------------------- -# Code begins - classes and functions - - -class IPythonConsoleLexer(Lexer): - - """ - For IPython console output or doctests, such as: - - .. sourcecode:: ipython - - In [1]: a = 'foo' - - In [2]: a - Out[2]: 'foo' - - In [3]: print(a) - foo - - In [4]: 1 / 0 - - Notes: - - - Tracebacks are not currently supported. - - - It assumes the default IPython prompts, not customized ones. - """ - - name = 'IPython console session' - aliases = ['ipython'] - mimetypes = ['text/x-ipython-console'] - input_prompt = re.compile("(In \[[0-9]+\]: )|( \.\.\.+:)") - output_prompt = re.compile("(Out\[[0-9]+\]: )|( \.\.\.+:)") - continue_prompt = re.compile(" \.\.\.+:") - tb_start = re.compile("\-+") - - def get_tokens_unprocessed(self, text): - pylexer = PythonLexer(**self.options) - tblexer = PythonTracebackLexer(**self.options) - - curcode = '' - insertions = [] - for match in line_re.finditer(text): - line = match.group() - input_prompt = self.input_prompt.match(line) - continue_prompt = self.continue_prompt.match(line.rstrip()) - output_prompt = self.output_prompt.match(line) - if line.startswith("#"): - insertions.append((len(curcode), - [(0, Comment, line)])) - elif input_prompt is not None: - insertions.append((len(curcode), - [(0, Generic.Prompt, input_prompt.group())])) - curcode += line[input_prompt.end():] - elif continue_prompt is not None: - insertions.append((len(curcode), - [(0, Generic.Prompt, continue_prompt.group())])) - curcode += line[continue_prompt.end():] - elif output_prompt is not None: - # Use the 'error' token for output. We should probably make - # our own token, but error is typically in a bright color like - # red, so it works fine for our output prompts. - insertions.append((len(curcode), - [(0, Generic.Error, output_prompt.group())])) - curcode += line[output_prompt.end():] - else: - if curcode: - for item in do_insertions(insertions, - pylexer.get_tokens_unprocessed(curcode)): - yield item - curcode = '' - insertions = [] - yield match.start(), Generic.Output, line - if curcode: - for item in do_insertions(insertions, - pylexer.get_tokens_unprocessed(curcode)): - yield item - +from IPython.lib.lexers import IPyLexer def setup(app): """Setup as a sphinx extension.""" # This is only a lexer, so adding it below to pygments appears sufficient. 
- # But if somebody knows that the right API usage should be to do that via + # But if somebody knows what the right API usage should be to do that via # sphinx, by all means fix it here. At least having this setup.py # suppresses the sphinx warning we'd get without it. - pass + metadata = {'parallel_read_safe': True, 'parallel_write_safe': True} + return metadata + +# Register the extension as a valid pygments lexer. +# Alternatively, we could register the lexer with pygments instead. This would +# require using setuptools entrypoints: http://pygments.org/docs/plugins + +ipy2 = IPyLexer(python3=False) +ipy3 = IPyLexer(python3=True) -#----------------------------------------------------------------------------- -# Register the extension as a valid pygments lexer -highlighting.lexers['ipython'] = IPythonConsoleLexer() +highlighting.lexers['ipython'] = ipy2 +highlighting.lexers['ipython2'] = ipy2 +highlighting.lexers['ipython3'] = ipy3 diff --git a/doc/sphinxext/ipython_sphinxext/ipython_directive.py b/doc/sphinxext/ipython_sphinxext/ipython_directive.py index 5616d732eb1c6..a0e6728861b66 100644 --- a/doc/sphinxext/ipython_sphinxext/ipython_directive.py +++ b/doc/sphinxext/ipython_sphinxext/ipython_directive.py @@ -83,8 +83,29 @@ See http://matplotlib.org/sampledoc/ipython_directive.html for additional documentation. -ToDo ----- +Pseudo-Decorators +================= + +Note: Only one decorator is supported per input. If more than one decorator +is specified, then only the last one is used. + +In addition to the Pseudo-Decorators/options described at the above link, +several enhancements have been made. The directive will emit a message to the +console at build-time if code-execution resulted in an exception or warning. +You can suppress these on a per-block basis by specifying the :okexcept: +or :okwarning: options: + +.. code-block:: rst + + .. ipython:: + :okexcept: + :okwarning: + + In [1]: 1/0 + In [2]: # raise warning. + +To Do +----- - Turn the ad-hoc test() function into a real test suite. - Break up ipython-specific functionality from matplotlib stuff into better @@ -98,48 +119,31 @@ - VáclavŠmilauer <eudoxos-AT-arcig.cz>: Prompt generalizations.
- Skipper Seabold, refactoring, cleanups, pure python addition """ -from __future__ import print_function -from __future__ import unicode_literals #----------------------------------------------------------------------------- # Imports #----------------------------------------------------------------------------- # Stdlib +import atexit +import errno import os import re import sys import tempfile import ast -from pandas.compat import zip, range, map, lmap, u, text_type, cStringIO as StringIO import warnings - -# To keep compatibility with various python versions -try: - from hashlib import md5 -except ImportError: - from md5 import md5 +import shutil +from io import StringIO # Third-party -import sphinx from docutils.parsers.rst import directives -from docutils import nodes -from sphinx.util.compat import Directive +from docutils.parsers.rst import Directive # Our own -try: - from traitlets.config import Config -except ImportError: - from IPython import Config +from traitlets.config import Config from IPython import InteractiveShell from IPython.core.profiledir import ProfileDir -from IPython.utils import io -from IPython.utils.py3compat import PY3 - -if PY3: - from io import StringIO -else: - from StringIO import StringIO #----------------------------------------------------------------------------- # Globals @@ -191,8 +195,8 @@ def block_parser(part, rgxin, rgxout, fmtin, fmtout): continue if line_stripped.startswith('@'): - # we're assuming at most one decorator -- may need to - # rethink + # Here is where we assume there is, at most, one decorator. + # Might need to rethink this. decorator = line_stripped continue @@ -223,12 +227,17 @@ def block_parser(part, rgxin, rgxout, fmtin, fmtout): if matchout or nextline.startswith('#'): break elif nextline.startswith(continuation): + # The default ipython_rgx* treat the space following the colon as optional. + # However, If the space is there we must consume it or code + # employing the cython_magic extension will fail to execute. + # + # This works with the default ipython_rgx* patterns, + # If you modify them, YMMV. 
nextline = nextline[Nc:] if nextline and nextline[0] == ' ': nextline = nextline[1:] inputline += '\n' + nextline - else: rest.append(nextline) i+= 1 @@ -250,42 +259,19 @@ def block_parser(part, rgxin, rgxout, fmtin, fmtout): return block -class DecodingStringIO(StringIO, object): - def __init__(self,buf='',encodings=('utf8',), *args, **kwds): - super(DecodingStringIO, self).__init__(buf, *args, **kwds) - self.set_encodings(encodings) - - def set_encodings(self, encodings): - self.encodings = encodings - - def write(self,data): - if isinstance(data, text_type): - return super(DecodingStringIO, self).write(data) - else: - for enc in self.encodings: - try: - data = data.decode(enc) - return super(DecodingStringIO, self).write(data) - except : - pass - # default to brute utf8 if no encoding succeeded - return super(DecodingStringIO, self).write(data.decode('utf8', 'replace')) - - class EmbeddedSphinxShell(object): """An embedded IPython instance to run inside Sphinx""" - def __init__(self, exec_lines=None,state=None): + def __init__(self, exec_lines=None): - self.cout = DecodingStringIO(u'') + self.cout = StringIO() if exec_lines is None: exec_lines = [] - self.state = state - # Create config object for IPython config = Config() + config.HistoryManager.hist_file = ':memory:' config.InteractiveShell.autocall = False config.InteractiveShell.autoindent = False config.InteractiveShell.colors = 'NoColor' @@ -297,17 +283,9 @@ def __init__(self, exec_lines=None,state=None): profile = ProfileDir.create_profile_dir(pdir) # Create and initialize global ipython, but don't start its mainloop. - # This will persist across different EmbededSphinxShell instances. + # This will persist across different EmbeddedSphinxShell instances. IP = InteractiveShell.instance(config=config, profile_dir=profile) - - # io.stdout redirect must be done after instantiating InteractiveShell - io.stdout = self.cout - io.stderr = self.cout - - # For debugging, so we can see normal output, use this: - #from IPython.utils.io import Tee - #io.stdout = Tee(self.cout, channel='stdout') # dbg - #io.stderr = Tee(self.cout, channel='stderr') # dbg + atexit.register(self.cleanup) # Store a few parts of IPython we'll need. self.IP = IP @@ -316,12 +294,17 @@ def __init__(self, exec_lines=None,state=None): self.input = '' self.output = '' + self.tmp_profile_dir = tmp_profile_dir self.is_verbatim = False self.is_doctest = False self.is_suppress = False # Optionally, provide more detailed information to shell. + # this is assigned by the SetUp method of IPythonDirective + # to point at itself. 
+ # + # So, you can access handy things at self.directive.state self.directive = None # on the first call to the savefig decorator, we'll import @@ -332,6 +315,9 @@ def __init__(self, exec_lines=None,state=None): for line in exec_lines: self.process_input_line(line, store_history=False) + def cleanup(self): + shutil.rmtree(self.tmp_profile_dir, ignore_errors=True) + def clear_cout(self): self.cout.seek(0) self.cout.truncate(0) @@ -346,11 +332,7 @@ def process_input_line(self, line, store_history=True): splitter.push(line) more = splitter.push_accepts_more() if not more: - try: - source_raw = splitter.source_raw_reset()[1] - except: - # recent ipython #4504 - source_raw = splitter.raw_reset() + source_raw = splitter.raw_reset() self.IP.run_cell(source_raw, store_history=store_history) finally: sys.stdout = stdout @@ -368,9 +350,9 @@ def process_image(self, decorator): source_dir = self.source_dir saveargs = decorator.split(' ') filename = saveargs[1] - # insert relative path to image file in source - outfile = os.path.relpath(os.path.join(savefig_dir,filename), - source_dir) + # insert relative path to image file in source (as absolute path for Sphinx) + outfile = '/' + os.path.relpath(os.path.join(savefig_dir,filename), + source_dir) imagerows = ['.. image:: %s'%outfile] @@ -403,17 +385,10 @@ def process_input(self, data, input_prompt, lineno): is_savefig = decorator is not None and \ decorator.startswith('@savefig') - # set the encodings to be used by DecodingStringIO - # to convert the execution output into unicode if - # needed. this attrib is set by IpythonDirective.run() - # based on the specified block options, defaulting to ['ut - self.cout.set_encodings(self.output_encoding) - input_lines = input.split('\n') - if len(input_lines) > 1: - if input_lines[-1] != "": - input_lines.append('') # make sure there's a blank line + if input_lines[-1] != "": + input_lines.append('') # make sure there's a blank line # so splitter buffer gets reset continuation = ' %s:'%''.join(['.']*(len(str(lineno))+2)) @@ -456,30 +431,75 @@ def process_input(self, data, input_prompt, lineno): ret.append(formatted_line) if not is_suppress and len(rest.strip()) and is_verbatim: - # the "rest" is the standard output of the - # input, which needs to be added in - # verbatim mode + # The "rest" is the standard output of the input. This needs to be + # added when in verbatim mode. If there is no "rest", then we don't + # add it, as the new line will be added by the processed output. ret.append(rest) + # Fetch the processed output. (This is not the submitted output.) self.cout.seek(0) - output = self.cout.read() + processed_output = self.cout.read() if not is_suppress and not is_semicolon: - ret.append(output) - elif is_semicolon: # get spacing right + # + # In IPythonDirective.run, the elements of `ret` are eventually + # combined such that '' entries correspond to newlines. So if + # `processed_output` is equal to '', then the adding it to `ret` + # ensures that there is a blank line between consecutive inputs + # that have no outputs, as in: + # + # In [1]: x = 4 + # + # In [2]: x = 5 + # + # When there is processed output, it has a '\n' at the tail end. So + # adding the output to `ret` will provide the necessary spacing + # between consecutive input/output blocks, as in: + # + # In [1]: x + # Out[1]: 5 + # + # In [2]: x + # Out[2]: 5 + # + # When there is stdout from the input, it also has a '\n' at the + # tail end, and so this ensures proper spacing as well. 
E.g.: + # + # In [1]: print x + # 5 + # + # In [2]: x = 5 + # + # When in verbatim mode, `processed_output` is empty (because + # nothing was passed to IP. Sometimes the submitted code block has + # an Out[] portion and sometimes it does not. When it does not, we + # need to ensure proper spacing, so we have to add '' to `ret`. + # However, if there is an Out[] in the submitted code, then we do + # not want to add a newline as `process_output` has stuff to add. + # The difficulty is that `process_input` doesn't know if + # `process_output` will be called---so it doesn't know if there is + # Out[] in the code block. The requires that we include a hack in + # `process_block`. See the comments there. + # + ret.append(processed_output) + elif is_semicolon: + # Make sure there is a newline after the semicolon. ret.append('') # context information - filename = self.state.document.current_source - lineno = self.state.document.current_line + filename = "Unknown" + lineno = 0 + if self.directive.state: + filename = self.directive.state.document.current_source + lineno = self.directive.state.document.current_line # output any exceptions raised during execution to stdout # unless :okexcept: has been specified. - if not is_okexcept and "Traceback" in output: + if not is_okexcept and "Traceback" in processed_output: s = "\nException in %s at block ending on line %s\n" % (filename, lineno) s += "Specify :okexcept: as an option in the ipython:: block to suppress this message\n" sys.stdout.write('\n\n>>>' + ('-' * 73)) sys.stdout.write(s) - sys.stdout.write(output) + sys.stdout.write(processed_output) sys.stdout.write('<<<' + ('-' * 73) + '\n\n') # output any warning raised during execution to stdout @@ -490,28 +510,32 @@ def process_input(self, data, input_prompt, lineno): s += "Specify :okwarning: as an option in the ipython:: block to suppress this message\n" sys.stdout.write('\n\n>>>' + ('-' * 73)) sys.stdout.write(s) - sys.stdout.write('-' * 76 + '\n') + sys.stdout.write(('-' * 76) + '\n') s=warnings.formatwarning(w.message, w.category, w.filename, w.lineno, w.line) sys.stdout.write(s) sys.stdout.write('<<<' + ('-' * 73) + '\n') self.cout.truncate(0) - return (ret, input_lines, output, is_doctest, decorator, image_file, - image_directive) + + return (ret, input_lines, processed_output, + is_doctest, decorator, image_file, image_directive) - def process_output(self, data, output_prompt, - input_lines, output, is_doctest, decorator, image_file): + def process_output(self, data, output_prompt, input_lines, output, + is_doctest, decorator, image_file): """ Process data block for OUTPUT token. """ + # Recall: `data` is the submitted output, and `output` is the processed + # output from `input_lines`. + TAB = ' ' * 4 if is_doctest and output is not None: - found = output + found = output # This is the processed output found = found.strip() submitted = data.strip() @@ -522,7 +546,7 @@ def process_output(self, data, output_prompt, source = self.directive.state.document.current_source content = self.directive.content # Add tabs and join into a single string. - content = '\n'.join(TAB + line for line in content) + content = '\n'.join([TAB + line for line in content]) # Make sure the output contains the output prompt. ind = found.find(output_prompt) @@ -553,6 +577,31 @@ def process_output(self, data, output_prompt, else: self.custom_doctest(decorator, input_lines, found, submitted) + # When in verbatim mode, this holds additional submitted output + # to be written in the final Sphinx output. 
+ # https://github.com/ipython/ipython/issues/5776 + out_data = [] + + is_verbatim = decorator=='@verbatim' or self.is_verbatim + if is_verbatim and data.strip(): + # Note that `ret` in `process_block` has '' as its last element if + # the code block was in verbatim mode. So if there is no submitted + # output, then we will have proper spacing only if we do not add + # an additional '' to `out_data`. This is why we condition on + # `and data.strip()`. + + # The submitted output has no output prompt. If we want the + # prompt and the code to appear, we need to join them now + # instead of adding them separately---as this would create an + # undesired newline. How we do this ultimately depends on the + # format of the output regex. I'll do what works for the default + # prompt for now, and we might have to adjust if it doesn't work + # in other cases. Finally, the submitted output does not have + # a trailing newline, so we must add it manually. + out_data.append("{0} {1}\n".format(output_prompt, data)) + + return out_data + def process_comment(self, data): """Process data fPblock for COMMENT token.""" if not self.is_suppress: @@ -563,9 +612,7 @@ def save_image(self, image_file): Saves the image file to disk. """ self.ensure_pyplot() - command = ('plt.gcf().savefig("%s", bbox_inches="tight", ' - 'dpi=100)' % image_file) - + command = 'plt.gcf().savefig("%s")'%image_file #print 'SAVEFIG', command # dbg self.process_input_line('bookmark ipy_thisdir', store_history=False) self.process_input_line('cd -b ipy_savedir', store_history=False) @@ -588,18 +635,53 @@ def process_block(self, block): image_file = None image_directive = None + found_input = False for token, data in block: if token == COMMENT: out_data = self.process_comment(data) elif token == INPUT: - (out_data, input_lines, output, is_doctest, decorator, - image_file, image_directive) = \ + found_input = True + (out_data, input_lines, output, is_doctest, + decorator, image_file, image_directive) = \ self.process_input(data, input_prompt, lineno) elif token == OUTPUT: + if not found_input: + + TAB = ' ' * 4 + linenumber = 0 + source = 'Unavailable' + content = 'Unavailable' + if self.directive: + linenumber = self.directive.state.document.current_line + source = self.directive.state.document.current_source + content = self.directive.content + # Add tabs and join into a single string. + content = '\n'.join([TAB + line for line in content]) + + e = ('\n\nInvalid block: Block contains an output prompt ' + 'without an input prompt.\n\n' + 'Document source: {0}\n\n' + 'Content begins at line {1}: \n\n{2}\n\n' + 'Problematic block within content: \n\n{TAB}{3}\n\n') + e = e.format(source, linenumber, content, block, TAB=TAB) + + # Write, rather than include in exception, since Sphinx + # will truncate tracebacks. + sys.stdout.write(e) + raise RuntimeError('An invalid block was detected.') + out_data = \ - self.process_output(data, output_prompt, - input_lines, output, is_doctest, - decorator, image_file) + self.process_output(data, output_prompt, input_lines, + output, is_doctest, decorator, + image_file) + if out_data: + # Then there was user submitted output in verbatim mode. + # We need to remove the last element of `ret` that was + # added in `process_input`, as it is '' and would introduce + # an undesirable newline. 
+ assert(ret[-1] == '') + del ret[-1] + if out_data: ret.extend(out_data) @@ -740,8 +822,7 @@ class IPythonDirective(Directive): 'verbatim' : directives.flag, 'doctest' : directives.flag, 'okexcept': directives.flag, - 'okwarning': directives.flag, - 'output_encoding': directives.unchanged_required + 'okwarning': directives.flag } shell = None @@ -753,14 +834,9 @@ def get_config_options(self): config = self.state.document.settings.env.config # get config variables to set figure output directory - confdir = self.state.document.settings.env.app.confdir savefig_dir = config.ipython_savefig_dir - source_dir = os.path.dirname(self.state.document.current_source) - if savefig_dir is None: - savefig_dir = config.html_static_path - if isinstance(savefig_dir, list): - savefig_dir = savefig_dir[0] # safe to assume only one path? - savefig_dir = os.path.join(confdir, savefig_dir) + source_dir = self.state.document.settings.env.srcdir + savefig_dir = os.path.join(source_dir, savefig_dir) # get regex and prompt stuff rgxin = config.ipython_rgxin @@ -779,6 +855,12 @@ def setup(self): (savefig_dir, source_dir, rgxin, rgxout, promptin, promptout, mplbackend, exec_lines, hold_count) = self.get_config_options() + try: + os.makedirs(savefig_dir) + except OSError as e: + if e.errno != errno.EEXIST: + raise + if self.shell is None: # We will be here many times. However, when the # EmbeddedSphinxShell is created, its interactive shell member @@ -786,13 +868,11 @@ def setup(self): if mplbackend and 'matplotlib.backends' not in sys.modules: import matplotlib - # Repeated calls to use() will not hurt us since `mplbackend` - # is the same each time. matplotlib.use(mplbackend) # Must be called after (potentially) importing matplotlib and # setting its backend since exec_lines might import pylab. - self.shell = EmbeddedSphinxShell(exec_lines, self.state) + self.shell = EmbeddedSphinxShell(exec_lines) # Store IPython directive to enable better error messages self.shell.directive = self @@ -800,14 +880,9 @@ def setup(self): # reset the execution count if we haven't processed this doc #NOTE: this may be borked if there are multiple seen_doc tmp files #check time stamp? - if self.state.document.current_source not in self.seen_docs: + if not self.state.document.current_source in self.seen_docs: self.shell.IP.history_manager.reset() self.shell.IP.execution_count = 1 - try: - self.shell.IP.prompt_manager.width = 0 - except AttributeError: - # GH14003: class promptManager has removed after IPython 5.x - pass self.seen_docs.add(self.state.document.current_source) # and attach to shell so we don't have to pass them around @@ -846,13 +921,13 @@ def run(self): self.shell.is_okexcept = 'okexcept' in options self.shell.is_okwarning = 'okwarning' in options - self.shell.output_encoding = [options.get('output_encoding', 'utf8')] - # handle pure python code if 'python' in self.arguments: content = self.content self.content = self.shell.process_pure_python(content) + # parts consists of all text within the ipython-block. + # Each part is an input/output block. parts = '\n'.join(self.content).split('\n\n') lines = ['.. 
code-block:: ipython', ''] @@ -863,7 +938,8 @@ def run(self): if len(block): rows, figure = self.shell.process_block(block) for row in rows: - lines.extend([' %s'%line for line in row.split('\n')]) + lines.extend([' {0}'.format(line) + for line in row.split('\n')]) if figure is not None: figures.append(figure) @@ -873,7 +949,7 @@ def run(self): lines.extend(figure.split('\n')) lines.append('') - if len(lines)>2: + if len(lines) > 2: if debug: print('\n'.join(lines)) else: @@ -893,7 +969,7 @@ def setup(app): setup.app = app app.add_directive('ipython', IPythonDirective) - app.add_config_value('ipython_savefig_dir', None, 'env') + app.add_config_value('ipython_savefig_dir', 'savefig', 'env') app.add_config_value('ipython_rgxin', re.compile('In \[(\d+)\]:\s?(.*)\s*'), 'env') app.add_config_value('ipython_rgxout', @@ -914,6 +990,9 @@ def setup(app): app.add_config_value('ipython_holdcount', True, 'env') + metadata = {'parallel_read_safe': True, 'parallel_write_safe': True} + return metadata + # Simple smoke test, needs to be converted to a proper automatic test. def test(): @@ -1074,7 +1153,7 @@ def test(): #ipython_directive.DEBUG = True # dbg #options = dict(suppress=True) # dbg - options = dict() + options = {} for example in examples: content = example.split('\n') IPythonDirective('debug', arguments=None, options=options,
Updated to commit cc353b25b0fff58e4ed13899df9b3c8153df01d9 from ipython/ipython. xref https://github.com/pandas-dev/pandas/issues/18147, https://github.com/pandas-dev/pandas/pull/19687. An alternative for https://github.com/pandas-dev/pandas/pull/19657 until a new release of IPython is out.
https://api.github.com/repos/pandas-dev/pandas/pulls/19765
2018-02-19T10:59:16Z
2018-02-19T13:18:18Z
2018-02-19T13:18:18Z
2018-02-19T13:18:45Z
DOC: fix various warnings and errors in the docs (from deprecations/api changes)
diff --git a/doc/source/advanced.rst b/doc/source/advanced.rst index c455fbb8d0687..c81842d3d9212 100644 --- a/doc/source/advanced.rst +++ b/doc/source/advanced.rst @@ -274,7 +274,7 @@ Passing a list of labels or tuples works similar to reindexing: df.loc[[('bar', 'two'), ('qux', 'one')]] -.. info:: +.. note:: It is important to note that tuples and lists are not treated identically in pandas when it comes to indexing. Whereas a tuple is interpreted as one diff --git a/doc/source/dsintro.rst b/doc/source/dsintro.rst index 78e2fdb46f659..582750b16f40d 100644 --- a/doc/source/dsintro.rst +++ b/doc/source/dsintro.rst @@ -506,7 +506,7 @@ to be inserted (for example, a ``Series`` or NumPy array), or a function of one argument to be called on the ``DataFrame``. A *copy* of the original DataFrame is returned, with the new values inserted. -.. versionmodified:: 0.23.0 +.. versionchanged:: 0.23.0 Starting with Python 3.6 the order of ``**kwargs`` is preserved. This allows for *dependent* assignment, where an expression later in ``**kwargs`` can refer diff --git a/doc/source/io.rst b/doc/source/io.rst index 7bb34e4d232dd..6120f7d25a0c3 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -2262,6 +2262,7 @@ is not round-trippable, nor are any names beginning with 'level_' within a indicate missing values and the subsequent read cannot distinguish the intent. .. ipython:: python + :okwarning: df.index.name = 'index' df.to_json('test.json', orient='table') diff --git a/doc/source/whatsnew/v0.10.0.txt b/doc/source/whatsnew/v0.10.0.txt index a0c4a3e0073f9..222a2da23865c 100644 --- a/doc/source/whatsnew/v0.10.0.txt +++ b/doc/source/whatsnew/v0.10.0.txt @@ -411,15 +411,23 @@ N Dimensional Panels (Experimental) Adding experimental support for Panel4D and factory functions to create n-dimensional named panels. :ref:`Docs <dsintro.panel4d>` for NDim. Here is a taste of what to expect. - .. ipython:: python - :okwarning: - - p4d = Panel4D(randn(2, 2, 5, 4), - labels=['Label1','Label2'], - items=['Item1', 'Item2'], - major_axis=date_range('1/1/2000', periods=5), - minor_axis=['A', 'B', 'C', 'D']) - p4d +.. code-block:: ipython + + In [58]: p4d = Panel4D(randn(2, 2, 5, 4), + ....: labels=['Label1','Label2'], + ....: items=['Item1', 'Item2'], + ....: major_axis=date_range('1/1/2000', periods=5), + ....: minor_axis=['A', 'B', 'C', 'D']) + ....: + + In [59]: p4d + Out[59]: + <class 'pandas.core.panelnd.Panel4D'> + Dimensions: 2 (labels) x 2 (items) x 5 (major_axis) x 4 (minor_axis) + Labels axis: Label1 to Label2 + Items axis: Item1 to Item2 + Major_axis axis: 2000-01-01 00:00:00 to 2000-01-05 00:00:00 + Minor_axis axis: A to D diff --git a/doc/source/whatsnew/v0.13.1.txt b/doc/source/whatsnew/v0.13.1.txt index 5e5653945fefa..51ca6116d42ce 100644 --- a/doc/source/whatsnew/v0.13.1.txt +++ b/doc/source/whatsnew/v0.13.1.txt @@ -140,14 +140,21 @@ API changes applied would be called with an empty ``Series`` to guess whether a ``Series`` or ``DataFrame`` should be returned: - .. ipython:: python + .. 
code-block:: ipython + + In [32]: def applied_func(col): + ....: print("Apply function being called with: ", col) + ....: return col.sum() + ....: - def applied_func(col): - print("Apply function being called with: ", col) - return col.sum() + In [33]: empty = DataFrame(columns=['a', 'b']) - empty = DataFrame(columns=['a', 'b']) - empty.apply(applied_func) + In [34]: empty.apply(applied_func) + Apply function being called with: Series([], Length: 0, dtype: float64) + Out[34]: + a NaN + b NaN + Length: 2, dtype: float64 Now, when ``apply`` is called on an empty ``DataFrame``: if the ``reduce`` argument is ``True`` a ``Series`` will returned, if it is ``False`` a @@ -155,10 +162,22 @@ API changes function being applied will be called with an empty series to try and guess the return type. - .. ipython:: python + .. code-block:: ipython + + In [35]: empty.apply(applied_func, reduce=True) + Out[35]: + a NaN + b NaN + Length: 2, dtype: float64 + + In [36]: empty.apply(applied_func, reduce=False) + Out[36]: + Empty DataFrame + Columns: [a, b] + Index: [] + + [0 rows x 2 columns] - empty.apply(applied_func, reduce=True) - empty.apply(applied_func, reduce=False) Prior Version Deprecations/Changes ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/doc/source/whatsnew/v0.15.0.txt b/doc/source/whatsnew/v0.15.0.txt index ef17904d5ab1a..c5ef6c8c9d74a 100644 --- a/doc/source/whatsnew/v0.15.0.txt +++ b/doc/source/whatsnew/v0.15.0.txt @@ -1044,7 +1044,7 @@ Other: idx = MultiIndex.from_product([['a'], range(3), list("pqr")], names=['foo', 'bar', 'baz']) idx.set_names('qux', level=0) - idx.set_names(['qux','baz'], level=[0,1]) + idx.set_names(['qux','corge'], level=[0,1]) idx.set_levels(['a','b','c'], level='bar') idx.set_levels([['a','b','c'],[1,2,3]], level=[1,2]) diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt index 3e673bd4cbc28..0c2e494f29bc1 100644 --- a/doc/source/whatsnew/v0.21.0.txt +++ b/doc/source/whatsnew/v0.21.0.txt @@ -894,17 +894,14 @@ imported. Matplotlib plot methods (``plt.plot``, ``ax.plot``, ...), will not nicely format the x-axis for ``DatetimeIndex`` or ``PeriodIndex`` values. You must explicitly register these methods: -.. ipython:: python - - from pandas.tseries import converter - converter.register() - - fig, ax = plt.subplots() - plt.plot(pd.date_range('2017', periods=6), range(6)) - Pandas built-in ``Series.plot`` and ``DataFrame.plot`` *will* register these converters on first-use (:issue:17710). +.. note:: + + This change has been temporarily reverted in pandas 0.21.1, + for more details see :ref:`here <whatsnew_0211.converters>`. + .. _whatsnew_0210.api: Other API Changes diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index a2198d9103528..97f451dd1f56a 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -197,6 +197,7 @@ A ``DataFrame`` can now be written to and subsequently read back via JSON while Please note that the string `index` is not supported with the round trip format, as it is used by default in ``write_json`` to indicate a missing index name. .. ipython:: python + :okwarning: df.index.name = 'index' df.to_json('test.json', orient='table') diff --git a/doc/source/whatsnew/v0.8.0.txt b/doc/source/whatsnew/v0.8.0.txt index b9cece752981e..b2d1d16e86990 100644 --- a/doc/source/whatsnew/v0.8.0.txt +++ b/doc/source/whatsnew/v0.8.0.txt @@ -217,12 +217,12 @@ nanosecond support (the ``nanosecond`` field store the nanosecond value between ``DatetimeIndex`` to regular NumPy arrays. 
If you have code that requires an array of ``datetime.datetime`` objects, you -have a couple of options. First, the ``asobject`` property of ``DatetimeIndex`` +have a couple of options. First, the ``astype(object)`` method of ``DatetimeIndex`` produces an array of ``Timestamp`` objects: .. ipython:: python - stamp_array = rng.asobject + stamp_array = rng.astype(object) stamp_array stamp_array[5] diff --git a/pandas/core/frame.py b/pandas/core/frame.py index a001037b573d4..32101a5c3c5ec 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -114,7 +114,7 @@ - if `axis` is 1 or `'columns'` then `by` may contain column levels and/or index labels - .. versionmodified:: 0.23.0 + .. versionchanged:: 0.23.0 Allow specifying index or column level names.""", versionadded_to_excel='', optional_labels="""labels : array-like, optional @@ -2696,7 +2696,7 @@ def assign(self, **kwargs): or modified columns. All items are computed first, and then assigned in alphabetical order. - .. versionmodified :: 0.23.0 + .. versionchanged :: 0.23.0 Keyword argument order is maintained for Python 3.6 and later.
https://api.github.com/repos/pandas-dev/pandas/pulls/19763
2018-02-19T10:36:33Z
2018-02-20T09:20:56Z
2018-02-20T09:20:56Z
2018-02-20T09:21:04Z
ENH: ISO8601-compliant datetime string conversion in `iterrows()` and Series construction.
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 99a3773603fc4..7f33372f765fb 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -766,7 +766,6 @@ Datetimelike - Bug in :func:`to_datetime` where passing an out-of-bounds datetime with ``errors='coerce'`` and ``utc=True`` would raise ``OutOfBoundsDatetime`` instead of parsing to ``NaT`` (:issue:`19612`) - Bug in :class:`DatetimeIndex` and :class:`TimedeltaIndex` addition and subtraction where name of the returned object was not always set consistently. (:issue:`19744`) - Bug in :class:`DatetimeIndex` and :class:`TimedeltaIndex` addition and subtraction where operations with numpy arrays raised ``TypeError`` (:issue:`19847`) -- Timedelta ^^^^^^^^^ @@ -918,6 +917,7 @@ Reshaping - :func:`Series.rename` now accepts ``axis`` as a kwarg (:issue:`18589`) - Comparisons between :class:`Series` and :class:`Index` would return a ``Series`` with an incorrect name, ignoring the ``Index``'s name attribute (:issue:`19582`) - Bug in :func:`qcut` where datetime and timedelta data with ``NaT`` present raised a ``ValueError`` (:issue:`19768`) +- Bug in :func:`DataFrame.iterrows`, which would infers strings not compliant to `ISO8601 <https://en.wikipedia.org/wiki/ISO_8601>`_ to datetimes (:issue:`19671`) Other ^^^^^ diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index 352ce29f5c37b..b1d0dc2a2442e 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -904,16 +904,23 @@ def maybe_infer_to_datetimelike(value, convert_dates=False): def try_datetime(v): # safe coerce to datetime64 try: - v = tslib.array_to_datetime(v, errors='raise') + # GH19671 + v = tslib.array_to_datetime(v, + require_iso8601=True, + errors='raise') except ValueError: # we might have a sequence of the same-datetimes with tz's # if so coerce to a DatetimeIndex; if they are not the same, - # then these stay as object dtype + # then these stay as object dtype, xref GH19671 try: - from pandas import to_datetime - return to_datetime(v) - except Exception: + from pandas._libs.tslibs import conversion + from pandas import DatetimeIndex + + values, tz = conversion.datetime_to_datetime64(v) + return DatetimeIndex(values).tz_localize( + 'UTC').tz_convert(tz=tz) + except (ValueError, TypeError): pass except Exception: diff --git a/pandas/core/internals.py b/pandas/core/internals.py index d385185fbb558..00ef8f9cef598 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -2602,8 +2602,8 @@ def _maybe_coerce_values(self, values): """Input validation for values passed to __init__. Ensure that we have datetime64ns, coercing if necessary. 
- Parametetrs - ----------- + Parameters + ---------- values : array-like Must be convertible to datetime64 diff --git a/pandas/tests/dtypes/test_cast.py b/pandas/tests/dtypes/test_cast.py index 31bd962b67afb..96a9e3227b40b 100644 --- a/pandas/tests/dtypes/test_cast.py +++ b/pandas/tests/dtypes/test_cast.py @@ -301,6 +301,10 @@ def test_maybe_infer_to_datetimelike(self): [NaT, 'b', 1]])) assert result.size == 6 + # GH19671 + result = Series(['M1701', Timestamp('20130101')]) + assert result.dtype.kind == 'O' + class TestConvert(object): diff --git a/pandas/tests/frame/test_api.py b/pandas/tests/frame/test_api.py index 91fe7f99ca681..8ba5469480e64 100644 --- a/pandas/tests/frame/test_api.py +++ b/pandas/tests/frame/test_api.py @@ -15,7 +15,8 @@ from numpy.random import randn import numpy as np -from pandas import DataFrame, Series, date_range, timedelta_range, Categorical +from pandas import (DataFrame, Series, date_range, timedelta_range, + Categorical, SparseDataFrame) import pandas as pd from pandas.util.testing import (assert_almost_equal, @@ -214,6 +215,18 @@ def test_iterrows(self): exp = self.mixed_frame.loc[k] self._assert_series_equal(v, exp) + def test_iterrows_iso8601(self): + # GH19671 + if self.klass == SparseDataFrame: + pytest.xfail(reason='SparseBlock datetime type not implemented.') + + s = self.klass( + {'non_iso8601': ['M1701', 'M1802', 'M1903', 'M2004'], + 'iso8601': date_range('2000-01-01', periods=4, freq='M')}) + for k, v in s.iterrows(): + exp = s.loc[k] + self._assert_series_equal(v, exp) + def test_itertuples(self): for i, tup in enumerate(self.frame.itertuples()): s = self.klass._constructor_sliced(tup[1:])
- [x] closes https://github.com/pandas-dev/pandas/issues/19671 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
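For illustration, a minimal runnable sketch of the behavior this patch pins down, mirroring the new assertion in `pandas/tests/dtypes/test_cast.py`: a string that is not ISO8601-compliant is no longer inferred to a datetime during Series construction.

```python
import pandas as pd
from pandas import Timestamp

# GH19671: 'M1701' is not an ISO8601 datetime string, so construction
# keeps object dtype instead of coercing everything to datetime64.
s = pd.Series(['M1701', Timestamp('20130101')])
assert s.dtype.kind == 'O'
```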
https://api.github.com/repos/pandas-dev/pandas/pulls/19762
2018-02-19T09:19:19Z
2018-02-25T23:05:25Z
2018-02-25T23:05:25Z
2018-02-25T23:17:37Z
DOC: correct Period.strftime example
diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx index c11a8b149bc13..32ffe4e6d0453 100644 --- a/pandas/_libs/tslibs/period.pyx +++ b/pandas/_libs/tslibs/period.pyx @@ -1403,7 +1403,7 @@ cdef class _Period(object): Examples -------- - >>> a = Period(freq='Q@JUL', year=2006, quarter=1) + >>> a = Period(freq='Q-JUL', year=2006, quarter=1) >>> a.strftime('%F-Q%q') '2006-Q1' >>> # Output the last month in the quarter of this date
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
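For reference, the corrected docstring example runs as below; `'Q@JUL'` is not a valid frequency alias, while `'Q-JUL'` (quarterly, year ending in July) is.

```python
import pandas as pd

# Quarterly frequency anchored so the fiscal year ends in July.
a = pd.Period(freq='Q-JUL', year=2006, quarter=1)
print(a.strftime('%F-Q%q'))  # -> '2006-Q1'
```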
https://api.github.com/repos/pandas-dev/pandas/pulls/19758
2018-02-19T02:55:34Z
2018-02-19T13:52:14Z
2018-02-19T13:52:14Z
2018-02-19T14:49:48Z
ENH: fake HTTP proxy when testing with --skip-network
diff --git a/ci/script_multi.sh b/ci/script_multi.sh index 6c354fc4cab0b..45c61ee3172fe 100755 --- a/ci/script_multi.sh +++ b/ci/script_multi.sh @@ -12,6 +12,11 @@ if [ -n "$LOCALE_OVERRIDE" ]; then python -c "$pycmd" fi +# Enforce absent network during testing by faking a proxy +if echo "$TEST_ARGS" | grep -e --skip-network -q; then + export http_proxy=http://1.2.3.4 https_proxy=http://1.2.3.4; +fi + # Workaround for pytest-xdist flaky collection order # https://github.com/pytest-dev/pytest/issues/920 # https://github.com/pytest-dev/pytest/issues/1075 diff --git a/ci/script_single.sh b/ci/script_single.sh index 74b0e897f1d73..021a5a7714fb5 100755 --- a/ci/script_single.sh +++ b/ci/script_single.sh @@ -16,6 +16,11 @@ if [ "$SLOW" ]; then TEST_ARGS="--only-slow --skip-network" fi +# Enforce absent network during testing by faking a proxy +if echo "$TEST_ARGS" | grep -e --skip-network -q; then + export http_proxy=http://1.2.3.4 https_proxy=http://1.2.3.4; +fi + if [ "$PIP_BUILD_TEST" ]; then echo "We are not running pytest as this is a build test." diff --git a/pandas/tests/test_downstream.py b/pandas/tests/test_downstream.py index b438d6a6137b0..a595d9f18d6b8 100644 --- a/pandas/tests/test_downstream.py +++ b/pandas/tests/test_downstream.py @@ -53,6 +53,7 @@ def test_xarray(df): assert df.to_xarray() is not None +@tm.network def test_statsmodels(): statsmodels = import_module('statsmodels') # noqa @@ -73,6 +74,7 @@ def test_scikit_learn(df): clf.predict(digits.data[-1:]) +@tm.network def test_seaborn(): seaborn = import_module('seaborn')
- [ ] tests added / passed - most likely they will fail until #19754 is merged; there might be more - [ ] whatsnew entry - probably not worthwhile for this
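For readers who want to reproduce the CI behavior locally, here is a minimal Python sketch of what the shell snippet does (the `1.2.3.4` address is the same dummy proxy the scripts export):

```python
import os

# Point HTTP(S) traffic at a dummy proxy so any test that accidentally
# reaches for the network fails fast instead of silently succeeding
# when run with --skip-network.
os.environ['http_proxy'] = 'http://1.2.3.4'
os.environ['https_proxy'] = 'http://1.2.3.4'
```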
https://api.github.com/repos/pandas-dev/pandas/pulls/19757
2018-02-19T02:49:43Z
2018-02-19T15:38:41Z
2018-02-19T15:38:41Z
2018-02-20T22:35:27Z
ENH: implement Timedelta.__mod__ and __divmod__
diff --git a/doc/source/timedeltas.rst b/doc/source/timedeltas.rst index 50cff4c7bbdfb..5f3a01f0725d4 100644 --- a/doc/source/timedeltas.rst +++ b/doc/source/timedeltas.rst @@ -283,6 +283,20 @@ Rounded division (floor-division) of a ``timedelta64[ns]`` Series by a scalar td // pd.Timedelta(days=3, hours=4) pd.Timedelta(days=3, hours=4) // td +.. _timedeltas.mod_divmod: + +The mod (%) and divmod operations are defined for ``Timedelta`` when operating with another timedelta-like or with a numeric argument. + +.. ipython:: python + + pd.Timedelta(hours=37) % datetime.timedelta(hours=2) + + # divmod against a timedelta-like returns a pair (int, Timedelta) + divmod(datetime.timedelta(hours=2), pd.Timedelta(minutes=11)) + + # divmod against a numeric returns a pair (Timedelta, Timedelta) + divmod(pd.Timedelta(hours=25), 86400000000000) + Attributes ---------- diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 11c49995372f5..aa1e434aae6e9 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -117,6 +117,20 @@ resetting indexes. See the :ref:`Sorting by Indexes and Values # Sort by 'second' (index) and 'A' (column) df_multi.sort_values(by=['second', 'A']) +.. _whatsnew_0230.enhancements.timedelta_mod + +Timedelta mod method +^^^^^^^^^^^^^^^^^^^^ + +``mod`` (%) and ``divmod`` operations are now defined on ``Timedelta`` objects +when operating with either timedelta-like or with numeric arguments. +See the :ref:`documentation here <timedeltas.mod_divmod>`. (:issue:`19365`) + +.. ipython:: python + + td = pd.Timedelta(hours=37) + td % pd.Timedelta(minutes=45) + .. _whatsnew_0230.enhancements.ran_inf: ``.rank()`` handles ``inf`` values when ``NaN`` are present diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx index 37693068e0974..f10175fddd00b 100644 --- a/pandas/_libs/tslibs/timedeltas.pyx +++ b/pandas/_libs/tslibs/timedeltas.pyx @@ -1149,6 +1149,24 @@ class Timedelta(_Timedelta): return np.nan return other.value // self.value + def __mod__(self, other): + # Naive implementation, room for optimization + return self.__divmod__(other)[1] + + def __rmod__(self, other): + # Naive implementation, room for optimization + return self.__rdivmod__(other)[1] + + def __divmod__(self, other): + # Naive implementation, room for optimization + div = self // other + return div, self - div * other + + def __rdivmod__(self, other): + # Naive implementation, room for optimization + div = other // self + return div, other - div * self + cdef _floordiv(int64_t value, right): return value // right diff --git a/pandas/tests/scalar/timedelta/test_arithmetic.py b/pandas/tests/scalar/timedelta/test_arithmetic.py index 90c911c24f6a9..43e9491b9de0b 100644 --- a/pandas/tests/scalar/timedelta/test_arithmetic.py +++ b/pandas/tests/scalar/timedelta/test_arithmetic.py @@ -420,3 +420,189 @@ def test_td_rfloordiv_numeric_series(self): assert res is NotImplemented with pytest.raises(TypeError): ser // td + + def test_mod_timedeltalike(self): + # GH#19365 + td = Timedelta(hours=37) + + # Timedelta-like others + result = td % Timedelta(hours=6) + assert isinstance(result, Timedelta) + assert result == Timedelta(hours=1) + + result = td % timedelta(minutes=60) + assert isinstance(result, Timedelta) + assert result == Timedelta(0) + + result = td % NaT + assert result is NaT + + @pytest.mark.xfail(reason='GH#19378 floordiv td64 returns td64') + def test_mod_timedelta64_nat(self): + # GH#19365 + td = Timedelta(hours=37) + + result = td % 
np.timedelta64('NaT', 'ns') + assert result is NaT + + @pytest.mark.xfail(reason='GH#19378 floordiv td64 returns td64') + def test_mod_timedelta64(self): + # GH#19365 + td = Timedelta(hours=37) + + result = td % np.timedelta64(2, 'h') + assert isinstance(result, Timedelta) + assert result == Timedelta(hours=1) + + @pytest.mark.xfail(reason='GH#19378 floordiv by Tick not implemented') + def test_mod_offset(self): + # GH#19365 + td = Timedelta(hours=37) + + result = td % pd.offsets.Hour(5) + assert isinstance(result, Timedelta) + assert result == Timedelta(hours=2) + + # ---------------------------------------------------------------- + # Timedelta.__mod__, __rmod__ + + def test_mod_numeric(self): + # GH#19365 + td = Timedelta(hours=37) + + # Numeric Others + result = td % 2 + assert isinstance(result, Timedelta) + assert result == Timedelta(0) + + result = td % 1e12 + assert isinstance(result, Timedelta) + assert result == Timedelta(minutes=3, seconds=20) + + result = td % int(1e12) + assert isinstance(result, Timedelta) + assert result == Timedelta(minutes=3, seconds=20) + + def test_mod_invalid(self): + # GH#19365 + td = Timedelta(hours=37) + + with pytest.raises(TypeError): + td % pd.Timestamp('2018-01-22') + + with pytest.raises(TypeError): + td % [] + + def test_rmod_pytimedelta(self): + # GH#19365 + td = Timedelta(minutes=3) + + result = timedelta(minutes=4) % td + assert isinstance(result, Timedelta) + assert result == Timedelta(minutes=1) + + @pytest.mark.xfail(reason='GH#19378 floordiv by Tick not implemented') + def test_rmod_timedelta64(self): + # GH#19365 + td = Timedelta(minutes=3) + result = np.timedelta64(5, 'm') % td + assert isinstance(result, Timedelta) + assert result == Timedelta(minutes=2) + + def test_rmod_invalid(self): + # GH#19365 + td = Timedelta(minutes=3) + + with pytest.raises(TypeError): + pd.Timestamp('2018-01-22') % td + + with pytest.raises(TypeError): + 15 % td + + with pytest.raises(TypeError): + 16.0 % td + + with pytest.raises(TypeError): + np.array([22, 24]) % td + + # ---------------------------------------------------------------- + # Timedelta.__divmod__, __rdivmod__ + + def test_divmod_numeric(self): + # GH#19365 + td = Timedelta(days=2, hours=6) + + result = divmod(td, 53 * 3600 * 1e9) + assert result[0] == Timedelta(1, unit='ns') + assert isinstance(result[1], Timedelta) + assert result[1] == Timedelta(hours=1) + + assert result + result = divmod(td, np.nan) + assert result[0] is pd.NaT + assert result[1] is pd.NaT + + def test_divmod(self): + # GH#19365 + td = Timedelta(days=2, hours=6) + + result = divmod(td, timedelta(days=1)) + assert result[0] == 2 + assert isinstance(result[1], Timedelta) + assert result[1] == Timedelta(hours=6) + + result = divmod(td, 54) + assert result[0] == Timedelta(hours=1) + assert isinstance(result[1], Timedelta) + assert result[1] == Timedelta(0) + + result = divmod(td, pd.NaT) + assert np.isnan(result[0]) + assert result[1] is pd.NaT + + @pytest.mark.xfail(reason='GH#19378 floordiv by Tick not implemented') + def test_divmod_offset(self): + # GH#19365 + td = Timedelta(days=2, hours=6) + + result = divmod(td, pd.offsets.Hour(-4)) + assert result[0] == -14 + assert isinstance(result[1], Timedelta) + assert result[1] == Timedelta(hours=-2) + + def test_divmod_invalid(self): + # GH#19365 + td = Timedelta(days=2, hours=6) + + with pytest.raises(TypeError): + divmod(td, pd.Timestamp('2018-01-22')) + + def test_rdivmod_pytimedelta(self): + # GH#19365 + result = divmod(timedelta(days=2, hours=6), Timedelta(days=1)) + 
assert result[0] == 2 + assert isinstance(result[1], Timedelta) + assert result[1] == Timedelta(hours=6) + + @pytest.mark.xfail(reason='GH#19378 floordiv by Tick not implemented') + def test_rdivmod_offset(self): + result = divmod(pd.offsets.Hour(54), Timedelta(hours=-4)) + assert result[0] == -14 + assert isinstance(result[1], Timedelta) + assert result[1] == Timedelta(hours=-2) + + def test_rdivmod_invalid(self): + # GH#19365 + td = Timedelta(minutes=3) + + with pytest.raises(TypeError): + divmod(pd.Timestamp('2018-01-22'), td) + + with pytest.raises(TypeError): + divmod(15, td) + + with pytest.raises(TypeError): + divmod(16.0, td) + + with pytest.raises(TypeError): + divmod(np.array([22, 24]), td)
This implements only the `__mod__`, `__rmod__`, `__divmod__`, and `__rdivmod__` parts of #19365; it does _not_ implement any of the bugfixes or tests that got rolled into that PR. Several tests are xfailed for now and will be fixed after #19738. These tests will be rebased/moved to test_arithmetic after #19752. - [ ] closes #xxxx - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
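To make the new behavior concrete, the examples added to `doc/source/timedeltas.rst` in this PR can be run directly:

```python
import datetime
import pandas as pd

print(pd.Timedelta(hours=37) % datetime.timedelta(hours=2))
# -> 0 days 01:00:00

# divmod against a timedelta-like returns a pair (int, Timedelta)
print(divmod(datetime.timedelta(hours=2), pd.Timedelta(minutes=11)))

# divmod against a numeric returns a pair (Timedelta, Timedelta)
print(divmod(pd.Timedelta(hours=25), 86400000000000))
```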
https://api.github.com/repos/pandas-dev/pandas/pulls/19755
2018-02-19T00:04:04Z
2018-02-19T23:16:05Z
2018-02-19T23:16:05Z
2018-06-22T03:23:22Z
BF: mark two tests (test_{statsmodels,seaborn}) as requiring network
diff --git a/pandas/tests/test_downstream.py b/pandas/tests/test_downstream.py index b438d6a6137b0..a595d9f18d6b8 100644 --- a/pandas/tests/test_downstream.py +++ b/pandas/tests/test_downstream.py @@ -53,6 +53,7 @@ def test_xarray(df): assert df.to_xarray() is not None +@tm.network def test_statsmodels(): statsmodels = import_module('statsmodels') # noqa @@ -73,6 +74,7 @@ def test_scikit_learn(df): clf.predict(digits.data[-1:]) +@tm.network def test_seaborn(): seaborn = import_module('seaborn')
- [notworthit] closes #xxxx - [nowtheywill] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [notworthit] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/19754
2018-02-18T23:13:05Z
2018-02-19T06:00:14Z
null
2018-02-19T06:00:14Z
TST: add test for numpy ops, esp. nanmin/max bug for np<1.13
diff --git a/pandas/tests/test_nanops.py b/pandas/tests/test_nanops.py index df3c49a73d227..dffb303af6ae1 100644 --- a/pandas/tests/test_nanops.py +++ b/pandas/tests/test_nanops.py @@ -13,6 +13,7 @@ import pandas.core.nanops as nanops import pandas.util.testing as tm import pandas.util._test_decorators as td +from pandas.compat.numpy import _np_version_under1p13 use_bn = nanops._USE_BOTTLENECK @@ -1015,3 +1016,34 @@ def test_use_bottleneck(): assert not pd.get_option('use_bottleneck') pd.set_option('use_bottleneck', use_bn) + + +@pytest.mark.parametrize("numpy_op, expected", [ + (np.sum, 10), + (np.nansum, 10), + (np.mean, 2.5), + (np.nanmean, 2.5), + (np.median, 2.5), + (np.nanmedian, 2.5), + (np.min, 1), + (np.max, 4), +]) +def test_numpy_ops(numpy_op, expected): + # GH8383 + result = numpy_op(pd.Series([1, 2, 3, 4])) + assert result == expected + + +@pytest.mark.parametrize("numpy_op, expected", [ + (np.nanmin, 1), + (np.nanmax, 4), +]) +def test_numpy_ops_np_version_under1p13(numpy_op, expected): + # GH8383 + result = numpy_op(pd.Series([1, 2, 3, 4])) + if _np_version_under1p13: + # bug for numpy < 1.13, where result is a series, should be a scalar + with pytest.raises(ValueError): + assert result == expected + else: + assert result == expected
- [x] closes #8383 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` The bug in #8383 concerns np.nanmin and np.nanmax only; I don't know if I'm testing too much.
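The essence of the new test as a standalone snippet: on numpy >= 1.13 every one of these reductions returns a scalar, while on older numpy `np.nanmin`/`np.nanmax` wrongly returned a Series (GH8383).

```python
import numpy as np
import pandas as pd

s = pd.Series([1, 2, 3, 4])
assert np.sum(s) == 10
assert np.nanmean(s) == 2.5
assert np.nanmedian(s) == 2.5
# The two buggy cases on numpy < 1.13:
assert np.nanmin(s) == 1
assert np.nanmax(s) == 4
```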
https://api.github.com/repos/pandas-dev/pandas/pulls/19753
2018-02-18T22:13:06Z
2018-02-19T13:50:57Z
2018-02-19T13:50:57Z
2018-05-18T18:09:17Z
split off scalar tests into submodules
diff --git a/pandas/tests/scalar/interval/__init__.py b/pandas/tests/scalar/interval/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/pandas/tests/scalar/test_interval.py b/pandas/tests/scalar/interval/test_interval.py similarity index 100% rename from pandas/tests/scalar/test_interval.py rename to pandas/tests/scalar/interval/test_interval.py diff --git a/pandas/tests/scalar/period/__init__.py b/pandas/tests/scalar/period/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/pandas/tests/scalar/test_period.py b/pandas/tests/scalar/period/test_period.py similarity index 100% rename from pandas/tests/scalar/test_period.py rename to pandas/tests/scalar/period/test_period.py diff --git a/pandas/tests/scalar/test_period_asfreq.py b/pandas/tests/scalar/period/test_period_asfreq.py similarity index 100% rename from pandas/tests/scalar/test_period_asfreq.py rename to pandas/tests/scalar/period/test_period_asfreq.py diff --git a/pandas/tests/scalar/timedelta/__init__.py b/pandas/tests/scalar/timedelta/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/pandas/tests/scalar/timedelta/test_arithmetic.py b/pandas/tests/scalar/timedelta/test_arithmetic.py new file mode 100644 index 0000000000000..90c911c24f6a9 --- /dev/null +++ b/pandas/tests/scalar/timedelta/test_arithmetic.py @@ -0,0 +1,422 @@ +# -*- coding: utf-8 -*- +""" +Tests for scalar Timedelta arithmetic ops +""" +from datetime import datetime, timedelta +import operator + +import numpy as np +import pytest + +import pandas as pd +import pandas.util.testing as tm +from pandas.core import ops +from pandas import Timedelta, Timestamp, NaT + + +class TestTimedeltaAdditionSubtraction(object): + """ + Tests for Timedelta methods: + + __add__, __radd__, + __sub__, __rsub__ + """ + @pytest.mark.parametrize('ten_seconds', [ + Timedelta(10, unit='s'), + timedelta(seconds=10), + np.timedelta64(10, 's'), + np.timedelta64(10000000000, 'ns'), + pd.offsets.Second(10)]) + def test_td_add_sub_ten_seconds(self, ten_seconds): + # GH#6808 + base = Timestamp('20130101 09:01:12.123456') + expected_add = Timestamp('20130101 09:01:22.123456') + expected_sub = Timestamp('20130101 09:01:02.123456') + + result = base + ten_seconds + assert result == expected_add + + result = base - ten_seconds + assert result == expected_sub + + @pytest.mark.parametrize('one_day_ten_secs', [ + Timedelta('1 day, 00:00:10'), + Timedelta('1 days, 00:00:10'), + timedelta(days=1, seconds=10), + np.timedelta64(1, 'D') + np.timedelta64(10, 's'), + pd.offsets.Day() + pd.offsets.Second(10)]) + def test_td_add_sub_one_day_ten_seconds(self, one_day_ten_secs): + # GH#6808 + base = Timestamp('20130102 09:01:12.123456') + expected_add = Timestamp('20130103 09:01:22.123456') + expected_sub = Timestamp('20130101 09:01:02.123456') + + result = base + one_day_ten_secs + assert result == expected_add + + result = base - one_day_ten_secs + assert result == expected_sub + + @pytest.mark.parametrize('op', [operator.add, ops.radd]) + def test_td_add_datetimelike_scalar(self, op): + # GH#19738 + td = Timedelta(10, unit='d') + + result = op(td, datetime(2016, 1, 1)) + if op is operator.add: + # datetime + Timedelta does _not_ call Timedelta.__radd__, + # so we get a datetime back instead of a Timestamp + assert isinstance(result, Timestamp) + assert result == Timestamp(2016, 1, 11) + + result = op(td, Timestamp('2018-01-12 18:09')) + assert isinstance(result, Timestamp) + assert result == Timestamp('2018-01-22 18:09') + + 
result = op(td, np.datetime64('2018-01-12')) + assert isinstance(result, Timestamp) + assert result == Timestamp('2018-01-22') + + result = op(td, NaT) + assert result is NaT + + with pytest.raises(TypeError): + op(td, 2) + with pytest.raises(TypeError): + op(td, 2.0) + + @pytest.mark.parametrize('op', [operator.add, ops.radd]) + def test_td_add_td(self, op): + td = Timedelta(10, unit='d') + + result = op(td, Timedelta(days=10)) + assert isinstance(result, Timedelta) + assert result == Timedelta(days=20) + + @pytest.mark.parametrize('op', [operator.add, ops.radd]) + def test_td_add_pytimedelta(self, op): + td = Timedelta(10, unit='d') + result = op(td, timedelta(days=9)) + assert isinstance(result, Timedelta) + assert result == Timedelta(days=19) + + @pytest.mark.xfail(reason='GH#19738 argument not converted to Timedelta') + @pytest.mark.parametrize('op', [operator.add, ops.radd]) + def test_td_add_timedelta64(self, op): + td = Timedelta(10, unit='d') + result = op(td, np.timedelta64(-4, 'D')) + assert isinstance(result, Timedelta) + assert result == Timedelta(days=6) + + @pytest.mark.parametrize('op', [operator.add, ops.radd]) + def test_td_add_offset(self, op): + td = Timedelta(10, unit='d') + + result = op(td, pd.offsets.Hour(6)) + assert isinstance(result, Timedelta) + assert result == Timedelta(days=10, hours=6) + + def test_td_sub_td(self): + td = Timedelta(10, unit='d') + expected = Timedelta(0, unit='ns') + result = td - td + assert isinstance(result, Timedelta) + assert result == expected + + def test_td_sub_pytimedelta(self): + td = Timedelta(10, unit='d') + expected = Timedelta(0, unit='ns') + result = td - td.to_pytimedelta() + assert isinstance(result, Timedelta) + assert result == expected + + @pytest.mark.xfail(reason='GH#19738 argument not converted to Timedelta') + def test_td_sub_timedelta64(self): + td = Timedelta(10, unit='d') + expected = Timedelta(0, unit='ns') + result = td - td.to_timedelta64() + assert isinstance(result, Timedelta) + # comparison fails even if we comment out the isinstance assertion + assert result == expected + + def test_td_sub_nat(self): + td = Timedelta(10, unit='d') + result = td - NaT + assert result is NaT + + @pytest.mark.xfail(reason='GH#19738 argument not converted to Timedelta') + def test_td_sub_td64_nat(self): + td = Timedelta(10, unit='d') + result = td - np.timedelta64('NaT') + assert result is NaT + + def test_td_sub_offset(self): + td = Timedelta(10, unit='d') + result = td - pd.offsets.Hour(1) + assert isinstance(result, Timedelta) + assert result == Timedelta(239, unit='h') + + def test_td_sub_numeric_raises(self): + td = td = Timedelta(10, unit='d') + with pytest.raises(TypeError): + td - 2 + with pytest.raises(TypeError): + td - 2.0 + + def test_td_rsub_pytimedelta(self): + td = Timedelta(10, unit='d') + expected = Timedelta(0, unit='ns') + + result = td.to_pytimedelta() - td + assert isinstance(result, Timedelta) + assert result == expected + + @pytest.mark.xfail(reason='GH#19738 argument not converted to Timedelta') + def test_td_rsub_timedelta64(self): + td = Timedelta(10, unit='d') + expected = Timedelta(0, unit='ns') + + result = td.to_timedelta64() - td + assert isinstance(result, Timedelta) + assert result == expected + + def test_td_rsub_nat(self): + td = Timedelta(10, unit='d') + result = NaT - td + assert result is NaT + + result = np.datetime64('NaT') - td + assert result is NaT + + @pytest.mark.xfail(reason='GH#19738 argument not converted to Timedelta') + def test_td_rsub_td64_nat(self): + td = Timedelta(10, 
unit='d') + result = np.timedelta64('NaT') - td + assert result is NaT + + def test_td_rsub_offset(self): + result = pd.offsets.Hour(1) - Timedelta(10, unit='d') + assert isinstance(result, Timedelta) + assert result == Timedelta(-239, unit='h') + + def test_td_rsub_numeric_raises(self): + td = td = Timedelta(10, unit='d') + with pytest.raises(TypeError): + 2 - td + with pytest.raises(TypeError): + 2.0 - td + + +class TestTimedeltaMultiplicationDivision(object): + """ + Tests for Timedelta methods: + + __mul__, __rmul__, + __div__, __rdiv__, + __truediv__, __rtruediv__, + __floordiv__, __rfloordiv__, + __mod__, __rmod__, + __divmod__, __rdivmod__ + """ + + # --------------------------------------------------------------- + # Timedelta.__mul__, __rmul__ + + @pytest.mark.parametrize('op', [operator.mul, ops.rmul]) + def test_td_mul_scalar(self, op): + # GH#19738 + td = Timedelta(minutes=3) + + result = op(td, 2) + assert result == Timedelta(minutes=6) + + result = op(td, 1.5) + assert result == Timedelta(minutes=4, seconds=30) + + assert op(td, np.nan) is NaT + + assert op(-1, td).value == -1 * td.value + assert op(-1.0, td).value == -1.0 * td.value + + with pytest.raises(TypeError): + # timedelta * datetime is gibberish + op(td, Timestamp(2016, 1, 2)) + + with pytest.raises(TypeError): + # invalid multiply with another timedelta + op(td, td) + + # --------------------------------------------------------------- + # Timedelta.__div__, __truediv__ + + def test_td_div_timedeltalike_scalar(self): + # GH#19738 + td = Timedelta(10, unit='d') + + result = td / pd.offsets.Hour(1) + assert result == 240 + + assert td / td == 1 + assert td / np.timedelta64(60, 'h') == 4 + + assert np.isnan(td / NaT) + + def test_td_div_numeric_scalar(self): + # GH#19738 + td = Timedelta(10, unit='d') + + result = td / 2 + assert isinstance(result, Timedelta) + assert result == Timedelta(days=5) + + result = td / 5.0 + assert isinstance(result, Timedelta) + assert result == Timedelta(days=2) + + # --------------------------------------------------------------- + # Timedelta.__rdiv__ + + def test_td_rdiv_timedeltalike_scalar(self): + # GH#19738 + td = Timedelta(10, unit='d') + result = pd.offsets.Hour(1) / td + assert result == 1 / 240.0 + + assert np.timedelta64(60, 'h') / td == 0.25 + + # --------------------------------------------------------------- + # Timedelta.__floordiv__ + + def test_td_floordiv_timedeltalike_scalar(self): + # GH#18846 + td = Timedelta(hours=3, minutes=4) + scalar = Timedelta(hours=3, minutes=3) + + assert td // scalar == 1 + assert -td // scalar.to_pytimedelta() == -2 + assert (2 * td) // scalar.to_timedelta64() == 2 + + def test_td_floordiv_null_scalar(self): + # GH#18846 + td = Timedelta(hours=3, minutes=4) + + assert td // np.nan is NaT + assert np.isnan(td // NaT) + assert np.isnan(td // np.timedelta64('NaT')) + + def test_td_floordiv_invalid_scalar(self): + # GH#18846 + td = Timedelta(hours=3, minutes=4) + + with pytest.raises(TypeError): + td // np.datetime64('2016-01-01', dtype='datetime64[us]') + + def test_td_floordiv_numeric_scalar(self): + # GH#18846 + td = Timedelta(hours=3, minutes=4) + + expected = Timedelta(hours=1, minutes=32) + assert td // 2 == expected + assert td // 2.0 == expected + assert td // np.float64(2.0) == expected + assert td // np.int32(2.0) == expected + assert td // np.uint8(2.0) == expected + + def test_floordiv_timedeltalike_array(self): + # GH#18846 + td = Timedelta(hours=3, minutes=4) + scalar = Timedelta(hours=3, minutes=3) + + # Array-like others + assert 
td // np.array(scalar.to_timedelta64()) == 1 + + res = (3 * td) // np.array([scalar.to_timedelta64()]) + expected = np.array([3], dtype=np.int64) + tm.assert_numpy_array_equal(res, expected) + + res = (10 * td) // np.array([scalar.to_timedelta64(), + np.timedelta64('NaT')]) + expected = np.array([10, np.nan]) + tm.assert_numpy_array_equal(res, expected) + + def test_td_floordiv_numeric_series(self): + # GH#18846 + td = Timedelta(hours=3, minutes=4) + ser = pd.Series([1], dtype=np.int64) + res = td // ser + assert res.dtype.kind == 'm' + + # --------------------------------------------------------------- + # Timedelta.__rfloordiv__ + + def test_td_rfloordiv_timedeltalike_scalar(self): + # GH#18846 + td = Timedelta(hours=3, minutes=3) + scalar = Timedelta(hours=3, minutes=4) + + # scalar others + # x // Timedelta is defined only for timedelta-like x. int-like, + # float-like, and date-like, in particular, should all either + # a) raise TypeError directly or + # b) return NotImplemented, following which the reversed + # operation will raise TypeError. + assert td.__rfloordiv__(scalar) == 1 + assert (-td).__rfloordiv__(scalar.to_pytimedelta()) == -2 + assert (2 * td).__rfloordiv__(scalar.to_timedelta64()) == 0 + + def test_td_rfloordiv_null_scalar(self): + # GH#18846 + td = Timedelta(hours=3, minutes=3) + + assert np.isnan(td.__rfloordiv__(NaT)) + assert np.isnan(td.__rfloordiv__(np.timedelta64('NaT'))) + + def test_td_rfloordiv_invalid_scalar(self): + # GH#18846 + td = Timedelta(hours=3, minutes=3) + + dt64 = np.datetime64('2016-01-01', dtype='datetime64[us]') + with pytest.raises(TypeError): + td.__rfloordiv__(dt64) + + def test_td_rfloordiv_numeric_scalar(self): + # GH#18846 + td = Timedelta(hours=3, minutes=3) + + assert td.__rfloordiv__(np.nan) is NotImplemented + assert td.__rfloordiv__(3.5) is NotImplemented + assert td.__rfloordiv__(2) is NotImplemented + + with pytest.raises(TypeError): + td.__rfloordiv__(np.float64(2.0)) + with pytest.raises(TypeError): + td.__rfloordiv__(np.int32(2.0)) + with pytest.raises(TypeError): + td.__rfloordiv__(np.uint8(9)) + + def test_td_rfloordiv_timedeltalike_array(self): + # GH#18846 + td = Timedelta(hours=3, minutes=3) + scalar = Timedelta(hours=3, minutes=4) + + # Array-like others + assert td.__rfloordiv__(np.array(scalar.to_timedelta64())) == 1 + + res = td.__rfloordiv__(np.array([(3 * scalar).to_timedelta64()])) + expected = np.array([3], dtype=np.int64) + tm.assert_numpy_array_equal(res, expected) + + arr = np.array([(10 * scalar).to_timedelta64(), + np.timedelta64('NaT')]) + res = td.__rfloordiv__(arr) + expected = np.array([10, np.nan]) + tm.assert_numpy_array_equal(res, expected) + + def test_td_rfloordiv_numeric_series(self): + # GH#18846 + td = Timedelta(hours=3, minutes=3) + ser = pd.Series([1], dtype=np.int64) + res = td.__rfloordiv__(ser) + assert res is NotImplemented + with pytest.raises(TypeError): + ser // td diff --git a/pandas/tests/scalar/test_timedelta.py b/pandas/tests/scalar/timedelta/test_timedelta.py similarity index 83% rename from pandas/tests/scalar/test_timedelta.py rename to pandas/tests/scalar/timedelta/test_timedelta.py index 667266be2a89b..420b66b4ce0dc 100644 --- a/pandas/tests/scalar/test_timedelta.py +++ b/pandas/tests/scalar/timedelta/test_timedelta.py @@ -74,46 +74,6 @@ class Other: assert td.__mul__(other) is NotImplemented assert td.__floordiv__(other) is NotImplemented - def test_timedelta_ops_scalar(self): - # GH 6808 - base = pd.to_datetime('20130101 09:01:12.123456') - expected_add = pd.to_datetime('20130101 
09:01:22.123456') - expected_sub = pd.to_datetime('20130101 09:01:02.123456') - - for offset in [pd.to_timedelta(10, unit='s'), timedelta(seconds=10), - np.timedelta64(10, 's'), - np.timedelta64(10000000000, 'ns'), - pd.offsets.Second(10)]: - result = base + offset - assert result == expected_add - - result = base - offset - assert result == expected_sub - - base = pd.to_datetime('20130102 09:01:12.123456') - expected_add = pd.to_datetime('20130103 09:01:22.123456') - expected_sub = pd.to_datetime('20130101 09:01:02.123456') - - for offset in [pd.to_timedelta('1 day, 00:00:10'), - pd.to_timedelta('1 days, 00:00:10'), - timedelta(days=1, seconds=10), - np.timedelta64(1, 'D') + np.timedelta64(10, 's'), - pd.offsets.Day() + pd.offsets.Second(10)]: - result = base + offset - assert result == expected_add - - result = base - offset - assert result == expected_sub - - def test_ops_offsets(self): - td = Timedelta(10, unit='d') - assert Timedelta(241, unit='h') == td + pd.offsets.Hour(1) - assert Timedelta(241, unit='h') == pd.offsets.Hour(1) + td - assert 240 == td / pd.offsets.Hour(1) - assert 1 / 240.0 == pd.offsets.Hour(1) / td - assert Timedelta(239, unit='h') == td - pd.offsets.Hour(1) - assert Timedelta(-239, unit='h') == pd.offsets.Hour(1) - td - def test_unary_ops(self): td = Timedelta(10, unit='d') @@ -129,130 +89,8 @@ def test_unary_ops(self): def test_binary_ops_nat(self): td = Timedelta(10, unit='d') - - assert (td - pd.NaT) is pd.NaT - assert (td + pd.NaT) is pd.NaT + # FIXME: The next test is wrong: td * NaT should raise assert (td * pd.NaT) is pd.NaT - assert (td / pd.NaT) is np.nan - assert (td // pd.NaT) is np.nan - assert (td // np.timedelta64('NaT')) is np.nan - - def test_binary_ops_integers(self): - td = Timedelta(10, unit='d') - - assert td * 2 == Timedelta(20, unit='d') - assert td / 2 == Timedelta(5, unit='d') - assert td // 2 == Timedelta(5, unit='d') - - # invert - assert td * -1 == Timedelta('-10d') - assert -1 * td == Timedelta('-10d') - - # can't operate with integers - pytest.raises(TypeError, lambda: td + 2) - pytest.raises(TypeError, lambda: td - 2) - - def test_binary_ops_with_timedelta(self): - td = Timedelta(10, unit='d') - - assert td - td == Timedelta(0, unit='ns') - assert td + td == Timedelta(20, unit='d') - assert td / td == 1 - - # invalid multiply with another timedelta - pytest.raises(TypeError, lambda: td * td) - - def test_floordiv(self): - # GH#18846 - td = Timedelta(hours=3, minutes=4) - scalar = Timedelta(hours=3, minutes=3) - - # scalar others - assert td // scalar == 1 - assert -td // scalar.to_pytimedelta() == -2 - assert (2 * td) // scalar.to_timedelta64() == 2 - - assert td // np.nan is pd.NaT - assert np.isnan(td // pd.NaT) - assert np.isnan(td // np.timedelta64('NaT')) - - with pytest.raises(TypeError): - td // np.datetime64('2016-01-01', dtype='datetime64[us]') - - expected = Timedelta(hours=1, minutes=32) - assert td // 2 == expected - assert td // 2.0 == expected - assert td // np.float64(2.0) == expected - assert td // np.int32(2.0) == expected - assert td // np.uint8(2.0) == expected - - # Array-like others - assert td // np.array(scalar.to_timedelta64()) == 1 - - res = (3 * td) // np.array([scalar.to_timedelta64()]) - expected = np.array([3], dtype=np.int64) - tm.assert_numpy_array_equal(res, expected) - - res = (10 * td) // np.array([scalar.to_timedelta64(), - np.timedelta64('NaT')]) - expected = np.array([10, np.nan]) - tm.assert_numpy_array_equal(res, expected) - - ser = pd.Series([1], dtype=np.int64) - res = td // ser - assert 
res.dtype.kind == 'm' - - def test_rfloordiv(self): - # GH#18846 - td = Timedelta(hours=3, minutes=3) - scalar = Timedelta(hours=3, minutes=4) - - # scalar others - # x // Timedelta is defined only for timedelta-like x. int-like, - # float-like, and date-like, in particular, should all either - # a) raise TypeError directly or - # b) return NotImplemented, following which the reversed - # operation will raise TypeError. - assert td.__rfloordiv__(scalar) == 1 - assert (-td).__rfloordiv__(scalar.to_pytimedelta()) == -2 - assert (2 * td).__rfloordiv__(scalar.to_timedelta64()) == 0 - - assert np.isnan(td.__rfloordiv__(pd.NaT)) - assert np.isnan(td.__rfloordiv__(np.timedelta64('NaT'))) - - dt64 = np.datetime64('2016-01-01', dtype='datetime64[us]') - with pytest.raises(TypeError): - td.__rfloordiv__(dt64) - - assert td.__rfloordiv__(np.nan) is NotImplemented - assert td.__rfloordiv__(3.5) is NotImplemented - assert td.__rfloordiv__(2) is NotImplemented - - with pytest.raises(TypeError): - td.__rfloordiv__(np.float64(2.0)) - with pytest.raises(TypeError): - td.__rfloordiv__(np.int32(2.0)) - with pytest.raises(TypeError): - td.__rfloordiv__(np.uint8(9)) - - # Array-like others - assert td.__rfloordiv__(np.array(scalar.to_timedelta64())) == 1 - - res = td.__rfloordiv__(np.array([(3 * scalar).to_timedelta64()])) - expected = np.array([3], dtype=np.int64) - tm.assert_numpy_array_equal(res, expected) - - arr = np.array([(10 * scalar).to_timedelta64(), - np.timedelta64('NaT')]) - res = td.__rfloordiv__(arr) - expected = np.array([10, np.nan]) - tm.assert_numpy_array_equal(res, expected) - - ser = pd.Series([1], dtype=np.int64) - res = td.__rfloordiv__(ser) - assert res is NotImplemented - with pytest.raises(TypeError): - ser // td class TestTimedeltaComparison(object): diff --git a/pandas/tests/scalar/test_timestamp.py b/pandas/tests/scalar/timestamp/test_timestamp.py similarity index 100% rename from pandas/tests/scalar/test_timestamp.py rename to pandas/tests/scalar/timestamp/test_timestamp.py
Spun off from #19738; implements test_arithmetic for timedeltas, and _moves_ and organizes tests without changing anything in the non-test code. A handful of xfail tests are implemented (these are fixed in #19378).
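For orientation, a minimal sketch (plain Python, using only assertions that already appear in the moved `test_floordiv` above) of the floordiv semantics these tests pin down:

```python
import numpy as np
import pandas as pd
from pandas import Timedelta

td = Timedelta(hours=3, minutes=4)
scalar = Timedelta(hours=3, minutes=3)

# Timedelta // Timedelta floors the ratio and returns a plain integer
assert td // scalar == 1

# Timedelta // numeric scales the duration instead of dividing out the unit
assert td // 2 == Timedelta(hours=1, minutes=32)

# a NaN divisor propagates to NaT
assert td // np.nan is pd.NaT
```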
https://api.github.com/repos/pandas-dev/pandas/pulls/19752
2018-02-18T20:34:27Z
2018-02-19T16:45:31Z
2018-02-19T16:45:31Z
2018-06-22T03:31:08Z
DEPR: Add deprecation warning for factorize() order keyword
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 35856b64c171a..ed3069943bb6a 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -610,6 +610,7 @@ Deprecations - The ``broadcast`` parameter of ``.apply()`` is deprecated in favor of ``result_type='broadcast'`` (:issue:`18577`) - The ``reduce`` parameter of ``.apply()`` is deprecated in favor of ``result_type='reduce'`` (:issue:`18577`) +- The ``order`` parameter of :func:`factorize` is deprecated and will be removed in a future release (:issue:`19727`) .. _whatsnew_0230.prior_deprecations: diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index c754c063fce8e..624045a3d64bc 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -32,6 +32,7 @@ from pandas.core import common as com from pandas._libs import algos, lib, hashtable as htable from pandas._libs.tslib import iNaT +from pandas.util._decorators import deprecate_kwarg # --------------- # @@ -436,6 +437,7 @@ def isin(comps, values): return f(comps, values) +@deprecate_kwarg(old_arg_name='order', new_arg_name=None) def factorize(values, sort=False, order=None, na_sentinel=-1, size_hint=None): """ Encode input values as an enumerated type or categorical variable diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py index b1e3177547ac6..884b1eb7342c6 100644 --- a/pandas/tests/test_algos.py +++ b/pandas/tests/test_algos.py @@ -248,6 +248,15 @@ def test_uint64_factorize(self): tm.assert_numpy_array_equal(labels, exp_labels) tm.assert_numpy_array_equal(uniques, exp_uniques) + def test_deprecate_order(self): + # gh 19727 - check warning is raised for deprecated keyword, order. + # Test not valid once order keyword is removed. + data = np.array([2**63, 1, 2**63], dtype=np.uint64) + with tm.assert_produces_warning(expected_warning=FutureWarning): + algos.factorize(data, order=True) + with tm.assert_produces_warning(False): + algos.factorize(data) + class TestUnique(object): diff --git a/pandas/tests/util/test_util.py b/pandas/tests/util/test_util.py index 3b0a428218771..2bc017ef226ce 100644 --- a/pandas/tests/util/test_util.py +++ b/pandas/tests/util/test_util.py @@ -34,9 +34,14 @@ def _f2(new=False): def _f3(new=0): return new + @deprecate_kwarg('old', None) + def _f4(old=True, unchanged=True): + return old + self.f1 = _f1 self.f2 = _f2 self.f3 = _f3 + self.f4 = _f4 def test_deprecate_kwarg(self): x = 78 @@ -72,6 +77,15 @@ def test_bad_deprecate_kwarg(self): def f4(new=None): pass + def test_deprecate_keyword(self): + x = 9 + with tm.assert_produces_warning(FutureWarning): + result = self.f4(old=x) + assert result is x + with tm.assert_produces_warning(None): + result = self.f4(unchanged=x) + assert result is True + def test_rands(): r = tm.rands(10) diff --git a/pandas/util/_decorators.py b/pandas/util/_decorators.py index eed9cee54efb3..1753bc8b8fc33 100644 --- a/pandas/util/_decorators.py +++ b/pandas/util/_decorators.py @@ -65,8 +65,9 @@ def deprecate_kwarg(old_arg_name, new_arg_name, mapping=None, stacklevel=2): ---------- old_arg_name : str Name of argument in function to deprecate - new_arg_name : str - Name of preferred argument in function + new_arg_name : str or None + Name of preferred argument in function. Use None to raise warning that + ``old_arg_name`` keyword is deprecated. mapping : dict or callable If mapping is present, use it to translate old arguments to new arguments. 
A callable must do its own value checking; @@ -82,12 +83,15 @@ def deprecate_kwarg(old_arg_name, new_arg_name, mapping=None, stacklevel=2): ... >>> f(columns='should work ok') should work ok + >>> f(cols='should raise warning') FutureWarning: cols is deprecated, use columns instead warnings.warn(msg, FutureWarning) should raise warning + >>> f(cols='should error', columns="can\'t pass do both") TypeError: Can only specify 'cols' or 'columns', not both + >>> @deprecate_kwarg('old', 'new', {'yes': True, 'no': False}) ... def f(new=False): ... print('yes!' if new else 'no!') @@ -96,6 +100,25 @@ def deprecate_kwarg(old_arg_name, new_arg_name, mapping=None, stacklevel=2): FutureWarning: old='yes' is deprecated, use new=True instead warnings.warn(msg, FutureWarning) yes! + + + To raise a warning that a keyword will be removed entirely in the future + + >>> @deprecate_kwarg(old_arg_name='cols', new_arg_name=None) + ... def f(cols='', another_param=''): + ... print(cols) + ... + >>> f(cols='should raise warning') + FutureWarning: the 'cols' keyword is deprecated and will be removed in a + future version please takes steps to stop use of 'cols' + should raise warning + >>> f(another_param='should not raise warning') + should not raise warning + + >>> f(cols='should raise warning', another_param='') + FutureWarning: the 'cols' keyword is deprecated and will be removed in a + future version please takes steps to stop use of 'cols' + should raise warning """ if mapping is not None and not hasattr(mapping, 'get') and \ @@ -107,6 +130,17 @@ def _deprecate_kwarg(func): @wraps(func) def wrapper(*args, **kwargs): old_arg_value = kwargs.pop(old_arg_name, None) + + if new_arg_name is None and old_arg_value is not None: + msg = ( + "the '{old_name}' keyword is deprecated and will be " + "removed in a future version " + "please takes steps to stop use of '{old_name}'" + ).format(old_name=old_arg_name) + warnings.warn(msg, FutureWarning, stacklevel=stacklevel) + kwargs[old_arg_name] = old_arg_value + return func(*args, **kwargs) + if old_arg_value is not None: if mapping is not None: if hasattr(mapping, 'get'):
#### What's this PR do? In response to #19727, this adds functionality to the ``deprecate_kwarg`` utility. Currently, the utility doesn't allow the deprecation of an old keyword without having a new one in place. The change proposed here allows the deprecation of an *old* keyword without specifying a *new* keyword by allowing users to pass ``new_arg_name=None`` to ``deprecate_kwarg``. #### PR checklist - [X] closes #19727 - [X] tests added / passed - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [X] whatsnew entry
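A self-contained sketch of the pattern this enables; the real decorator lives in `pandas/util/_decorators.py` and also handles `mapping` and old-to-new renames, so this is a simplified illustration, not the actual implementation:

```python
import warnings
from functools import wraps


def deprecate_kwarg(old_arg_name, new_arg_name):
    """Simplified: new_arg_name=None means the keyword is going away entirely."""
    def _deprecate(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            old_arg_value = kwargs.pop(old_arg_name, None)
            if new_arg_name is None and old_arg_value is not None:
                # warn, but still pass the keyword through unchanged
                warnings.warn("the '{}' keyword is deprecated and will be "
                              "removed in a future version".format(old_arg_name),
                              FutureWarning, stacklevel=2)
                kwargs[old_arg_name] = old_arg_value
            return func(*args, **kwargs)
        return wrapper
    return _deprecate


@deprecate_kwarg(old_arg_name='order', new_arg_name=None)
def factorize(values, sort=False, order=None):
    return values  # stand-in body; the real pandas.factorize does the encoding


factorize([1, 2, 1], order=True)  # emits a FutureWarning
```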
https://api.github.com/repos/pandas-dev/pandas/pulls/19751
2018-02-18T19:54:51Z
2018-02-21T11:37:47Z
2018-02-21T11:37:47Z
2018-02-22T05:17:15Z
FIX: const-correctness in numpy helpers
diff --git a/pandas/_libs/src/numpy_helper.h b/pandas/_libs/src/numpy_helper.h index 844be9b292be3..5cfa51dc8a0be 100644 --- a/pandas/_libs/src/numpy_helper.h +++ b/pandas/_libs/src/numpy_helper.h @@ -32,7 +32,7 @@ PANDAS_INLINE PyObject* get_value_1d(PyArrayObject* ap, Py_ssize_t i) { // returns ASCII or UTF8 (py3) view on python str // python object owns memory, should not be freed -PANDAS_INLINE char* get_c_string(PyObject* obj) { +PANDAS_INLINE const char* get_c_string(PyObject* obj) { #if PY_VERSION_HEX >= 0x03000000 return PyUnicode_AsUTF8(obj); #else @@ -40,7 +40,7 @@ PANDAS_INLINE char* get_c_string(PyObject* obj) { #endif } -PANDAS_INLINE PyObject* char_to_string(char* data) { +PANDAS_INLINE PyObject* char_to_string(const char* data) { #if PY_VERSION_HEX >= 0x03000000 return PyUnicode_FromString(data); #else
In Python 3.7 the return type of `PyUnicode_AsUTF8` changed from `char *` to `const char *`. `PyUnicode_FromString` also takes `const char *` as input, so be explicit about that as well. https://bugs.python.org/issue28769 (commit 2a404b63d48d73bbaa007d89efb7a01048475acd in CPython) - [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
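For concreteness, a hedged C sketch (not the pandas sources; the function name is illustrative) of the usage pattern the const qualifiers permit:

```c
#include <Python.h>

/* Since CPython 3.7, PyUnicode_AsUTF8 returns `const char *`, so the
 * result must be held in a const pointer; PyUnicode_FromString has
 * always accepted `const char *`, so the round trip stays well-typed. */
static PyObject *utf8_roundtrip(PyObject *obj) {
    const char *data = PyUnicode_AsUTF8(obj);
    if (data == NULL) {
        return NULL;  /* error already set by CPython */
    }
    return PyUnicode_FromString(data);
}
```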
https://api.github.com/repos/pandas-dev/pandas/pulls/19749
2018-02-18T17:34:19Z
2018-02-18T23:32:33Z
2018-02-18T23:32:33Z
2018-02-19T01:34:34Z
DOC: Updated tutorials.rst, added 3 recent video tutorials
diff --git a/doc/source/tutorials.rst b/doc/source/tutorials.rst index 710212bc237cd..db9385519bff2 100644 --- a/doc/source/tutorials.rst +++ b/doc/source/tutorials.rst @@ -9,52 +9,52 @@ This is a guide to many pandas tutorials, geared mainly for new users. Internal Guides --------------- -pandas own :ref:`10 Minutes to pandas<10min>` +pandas' own :ref:`10 Minutes to pandas<10min>`. -More complex recipes are in the :ref:`Cookbook<cookbook>` +More complex recipes are in the :ref:`Cookbook<cookbook>`. pandas Cookbook --------------- -The goal of this cookbook (by `Julia Evans <http://jvns.ca>`_) is to +The goal of this 2015 cookbook (by `Julia Evans <http://jvns.ca>`_) is to give you some concrete examples for getting started with pandas. These are examples with real-world data, and all the bugs and weirdness that entails. -Here are links to the v0.1 release. For an up-to-date table of contents, see the `pandas-cookbook GitHub +Here are links to the v0.2 release. For an up-to-date table of contents, see the `pandas-cookbook GitHub repository <http://github.com/jvns/pandas-cookbook>`_. To run the examples in this tutorial, you'll need to clone the GitHub repository and get IPython Notebook running. See `How to use this cookbook <https://github.com/jvns/pandas-cookbook#how-to-use-this-cookbook>`_. -- `A quick tour of the IPython Notebook: <http://nbviewer.ipython.org/github/jvns/pandas-cookbook/blob/v0.1/cookbook/A%20quick%20tour%20of%20IPython%20Notebook.ipynb>`_ +- `A quick tour of the IPython Notebook: <http://nbviewer.ipython.org/github/jvns/pandas-cookbook/blob/v0.2/cookbook/A%20quick%20tour%20of%20IPython%20Notebook.ipynb>`_ Shows off IPython's awesome tab completion and magic functions. -- `Chapter 1: <http://nbviewer.ipython.org/github/jvns/pandas-cookbook/blob/v0.1/cookbook/Chapter%201%20-%20Reading%20from%20a%20CSV.ipynb>`_ +- `Chapter 1: <http://nbviewer.ipython.org/github/jvns/pandas-cookbook/blob/v0.2/cookbook/Chapter%201%20-%20Reading%20from%20a%20CSV.ipynb>`_ Reading your data into pandas is pretty much the easiest thing. Even when the encoding is wrong! -- `Chapter 2: <http://nbviewer.ipython.org/github/jvns/pandas-cookbook/blob/v0.1/cookbook/Chapter%202%20-%20Selecting%20data%20&%20finding%20the%20most%20common%20complaint%20type.ipynb>`_ +- `Chapter 2: <http://nbviewer.ipython.org/github/jvns/pandas-cookbook/blob/v0.2/cookbook/Chapter%202%20-%20Selecting%20data%20&%20finding%20the%20most%20common%20complaint%20type.ipynb>`_ It's not totally obvious how to select data from a pandas dataframe. Here we explain the basics (how to take slices and get columns) -- `Chapter 3: <http://nbviewer.ipython.org/github/jvns/pandas-cookbook/blob/v0.1/cookbook/Chapter%203%20-%20Which%20borough%20has%20the%20most%20noise%20complaints%3F%20%28or%2C%20more%20selecting%20data%29.ipynb>`_ +- `Chapter 3: <http://nbviewer.ipython.org/github/jvns/pandas-cookbook/blob/v0.2/cookbook/Chapter%203%20-%20Which%20borough%20has%20the%20most%20noise%20complaints%3F%20%28or%2C%20more%20selecting%20data%29.ipynb>`_ Here we get into serious slicing and dicing and learn how to filter dataframes in complicated ways, really fast. 
-- `Chapter 4: <http://nbviewer.ipython.org/github/jvns/pandas-cookbook/blob/v0.1/cookbook/Chapter%204%20-%20Find%20out%20on%20which%20weekday%20people%20bike%20the%20most%20with%20groupby%20and%20aggregate.ipynb>`_ +- `Chapter 4: <http://nbviewer.ipython.org/github/jvns/pandas-cookbook/blob/v0.2/cookbook/Chapter%204%20-%20Find%20out%20on%20which%20weekday%20people%20bike%20the%20most%20with%20groupby%20and%20aggregate.ipynb>`_ Groupby/aggregate is seriously my favorite thing about pandas and I use it all the time. You should probably read this. -- `Chapter 5: <http://nbviewer.ipython.org/github/jvns/pandas-cookbook/blob/v0.1/cookbook/Chapter%205%20-%20Combining%20dataframes%20and%20scraping%20Canadian%20weather%20data.ipynb>`_ +- `Chapter 5: <http://nbviewer.ipython.org/github/jvns/pandas-cookbook/blob/v0.2/cookbook/Chapter%205%20-%20Combining%20dataframes%20and%20scraping%20Canadian%20weather%20data.ipynb>`_ Here you get to find out if it's cold in Montreal in the winter (spoiler: yes). Web scraping with pandas is fun! Here we combine dataframes. -- `Chapter 6: <http://nbviewer.ipython.org/github/jvns/pandas-cookbook/blob/v0.1/cookbook/Chapter%206%20-%20String%20operations%21%20Which%20month%20was%20the%20snowiest%3F.ipynb>`_ +- `Chapter 6: <http://nbviewer.ipython.org/github/jvns/pandas-cookbook/blob/v0.2/cookbook/Chapter%206%20-%20String%20operations%21%20Which%20month%20was%20the%20snowiest%3F.ipynb>`_ Strings with pandas are great. It has all these vectorized string operations and they're the best. We will turn a bunch of strings containing "Snow" into vectors of numbers in a trice. -- `Chapter 7: <http://nbviewer.ipython.org/github/jvns/pandas-cookbook/blob/v0.1/cookbook/Chapter%207%20-%20Cleaning%20up%20messy%20data.ipynb>`_ +- `Chapter 7: <http://nbviewer.ipython.org/github/jvns/pandas-cookbook/blob/v0.2/cookbook/Chapter%207%20-%20Cleaning%20up%20messy%20data.ipynb>`_ Cleaning up messy data is never a joy, but with pandas it's easier. -- `Chapter 8: <http://nbviewer.ipython.org/github/jvns/pandas-cookbook/blob/v0.1/cookbook/Chapter%208%20-%20How%20to%20deal%20with%20timestamps.ipynb>`_ +- `Chapter 8: <http://nbviewer.ipython.org/github/jvns/pandas-cookbook/blob/v0.2/cookbook/Chapter%208%20-%20How%20to%20deal%20with%20timestamps.ipynb>`_ Parsing Unix timestamps is confusing at first but it turns out to be really easy. -Lessons for New pandas Users +Lessons for new pandas users ---------------------------- For more resources, please visit the main `repository <https://bitbucket.org/hrojas/learn-pandas>`__. @@ -125,7 +125,7 @@ There are four sections covering selected topics as follows: .. _tutorial-exercises-new-users: -Exercises for New Users +Exercises for new users ----------------------- Practice your skills with real data sets and exercises. For more resources, please visit the main `repository <https://github.com/guipsamora/pandas_exercises>`__. @@ -152,9 +152,14 @@ For more resources, please visit the main `repository <https://github.com/guipsa .. _tutorial-modern: -Modern Pandas +Modern pandas ------------- +Tutorial series written in 2016 by +`Tom Augspurger <https://github.com/TomAugspurger>`_. +The source may be found in the GitHub repository +`TomAugspurger/effective-pandas <https://github.com/TomAugspurger/effective-pandas>`_. 
+ - `Modern Pandas <http://tomaugspurger.github.io/modern-1-intro.html>`_ - `Method Chaining <http://tomaugspurger.github.io/method-chaining.html>`_ - `Indexes <http://tomaugspurger.github.io/modern-3-indexes.html>`_ @@ -168,6 +173,20 @@ Excel charts with pandas, vincent and xlsxwriter - `Using Pandas and XlsxWriter to create Excel charts <https://pandas-xlsxwriter-charts.readthedocs.io/>`_ +Video Tutorials +--------------- + +- `Pandas From The Ground Up <https://www.youtube.com/watch?v=5JnMutdy6Fw>`_ + (2015) (2:24) + `GitHub repo <https://github.com/brandon-rhodes/pycon-pandas-tutorial>`_ +- `Introduction Into Pandas <https://www.youtube.com/watch?v=-NR-ynQg0YM>`_ + (2016) (1:28) + `GitHub repo <https://github.com/chendaniely/2016-pydata-carolinas-pandas>`_ +- `Pandas: .head() to .tail() <https://www.youtube.com/watch?v=7vuO9QXDN50>`_ + (2016) (1:26) + `GitHub repo <https://github.com/TomAugspurger/pydata-chi-h2t>`_ + + Various Tutorials -----------------
I have made the following changes to `tutorials.rst` (found [here](http://pandas-docs.github.io/pandas-docs-travis/tutorials.html)): - Updated the version of the [pandas-cookbook](https://github.com/jvns/pandas-cookbook) from `v0.1` to `v0.2`. - Added an introductory paragraph to the tutorial series by @TomAugspurger. - Added 3 recent, high-quality video tutorials (YouTube).
https://api.github.com/repos/pandas-dev/pandas/pulls/19748
2018-02-18T11:39:47Z
2018-02-18T13:38:17Z
2018-02-18T13:38:17Z
2018-02-18T13:38:18Z
DOC: Spellcheck of gotchas.rst (FAQ page)
diff --git a/ci/lint.sh b/ci/lint.sh index e3a39668885f0..fcd65fc5aba5e 100755 --- a/ci/lint.sh +++ b/ci/lint.sh @@ -156,6 +156,7 @@ if [ "$LINT" ]; then RET=1 fi echo "Check for deprecated messages without sphinx directive DONE" + else echo "NOT Linting" fi diff --git a/doc/source/gotchas.rst b/doc/source/gotchas.rst index bc490877e190d..b7042ef390018 100644 --- a/doc/source/gotchas.rst +++ b/doc/source/gotchas.rst @@ -22,22 +22,22 @@ Frequently Asked Questions (FAQ) DataFrame memory usage ---------------------- -The memory usage of a dataframe (including the index) -is shown when accessing the ``info`` method of a dataframe. A -configuration option, ``display.memory_usage`` (see :ref:`options`), -specifies if the dataframe's memory usage will be displayed when -invoking the ``df.info()`` method. +The memory usage of a ``DataFrame`` (including the index) is shown when calling +the :meth:`~DataFrame.info`. A configuration option, ``display.memory_usage`` +(see :ref:`the list of options <options.available>`), specifies if the +``DataFrame``'s memory usage will be displayed when invoking the ``df.info()`` +method. -For example, the memory usage of the dataframe below is shown -when calling ``df.info()``: +For example, the memory usage of the ``DataFrame`` below is shown +when calling :meth:`~DataFrame.info`: .. ipython:: python dtypes = ['int64', 'float64', 'datetime64[ns]', 'timedelta64[ns]', 'complex128', 'object', 'bool'] n = 5000 - data = dict([ (t, np.random.randint(100, size=n).astype(t)) - for t in dtypes]) + data = dict([(t, np.random.randint(100, size=n).astype(t)) + for t in dtypes]) df = pd.DataFrame(data) df['categorical'] = df['object'].astype('category') @@ -48,7 +48,7 @@ pandas does not count the memory used by values in columns with ``dtype=object``. Passing ``memory_usage='deep'`` will enable a more accurate memory usage report, -that accounts for the full usage of the contained objects. This is optional +accounting for the full usage of the contained objects. This is optional as it can be expensive to do this deeper introspection. .. ipython:: python @@ -58,11 +58,11 @@ as it can be expensive to do this deeper introspection. By default the display option is set to ``True`` but can be explicitly overridden by passing the ``memory_usage`` argument when invoking ``df.info()``. -The memory usage of each column can be found by calling the ``memory_usage`` -method. This returns a Series with an index represented by column names -and memory usage of each column shown in bytes. For the dataframe above, -the memory usage of each column and the total memory usage of the -dataframe can be found with the memory_usage method: +The memory usage of each column can be found by calling the +:meth:`~DataFrame.memory_usage` method. This returns a ``Series`` with an index +represented by column names and memory usage of each column shown in bytes. For +the ``DataFrame`` above, the memory usage of each column and the total memory +usage can be found with the ``memory_usage`` method: .. ipython:: python @@ -71,18 +71,18 @@ dataframe can be found with the memory_usage method: # total memory usage of dataframe df.memory_usage().sum() -By default the memory usage of the dataframe's index is shown in the -returned Series, the memory usage of the index can be suppressed by passing +By default the memory usage of the ``DataFrame``'s index is shown in the +returned ``Series``, the memory usage of the index can be suppressed by passing the ``index=False`` argument: .. 
ipython:: python df.memory_usage(index=False) -The memory usage displayed by the ``info`` method utilizes the -``memory_usage`` method to determine the memory usage of a dataframe -while also formatting the output in human-readable units (base-2 -representation; i.e., 1KB = 1024 bytes). +The memory usage displayed by the :meth:`~DataFrame.info` method utilizes the +:meth:`~DataFrame.memory_usage` method to determine the memory usage of a +``DataFrame`` while also formatting the output in human-readable units (base-2 +representation; i.e. 1KB = 1024 bytes). See also :ref:`Categorical Memory Usage <categorical.memory>`. @@ -91,17 +91,18 @@ See also :ref:`Categorical Memory Usage <categorical.memory>`. Using If/Truth Statements with pandas ------------------------------------- -pandas follows the NumPy convention of raising an error when you try to convert something to a ``bool``. -This happens in a ``if`` or when using the boolean operations, ``and``, ``or``, or ``not``. It is not clear -what the result of +pandas follows the NumPy convention of raising an error when you try to convert +something to a ``bool``. This happens in an ``if``-statement or when using the +boolean operations: ``and``, ``or``, and ``not``. It is not clear what the result +of the following code should be: .. code-block:: python >>> if pd.Series([False, True, False]): ... -should be. Should it be ``True`` because it's not zero-length? ``False`` because there are ``False`` values? -It is unclear, so instead, pandas raises a ``ValueError``: +Should it be ``True`` because it's not zero-length, or ``False`` because there +are ``False`` values? It is unclear, so instead, pandas raises a ``ValueError``: .. code-block:: python @@ -111,9 +112,9 @@ It is unclear, so instead, pandas raises a ``ValueError``: ... ValueError: The truth value of an array is ambiguous. Use a.empty, a.any() or a.all(). - -If you see that, you need to explicitly choose what you want to do with it (e.g., use `any()`, `all()` or `empty`). -or, you might want to compare if the pandas object is ``None`` +You need to explicitly choose what you want to do with the ``DataFrame``, e.g. +use :meth:`~DataFrame.any`, :meth:`~DataFrame.all` or :meth:`~DataFrame.empty`. +Alternatively, you might want to compare if the pandas object is ``None``: .. code-block:: python @@ -122,7 +123,7 @@ or, you might want to compare if the pandas object is ``None`` >>> I was not None -or return if ``any`` value is ``True``. +Below is how to check if any of the values are ``True``: .. code-block:: python @@ -130,7 +131,8 @@ or return if ``any`` value is ``True``. print("I am any") >>> I am any -To evaluate single-element pandas objects in a boolean context, use the method ``.bool()``: +To evaluate single-element pandas objects in a boolean context, use the method +:meth:`~DataFrame.bool`: .. ipython:: python @@ -161,25 +163,25 @@ See :ref:`boolean comparisons<basics.compare>` for more examples. Using the ``in`` operator ~~~~~~~~~~~~~~~~~~~~~~~~~ -Using the Python ``in`` operator on a Series tests for membership in the +Using the Python ``in`` operator on a ``Series`` tests for membership in the index, not membership among the values. -.. ipython:: +.. ipython:: python s = pd.Series(range(5), index=list('abcde')) 2 in s 'b' in s If this behavior is surprising, keep in mind that using ``in`` on a Python -dictionary tests keys, not values, and Series are dict-like. 
-To test for membership in the values, use the method :func:`~pandas.Series.isin`: +dictionary tests keys, not values, and ``Series`` are dict-like. +To test for membership in the values, use the method :meth:`~pandas.Series.isin`: -.. ipython:: +.. ipython:: python s.isin([2]) s.isin([2]).any() -For DataFrames, likewise, ``in`` applies to the column axis, +For ``DataFrames``, likewise, ``in`` applies to the column axis, testing for membership in the list of column names. ``NaN``, Integer ``NA`` values and ``NA`` type promotions @@ -189,12 +191,12 @@ Choice of ``NA`` representation ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ For lack of ``NA`` (missing) support from the ground up in NumPy and Python in -general, we were given the difficult choice between either +general, we were given the difficult choice between either: - A *masked array* solution: an array of data and an array of boolean values - indicating whether a value is there or is missing + indicating whether a value is there or is missing. - Using a special sentinel value, bit pattern, or set of sentinel values to - denote ``NA`` across the dtypes + denote ``NA`` across the dtypes. For many reasons we chose the latter. After years of production use it has proven, at least in my opinion, to be the best decision given the state of @@ -226,15 +228,16 @@ arrays. For example: s2.dtype This trade-off is made largely for memory and performance reasons, and also so -that the resulting Series continues to be "numeric". One possibility is to use -``dtype=object`` arrays instead. +that the resulting ``Series`` continues to be "numeric". One possibility is to +use ``dtype=object`` arrays instead. ``NA`` type promotions ~~~~~~~~~~~~~~~~~~~~~~ -When introducing NAs into an existing Series or DataFrame via ``reindex`` or -some other means, boolean and integer types will be promoted to a different -dtype in order to store the NAs. These are summarized by this table: +When introducing NAs into an existing ``Series`` or ``DataFrame`` via +:meth:`~Series.reindex` or some other means, boolean and integer types will be +promoted to a different dtype in order to store the NAs. The promotions are +summarized in this table: .. csv-table:: :header: "Typeclass","Promotion dtype for storing NAs" @@ -289,19 +292,19 @@ integer arrays to floating when NAs must be introduced. Differences with NumPy ---------------------- -For Series and DataFrame objects, ``var`` normalizes by ``N-1`` to produce -unbiased estimates of the sample variance, while NumPy's ``var`` normalizes -by N, which measures the variance of the sample. Note that ``cov`` -normalizes by ``N-1`` in both pandas and NumPy. +For ``Series`` and ``DataFrame`` objects, :meth:`~DataFrame.var` normalizes by +``N-1`` to produce unbiased estimates of the sample variance, while NumPy's +``var`` normalizes by N, which measures the variance of the sample. Note that +:meth:`~DataFrame.cov` normalizes by ``N-1`` in both pandas and NumPy. Thread-safety ------------- As of pandas 0.11, pandas is not 100% thread safe. The known issues relate to -the ``DataFrame.copy`` method. If you are doing a lot of copying of DataFrame -objects shared among threads, we recommend holding locks inside the threads -where the data copying occurs. +the :meth:`~DataFrame.copy` method. If you are doing a lot of copying of +``DataFrame`` objects shared among threads, we recommend holding locks inside +the threads where the data copying occurs. 
See `this link <https://stackoverflow.com/questions/13592618/python-pandas-dataframe-thread-safe>`__ for more information. @@ -310,7 +313,8 @@ for more information. Byte-Ordering Issues -------------------- Occasionally you may have to deal with data that were created on a machine with -a different byte order than the one on which you are running Python. A common symptom of this issue is an error like +a different byte order than the one on which you are running Python. A common +symptom of this issue is an error like: .. code-block:: python @@ -320,8 +324,8 @@ a different byte order than the one on which you are running Python. A common sy To deal with this issue you should convert the underlying NumPy array to the native -system byte order *before* passing it to Series/DataFrame/Panel constructors -using something similar to the following: +system byte order *before* passing it to ``Series`` or ``DataFrame`` +constructors using something similar to the following: .. ipython:: python
Minor changes to the documentation, specifically `gotchas.rst`, i.e. the FAQ page. * Function/method references as links. * Consistent double backticks ` `` ` around Series, DataFrame. * Minor rephrasing of a sentence or two, added some missing punctuation. * Replaced `.. ipython::` with `.. ipython:: python` twice, as the former is not sufficient to make the IPython code render in the docs. Reviews/comments very welcome.
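For reference, the corrected directive form as it appears in the diff; without the `python` argument, Sphinx does not execute and render the block:

```rst
.. ipython:: python

   s = pd.Series(range(5), index=list('abcde'))
   2 in s
   'b' in s
```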
https://api.github.com/repos/pandas-dev/pandas/pulls/19747
2018-02-18T09:59:15Z
2018-02-23T11:46:12Z
2018-02-23T11:46:12Z
2018-02-23T11:46:12Z
[DOCS] Add example of how to preserve order of columns with usecols.
diff --git a/doc/source/io.rst b/doc/source/io.rst index 7bb34e4d232dd..b7f8792b42fd6 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -135,8 +135,14 @@ usecols : array-like or callable, default ``None`` be positional (i.e. integer indices into the document columns) or strings that correspond to column names provided either by the user in `names` or inferred from the document header row(s). For example, a valid array-like - `usecols` parameter would be ``[0, 1, 2]`` or ``['foo', 'bar', 'baz']``. - Element order is ignored, so ``usecols=[0, 1]`` is the same as ``[1, 0]``. + `usecols` parameter would be ``[0, 1, 2]`` or ``['foo', 'bar', 'baz']``. + + Element order is ignored, so ``usecols=[0, 1]`` is the same as ``[1, 0]``. To + instantiate a DataFrame from ``data`` with element order preserved use + ``pd.read_csv(data, usecols=['foo', 'bar'])[['foo', 'bar']]`` for columns + in ``['foo', 'bar']`` order or + ``pd.read_csv(data, usecols=['foo', 'bar'])[['bar', 'foo']]`` for + ``['bar', 'foo']`` order. If callable, the callable function will be evaluated against the column names, returning names where the callable function evaluates to True: diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index 7ea6d321e0fdd..4b1385514a0c4 100755 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -102,7 +102,12 @@ that correspond to column names provided either by the user in `names` or inferred from the document header row(s). For example, a valid array-like `usecols` parameter would be [0, 1, 2] or ['foo', 'bar', 'baz']. Element - order is ignored, so usecols=[1,0] is the same as [0,1]. + order is ignored, so ``usecols=[0, 1]`` is the same as ``[1, 0]``. + To instantiate a DataFrame from ``data`` with element order preserved use + ``pd.read_csv(data, usecols=['foo', 'bar'])[['foo', 'bar']]`` for columns + in ``['foo', 'bar']`` order or + ``pd.read_csv(data, usecols=['foo', 'bar'])[['bar', 'foo']]`` + for ``['bar', 'foo']`` order. If callable, the callable function will be evaluated against the column names, returning names where the callable function evaluates to True. An
#### What's this PR do? Adding an example of how to preserve order when using the ``usecols`` parameter. I've noticed that some folks are still unaware that the columns will not necessarily be returned in the specified order passed to ``usecols``.
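A runnable sketch of the documented pattern (the CSV content here is illustrative): `usecols` ignores element order, so selecting the columns afterwards is what fixes the order.

```python
from io import StringIO
import pandas as pd

data = StringIO("foo,bar,baz\n1,2,3\n4,5,6")

# usecols=['bar', 'foo'] still yields columns in file order ('foo', 'bar');
# selecting [['bar', 'foo']] afterwards enforces the desired order.
df = pd.read_csv(data, usecols=['bar', 'foo'])[['bar', 'foo']]
print(df.columns.tolist())  # ['bar', 'foo']
```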
https://api.github.com/repos/pandas-dev/pandas/pulls/19746
2018-02-18T07:00:57Z
2018-02-21T19:53:08Z
2018-02-21T19:53:08Z
2018-02-22T05:21:23Z
Conform Series.to_csv to DataFrame.to_csv
diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt index ed4022d422b4d..a0633a2be085a 100644 --- a/doc/source/whatsnew/v0.24.0.txt +++ b/doc/source/whatsnew/v0.24.0.txt @@ -245,7 +245,7 @@ Deprecations - :meth:`DataFrame.to_stata`, :meth:`read_stata`, :class:`StataReader` and :class:`StataWriter` have deprecated the ``encoding`` argument. The encoding of a Stata dta file is determined by the file type and cannot be changed (:issue:`21244`). - :meth:`MultiIndex.to_hierarchical` is deprecated and will be removed in a future version (:issue:`21613`) - :meth:`Series.ptp` is deprecated. Use ``numpy.ptp`` instead (:issue:`21614`) -- +- The signature in :meth:`Series.to_csv` has been deprecated. Please follow the signature in :meth:`DataFrame.to_csv` instead (:issue:`19745`) .. _whatsnew_0240.prior_deprecations: diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 6380944338010..f0aa00163c902 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -1710,103 +1710,6 @@ def to_panel(self): return self._constructor_expanddim(new_mgr) - def to_csv(self, path_or_buf=None, sep=",", na_rep='', float_format=None, - columns=None, header=True, index=True, index_label=None, - mode='w', encoding=None, compression=None, quoting=None, - quotechar='"', line_terminator='\n', chunksize=None, - tupleize_cols=None, date_format=None, doublequote=True, - escapechar=None, decimal='.'): - r"""Write DataFrame to a comma-separated values (csv) file - - Parameters - ---------- - path_or_buf : string or file handle, default None - File path or object, if None is provided the result is returned as - a string. - sep : character, default ',' - Field delimiter for the output file. - na_rep : string, default '' - Missing data representation - float_format : string, default None - Format string for floating point numbers - columns : sequence, optional - Columns to write - header : boolean or list of string, default True - Write out the column names. If a list of strings is given it is - assumed to be aliases for the column names - index : boolean, default True - Write row names (index) - index_label : string or sequence, or False, default None - Column label for index column(s) if desired. If None is given, and - `header` and `index` are True, then the index names are used. A - sequence should be given if the DataFrame uses MultiIndex. If - False do not print fields for index names. Use index_label=False - for easier importing in R - mode : str - Python write mode, default 'w' - encoding : string, optional - A string representing the encoding to use in the output file, - defaults to 'ascii' on Python 2 and 'utf-8' on Python 3. - compression : {'infer', 'gzip', 'bz2', 'xz', None}, default None - If 'infer' and `path_or_buf` is path-like, then detect compression - from the following extensions: '.gz', '.bz2' or '.xz' - (otherwise no compression). - line_terminator : string, default ``'\n'`` - The newline character or character sequence to use in the output - file - quoting : optional constant from csv module - defaults to csv.QUOTE_MINIMAL. 
If you have set a `float_format` - then floats are converted to strings and thus csv.QUOTE_NONNUMERIC - will treat them as non-numeric - quotechar : string (length 1), default '\"' - character used to quote fields - doublequote : boolean, default True - Control quoting of `quotechar` inside a field - escapechar : string (length 1), default None - character used to escape `sep` and `quotechar` when appropriate - chunksize : int or None - rows to write at a time - tupleize_cols : boolean, default False - .. deprecated:: 0.21.0 - This argument will be removed and will always write each row - of the multi-index as a separate row in the CSV file. - - Write MultiIndex columns as a list of tuples (if True) or in - the new, expanded format, where each MultiIndex column is a row - in the CSV (if False). - date_format : string, default None - Format string for datetime objects - decimal: string, default '.' - Character recognized as decimal separator. E.g. use ',' for - European data - - """ - - if tupleize_cols is not None: - warnings.warn("The 'tupleize_cols' parameter is deprecated and " - "will be removed in a future version", - FutureWarning, stacklevel=2) - else: - tupleize_cols = False - - from pandas.io.formats.csvs import CSVFormatter - formatter = CSVFormatter(self, path_or_buf, - line_terminator=line_terminator, sep=sep, - encoding=encoding, - compression=compression, quoting=quoting, - na_rep=na_rep, float_format=float_format, - cols=columns, header=header, index=index, - index_label=index_label, mode=mode, - chunksize=chunksize, quotechar=quotechar, - tupleize_cols=tupleize_cols, - date_format=date_format, - doublequote=doublequote, - escapechar=escapechar, decimal=decimal) - formatter.save() - - if path_or_buf is None: - return formatter.path_or_buf.getvalue() - @Appender(_shared_docs['to_excel'] % _shared_doc_kwargs) def to_excel(self, excel_writer, sheet_name='Sheet1', na_rep='', float_format=None, columns=None, header=True, index=True, diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 8da678e0adec0..cdd7d76fb01e6 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -9161,6 +9161,107 @@ def first_valid_index(self): def last_valid_index(self): return self._find_valid_index('last') + def to_csv(self, path_or_buf=None, sep=",", na_rep='', float_format=None, + columns=None, header=True, index=True, index_label=None, + mode='w', encoding=None, compression=None, quoting=None, + quotechar='"', line_terminator='\n', chunksize=None, + tupleize_cols=None, date_format=None, doublequote=True, + escapechar=None, decimal='.'): + r"""Write DataFrame to a comma-separated values (csv) file + + Parameters + ---------- + path_or_buf : string or file handle, default None + File path or object, if None is provided the result is returned as + a string. + sep : character, default ',' + Field delimiter for the output file. + na_rep : string, default '' + Missing data representation + float_format : string, default None + Format string for floating point numbers + columns : sequence, optional + Columns to write + header : boolean or list of string, default True + Write out the column names. If a list of strings is given it is + assumed to be aliases for the column names + index : boolean, default True + Write row names (index) + index_label : string or sequence, or False, default None + Column label for index column(s) if desired. If None is given, and + `header` and `index` are True, then the index names are used. A + sequence should be given if the DataFrame uses MultiIndex. 
If + False do not print fields for index names. Use index_label=False + for easier importing in R + mode : str + Python write mode, default 'w' + encoding : string, optional + A string representing the encoding to use in the output file, + defaults to 'ascii' on Python 2 and 'utf-8' on Python 3. + compression : {'infer', 'gzip', 'bz2', 'xz', None}, default None + If 'infer' and `path_or_buf` is path-like, then detect compression + from the following extensions: '.gz', '.bz2' or '.xz' + (otherwise no compression). + line_terminator : string, default ``'\n'`` + The newline character or character sequence to use in the output + file + quoting : optional constant from csv module + defaults to csv.QUOTE_MINIMAL. If you have set a `float_format` + then floats are converted to strings and thus csv.QUOTE_NONNUMERIC + will treat them as non-numeric + quotechar : string (length 1), default '\"' + character used to quote fields + doublequote : boolean, default True + Control quoting of `quotechar` inside a field + escapechar : string (length 1), default None + character used to escape `sep` and `quotechar` when appropriate + chunksize : int or None + rows to write at a time + tupleize_cols : boolean, default False + .. deprecated:: 0.21.0 + This argument will be removed and will always write each row + of the multi-index as a separate row in the CSV file. + + Write MultiIndex columns as a list of tuples (if True) or in + the new, expanded format, where each MultiIndex column is a row + in the CSV (if False). + date_format : string, default None + Format string for datetime objects + decimal: string, default '.' + Character recognized as decimal separator. E.g. use ',' for + European data + + """ + + from pandas.core.frame import DataFrame + from pandas.io.formats.csvs import CSVFormatter + + df = self if isinstance(self, DataFrame) else DataFrame(self) + + if tupleize_cols is not None: + warnings.warn("The 'tupleize_cols' parameter is deprecated and " + "will be removed in a future version", + FutureWarning, stacklevel=2) + else: + tupleize_cols = False + + formatter = CSVFormatter(df, path_or_buf, + line_terminator=line_terminator, sep=sep, + encoding=encoding, + compression=compression, quoting=quoting, + na_rep=na_rep, float_format=float_format, + cols=columns, header=header, index=index, + index_label=index_label, mode=mode, + chunksize=chunksize, quotechar=quotechar, + tupleize_cols=tupleize_cols, + date_format=date_format, + doublequote=doublequote, + escapechar=escapechar, decimal=decimal) + formatter.save() + + if path_or_buf is None: + return formatter.path_or_buf.getvalue() + def _doc_parms(cls): """Return a tuple of the doc parms.""" diff --git a/pandas/core/series.py b/pandas/core/series.py index 0bdb9d9cc23a6..bf0d44702e19c 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -3760,16 +3760,70 @@ def from_csv(cls, path, sep=',', parse_dates=True, header=None, return result - def to_csv(self, path=None, index=True, sep=",", na_rep='', + def to_csv_deprecation_decoration(to_csv): + """Decorator used to deprecate the signature of to_csv.""" + + from functools import wraps + + @wraps(to_csv) + def _to_csv(*args, **kwargs): + if len(args) > 2: + warnings.warn( + "The signature of `Series.to_csv` will be " + "aligned to that of `DataFrame.to_csv` in the " + "future. The ordering of positional " + "arguments is different, so please refer " + "to the documentation for `DataFrame.to_csv` when " + "changing your function calls. 
To ensure " + "future-proof calls, do not pass more than 1 " + "positional argument.", + FutureWarning, stacklevel=2, + ) + if "path" in kwargs: + assert len(args) <= 1, ( + "Cannot specify path both as positional " + "argument and keyword argument." + ) + assert "path_or_buf" not in kwargs, ( + "Can only specify path or path_or_buf, not both." + ) + warnings.warn( + "path is deprecated, use path_or_buf instead", + FutureWarning, stacklevel=2, + ) + kwargs["path_or_buf"] = kwargs.pop("path") + if "header" not in kwargs: + warnings.warn( + "The signature of `Series.to_csv` will be " + "aligned to that of `DataFrame.to_csv` in the " + "future. The default value of header will change. " + "To keep the current behaviour, use header=False.", + FutureWarning, stacklevel=2, + ) + return to_csv(*args, **kwargs) + + return _to_csv + + @to_csv_deprecation_decoration + def to_csv(self, path_or_buf=None, index=True, sep=",", na_rep='', float_format=None, header=False, index_label=None, mode='w', encoding=None, compression=None, date_format=None, - decimal='.'): - """ - Write Series to a comma-separated values (csv) file + decimal='.', columns=None, quoting=None, quotechar='"', + line_terminator='\n', chunksize=None, doublequote=True, + escapechar=None): + + r"""Write Series to a comma-separated values (csv) file + + .. deprecated:: 0.24.0 + The signature will aligned to that of :func:`DataFrame.to_csv`. + + :func:`Series.to_csv` will align its signature with that of + `DataFrame.to_csv`. Please pass in keyword arguments in accordance + with that signature instead. Parameters ---------- - path : string or file handle, default None + path_or_buf : string or file handle, default None File path or object, if None is provided the result is returned as a string. na_rep : string, default '' @@ -3777,39 +3831,67 @@ def to_csv(self, path=None, index=True, sep=",", na_rep='', float_format : string, default None Format string for floating point numbers header : boolean, default False - Write out series name + Write out Series name. index : boolean, default True Write row names (index) index_label : string or sequence, default None Column label for index column(s) if desired. If None is given, and `header` and `index` are True, then the index names are used. A sequence should be given if the DataFrame uses MultiIndex. - mode : Python write mode, default 'w' - sep : character, default "," + mode : str, default 'w' + Python write mode + sep : character, default ',' Field delimiter for the output file. encoding : string, optional - a string representing the encoding to use if the contents are - non-ascii, for python versions prior to 3 - compression : string, optional - A string representing the compression to use in the output file. - Allowed values are 'gzip', 'bz2', 'zip', 'xz'. This input is only - used when the first argument is a filename. - date_format: string, default None - Format string for datetime objects. + A string representing the encoding to use in the output file, + defaults to 'ascii' on Python 2 and 'utf-8' on Python 3. + compression : {'infer', 'gzip', 'bz2', 'xz', None}, default None + If 'infer' and `path_or_buf` is path-like, then detect compression + from the following extensions: '.gz', '.bz2' or '.xz' + (otherwise no compression). + date_format : string, default None + Format string for datetime objects decimal: string, default '.' Character recognized as decimal separator. E.g. 
use ',' for European data + columns : sequence, optional + Columns to write + quoting : optional constant from csv module + defaults to csv.QUOTE_MINIMAL. If you have set a `float_format` + then floats are converted to strings and thus csv.QUOTE_NONNUMERIC + will treat them as non-numeric + quotechar : string (length 1), default '\"' + character used to quote fields + line_terminator : string, default ``'\n'`` + The newline character or character sequence to use in the output + file + chunksize : int or None + rows to write at a time + doublequote : boolean, default True + Control quoting of `quotechar` inside a field + escapechar : string (length 1), default None + character used to escape `sep` and `quotechar` when appropriate + path : string or file handle + .. deprecated:: 0.21.0 + use `path_or_buf` instead + """ + from pandas.core.frame import DataFrame df = DataFrame(self) # result is only a string if no path provided, otherwise None - result = df.to_csv(path, index=index, sep=sep, na_rep=na_rep, - float_format=float_format, header=header, - index_label=index_label, mode=mode, - encoding=encoding, compression=compression, - date_format=date_format, decimal=decimal) - if path is None: - return result + result = df.to_csv( + path_or_buf, index=index, sep=sep, na_rep=na_rep, + float_format=float_format, header=header, + index_label=index_label, mode=mode, encoding=encoding, + compression=compression, date_format=date_format, + decimal=decimal, columns=columns, quoting=quoting, + quotechar=quotechar, line_terminator=line_terminator, + chunksize=chunksize, doublequote=doublequote, + escapechar=escapechar, + ) + + return result @Appender(generic._shared_docs['to_excel'] % _shared_doc_kwargs) def to_excel(self, excel_writer, sheet_name='Sheet1', na_rep='',
(**EDIT**: Most of this comment is related to an old attempt. See [this comment](https://github.com/pandas-dev/pandas/pull/19745#issuecomment-404944973) and below for the content of the current PR) I tried my best to keep everything backwards compatible. A simpler solution would have been simply ```python def to_csv(self, path_or_buf=None, *args, **kwargs): """ Write Series to a comma-separated values (csv) file Parameters ---------- path_or_buf : string or file handle, default None File path or object, if None is provided the result is returned as a string. args Positional arguments passed to :func:`pandas.DataFrame.to_csv` kwargs Keyword arguments passed to :func:`pandas.DataFrame.to_csv` Returns ------- None or string If `path_or_buf` is None, returns the result as a string, otherwise returns None. Notes ----- Constructs a DataFrame using :func:`~pandas.Series.to_frame`, then calls the method :func:`pandas.DataFrame.to_csv` on that DataFrame. """ df = self.to_frame() # result is a string if path_or_buf is None, otherwise None result = df.to_csv(path_or_buf, *args, **kwargs) return result ``` Unfortunately, such a solution breaks backwards compatibility for the following reasons: - If given as keyword arguments, `path_or_buf` of `DataFrame.to_csv` corresponds to the differently named argument `path` of `Series.to_csv`. - The ordering of positional arguments for `DataFrame.to_csv` does not agree fully with the ordering of positional arguments for `Series.to_csv` - The default value of `header` is True for `DataFrame.to_csv` but False for `Series.to_csv` Maybe this is something that could be implemented in the long run? - [x] closes #19715 - [ ] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
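To make the header mismatch concrete, a small sketch; the inline output comments assume the pre-change defaults discussed in this PR (`header=False` for `Series.to_csv`, `header=True` for `DataFrame.to_csv`):

```python
import pandas as pd

s = pd.Series([1, 2, 3], name='x')

# DataFrame.to_csv writes the column header by default:
print(s.to_frame().to_csv())  # ",x\n0,1\n1,2\n2,3\n"

# Series.to_csv historically did not, one of the mismatches deprecated here:
print(s.to_csv())             # "0,1\n1,2\n2,3\n"
```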
https://api.github.com/repos/pandas-dev/pandas/pulls/19745
2018-02-18T01:06:49Z
2018-07-25T16:49:08Z
null
2023-05-11T01:17:24Z
Fix name setting in DTI/TDI __add__ and __sub__
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 879b245af49cd..fdcec41110363 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -730,6 +730,8 @@ Datetimelike - Bug in :class:`Timestamp` and :func:`to_datetime` where a string representing a barely out-of-bounds timestamp would be incorrectly rounded down instead of raising ``OutOfBoundsDatetime`` (:issue:`19382`) - Bug in :func:`Timestamp.floor` :func:`DatetimeIndex.floor` where time stamps far in the future and past were not rounded correctly (:issue:`19206`) - Bug in :func:`to_datetime` where passing an out-of-bounds datetime with ``errors='coerce'`` and ``utc=True`` would raise ``OutOfBoundsDatetime`` instead of parsing to ``NaT`` (:issue:`19612`) +- Bug in :class:`DatetimeIndex` and :class:`TimedeltaIndex` addition and subtraction where name of the returned object was not always set consistently. (:issue:`19744`) +- Timedelta ^^^^^^^^^ diff --git a/pandas/core/common.py b/pandas/core/common.py index 6748db825acf0..77dc1522052d4 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -121,21 +121,6 @@ def _consensus_name_attr(objs): return name -def _maybe_match_name(a, b): - a_has = hasattr(a, 'name') - b_has = hasattr(b, 'name') - if a_has and b_has: - if a.name == b.name: - return a.name - else: - return None - elif a_has: - return a.name - elif b_has: - return b.name - return None - - def _get_info_slice(obj, indexer): """Slice the info axis of `obj` with `indexer`.""" if not hasattr(obj, '_info_axis_number'): diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index c98f8ceea0ffa..187f9fcf52dd4 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -29,7 +29,7 @@ from pandas.core.dtypes.generic import ( ABCIndex, ABCSeries, ABCPeriodIndex, ABCIndexClass) from pandas.core.dtypes.missing import isna -from pandas.core import common as com, algorithms +from pandas.core import common as com, algorithms, ops from pandas.core.algorithms import checked_add_with_arr from pandas.errors import NullFrequencyError import pandas.io.formats.printing as printing @@ -661,29 +661,37 @@ def __add__(self, other): if isinstance(other, ABCSeries): return NotImplemented elif is_timedelta64_dtype(other): - return self._add_delta(other) + result = self._add_delta(other) elif isinstance(other, (DateOffset, timedelta)): - return self._add_delta(other) + result = self._add_delta(other) elif is_offsetlike(other): # Array/Index of DateOffset objects - return self._add_offset_array(other) + result = self._add_offset_array(other) elif isinstance(self, TimedeltaIndex) and isinstance(other, Index): if hasattr(other, '_add_delta'): - return other._add_delta(self) - raise TypeError("cannot add TimedeltaIndex and {typ}" - .format(typ=type(other))) + result = other._add_delta(self) + else: + raise TypeError("cannot add TimedeltaIndex and {typ}" + .format(typ=type(other))) elif is_integer(other): - return self.shift(other) + # This check must come after the check for timedelta64_dtype + # or else it will incorrectly catch np.timedelta64 objects + result = self.shift(other) elif isinstance(other, (datetime, np.datetime64)): - return self._add_datelike(other) + result = self._add_datelike(other) elif isinstance(other, Index): - return self._add_datelike(other) + result = self._add_datelike(other) elif is_integer_dtype(other) and self.freq is None: # GH#19123 raise NullFrequencyError("Cannot shift with no freq") else: # 
pragma: no cover return NotImplemented + if result is not NotImplemented: + res_name = ops.get_op_result_name(self, other) + result.name = res_name + return result + cls.__add__ = __add__ cls.__radd__ = __add__ @@ -697,25 +705,27 @@ def __sub__(self, other): if isinstance(other, ABCSeries): return NotImplemented elif is_timedelta64_dtype(other): - return self._add_delta(-other) + result = self._add_delta(-other) elif isinstance(other, (DateOffset, timedelta)): - return self._add_delta(-other) + result = self._add_delta(-other) elif is_offsetlike(other): # Array/Index of DateOffset objects - return self._sub_offset_array(other) + result = self._sub_offset_array(other) elif isinstance(self, TimedeltaIndex) and isinstance(other, Index): if not isinstance(other, TimedeltaIndex): raise TypeError("cannot subtract TimedeltaIndex and {typ}" .format(typ=type(other).__name__)) - return self._add_delta(-other) + result = self._add_delta(-other) elif isinstance(other, DatetimeIndex): - return self._sub_datelike(other) + result = self._sub_datelike(other) elif is_integer(other): - return self.shift(-other) + # This check must come after the check for timedelta64_dtype + # or else it will incorrectly catch np.timedelta64 objects + result = self.shift(-other) elif isinstance(other, (datetime, np.datetime64)): - return self._sub_datelike(other) + result = self._sub_datelike(other) elif isinstance(other, Period): - return self._sub_period(other) + result = self._sub_period(other) elif isinstance(other, Index): raise TypeError("cannot subtract {typ1} and {typ2}" .format(typ1=type(self).__name__, @@ -726,6 +736,11 @@ def __sub__(self, other): else: # pragma: no cover return NotImplemented + if result is not NotImplemented: + res_name = ops.get_op_result_name(self, other) + result.name = res_name + return result + cls.__sub__ = __sub__ def __rsub__(self, other): diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index cc9ce1f3fd5eb..debeabf9bae23 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -886,7 +886,7 @@ def _sub_datelike(self, other): else: raise TypeError("cannot subtract DatetimeIndex and {typ}" .format(typ=type(other).__name__)) - return TimedeltaIndex(result, name=self.name, copy=False) + return TimedeltaIndex(result) def _sub_datelike_dti(self, other): """subtraction of two DatetimeIndexes""" @@ -910,20 +910,31 @@ def _maybe_update_attributes(self, attrs): return attrs def _add_delta(self, delta): - if isinstance(delta, ABCSeries): - return NotImplemented + """ + Add a timedelta-like, DateOffset, or TimedeltaIndex-like object + to self. 
+ + Parameters + ---------- + delta : {timedelta, np.timedelta64, DateOffset, + TimedelaIndex, ndarray[timedelta64]} + Returns + ------- + result : DatetimeIndex + + Notes + ----- + The result's name is set outside of _add_delta by the calling + method (__add__ or __sub__) + """ from pandas import TimedeltaIndex - name = self.name if isinstance(delta, (Tick, timedelta, np.timedelta64)): new_values = self._add_delta_td(delta) elif is_timedelta64_dtype(delta): if not isinstance(delta, TimedeltaIndex): delta = TimedeltaIndex(delta) - else: - # update name when delta is Index - name = com._maybe_match_name(self, delta) new_values = self._add_delta_tdi(delta) elif isinstance(delta, DateOffset): new_values = self._add_offset(delta).asi8 @@ -931,7 +942,7 @@ def _add_delta(self, delta): new_values = self.astype('O') + delta tz = 'UTC' if self.tz is not None else None - result = DatetimeIndex(new_values, tz=tz, name=name, freq='infer') + result = DatetimeIndex(new_values, tz=tz, freq='infer') if self.tz is not None and self.tz is not utc: result = result.tz_convert(self.tz) return result @@ -954,22 +965,19 @@ def _add_offset(self, offset): def _add_offset_array(self, other): # Array/Index of DateOffset objects - if isinstance(other, ABCSeries): - return NotImplemented - elif len(other) == 1: + if len(other) == 1: return self + other[0] else: warnings.warn("Adding/subtracting array of DateOffsets to " "{} not vectorized".format(type(self)), PerformanceWarning) return self.astype('O') + np.array(other) + # TODO: pass freq='infer' like we do in _sub_offset_array? # TODO: This works for __add__ but loses dtype in __sub__ def _sub_offset_array(self, other): # Array/Index of DateOffset objects - if isinstance(other, ABCSeries): - return NotImplemented - elif len(other) == 1: + if len(other) == 1: return self - other[0] else: warnings.warn("Adding/subtracting array of DateOffsets to " diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index 8f2d7d382a16e..60798e6d77e37 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -729,7 +729,7 @@ def _sub_datelike(self, other): if other is tslib.NaT: new_data = np.empty(len(self), dtype=np.int64) new_data.fill(tslib.iNaT) - return TimedeltaIndex(new_data, name=self.name) + return TimedeltaIndex(new_data) return NotImplemented def _sub_period(self, other): @@ -744,7 +744,7 @@ def _sub_period(self, other): new_data = new_data.astype(np.float64) new_data[self._isnan] = np.nan # result must be Int64Index or Float64Index - return Index(new_data, name=self.name) + return Index(new_data) def shift(self, n): """ diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py index 4b543262fc485..af1c2f131fd60 100644 --- a/pandas/core/indexes/timedeltas.py +++ b/pandas/core/indexes/timedeltas.py @@ -356,19 +356,32 @@ def _maybe_update_attributes(self, attrs): return attrs def _add_delta(self, delta): + """ + Add a timedelta-like, Tick, or TimedeltaIndex-like object + to self. 
+ + Parameters + ---------- + delta : {timedelta, np.timedelta64, Tick, TimedeltaIndex} + + Returns + ------- + result : TimedeltaIndex + + Notes + ----- + The result's name is set outside of _add_delta by the calling + method (__add__ or __sub__) + """ if isinstance(delta, (Tick, timedelta, np.timedelta64)): new_values = self._add_delta_td(delta) - name = self.name elif isinstance(delta, TimedeltaIndex): new_values = self._add_delta_tdi(delta) - # update name when delta is index - name = com._maybe_match_name(self, delta) else: raise TypeError("cannot add the type {0} to a TimedeltaIndex" .format(type(delta))) - result = TimedeltaIndex(new_values, freq='infer', name=name) - return result + return TimedeltaIndex(new_values, freq='infer') def _evaluate_with_timedelta_like(self, other, op, opstr, reversed=False): if isinstance(other, ABCSeries): @@ -409,7 +422,7 @@ def _add_datelike(self, other): result = checked_add_with_arr(i8, other.value, arr_mask=self._isnan) result = self._maybe_mask_results(result, fill_value=iNaT) - return DatetimeIndex(result, name=self.name, copy=False) + return DatetimeIndex(result) def _sub_datelike(self, other): # GH#19124 Timedelta - datetime is not in general well-defined. @@ -426,9 +439,7 @@ def _add_offset_array(self, other): # TimedeltaIndex can only operate with a subset of DateOffset # subclasses. Incompatible classes will raise AttributeError, # which we re-raise as TypeError - if isinstance(other, ABCSeries): - return NotImplemented - elif len(other) == 1: + if len(other) == 1: return self + other[0] else: from pandas.errors import PerformanceWarning warnings.warn("Adding/subtracting array of DateOffsets to " "{} not vectorized".format(type(self)), PerformanceWarning) return self.astype('O') + np.array(other) + # TODO: pass freq='infer' like we do in _sub_offset_array? # TODO: This works for __add__ but loses dtype in __sub__ except AttributeError: raise TypeError("Cannot add non-tick DateOffset to TimedeltaIndex") @@ -446,9 +458,7 @@ def _sub_offset_array(self, other): # TimedeltaIndex can only operate with a subset of DateOffset # subclasses. Incompatible classes will raise AttributeError, # which we re-raise as TypeError - if isinstance(other, ABCSeries): - return NotImplemented - elif len(other) == 1: + if len(other) == 1: return self - other[0] else: from pandas.errors import PerformanceWarning diff --git a/pandas/core/ops.py b/pandas/core/ops.py index da65f1f31ed2a..bdce611f58f06 100644 --- a/pandas/core/ops.py +++ b/pandas/core/ops.py @@ -42,6 +42,67 @@ ABCSparseSeries, ABCSparseArray) +# ----------------------------------------------------------------------------- +# Ops Wrapping Utilities + +def get_op_result_name(left, right): + """ + Find the appropriate name to pin to an operation result. This result + should always be either an Index or a Series. + + Parameters + ---------- + left : {Series, Index} + right : object + + Returns + ------- + name : object + Usually a string + """ + # `left` is always a pd.Series when called from within ops + if isinstance(right, (ABCSeries, pd.Index)): + name = _maybe_match_name(left, right) + else: + name = left.name + return name + + +def _maybe_match_name(a, b): + """ + Try to find a name to attach to the result of an operation between + a and b. If only one of these has a `name` attribute, return that + name. Otherwise return a consensus name if they match or None if + they have different names.
+ + Parameters + ---------- + a : object + b : object + + Returns + ------- + name : str or None + + See also + -------- + pandas.core.common._consensus_name_attr + """ + a_has = hasattr(a, 'name') + b_has = hasattr(b, 'name') + if a_has and b_has: + if a.name == b.name: + return a.name + else: + # TODO: what if they both have np.nan for their names? + return None + elif a_has: + return a.name + elif b_has: + return b.name + return None + + # ----------------------------------------------------------------------------- # Reversed Operations not available in the stdlib operator module. # Defining these instead of using lambdas allows us to reference them by name. @@ -824,7 +885,7 @@ def wrapper(left, right, name=name, na_op=na_op): return NotImplemented left, right = _align_method_SERIES(left, right) - res_name = _get_series_op_result_name(left, right) + res_name = get_op_result_name(left, right) if is_datetime64_dtype(left) or is_datetime64tz_dtype(left): result = dispatch_to_index_op(op, left, right, pd.DatetimeIndex) @@ -888,15 +949,6 @@ def dispatch_to_index_op(op, left, right, index_class): return result -def _get_series_op_result_name(left, right): - # `left` is always a pd.Series - if isinstance(right, (ABCSeries, pd.Index)): - name = com._maybe_match_name(left, right) - else: - name = left.name - return name - - def _comp_method_OBJECT_ARRAY(op, x, y): if isinstance(y, list): y = construct_1d_object_array_from_listlike(y) @@ -974,7 +1026,7 @@ def wrapper(self, other, axis=None): if axis is not None: self._get_axis_number(axis) - res_name = _get_series_op_result_name(self, other) + res_name = get_op_result_name(self, other) if isinstance(other, ABCDataFrame): # pragma: no cover # Defer to DataFrame implementation; fail early @@ -1100,7 +1152,7 @@ def wrapper(self, other): return NotImplemented elif isinstance(other, ABCSeries): - name = com._maybe_match_name(self, other) + name = get_op_result_name(self, other) is_other_int_dtype = is_integer_dtype(other.dtype) other = fill_int(other) if is_other_int_dtype else fill_bool(other) @@ -1542,7 +1594,7 @@ def wrapper(self, other): def _sparse_series_op(left, right, op, name): left, right = left.align(right, join='outer', copy=False) new_index = left.index - new_name = com._maybe_match_name(left, right) + new_name = get_op_result_name(left, right) from pandas.core.sparse.array import _sparse_array_op result = _sparse_array_op(left.values, right.values, op, name, diff --git a/pandas/core/series.py b/pandas/core/series.py index 90dc14836ab55..79ffb8be65838 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -1728,7 +1728,7 @@ def _binop(self, other, func, level=None, fill_value=None): with np.errstate(all='ignore'): result = func(this_vals, other_vals) - name = com._maybe_match_name(self, other) + name = ops.get_op_result_name(self, other) result = self._constructor(result, index=new_index, name=name) result = result.__finalize__(self) if name is None: @@ -1769,7 +1769,7 @@ def combine(self, other, func, fill_value=np.nan): """ if isinstance(other, Series): new_index = self.index.union(other.index) - new_name = com._maybe_match_name(self, other) + new_name = ops.get_op_result_name(self, other) new_values = np.empty(len(new_index), dtype=self.dtype) for i, idx in enumerate(new_index): lv = self.get(idx, fill_value) @@ -1814,7 +1814,7 @@ def combine_first(self, other): this = self.reindex(new_index, copy=False) other = other.reindex(new_index, copy=False) # TODO: do we need name? 
- name = com._maybe_match_name(self, other) # noqa + name = ops.get_op_result_name(self, other) # noqa rs_vals = com._where_compat(isna(this), other._values, this._values) return self._constructor(rs_vals, index=new_index).__finalize__(self) diff --git a/pandas/tests/indexes/datetimes/test_arithmetic.py b/pandas/tests/indexes/datetimes/test_arithmetic.py index ddc97636ae0a8..f252d6ec31f89 100644 --- a/pandas/tests/indexes/datetimes/test_arithmetic.py +++ b/pandas/tests/indexes/datetimes/test_arithmetic.py @@ -721,11 +721,10 @@ def test_dti_add_series(self, tz, names): result4 = index + ser.values tm.assert_index_equal(result4, expected) - @pytest.mark.parametrize('box', [np.array, pd.Index]) - def test_dti_add_offset_array(self, tz, box): + def test_dti_add_offset_array(self, tz): # GH#18849 dti = pd.date_range('2017-01-01', periods=2, tz=tz) - other = box([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)]) + other = np.array([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)]) with tm.assert_produces_warning(PerformanceWarning): res = dti + other @@ -737,11 +736,29 @@ def test_dti_add_offset_array(self, tz, box): res2 = other + dti tm.assert_index_equal(res2, expected) - @pytest.mark.parametrize('box', [np.array, pd.Index]) - def test_dti_sub_offset_array(self, tz, box): + @pytest.mark.parametrize('names', [(None, None, None), + ('foo', 'bar', None), + ('foo', 'foo', 'foo')]) + def test_dti_add_offset_index(self, tz, names): + # GH#18849, GH#19744 + dti = pd.date_range('2017-01-01', periods=2, tz=tz, name=names[0]) + other = pd.Index([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)], + name=names[1]) + + with tm.assert_produces_warning(PerformanceWarning): + res = dti + other + expected = DatetimeIndex([dti[n] + other[n] for n in range(len(dti))], + name=names[2], freq='infer') + tm.assert_index_equal(res, expected) + + with tm.assert_produces_warning(PerformanceWarning): + res2 = other + dti + tm.assert_index_equal(res2, expected) + + def test_dti_sub_offset_array(self, tz): # GH#18824 dti = pd.date_range('2017-01-01', periods=2, tz=tz) - other = box([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)]) + other = np.array([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)]) with tm.assert_produces_warning(PerformanceWarning): res = dti - other @@ -749,6 +766,21 @@ def test_dti_sub_offset_array(self, tz, box): name=dti.name, freq='infer') tm.assert_index_equal(res, expected) + @pytest.mark.parametrize('names', [(None, None, None), + ('foo', 'bar', None), + ('foo', 'foo', 'foo')]) + def test_dti_sub_offset_index(self, tz, names): + # GH#18824, GH#19744 + dti = pd.date_range('2017-01-01', periods=2, tz=tz, name=names[0]) + other = pd.Index([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)], + name=names[1]) + + with tm.assert_produces_warning(PerformanceWarning): + res = dti - other + expected = DatetimeIndex([dti[n] - other[n] for n in range(len(dti))], + name=names[2], freq='infer') + tm.assert_index_equal(res, expected) + @pytest.mark.parametrize('names', [(None, None, None), ('foo', 'bar', None), ('foo', 'foo', 'foo')]) diff --git a/pandas/tests/indexes/timedeltas/test_arithmetic.py b/pandas/tests/indexes/timedeltas/test_arithmetic.py index 3dc60ed33b958..029fdfcefc299 100644 --- a/pandas/tests/indexes/timedeltas/test_arithmetic.py +++ b/pandas/tests/indexes/timedeltas/test_arithmetic.py @@ -194,11 +194,31 @@ def test_shift_no_freq(self): # ------------------------------------------------------------- - @pytest.mark.parametrize('box', [np.array, pd.Index]) - def test_tdi_add_offset_array(self, box): + 
@pytest.mark.parametrize('names', [(None, None, None), + ('foo', 'bar', None), + ('foo', 'foo', 'foo')]) + def test_tdi_add_offset_index(self, names): + # GH#18849, GH#19744 + tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'], + name=names[0]) + other = pd.Index([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)], + name=names[1]) + + expected = TimedeltaIndex([tdi[n] + other[n] for n in range(len(tdi))], + freq='infer', name=names[2]) + + with tm.assert_produces_warning(PerformanceWarning): + res = tdi + other + tm.assert_index_equal(res, expected) + + with tm.assert_produces_warning(PerformanceWarning): + res2 = other + tdi + tm.assert_index_equal(res2, expected) + + def test_tdi_add_offset_array(self): # GH#18849 tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00']) - other = box([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)]) + other = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)]) expected = TimedeltaIndex([tdi[n] + other[n] for n in range(len(tdi))], freq='infer') @@ -211,23 +231,27 @@ def test_tdi_add_offset_array(self, box): res2 = other + tdi tm.assert_index_equal(res2, expected) - anchored = box([pd.offsets.QuarterEnd(), - pd.offsets.Week(weekday=2)]) + @pytest.mark.parametrize('names', [(None, None, None), + ('foo', 'bar', None), + ('foo', 'foo', 'foo')]) + def test_tdi_sub_offset_index(self, names): + # GH#18824, GH#19744 + tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'], + name=names[0]) + other = pd.Index([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)], + name=names[1]) - # addition/subtraction ops with anchored offsets should issue - # a PerformanceWarning and _then_ raise a TypeError. - with pytest.raises(TypeError): - with tm.assert_produces_warning(PerformanceWarning): - tdi + anchored - with pytest.raises(TypeError): - with tm.assert_produces_warning(PerformanceWarning): - anchored + tdi + expected = TimedeltaIndex([tdi[n] - other[n] for n in range(len(tdi))], + freq='infer', name=names[2]) + + with tm.assert_produces_warning(PerformanceWarning): + res = tdi - other + tm.assert_index_equal(res, expected) - @pytest.mark.parametrize('box', [np.array, pd.Index]) - def test_tdi_sub_offset_array(self, box): + def test_tdi_sub_offset_array(self): # GH#18824 tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00']) - other = box([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)]) + other = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)]) expected = TimedeltaIndex([tdi[n] - other[n] for n in range(len(tdi))], freq='infer') @@ -236,17 +260,6 @@ def test_tdi_sub_offset_array(self, box): res = tdi - other tm.assert_index_equal(res, expected) - anchored = box([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)]) - - # addition/subtraction ops with anchored offsets should issue - # a PerformanceWarning and _then_ raise a TypeError. 
- with pytest.raises(TypeError): - with tm.assert_produces_warning(PerformanceWarning): - tdi - anchored - with pytest.raises(TypeError): - with tm.assert_produces_warning(PerformanceWarning): - anchored - tdi - @pytest.mark.parametrize('names', [(None, None, None), ('foo', 'bar', None), ('foo', 'foo', 'foo')]) @@ -275,8 +288,12 @@ def test_tdi_with_offset_series(self, names): res3 = tdi - other tm.assert_series_equal(res3, expected_sub) - anchored = Series([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)], - name=names[1]) + @pytest.mark.parametrize('box', [np.array, pd.Index, pd.Series]) + def test_tdi_add_sub_anchored_offset_arraylike(self, box): + # GH#18824 + tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00']) + + anchored = box([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)]) # addition/subtraction ops with anchored offsets should issue # a PerformanceWarning and _then_ raise a TypeError. diff --git a/pandas/tests/test_common.py b/pandas/tests/test_common.py index 57479be4d989f..0b329f64dafa3 100644 --- a/pandas/tests/test_common.py +++ b/pandas/tests/test_common.py @@ -9,6 +9,7 @@ from pandas import Series, Timestamp from pandas.compat import range, lmap import pandas.core.common as com +from pandas.core import ops import pandas.util.testing as tm @@ -167,26 +168,26 @@ def test_random_state(): def test_maybe_match_name(): - matched = com._maybe_match_name( + matched = ops._maybe_match_name( Series([1], name='x'), Series( [2], name='x')) assert (matched == 'x') - matched = com._maybe_match_name( + matched = ops._maybe_match_name( Series([1], name='x'), Series( [2], name='y')) assert (matched is None) - matched = com._maybe_match_name(Series([1]), Series([2], name='x')) + matched = ops._maybe_match_name(Series([1]), Series([2], name='x')) assert (matched is None) - matched = com._maybe_match_name(Series([1], name='x'), Series([2])) + matched = ops._maybe_match_name(Series([1], name='x'), Series([2])) assert (matched is None) - matched = com._maybe_match_name(Series([1], name='x'), [2]) + matched = ops._maybe_match_name(Series([1], name='x'), [2]) assert (matched == 'x') - matched = com._maybe_match_name([1], Series([2], name='y')) + matched = ops._maybe_match_name([1], Series([2], name='y')) assert (matched == 'y')
- [ ] closes #xxxx - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
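For illustration, here is a minimal sketch of the name-propagation rule this patch establishes, based on the parametrized `names` tests above (it assumes a pandas build containing this change): a name shared by both operands is kept, while mismatched names produce an unnamed result.

```python
import pandas as pd

# Matching names are preserved on the result (GH#19744)
dti = pd.date_range('2017-01-01', periods=2, name='foo')
tdi = pd.TimedeltaIndex(['1 days', '2 days'], name='foo')
assert (dti + tdi).name == 'foo'

# Mismatched names are dropped, giving an unnamed result
tdi_bar = pd.TimedeltaIndex(['1 days', '2 days'], name='bar')
assert (dti + tdi_bar).name is None
```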
https://api.github.com/repos/pandas-dev/pandas/pulls/19744
2018-02-17T23:01:13Z
2018-02-21T23:55:22Z
2018-02-21T23:55:22Z
2018-06-22T03:31:07Z
DOC/BLD: Pinning sphinx to 1.5, as 1.7 has been released and it's incompatible with vendored numpydoc
diff --git a/ci/requirements_dev.txt b/ci/requirements_dev.txt index 82f8de277c57b..a474658fa2922 100644 --- a/ci/requirements_dev.txt +++ b/ci/requirements_dev.txt @@ -7,4 +7,4 @@ pytest>=3.1 python-dateutil>=2.5.0 pytz setuptools>=3.3 -sphinx +sphinx=1.5*
- [X] closes #19742 - [ ] tests added / passed - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/19743
2018-02-17T22:00:32Z
2018-02-18T17:14:39Z
2018-02-18T17:14:39Z
2018-02-19T08:14:57Z
Fix wraparound/overflow in date_range
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index a2198d9103528..fac007febd29f 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -716,7 +716,7 @@ Datetimelike - Bug in :class:`Timestamp` and :func:`to_datetime` where a string representing a barely out-of-bounds timestamp would be incorrectly rounded down instead of raising ``OutOfBoundsDatetime`` (:issue:`19382`) - Bug in :func:`Timestamp.floor` :func:`DatetimeIndex.floor` where time stamps far in the future and past were not rounded correctly (:issue:`19206`) - Bug in :func:`to_datetime` where passing an out-of-bounds datetime with ``errors='coerce'`` and ``utc=True`` would raise ``OutOfBoundsDatetime`` instead of parsing to ``NaT`` (:issue:`19612`) -- +- Bug in :func:`date_range` with frequency of ``Day`` or higher where dates sufficiently far in the future could wrap around to the past instead of raising ``OutOfBoundsDatetime`` (:issue:`19740`) Timezones ^^^^^^^^^ diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index cc9ce1f3fd5eb..0d86b32c719fe 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -2137,11 +2137,11 @@ def _generate_regular_range(start, end, periods, offset): tz = start.tz elif start is not None: b = Timestamp(start).value - e = b + np.int64(periods) * stride + e = _reraise_overflow_as_oob(b, periods, stride, side='start') tz = start.tz elif end is not None: e = Timestamp(end).value + stride - b = e - np.int64(periods) * stride + b = _reraise_overflow_as_oob(e, periods, stride, side='end') tz = end.tz else: raise ValueError("at least 'start' or 'end' should be specified " @@ -2166,6 +2166,44 @@ def _generate_regular_range(start, end, periods, offset): return data +def _reraise_overflow_as_oob(endpoint, periods, stride, side='start'): + """ + Calculate the second endpoint for passing to np.arange, checking + to avoid an integer overflow. Catch OverflowError and re-raise + as OutOfBoundsDatetime. 
+ + Parameters + ---------- + endpoint : int + periods : int + stride : int + side : {'start', 'end'} + + Returns + ------- + other_end : int + + Raises + ------ + OutOfBoundsDatetime + """ + # GH#19740 raise instead of incorrectly wrapping around + assert side in ['start', 'end'] + if side == 'end': + stride *= -1 + + try: + other_end = checked_add_with_arr(np.int64(endpoint), + np.int64(periods) * stride) + except OverflowError: + raise libts.OutOfBoundsDatetime('Cannot generate range with ' + '{side}={endpoint} and ' + 'periods={periods}' + .format(side=side, endpoint=endpoint, + periods=periods)) + return other_end + + def date_range(start=None, end=None, periods=None, freq='D', tz=None, normalize=False, name=None, closed=None, **kwargs): """ diff --git a/pandas/tests/indexes/datetimes/test_date_range.py b/pandas/tests/indexes/datetimes/test_date_range.py index 3738398d017f8..dda68335e03b2 100644 --- a/pandas/tests/indexes/datetimes/test_date_range.py +++ b/pandas/tests/indexes/datetimes/test_date_range.py @@ -13,6 +13,7 @@ import pandas.util.testing as tm import pandas.util._test_decorators as td from pandas import compat +from pandas.errors import OutOfBoundsDatetime from pandas import date_range, bdate_range, offsets, DatetimeIndex, Timestamp from pandas.tseries.offsets import (generate_range, CDay, BDay, DateOffset, MonthEnd, prefix_mapping) @@ -78,6 +79,12 @@ def test_date_range_timestamp_equiv_preserve_frequency(self): class TestDateRanges(TestData): + def test_date_range_out_of_bounds(self): + # GH#19740 + with pytest.raises(OutOfBoundsDatetime): + date_range('2016-01-01', periods=100000, freq='D') + with pytest.raises(OutOfBoundsDatetime): + date_range(end='1763-10-12', periods=100000, freq='D') def test_date_range_gen_error(self): rng = date_range('1/1/2000 00:00', '1/1/2000 00:18', freq='5min')
- [x] closes #14187 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
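As a quick demonstration of the fixed behavior (mirroring the new test, and assuming a build that includes this patch), a range that would land past `Timestamp.max` now raises rather than wrapping around:

```python
import pandas as pd
from pandas.errors import OutOfBoundsDatetime

# 100000 daily periods from 2016 runs past Timestamp.max (2262-04-11),
# so this now raises instead of silently wrapping into the past.
try:
    pd.date_range('2016-01-01', periods=100000, freq='D')
except OutOfBoundsDatetime as exc:
    print('raised:', exc)
```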
https://api.github.com/repos/pandas-dev/pandas/pulls/19740
2018-02-17T18:53:45Z
2018-02-19T21:49:45Z
null
2018-02-19T21:50:07Z
Finish off tests.tseries.test_timezones
diff --git a/pandas/tests/scalar/timestamp/test_unary_ops.py b/pandas/tests/scalar/timestamp/test_unary_ops.py index 8a6989c909cb2..994ff86e6fdf9 100644 --- a/pandas/tests/scalar/timestamp/test_unary_ops.py +++ b/pandas/tests/scalar/timestamp/test_unary_ops.py @@ -4,11 +4,13 @@ import pytest import pytz from pytz import utc +from dateutil.tz import gettz import pandas.util.testing as tm import pandas.util._test_decorators as td from pandas.compat import PY3 +from pandas._libs import tslib from pandas._libs.tslibs.frequencies import _INVALID_FREQ_ERROR from pandas import Timestamp, NaT @@ -215,6 +217,28 @@ def test_replace_tzinfo(self): assert result_dt == result_pd assert result_dt == result_pd.to_pydatetime() + @pytest.mark.parametrize('tz, normalize', [ + (pytz.timezone('US/Eastern'), lambda x: x.tzinfo.normalize(x)), + (gettz('US/Eastern'), lambda x: x)]) + def test_replace_across_dst(self, tz, normalize): + # GH#18319 check that 1) timezone is correctly normalized and + # 2) that hour is not incorrectly changed by this normalization + ts_naive = Timestamp('2017-12-03 16:03:30') + ts_aware = tslib._localize_pydatetime(ts_naive, tz) + + # Preliminary sanity-check + assert ts_aware == normalize(ts_aware) + + # Replace across DST boundary + ts2 = ts_aware.replace(month=6) + + # Check that `replace` preserves hour literal + assert (ts2.hour, ts2.minute) == (ts_aware.hour, ts_aware.minute) + + # Check that post-replace object is appropriately normalized + ts2b = normalize(ts2) + assert ts2 == ts2b + # -------------------------------------------------------------- @td.skip_if_windows diff --git a/pandas/tests/tseries/test_timezones.py b/pandas/tests/tseries/test_timezones.py deleted file mode 100644 index 97326dc04a522..0000000000000 --- a/pandas/tests/tseries/test_timezones.py +++ /dev/null @@ -1,108 +0,0 @@ -# pylint: disable-msg=E1101,W0612 -import pytest - -import pytz - -from datetime import datetime - -from pandas._libs.tslibs import timezones -from pandas import Timestamp - - -class TestTimeZoneSupportPytz(object): - - def tz(self, tz): - # Construct a timezone object from a string. Overridden in subclass to - # parameterize tests. - return pytz.timezone(tz) - - def tzstr(self, tz): - # Construct a timezone string from a string. Overridden in subclass to - # parameterize tests. - return tz - - def localize(self, tz, x): - return tz.localize(x) - - def normalize(self, ts): - tzinfo = ts.tzinfo - return tzinfo.normalize(ts) - - def cmptz(self, tz1, tz2): - # Compare two timezones. Overridden in subclass to parameterize - # tests. 
- return tz1.zone == tz2.zone - - # test utility methods - def test_infer_tz(self): - eastern = self.tz('US/Eastern') - utc = pytz.utc - - _start = datetime(2001, 1, 1) - _end = datetime(2009, 1, 1) - - start = self.localize(eastern, _start) - end = self.localize(eastern, _end) - assert (timezones.infer_tzinfo(start, end) is - self.localize(eastern, _start).tzinfo) - assert (timezones.infer_tzinfo(start, None) is - self.localize(eastern, _start).tzinfo) - assert (timezones.infer_tzinfo(None, end) is - self.localize(eastern, _end).tzinfo) - - start = utc.localize(_start) - end = utc.localize(_end) - assert (timezones.infer_tzinfo(start, end) is utc) - - end = self.localize(eastern, _end) - pytest.raises(Exception, timezones.infer_tzinfo, start, end) - pytest.raises(Exception, timezones.infer_tzinfo, end, start) - - def test_replace_across_dst(self): - # GH#18319 check that 1) timezone is correctly normalized and - # 2) that hour is not incorrectly changed by this normalization - tz = self.tz('US/Eastern') - - ts_naive = Timestamp('2017-12-03 16:03:30') - ts_aware = self.localize(tz, ts_naive) - - # Preliminary sanity-check - assert ts_aware == self.normalize(ts_aware) - - # Replace across DST boundary - ts2 = ts_aware.replace(month=6) - - # Check that `replace` preserves hour literal - assert (ts2.hour, ts2.minute) == (ts_aware.hour, ts_aware.minute) - - # Check that post-replace object is appropriately normalized - ts2b = self.normalize(ts2) - assert ts2 == ts2b - - -class TestTimeZoneSupportDateutil(TestTimeZoneSupportPytz): - - def tz(self, tz): - """ - Construct a dateutil timezone. - Use tslib.maybe_get_tz so that we get the filename on the tz right - on windows. See #7337. - """ - return timezones.maybe_get_tz('dateutil/' + tz) - - def tzstr(self, tz): - """ Construct a timezone string from a string. Overridden in subclass - to parameterize tests. """ - return 'dateutil/' + tz - - def cmptz(self, tz1, tz2): - """ Compare two timezones. Overridden in subclass to parameterize - tests. 
""" - return tz1 == tz2 - - def localize(self, tz, x): - return x.replace(tzinfo=tz) - - def normalize(self, ts): - # no-op for dateutil - return ts diff --git a/pandas/tests/tslibs/test_timezones.py b/pandas/tests/tslibs/test_timezones.py index 603c5e3fea26f..1bb355f267938 100644 --- a/pandas/tests/tslibs/test_timezones.py +++ b/pandas/tests/tslibs/test_timezones.py @@ -5,6 +5,7 @@ import pytz import dateutil.tz +from pandas._libs import tslib from pandas._libs.tslibs import timezones from pandas import Timestamp @@ -35,3 +36,33 @@ def test_tzlocal(): offset = dateutil.tz.tzlocal().utcoffset(datetime(2011, 1, 1)) offset = offset.total_seconds() * 1000000000 assert ts.value + offset == Timestamp('2011-01-01').value + + +@pytest.mark.parametrize('eastern, localize', [ + (pytz.timezone('US/Eastern'), lambda tz, x: tz.localize(x)), + (dateutil.tz.gettz('US/Eastern'), lambda tz, x: x.replace(tzinfo=tz))]) +def test_infer_tz(eastern, localize): + utc = pytz.utc + + start_naive = datetime(2001, 1, 1) + end_naive = datetime(2009, 1, 1) + + start = localize(eastern, start_naive) + end = localize(eastern, end_naive) + + assert (timezones.infer_tzinfo(start, end) is + tslib._localize_pydatetime(start_naive, eastern).tzinfo) + assert (timezones.infer_tzinfo(start, None) is + tslib._localize_pydatetime(start_naive, eastern).tzinfo) + assert (timezones.infer_tzinfo(None, end) is + tslib._localize_pydatetime(end_naive, eastern).tzinfo) + + start = utc.localize(start_naive) + end = utc.localize(end_naive) + assert timezones.infer_tzinfo(start, end) is utc + + end = tslib._localize_pydatetime(end_naive, eastern) + with pytest.raises(Exception): + timezones.infer_tzinfo(start, end) + with pytest.raises(Exception): + timezones.infer_tzinfo(end, start)
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
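The relocated `test_replace_across_dst` boils down to the following check; this is a rough public-API sketch (the test itself uses the private `tslib._localize_pydatetime`), assuming pytz semantics where a `replace` across a DST boundary keeps the wall-clock time and the zone is then re-normalized:

```python
import pytz
from pandas import Timestamp

tz = pytz.timezone('US/Eastern')
ts = Timestamp('2017-12-03 16:03:30').tz_localize(tz)  # EST, winter

ts2 = ts.replace(month=6)  # crosses the DST boundary into EDT
assert (ts2.hour, ts2.minute) == (ts.hour, ts.minute)  # wall time kept
assert ts2 == tz.normalize(ts2)  # result is already normalized
```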
https://api.github.com/repos/pandas-dev/pandas/pulls/19739
2018-02-17T04:58:44Z
2018-02-18T16:22:16Z
2018-02-18T16:22:16Z
2018-02-18T18:22:13Z
Implement missing tests, fix offset floordiv, rfloordiv, fix return types
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index a2198d9103528..9a4c5b42c858f 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -716,6 +716,8 @@ Datetimelike - Bug in :class:`Timestamp` and :func:`to_datetime` where a string representing a barely out-of-bounds timestamp would be incorrectly rounded down instead of raising ``OutOfBoundsDatetime`` (:issue:`19382`) - Bug in :func:`Timestamp.floor` :func:`DatetimeIndex.floor` where time stamps far in the future and past were not rounded correctly (:issue:`19206`) - Bug in :func:`to_datetime` where passing an out-of-bounds datetime with ``errors='coerce'`` and ``utc=True`` would raise ``OutOfBoundsDatetime`` instead of parsing to ``NaT`` (:issue:`19612`) +- Bug in :func:`Timedelta.__add__`, :func:`Timedelta.__sub__` where adding or subtracting a ``np.timedelta64`` object would return another ``np.timedelta64`` instead of a ``Timedelta`` (:issue:`19738`) +- Bug in :func:`Timedelta.__floordiv__`, :func:`Timedelta.__rfloordiv__` where operating with a ``Tick`` object would raise a ``TypeError`` instead of returning a numeric value (:issue:`19738`) - Timezones diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx index 37693068e0974..884b70a8c4609 100644 --- a/pandas/_libs/tslibs/timedeltas.pyx +++ b/pandas/_libs/tslibs/timedeltas.pyx @@ -478,11 +478,16 @@ def _binary_op_method_timedeltalike(op, name): elif other is NaT: return NaT + elif is_timedelta64_object(other): + # convert to Timedelta below; avoid catching this in + # has-dtype check before then + pass + elif is_datetime64_object(other) or PyDateTime_CheckExact(other): # the PyDateTime_CheckExact case is for a datetime object that # is specifically *not* a Timestamp, as the Timestamp case will be # handled after `_validate_ops_compat` returns False below - from ..tslib import Timestamp + from timestamps import Timestamp return op(self, Timestamp(other)) # We are implicitly requiring the canonical behavior to be # defined by Timestamp methods. @@ -1096,6 +1101,9 @@ class Timedelta(_Timedelta): # just defer if hasattr(other, '_typ'): # Series, DataFrame, ... + if other._typ == 'dateoffset' and hasattr(other, 'delta'): + # Tick offset + return self // other.delta return NotImplemented if hasattr(other, 'dtype'): @@ -1128,6 +1136,9 @@ class Timedelta(_Timedelta): # just defer if hasattr(other, '_typ'): # Series, DataFrame, ... 
+ if other._typ == 'dateoffset' and hasattr(other, 'delta'): + # Tick offset + return other.delta // self return NotImplemented if hasattr(other, 'dtype'): diff --git a/pandas/tests/scalar/test_timedelta.py b/pandas/tests/scalar/test_timedelta.py index 667266be2a89b..4db00015d97c3 100644 --- a/pandas/tests/scalar/test_timedelta.py +++ b/pandas/tests/scalar/test_timedelta.py @@ -1,10 +1,13 @@ """ test the scalar Timedelta """ +from datetime import datetime, timedelta +import operator + import pytest import numpy as np -from datetime import timedelta import pandas as pd +from pandas.core import ops import pandas.util.testing as tm from pandas.core.tools.timedeltas import _coerce_scalar_to_timedelta_type as ct from pandas import (Timedelta, TimedeltaIndex, timedelta_range, Series, @@ -13,6 +16,135 @@ class TestTimedeltaArithmetic(object): + @pytest.mark.parametrize('op', [operator.add, ops.radd]) + def test_td_add_datetimelike(self, op): + # GH#19738 + td = Timedelta(10, unit='d') + + result = op(td, datetime(2016, 1, 1)) + if op is operator.add: + # datetime + Timedelta does _not_ call Timedelta.__radd__, + # so we get a datetime back instead of a Timestamp + assert isinstance(result, pd.Timestamp) + assert result == pd.Timestamp(2016, 1, 11) + + result = op(td, pd.Timestamp('2018-01-12 18:09')) + assert isinstance(result, pd.Timestamp) + assert result == pd.Timestamp('2018-01-22 18:09') + + result = op(td, np.datetime64('2018-01-12')) + assert isinstance(result, pd.Timestamp) + assert result == pd.Timestamp('2018-01-22') + + @pytest.mark.parametrize('op', [operator.add, ops.radd]) + def test_td_add_timedeltalike(self, op): + td = Timedelta(10, unit='d') + + result = op(td, Timedelta(days=10)) + assert isinstance(result, Timedelta) + assert result == Timedelta(days=20) + + result = op(td, timedelta(days=9)) + assert isinstance(result, Timedelta) + assert result == Timedelta(days=19) + + result = op(td, np.timedelta64(-4, 'D')) + assert isinstance(result, Timedelta) + assert result == Timedelta(days=6) + + result = op(td, pd.offsets.Hour(6)) + assert isinstance(result, Timedelta) + assert result == Timedelta(days=10, hours=6) + + def test_td_sub_timedeltalike(self): + td = Timedelta(10, unit='d') + + expected = Timedelta(0, unit='ns') + result = td - td + assert isinstance(result, Timedelta) + assert result == expected + + result = td - td.to_pytimedelta() + assert isinstance(result, Timedelta) + assert result == expected + + result = td - td.to_timedelta64() + assert isinstance(result, Timedelta) + assert result == expected + + result = td - pd.offsets.Hour(1) + assert isinstance(result, Timedelta) + assert result == Timedelta(239, unit='h') + + def test_td_rsub_timedeltalike(self): + td = Timedelta(10, unit='d') + + expected = Timedelta(0, unit='ns') + + result = td.to_pytimedelta() - td + assert isinstance(result, Timedelta) + assert result == expected + + result = td.to_timedelta64() - td + assert isinstance(result, Timedelta) + assert result == expected + + result = pd.offsets.Hour(1) - td + assert isinstance(result, Timedelta) + assert result == Timedelta(-239, unit='h') + + @pytest.mark.parametrize('op', [operator.mul, ops.rmul]) + def test_td_mul_scalar(self, op): + # GH#19738 + td = Timedelta(minutes=3) + + result = op(td, 2) + assert result == Timedelta(minutes=6) + + result = op(td, 1.5) + assert result == Timedelta(minutes=4, seconds=30) + + assert op(td, np.nan) is NaT + + with pytest.raises(TypeError): + # timedelta * datetime is gibberish + op(td, pd.Timestamp(2016, 1, 2)) + + 
with pytest.raises(TypeError): + # invalid multiply with another timedelta + op(td, td) + + def test_td_div_timedeltalike_scalar(self): + # GH#19738 + td = Timedelta(10, unit='d') + + result = td / pd.offsets.Hour(1) + assert result == 240 + + assert td / td == 1 + assert td / np.timedelta64(60, 'h') == 4 + + assert np.isnan(td / NaT) + + def test_td_div_numeric_scalar(self): + # GH#19738 + td = Timedelta(10, unit='d') + + result = td / 2 + assert isinstance(result, Timedelta) + assert result == Timedelta(days=5) + + result = td / 5.0 + assert isinstance(result, Timedelta) + assert result == Timedelta(days=2) + + def test_td_rdiv_timedeltalike_scalar(self): + # GH#19738 + td = Timedelta(10, unit='d') + result = pd.offsets.Hour(1) / td + assert result == 1 / 240.0 + + assert np.timedelta64(60, 'h') / td == 0.25 def test_arithmetic_overflow(self): with pytest.raises(OverflowError): @@ -105,15 +237,6 @@ def test_timedelta_ops_scalar(self): result = base - offset assert result == expected_sub - def test_ops_offsets(self): - td = Timedelta(10, unit='d') - assert Timedelta(241, unit='h') == td + pd.offsets.Hour(1) - assert Timedelta(241, unit='h') == pd.offsets.Hour(1) + td - assert 240 == td / pd.offsets.Hour(1) - assert 1 / 240.0 == pd.offsets.Hour(1) / td - assert Timedelta(239, unit='h') == td - pd.offsets.Hour(1) - assert Timedelta(-239, unit='h') == pd.offsets.Hour(1) - td - def test_unary_ops(self): td = Timedelta(10, unit='d') @@ -140,10 +263,6 @@ def test_binary_ops_nat(self): def test_binary_ops_integers(self): td = Timedelta(10, unit='d') - assert td * 2 == Timedelta(20, unit='d') - assert td / 2 == Timedelta(5, unit='d') - assert td // 2 == Timedelta(5, unit='d') - # invert assert td * -1 == Timedelta('-10d') assert -1 * td == Timedelta('-10d') @@ -152,15 +271,11 @@ def test_binary_ops_integers(self): pytest.raises(TypeError, lambda: td + 2) pytest.raises(TypeError, lambda: td - 2) - def test_binary_ops_with_timedelta(self): - td = Timedelta(10, unit='d') - - assert td - td == Timedelta(0, unit='ns') - assert td + td == Timedelta(20, unit='d') - assert td / td == 1 - - # invalid multiply with another timedelta - pytest.raises(TypeError, lambda: td * td) + def test_td_floordiv_offsets(self): + # GH#19738 + td = Timedelta(hours=3, minutes=4) + assert td // pd.offsets.Hour(1) == 3 + assert td // pd.offsets.Minute(2) == 92 def test_floordiv(self): # GH#18846 @@ -202,6 +317,10 @@ def test_floordiv(self): res = td // ser assert res.dtype.kind == 'm' + def test_td_rfloordiv_offsets(self): + # GH#19738 + assert pd.offsets.Hour(1) // Timedelta(minutes=25) == 2 + def test_rfloordiv(self): # GH#18846 td = Timedelta(hours=3, minutes=3) @@ -370,7 +489,7 @@ def test_construction(self): days=10, hours=1, minutes=1, seconds=1) assert Timedelta('-10 days 1 h 1m 1s 3us') == -timedelta( days=10, hours=1, minutes=1, seconds=1, microseconds=3) - assert Timedelta('-10 days 1 h 1.5m 1s 3us'), -timedelta( + assert Timedelta('-10 days 1 h 1.5m 1s 3us') == -timedelta( days=10, hours=1, minutes=1, seconds=31, microseconds=3) # Currently invalid as it has a - on the hh:mm:dd part
This splits off from #19365 a number of peripheral changes that were never intended to be part of that PR in the first place. - Implements many tests in the "how is there not already a test for that!?" category - Fixes (+tests) Timedelta // DateOffset and DateOffset // Timedelta - Fixes (+tests) return types for `Timedelta.__add__, __radd__, __sub__, __rsub__(np.timedelta64)` (at the moment these return `timedelta64`) - Fixes a typo in test_construction that caused a dummy assertion to be made instead of the intended one. - [ ] closes #xxxx - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
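Condensing the headline fixes into a short sketch taken directly from the new tests (requires a build with this patch applied):

```python
import numpy as np
import pandas as pd
from pandas import Timedelta

td = Timedelta(10, unit='d')

# Adding a np.timedelta64 now returns a Timedelta, not a timedelta64
res = td + np.timedelta64(-4, 'D')
assert isinstance(res, Timedelta) and res == Timedelta(days=6)

# Floor division with Tick offsets no longer raises TypeError
assert Timedelta(hours=3, minutes=4) // pd.offsets.Hour(1) == 3
assert pd.offsets.Hour(1) // Timedelta(minutes=25) == 2
```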
https://api.github.com/repos/pandas-dev/pandas/pulls/19738
2018-02-17T02:38:52Z
2018-02-19T07:36:38Z
null
2018-06-22T03:31:10Z
DOC: correct merge_asof example
diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index 4b99b0407cfcc..7b1a0875bba59 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -457,8 +457,8 @@ def merge_asof(left, right, on=None, time ticker price quantity bid ask 0 2016-05-25 13:30:00.023 MSFT 51.95 75 NaN NaN 1 2016-05-25 13:30:00.038 MSFT 51.95 155 51.97 51.98 - 2 2016-05-25 13:30:00.048 GOOG 720.77 100 720.50 720.93 - 3 2016-05-25 13:30:00.048 GOOG 720.92 100 720.50 720.93 + 2 2016-05-25 13:30:00.048 GOOG 720.77 100 NaN NaN + 3 2016-05-25 13:30:00.048 GOOG 720.92 100 NaN NaN 4 2016-05-25 13:30:00.048 AAPL 98.00 100 NaN NaN See also diff --git a/pandas/tests/reshape/merge/test_merge_asof.py b/pandas/tests/reshape/merge/test_merge_asof.py index 2f48aef1894a9..cebbcc41c3e17 100644 --- a/pandas/tests/reshape/merge/test_merge_asof.py +++ b/pandas/tests/reshape/merge/test_merge_asof.py @@ -92,11 +92,30 @@ def test_examples2(self): by='ticker', tolerance=pd.Timedelta('2ms')) - pd.merge_asof(trades, quotes, - on='time', - by='ticker', - tolerance=pd.Timedelta('10ms'), - allow_exact_matches=False) + expected = pd.DataFrame({ + 'time': pd.to_datetime(['20160525 13:30:00.023', + '20160525 13:30:00.038', + '20160525 13:30:00.048', + '20160525 13:30:00.048', + '20160525 13:30:00.048']), + 'ticker': ['MSFT', 'MSFT', 'GOOG', 'GOOG', 'AAPL'], + 'price': [51.95, 51.95, + 720.77, 720.92, 98.00], + 'quantity': [75, 155, + 100, 100, 100], + 'bid': [np.nan, 51.97, np.nan, + np.nan, np.nan], + 'ask': [np.nan, 51.98, np.nan, + np.nan, np.nan]}, + columns=['time', 'ticker', 'price', 'quantity', + 'bid', 'ask']) + + result = pd.merge_asof(trades, quotes, + on='time', + by='ticker', + tolerance=pd.Timedelta('10ms'), + allow_exact_matches=False) + assert_frame_equal(result, expected) def test_examples3(self): """ doc-string examples """
- [ ] closes #19489 - [x] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry - updates the incorrect documentation example for merge_asof - adds a test corresponding to this example
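A condensed variant of the corrected example (the three-row trade/quote data below is an illustrative assumption in the spirit of the docstring, not the docstring's full dataset):

```python
import pandas as pd

trades = pd.DataFrame({
    'time': pd.to_datetime(['20160525 13:30:00.023',
                            '20160525 13:30:00.038',
                            '20160525 13:30:00.048']),
    'ticker': ['MSFT', 'MSFT', 'GOOG'],
    'price': [51.95, 51.95, 720.77]})

quotes = pd.DataFrame({
    'time': pd.to_datetime(['20160525 13:30:00.023',
                            '20160525 13:30:00.030',
                            '20160525 13:30:00.048']),
    'ticker': ['MSFT', 'MSFT', 'GOOG'],
    'bid': [51.95, 51.97, 720.50],
    'ask': [51.96, 51.98, 720.93]})

# With allow_exact_matches=False, the 13:30:00.048 GOOG quote no longer
# matches the 13:30:00.048 GOOG trade, so its bid/ask come back as NaN.
print(pd.merge_asof(trades, quotes, on='time', by='ticker',
                    tolerance=pd.Timedelta('10ms'),
                    allow_exact_matches=False))
```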
https://api.github.com/repos/pandas-dev/pandas/pulls/19737
2018-02-17T02:18:03Z
2018-02-18T17:46:54Z
2018-02-18T17:46:53Z
2018-02-19T00:06:21Z
Split+Parametrize Timedelta tests
diff --git a/pandas/tests/scalar/timedelta/test_construction.py b/pandas/tests/scalar/timedelta/test_construction.py new file mode 100644 index 0000000000000..5ccad9e6b4e3c --- /dev/null +++ b/pandas/tests/scalar/timedelta/test_construction.py @@ -0,0 +1,222 @@ +# -*- coding: utf-8 -*- +from datetime import timedelta + +import pytest +import numpy as np + +import pandas as pd +import pandas.util.testing as tm +from pandas import Timedelta + + +def test_construction(): + expected = np.timedelta64(10, 'D').astype('m8[ns]').view('i8') + assert Timedelta(10, unit='d').value == expected + assert Timedelta(10.0, unit='d').value == expected + assert Timedelta('10 days').value == expected + assert Timedelta(days=10).value == expected + assert Timedelta(days=10.0).value == expected + + expected += np.timedelta64(10, 's').astype('m8[ns]').view('i8') + assert Timedelta('10 days 00:00:10').value == expected + assert Timedelta(days=10, seconds=10).value == expected + assert Timedelta(days=10, milliseconds=10 * 1000).value == expected + assert Timedelta(days=10, + microseconds=10 * 1000 * 1000).value == expected + + # rounding cases + assert Timedelta(82739999850000).value == 82739999850000 + assert ('0 days 22:58:59.999850' in str(Timedelta(82739999850000))) + assert Timedelta(123072001000000).value == 123072001000000 + assert ('1 days 10:11:12.001' in str(Timedelta(123072001000000))) + + # string conversion with/without leading zero + # GH#9570 + assert Timedelta('0:00:00') == timedelta(hours=0) + assert Timedelta('00:00:00') == timedelta(hours=0) + assert Timedelta('-1:00:00') == -timedelta(hours=1) + assert Timedelta('-01:00:00') == -timedelta(hours=1) + + # more strings & abbrevs + # GH#8190 + assert Timedelta('1 h') == timedelta(hours=1) + assert Timedelta('1 hour') == timedelta(hours=1) + assert Timedelta('1 hr') == timedelta(hours=1) + assert Timedelta('1 hours') == timedelta(hours=1) + assert Timedelta('-1 hours') == -timedelta(hours=1) + assert Timedelta('1 m') == timedelta(minutes=1) + assert Timedelta('1.5 m') == timedelta(seconds=90) + assert Timedelta('1 minute') == timedelta(minutes=1) + assert Timedelta('1 minutes') == timedelta(minutes=1) + assert Timedelta('1 s') == timedelta(seconds=1) + assert Timedelta('1 second') == timedelta(seconds=1) + assert Timedelta('1 seconds') == timedelta(seconds=1) + assert Timedelta('1 ms') == timedelta(milliseconds=1) + assert Timedelta('1 milli') == timedelta(milliseconds=1) + assert Timedelta('1 millisecond') == timedelta(milliseconds=1) + assert Timedelta('1 us') == timedelta(microseconds=1) + assert Timedelta('1 micros') == timedelta(microseconds=1) + assert Timedelta('1 microsecond') == timedelta(microseconds=1) + assert Timedelta('1.5 microsecond') == Timedelta('00:00:00.000001500') + assert Timedelta('1 ns') == Timedelta('00:00:00.000000001') + assert Timedelta('1 nano') == Timedelta('00:00:00.000000001') + assert Timedelta('1 nanosecond') == Timedelta('00:00:00.000000001') + + # combos + assert Timedelta('10 days 1 hour') == timedelta(days=10, hours=1) + assert Timedelta('10 days 1 h') == timedelta(days=10, hours=1) + assert Timedelta('10 days 1 h 1m 1s') == timedelta( + days=10, hours=1, minutes=1, seconds=1) + assert Timedelta('-10 days 1 h 1m 1s') == -timedelta( + days=10, hours=1, minutes=1, seconds=1) + assert Timedelta('-10 days 1 h 1m 1s') == -timedelta( + days=10, hours=1, minutes=1, seconds=1) + assert Timedelta('-10 days 1 h 1m 1s 3us') == -timedelta( + days=10, hours=1, minutes=1, seconds=1, microseconds=3) + assert Timedelta('-10 
days 1 h 1.5m 1s 3us') == -timedelta( + days=10, hours=1, minutes=1, seconds=31, microseconds=3) + + # Currently invalid as it has a - on the hh:mm:dd part + # (only allowed on the days) + with pytest.raises(ValueError): + Timedelta('-10 days -1 h 1.5m 1s 3us') + + # only leading neg signs are allowed + with pytest.raises(ValueError): + Timedelta('10 days -1 h 1.5m 1s 3us') + + # no units specified + with pytest.raises(ValueError): + Timedelta('3.1415') + + # invalid construction + tm.assert_raises_regex(ValueError, "cannot construct a Timedelta", + lambda: Timedelta()) + tm.assert_raises_regex(ValueError, + "unit abbreviation w/o a number", + lambda: Timedelta('foo')) + tm.assert_raises_regex(ValueError, + "cannot construct a Timedelta from the " + "passed arguments, allowed keywords are ", + lambda: Timedelta(day=10)) + + # floats + expected = np.timedelta64( + 10, 's').astype('m8[ns]').view('i8') + np.timedelta64( + 500, 'ms').astype('m8[ns]').view('i8') + assert Timedelta(10.5, unit='s').value == expected + + # offset + assert pd.to_timedelta(pd.offsets.Hour(2)) == Timedelta(hours=2) + assert Timedelta(pd.offsets.Hour(2)) == Timedelta(hours=2) + assert Timedelta(pd.offsets.Second(2)) == Timedelta(seconds=2) + + # GH#11995: unicode + expected = Timedelta('1H') + result = pd.Timedelta(u'1H') + assert result == expected + assert (pd.to_timedelta(pd.offsets.Hour(2)) == + Timedelta(u'0 days, 02:00:00')) + + with pytest.raises(ValueError): + Timedelta(u'foo bar') + + +@pytest.mark.parametrize('item', list({'days': 'D', + 'seconds': 's', + 'microseconds': 'us', + 'milliseconds': 'ms', + 'minutes': 'm', + 'hours': 'h', + 'weeks': 'W'}.items())) +@pytest.mark.parametrize('npdtype', [np.int64, np.int32, np.int16, + np.float64, np.float32, np.float16]) +def test_td_construction_with_np_dtypes(npdtype, item): + # GH#8757: test construction with np dtypes + pykwarg, npkwarg = item + expected = np.timedelta64(1, npkwarg).astype('m8[ns]').view('i8') + assert Timedelta(**{pykwarg: npdtype(1)}).value == expected + + +@pytest.mark.parametrize('val', [ + '1s', '-1s', '1us', '-1us', '1 day', '-1 day', + '-23:59:59.999999', '-1 days +23:59:59.999999', '-1ns', + '1ns', '-23:59:59.999999999']) +def test_td_from_repr_roundtrip(val): + # round-trip both for string and value + td = Timedelta(val) + assert Timedelta(td.value) == td + + # str does not normally display nanos + if not td.nanoseconds: + assert Timedelta(str(td)) == td + assert Timedelta(td._repr_base(format='all')) == td + + +def test_overflow_on_construction(): + # xref https://github.com/statsmodels/statsmodels/issues/3374 + value = pd.Timedelta('1day').value * 20169940 + with pytest.raises(OverflowError): + pd.Timedelta(value) + + # xref GH#17637 + with pytest.raises(OverflowError): + pd.Timedelta(7 * 19999, unit='D') + + with pytest.raises(OverflowError): + pd.Timedelta(timedelta(days=13 * 19999)) + + +@pytest.mark.parametrize('fmt,exp', [ + ('P6DT0H50M3.010010012S', Timedelta(days=6, minutes=50, seconds=3, + milliseconds=10, microseconds=10, + nanoseconds=12)), + ('P-6DT0H50M3.010010012S', Timedelta(days=-6, minutes=50, seconds=3, + milliseconds=10, microseconds=10, + nanoseconds=12)), + ('P4DT12H30M5S', Timedelta(days=4, hours=12, minutes=30, seconds=5)), + ('P0DT0H0M0.000000123S', Timedelta(nanoseconds=123)), + ('P0DT0H0M0.00001S', Timedelta(microseconds=10)), + ('P0DT0H0M0.001S', Timedelta(milliseconds=1)), + ('P0DT0H1M0S', Timedelta(minutes=1)), + ('P1DT25H61M61S', Timedelta(days=1, hours=25, minutes=61, seconds=61)) +]) +def 
test_iso_constructor(fmt, exp): + assert Timedelta(fmt) == exp + + +@pytest.mark.parametrize('fmt', [ + 'PPPPPPPPPPPP', 'PDTHMS', 'P0DT999H999M999S', + 'P1DT0H0M0.0000000000000S', 'P1DT0H0M00000000000S', + 'P1DT0H0M0.S']) +def test_iso_constructor_raises(fmt): + with tm.assert_raises_regex(ValueError, 'Invalid ISO 8601 Duration ' + 'format - {}'.format(fmt)): + Timedelta(fmt) + + +def test_td_constructor_on_nanoseconds(): + # GH#9273 + result = Timedelta(nanoseconds=100) + expected = Timedelta('100ns') + assert result == expected + + result = Timedelta(days=1, hours=1, minutes=1, weeks=1, seconds=1, + milliseconds=1, microseconds=1, nanoseconds=1) + expected = Timedelta(694861001001001) + assert result == expected + + result = Timedelta(microseconds=1) + Timedelta(nanoseconds=1) + expected = Timedelta('1us1ns') + assert result == expected + + result = Timedelta(microseconds=1) - Timedelta(nanoseconds=1) + expected = Timedelta('999ns') + assert result == expected + + result = Timedelta(microseconds=1) + 5 * Timedelta(nanoseconds=-2) + expected = Timedelta('990ns') + assert result == expected + + with pytest.raises(TypeError): + Timedelta(nanoseconds='abc') diff --git a/pandas/tests/scalar/timedelta/test_formats.py b/pandas/tests/scalar/timedelta/test_formats.py new file mode 100644 index 0000000000000..8a877c7d1c0fa --- /dev/null +++ b/pandas/tests/scalar/timedelta/test_formats.py @@ -0,0 +1,48 @@ +# -*- coding: utf-8 -*- +from pandas import Timedelta + + +def test_repr(): + assert (repr(Timedelta(10, unit='d')) == + "Timedelta('10 days 00:00:00')") + assert (repr(Timedelta(10, unit='s')) == + "Timedelta('0 days 00:00:10')") + assert (repr(Timedelta(10, unit='ms')) == + "Timedelta('0 days 00:00:00.010000')") + assert (repr(Timedelta(-10, unit='ms')) == + "Timedelta('-1 days +23:59:59.990000')") + + +def test_isoformat(): + td = Timedelta(days=6, minutes=50, seconds=3, + milliseconds=10, microseconds=10, nanoseconds=12) + expected = 'P6DT0H50M3.010010012S' + result = td.isoformat() + assert result == expected + + td = Timedelta(days=4, hours=12, minutes=30, seconds=5) + result = td.isoformat() + expected = 'P4DT12H30M5S' + assert result == expected + + td = Timedelta(nanoseconds=123) + result = td.isoformat() + expected = 'P0DT0H0M0.000000123S' + assert result == expected + + # trim nano + td = Timedelta(microseconds=10) + result = td.isoformat() + expected = 'P0DT0H0M0.00001S' + assert result == expected + + # trim micro + td = Timedelta(milliseconds=1) + result = td.isoformat() + expected = 'P0DT0H0M0.001S' + assert result == expected + + # don't strip every 0 + result = Timedelta(minutes=1).isoformat() + expected = 'P0DT0H1M0S' + assert result == expected diff --git a/pandas/tests/scalar/timedelta/test_timedelta.py b/pandas/tests/scalar/timedelta/test_timedelta.py index 420b66b4ce0dc..0f7fb84c6520b 100644 --- a/pandas/tests/scalar/timedelta/test_timedelta.py +++ b/pandas/tests/scalar/timedelta/test_timedelta.py @@ -36,31 +36,6 @@ def test_ops_error_str(self): assert not left == right assert left != right - def test_to_timedelta_on_nanoseconds(self): - # GH 9273 - result = Timedelta(nanoseconds=100) - expected = Timedelta('100ns') - assert result == expected - - result = Timedelta(days=1, hours=1, minutes=1, weeks=1, seconds=1, - milliseconds=1, microseconds=1, nanoseconds=1) - expected = Timedelta(694861001001001) - assert result == expected - - result = Timedelta(microseconds=1) + Timedelta(nanoseconds=1) - expected = Timedelta('1us1ns') - assert result == expected - - result = 
Timedelta(microseconds=1) - Timedelta(nanoseconds=1) - expected = Timedelta('999ns') - assert result == expected - - result = Timedelta(microseconds=1) + 5 * Timedelta(nanoseconds=-2) - expected = Timedelta('990ns') - assert result == expected - - pytest.raises(TypeError, lambda: Timedelta(nanoseconds='abc')) - def test_ops_notimplemented(self): class Other: pass @@ -124,164 +99,6 @@ def test_compare_timedelta_ndarray(self): class TestTimedeltas(object): - def setup_method(self, method): - pass - - def test_construction(self): - - expected = np.timedelta64(10, 'D').astype('m8[ns]').view('i8') - assert Timedelta(10, unit='d').value == expected - assert Timedelta(10.0, unit='d').value == expected - assert Timedelta('10 days').value == expected - assert Timedelta(days=10).value == expected - assert Timedelta(days=10.0).value == expected - - expected += np.timedelta64(10, 's').astype('m8[ns]').view('i8') - assert Timedelta('10 days 00:00:10').value == expected - assert Timedelta(days=10, seconds=10).value == expected - assert Timedelta(days=10, milliseconds=10 * 1000).value == expected - assert (Timedelta(days=10, microseconds=10 * 1000 * 1000) - .value == expected) - - # gh-8757: test construction with np dtypes - timedelta_kwargs = {'days': 'D', - 'seconds': 's', - 'microseconds': 'us', - 'milliseconds': 'ms', - 'minutes': 'm', - 'hours': 'h', - 'weeks': 'W'} - npdtypes = [np.int64, np.int32, np.int16, np.float64, np.float32, - np.float16] - for npdtype in npdtypes: - for pykwarg, npkwarg in timedelta_kwargs.items(): - expected = np.timedelta64(1, npkwarg).astype( - 'm8[ns]').view('i8') - assert Timedelta(**{pykwarg: npdtype(1)}).value == expected - - # rounding cases - assert Timedelta(82739999850000).value == 82739999850000 - assert ('0 days 22:58:59.999850' in str(Timedelta(82739999850000))) - assert Timedelta(123072001000000).value == 123072001000000 - assert ('1 days 10:11:12.001' in str(Timedelta(123072001000000))) - - # string conversion with/without leading zero - # GH 9570 - assert Timedelta('0:00:00') == timedelta(hours=0) - assert Timedelta('00:00:00') == timedelta(hours=0) - assert Timedelta('-1:00:00') == -timedelta(hours=1) - assert Timedelta('-01:00:00') == -timedelta(hours=1) - - # more strings & abbrevs - # GH 8190 - assert Timedelta('1 h') == timedelta(hours=1) - assert Timedelta('1 hour') == timedelta(hours=1) - assert Timedelta('1 hr') == timedelta(hours=1) - assert Timedelta('1 hours') == timedelta(hours=1) - assert Timedelta('-1 hours') == -timedelta(hours=1) - assert Timedelta('1 m') == timedelta(minutes=1) - assert Timedelta('1.5 m') == timedelta(seconds=90) - assert Timedelta('1 minute') == timedelta(minutes=1) - assert Timedelta('1 minutes') == timedelta(minutes=1) - assert Timedelta('1 s') == timedelta(seconds=1) - assert Timedelta('1 second') == timedelta(seconds=1) - assert Timedelta('1 seconds') == timedelta(seconds=1) - assert Timedelta('1 ms') == timedelta(milliseconds=1) - assert Timedelta('1 milli') == timedelta(milliseconds=1) - assert Timedelta('1 millisecond') == timedelta(milliseconds=1) - assert Timedelta('1 us') == timedelta(microseconds=1) - assert Timedelta('1 micros') == timedelta(microseconds=1) - assert Timedelta('1 microsecond') == timedelta(microseconds=1) - assert Timedelta('1.5 microsecond') == Timedelta('00:00:00.000001500') - assert Timedelta('1 ns') == Timedelta('00:00:00.000000001') - assert Timedelta('1 nano') == Timedelta('00:00:00.000000001') - assert Timedelta('1 nanosecond') == Timedelta('00:00:00.000000001') - - # combos - assert 
Timedelta('10 days 1 hour') == timedelta(days=10, hours=1) - assert Timedelta('10 days 1 h') == timedelta(days=10, hours=1) - assert Timedelta('10 days 1 h 1m 1s') == timedelta( - days=10, hours=1, minutes=1, seconds=1) - assert Timedelta('-10 days 1 h 1m 1s') == -timedelta( - days=10, hours=1, minutes=1, seconds=1) - assert Timedelta('-10 days 1 h 1m 1s') == -timedelta( - days=10, hours=1, minutes=1, seconds=1) - assert Timedelta('-10 days 1 h 1m 1s 3us') == -timedelta( - days=10, hours=1, minutes=1, seconds=1, microseconds=3) - assert Timedelta('-10 days 1 h 1.5m 1s 3us'), -timedelta( - days=10, hours=1, minutes=1, seconds=31, microseconds=3) - - # Currently invalid as it has a - on the hh:mm:dd part - # (only allowed on the days) - pytest.raises(ValueError, - lambda: Timedelta('-10 days -1 h 1.5m 1s 3us')) - - # only leading neg signs are allowed - pytest.raises(ValueError, - lambda: Timedelta('10 days -1 h 1.5m 1s 3us')) - - # no units specified - pytest.raises(ValueError, lambda: Timedelta('3.1415')) - - # invalid construction - tm.assert_raises_regex(ValueError, "cannot construct a Timedelta", - lambda: Timedelta()) - tm.assert_raises_regex(ValueError, - "unit abbreviation w/o a number", - lambda: Timedelta('foo')) - tm.assert_raises_regex(ValueError, - "cannot construct a Timedelta from the " - "passed arguments, allowed keywords are ", - lambda: Timedelta(day=10)) - - # round-trip both for string and value - for v in ['1s', '-1s', '1us', '-1us', '1 day', '-1 day', - '-23:59:59.999999', '-1 days +23:59:59.999999', '-1ns', - '1ns', '-23:59:59.999999999']: - - td = Timedelta(v) - assert Timedelta(td.value) == td - - # str does not normally display nanos - if not td.nanoseconds: - assert Timedelta(str(td)) == td - assert Timedelta(td._repr_base(format='all')) == td - - # floats - expected = np.timedelta64( - 10, 's').astype('m8[ns]').view('i8') + np.timedelta64( - 500, 'ms').astype('m8[ns]').view('i8') - assert Timedelta(10.5, unit='s').value == expected - - # offset - assert (to_timedelta(pd.offsets.Hour(2)) == - Timedelta('0 days, 02:00:00')) - assert (Timedelta(pd.offsets.Hour(2)) == - Timedelta('0 days, 02:00:00')) - assert (Timedelta(pd.offsets.Second(2)) == - Timedelta('0 days, 00:00:02')) - - # gh-11995: unicode - expected = Timedelta('1H') - result = pd.Timedelta(u'1H') - assert result == expected - assert (to_timedelta(pd.offsets.Hour(2)) == - Timedelta(u'0 days, 02:00:00')) - - pytest.raises(ValueError, lambda: Timedelta(u'foo bar')) - - def test_overflow_on_construction(self): - # xref https://github.com/statsmodels/statsmodels/issues/3374 - value = pd.Timedelta('1day').value * 20169940 - pytest.raises(OverflowError, pd.Timedelta, value) - - # xref gh-17637 - with pytest.raises(OverflowError): - pd.Timedelta(7 * 19999, unit='D') - - with pytest.raises(OverflowError): - pd.Timedelta(timedelta(days=13 * 19999)) - def test_total_seconds_scalar(self): # see gh-10939 rng = Timedelta('1 days, 10:11:12.100123456') @@ -291,17 +108,6 @@ def test_total_seconds_scalar(self): rng = Timedelta(np.nan) assert np.isnan(rng.total_seconds()) - def test_repr(self): - - assert (repr(Timedelta(10, unit='d')) == - "Timedelta('10 days 00:00:00')") - assert (repr(Timedelta(10, unit='s')) == - "Timedelta('0 days 00:00:10')") - assert (repr(Timedelta(10, unit='ms')) == - "Timedelta('0 days 00:00:00.010000')") - assert (repr(Timedelta(-10, unit='ms')) == - "Timedelta('-1 days +23:59:59.990000')") - def test_conversion(self): for td in [Timedelta(10, unit='d'), @@ -756,63 +562,3 @@ def 
test_components(self): result = s.dt.components assert not result.iloc[0].isna().all() assert result.iloc[1].isna().all() - - def test_isoformat(self): - td = Timedelta(days=6, minutes=50, seconds=3, - milliseconds=10, microseconds=10, nanoseconds=12) - expected = 'P6DT0H50M3.010010012S' - result = td.isoformat() - assert result == expected - - td = Timedelta(days=4, hours=12, minutes=30, seconds=5) - result = td.isoformat() - expected = 'P4DT12H30M5S' - assert result == expected - - td = Timedelta(nanoseconds=123) - result = td.isoformat() - expected = 'P0DT0H0M0.000000123S' - assert result == expected - - # trim nano - td = Timedelta(microseconds=10) - result = td.isoformat() - expected = 'P0DT0H0M0.00001S' - assert result == expected - - # trim micro - td = Timedelta(milliseconds=1) - result = td.isoformat() - expected = 'P0DT0H0M0.001S' - assert result == expected - - # don't strip every 0 - result = Timedelta(minutes=1).isoformat() - expected = 'P0DT0H1M0S' - assert result == expected - - @pytest.mark.parametrize('fmt,exp', [ - ('P6DT0H50M3.010010012S', Timedelta(days=6, minutes=50, seconds=3, - milliseconds=10, microseconds=10, - nanoseconds=12)), - ('P-6DT0H50M3.010010012S', Timedelta(days=-6, minutes=50, seconds=3, - milliseconds=10, microseconds=10, - nanoseconds=12)), - ('P4DT12H30M5S', Timedelta(days=4, hours=12, minutes=30, seconds=5)), - ('P0DT0H0M0.000000123S', Timedelta(nanoseconds=123)), - ('P0DT0H0M0.00001S', Timedelta(microseconds=10)), - ('P0DT0H0M0.001S', Timedelta(milliseconds=1)), - ('P0DT0H1M0S', Timedelta(minutes=1)), - ('P1DT25H61M61S', Timedelta(days=1, hours=25, minutes=61, seconds=61)) - ]) - def test_iso_constructor(self, fmt, exp): - assert Timedelta(fmt) == exp - - @pytest.mark.parametrize('fmt', [ - 'PPPPPPPPPPPP', 'PDTHMS', 'P0DT999H999M999S', - 'P1DT0H0M0.0000000000000S', 'P1DT0H0M00000000000S', - 'P1DT0H0M0.S']) - def test_iso_constructor_raises(self, fmt): - with tm.assert_raises_regex(ValueError, 'Invalid ISO 8601 Duration ' - 'format - {}'.format(fmt)): - Timedelta(fmt)
Started splitting the non-contentious parts off of #19365; this even easier piece was split out from that PR. It just organizes, splits, and parametrizes tests (see the sketch below).
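For anyone skimming the diff, "parametrizing" here means collapsing near-identical assert blocks into one test driven by `pytest.mark.parametrize`, as the new `test_iso_constructor` above does. A minimal sketch of the pattern, with the case list trimmed to two entries for brevity (illustrative only; it mirrors the `fmt`/`exp` names from the diff rather than adding anything new):

```python
import pytest
from pandas import Timedelta

# Each (fmt, exp) tuple becomes its own generated test case, so a
# failure reports the offending pair instead of stopping at the
# first assert inside one monolithic test function.
@pytest.mark.parametrize('fmt,exp', [
    ('P4DT12H30M5S', Timedelta(days=4, hours=12, minutes=30, seconds=5)),
    ('P0DT0H1M0S', Timedelta(minutes=1)),
])
def test_iso_constructor(fmt, exp):
    assert Timedelta(fmt) == exp
```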
https://api.github.com/repos/pandas-dev/pandas/pulls/19736
2018-02-17T01:54:34Z
2018-02-20T11:16:30Z
2018-02-20T11:16:30Z
2018-06-22T03:31:07Z
TST: GH18498 - Split test_pytables into sub-modules
diff --git a/pandas/tests/io/pytables/__init__.py b/pandas/tests/io/pytables/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/pandas/tests/io/pytables/common.py b/pandas/tests/io/pytables/common.py new file mode 100644 index 0000000000000..f014874c79ce0 --- /dev/null +++ b/pandas/tests/io/pytables/common.py @@ -0,0 +1,122 @@ +import os +import tempfile +from contextlib import contextmanager +from distutils.version import LooseVersion + +import pytest +import pandas.util.testing as tm +from pandas.io.pytables import HDFStore + +tables = pytest.importorskip('tables') +_default_compressor = ('blosc' if LooseVersion(tables.__version__) >= + LooseVersion('2.2') else 'zlib') + + +def get_file_name(): + """ Create a file name for test data files """ + return 'tmp.__%s__.h5' % tm.rands(10) + + +def create_tempfile(file_name=None): + """ Create an unopened named temporary file """ + if file_name is None: + file_name = get_file_name() + return os.path.join(tempfile.gettempdir(), file_name) + + +@contextmanager +def ensure_clean_path(path=None): + """ + return essentially a named temporary file that is not opened + and deleted on existing; if path is a list, then create and + return list of filenames + """ + try: + if isinstance(path, list): + filenames = [create_tempfile(p) for p in path] + yield filenames + else: + filenames = [create_tempfile(path)] + yield filenames[0] + finally: + for f in filenames: + safe_remove(f) + + +@contextmanager +def ensure_clean_store(path=None, mode='a', complevel=None, complib=None, + fletcher32=False): + + try: + if path is None: + path = create_tempfile(path) + + store = HDFStore(path, mode=mode, complevel=complevel, + complib=complib, fletcher32=False) + yield store + finally: + safe_close(store) + if mode == 'w' or mode == 'a': + safe_remove(path) + + +def safe_remove(path): + if path is not None: + try: + os.remove(path) + except: + pass + + +def safe_close(store): + try: + if store is not None: + store.close() + except: + pass + + +def _maybe_remove(store, key): + """For tests using tables, try removing the table to be sure there is + no content from previous tests using the same table name.""" + try: + store.remove(key) + except: + pass + + +def _check_roundtrip(obj, comparator, compression=False, **kwargs): + options = {} + if compression: + options['complib'] = _default_compressor + + with ensure_clean_store('w', **options) as store: + store['obj'] = obj + retrieved = store['obj'] + comparator(retrieved, obj, **kwargs) + + +def _check_double_roundtrip(obj, comparator, compression=False, **kwargs): + options = {} + if compression: + options['complib'] = compression or _default_compressor + + with ensure_clean_store('w', **options) as store: + store['obj'] = obj + retrieved = store['obj'] + comparator(retrieved, obj, **kwargs) + store['obj'] = retrieved + again = store['obj'] + comparator(again, obj, **kwargs) + + +def _check_roundtrip_table(obj, comparator, compression=False): + options = {} + if compression: + options['complib'] = _default_compressor + + with ensure_clean_store('w', **options) as store: + store.put('obj', obj, format='table') + retrieved = store['obj'] + + comparator(retrieved, obj) diff --git a/pandas/tests/io/pytables/data/legacy_hdf/datetimetz_object.h5 b/pandas/tests/io/pytables/data/legacy_hdf/datetimetz_object.h5 new file mode 100644 index 0000000000000..8cb4eda470398 Binary files /dev/null and b/pandas/tests/io/pytables/data/legacy_hdf/datetimetz_object.h5 differ diff --git 
a/pandas/tests/io/data/legacy_hdf/legacy_table.h5 b/pandas/tests/io/pytables/data/legacy_hdf/legacy_table.h5 similarity index 100% rename from pandas/tests/io/data/legacy_hdf/legacy_table.h5 rename to pandas/tests/io/pytables/data/legacy_hdf/legacy_table.h5 diff --git a/pandas/tests/io/data/legacy_hdf/periodindex_0.20.1_x86_64_darwin_2.7.13.h5 b/pandas/tests/io/pytables/data/legacy_hdf/periodindex_0.20.1_x86_64_darwin_2.7.13.h5 similarity index 100% rename from pandas/tests/io/data/legacy_hdf/periodindex_0.20.1_x86_64_darwin_2.7.13.h5 rename to pandas/tests/io/pytables/data/legacy_hdf/periodindex_0.20.1_x86_64_darwin_2.7.13.h5 diff --git a/pandas/tests/io/data/legacy_hdf/pytables_native.h5 b/pandas/tests/io/pytables/data/legacy_hdf/pytables_native.h5 similarity index 100% rename from pandas/tests/io/data/legacy_hdf/pytables_native.h5 rename to pandas/tests/io/pytables/data/legacy_hdf/pytables_native.h5 diff --git a/pandas/tests/io/data/legacy_hdf/pytables_native2.h5 b/pandas/tests/io/pytables/data/legacy_hdf/pytables_native2.h5 similarity index 100% rename from pandas/tests/io/data/legacy_hdf/pytables_native2.h5 rename to pandas/tests/io/pytables/data/legacy_hdf/pytables_native2.h5 diff --git a/pandas/tests/io/pytables/test_append.py b/pandas/tests/io/pytables/test_append.py new file mode 100644 index 0000000000000..ad440c0413ad5 --- /dev/null +++ b/pandas/tests/io/pytables/test_append.py @@ -0,0 +1,920 @@ +import pytest +from warnings import catch_warnings +import datetime +from datetime import timedelta + +import numpy as np +import pandas as pd +from pandas import (Series, DataFrame, date_range, Timestamp, + concat, MultiIndex, Panel) +from .common import ensure_clean_store, ensure_clean_path, _maybe_remove + +import pandas.util.testing as tm +from pandas.io.pytables import read_hdf + + +def test_append(): + with ensure_clean_store() as store: + # this is allowed by almost always don't want to do it + # tables.NaturalNameWarning): + with catch_warnings(record=True): + + df = tm.makeTimeDataFrame() + _maybe_remove(store, 'df1') + store.append('df1', df[:10]) + store.append('df1', df[10:]) + tm.assert_frame_equal(store['df1'], df) + + _maybe_remove(store, 'df2') + store.put('df2', df[:10], format='table') + store.append('df2', df[10:]) + tm.assert_frame_equal(store['df2'], df) + + _maybe_remove(store, 'df3') + store.append('/df3', df[:10]) + store.append('/df3', df[10:]) + tm.assert_frame_equal(store['df3'], df) + + # this is allowed by almost always don't want to do it + # tables.NaturalNameWarning + _maybe_remove(store, '/df3 foo') + store.append('/df3 foo', df[:10]) + store.append('/df3 foo', df[10:]) + tm.assert_frame_equal(store['df3 foo'], df) + + # panel + wp = tm.makePanel() + _maybe_remove(store, 'wp1') + store.append('wp1', wp.iloc[:, :10, :]) + store.append('wp1', wp.iloc[:, 10:, :]) + tm.assert_panel_equal(store['wp1'], wp) + + # test using differt order of items on the non-index axes + _maybe_remove(store, 'wp1') + wp_append1 = wp.iloc[:, :10, :] + store.append('wp1', wp_append1) + wp_append2 = wp.iloc[:, 10:, :].reindex(items=wp.items[::-1]) + store.append('wp1', wp_append2) + tm.assert_panel_equal(store['wp1'], wp) + + # dtype issues - mizxed type in a single object column + df = DataFrame(data=[[1, 2], [0, 1], [1, 2], [0, 0]]) + df['mixed_column'] = 'testing' + df.loc[2, 'mixed_column'] = np.nan + _maybe_remove(store, 'df') + store.append('df', df) + tm.assert_frame_equal(store['df'], df) + + # uints - test storage of uints + uint_data = DataFrame({ + 'u08': 
Series(np.random.randint(0, high=255, size=5), + dtype=np.uint8), + 'u16': Series(np.random.randint(0, high=65535, size=5), + dtype=np.uint16), + 'u32': Series(np.random.randint(0, high=2**30, size=5), + dtype=np.uint32), + 'u64': Series([2**58, 2**59, 2**60, 2**61, 2**62], + dtype=np.uint64)}, index=np.arange(5)) + _maybe_remove(store, 'uints') + store.append('uints', uint_data) + tm.assert_frame_equal(store['uints'], uint_data) + + # uints - test storage of uints in indexable columns + _maybe_remove(store, 'uints') + # 64-bit indices not yet supported + store.append('uints', uint_data, data_columns=[ + 'u08', 'u16', 'u32']) + tm.assert_frame_equal(store['uints'], uint_data) + + +def test_append_series(): + with ensure_clean_store() as store: + # basic + ss = tm.makeStringSeries() + ts = tm.makeTimeSeries() + ns = Series(np.arange(100)) + + store.append('ss', ss) + result = store['ss'] + tm.assert_series_equal(result, ss) + assert result.name is None + + store.append('ts', ts) + result = store['ts'] + tm.assert_series_equal(result, ts) + assert result.name is None + + ns.name = 'foo' + store.append('ns', ns) + result = store['ns'] + tm.assert_series_equal(result, ns) + assert result.name == ns.name + + # select on the values + expected = ns[ns > 60] + result = store.select('ns', 'foo>60') + tm.assert_series_equal(result, expected) + + # select on the index and values + expected = ns[(ns > 70) & (ns.index < 90)] + result = store.select('ns', 'foo>70 and index<90') + tm.assert_series_equal(result, expected) + + # multi-index + mi = DataFrame(np.random.randn(5, 1), columns=['A']) + mi['B'] = np.arange(len(mi)) + mi['C'] = 'foo' + mi.loc[3:5, 'C'] = 'bar' + mi.set_index(['C', 'B'], inplace=True) + s = mi.stack() + s.index = s.index.droplevel(2) + store.append('mi', s) + tm.assert_series_equal(store['mi'], s) + + +def test_append_some_nans(): + with ensure_clean_store() as store: + df = DataFrame({'A': Series(np.random.randn(20)).astype('int32'), + 'A1': np.random.randn(20), + 'A2': np.random.randn(20), + 'B': 'foo', 'C': 'bar', + 'D': Timestamp("20010101"), + 'E': datetime.datetime(2001, 1, 2, 0, 0)}, + index=np.arange(20)) + # some nans + _maybe_remove(store, 'df1') + df.loc[0:15, ['A1', 'B', 'D', 'E']] = np.nan + store.append('df1', df[:10]) + store.append('df1', df[10:]) + tm.assert_frame_equal(store['df1'], df) + + # first column + df1 = df.copy() + df1.loc[:, 'A1'] = np.nan + _maybe_remove(store, 'df1') + store.append('df1', df1[:10]) + store.append('df1', df1[10:]) + tm.assert_frame_equal(store['df1'], df1) + + # 2nd column + df2 = df.copy() + df2.loc[:, 'A2'] = np.nan + _maybe_remove(store, 'df2') + store.append('df2', df2[:10]) + store.append('df2', df2[10:]) + tm.assert_frame_equal(store['df2'], df2) + + # datetimes + df3 = df.copy() + df3.loc[:, 'E'] = np.nan + _maybe_remove(store, 'df3') + store.append('df3', df3[:10]) + store.append('df3', df3[10:]) + tm.assert_frame_equal(store['df3'], df3) + + +def test_append_all_nans(): + with ensure_clean_store() as store: + df = DataFrame({'A1': np.random.randn(20), + 'A2': np.random.randn(20)}, + index=np.arange(20)) + df.loc[0:15, :] = np.nan + + # nan some entire rows (dropna=True) + _maybe_remove(store, 'df') + store.append('df', df[:10], dropna=True) + store.append('df', df[10:], dropna=True) + tm.assert_frame_equal(store['df'], df[-4:]) + + # nan some entire rows (dropna=False) + _maybe_remove(store, 'df2') + store.append('df2', df[:10], dropna=False) + store.append('df2', df[10:], dropna=False) + tm.assert_frame_equal(store['df2'], 
df) + + # tests the option io.hdf.dropna_table + pd.set_option('io.hdf.dropna_table', False) + _maybe_remove(store, 'df3') + store.append('df3', df[:10]) + store.append('df3', df[10:]) + tm.assert_frame_equal(store['df3'], df) + + pd.set_option('io.hdf.dropna_table', True) + _maybe_remove(store, 'df4') + store.append('df4', df[:10]) + store.append('df4', df[10:]) + tm.assert_frame_equal(store['df4'], df[-4:]) + + # nan some entire rows (string are still written!) + df = DataFrame({'A1': np.random.randn(20), + 'A2': np.random.randn(20), + 'B': 'foo', 'C': 'bar'}, + index=np.arange(20)) + + df.loc[0:15, :] = np.nan + + _maybe_remove(store, 'df') + store.append('df', df[:10], dropna=True) + store.append('df', df[10:], dropna=True) + tm.assert_frame_equal(store['df'], df) + + _maybe_remove(store, 'df2') + store.append('df2', df[:10], dropna=False) + store.append('df2', df[10:], dropna=False) + tm.assert_frame_equal(store['df2'], df) + + # nan some entire rows (but since we have dates they are still + # written!) + df = DataFrame({'A1': np.random.randn(20), + 'A2': np.random.randn(20), + 'B': 'foo', 'C': 'bar', + 'D': Timestamp("20010101"), + 'E': datetime.datetime(2001, 1, 2, 0, 0)}, + index=np.arange(20)) + + df.loc[0:15, :] = np.nan + + _maybe_remove(store, 'df') + store.append('df', df[:10], dropna=True) + store.append('df', df[10:], dropna=True) + tm.assert_frame_equal(store['df'], df) + + _maybe_remove(store, 'df2') + store.append('df2', df[:10], dropna=False) + store.append('df2', df[10:], dropna=False) + tm.assert_frame_equal(store['df2'], df) + + # Test to make sure defaults are to not drop. + # Corresponding to Issue 9382 + df_with_missing = DataFrame( + {'col1': [0, np.nan, 2], 'col2': [1, np.nan, np.nan]}) + + with ensure_clean_path() as path: + df_with_missing.to_hdf(path, 'df_with_missing', format='table') + reloaded = read_hdf(path, 'df_with_missing') + tm.assert_frame_equal(df_with_missing, reloaded) + + matrix = [[[np.nan, np.nan, np.nan], [1, np.nan, np.nan]], + [[np.nan, np.nan, np.nan], [np.nan, 5, 6]], + [[np.nan, np.nan, np.nan], [np.nan, 3, np.nan]]] + + with catch_warnings(record=True): + panel_with_missing = Panel(matrix, + items=['Item1', 'Item2', 'Item3'], + major_axis=[1, 2], + minor_axis=['A', 'B', 'C']) + + with ensure_clean_path() as path: + panel_with_missing.to_hdf( + path, 'panel_with_missing', format='table') + reloaded_panel = read_hdf(path, 'panel_with_missing') + tm.assert_panel_equal(panel_with_missing, reloaded_panel) + + +def test_append_frame_column_oriented(): + with ensure_clean_store() as store: + # column oriented + df = tm.makeTimeDataFrame() + _maybe_remove(store, 'df1') + store.append('df1', df.iloc[:, :2], axes=['columns']) + store.append('df1', df.iloc[:, 2:]) + tm.assert_frame_equal(store['df1'], df) + + result = store.select('df1', 'columns=A') + expected = df.reindex(columns=['A']) + tm.assert_frame_equal(expected, result) + + # selection on the non-indexable + result = store.select( + 'df1', ('columns=A', 'index=df.index[0:4]')) + expected = df.reindex(columns=['A'], index=df.index[0:4]) + tm.assert_frame_equal(expected, result) + + # this isn't supported + with pytest.raises(TypeError): + store.select('df1', + 'columns=A and index>df.index[4]') + + +def test_append_with_different_block_ordering(): + # GH 4096; using same frames, but different block orderings + with ensure_clean_store() as store: + for i in range(10): + df = DataFrame(np.random.randn(10, 2), columns=list('AB')) + df['index'] = range(10) + df['index'] += i * 10 + df['int64'] 
= Series([1] * len(df), dtype='int64') + df['int16'] = Series([1] * len(df), dtype='int16') + + if i % 2 == 0: + del df['int64'] + df['int64'] = Series([1] * len(df), dtype='int64') + if i % 3 == 0: + a = df.pop('A') + df['A'] = a + + df.set_index('index', inplace=True) + store.append('df', df) + + # test a different ordering but with more fields (like invalid + # combinate) + with ensure_clean_store() as store: + df = DataFrame(np.random.randn(10, 2), + columns=list('AB'), dtype='float64') + df['int64'] = Series([1] * len(df), dtype='int64') + df['int16'] = Series([1] * len(df), dtype='int16') + store.append('df', df) + + # store additional fields in different blocks + df['int16_2'] = Series([1] * len(df), dtype='int16') + pytest.raises(ValueError, store.append, 'df', df) + + # store multile additional fields in different blocks + df['float_3'] = Series([1.] * len(df), dtype='float64') + pytest.raises(ValueError, store.append, 'df', df) + + +def test_append_with_strings(): + with ensure_clean_store() as store: + with catch_warnings(record=True): + wp = tm.makePanel() + wp2 = wp.rename_axis( + {x: "%s_extra" % x for x in wp.minor_axis}, axis=2) + + def check_col(key, name, size): + assert getattr(store.get_storer(key) + .table.description, name).itemsize == size + + store.append('s1', wp, min_itemsize=20) + store.append('s1', wp2) + expected = concat([wp, wp2], axis=2) + expected = expected.reindex( + minor_axis=sorted(expected.minor_axis)) + tm.assert_panel_equal(store['s1'], expected) + check_col('s1', 'minor_axis', 20) + + # test dict format + store.append('s2', wp, min_itemsize={'minor_axis': 20}) + store.append('s2', wp2) + expected = concat([wp, wp2], axis=2) + expected = expected.reindex( + minor_axis=sorted(expected.minor_axis)) + tm.assert_panel_equal(store['s2'], expected) + check_col('s2', 'minor_axis', 20) + + # apply the wrong field (similar to #1) + store.append('s3', wp, min_itemsize={'major_axis': 20}) + pytest.raises(ValueError, store.append, 's3', wp2) + + # test truncation of bigger strings + store.append('s4', wp) + pytest.raises(ValueError, store.append, 's4', wp2) + + # avoid truncation on elements + df = DataFrame([[123, 'asdqwerty'], [345, 'dggnhebbsdfbdfb']]) + store.append('df_big', df) + tm.assert_frame_equal(store.select('df_big'), df) + check_col('df_big', 'values_block_1', 15) + + # appending smaller string ok + df2 = DataFrame([[124, 'asdqy'], [346, 'dggnhefbdfb']]) + store.append('df_big', df2) + expected = concat([df, df2]) + tm.assert_frame_equal(store.select('df_big'), expected) + check_col('df_big', 'values_block_1', 15) + + # avoid truncation on elements + df = DataFrame([[123, 'asdqwerty'], [345, 'dggnhebbsdfbdfb']]) + store.append('df_big2', df, min_itemsize={'values': 50}) + tm.assert_frame_equal(store.select('df_big2'), df) + check_col('df_big2', 'values_block_1', 50) + + # bigger string on next append + store.append('df_new', df) + df_new = DataFrame( + [[124, 'abcdefqhij'], [346, 'abcdefghijklmnopqrtsuvwxyz']]) + pytest.raises(ValueError, store.append, 'df_new', df_new) + + # min_itemsize on Series index (GH 11412) + df = tm.makeMixedDataFrame().set_index('C') + store.append('ss', df['B'], min_itemsize={'index': 4}) + tm.assert_series_equal(store.select('ss'), df['B']) + + # same as above, with data_columns=True + store.append('ss2', df['B'], data_columns=True, + min_itemsize={'index': 4}) + tm.assert_series_equal(store.select('ss2'), df['B']) + + # min_itemsize in index without appending (GH 10381) + store.put('ss3', df, format='table', + 
min_itemsize={'index': 6}) + # just make sure there is a longer string: + df2 = df.copy().reset_index().assign(C='longer').set_index('C') + store.append('ss3', df2) + tm.assert_frame_equal(store.select('ss3'), + pd.concat([df, df2])) + + # same as above, with a Series + store.put('ss4', df['B'], format='table', + min_itemsize={'index': 6}) + store.append('ss4', df2['B']) + tm.assert_series_equal(store.select('ss4'), + pd.concat([df['B'], df2['B']])) + + # with nans + _maybe_remove(store, 'df') + df = tm.makeTimeDataFrame() + df['string'] = 'foo' + df.loc[1:4, 'string'] = np.nan + df['string2'] = 'bar' + df.loc[4:8, 'string2'] = np.nan + df['string3'] = 'bah' + df.loc[1:, 'string3'] = np.nan + store.append('df', df) + result = store.select('df') + tm.assert_frame_equal(result, df) + + with ensure_clean_store() as store: + def check_col(key, name, size): + assert getattr(store.get_storer(key) + .table.description, name).itemsize, size + + df = DataFrame(dict(A='foo', B='bar'), index=range(10)) + + # a min_itemsize that creates a data_column + _maybe_remove(store, 'df') + store.append('df', df, min_itemsize={'A': 200}) + check_col('df', 'A', 200) + assert store.get_storer('df').data_columns == ['A'] + + # a min_itemsize that creates a data_column2 + _maybe_remove(store, 'df') + store.append('df', df, data_columns=['B'], min_itemsize={'A': 200}) + check_col('df', 'A', 200) + assert store.get_storer('df').data_columns == ['B', 'A'] + + # a min_itemsize that creates a data_column2 + _maybe_remove(store, 'df') + store.append('df', df, data_columns=[ + 'B'], min_itemsize={'values': 200}) + check_col('df', 'B', 200) + check_col('df', 'values_block_0', 200) + assert store.get_storer('df').data_columns == ['B'] + + # infer the .typ on subsequent appends + _maybe_remove(store, 'df') + store.append('df', df[:5], min_itemsize=200) + store.append('df', df[5:], min_itemsize=200) + tm.assert_frame_equal(store['df'], df) + + # invalid min_itemsize keys + df = DataFrame(['foo', 'foo', 'foo', 'barh', + 'barh', 'barh'], columns=['A']) + _maybe_remove(store, 'df') + pytest.raises(ValueError, store.append, 'df', + df, min_itemsize={'foo': 20, 'foobar': 20}) + + +def test_append_with_data_columns(): + with ensure_clean_store() as store: + df = tm.makeTimeDataFrame() + df.iloc[0, df.columns.get_loc('B')] = 1. 
+ _maybe_remove(store, 'df') + store.append('df', df[:2], data_columns=['B']) + store.append('df', df[2:]) + tm.assert_frame_equal(store['df'], df) + + # check that we have indicies created + assert(store._handle.root.df.table.cols.index.is_indexed is True) + assert(store._handle.root.df.table.cols.B.is_indexed is True) + + # data column searching + result = store.select('df', 'B>0') + expected = df[df.B > 0] + tm.assert_frame_equal(result, expected) + + # data column searching (with an indexable and a data_columns) + result = store.select( + 'df', 'B>0 and index>df.index[3]') + df_new = df.reindex(index=df.index[4:]) + expected = df_new[df_new.B > 0] + tm.assert_frame_equal(result, expected) + + # data column selection with a string data_column + df_new = df.copy() + df_new['string'] = 'foo' + df_new.loc[1:4, 'string'] = np.nan + df_new.loc[5:6, 'string'] = 'bar' + _maybe_remove(store, 'df') + store.append('df', df_new, data_columns=['string']) + result = store.select('df', "string='foo'") + expected = df_new[df_new.string == 'foo'] + tm.assert_frame_equal(result, expected) + + # using min_itemsize and a data column + def check_col(key, name, size): + assert getattr(store.get_storer(key) + .table.description, name).itemsize == size + + with ensure_clean_store() as store: + _maybe_remove(store, 'df') + store.append('df', df_new, data_columns=['string'], + min_itemsize={'string': 30}) + check_col('df', 'string', 30) + _maybe_remove(store, 'df') + store.append( + 'df', df_new, data_columns=['string'], min_itemsize=30) + check_col('df', 'string', 30) + _maybe_remove(store, 'df') + store.append('df', df_new, data_columns=['string'], + min_itemsize={'values': 30}) + check_col('df', 'string', 30) + + with ensure_clean_store() as store: + df_new['string2'] = 'foobarbah' + df_new['string_block1'] = 'foobarbah1' + df_new['string_block2'] = 'foobarbah2' + _maybe_remove(store, 'df') + store.append('df', df_new, data_columns=['string', 'string2'], + min_itemsize={'string': 30, 'string2': 40, + 'values': 50}) + check_col('df', 'string', 30) + check_col('df', 'string2', 40) + check_col('df', 'values_block_1', 50) + + with ensure_clean_store() as store: + # multiple data columns + df_new = df.copy() + df_new.iloc[0, df_new.columns.get_loc('A')] = 1. + df_new.iloc[0, df_new.columns.get_loc('B')] = -1. 
+ df_new['string'] = 'foo' + + sl = df_new.columns.get_loc('string') + df_new.iloc[1:4, sl] = np.nan + df_new.iloc[5:6, sl] = 'bar' + + df_new['string2'] = 'foo' + sl = df_new.columns.get_loc('string2') + df_new.iloc[2:5, sl] = np.nan + df_new.iloc[7:8, sl] = 'bar' + _maybe_remove(store, 'df') + store.append( + 'df', df_new, data_columns=['A', 'B', 'string', 'string2']) + result = store.select('df', + "string='foo' and string2='foo'" + " and A>0 and B<0") + expected = df_new[(df_new.string == 'foo') & ( + df_new.string2 == 'foo') & (df_new.A > 0) & (df_new.B < 0)] + tm.assert_frame_equal(result, expected, check_index_type=False) + + # yield an empty frame + result = store.select('df', "string='foo' and string2='cool'") + expected = df_new[(df_new.string == 'foo') & ( + df_new.string2 == 'cool')] + tm.assert_frame_equal(result, expected, check_index_type=False) + + with ensure_clean_store() as store: + # doc example + df_dc = df.copy() + df_dc['string'] = 'foo' + df_dc.loc[4:6, 'string'] = np.nan + df_dc.loc[7:9, 'string'] = 'bar' + df_dc['string2'] = 'cool' + df_dc['datetime'] = Timestamp('20010102') + df_dc = df_dc._convert(datetime=True) + df_dc.loc[3:5, ['A', 'B', 'datetime']] = np.nan + + _maybe_remove(store, 'df_dc') + store.append('df_dc', df_dc, + data_columns=['B', 'C', 'string', + 'string2', 'datetime']) + result = store.select('df_dc', 'B>0') + + expected = df_dc[df_dc.B > 0] + tm.assert_frame_equal(result, expected, check_index_type=False) + + result = store.select( + 'df_dc', ['B > 0', 'C > 0', 'string == foo']) + expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & ( + df_dc.string == 'foo')] + tm.assert_frame_equal(result, expected, check_index_type=False) + + with ensure_clean_store() as store: + # doc example part 2 + np.random.seed(1234) + index = date_range('1/1/2000', periods=8) + df_dc = DataFrame(np.random.randn(8, 3), index=index, + columns=['A', 'B', 'C']) + df_dc['string'] = 'foo' + df_dc.loc[4:6, 'string'] = np.nan + df_dc.loc[7:9, 'string'] = 'bar' + df_dc.loc[:, ['B', 'C']] = df_dc.loc[:, ['B', 'C']].abs() + df_dc['string2'] = 'cool' + + # on-disk operations + store.append('df_dc', df_dc, data_columns=[ + 'B', 'C', 'string', 'string2']) + + result = store.select('df_dc', 'B>0') + expected = df_dc[df_dc.B > 0] + tm.assert_frame_equal(result, expected) + + result = store.select( + 'df_dc', ['B > 0', 'C > 0', 'string == "foo"']) + expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & + (df_dc.string == 'foo')] + tm.assert_frame_equal(result, expected) + + with ensure_clean_store() as store: + with catch_warnings(record=True): + # panel + # GH5717 not handling data_columns + np.random.seed(1234) + p = tm.makePanel() + + store.append('p1', p) + tm.assert_panel_equal(store.select('p1'), p) + + store.append('p2', p, data_columns=True) + tm.assert_panel_equal(store.select('p2'), p) + + result = store.select('p2', where='ItemA>0') + expected = p.to_frame() + expected = expected[expected['ItemA'] > 0] + tm.assert_frame_equal(result.to_frame(), expected) + + result = store.select( + 'p2', where='ItemA>0 & minor_axis=["A","B"]') + expected = p.to_frame() + expected = expected[expected['ItemA'] > 0] + expected = expected[expected.reset_index( + level=['major']).index.isin(['A', 'B'])] + tm.assert_frame_equal(result.to_frame(), expected) + + +def test_append_diff_item_order(): + with catch_warnings(record=True): + wp = tm.makePanel() + wp1 = wp.iloc[:, :10, :] + wp2 = wp.iloc[wp.items.get_indexer(['ItemC', 'ItemB', 'ItemA']), + 10:, :] + + with ensure_clean_store() as store: + 
store.put('panel', wp1, format='table') + pytest.raises(ValueError, store.put, 'panel', wp2, + append=True) + + +def test_append_hierarchical(): + index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'], + ['one', 'two', 'three']], + labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], + [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]], + names=['foo', 'bar']) + df = DataFrame(np.random.randn(10, 3), index=index, + columns=['A', 'B', 'C']) + + with ensure_clean_store() as store: + store.append('mi', df) + result = store.select('mi') + tm.assert_frame_equal(result, df) + + # GH 3748 + result = store.select('mi', columns=['A', 'B']) + expected = df.reindex(columns=['A', 'B']) + tm.assert_frame_equal(result, expected) + + with ensure_clean_path('test.hdf') as path: + df.to_hdf(path, 'df', format='table') + result = read_hdf(path, 'df', columns=['A', 'B']) + expected = df.reindex(columns=['A', 'B']) + tm.assert_frame_equal(result, expected) + + +def test_append_misc(): + with ensure_clean_store() as store: + df = tm.makeDataFrame() + store.append('df', df, chunksize=1) + result = store.select('df') + tm.assert_frame_equal(result, df) + + store.append('df1', df, expectedrows=10) + result = store.select('df1') + tm.assert_frame_equal(result, df) + + # more chunksize in append tests + def check(obj, comparator): + for c in [10, 200, 1000]: + with ensure_clean_store(mode='w') as store: + store.append('obj', obj, chunksize=c) + result = store.select('obj') + comparator(result, obj) + + df = tm.makeDataFrame() + df['string'] = 'foo' + df['float322'] = 1. + df['float322'] = df['float322'].astype('float32') + df['bool'] = df['float322'] > 0 + df['time1'] = Timestamp('20130101') + df['time2'] = Timestamp('20130102') + check(df, tm.assert_frame_equal) + + with catch_warnings(record=True): + p = tm.makePanel() + check(p, tm.assert_panel_equal) + + # empty frame, GH4273 + with ensure_clean_store() as store: + # 0 len + df_empty = DataFrame(columns=list('ABC')) + store.append('df', df_empty) + pytest.raises(KeyError, store.select, 'df') + + # repeated append of 0/non-zero frames + df = DataFrame(np.random.rand(10, 3), columns=list('ABC')) + store.append('df', df) + tm.assert_frame_equal(store.select('df'), df) + store.append('df', df_empty) + tm.assert_frame_equal(store.select('df'), df) + + # store + df = DataFrame(columns=list('ABC')) + store.put('df2', df) + tm.assert_frame_equal(store.select('df2'), df) + + with catch_warnings(record=True): + # 0 len + p_empty = Panel(items=list('ABC')) + store.append('p', p_empty) + pytest.raises(KeyError, store.select, 'p') + + # repeated append of 0/non-zero frames + p = Panel(np.random.randn(3, 4, 5), items=list('ABC')) + store.append('p', p) + tm.assert_panel_equal(store.select('p'), p) + store.append('p', p_empty) + tm.assert_panel_equal(store.select('p'), p) + + # store + store.put('p2', p_empty) + tm.assert_panel_equal(store.select('p2'), p_empty) + + +def test_append_raise(): + with ensure_clean_store() as store: + # test append with invalid input to get good error messages + # list in column + df = tm.makeDataFrame() + df['invalid'] = [['a']] * len(df) + assert df.dtypes['invalid'] == np.object_ + pytest.raises(TypeError, store.append, 'df', df) + + # multiple invalid columns + df['invalid2'] = [['a']] * len(df) + df['invalid3'] = [['a']] * len(df) + pytest.raises(TypeError, store.append, 'df', df) + + # datetime with embedded nans as object + df = tm.makeDataFrame() + s = Series(datetime.datetime(2001, 1, 2), index=df.index) + s = s.astype(object) + s[0:5] = np.nan + df['invalid'] = s + 
assert df.dtypes['invalid'] == np.object_ + pytest.raises(TypeError, store.append, 'df', df) + + # directly ndarray + pytest.raises(TypeError, store.append, 'df', np.arange(10)) + + # series directly + pytest.raises(TypeError, store.append, + 'df', Series(np.arange(10))) + + # appending an incompatible table + df = tm.makeDataFrame() + store.append('df', df) + + df['foo'] = 'foo' + pytest.raises(ValueError, store.append, 'df', df) + + +def test_append_with_timedelta(): + # GH 3577 + # append timedelta + df = DataFrame(dict(A=Timestamp('20130101'), B=[Timestamp( + '20130101') + timedelta(days=i, seconds=10) for i in range(10)])) + df['C'] = df['A'] - df['B'] + df.loc[3:5, 'C'] = np.nan + + with ensure_clean_store() as store: + # table + _maybe_remove(store, 'df') + store.append('df', df, data_columns=True) + result = store.select('df') + tm.assert_frame_equal(result, df) + + result = store.select('df', where="C<100000") + tm.assert_frame_equal(result, df) + + result = store.select('df', where="C<pd.Timedelta('-3D')") + tm.assert_frame_equal(result, df.iloc[3:]) + + result = store.select('df', "C<'-3D'") + tm.assert_frame_equal(result, df.iloc[3:]) + + # a bit hacky here as we don't really deal with the NaT properly + + result = store.select('df', "C<'-500000s'") + result = result.dropna(subset=['C']) + tm.assert_frame_equal(result, df.iloc[6:]) + + result = store.select('df', "C<'-3.5D'") + result = result.iloc[1:] + tm.assert_frame_equal(result, df.iloc[4:]) + + # fixed + _maybe_remove(store, 'df2') + store.put('df2', df) + result = store.select('df2') + tm.assert_frame_equal(result, df) + + +def test_append_with_diff_col_name_types_raises_value_error(): + df = DataFrame(np.random.randn(10, 1)) + df2 = DataFrame({'a': np.random.randn(10)}) + df3 = DataFrame({(1, 2): np.random.randn(10)}) + df4 = DataFrame({('1', 2): np.random.randn(10)}) + df5 = DataFrame({('1', 2, object): np.random.randn(10)}) + + with ensure_clean_store() as store: + name = 'df_%s' % tm.rands(10) + store.append(name, df) + + for d in (df2, df3, df4, df5): + with pytest.raises(ValueError): + store.append(name, d) + + +def test_append_to_multiple(): + df1 = tm.makeTimeDataFrame() + df2 = tm.makeTimeDataFrame().rename(columns=lambda x: "%s_2" % x) + df2['foo'] = 'bar' + df = concat([df1, df2], axis=1) + + with ensure_clean_store() as store: + # exceptions + pytest.raises(ValueError, store.append_to_multiple, + {'df1': ['A', 'B'], 'df2': None}, df, + selector='df3') + pytest.raises(ValueError, store.append_to_multiple, + {'df1': None, 'df2': None}, df, selector='df3') + pytest.raises( + ValueError, store.append_to_multiple, 'df1', df, 'df1') + + # regular operation + store.append_to_multiple( + {'df1': ['A', 'B'], 'df2': None}, df, selector='df1') + result = store.select_as_multiple( + ['df1', 'df2'], where=['A>0', 'B>0'], selector='df1') + expected = df[(df.A > 0) & (df.B > 0)] + tm.assert_frame_equal(result, expected) + + +def test_append_to_multiple_dropna(): + df1 = tm.makeTimeDataFrame() + df2 = tm.makeTimeDataFrame().rename(columns=lambda x: "%s_2" % x) + df1.iloc[1, df1.columns.get_indexer(['A', 'B'])] = np.nan + df = concat([df1, df2], axis=1) + + with ensure_clean_store() as store: + # dropna=True should guarantee rows are synchronized + store.append_to_multiple( + {'df1': ['A', 'B'], 'df2': None}, df, selector='df1', + dropna=True) + result = store.select_as_multiple(['df1', 'df2']) + expected = df.dropna() + tm.assert_frame_equal(result, expected) + tm.assert_index_equal(store.select('df1').index, + 
store.select('df2').index) + + +@pytest.mark.xfail(run=False, + reason="append_to_multiple_dropna_false " + "is not raising as failed") +def test_append_to_multiple_dropna_false(): + df1 = tm.makeTimeDataFrame() + df2 = tm.makeTimeDataFrame().rename(columns=lambda x: "%s_2" % x) + df1.iloc[1, df1.columns.get_indexer(['A', 'B'])] = np.nan + df = concat([df1, df2], axis=1) + + with ensure_clean_store() as store: + # dropna=False shouldn't synchronize row indexes + store.append_to_multiple( + {'df1a': ['A', 'B'], 'df2a': None}, df, selector='df1a', + dropna=False) + + with pytest.raises(ValueError): + store.select_as_multiple(['df1a', 'df2a']) + + assert not store.select('df1a').index.equals( + store.select('df2a').index) diff --git a/pandas/tests/io/pytables/test_categorical.py b/pandas/tests/io/pytables/test_categorical.py new file mode 100644 index 0000000000000..8db0b7a196db9 --- /dev/null +++ b/pandas/tests/io/pytables/test_categorical.py @@ -0,0 +1,157 @@ +import pytest + +import numpy as np +import pandas as pd +from pandas import (Series, DataFrame, Categorical, concat) +from .common import ensure_clean_store, ensure_clean_path, _maybe_remove + +import pandas.util.testing as tm +from pandas.io.pytables import read_hdf + + +def test_categorical(): + with ensure_clean_store() as store: + # Basic + _maybe_remove(store, 's') + s = Series(Categorical(['a', 'b', 'b', 'a', 'a', 'c'], categories=[ + 'a', 'b', 'c', 'd'], ordered=False)) + store.append('s', s, format='table') + result = store.select('s') + tm.assert_series_equal(s, result) + + _maybe_remove(store, 's_ordered') + s = Series(Categorical(['a', 'b', 'b', 'a', 'a', 'c'], categories=[ + 'a', 'b', 'c', 'd'], ordered=True)) + store.append('s_ordered', s, format='table') + result = store.select('s_ordered') + tm.assert_series_equal(s, result) + + _maybe_remove(store, 'df') + + df = DataFrame({"s": s, "vals": [1, 2, 3, 4, 5, 6]}) + store.append('df', df, format='table') + result = store.select('df') + tm.assert_frame_equal(result, df) + + # Dtypes + s = Series([1, 1, 2, 2, 3, 4, 5]).astype('category') + store.append('si', s) + result = store.select('si') + tm.assert_series_equal(result, s) + + s = Series([1, 1, np.nan, 2, 3, 4, 5]).astype('category') + store.append('si2', s) + result = store.select('si2') + tm.assert_series_equal(result, s) + + # Multiple + df2 = df.copy() + df2['s2'] = Series(list('abcdefg')).astype('category') + store.append('df2', df2) + result = store.select('df2') + tm.assert_frame_equal(result, df2) + + # Make sure the metadata is OK + info = store.info() + assert '/df2 ' in info + # assert '/df2/meta/values_block_0/meta' in info + assert '/df2/meta/values_block_1/meta' in info + + # unordered + s = Series(Categorical(['a', 'b', 'b', 'a', 'a', 'c'], categories=[ + 'a', 'b', 'c', 'd'], ordered=False)) + store.append('s2', s, format='table') + result = store.select('s2') + tm.assert_series_equal(result, s) + + # Query + store.append('df3', df, data_columns=['s']) + expected = df[df.s.isin(['b', 'c'])] + result = store.select('df3', where=['s in ["b","c"]']) + tm.assert_frame_equal(result, expected) + + expected = df[df.s.isin(['b', 'c'])] + result = store.select('df3', where=['s = ["b","c"]']) + tm.assert_frame_equal(result, expected) + + expected = df[df.s.isin(['d'])] + result = store.select('df3', where=['s in ["d"]']) + tm.assert_frame_equal(result, expected) + + expected = df[df.s.isin(['f'])] + result = store.select('df3', where=['s in ["f"]']) + tm.assert_frame_equal(result, expected) + + # Appending with same 
categories is ok + store.append('df3', df) + + df = concat([df, df]) + expected = df[df.s.isin(['b', 'c'])] + result = store.select('df3', where=['s in ["b","c"]']) + tm.assert_frame_equal(result, expected) + + # Appending must have the same categories + df3 = df.copy() + df3['s'].cat.remove_unused_categories(inplace=True) + + with pytest.raises(ValueError): + store.append('df3', df3) + + # Remove, and make sure meta data is removed (its a recursive + # removal so should be). + result = store.select('df3/meta/s/meta') + assert result is not None + store.remove('df3') + + with pytest.raises(KeyError): + store.select('df3/meta/s/meta') + + +def test_categorical_conversion(): + # GH13322 + # Check that read_hdf with categorical columns doesn't return rows if + # where criteria isn't met. + obsids = ['ESP_012345_6789', 'ESP_987654_3210'] + imgids = ['APF00006np', 'APF0001imm'] + data = [4.3, 9.8] + + # Test without categories + df = DataFrame(dict(obsids=obsids, imgids=imgids, data=data)) + + # We are expecting an empty DataFrame matching types of df + expected = df.iloc[[], :] + with ensure_clean_path() as path: + df.to_hdf(path, 'df', format='table', data_columns=True) + result = read_hdf(path, 'df', where='obsids=B') + tm.assert_frame_equal(result, expected) + + # Test with categories + df.obsids = df.obsids.astype('category') + df.imgids = df.imgids.astype('category') + + # We are expecting an empty DataFrame matching types of df + expected = df.iloc[[], :] + with ensure_clean_path() as path: + df.to_hdf(path, 'df', format='table', data_columns=True) + result = read_hdf(path, 'df', where='obsids=B') + tm.assert_frame_equal(result, expected) + + +def test_categorical_nan_only_columns(): + # GH18413 + # Check that read_hdf with categorical columns with NaN-only values can + # be read back. 
+ df = pd.DataFrame({ + 'a': ['a', 'b', 'c', np.nan], + 'b': [np.nan, np.nan, np.nan, np.nan], + 'c': [1, 2, 3, 4], + 'd': pd.Series([None] * 4, dtype=object) + }) + df['a'] = df.a.astype('category') + df['b'] = df.b.astype('category') + df['d'] = df.b.astype('category') + expected = df + with ensure_clean_path() as path: + df.to_hdf(path, 'df', format='table', data_columns=True) + result = read_hdf(path, 'df') + tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/io/pytables/test_common.py b/pandas/tests/io/pytables/test_common.py new file mode 100644 index 0000000000000..d69148f40ba31 --- /dev/null +++ b/pandas/tests/io/pytables/test_common.py @@ -0,0 +1,1006 @@ +import pytest +from warnings import catch_warnings +import tempfile +import datetime + +import numpy as np +import pandas as pd +from pandas import (Series, DataFrame, date_range, Index, Timestamp, + DatetimeIndex, compat, concat, Term) +from .common import (ensure_clean_store, safe_close, _check_roundtrip, + safe_remove, _check_roundtrip_table, create_tempfile, + _maybe_remove, ensure_clean_path) + +import pandas.util.testing as tm +import pandas.util._test_decorators as td +from pandas.compat import lrange, u +from pandas.io.pytables import HDFStore, get_store, read_hdf + + +def test_factory_fun(): + path = create_tempfile() + try: + with catch_warnings(record=True): + with get_store(path) as tbl: + raise ValueError('blah') + except ValueError: + pass + finally: + safe_remove(path) + + try: + with catch_warnings(record=True): + with get_store(path) as tbl: + tbl['a'] = tm.makeDataFrame() + + with catch_warnings(record=True): + with get_store(path) as tbl: + assert len(tbl) == 1 + assert type(tbl['a']) == DataFrame + finally: + safe_remove(path) + + +def test_context(): + path = create_tempfile() + try: + with HDFStore(path) as tbl: + raise ValueError('blah') + except ValueError: + pass + finally: + safe_remove(path) + + try: + with HDFStore(path) as tbl: + tbl['a'] = tm.makeDataFrame() + + with HDFStore(path) as tbl: + assert len(tbl) == 1 + assert type(tbl['a']) == DataFrame + finally: + safe_remove(path) + + +def test_conv_read_write(): + path = create_tempfile() + try: + def roundtrip(key, obj, **kwargs): + obj.to_hdf(path, key, **kwargs) + return read_hdf(path, key) + + o = tm.makeTimeSeries() + tm.assert_series_equal(o, roundtrip('series', o)) + + o = tm.makeStringSeries() + tm.assert_series_equal(o, roundtrip('string_series', o)) + + o = tm.makeDataFrame() + tm.assert_frame_equal(o, roundtrip('frame', o)) + + with catch_warnings(record=True): + o = tm.makePanel() + tm.assert_panel_equal(o, roundtrip('panel', o)) + + # table + df = DataFrame(dict(A=lrange(5), B=lrange(5))) + df.to_hdf(path, 'table', append=True) + result = read_hdf(path, 'table', where=['index>2']) + tm.assert_frame_equal(df[df.index > 2], result) + + finally: + safe_remove(path) + + +def test_long_strings(): + # GH6166 + # unconversion of long strings was being chopped in earlier + # versions of numpy < 1.7.2 + df = DataFrame({'a': tm.rands_array(100, size=10)}, + index=tm.rands_array(100, size=10)) + + with ensure_clean_store() as store: + store.append('df', df, data_columns=['a']) + result = store.select('df') + tm.assert_frame_equal(df, result) + + +def test_api(): + # GH4584 + # API issue when to_hdf doesn't acdept append AND format args + with ensure_clean_path() as path: + df = tm.makeDataFrame() + df.iloc[:10].to_hdf(path, 'df', append=True, format='table') + df.iloc[10:].to_hdf(path, 'df', append=True, format='table') + 
tm.assert_frame_equal(read_hdf(path, 'df'), df) + + # append to False + df.iloc[:10].to_hdf(path, 'df', append=False, format='table') + df.iloc[10:].to_hdf(path, 'df', append=True, format='table') + tm.assert_frame_equal(read_hdf(path, 'df'), df) + + with ensure_clean_path() as path: + df = tm.makeDataFrame() + df.iloc[:10].to_hdf(path, 'df', append=True) + df.iloc[10:].to_hdf(path, 'df', append=True, format='table') + tm.assert_frame_equal(read_hdf(path, 'df'), df) + + # append to False + df.iloc[:10].to_hdf(path, 'df', append=False, format='table') + df.iloc[10:].to_hdf(path, 'df', append=True) + tm.assert_frame_equal(read_hdf(path, 'df'), df) + + with ensure_clean_path() as path: + df = tm.makeDataFrame() + df.to_hdf(path, 'df', append=False, format='fixed') + tm.assert_frame_equal(read_hdf(path, 'df'), df) + + df.to_hdf(path, 'df', append=False, format='f') + tm.assert_frame_equal(read_hdf(path, 'df'), df) + + df.to_hdf(path, 'df', append=False) + tm.assert_frame_equal(read_hdf(path, 'df'), df) + + df.to_hdf(path, 'df') + tm.assert_frame_equal(read_hdf(path, 'df'), df) + + with ensure_clean_store() as store: + path = store._path + df = tm.makeDataFrame() + + _maybe_remove(store, 'df') + store.append('df', df.iloc[:10], append=True, format='table') + store.append('df', df.iloc[10:], append=True, format='table') + tm.assert_frame_equal(store.select('df'), df) + + # append to False + _maybe_remove(store, 'df') + store.append('df', df.iloc[:10], append=False, format='table') + store.append('df', df.iloc[10:], append=True, format='table') + tm.assert_frame_equal(store.select('df'), df) + + # formats + _maybe_remove(store, 'df') + store.append('df', df.iloc[:10], append=False, format='table') + store.append('df', df.iloc[10:], append=True, format='table') + tm.assert_frame_equal(store.select('df'), df) + + _maybe_remove(store, 'df') + store.append('df', df.iloc[:10], append=False, format='table') + store.append('df', df.iloc[10:], append=True, format=None) + tm.assert_frame_equal(store.select('df'), df) + + with ensure_clean_path() as path: + # invalid + df = tm.makeDataFrame() + pytest.raises(ValueError, df.to_hdf, path, + 'df', append=True, format='f') + pytest.raises(ValueError, df.to_hdf, path, + 'df', append=True, format='fixed') + + pytest.raises(TypeError, df.to_hdf, path, + 'df', append=True, format='foo') + pytest.raises(TypeError, df.to_hdf, path, + 'df', append=False, format='bar') + + # File path doesn't exist + path = "" + pytest.raises(compat.FileNotFoundError, + read_hdf, path, 'df') + + +def test_api_default_format(): + # default_format option + with ensure_clean_store() as store: + df = tm.makeDataFrame() + + pd.set_option('io.hdf.default_format', 'fixed') + _maybe_remove(store, 'df') + store.put('df', df) + assert not store.get_storer('df').is_table + pytest.raises(ValueError, store.append, 'df2', df) + + pd.set_option('io.hdf.default_format', 'table') + _maybe_remove(store, 'df') + store.put('df', df) + assert store.get_storer('df').is_table + _maybe_remove(store, 'df2') + store.append('df2', df) + assert store.get_storer('df').is_table + + pd.set_option('io.hdf.default_format', None) + + with ensure_clean_path() as path: + df = tm.makeDataFrame() + + pd.set_option('io.hdf.default_format', 'fixed') + df.to_hdf(path, 'df') + with HDFStore(path) as store: + assert not store.get_storer('df').is_table + pytest.raises(ValueError, df.to_hdf, path, 'df2', append=True) + + pd.set_option('io.hdf.default_format', 'table') + df.to_hdf(path, 'df3') + with HDFStore(path) as store: + 
assert store.get_storer('df3').is_table + df.to_hdf(path, 'df4', append=True) + with HDFStore(path) as store: + assert store.get_storer('df4').is_table + + pd.set_option('io.hdf.default_format', None) + + +def test_keys(): + with ensure_clean_store() as store: + store['a'] = tm.makeTimeSeries() + store['b'] = tm.makeStringSeries() + store['c'] = tm.makeDataFrame() + with catch_warnings(record=True): + store['d'] = tm.makePanel() + store['foo/bar'] = tm.makePanel() + assert len(store) == 5 + expected = set(['/a', '/b', '/c', '/d', '/foo/bar']) + assert set(store.keys()) == expected + assert set(store) == expected + + +def test_iter_empty(): + + with ensure_clean_store() as store: + # GH 12221 + assert list(store) == [] + + +def test_repr(): + with ensure_clean_store() as store: + repr(store) + store.info() + store['a'] = tm.makeTimeSeries() + store['b'] = tm.makeStringSeries() + store['c'] = tm.makeDataFrame() + + with catch_warnings(record=True): + store['d'] = tm.makePanel() + store['foo/bar'] = tm.makePanel() + store.append('e', tm.makePanel()) + + df = tm.makeDataFrame() + df['obj1'] = 'foo' + df['obj2'] = 'bar' + df['bool1'] = df['A'] > 0 + df['bool2'] = df['B'] > 0 + df['bool3'] = True + df['int1'] = 1 + df['int2'] = 2 + df['timestamp1'] = Timestamp('20010102') + df['timestamp2'] = Timestamp('20010103') + df['datetime1'] = datetime.datetime(2001, 1, 2, 0, 0) + df['datetime2'] = datetime.datetime(2001, 1, 3, 0, 0) + df.loc[3:6, ['obj1']] = np.nan + df = df._consolidate()._convert(datetime=True) + + # PerformanceWarning + with catch_warnings(record=True): + store['df'] = df + + # make a random group in hdf space + store._handle.create_group(store._handle.root, 'bah') + + assert store.filename in repr(store) + assert store.filename in str(store) + store.info() + + # storers + with ensure_clean_store() as store: + df = tm.makeDataFrame() + store.append('df', df) + + s = store.get_storer('df') + repr(s) + str(s) + + +def test_versioning(): + with ensure_clean_store() as store: + store['a'] = tm.makeTimeSeries() + store['b'] = tm.makeDataFrame() + df = tm.makeTimeDataFrame() + _maybe_remove(store, 'df1') + store.append('df1', df[:10]) + store.append('df1', df[10:]) + assert store.root.a._v_attrs.pandas_version == '0.15.2' + assert store.root.b._v_attrs.pandas_version == '0.15.2' + assert store.root.df1._v_attrs.pandas_version == '0.15.2' + + # write a file and wipe its versioning + _maybe_remove(store, 'df2') + store.append('df2', df) + + # this is an error because its table_type is appendable, but no + # version info + store.get_node('df2')._v_attrs.pandas_version = None + pytest.raises(Exception, store.select, 'df2') + + +def test_mode(): + df = tm.makeTimeDataFrame() + + def check(mode): + with ensure_clean_path() as path: + # constructor + if mode in ['r', 'r+']: + pytest.raises(IOError, HDFStore, path, mode=mode) + + else: + store = HDFStore(path, mode=mode) + assert store._handle.mode == mode + store.close() + + with ensure_clean_path() as path: + # context + if mode in ['r', 'r+']: + def f(): + with HDFStore(path, mode=mode) as store: # noqa + pass + pytest.raises(IOError, f) + else: + with HDFStore(path, mode=mode) as store: + assert store._handle.mode == mode + + with ensure_clean_path() as path: + # conv write + if mode in ['r', 'r+']: + pytest.raises(IOError, df.to_hdf, + path, 'df', mode=mode) + df.to_hdf(path, 'df', mode='w') + else: + df.to_hdf(path, 'df', mode=mode) + + # conv read + if mode in ['w']: + pytest.raises(ValueError, read_hdf, + path, 'df', mode=mode) + else: + 
result = read_hdf(path, 'df', mode=mode) + tm.assert_frame_equal(result, df) + + def check_default_mode(): + # read_hdf uses default mode + with ensure_clean_path() as path: + df.to_hdf(path, 'df', mode='w') + result = read_hdf(path, 'df') + tm.assert_frame_equal(result, df) + + check('r') + check('r+') + check('a') + check('w') + check_default_mode() + + +def test_flush(): + with ensure_clean_store() as store: + store['a'] = tm.makeTimeSeries() + store.flush() + store.flush(fsync=True) + + +def test_getattr(): + with ensure_clean_store() as store: + s = tm.makeTimeSeries() + store['a'] = s + + # test attribute access + result = store.a + tm.assert_series_equal(result, s) + result = getattr(store, 'a') + tm.assert_series_equal(result, s) + + df = tm.makeTimeDataFrame() + store['df'] = df + result = store.df + tm.assert_frame_equal(result, df) + + # errors + pytest.raises(AttributeError, getattr, store, 'd') + + for x in ['mode', 'path', 'handle', 'complib']: + pytest.raises(AttributeError, getattr, store, x) + + # not stores + for x in ['mode', 'path', 'handle', 'complib']: + getattr(store, "_%s" % x) + + +def test_to_hdf_with_min_itemsize(): + with ensure_clean_path() as path: + # min_itemsize in index with to_hdf (GH 10381) + df = tm.makeMixedDataFrame().set_index('C') + df.to_hdf(path, 'ss3', format='table', min_itemsize={'index': 6}) + # just make sure there is a longer string: + df2 = df.copy().reset_index().assign(C='longer').set_index('C') + df2.to_hdf(path, 'ss3', append=True, format='table') + tm.assert_frame_equal(pd.read_hdf(path, 'ss3'), + pd.concat([df, df2])) + + # same as above, with a Series + df['B'].to_hdf(path, 'ss4', format='table', + min_itemsize={'index': 6}) + df2['B'].to_hdf(path, 'ss4', append=True, format='table') + tm.assert_series_equal(pd.read_hdf(path, 'ss4'), + pd.concat([df['B'], df2['B']])) + + +def test_pass_spec_to_storer(): + df = tm.makeDataFrame() + with ensure_clean_store() as store: + store.put('df', df) + pytest.raises(TypeError, store.select, 'df', columns=['A']) + pytest.raises(TypeError, store.select, + 'df', where=[('columns=A')]) + + +def test_table_values_dtypes_roundtrip(): + with ensure_clean_store() as store: + df1 = DataFrame({'a': [1, 2, 3]}, dtype='f8') + store.append('df_f8', df1) + tm.assert_series_equal(df1.dtypes, store['df_f8'].dtypes) + + df2 = DataFrame({'a': [1, 2, 3]}, dtype='i8') + store.append('df_i8', df2) + tm.assert_series_equal(df2.dtypes, store['df_i8'].dtypes) + + # incompatible dtype + pytest.raises(ValueError, store.append, 'df_i8', df1) + + # check creation/storage/retrieval of float32 (a bit hacky to + # actually create them thought) + df1 = DataFrame( + np.array([[1], [2], [3]], dtype='f4'), columns=['A']) + store.append('df_f4', df1) + tm.assert_series_equal(df1.dtypes, store['df_f4'].dtypes) + assert df1.dtypes[0] == 'float32' + + # check with mixed dtypes + df1 = DataFrame(dict((c, Series(np.random.randn(5), dtype=c)) + for c in ['float32', 'float64', 'int32', + 'int64', 'int16', 'int8'])) + df1['string'] = 'foo' + df1['float322'] = 1. 
+ df1['float322'] = df1['float322'].astype('float32') + df1['bool'] = df1['float32'] > 0 + df1['time1'] = Timestamp('20130101') + df1['time2'] = Timestamp('20130102') + + store.append('df_mixed_dtypes1', df1) + result = store.select('df_mixed_dtypes1').get_dtype_counts() + expected = Series({'float32': 2, 'float64': 1, 'int32': 1, + 'bool': 1, 'int16': 1, 'int8': 1, + 'int64': 1, 'object': 1, 'datetime64[ns]': 2}) + result = result.sort_index() + result = expected.sort_index() + tm.assert_series_equal(result, expected) + + +def test_table_mixed_dtypes(): + # frame + df = tm.makeDataFrame() + df['obj1'] = 'foo' + df['obj2'] = 'bar' + df['bool1'] = df['A'] > 0 + df['bool2'] = df['B'] > 0 + df['bool3'] = True + df['int1'] = 1 + df['int2'] = 2 + df['timestamp1'] = Timestamp('20010102') + df['timestamp2'] = Timestamp('20010103') + df['datetime1'] = datetime.datetime(2001, 1, 2, 0, 0) + df['datetime2'] = datetime.datetime(2001, 1, 3, 0, 0) + df.loc[3:6, ['obj1']] = np.nan + df = df._consolidate()._convert(datetime=True) + + with ensure_clean_store() as store: + store.append('df1_mixed', df) + tm.assert_frame_equal(store.select('df1_mixed'), df) + + with catch_warnings(record=True): + # panel + wp = tm.makePanel() + wp['obj1'] = 'foo' + wp['obj2'] = 'bar' + wp['bool1'] = wp['ItemA'] > 0 + wp['bool2'] = wp['ItemB'] > 0 + wp['int1'] = 1 + wp['int2'] = 2 + wp = wp._consolidate() + + with catch_warnings(record=True): + with ensure_clean_store() as store: + store.append('p1_mixed', wp) + tm.assert_panel_equal(store.select('p1_mixed'), wp) + + +def test_unimplemented_dtypes_table_columns(): + with ensure_clean_store() as store: + l = [('date', datetime.date(2001, 1, 2))] + + # py3 ok for unicode + if not compat.PY3: + l.append(('unicode', u('\\u03c3'))) + + # currently not supported dtypes #### + for n, f in l: + df = tm.makeDataFrame() + df[n] = f + pytest.raises( + TypeError, store.append, 'df1_%s' % n, df) + + # frame + df = tm.makeDataFrame() + df['obj1'] = 'foo' + df['obj2'] = 'bar' + df['datetime1'] = datetime.date(2001, 1, 2) + df = df._consolidate()._convert(datetime=True) + + with ensure_clean_store() as store: + # this fails because we have a date in the object block...... 
+ pytest.raises(TypeError, store.append, 'df_unimplemented', df) + + +def test_same_name_scoping(): + with ensure_clean_store() as store: + df = DataFrame(np.random.randn(20, 2), + index=pd.date_range('20130101', periods=20)) + store.put('df', df, format='table') + expected = df[df.index > pd.Timestamp('20130105')] + + result = store.select('df', 'index>datetime.datetime(2013,1,5)') + tm.assert_frame_equal(result, expected) + + # technically an error, but allow it + result = store.select('df', 'index>datetime.datetime(2013,1,5)') + tm.assert_frame_equal(result, expected) + + result = store.select('df', 'index>datetime.datetime(2013,1,5)') + tm.assert_frame_equal(result, expected) + + +def test_series(): + s = tm.makeStringSeries() + _check_roundtrip(s, tm.assert_series_equal) + + ts = tm.makeTimeSeries() + _check_roundtrip(ts, tm.assert_series_equal) + + ts2 = Series(ts.index, Index(ts.index, dtype=object)) + _check_roundtrip(ts2, tm.assert_series_equal) + + ts3 = Series(ts.values, Index(np.asarray(ts.index, dtype=object), + dtype=object)) + _check_roundtrip(ts3, tm.assert_series_equal, check_index_type=False) + + +@pytest.mark.parametrize("compression", [ + False, pytest.param(True, marks=td.skip_if_windows_python_3) +]) +def test_frame(compression): + df = tm.makeDataFrame() + + # put in some random NAs + df.values[0, 0] = np.nan + df.values[5, 3] = np.nan + + _check_roundtrip_table(df, tm.assert_frame_equal, compression=compression) + _check_roundtrip(df, tm.assert_frame_equal, compression=compression) + + tdf = tm.makeTimeDataFrame() + _check_roundtrip(tdf, tm.assert_frame_equal, compression=compression) + + with ensure_clean_store() as store: + # not consolidated + df['foo'] = np.random.randn(len(df)) + store['df'] = df + recons = store['df'] + assert recons._data.is_consolidated() + + # empty + _check_roundtrip(df[:0], tm.assert_frame_equal) + + +def test_empty_series_frame(): + s0 = Series() + s1 = Series(name='myseries') + df0 = DataFrame() + df1 = DataFrame(index=['a', 'b', 'c']) + df2 = DataFrame(columns=['d', 'e', 'f']) + + _check_roundtrip(s0, tm.assert_series_equal) + _check_roundtrip(s1, tm.assert_series_equal) + _check_roundtrip(df0, tm.assert_frame_equal) + _check_roundtrip(df1, tm.assert_frame_equal) + _check_roundtrip(df2, tm.assert_frame_equal) + + +def test_empty_series(): + for dtype in [np.int64, np.float64, np.object, 'm8[ns]', 'M8[ns]']: + s = Series(dtype=dtype) + _check_roundtrip(s, tm.assert_series_equal) + + +def test_overwrite_node(): + with ensure_clean_store() as store: + store['a'] = tm.makeTimeDataFrame() + ts = tm.makeTimeSeries() + store['a'] = ts + + tm.assert_series_equal(store['a'], ts) + + +def test_invalid_filtering(): + # can't use more than one filter (atm) + df = tm.makeTimeDataFrame() + with ensure_clean_store() as store: + store.put('df', df, format='table') + + # not implemented + pytest.raises(NotImplementedError, store.select, + 'df', "columns=['A'] | columns=['B']") + + # in theory we could deal with this + pytest.raises(NotImplementedError, store.select, + 'df', "columns=['A','B'] & columns=['C']") + + +def test_coordinates(): + df = tm.makeTimeDataFrame() + with ensure_clean_store() as store: + _maybe_remove(store, 'df') + store.append('df', df) + + # all + c = store.select_as_coordinates('df') + assert((c.values == np.arange(len(df.index))).all()) + + # get coordinates back & test vs frame + _maybe_remove(store, 'df') + + df = DataFrame(dict(A=lrange(5), B=lrange(5))) + store.append('df', df) + c = store.select_as_coordinates('df', 
['index<3']) + assert((c.values == np.arange(3)).all()) + result = store.select('df', where=c) + expected = df.loc[0:2, :] + tm.assert_frame_equal(result, expected) + + c = store.select_as_coordinates('df', ['index>=3', 'index<=4']) + assert((c.values == np.arange(2) + 3).all()) + result = store.select('df', where=c) + expected = df.loc[3:4, :] + tm.assert_frame_equal(result, expected) + assert isinstance(c, Index) + + # multiple tables + _maybe_remove(store, 'df1') + _maybe_remove(store, 'df2') + df1 = tm.makeTimeDataFrame() + df2 = tm.makeTimeDataFrame().rename(columns=lambda x: "%s_2" % x) + store.append('df1', df1, data_columns=['A', 'B']) + store.append('df2', df2) + + c = store.select_as_coordinates('df1', ['A>0', 'B>0']) + df1_result = store.select('df1', c) + df2_result = store.select('df2', c) + result = concat([df1_result, df2_result], axis=1) + + expected = concat([df1, df2], axis=1) + expected = expected[(expected.A > 0) & (expected.B > 0)] + tm.assert_frame_equal(result, expected) + + # pass array/mask as the coordinates + with ensure_clean_store() as store: + df = DataFrame(np.random.randn(1000, 2), + index=date_range('20000101', periods=1000)) + store.append('df', df) + c = store.select_column('df', 'index') + where = c[DatetimeIndex(c).month == 5].index + expected = df.iloc[where] + + # locations + result = store.select('df', where=where) + tm.assert_frame_equal(result, expected) + + # boolean + result = store.select('df', where=where) + tm.assert_frame_equal(result, expected) + + # invalid + pytest.raises(ValueError, store.select, 'df', + where=np.arange(len(df), dtype='float64')) + pytest.raises(ValueError, store.select, 'df', + where=np.arange(len(df) + 1)) + pytest.raises(ValueError, store.select, 'df', + where=np.arange(len(df)), start=5) + pytest.raises(ValueError, store.select, 'df', + where=np.arange(len(df)), start=5, stop=10) + + # selection with filter + selection = date_range('20000101', periods=500) + result = store.select('df', where='index in selection') + expected = df[df.index.isin(selection)] + tm.assert_frame_equal(result, expected) + + # list + df = DataFrame(np.random.randn(10, 2)) + store.append('df2', df) + result = store.select('df2', where=[0, 3, 5]) + expected = df.iloc[[0, 3, 5]] + tm.assert_frame_equal(result, expected) + + # boolean + where = [True] * 10 + where[-2] = False + result = store.select('df2', where=where) + expected = df.loc[where] + tm.assert_frame_equal(result, expected) + + # start/stop + result = store.select('df2', start=5, stop=10) + expected = df[5:10] + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize('start, stop', [(0, 2), (1, 2), (None, None)]) +def test_contiguous_mixed_data_table(start, stop): + # GH 17021 + # ValueError when reading a contiguous mixed-data table ft. 
VLArray
+    df = DataFrame({'a': Series([20111010, 20111011, 20111012]),
+                    'b': Series(['ab', 'cd', 'ab'])})
+
+    with ensure_clean_store() as store:
+        store.append('test_dataset', df)
+
+        result = store.select('test_dataset', start=start, stop=stop)
+        tm.assert_frame_equal(df[start:stop], result)
+
+
+def test_copy():
+    with catch_warnings(record=True):
+        def do_copy(f, new_f=None, keys=None,
+                    propindexes=True, **kwargs):
+            fd = store = tstore = None
+            try:
+                store = HDFStore(f, 'r')
+
+                if new_f is None:
+                    fd, new_f = tempfile.mkstemp()
+
+                tstore = store.copy(
+                    new_f, keys=keys, propindexes=propindexes, **kwargs)
+
+                # check keys
+                if keys is None:
+                    keys = store.keys()
+                assert set(keys) == set(tstore.keys())
+
+                # check indices & nrows
+                for k in tstore.keys():
+                    if tstore.get_storer(k).is_table:
+                        new_t = tstore.get_storer(k)
+                        orig_t = store.get_storer(k)
+
+                        assert orig_t.nrows == new_t.nrows
+
+                        # check propindexes
+                        if propindexes:
+                            for a in orig_t.axes:
+                                if a.is_indexed:
+                                    assert new_t[a.name].is_indexed
+
+            finally:
+                safe_close(store)
+                safe_close(tstore)
+                safe_close(fd)
+                safe_remove(new_f)
+
+        # new table
+        df = tm.makeDataFrame()
+
+        path = create_tempfile()
+        try:
+            st = HDFStore(path)
+            st.append('df', df, data_columns=['A'])
+            st.close()
+            do_copy(f=path)
+            do_copy(f=path, propindexes=False)
+        finally:
+            safe_remove(path)
+
+
+def test_duplicate_column_name():
+    df = DataFrame(columns=["a", "a"], data=[[0, 0]])
+
+    with ensure_clean_path() as path:
+        pytest.raises(ValueError, df.to_hdf,
+                      path, 'df', format='fixed')
+
+        df.to_hdf(path, 'df', format='table')
+        other = read_hdf(path, 'df')
+
+        tm.assert_frame_equal(df, other)
+        assert df.equals(other)
+        assert other.equals(df)
+
+
+def test_round_trip_equals():
+    # GH 9330
+    df = DataFrame({"B": [1, 2], "A": ["x", "y"]})
+
+    with ensure_clean_path() as path:
+        df.to_hdf(path, 'df', format='table')
+        other = read_hdf(path, 'df')
+        tm.assert_frame_equal(df, other)
+        assert df.equals(other)
+        assert other.equals(df)
+
+
+def test_to_hdf_with_object_column_names():
+    # GH9057
+    # Writing HDF5 table format should only work for string-like
+    # column types
+    types_should_fail = [tm.makeIntIndex, tm.makeFloatIndex,
+                         tm.makeDateIndex, tm.makeTimedeltaIndex,
+                         tm.makePeriodIndex]
+    types_should_run = [tm.makeStringIndex, tm.makeCategoricalIndex]
+
+    if compat.PY3:
+        types_should_run.append(tm.makeUnicodeIndex)
+    else:
+        types_should_fail.append(tm.makeUnicodeIndex)
+
+    for index in types_should_fail:
+        df = DataFrame(np.random.randn(10, 2), columns=index(2))
+        with ensure_clean_path() as path:
+            with catch_warnings(record=True):
+                with pytest.raises(
+                        ValueError, msg=("cannot have non-object label "
+                                         "DataIndexableCol")):
+                    df.to_hdf(path, 'df', format='table',
+                              data_columns=True)
+
+    for index in types_should_run:
+        df = DataFrame(np.random.randn(10, 2), columns=index(2))
+        with ensure_clean_path() as path:
+            with catch_warnings(record=True):
+                df.to_hdf(path, 'df', format='table', data_columns=True)
+                result = pd.read_hdf(
+                    path, 'df', where="index = [{0}]".format(df.index[0]))
+                assert(len(result))
+
+
+def test_store_series_name():
+    df = tm.makeDataFrame()
+    series = df['A']
+
+    with ensure_clean_store() as store:
+        store['series'] = series
+        recons = store['series']
+        tm.assert_series_equal(recons, series)
+
+
+@pytest.mark.parametrize("compression", [
+    False, pytest.param(True, marks=td.skip_if_windows_python_3)
+])
+def test_store_mixed(compression):
+    def _make_one():
+        df = tm.makeDataFrame()
+        df['obj1'] = 'foo'
+        df['obj2'] = 'bar'
+
df['bool1'] = df['A'] > 0 + df['bool2'] = df['B'] > 0 + df['int1'] = 1 + df['int2'] = 2 + return df._consolidate() + + df1 = _make_one() + df2 = _make_one() + + _check_roundtrip(df1, tm.assert_frame_equal) + _check_roundtrip(df2, tm.assert_frame_equal) + + with ensure_clean_store() as store: + store['obj'] = df1 + tm.assert_frame_equal(store['obj'], df1) + store['obj'] = df2 + tm.assert_frame_equal(store['obj'], df2) + + # check that can store Series of all of these types + _check_roundtrip(df1['obj1'], tm.assert_series_equal, + compression=compression) + _check_roundtrip(df1['bool1'], tm.assert_series_equal, + compression=compression) + _check_roundtrip(df1['int1'], tm.assert_series_equal, + compression=compression) + + +def test_remove(): + with ensure_clean_store() as store: + ts = tm.makeTimeSeries() + df = tm.makeDataFrame() + store['a'] = ts + store['b'] = df + _maybe_remove(store, 'a') + assert len(store) == 1 + tm.assert_frame_equal(df, store['b']) + + _maybe_remove(store, 'b') + assert len(store) == 0 + + # nonexistence + pytest.raises(KeyError, store.remove, 'a_nonexistent_store') + + # pathing + store['a'] = ts + store['b/foo'] = df + _maybe_remove(store, 'foo') + _maybe_remove(store, 'b/foo') + assert len(store) == 1 + + store['a'] = ts + store['b/foo'] = df + _maybe_remove(store, 'b') + assert len(store) == 1 + + # __delitem__ + store['a'] = ts + store['b'] = df + del store['a'] + del store['b'] + assert len(store) == 0 + + +def test_invalid_terms(): + with ensure_clean_store() as store: + with catch_warnings(record=True): + df = tm.makeTimeDataFrame() + df['string'] = 'foo' + df.loc[0:4, 'string'] = 'bar' + wp = tm.makePanel() + + store.put('df', df, format='table') + store.put('wp', wp, format='table') + + # some invalid terms + pytest.raises(ValueError, store.select, + 'wp', "minor=['A', 'B']") + pytest.raises(ValueError, store.select, + 'wp', ["index=['20121114']"]) + pytest.raises(ValueError, store.select, 'wp', [ + "index=['20121114', '20121114']"]) + pytest.raises(TypeError, Term) + + # more invalid + pytest.raises( + ValueError, store.select, 'df', 'df.index[3]') + pytest.raises(SyntaxError, store.select, 'df', 'index>') + pytest.raises( + ValueError, store.select, 'wp', + "major_axis<'20000108' & minor_axis['A', 'B']") + + # from the docs + with ensure_clean_path() as path: + dfq = DataFrame(np.random.randn(10, 4), columns=list( + 'ABCD'), index=date_range('20130101', periods=10)) + dfq.to_hdf(path, 'dfq', format='table', data_columns=True) + + # check ok + read_hdf(path, 'dfq', + where="index>Timestamp('20130104') & columns=['A', 'B']") + read_hdf(path, 'dfq', where="A>0 or C>0") + + # catch the invalid reference + with ensure_clean_path() as path: + dfq = DataFrame(np.random.randn(10, 4), columns=list( + 'ABCD'), index=date_range('20130101', periods=10)) + dfq.to_hdf(path, 'dfq', format='table') + + pytest.raises(ValueError, read_hdf, path, + 'dfq', where="A>0 or C>0") diff --git a/pandas/tests/io/pytables/test_complex.py b/pandas/tests/io/pytables/test_complex.py new file mode 100644 index 0000000000000..96c3df2dd8775 --- /dev/null +++ b/pandas/tests/io/pytables/test_complex.py @@ -0,0 +1,160 @@ +import pytest +from warnings import catch_warnings + +import numpy as np +import pandas as pd +from pandas import Series, DataFrame, Panel +from .common import ensure_clean_store, ensure_clean_path + +import pandas.util.testing as tm +from pandas.io.pytables import read_hdf + + +def test_complex_fixed(): + df = DataFrame(np.random.rand(4, 5).astype(np.complex64), + 
index=list('abcd'), + columns=list('ABCDE')) + + with ensure_clean_path() as path: + df.to_hdf(path, 'df') + reread = read_hdf(path, 'df') + tm.assert_frame_equal(df, reread) + + df = DataFrame(np.random.rand(4, 5).astype(np.complex128), + index=list('abcd'), + columns=list('ABCDE')) + with ensure_clean_path() as path: + df.to_hdf(path, 'df') + reread = read_hdf(path, 'df') + tm.assert_frame_equal(df, reread) + + +def test_complex_table(): + df = DataFrame(np.random.rand(4, 5).astype(np.complex64), + index=list('abcd'), + columns=list('ABCDE')) + + with ensure_clean_path() as path: + df.to_hdf(path, 'df', format='table') + reread = read_hdf(path, 'df') + tm.assert_frame_equal(df, reread) + + df = DataFrame(np.random.rand(4, 5).astype(np.complex128), + index=list('abcd'), + columns=list('ABCDE')) + + with ensure_clean_path() as path: + df.to_hdf(path, 'df', format='table', mode='w') + reread = read_hdf(path, 'df') + tm.assert_frame_equal(df, reread) + + +def test_complex_mixed_fixed(): + complex64 = np.array([1.0 + 1.0j, 1.0 + 1.0j, + 1.0 + 1.0j, 1.0 + 1.0j], dtype=np.complex64) + complex128 = np.array([1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j], + dtype=np.complex128) + df = DataFrame({'A': [1, 2, 3, 4], + 'B': ['a', 'b', 'c', 'd'], + 'C': complex64, + 'D': complex128, + 'E': [1.0, 2.0, 3.0, 4.0]}, + index=list('abcd')) + with ensure_clean_path() as path: + df.to_hdf(path, 'df') + reread = read_hdf(path, 'df') + tm.assert_frame_equal(df, reread) + + +def test_complex_mixed_table(): + complex64 = np.array([1.0 + 1.0j, 1.0 + 1.0j, + 1.0 + 1.0j, 1.0 + 1.0j], dtype=np.complex64) + complex128 = np.array([1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j], + dtype=np.complex128) + df = DataFrame({'A': [1, 2, 3, 4], + 'B': ['a', 'b', 'c', 'd'], + 'C': complex64, + 'D': complex128, + 'E': [1.0, 2.0, 3.0, 4.0]}, + index=list('abcd')) + + with ensure_clean_store() as store: + store.append('df', df, data_columns=['A', 'B']) + result = store.select('df', where='A>2') + tm.assert_frame_equal(df.loc[df.A > 2], result) + + with ensure_clean_path() as path: + df.to_hdf(path, 'df', format='table') + reread = read_hdf(path, 'df') + tm.assert_frame_equal(df, reread) + + +def test_complex_across_dimensions_fixed(): + with catch_warnings(record=True): + complex128 = np.array( + [1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j]) + s = Series(complex128, index=list('abcd')) + df = DataFrame({'A': s, 'B': s}) + p = Panel({'One': df, 'Two': df}) + + objs = [s, df, p] + comps = [tm.assert_series_equal, tm.assert_frame_equal, + tm.assert_panel_equal] + for obj, comp in zip(objs, comps): + with ensure_clean_path() as path: + obj.to_hdf(path, 'obj', format='fixed') + reread = read_hdf(path, 'obj') + comp(obj, reread) + + +def test_complex_across_dimensions(): + complex128 = np.array([1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j]) + s = Series(complex128, index=list('abcd')) + df = DataFrame({'A': s, 'B': s}) + + with catch_warnings(record=True): + p = Panel({'One': df, 'Two': df}) + + objs = [df, p] + comps = [tm.assert_frame_equal, tm.assert_panel_equal] + for obj, comp in zip(objs, comps): + with ensure_clean_path() as path: + obj.to_hdf(path, 'obj', format='table') + reread = read_hdf(path, 'obj') + comp(obj, reread) + + +def test_complex_indexing_error(): + complex128 = np.array([1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j], + dtype=np.complex128) + df = DataFrame({'A': [1, 2, 3, 4], + 'B': ['a', 'b', 'c', 'd'], + 'C': complex128}, + index=list('abcd')) + with ensure_clean_store() as store: + 
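+        # complex values cannot be used as data/query columns, so asking
+        # for 'C' as a data_column must raise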
+        pytest.raises(TypeError, store.append,
+                      'df', df, data_columns=['C'])
+
+
+def test_complex_series_error():
+    complex128 = np.array([1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j])
+    s = Series(complex128, index=list('abcd'))
+
+    with ensure_clean_path() as path:
+        pytest.raises(TypeError, s.to_hdf, path, 'obj', format='t')
+
+    with ensure_clean_path() as path:
+        s.to_hdf(path, 'obj', format='t', index=False)
+        reread = read_hdf(path, 'obj')
+        tm.assert_series_equal(s, reread)
+
+
+def test_complex_append():
+    df = DataFrame({'a': np.random.randn(100).astype(np.complex128),
+                    'b': np.random.randn(100)})
+
+    with ensure_clean_store() as store:
+        store.append('df', df, data_columns=['b'])
+        store.append('df', df)
+        result = store.select('df')
+        tm.assert_frame_equal(pd.concat([df, df], axis=0), result)
diff --git a/pandas/tests/io/pytables/test_complib.py b/pandas/tests/io/pytables/test_complib.py
new file mode 100644
index 0000000000000..1930feee9649f
--- /dev/null
+++ b/pandas/tests/io/pytables/test_complib.py
@@ -0,0 +1,110 @@
+import pytest
+
+import numpy as np
+import pandas as pd
+from pandas import DataFrame
+import pandas.util.testing as tm
+from .common import ensure_clean_path
+
+tables = pytest.importorskip('tables')
+
+
+def test_complibs_default_settings():
+    # GH15943
+    df = tm.makeDataFrame()
+
+    # Set complevel and check if complib is automatically set to
+    # default value
+    with ensure_clean_path() as tmpfile:
+        df.to_hdf(tmpfile, 'df', complevel=9)
+        result = pd.read_hdf(tmpfile, 'df')
+        tm.assert_frame_equal(result, df)
+
+        with tables.open_file(tmpfile, mode='r') as h5file:
+            for node in h5file.walk_nodes(where='/df', classname='Leaf'):
+                assert node.filters.complevel == 9
+                assert node.filters.complib == 'zlib'
+
+    # Set complib and check to see if compression is disabled
+    with ensure_clean_path() as tmpfile:
+        df.to_hdf(tmpfile, 'df', complib='zlib')
+        result = pd.read_hdf(tmpfile, 'df')
+        tm.assert_frame_equal(result, df)
+
+        with tables.open_file(tmpfile, mode='r') as h5file:
+            for node in h5file.walk_nodes(where='/df', classname='Leaf'):
+                assert node.filters.complevel == 0
+                assert node.filters.complib is None
+
+    # Check if not setting complib or complevel results in no compression
+    with ensure_clean_path() as tmpfile:
+        df.to_hdf(tmpfile, 'df')
+        result = pd.read_hdf(tmpfile, 'df')
+        tm.assert_frame_equal(result, df)
+
+        with tables.open_file(tmpfile, mode='r') as h5file:
+            for node in h5file.walk_nodes(where='/df', classname='Leaf'):
+                assert node.filters.complevel == 0
+                assert node.filters.complib is None
+
+    # Check if file-defaults can be overridden on a per table basis
+    with ensure_clean_path() as tmpfile:
+        store = pd.HDFStore(tmpfile)
+        store.append('dfc', df, complevel=9, complib='blosc')
+        store.append('df', df)
+        store.close()
+
+        with tables.open_file(tmpfile, mode='r') as h5file:
+            for node in h5file.walk_nodes(where='/df', classname='Leaf'):
+                assert node.filters.complevel == 0
+                assert node.filters.complib is None
+            for node in h5file.walk_nodes(where='/dfc', classname='Leaf'):
+                assert node.filters.complevel == 9
+                assert node.filters.complib == 'blosc'
+
+
+def test_complibs():
+    # GH14478
+    df = tm.makeDataFrame()
+
+    # Build the list of all (complib, complevel) pairs to test
+    all_complibs = tables.filters.all_complibs
+    # Remove lzo if it's not available on this platform
+    if not tables.which_lib_version('lzo'):
+        all_complibs.remove('lzo')
+    # Remove bzip2 if it's not available on this platform
+    if not tables.which_lib_version("bzip2"):
all_complibs.remove("bzip2") + + all_levels = range(0, 10) + all_tests = [(lib, lvl) for lib in all_complibs for lvl in all_levels] + + for (lib, lvl) in all_tests: + with ensure_clean_path() as tmpfile: + gname = 'foo' + + # Write and read file to see if data is consistent + df.to_hdf(tmpfile, gname, complib=lib, complevel=lvl) + result = pd.read_hdf(tmpfile, gname) + tm.assert_frame_equal(result, df) + + # Open file and check metadata + # for correct amount of compression + h5table = tables.open_file(tmpfile, mode='r') + for node in h5table.walk_nodes(where='/' + gname, + classname='Leaf'): + assert node.filters.complevel == lvl + if lvl == 0: + assert node.filters.complib is None + else: + assert node.filters.complib == lib + h5table.close() + + +def test_invalid_complib(): + df = DataFrame(np.random.rand(4, 5), + index=list('abcd'), + columns=list('ABCDE')) + with ensure_clean_path() as path: + with pytest.raises(ValueError): + df.to_hdf(path, 'df', complib='foolib') diff --git a/pandas/tests/io/pytables/test_datetime.py b/pandas/tests/io/pytables/test_datetime.py new file mode 100644 index 0000000000000..5de5647d1bec8 --- /dev/null +++ b/pandas/tests/io/pytables/test_datetime.py @@ -0,0 +1,411 @@ +import pytest +import datetime +from datetime import timedelta + +import numpy as np +import pandas as pd +from pandas import (Series, DataFrame, date_range, timedelta_range, + bdate_range, Timestamp, DatetimeIndex) +from .common import ensure_clean_store, _check_roundtrip, _maybe_remove +import pandas.util.testing as tm +import pandas.util._test_decorators as td +from pandas._libs.tslibs.timezones import maybe_get_tz +from pandas.compat import lrange + + +def test_calendar_roundtrip_issue(): + # 8591 + # doc example from tseries holiday section + weekmask_egypt = 'Sun Mon Tue Wed Thu' + holidays = ['2012-05-01', + datetime.datetime(2013, 5, 1), np.datetime64('2014-05-01')] + bday_egypt = pd.offsets.CustomBusinessDay( + holidays=holidays, weekmask=weekmask_egypt) + dt = datetime.datetime(2013, 4, 30) + dts = date_range(dt, periods=5, freq=bday_egypt) + + s = (Series(dts.weekday, dts).map( + Series('Mon Tue Wed Thu Fri Sat Sun'.split()))) + + with ensure_clean_store() as store: + store.put('fixed', s) + result = store.select('fixed') + tm.assert_series_equal(result, s) + + store.append('table', s) + result = store.select('table') + tm.assert_series_equal(result, s) + + +def test_roundtrip_tz_aware_index(): + # GH 17618 + time = pd.Timestamp('2000-01-01 01:00:00', tz='US/Eastern') + df = pd.DataFrame(data=[0], index=[time]) + + with ensure_clean_store() as store: + store.put('frame', df, format='fixed') + recons = store['frame'] + tm.assert_frame_equal(recons, df) + assert recons.index[0].value == 946706400000000000 + + +def test_timeseries_preepoch(): + dr = bdate_range('1/1/1940', '1/1/1960') + ts = Series(np.random.randn(len(dr)), index=dr) + try: + _check_roundtrip(ts, tm.assert_series_equal) + except OverflowError: + pytest.skip('known failer on some windows platforms') + + +def test_can_serialize_dates(): + rng = [x.date() for x in bdate_range('1/1/2000', '1/30/2000')] + frame = DataFrame(np.random.randn(len(rng), 4), index=rng) + _check_roundtrip(frame, tm.assert_frame_equal) + + +def test_store_datetime_fractional_secs(): + with ensure_clean_store() as store: + dt = datetime.datetime(2012, 1, 2, 3, 4, 5, 123456) + series = Series([0], [dt]) + store['a'] = series + assert store['a'].index[0] == dt + + +def test_tseries_indices_series(): + with ensure_clean_store() as store: + idx = 
tm.makeDateIndex(10) + ser = Series(np.random.randn(len(idx)), idx) + store['a'] = ser + result = store['a'] + + tm.assert_series_equal(result, ser) + assert result.index.freq == ser.index.freq + tm.assert_class_equal(result.index, ser.index, obj="series index") + + idx = tm.makePeriodIndex(10) + ser = Series(np.random.randn(len(idx)), idx) + store['a'] = ser + result = store['a'] + + tm.assert_series_equal(result, ser) + assert result.index.freq == ser.index.freq + tm.assert_class_equal(result.index, ser.index, obj="series index") + + +def test_tseries_indices_frame(): + with ensure_clean_store() as store: + idx = tm.makeDateIndex(10) + df = DataFrame(np.random.randn(len(idx), 3), index=idx) + store['a'] = df + result = store['a'] + + tm.assert_frame_equal(result, df) + assert result.index.freq == df.index.freq + tm.assert_class_equal(result.index, df.index, + obj="dataframe index") + + idx = tm.makePeriodIndex(10) + df = DataFrame(np.random.randn(len(idx), 3), idx) + store['a'] = df + result = store['a'] + + tm.assert_frame_equal(result, df) + assert result.index.freq == df.index.freq + tm.assert_class_equal(result.index, df.index, + obj="dataframe index") + + +def test_store_datetime_mixed(): + df = DataFrame( + {'a': [1, 2, 3], 'b': [1., 2., 3.], 'c': ['a', 'b', 'c']}) + ts = tm.makeTimeSeries() + df['d'] = ts.index[:3] + _check_roundtrip(df, tm.assert_frame_equal) + + +def test_preserve_timedeltaindex_type(): + # GH9635 + # Storing TimedeltaIndexed DataFrames in fixed stores did not preserve + # the type of the index. + df = DataFrame(np.random.normal(size=(10, 5))) + df.index = timedelta_range( + start='0s', periods=10, freq='1s', name='example') + + with ensure_clean_store() as store: + store['df'] = df + tm.assert_frame_equal(store['df'], df) + + +def _compare_with_tz(a, b): + tm.assert_frame_equal(a, b) + # compare the zones on each element + for c in a.columns: + for i in a.index: + a_e = a.loc[i, c] + b_e = b.loc[i, c] + if not (a_e == b_e and a_e.tz == b_e.tz): + raise AssertionError( + "invalid tz comparison [%s] [%s]" % (a_e, b_e)) + + +def test_append_with_timezones_dateutil(): + + # use maybe_get_tz instead of dateutil.tz.gettz to handle the windows + # filename issues. + gettz = lambda x: maybe_get_tz('dateutil/' + x) + + # as columns + with ensure_clean_store() as store: + + _maybe_remove(store, 'df_tz') + df = DataFrame(dict(A=[Timestamp('20130102 2:00:00', tz=gettz( + 'US/Eastern')) + timedelta(hours=1) * i for i in range(5)])) + + store.append('df_tz', df, data_columns=['A']) + result = store['df_tz'] + _compare_with_tz(result, df) + tm.assert_frame_equal(result, df) + + # select with tz aware + expected = df[df.A >= df.A[3]] + result = store.select('df_tz', where='A>=df.A[3]') + _compare_with_tz(result, expected) + + # ensure we include dates in DST and STD time here. 
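+        # (20130102 falls in standard time and 20130603 in daylight
+        # time for US/Eastern)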
+ _maybe_remove(store, 'df_tz') + df = DataFrame(dict(A=Timestamp('20130102', + tz=gettz('US/Eastern')), + B=Timestamp('20130603', + tz=gettz('US/Eastern'))), + index=range(5)) + store.append('df_tz', df) + result = store['df_tz'] + _compare_with_tz(result, df) + tm.assert_frame_equal(result, df) + + df = DataFrame(dict(A=Timestamp('20130102', + tz=gettz('US/Eastern')), + B=Timestamp('20130102', tz=gettz('EET'))), + index=range(5)) + pytest.raises(ValueError, store.append, 'df_tz', df) + + # this is ok + _maybe_remove(store, 'df_tz') + store.append('df_tz', df, data_columns=['A', 'B']) + result = store['df_tz'] + _compare_with_tz(result, df) + tm.assert_frame_equal(result, df) + + # can't append with diff timezone + df = DataFrame(dict(A=Timestamp('20130102', + tz=gettz('US/Eastern')), + B=Timestamp('20130102', tz=gettz('CET'))), + index=range(5)) + pytest.raises(ValueError, store.append, 'df_tz', df) + + # as index + with ensure_clean_store() as store: + + # GH 4098 example + df = DataFrame(dict(A=Series(lrange(3), index=date_range( + '2000-1-1', periods=3, freq='H', tz=gettz('US/Eastern'))))) + + _maybe_remove(store, 'df') + store.put('df', df) + result = store.select('df') + tm.assert_frame_equal(result, df) + + _maybe_remove(store, 'df') + store.append('df', df) + result = store.select('df') + tm.assert_frame_equal(result, df) + + +def test_append_with_timezones_pytz(): + + # as columns + with ensure_clean_store() as store: + + _maybe_remove(store, 'df_tz') + df = DataFrame(dict(A=[Timestamp('20130102 2:00:00', + tz='US/Eastern') + + timedelta(hours=1) * i + for i in range(5)])) + store.append('df_tz', df, data_columns=['A']) + result = store['df_tz'] + _compare_with_tz(result, df) + tm.assert_frame_equal(result, df) + + # select with tz aware + _compare_with_tz(store.select( + 'df_tz', where='A>=df.A[3]'), df[df.A >= df.A[3]]) + + _maybe_remove(store, 'df_tz') + # ensure we include dates in DST and STD time here. 
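+        # (January is standard time and June is daylight time for
+        # US/Eastern)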
+ df = DataFrame(dict(A=Timestamp('20130102', tz='US/Eastern'), + B=Timestamp('20130603', tz='US/Eastern')), + index=range(5)) + store.append('df_tz', df) + result = store['df_tz'] + _compare_with_tz(result, df) + tm.assert_frame_equal(result, df) + + df = DataFrame(dict(A=Timestamp('20130102', tz='US/Eastern'), + B=Timestamp('20130102', tz='EET')), + index=range(5)) + pytest.raises(ValueError, store.append, 'df_tz', df) + + # this is ok + _maybe_remove(store, 'df_tz') + store.append('df_tz', df, data_columns=['A', 'B']) + result = store['df_tz'] + _compare_with_tz(result, df) + tm.assert_frame_equal(result, df) + + # can't append with diff timezone + df = DataFrame(dict(A=Timestamp('20130102', tz='US/Eastern'), + B=Timestamp('20130102', tz='CET')), + index=range(5)) + pytest.raises(ValueError, store.append, 'df_tz', df) + + # as index + with ensure_clean_store() as store: + + # GH 4098 example + df = DataFrame(dict(A=Series(lrange(3), index=date_range( + '2000-1-1', periods=3, freq='H', tz='US/Eastern')))) + + _maybe_remove(store, 'df') + store.put('df', df) + result = store.select('df') + tm.assert_frame_equal(result, df) + + _maybe_remove(store, 'df') + store.append('df', df) + result = store.select('df') + tm.assert_frame_equal(result, df) + + +def test_tseries_select_index_column(): + # GH7777 + # selecting a UTC datetimeindex column did + # not preserve UTC tzinfo set before storing + + # check that no tz still works + rng = date_range('1/1/2000', '1/30/2000') + frame = DataFrame(np.random.randn(len(rng), 4), index=rng) + + with ensure_clean_store() as store: + store.append('frame', frame) + result = store.select_column('frame', 'index') + assert rng.tz == DatetimeIndex(result.values).tz + + # check utc + rng = date_range('1/1/2000', '1/30/2000', tz='UTC') + frame = DataFrame(np.random.randn(len(rng), 4), index=rng) + + with ensure_clean_store() as store: + store.append('frame', frame) + result = store.select_column('frame', 'index') + assert rng.tz == result.dt.tz + + # double check non-utc + rng = date_range('1/1/2000', '1/30/2000', tz='US/Eastern') + frame = DataFrame(np.random.randn(len(rng), 4), index=rng) + + with ensure_clean_store() as store: + store.append('frame', frame) + result = store.select_column('frame', 'index') + assert rng.tz == result.dt.tz + + +def test_timezones_fixed(): + with ensure_clean_store() as store: + + # index + rng = date_range('1/1/2000', '1/30/2000', tz='US/Eastern') + df = DataFrame(np.random.randn(len(rng), 4), index=rng) + store['df'] = df + result = store['df'] + tm.assert_frame_equal(result, df) + + # as data + # GH11411 + _maybe_remove(store, 'df') + df = DataFrame({'A': rng, + 'B': rng.tz_convert('UTC').tz_localize(None), + 'C': rng.tz_convert('CET'), + 'D': range(len(rng))}, index=rng) + store['df'] = df + result = store['df'] + tm.assert_frame_equal(result, df) + + +def test_fixed_offset_tz(): + rng = date_range('1/1/2000 00:00:00-07:00', '1/30/2000 00:00:00-07:00') + frame = DataFrame(np.random.randn(len(rng), 4), index=rng) + + with ensure_clean_store() as store: + store['frame'] = frame + recons = store['frame'] + tm.assert_index_equal(recons.index, rng) + assert rng.tz == recons.index.tz + + +@td.skip_if_windows +def test_store_timezone(): + # GH2852 + # issue storing datetime.date with a timezone as it resets when read + # back in a new timezone + + # original method + with ensure_clean_store() as store: + + today = datetime.date(2013, 9, 10) + df = DataFrame([1, 2, 3], index=[today, today, today]) + store['obj1'] = df + result = 
store['obj1']
+        tm.assert_frame_equal(result, df)
+
+    # with tz setting
+    with ensure_clean_store() as store:
+
+        with tm.set_timezone('EST5EDT'):
+            today = datetime.date(2013, 9, 10)
+            df = DataFrame([1, 2, 3], index=[today, today, today])
+            store['obj1'] = df
+
+        with tm.set_timezone('CST6CDT'):
+            result = store['obj1']
+
+        tm.assert_frame_equal(result, df)
+
+
+def test_legacy_datetimetz_object():
+    # legacy from < 0.17.0
+    # GH 8260
+    expected = DataFrame(dict(A=Timestamp('20130102', tz='US/Eastern'),
+                              B=Timestamp('20130603', tz='CET')),
+                         index=range(5))
+    with ensure_clean_store(
+            tm.get_data_path('legacy_hdf/datetimetz_object.h5'),
+            mode='r') as store:
+        result = store['df']
+        tm.assert_frame_equal(result, expected)
+
+
+def test_dst_transitions():
+    # make sure we are not failing on transitions
+    with ensure_clean_store() as store:
+        times = pd.date_range("2013-10-26 23:00", "2013-10-27 01:00",
+                              tz="Europe/London",
+                              freq="H",
+                              ambiguous='infer')
+
+        for i in [times, times + pd.Timedelta('10min')]:
+            _maybe_remove(store, 'df')
+            df = DataFrame({'A': range(len(i)), 'B': i}, index=i)
+            store.append('df', df)
+            result = store.select('df')
+            tm.assert_frame_equal(result, df)
diff --git a/pandas/tests/io/pytables/test_encode.py b/pandas/tests/io/pytables/test_encode.py
new file mode 100644
index 0000000000000..56dd19696e6be
--- /dev/null
+++ b/pandas/tests/io/pytables/test_encode.py
@@ -0,0 +1,100 @@
+import pytest
+from warnings import catch_warnings
+
+import numpy as np
+import pandas as pd
+from pandas import (Series, DataFrame, compat)
+from .common import (ensure_clean_store, ensure_clean_path,
+                     _check_roundtrip, _maybe_remove)
+
+import pandas.util.testing as tm
+from pandas.compat import is_platform_little_endian, range, u
+from pandas.io.pytables import Term, read_hdf
+from pandas.core.dtypes.common import is_categorical_dtype
+
+
+@pytest.mark.skipif(not is_platform_little_endian(),
+                    reason="platform is not little endian")
+def test_encoding():
+    with ensure_clean_store() as store:
+        df = DataFrame(dict(A='foo', B='bar'), index=range(5))
+        df.loc[2, 'A'] = np.nan
+        df.loc[3, 'B'] = np.nan
+        _maybe_remove(store, 'df')
+        store.append('df', df, encoding='ascii')
+        tm.assert_frame_equal(store['df'], df)
+
+        expected = df.reindex(columns=['A'])
+        result = store.select('df', Term('columns=A', encoding='ascii'))
+        tm.assert_frame_equal(result, expected)
+
+
+def test_latin_encoding():
+    if compat.PY2:
+        tm.assert_raises_regex(
+            TypeError, r'\[unicode\] is not implemented as a table column')
+        return
+
+    values = [[b'E\xc9, 17', b'', b'a', b'b', b'c'],
+              [b'E\xc9, 17', b'a', b'b', b'c'],
+              [b'EE, 17', b'', b'a', b'b', b'c'],
+              [b'E\xc9, 17', b'\xf8\xfc', b'a', b'b', b'c'],
+              [b'', b'a', b'b', b'c'],
+              [b'\xf8\xfc', b'a', b'b', b'c'],
+              [b'A\xf8\xfc', b'', b'a', b'b', b'c'],
+              [np.nan, b'', b'b', b'c'],
+              [b'A\xf8\xfc', np.nan, b'', b'b', b'c']]
+
+    def _try_decode(x, encoding='latin-1'):
+        try:
+            return x.decode(encoding)
+        except AttributeError:
+            return x
+    # not sure how to remove latin-1 from code in python 2 and 3
+    values = [[_try_decode(x) for x in y] for y in values]
+
+    examples = []
+    for dtype in ['category', object]:
+        for val in values:
+            examples.append(pd.Series(val, dtype=dtype))
+
+    def roundtrip(s, key='data', encoding='latin-1', nan_rep=''):
+        with ensure_clean_path() as store:
+            s.to_hdf(store, key, format='table', encoding=encoding,
+                     nan_rep=nan_rep)
+            retr = read_hdf(store, key)
+            s_nan = s.replace(nan_rep, np.nan)
+            if is_categorical_dtype(s_nan):
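+                # the round trip may relax dtype and category metadata,
+                # hence the loose comparison below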
+                assert is_categorical_dtype(retr)
+                tm.assert_series_equal(s_nan, retr, check_dtype=False,
+                                       check_categorical=False)
+            else:
+                tm.assert_series_equal(s_nan, retr)
+
+    for s in examples:
+        roundtrip(s)
+
+
+def test_unicode_longer_encoded():
+    # GH 11234
+    char = '\u0394'
+    df = pd.DataFrame({'A': [char]})
+    with ensure_clean_store() as store:
+        store.put('df', df, format='table', encoding='utf-8')
+        result = store.get('df')
+        tm.assert_frame_equal(result, df)
+
+    df = pd.DataFrame({'A': ['a', char], 'B': ['b', 'b']})
+    with ensure_clean_store() as store:
+        store.put('df', df, format='table', encoding='utf-8')
+        result = store.get('df')
+        tm.assert_frame_equal(result, df)
+
+
+def test_unicode_index():
+    unicode_values = [u('\u03c3'), u('\u03c3\u03c3')]
+
+    # PerformanceWarning
+    with catch_warnings(record=True):
+        s = Series(np.random.randn(len(unicode_values)), unicode_values)
+        _check_roundtrip(s, tm.assert_series_equal)
diff --git a/pandas/tests/io/pytables/test_index.py b/pandas/tests/io/pytables/test_index.py
new file mode 100644
index 0000000000000..1402c90afc037
--- /dev/null
+++ b/pandas/tests/io/pytables/test_index.py
@@ -0,0 +1,310 @@
+import pytest
+from warnings import catch_warnings
+import datetime
+
+import numpy as np
+import pandas as pd
+from pandas import (Series, DataFrame, Timestamp, compat, date_range)
+from .common import (ensure_clean_store, _maybe_remove, ensure_clean_path,
+                     _check_roundtrip)
+
+import pandas.util.testing as tm
+from pandas.compat import (text_type, lrange, u)
+from pandas.io.pytables import read_hdf
+
+
+def test_index_types():
+    with catch_warnings(record=True):
+        values = np.random.randn(2)
+
+        func = lambda l, r: tm.assert_series_equal(l, r,
+                                                   check_dtype=True,
+                                                   check_index_type=True,
+                                                   check_series_type=True)
+
+    with catch_warnings(record=True):
+        ser = Series(values, [0, 'y'])
+        _check_roundtrip(ser, func)
+
+    with catch_warnings(record=True):
+        ser = Series(values, [datetime.datetime.today(), 0])
+        _check_roundtrip(ser, func)
+
+    with catch_warnings(record=True):
+        ser = Series(values, ['y', 0])
+        _check_roundtrip(ser, func)
+
+    with catch_warnings(record=True):
+        ser = Series(values, [datetime.date.today(), 'a'])
+        _check_roundtrip(ser, func)
+
+    with catch_warnings(record=True):
+
+        ser = Series(values, [0, 'y'])
+        _check_roundtrip(ser, func)
+
+        ser = Series(values, [datetime.datetime.today(), 0])
+        _check_roundtrip(ser, func)
+
+        ser = Series(values, ['y', 0])
+        _check_roundtrip(ser, func)
+
+        ser = Series(values, [datetime.date.today(), 'a'])
+        _check_roundtrip(ser, func)
+
+        ser = Series(values, [1.23, 'b'])
+        _check_roundtrip(ser, func)
+
+        ser = Series(values, [1, 1.53])
+        _check_roundtrip(ser, func)
+
+        ser = Series(values, [1, 5])
+        _check_roundtrip(ser, func)
+
+        ser = Series(values, [datetime.datetime(
+            2012, 1, 1), datetime.datetime(2012, 1, 2)])
+        _check_roundtrip(ser, func)
+
+
+def test_store_index_types():
+    # GH5386
+    # test storing various index types
+    with ensure_clean_store() as store:
+        def check(format, index):
+            df = DataFrame(np.random.randn(10, 2), columns=list('AB'))
+            df.index = index(len(df))
+
+            _maybe_remove(store, 'df')
+            store.put('df', df, format=format)
+            tm.assert_frame_equal(df, store['df'])
+
+        for index in [tm.makeFloatIndex, tm.makeStringIndex,
+                      tm.makeIntIndex, tm.makeDateIndex]:
+
+            check('table', index)
+            check('fixed', index)
+
+        # period index currently broken for table
+        # see GH 7796 FIXME
+        check('fixed', tm.makePeriodIndex)
+        # check('table', tm.makePeriodIndex)
+
+        # unicode
+        index = 
tm.makeUnicodeIndex + if compat.PY3: + check('table', index) + check('fixed', index) + else: + + # only support for fixed types (and they have a perf warning) + pytest.raises(TypeError, check, 'table', index) + + # PerformanceWarning + with catch_warnings(record=True): + check('fixed', index) + + +def test_table_index_incompatible_dtypes(): + df1 = DataFrame({'a': [1, 2, 3]}) + df2 = DataFrame({'a': [4, 5, 6]}, + index=date_range('1/1/2000', periods=3)) + + with ensure_clean_store() as store: + store.put('frame', df1, format='table') + pytest.raises(TypeError, store.put, 'frame', df2, + format='table', append=True) + + +def test_store_index_name(): + df = tm.makeDataFrame() + df.index.name = 'foo' + + with ensure_clean_store() as store: + store['frame'] = df + recons = store['frame'] + tm.assert_frame_equal(recons, df) + + +def test_store_index_name_with_tz(): + # GH 13884 + df = pd.DataFrame({'A': [1, 2]}) + df.index = pd.DatetimeIndex([1234567890123456787, 1234567890123456788]) + df.index = df.index.tz_localize('UTC') + df.index.name = 'foo' + + with ensure_clean_store() as store: + store.put('frame', df, format='table') + recons = store['frame'] + tm.assert_frame_equal(recons, df) + + +@pytest.mark.parametrize('table_format', ['table', 'fixed']) +def test_store_index_name_numpy_str(table_format): + # GH #13492 + idx = pd.Index(pd.to_datetime([datetime.date(2000, 1, 1), + datetime.date(2000, 1, 2)]), + name=u('cols\u05d2')) + idx1 = pd.Index(pd.to_datetime([datetime.date(2010, 1, 1), + datetime.date(2010, 1, 2)]), + name=u('rows\u05d0')) + df = pd.DataFrame(np.arange(4).reshape(2, 2), columns=idx, index=idx1) + + # This used to fail, returning numpy strings instead of python strings. + with ensure_clean_path() as path: + df.to_hdf(path, 'df', format=table_format) + df2 = read_hdf(path, 'df') + + tm.assert_frame_equal(df, df2, check_names=True) + + assert type(df2.index.name) == text_type + assert type(df2.columns.name) == text_type + + +def test_retain_index_attributes(): + # GH 3499, losing frequency info on index recreation + df = DataFrame(dict( + A=Series(lrange(3), + index=date_range('2000-1-1', periods=3, freq='H')))) + + with ensure_clean_store() as store: + _maybe_remove(store, 'data') + store.put('data', df, format='table') + + result = store.get('data') + tm.assert_frame_equal(df, result) + + for attr in ['freq', 'tz', 'name']: + for idx in ['index', 'columns']: + assert (getattr(getattr(df, idx), attr, None) == + getattr(getattr(result, idx), attr, None)) + + # try to append a table with a different frequency + with catch_warnings(record=True): + df2 = DataFrame(dict( + A=Series(lrange(3), + index=date_range('2002-1-1', + periods=3, freq='D')))) + store.append('data', df2) + + assert store.get_storer('data').info['index']['freq'] is None + + # this is ok + _maybe_remove(store, 'df2') + df2 = DataFrame(dict( + A=Series(lrange(3), + index=[Timestamp('20010101'), Timestamp('20010102'), + Timestamp('20020101')]))) + store.append('df2', df2) + df3 = DataFrame(dict( + A=Series(lrange(3), + index=date_range('2002-1-1', periods=3, + freq='D')))) + store.append('df2', df3) + + +def test_retain_index_attributes2(): + with ensure_clean_path() as path: + with catch_warnings(record=True): + df = DataFrame(dict( + A=Series(lrange(3), + index=date_range('2000-1-1', + periods=3, freq='H')))) + df.to_hdf(path, 'data', mode='w', append=True) + df2 = DataFrame(dict( + A=Series(lrange(3), + index=date_range('2002-1-1', periods=3, + freq='D')))) + df2.to_hdf(path, 'data', append=True) + + idx = 
date_range('2000-1-1', periods=3, freq='H') + idx.name = 'foo' + df = DataFrame(dict(A=Series(lrange(3), index=idx))) + df.to_hdf(path, 'data', mode='w', append=True) + + assert read_hdf(path, 'data').index.name == 'foo' + + with catch_warnings(record=True): + idx2 = date_range('2001-1-1', periods=3, freq='H') + idx2.name = 'bar' + df2 = DataFrame(dict(A=Series(lrange(3), index=idx2))) + df2.to_hdf(path, 'data', append=True) + + assert read_hdf(path, 'data').index.name is None + + +def test_float_index(): + # GH #454 + index = np.random.randn(10) + s = Series(np.random.randn(10), index=index) + _check_roundtrip(s, tm.assert_series_equal) + + +def test_tuple_index(): + # GH #492 + col = np.arange(10) + idx = [(0., 1.), (2., 3.), (4., 5.)] + data = np.random.randn(30).reshape((3, 10)) + DF = DataFrame(data, index=idx, columns=col) + + with catch_warnings(record=True): + _check_roundtrip(DF, tm.assert_frame_equal) + + +def test_create_table_index(): + with ensure_clean_store() as store: + with catch_warnings(record=True): + def col(t, column): + return getattr(store.get_storer(t).table.cols, column) + + # index=False + wp = tm.makePanel() + store.append('p5', wp, index=False) + store.create_table_index('p5', columns=['major_axis']) + assert(col('p5', 'major_axis').is_indexed is True) + assert(col('p5', 'minor_axis').is_indexed is False) + + # index=True + store.append('p5i', wp, index=True) + assert(col('p5i', 'major_axis').is_indexed is True) + assert(col('p5i', 'minor_axis').is_indexed is True) + + # default optlevels + store.get_storer('p5').create_index() + assert(col('p5', 'major_axis').index.optlevel == 6) + assert(col('p5', 'minor_axis').index.kind == 'medium') + + # let's change the indexing scheme + store.create_table_index('p5') + assert(col('p5', 'major_axis').index.optlevel == 6) + assert(col('p5', 'minor_axis').index.kind == 'medium') + store.create_table_index('p5', optlevel=9) + assert(col('p5', 'major_axis').index.optlevel == 9) + assert(col('p5', 'minor_axis').index.kind == 'medium') + store.create_table_index('p5', kind='full') + assert(col('p5', 'major_axis').index.optlevel == 9) + assert(col('p5', 'minor_axis').index.kind == 'full') + store.create_table_index('p5', optlevel=1, kind='light') + assert(col('p5', 'major_axis').index.optlevel == 1) + assert(col('p5', 'minor_axis').index.kind == 'light') + + # data columns + df = tm.makeTimeDataFrame() + df['string'] = 'foo' + df['string2'] = 'bar' + store.append('f', df, data_columns=['string', 'string2']) + assert(col('f', 'index').is_indexed is True) + assert(col('f', 'string').is_indexed is True) + assert(col('f', 'string2').is_indexed is True) + + # specify index=columns + store.append( + 'f2', df, index=['string'], + data_columns=['string', 'string2']) + assert(col('f2', 'index').is_indexed is False) + assert(col('f2', 'string').is_indexed is True) + assert(col('f2', 'string2').is_indexed is False) + + # try to index a non-table + _maybe_remove(store, 'f2') + store.put('f2', df) + pytest.raises(TypeError, store.create_table_index, 'f2') diff --git a/pandas/tests/io/pytables/test_multiindex.py b/pandas/tests/io/pytables/test_multiindex.py new file mode 100644 index 0000000000000..5734ca9a91ee7 --- /dev/null +++ b/pandas/tests/io/pytables/test_multiindex.py @@ -0,0 +1,228 @@ +import pytest +import datetime + +import numpy as np +import pandas as pd +from pandas import (Series, DataFrame, date_range, Index, + concat, MultiIndex, RangeIndex, Int64Index) +from .common import (ensure_clean_store, ensure_clean_path, + 
_maybe_remove, _check_roundtrip) +import pandas.util.testing as tm +from pandas.compat import range +from pandas.io.pytables import HDFStore, read_hdf + + +def test_column_multiindex(): + # GH 4710 + # recreate multi-indexes properly + index = MultiIndex.from_tuples([('A', 'a'), ('A', 'b'), + ('B', 'a'), ('B', 'b')], + names=['first', 'second']) + df = DataFrame(np.arange(12).reshape(3, 4), columns=index) + expected = df.copy() + if isinstance(expected.index, RangeIndex): + expected.index = Int64Index(expected.index) + + with ensure_clean_store() as store: + store.put('df', df) + tm.assert_frame_equal(store['df'], expected, + check_index_type=True, + check_column_type=True) + + store.put('df1', df, format='table') + tm.assert_frame_equal(store['df1'], expected, + check_index_type=True, + check_column_type=True) + + pytest.raises(ValueError, store.put, 'df2', df, + format='table', data_columns=['A']) + pytest.raises(ValueError, store.put, 'df3', df, + format='table', data_columns=True) + + # appending multi-column on existing table (see GH 6167) + with ensure_clean_store() as store: + store.append('df2', df) + store.append('df2', df) + + tm.assert_frame_equal(store['df2'], concat((df, df))) + + # non_index_axes name + df = DataFrame(np.arange(12).reshape(3, 4), + columns=Index(list('ABCD'), name='foo')) + expected = df.copy() + if isinstance(expected.index, RangeIndex): + expected.index = Int64Index(expected.index) + + with ensure_clean_store() as store: + store.put('df1', df, format='table') + tm.assert_frame_equal(store['df1'], expected, + check_index_type=True, + check_column_type=True) + + +def test_store_multiindex(): + # validate multi-index names + # GH 5527 + with ensure_clean_store() as store: + def make_index(names=None): + return MultiIndex.from_tuples([(datetime.datetime(2013, 12, d), + s, t) + for d in range(1, 3) + for s in range(2) + for t in range(3)], + names=names) + + # no names + _maybe_remove(store, 'df') + df = DataFrame(np.zeros((12, 2)), columns=[ + 'a', 'b'], index=make_index()) + store.append('df', df) + tm.assert_frame_equal(store.select('df'), df) + + # partial names + _maybe_remove(store, 'df') + df = DataFrame(np.zeros((12, 2)), columns=[ + 'a', 'b'], index=make_index(['date', None, None])) + store.append('df', df) + tm.assert_frame_equal(store.select('df'), df) + + # series + _maybe_remove(store, 's') + s = Series(np.zeros(12), index=make_index(['date', None, None])) + store.append('s', s) + xp = Series(np.zeros(12), index=make_index( + ['date', 'level_1', 'level_2'])) + tm.assert_series_equal(store.select('s'), xp) + + # dup with column + _maybe_remove(store, 'df') + df = DataFrame(np.zeros((12, 2)), columns=[ + 'a', 'b'], index=make_index(['date', 'a', 't'])) + pytest.raises(ValueError, store.append, 'df', df) + + # fully names + _maybe_remove(store, 'df') + df = DataFrame(np.zeros((12, 2)), columns=[ + 'a', 'b'], index=make_index(['date', 's', 't'])) + store.append('df', df) + tm.assert_frame_equal(store.select('df'), df) + + +def test_mi_data_columns(): + # GH 14435 + idx = pd.MultiIndex.from_arrays([date_range('2000-01-01', periods=5), + range(5)], names=['date', 'id']) + df = pd.DataFrame({'a': [1.1, 1.2, 1.3, 1.4, 1.5]}, index=idx) + + with ensure_clean_store() as store: + store.append('df', df, data_columns=True) + + actual = store.select('df', where='id == 1') + expected = df.iloc[[1], :] + tm.assert_frame_equal(actual, expected) + + +def test_columns_multiindex_modified(): + # BUG: 7212 + # read_hdf store.select modified the passed columns parameters 
+    # when multi-indexed.
+    df = DataFrame(np.random.rand(4, 5),
+                   index=list('abcd'),
+                   columns=list('ABCDE'))
+    df.index.name = 'letters'
+    df = df.set_index(keys='E', append=True)
+
+    data_columns = df.index.names + df.columns.tolist()
+    with ensure_clean_path() as path:
+        df.to_hdf(path, 'df',
+                  mode='a',
+                  append=True,
+                  data_columns=data_columns,
+                  index=False)
+        cols2load = list('BCD')
+        cols2load_original = list(cols2load)
+        df_loaded = read_hdf(path, 'df', columns=cols2load)  # noqa
+        assert cols2load_original == cols2load
+
+
+def test_frame_select_complex2():
+    with ensure_clean_path(['parms.hdf', 'hist.hdf']) as paths:
+        pp, hh = paths
+
+        # use non-trivial selection criteria
+        parms = DataFrame({'A': [1, 1, 2, 2, 3]})
+        parms.to_hdf(pp, 'df', mode='w',
+                     format='table', data_columns=['A'])
+
+        selection = read_hdf(pp, 'df', where='A=[2,3]')
+        hist = DataFrame(np.random.randn(25, 1),
+                         columns=['data'],
+                         index=MultiIndex.from_tuples(
+                             [(i, j) for i in range(5)
+                              for j in range(5)],
+                             names=['l1', 'l2']))
+
+        hist.to_hdf(hh, 'df', mode='w', format='table')
+
+        expected = read_hdf(hh, 'df', where='l1=[2, 3, 4]')
+
+        # scope with a list-like
+        l = selection.index.tolist()  # noqa
+        store = HDFStore(hh)
+        result = store.select('df', where='l1=l')
+        tm.assert_frame_equal(result, expected)
+        store.close()
+
+        result = read_hdf(hh, 'df', where='l1=l')
+        tm.assert_frame_equal(result, expected)
+
+        # index
+        index = selection.index  # noqa
+        result = read_hdf(hh, 'df', where='l1=index')
+        tm.assert_frame_equal(result, expected)
+
+        result = read_hdf(hh, 'df', where='l1=selection.index')
+        tm.assert_frame_equal(result, expected)
+
+        result = read_hdf(hh, 'df', where='l1=selection.index.tolist()')
+        tm.assert_frame_equal(result, expected)
+
+        result = read_hdf(hh, 'df', where='l1=list(selection.index)')
+        tm.assert_frame_equal(result, expected)
+
+        # scope with an index
+        store = HDFStore(hh)
+
+        result = store.select('df', where='l1=index')
+        tm.assert_frame_equal(result, expected)
+
+        result = store.select('df', where='l1=selection.index')
+        tm.assert_frame_equal(result, expected)
+
+        result = store.select('df', where='l1=selection.index.tolist()')
+        tm.assert_frame_equal(result, expected)
+
+        result = store.select('df', where='l1=list(selection.index)')
+        tm.assert_frame_equal(result, expected)
+
+        store.close()
+
+
+def test_store_hierarchical():
+    index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
+                               ['one', 'two', 'three']],
+                       labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
+                               [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
+                       names=['foo', 'bar'])
+    frame = DataFrame(np.random.randn(10, 3), index=index,
+                      columns=['A', 'B', 'C'])
+
+    _check_roundtrip(frame, tm.assert_frame_equal)
+    _check_roundtrip(frame.T, tm.assert_frame_equal)
+    _check_roundtrip(frame['A'], tm.assert_series_equal)
+
+    # check that the names are stored
+    with ensure_clean_store() as store:
+        store['frame'] = frame
+        recons = store['frame']
+        tm.assert_frame_equal(recons, frame)
diff --git a/pandas/tests/io/pytables/test_open_close.py b/pandas/tests/io/pytables/test_open_close.py
new file mode 100644
index 0000000000000..5b1eb5ee11a52
--- /dev/null
+++ b/pandas/tests/io/pytables/test_open_close.py
@@ -0,0 +1,172 @@
+import pytest
+import os
+
+import pandas.util.testing as tm
+from pandas.io import pytables as pytables
+from .common import ensure_clean_path
+from pandas.io.pytables import (HDFStore, ClosedFileError,
+                                PossibleDataLossError)
+
+
+def test_multiple_open_close():
+    # gh-4409: open & close multiple times
+    with 
ensure_clean_path() as path: + + df = tm.makeDataFrame() + df.to_hdf(path, 'df', mode='w', format='table') + + # single + store = HDFStore(path) + assert 'CLOSED' not in store.info() + assert store.is_open + + store.close() + assert 'CLOSED' in store.info() + assert not store.is_open + + with ensure_clean_path() as path: + if pytables._table_file_open_policy_is_strict: + # multiples + store1 = HDFStore(path) + + def f(): + HDFStore(path) + pytest.raises(ValueError, f) + store1.close() + + else: + # multiples + store1 = HDFStore(path) + store2 = HDFStore(path) + + assert 'CLOSED' not in store1.info() + assert 'CLOSED' not in store2.info() + assert store1.is_open + assert store2.is_open + + store1.close() + assert 'CLOSED' in store1.info() + assert not store1.is_open + assert 'CLOSED' not in store2.info() + assert store2.is_open + + store2.close() + assert 'CLOSED' in store1.info() + assert 'CLOSED' in store2.info() + assert not store1.is_open + assert not store2.is_open + + # nested close + store = HDFStore(path, mode='w') + store.append('df', df) + + store2 = HDFStore(path) + store2.append('df2', df) + store2.close() + assert 'CLOSED' in store2.info() + assert not store2.is_open + + store.close() + assert 'CLOSED' in store.info() + assert not store.is_open + + # double closing + store = HDFStore(path, mode='w') + store.append('df', df) + + store2 = HDFStore(path) + store.close() + assert 'CLOSED' in store.info() + assert not store.is_open + + store2.close() + assert 'CLOSED' in store2.info() + assert not store2.is_open + + # ops on a closed store + with ensure_clean_path() as path: + df = tm.makeDataFrame() + df.to_hdf(path, 'df', mode='w', format='table') + + store = HDFStore(path) + store.close() + + pytest.raises(ClosedFileError, store.keys) + pytest.raises(ClosedFileError, lambda: 'df' in store) + pytest.raises(ClosedFileError, lambda: len(store)) + pytest.raises(ClosedFileError, lambda: store['df']) + pytest.raises(AttributeError, lambda: store.df) + pytest.raises(ClosedFileError, store.select, 'df') + pytest.raises(ClosedFileError, store.get, 'df') + pytest.raises(ClosedFileError, store.append, 'df2', df) + pytest.raises(ClosedFileError, store.put, 'df3', df) + pytest.raises(ClosedFileError, store.get_storer, 'df2') + pytest.raises(ClosedFileError, store.remove, 'df2') + + def f(): + store.select('df') + tm.assert_raises_regex(ClosedFileError, 'file is not open', f) + + +def test_reopen_handle(): + with ensure_clean_path() as path: + store = HDFStore(path, mode='a') + store['a'] = tm.makeTimeSeries() + + # invalid mode change + pytest.raises(PossibleDataLossError, store.open, 'w') + store.close() + assert not store.is_open + + # truncation ok here + store.open('w') + assert store.is_open + assert len(store) == 0 + store.close() + assert not store.is_open + + store = HDFStore(path, mode='a') + store['a'] = tm.makeTimeSeries() + + # reopen as read + store.open('r') + assert store.is_open + assert len(store) == 1 + assert store._mode == 'r' + store.close() + assert not store.is_open + + # reopen as append + store.open('a') + assert store.is_open + assert len(store) == 1 + assert store._mode == 'a' + store.close() + assert not store.is_open + + # reopen as append (again) + store.open('a') + assert store.is_open + assert len(store) == 1 + assert store._mode == 'a' + store.close() + assert not store.is_open + + +def test_open_args(): + with ensure_clean_path() as path: + df = tm.makeDataFrame() + + # create an in memory store + store = HDFStore(path, mode='a', driver='H5FD_CORE', + 
driver_core_backing_store=0) + store['df'] = df + store.append('df2', df) + + tm.assert_frame_equal(store['df'], df) + tm.assert_frame_equal(store['df2'], df) + + store.close() + + # the file should not have actually been written + assert not os.path.exists(path) diff --git a/pandas/tests/io/pytables/test_panel.py b/pandas/tests/io/pytables/test_panel.py new file mode 100644 index 0000000000000..9bafb8be38ef1 --- /dev/null +++ b/pandas/tests/io/pytables/test_panel.py @@ -0,0 +1,332 @@ +import pytest +from warnings import catch_warnings + +import numpy as np +from pandas import Index, Panel, date_range, Timestamp +from .common import _check_roundtrip, ensure_clean_store, _maybe_remove +import pandas.util.testing as tm + + +def test_wide(): + with catch_warnings(record=True): + wp = tm.makePanel() + _check_roundtrip(wp, tm.assert_panel_equal) + + +def test_wide_table_dups(): + with ensure_clean_store() as store: + with catch_warnings(record=True): + wp = tm.makePanel() + store.put('panel', wp, format='table') + store.put('panel', wp, format='table', append=True) + + recons = store['panel'] + tm.assert_panel_equal(recons, wp) + + +def test_long(): + def _check(left, right): + tm.assert_panel_equal(left.to_panel(), right.to_panel()) + + with catch_warnings(record=True): + wp = tm.makePanel() + _check_roundtrip(wp.to_frame(), _check) + + +def test_remove_startstop(): + # GH #4835 and #6177 + with ensure_clean_store() as store: + with catch_warnings(record=True): + wp = tm.makePanel(30) + + # start + _maybe_remove(store, 'wp1') + store.put('wp1', wp, format='t') + n = store.remove('wp1', start=32) + assert n == 120 - 32 + result = store.select('wp1') + expected = wp.reindex(major_axis=wp.major_axis[:32 // 4]) + tm.assert_panel_equal(result, expected) + + _maybe_remove(store, 'wp2') + store.put('wp2', wp, format='t') + n = store.remove('wp2', start=-32) + assert n == 32 + result = store.select('wp2') + expected = wp.reindex(major_axis=wp.major_axis[:-32 // 4]) + tm.assert_panel_equal(result, expected) + + # stop + _maybe_remove(store, 'wp3') + store.put('wp3', wp, format='t') + n = store.remove('wp3', stop=32) + assert n == 32 + result = store.select('wp3') + expected = wp.reindex(major_axis=wp.major_axis[32 // 4:]) + tm.assert_panel_equal(result, expected) + + _maybe_remove(store, 'wp4') + store.put('wp4', wp, format='t') + n = store.remove('wp4', stop=-32) + assert n == 120 - 32 + result = store.select('wp4') + expected = wp.reindex(major_axis=wp.major_axis[-32 // 4:]) + tm.assert_panel_equal(result, expected) + + # start n stop + _maybe_remove(store, 'wp5') + store.put('wp5', wp, format='t') + n = store.remove('wp5', start=16, stop=-16) + assert n == 120 - 32 + result = store.select('wp5') + expected = wp.reindex( + major_axis=(wp.major_axis[:16 // 4] + .union(wp.major_axis[-16 // 4:]))) + tm.assert_panel_equal(result, expected) + + _maybe_remove(store, 'wp6') + store.put('wp6', wp, format='t') + n = store.remove('wp6', start=16, stop=16) + assert n == 0 + result = store.select('wp6') + expected = wp.reindex(major_axis=wp.major_axis) + tm.assert_panel_equal(result, expected) + + # with where + _maybe_remove(store, 'wp7') + + # TODO: unused? 
+ date = wp.major_axis.take(np.arange(0, 30, 3)) # noqa + + crit = 'major_axis=date' + store.put('wp7', wp, format='t') + n = store.remove('wp7', where=[crit], stop=80) + assert n == 28 + result = store.select('wp7') + expected = wp.reindex(major_axis=wp.major_axis.difference( + wp.major_axis[np.arange(0, 20, 3)])) + tm.assert_panel_equal(result, expected) + + +def test_remove_crit(): + with ensure_clean_store() as store: + with catch_warnings(record=True): + wp = tm.makePanel(30) + + # group row removal + _maybe_remove(store, 'wp3') + date4 = wp.major_axis.take([0, 1, 2, 4, 5, 6, 8, 9, 10]) + crit4 = 'major_axis=date4' + store.put('wp3', wp, format='t') + n = store.remove('wp3', where=[crit4]) + assert n == 36 + + result = store.select('wp3') + expected = wp.reindex( + major_axis=wp.major_axis.difference(date4)) + tm.assert_panel_equal(result, expected) + + # upper half + _maybe_remove(store, 'wp') + store.put('wp', wp, format='table') + date = wp.major_axis[len(wp.major_axis) // 2] + + crit1 = 'major_axis>date' + crit2 = "minor_axis=['A', 'D']" + n = store.remove('wp', where=[crit1]) + assert n == 56 + + n = store.remove('wp', where=[crit2]) + assert n == 32 + + result = store['wp'] + expected = wp.truncate(after=date).reindex(minor=['B', 'C']) + tm.assert_panel_equal(result, expected) + + # individual row elements + _maybe_remove(store, 'wp2') + store.put('wp2', wp, format='table') + + date1 = wp.major_axis[1:3] + crit1 = 'major_axis=date1' + store.remove('wp2', where=[crit1]) + result = store.select('wp2') + expected = wp.reindex( + major_axis=wp.major_axis.difference(date1)) + tm.assert_panel_equal(result, expected) + + date2 = wp.major_axis[5] + crit2 = 'major_axis=date2' + store.remove('wp2', where=[crit2]) + result = store['wp2'] + expected = wp.reindex( + major_axis=(wp.major_axis + .difference(date1) + .difference(Index([date2])) + )) + tm.assert_panel_equal(result, expected) + + date3 = [wp.major_axis[7], wp.major_axis[9]] + crit3 = 'major_axis=date3' + store.remove('wp2', where=[crit3]) + result = store['wp2'] + expected = wp.reindex(major_axis=wp.major_axis + .difference(date1) + .difference(Index([date2])) + .difference(Index(date3))) + tm.assert_panel_equal(result, expected) + + # corners + _maybe_remove(store, 'wp4') + store.put('wp4', wp, format='table') + n = store.remove( + 'wp4', where="major_axis>wp.major_axis[-1]") + result = store.select('wp4') + tm.assert_panel_equal(result, wp) + + +def test_remove_where(): + with ensure_clean_store() as store: + with catch_warnings(record=True): + # non-existence + crit1 = 'index>foo' + pytest.raises(KeyError, store.remove, 'a', [crit1]) + + # try to remove non-table (with crit) + # non-table ok (where = None) + wp = tm.makePanel(30) + store.put('wp', wp, format='table') + store.remove('wp', ["minor_axis=['A', 'D']"]) + rs = store.select('wp') + expected = wp.reindex(minor_axis=['B', 'C']) + tm.assert_panel_equal(rs, expected) + + # empty where + _maybe_remove(store, 'wp') + store.put('wp', wp, format='table') + + # number deleted (entire table) + n = store.remove('wp', []) + assert n == 120 + + # non-empty where + _maybe_remove(store, 'wp') + store.put('wp', wp, format='table') + pytest.raises(ValueError, store.remove, 'wp', ['foo']) + + +def test_terms(): + with ensure_clean_store() as store: + with catch_warnings(record=True): + wp = tm.makePanel() + wpneg = Panel.fromDict({-1: tm.makeDataFrame(), + 0: tm.makeDataFrame(), + 1: tm.makeDataFrame()}) + + store.put('wp', wp, format='table') + store.put('wpneg', wpneg,
format='table') + + # panel + result = store.select( + 'wp', + "major_axis<'20000108' and minor_axis=['A', 'B']") + expected = wp.truncate( + after='20000108').reindex(minor=['A', 'B']) + tm.assert_panel_equal(result, expected) + + # with deprecation + result = store.select( + 'wp', where=("major_axis<'20000108' " + "and minor_axis=['A', 'B']")) + expected = wp.truncate( + after='20000108').reindex(minor=['A', 'B']) + tm.assert_panel_equal(result, expected) + + with catch_warnings(record=True): + # valid terms + terms = [('major_axis=20121114'), + ('major_axis>20121114'), + (("major_axis=['20121114', '20121114']"),), + ('major_axis=datetime.datetime(2012, 11, 14)'), + 'major_axis> 20121114', + 'major_axis >20121114', + 'major_axis > 20121114', + (("minor_axis=['A', 'B']"),), + (("minor_axis=['A', 'B']"),), + ((("minor_axis==['A', 'B']"),),), + (("items=['ItemA', 'ItemB']"),), + ('items=ItemA'), + ] + + for t in terms: + store.select('wp', t) + + with tm.assert_raises_regex( + TypeError, 'Only named functions are supported'): + store.select( + 'wp', + 'major_axis == (lambda x: x)("20130101")') + + with catch_warnings(record=True): + # check USub node parsing + res = store.select('wpneg', 'items == -1') + expected = Panel({-1: wpneg[-1]}) + tm.assert_panel_equal(res, expected) + + with tm.assert_raises_regex(NotImplementedError, + 'Unary addition ' + 'not supported'): + store.select('wpneg', 'items == +1') + + +def test_term_compat(): + with ensure_clean_store() as store: + with catch_warnings(record=True): + wp = Panel(np.random.randn(2, 5, 4), items=['Item1', 'Item2'], + major_axis=date_range('1/1/2000', periods=5), + minor_axis=['A', 'B', 'C', 'D']) + store.append('wp', wp) + + result = store.select( + 'wp', where=("major_axis>20000102 " + "and minor_axis=['A', 'B']")) + expected = wp.loc[:, wp.major_axis > + Timestamp('20000102'), ['A', 'B']] + tm.assert_panel_equal(result, expected) + + store.remove('wp', 'major_axis>20000103') + result = store.select('wp') + expected = wp.loc[:, wp.major_axis <= Timestamp('20000103'), :] + tm.assert_panel_equal(result, expected) + + with ensure_clean_store() as store: + with catch_warnings(record=True): + wp = Panel(np.random.randn(2, 5, 4), + items=['Item1', 'Item2'], + major_axis=date_range('1/1/2000', periods=5), + minor_axis=['A', 'B', 'C', 'D']) + store.append('wp', wp) + + # stringified datetimes + result = store.select( + 'wp', 'major_axis>datetime.datetime(2000, 1, 2)') + expected = wp.loc[:, wp.major_axis > Timestamp('20000102')] + tm.assert_panel_equal(result, expected) + + result = store.select( + 'wp', 'major_axis>datetime.datetime(2000, 1, 2)') + expected = wp.loc[:, wp.major_axis > Timestamp('20000102')] + tm.assert_panel_equal(result, expected) + + result = store.select( + 'wp', + "major_axis=[datetime.datetime(2000, 1, 2, 0, 0), " + "datetime.datetime(2000, 1, 3, 0, 0)]") + expected = wp.loc[:, [Timestamp('20000102'), + Timestamp('20000103')]] + tm.assert_panel_equal(result, expected) + + result = store.select( + 'wp', "minor_axis=['A', 'B']") + expected = wp.loc[:, :, ['A', 'B']] + tm.assert_panel_equal(result, expected) diff --git a/pandas/tests/io/pytables/test_path.py b/pandas/tests/io/pytables/test_path.py new file mode 100644 index 0000000000000..a20f7c9f5c165 --- /dev/null +++ b/pandas/tests/io/pytables/test_path.py @@ -0,0 +1,60 @@ +import os +import pytest + +import pandas as pd +import pandas.util.testing as tm +from pandas.compat import PY36 + + +def test_path_pathlib(): + df = tm.makeDataFrame() + + result = 
tm.round_trip_pathlib( + lambda p: df.to_hdf(p, 'df'), + lambda p: pd.read_hdf(p, 'df')) + tm.assert_frame_equal(df, result) + + +def test_path_pathlib_hdfstore(): + df = tm.makeDataFrame() + + def writer(path): + with pd.HDFStore(path) as store: + df.to_hdf(store, 'df') + + def reader(path): + with pd.HDFStore(path) as store: + return pd.read_hdf(store, 'df') + + result = tm.round_trip_pathlib(writer, reader) + tm.assert_frame_equal(df, result) + + +def test_pickle_path_localpath(): + df = tm.makeDataFrame() + result = tm.round_trip_localpath( + lambda p: df.to_hdf(p, 'df'), + lambda p: pd.read_hdf(p, 'df')) + tm.assert_frame_equal(df, result) + + +def test_path_localpath_hdfstore(): + df = tm.makeDataFrame() + + def writer(path): + with pd.HDFStore(path) as store: + df.to_hdf(store, 'df') + + def reader(path): + with pd.HDFStore(path) as store: + return pd.read_hdf(store, 'df') + + result = tm.round_trip_localpath(writer, reader) + tm.assert_frame_equal(df, result) + + +@pytest.mark.skipif(not PY36, reason="Need python 3.6") +def test_fspath(): + with tm.ensure_clean('foo.h5') as path: + with pd.HDFStore(path) as store: + assert os.fspath(store) == str(path) diff --git a/pandas/tests/io/pytables/test_put.py b/pandas/tests/io/pytables/test_put.py new file mode 100644 index 0000000000000..9b0b74621022e --- /dev/null +++ b/pandas/tests/io/pytables/test_put.py @@ -0,0 +1,117 @@ +import pytest +from warnings import catch_warnings +import datetime + +import numpy as np +from pandas import (Series, DataFrame, Index, Timestamp) +from .common import ensure_clean_store, _maybe_remove, _check_roundtrip +import pandas.util.testing as tm +import pandas.util._test_decorators as td +from pandas.compat import range + + +def test_put(): + with ensure_clean_store() as store: + ts = tm.makeTimeSeries() + df = tm.makeTimeDataFrame() + store['a'] = ts + store['b'] = df[:10] + store['foo/bar/bah'] = df[:10] + store['foo'] = df[:10] + store['/foo'] = df[:10] + store.put('c', df[:10], format='table') + + # not OK, not a table + pytest.raises( + ValueError, store.put, 'b', df[10:], append=True) + + # can't put to a table (use append instead) + pytest.raises(ValueError, store.put, 'c', df[10:], append=True) + + # overwrite table + store.put('c', df[:10], format='table', append=False) + tm.assert_frame_equal(df[:10], store['c']) + + +def test_put_string_index(): + with ensure_clean_store() as store: + index = Index( + ["I am a very long string index: %s" % i for i in range(20)]) + s = Series(np.arange(20), index=index) + df = DataFrame({'A': s, 'B': s}) + + store['a'] = s + tm.assert_series_equal(store['a'], s) + + store['b'] = df + tm.assert_frame_equal(store['b'], df) + + # mixed length + index = Index(['abcdefghijklmnopqrstuvwxyz1234567890'] + + ["I am a very long string index: %s" % i + for i in range(20)]) + s = Series(np.arange(21), index=index) + df = DataFrame({'A': s, 'B': s}) + store['a'] = s + tm.assert_series_equal(store['a'], s) + + store['b'] = df + tm.assert_frame_equal(store['b'], df) + + +def test_put_compression(): + with ensure_clean_store() as store: + df = tm.makeTimeDataFrame() + + store.put('c', df, format='table', complib='zlib') + tm.assert_frame_equal(store['c'], df) + + # can't compress if format='fixed' + pytest.raises(ValueError, store.put, 'b', df, + format='fixed', complib='zlib') + + +def test_put_integer(): + # non-date, non-string index + df = DataFrame(np.random.randn(50, 100)) + _check_roundtrip(df, tm.assert_frame_equal) + + +def test_put_mixed_type(): + df =
tm.makeTimeDataFrame() + df['obj1'] = 'foo' + df['obj2'] = 'bar' + df['bool1'] = df['A'] > 0 + df['bool2'] = df['B'] > 0 + df['bool3'] = True + df['int1'] = 1 + df['int2'] = 2 + df['timestamp1'] = Timestamp('20010102') + df['timestamp2'] = Timestamp('20010103') + df['datetime1'] = datetime.datetime(2001, 1, 2, 0, 0) + df['datetime2'] = datetime.datetime(2001, 1, 3, 0, 0) + df.loc[3:6, ['obj1']] = np.nan + df = df._consolidate()._convert(datetime=True) + + with ensure_clean_store() as store: + _maybe_remove(store, 'df') + + # PerformanceWarning + with catch_warnings(record=True): + store.put('df', df) + + expected = store.get('df') + tm.assert_frame_equal(expected, df) + + +@td.skip_if_windows_python_3 +def test_put_compression_blosc(): + df = tm.makeTimeDataFrame() + + with ensure_clean_store() as store: + # can't compress if format='fixed' + pytest.raises(ValueError, store.put, 'b', df, + format='fixed', complib='blosc') + + store.put('c', df, format='table', complib='blosc') + tm.assert_frame_equal(store['c'], df) diff --git a/pandas/tests/io/pytables/test_query.py b/pandas/tests/io/pytables/test_query.py new file mode 100644 index 0000000000000..24db3100c5a91 --- /dev/null +++ b/pandas/tests/io/pytables/test_query.py @@ -0,0 +1,1265 @@ +import pytest +from warnings import catch_warnings +from distutils.version import LooseVersion + +import numpy as np +import pandas as pd +from pandas import (Series, DataFrame, MultiIndex, Timestamp, date_range, + Index, concat, bdate_range, Panel, isna) +from .common import (ensure_clean_store, _maybe_remove, + ensure_clean_path) +tables = pytest.importorskip('tables') +from pandas.util.testing import assert_frame_equal +from pandas.compat import (BytesIO, range, PY35, is_platform_windows) +import pandas.util.testing as tm +import pandas.util._test_decorators as td +from pandas.io.pytables import read_hdf, HDFStore, TableIterator, Term +from pandas.io.formats.printing import pprint_thing + + +def test_read_hdf_open_store(): + # GH10330 + # No check for non-string path_or_buf, and no test of open store + df = DataFrame(np.random.rand(4, 5), + index=list('abcd'), + columns=list('ABCDE')) + df.index.name = 'letters' + df = df.set_index(keys='E', append=True) + + with ensure_clean_path() as path: + df.to_hdf(path, 'df', mode='w') + direct = read_hdf(path, 'df') + store = HDFStore(path, mode='r') + indirect = read_hdf(store, 'df') + tm.assert_frame_equal(direct, indirect) + assert store.is_open + store.close() + + +def test_read_hdf_iterator(): + df = DataFrame(np.random.rand(4, 5), + index=list('abcd'), + columns=list('ABCDE')) + df.index.name = 'letters' + df = df.set_index(keys='E', append=True) + + with ensure_clean_path() as path: + df.to_hdf(path, 'df', mode='w', format='t') + direct = read_hdf(path, 'df') + iterator = read_hdf(path, 'df', iterator=True) + assert isinstance(iterator, TableIterator) + indirect = next(iterator.__iter__()) + tm.assert_frame_equal(direct, indirect) + iterator.store.close() + + +def test_read_hdf_errors(): + df = DataFrame(np.random.rand(4, 5), + index=list('abcd'), + columns=list('ABCDE')) + + with ensure_clean_path() as path: + pytest.raises(IOError, read_hdf, path, 'key') + df.to_hdf(path, 'df') + store = HDFStore(path, mode='r') + store.close() + pytest.raises(IOError, read_hdf, store, 'df') + + +def test_read_hdf_generic_buffer_errors(): + pytest.raises(NotImplementedError, read_hdf, BytesIO(b''), 'df') + + +def test_read_nokey(): + df = DataFrame(np.random.rand(4, 5), + index=list('abcd'), + columns=list('ABCDE')) +
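+ # read_hdf without a key works only while the file holds a single + # object; once 'df2' is written below, read_hdf raises ValueError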
+ # Categorical dtype not supported for "fixed" format. So no need + # to test with that dtype in the dataframe here. + with ensure_clean_path() as path: + df.to_hdf(path, 'df', mode='a') + reread = read_hdf(path) + assert_frame_equal(df, reread) + df.to_hdf(path, 'df2', mode='a') + pytest.raises(ValueError, read_hdf, path) + + +def test_read_nokey_table(): + # GH13231 + df = DataFrame({'i': range(5), + 'c': Series(list('abacd'), dtype='category')}) + + with ensure_clean_path() as path: + df.to_hdf(path, 'df', mode='a', format='table') + reread = read_hdf(path) + assert_frame_equal(df, reread) + df.to_hdf(path, 'df2', mode='a', format='table') + pytest.raises(ValueError, read_hdf, path) + + +def test_read_nokey_empty(): + with ensure_clean_path() as path: + store = HDFStore(path) + store.close() + pytest.raises(ValueError, read_hdf, path) + + +@td.skip_if_no('pathlib') +def test_read_from_pathlib_path(): + # GH11773 + Path = pytest.importorskip('pathlib').Path + expected = DataFrame(np.random.rand(4, 5), + index=list('abcd'), + columns=list('ABCDE')) + with ensure_clean_path() as filename: + path_obj = Path(filename) + + expected.to_hdf(path_obj, 'df', mode='a') + actual = read_hdf(path_obj, 'df') + + tm.assert_frame_equal(expected, actual) + + +@td.skip_if_no('py.path') +def test_read_from_py_localpath(): + # GH11773 + LocalPath = pytest.importorskip('py.path').local + expected = DataFrame(np.random.rand(4, 5), + index=list('abcd'), + columns=list('ABCDE')) + with ensure_clean_path() as filename: + path_obj = LocalPath(filename) + + expected.to_hdf(path_obj, 'df', mode='a') + actual = read_hdf(path_obj, 'df') + + tm.assert_frame_equal(expected, actual) + + +@pytest.mark.parametrize('format', ['fixed', 'table']) +def test_read_hdf_series_mode_r(format): + # GH 16583 + # Tests that reading a Series saved to an HDF file + # still works if a mode='r' argument is supplied + series = tm.makeFloatSeries() + with ensure_clean_path() as path: + series.to_hdf(path, key='data', format=format) + result = pd.read_hdf(path, key='data', mode='r') + tm.assert_series_equal(result, series) + + +def test_read_py2_hdf_file_in_py3(): + # GH 16781 + # tests reading a PeriodIndex DataFrame written in Python2 in Python3 + # the file was generated in Python 2.7 like so: + # df = pd.DataFrame([1.,2,3], index=pd.PeriodIndex( + # ['2015-01-01', '2015-01-02', '2015-01-05'], freq='B')) + # df.to_hdf('periodindex_0.20.1_x86_64_darwin_2.7.13.h5', 'p') + expected = pd.DataFrame([1., 2, 3], index=pd.PeriodIndex( + ['2015-01-01', '2015-01-02', '2015-01-05'], freq='B')) + + with ensure_clean_store( + tm.get_data_path( + 'legacy_hdf/periodindex_0.20.1_x86_64_darwin_2.7.13.h5'), + mode='r') as store: + result = store['p'] + assert_frame_equal(result, expected) + + +def test_read_column(): + df = tm.makeTimeDataFrame() + with ensure_clean_store() as store: + _maybe_remove(store, 'df') + store.append('df', df) + + # error + pytest.raises(KeyError, store.select_column, 'df', 'foo') + + def f(): + store.select_column('df', 'index', where=['index>5']) + pytest.raises(Exception, f) + + # valid + result = store.select_column('df', 'index') + tm.assert_almost_equal(result.values, Series(df.index).values) + assert isinstance(result, Series) + + # not a data indexable column + pytest.raises( + ValueError, store.select_column, 'df', 'values_block_0') + + # a data column + df2 = df.copy() + df2['string'] = 'foo' + store.append('df2', df2, data_columns=['string']) + result = store.select_column('df2', 'string') + 
tm.assert_almost_equal(result.values, df2['string'].values) + + # a data column with NaNs, result excludes the NaNs + df3 = df.copy() + df3['string'] = 'foo' + df3.loc[4:6, 'string'] = np.nan + store.append('df3', df3, data_columns=['string']) + result = store.select_column('df3', 'string') + tm.assert_almost_equal(result.values, df3['string'].values) + + # start/stop + result = store.select_column('df3', 'string', start=2) + tm.assert_almost_equal(result.values, df3['string'].values[2:]) + + result = store.select_column('df3', 'string', start=-2) + tm.assert_almost_equal(result.values, df3['string'].values[-2:]) + + result = store.select_column('df3', 'string', stop=2) + tm.assert_almost_equal(result.values, df3['string'].values[:2]) + + result = store.select_column('df3', 'string', stop=-2) + tm.assert_almost_equal(result.values, df3['string'].values[:-2]) + + result = store.select_column('df3', 'string', start=2, stop=-2) + tm.assert_almost_equal(result.values, df3['string'].values[2:-2]) + + result = store.select_column('df3', 'string', start=-2, stop=2) + tm.assert_almost_equal(result.values, df3['string'].values[-2:2]) + + # GH 10392 - make sure column name is preserved + df4 = DataFrame({'A': np.random.randn(10), 'B': 'foo'}) + store.append('df4', df4, data_columns=True) + expected = df4['B'] + result = store.select_column('df4', 'B') + tm.assert_series_equal(result, expected) + + +def test_pytables_native_read(): + with ensure_clean_store( + tm.get_data_path('legacy_hdf/pytables_native.h5'), + mode='r') as store: + d2 = store['detector/readout'] + assert isinstance(d2, DataFrame) + + +@pytest.mark.skipif(PY35 and is_platform_windows(), + reason="native2 read fails oddly on windows / 3.5") +def test_pytables_native2_read(): + with ensure_clean_store( + tm.get_data_path('legacy_hdf/pytables_native2.h5'), + mode='r') as store: + str(store) + d1 = store['detector'] + assert isinstance(d1, DataFrame) + + +def test_legacy_table_read(): + # legacy table types + with ensure_clean_store( + tm.get_data_path('legacy_hdf/legacy_table.h5'), + mode='r') as store: + + with catch_warnings(record=True): + store.select('df1') + store.select('df2') + store.select('wp1') + + # force the frame + store.select('df2', typ='legacy_frame') + + # old version warning + pytest.raises( + Exception, store.select, 'wp1', 'minor_axis=B') + + df2 = store.select('df2') + result = store.select('df2', 'index>df2.index[2]') + expected = df2[df2.index > df2.index[2]] + assert_frame_equal(expected, result) + + +def test_select_columns_in_where(): + # GH 6169 + # recreate multi-indexes when columns is passed + # in the `where` argument + index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'], + ['one', 'two', 'three']], + labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], + [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]], + names=['foo_name', 'bar_name']) + + # With a DataFrame + df = DataFrame(np.random.randn(10, 3), index=index, + columns=['A', 'B', 'C']) + + with ensure_clean_store() as store: + store.put('df', df, format='table') + expected = df[['A']] + + tm.assert_frame_equal(store.select('df', columns=['A']), expected) + + tm.assert_frame_equal(store.select( + 'df', where="columns=['A']"), expected) + + # With a Series + s = Series(np.random.randn(10), index=index, + name='A') + with ensure_clean_store() as store: + store.put('s', s, format='table') + tm.assert_series_equal(store.select('s', where="columns=['A']"), s) + + +def test_select_with_dups(): + # single dtypes + df = DataFrame(np.random.randn(10, 4), columns=['A', 'A', 'B', 'B']) + 
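+ # duplicate column names should survive the table round-trip; the + # frames are compared block-wise below (by_blocks=True)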
df.index = date_range('20130101 9:30', periods=10, freq='T') + + with ensure_clean_store() as store: + store.append('df', df) + + result = store.select('df') + expected = df + assert_frame_equal(result, expected, by_blocks=True) + + result = store.select('df', columns=df.columns) + expected = df + assert_frame_equal(result, expected, by_blocks=True) + + result = store.select('df', columns=['A']) + expected = df.loc[:, ['A']] + assert_frame_equal(result, expected) + + # dups across dtypes + df = concat([DataFrame(np.random.randn(10, 4), + columns=['A', 'A', 'B', 'B']), + DataFrame(np.random.randint(0, 10, size=20) + .reshape(10, 2), + columns=['A', 'C'])], + axis=1) + df.index = date_range('20130101 9:30', periods=10, freq='T') + + with ensure_clean_store() as store: + store.append('df', df) + + result = store.select('df') + expected = df + assert_frame_equal(result, expected, by_blocks=True) + + result = store.select('df', columns=df.columns) + expected = df + assert_frame_equal(result, expected, by_blocks=True) + + expected = df.loc[:, ['A']] + result = store.select('df', columns=['A']) + assert_frame_equal(result, expected, by_blocks=True) + + expected = df.loc[:, ['B', 'A']] + result = store.select('df', columns=['B', 'A']) + assert_frame_equal(result, expected, by_blocks=True) + + # duplicates on both index and columns + with ensure_clean_store() as store: + store.append('df', df) + store.append('df', df) + + expected = df.loc[:, ['B', 'A']] + expected = concat([expected, expected]) + result = store.select('df', columns=['B', 'A']) + assert_frame_equal(result, expected, by_blocks=True) + + +def test_select(): + with ensure_clean_store() as store: + with catch_warnings(record=True): + wp = tm.makePanel() + + # put/select ok + _maybe_remove(store, 'wp') + store.put('wp', wp, format='table') + store.select('wp') + + # non-table ok (where = None) + _maybe_remove(store, 'wp') + store.put('wp2', wp) + store.select('wp2') + + # selection on the non-indexable with a large number of columns + wp = Panel(np.random.randn(100, 100, 100), + items=['Item%03d' % i for i in range(100)], + major_axis=date_range('1/1/2000', periods=100), + minor_axis=['E%03d' % i for i in range(100)]) + + _maybe_remove(store, 'wp') + store.append('wp', wp) + items = ['Item%03d' % i for i in range(80)] + result = store.select('wp', 'items=items') + expected = wp.reindex(items=items) + tm.assert_panel_equal(expected, result) + + # selecting non-table with a where + # pytest.raises(ValueError, store.select, + # 'wp2', ('column', ['A', 'D'])) + + # select with columns= + df = tm.makeTimeDataFrame() + _maybe_remove(store, 'df') + store.append('df', df) + result = store.select('df', columns=['A', 'B']) + expected = df.reindex(columns=['A', 'B']) + tm.assert_frame_equal(expected, result) + + # equivalently + result = store.select('df', [("columns=['A', 'B']")]) + expected = df.reindex(columns=['A', 'B']) + tm.assert_frame_equal(expected, result) + + # with a data column + _maybe_remove(store, 'df') + store.append('df', df, data_columns=['A']) + result = store.select('df', ['A > 0'], columns=['A', 'B']) + expected = df[df.A > 0].reindex(columns=['A', 'B']) + tm.assert_frame_equal(expected, result) + + # all columns as data columns + _maybe_remove(store, 'df') + store.append('df', df, data_columns=True) + result = store.select('df', ['A > 0'], columns=['A', 'B']) + expected = df[df.A > 0].reindex(columns=['A', 'B']) + tm.assert_frame_equal(expected, result) + + # with a data column, but different columns + _maybe_remove(store, 'df') +
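+ # the where clause can still filter on data column 'A' even though + # 'A' is not among the requested columns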
store.append('df', df, data_columns=['A']) + result = store.select('df', ['A > 0'], columns=['C', 'D']) + expected = df[df.A > 0].reindex(columns=['C', 'D']) + tm.assert_frame_equal(expected, result) + + +def test_select_dtypes(): + with ensure_clean_store() as store: + # with a Timestamp data column (GH #2637) + df = DataFrame(dict( + ts=bdate_range('2012-01-01', periods=300), + A=np.random.randn(300))) + _maybe_remove(store, 'df') + store.append('df', df, data_columns=['ts', 'A']) + + result = store.select('df', "ts>=Timestamp('2012-02-01')") + expected = df[df.ts >= Timestamp('2012-02-01')] + tm.assert_frame_equal(expected, result) + + # bool columns (GH #2849) + df = DataFrame(np.random.randn(5, 2), columns=['A', 'B']) + df['object'] = 'foo' + df.loc[4:5, 'object'] = 'bar' + df['boolv'] = df['A'] > 0 + _maybe_remove(store, 'df') + store.append('df', df, data_columns=True) + + expected = (df[df.boolv == True] # noqa + .reindex(columns=['A', 'boolv'])) + for v in [True, 'true', 1]: + result = store.select('df', 'boolv == %s' % str(v), + columns=['A', 'boolv']) + tm.assert_frame_equal(expected, result) + + expected = (df[df.boolv == False] # noqa + .reindex(columns=['A', 'boolv'])) + for v in [False, 'false', 0]: + result = store.select( + 'df', 'boolv == %s' % str(v), columns=['A', 'boolv']) + tm.assert_frame_equal(expected, result) + + # integer index + df = DataFrame(dict(A=np.random.rand(20), B=np.random.rand(20))) + _maybe_remove(store, 'df_int') + store.append('df_int', df) + result = store.select( + 'df_int', "index<10 and columns=['A']") + expected = df.reindex(index=list(df.index)[0:10], columns=['A']) + tm.assert_frame_equal(expected, result) + + # float index + df = DataFrame(dict(A=np.random.rand( + 20), B=np.random.rand(20), index=np.arange(20, dtype='f8'))) + _maybe_remove(store, 'df_float') + store.append('df_float', df) + result = store.select( + 'df_float', "index<10.0 and columns=['A']") + expected = df.reindex(index=list(df.index)[0:10], columns=['A']) + tm.assert_frame_equal(expected, result) + + with ensure_clean_store() as store: + # floats w/o NaN + df = DataFrame( + dict(cols=range(11), values=range(11)), dtype='float64') + df['cols'] = (df['cols'] + 10).apply(str) + + store.append('df1', df, data_columns=True) + result = store.select( + 'df1', where='values>2.0') + expected = df[df['values'] > 2.0] + tm.assert_frame_equal(expected, result) + + # floats with NaN + df.iloc[0] = np.nan + expected = df[df['values'] > 2.0] + + store.append('df2', df, data_columns=True, index=False) + result = store.select( + 'df2', where='values>2.0') + tm.assert_frame_equal(expected, result) + + # https://github.com/PyTables/PyTables/issues/282 + # bug in selection when 0th row has a np.nan and an index + # store.append('df3',df,data_columns=True) + # result = store.select( + # 'df3', where='values>2.0') + # tm.assert_frame_equal(expected, result) + + # not in first position float with NaN ok too + df = DataFrame( + dict(cols=range(11), values=range(11)), dtype='float64') + df['cols'] = (df['cols'] + 10).apply(str) + + df.iloc[1] = np.nan + expected = df[df['values'] > 2.0] + + store.append('df4', df, data_columns=True) + result = store.select( + 'df4', where='values>2.0') + tm.assert_frame_equal(expected, result) + + # test selection with comparison against numpy scalar + # GH 11283 + with ensure_clean_store() as store: + df = tm.makeDataFrame() + + expected = df[df['A'] > 0] + + store.append('df', df, data_columns=True) + np_zero = np.float64(0) # noqa + result = store.select('df', 
where=["A>np_zero"]) + tm.assert_frame_equal(expected, result) + + +def test_select_with_many_inputs(): + with ensure_clean_store() as store: + df = DataFrame(dict(ts=bdate_range('2012-01-01', periods=300), + A=np.random.randn(300), + B=range(300), + users=['a'] * 50 + ['b'] * 50 + ['c'] * 100 + + ['a%03d' % i for i in range(100)])) + _maybe_remove(store, 'df') + store.append('df', df, data_columns=['ts', 'A', 'B', 'users']) + + # regular select + result = store.select('df', "ts>=Timestamp('2012-02-01')") + expected = df[df.ts >= Timestamp('2012-02-01')] + tm.assert_frame_equal(expected, result) + + # small selector + result = store.select( + 'df', + "ts>=Timestamp('2012-02-01') & users=['a','b','c']") + expected = df[(df.ts >= Timestamp('2012-02-01')) & + df.users.isin(['a', 'b', 'c'])] + tm.assert_frame_equal(expected, result) + + # big selector along the columns + selector = ['a', 'b', 'c'] + ['a%03d' % i for i in range(60)] + result = store.select( + 'df', + "ts>=Timestamp('2012-02-01') and users=selector") + expected = df[(df.ts >= Timestamp('2012-02-01')) & + df.users.isin(selector)] + tm.assert_frame_equal(expected, result) + + selector = range(100, 200) + result = store.select('df', 'B=selector') + expected = df[df.B.isin(selector)] + tm.assert_frame_equal(expected, result) + assert len(result) == 100 + + # big selector along the index + selector = Index(df.ts[0:100].values) + result = store.select('df', 'ts=selector') + expected = df[df.ts.isin(selector.values)] + tm.assert_frame_equal(expected, result) + assert len(result) == 100 + + +def test_select_iterator(): + # single table + with ensure_clean_store() as store: + df = tm.makeTimeDataFrame(500) + _maybe_remove(store, 'df') + store.append('df', df) + + expected = store.select('df') + + results = [s for s in store.select('df', iterator=True)] + result = concat(results) + tm.assert_frame_equal(expected, result) + + results = [s for s in store.select('df', chunksize=100)] + assert len(results) == 5 + result = concat(results) + tm.assert_frame_equal(expected, result) + + results = [s for s in store.select('df', chunksize=150)] + result = concat(results) + tm.assert_frame_equal(result, expected) + + with ensure_clean_path() as path: + df = tm.makeTimeDataFrame(500) + df.to_hdf(path, 'df_non_table') + pytest.raises(TypeError, read_hdf, path, + 'df_non_table', chunksize=100) + pytest.raises(TypeError, read_hdf, path, + 'df_non_table', iterator=True) + + with ensure_clean_path() as path: + df = tm.makeTimeDataFrame(500) + df.to_hdf(path, 'df', format='table') + + results = [s for s in read_hdf(path, 'df', chunksize=100)] + result = concat(results) + + assert len(results) == 5 + tm.assert_frame_equal(result, df) + tm.assert_frame_equal(result, read_hdf(path, 'df')) + + # multiple + with ensure_clean_store() as store: + df1 = tm.makeTimeDataFrame(500) + store.append('df1', df1, data_columns=True) + df2 = tm.makeTimeDataFrame(500).rename( + columns=lambda x: "%s_2" % x) + df2['foo'] = 'bar' + store.append('df2', df2) + + df = concat([df1, df2], axis=1) + + # full selection + expected = store.select_as_multiple( + ['df1', 'df2'], selector='df1') + results = [s for s in store.select_as_multiple( + ['df1', 'df2'], selector='df1', chunksize=150)] + result = concat(results) + tm.assert_frame_equal(expected, result) + + +def test_select_iterator_complete_8014(): + # GH 8014 + # using iterator and where clause + chunksize = 1e4 + + # no iterator + with ensure_clean_store() as store: + expected = tm.makeTimeDataFrame(100064, 'S') + 
_maybe_remove(store, 'df') + store.append('df', expected) + + beg_dt = expected.index[0] + end_dt = expected.index[-1] + + # select w/o iteration and no where clause works + result = store.select('df') + tm.assert_frame_equal(expected, result) + + # select w/o iterator and where clause, single term, begin + # of range, works + where = "index >= '%s'" % beg_dt + result = store.select('df', where=where) + tm.assert_frame_equal(expected, result) + + # select w/o iterator and where clause, single term, end + # of range, works + where = "index <= '%s'" % end_dt + result = store.select('df', where=where) + tm.assert_frame_equal(expected, result) + + # select w/o iterator and where clause, inclusive range, + # works + where = "index >= '%s' & index <= '%s'" % (beg_dt, end_dt) + result = store.select('df', where=where) + tm.assert_frame_equal(expected, result) + + # with iterator, full range + with ensure_clean_store() as store: + expected = tm.makeTimeDataFrame(100064, 'S') + _maybe_remove(store, 'df') + store.append('df', expected) + + beg_dt = expected.index[0] + end_dt = expected.index[-1] + + # select w/iterator and no where clause works + results = [s for s in store.select('df', chunksize=chunksize)] + result = concat(results) + tm.assert_frame_equal(expected, result) + + # select w/iterator and where clause, single term, begin of range + where = "index >= '%s'" % beg_dt + results = [s for s in store.select( + 'df', where=where, chunksize=chunksize)] + result = concat(results) + tm.assert_frame_equal(expected, result) + + # select w/iterator and where clause, single term, end of range + where = "index <= '%s'" % end_dt + results = [s for s in store.select( + 'df', where=where, chunksize=chunksize)] + result = concat(results) + tm.assert_frame_equal(expected, result) + + # select w/iterator and where clause, inclusive range + where = "index >= '%s' & index <= '%s'" % (beg_dt, end_dt) + results = [s for s in store.select( + 'df', where=where, chunksize=chunksize)] + result = concat(results) + tm.assert_frame_equal(expected, result) + + +def test_select_iterator_non_complete_8014(): + # GH 8014 + # using iterator and where clause + chunksize = 1e4 + + # with iterator, non complete range + with ensure_clean_store() as store: + expected = tm.makeTimeDataFrame(100064, 'S') + _maybe_remove(store, 'df') + store.append('df', expected) + + beg_dt = expected.index[1] + end_dt = expected.index[-2] + + # select w/iterator and where clause, single term, begin of range + where = "index >= '%s'" % beg_dt + results = [s for s in store.select( + 'df', where=where, chunksize=chunksize)] + result = concat(results) + rexpected = expected[expected.index >= beg_dt] + tm.assert_frame_equal(rexpected, result) + + # select w/iterator and where clause, single term, end of range + where = "index <= '%s'" % end_dt + results = [s for s in store.select( + 'df', where=where, chunksize=chunksize)] + result = concat(results) + rexpected = expected[expected.index <= end_dt] + tm.assert_frame_equal(rexpected, result) + + # select w/iterator and where clause, inclusive range + where = "index >= '%s' & index <= '%s'" % (beg_dt, end_dt) + results = [s for s in store.select( + 'df', where=where, chunksize=chunksize)] + result = concat(results) + rexpected = expected[(expected.index >= beg_dt) & + (expected.index <= end_dt)] + tm.assert_frame_equal(rexpected, result) + + # with iterator, empty where + with ensure_clean_store() as store: + expected = tm.makeTimeDataFrame(100064, 'S') + _maybe_remove(store, 'df') + store.append('df', 
expected) + + end_dt = expected.index[-1] + + # select w/iterator and where clause, single term, begin of range + where = "index > '%s'" % end_dt + results = [s for s in store.select( + 'df', where=where, chunksize=chunksize)] + assert 0 == len(results) + + +def test_select_iterator_many_empty_frames(): + # GH 8014 + # using iterator and where clause can return many empty + # frames. + chunksize = int(1e4) + + # with iterator, range limited to the first chunk + with ensure_clean_store() as store: + expected = tm.makeTimeDataFrame(100000, 'S') + _maybe_remove(store, 'df') + store.append('df', expected) + + beg_dt = expected.index[0] + end_dt = expected.index[chunksize - 1] + + # select w/iterator and where clause, single term, begin of range + where = "index >= '%s'" % beg_dt + results = [s for s in store.select( + 'df', where=where, chunksize=chunksize)] + result = concat(results) + rexpected = expected[expected.index >= beg_dt] + tm.assert_frame_equal(rexpected, result) + + # select w/iterator and where clause, single term, end of range + where = "index <= '%s'" % end_dt + results = [s for s in store.select( + 'df', where=where, chunksize=chunksize)] + + assert len(results) == 1 + result = concat(results) + rexpected = expected[expected.index <= end_dt] + tm.assert_frame_equal(rexpected, result) + + # select w/iterator and where clause, inclusive range + where = "index >= '%s' & index <= '%s'" % (beg_dt, end_dt) + results = [s for s in store.select( + 'df', where=where, chunksize=chunksize)] + + # should be 1, is 10 + assert len(results) == 1 + result = concat(results) + rexpected = expected[(expected.index >= beg_dt) & + (expected.index <= end_dt)] + tm.assert_frame_equal(rexpected, result) + + # select w/iterator and where clause which selects + # *nothing*. + # + # To be consistent with Python idiom I suggest this should + # return [] e.g. `for e in []: print True` never prints + # True. 
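+ # the bounds below are deliberately inverted (index <= beg_dt and + # index >= end_dt), so the selection is empty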
+ + where = "index <= '%s' & index >= '%s'" % (beg_dt, end_dt) + results = [s for s in store.select( + 'df', where=where, chunksize=chunksize)] + + # should be [] + assert len(results) == 0 + + +def test_select_as_multiple(): + df1 = tm.makeTimeDataFrame() + df2 = tm.makeTimeDataFrame().rename(columns=lambda x: "%s_2" % x) + df2['foo'] = 'bar' + + with ensure_clean_store() as store: + # no tables stored + pytest.raises(Exception, store.select_as_multiple, + None, where=['A>0', 'B>0'], selector='df1') + + store.append('df1', df1, data_columns=['A', 'B']) + store.append('df2', df2) + + # exceptions + pytest.raises(Exception, store.select_as_multiple, + None, where=['A>0', 'B>0'], selector='df1') + pytest.raises(Exception, store.select_as_multiple, + [None], where=['A>0', 'B>0'], selector='df1') + pytest.raises(KeyError, store.select_as_multiple, + ['df1', 'df3'], where=['A>0', 'B>0'], + selector='df1') + pytest.raises(KeyError, store.select_as_multiple, + ['df3'], where=['A>0', 'B>0'], selector='df1') + pytest.raises(KeyError, store.select_as_multiple, + ['df1', 'df2'], where=['A>0', 'B>0'], + selector='df4') + + # default select + result = store.select('df1', ['A>0', 'B>0']) + expected = store.select_as_multiple( + ['df1'], where=['A>0', 'B>0'], selector='df1') + tm.assert_frame_equal(result, expected) + expected = store.select_as_multiple( + 'df1', where=['A>0', 'B>0'], selector='df1') + tm.assert_frame_equal(result, expected) + + # multiple + result = store.select_as_multiple( + ['df1', 'df2'], where=['A>0', 'B>0'], selector='df1') + expected = concat([df1, df2], axis=1) + expected = expected[(expected.A > 0) & (expected.B > 0)] + tm.assert_frame_equal(result, expected) + + # multiple (diff selector) + result = store.select_as_multiple( + ['df1', 'df2'], where='index>df2.index[4]', selector='df2') + expected = concat([df1, df2], axis=1) + expected = expected[5:] + tm.assert_frame_equal(result, expected) + + # test excpection for diff rows + store.append('df3', tm.makeTimeDataFrame(nper=50)) + pytest.raises(ValueError, store.select_as_multiple, + ['df1', 'df3'], where=['A>0', 'B>0'], + selector='df1') + + +def test_select_filter_corner(): + df = DataFrame(np.random.randn(50, 100)) + df.index = ['%.3d' % c for c in df.index] + df.columns = ['%.3d' % c for c in df.columns] + + with ensure_clean_store() as store: + store.put('frame', df, format='table') + + crit = 'columns=df.columns[:75]' + result = store.select('frame', [crit]) + tm.assert_frame_equal(result, df.loc[:, df.columns[:75]]) + + crit = 'columns=df.columns[:75:2]' + result = store.select('frame', [crit]) + tm.assert_frame_equal(result, df.loc[:, df.columns[:75:2]]) + + +def test_panel_select(): + with ensure_clean_store() as store: + with catch_warnings(record=True): + wp = tm.makePanel() + + store.put('wp', wp, format='table') + date = wp.major_axis[len(wp.major_axis) // 2] + + crit1 = ('major_axis>=date') + crit2 = ("minor_axis=['A', 'D']") + + result = store.select('wp', [crit1, crit2]) + expected = wp.truncate(before=date).reindex(minor=['A', 'D']) + tm.assert_panel_equal(result, expected) + + result = store.select( + 'wp', ['major_axis>="20000124"', + ("minor_axis=['A', 'B']")]) + expected = wp.truncate( + before='20000124').reindex(minor=['A', 'B']) + tm.assert_panel_equal(result, expected) + + +def test_frame_select(): + df = tm.makeTimeDataFrame() + + with ensure_clean_store() as store: + store.put('frame', df, format='table') + date = df.index[len(df) // 2] + + crit1 = Term('index>=date') + assert crit1.env.scope['date'] 
== date + + crit2 = ("columns=['A', 'D']") + crit3 = ('columns=A') + + result = store.select('frame', [crit1, crit2]) + expected = df.loc[date:, ['A', 'D']] + tm.assert_frame_equal(result, expected) + + result = store.select('frame', [crit3]) + expected = df.loc[:, ['A']] + tm.assert_frame_equal(result, expected) + + # invalid terms + df = tm.makeTimeDataFrame() + store.append('df_time', df) + pytest.raises(ValueError, store.select, 'df_time', "index>0") + + +def test_frame_select_complex(): + # select via complex criteria + df = tm.makeTimeDataFrame() + df['string'] = 'foo' + df.loc[df.index[0:4], 'string'] = 'bar' + + with ensure_clean_store() as store: + store.put('df', df, format='table', data_columns=['string']) + + # empty + result = store.select('df', 'index>df.index[3] & string="bar"') + expected = df.loc[(df.index > df.index[3]) & (df.string == 'bar')] + tm.assert_frame_equal(result, expected) + + result = store.select('df', 'index>df.index[3] & string="foo"') + expected = df.loc[(df.index > df.index[3]) & (df.string == 'foo')] + tm.assert_frame_equal(result, expected) + + # or + result = store.select('df', 'index>df.index[3] | string="bar"') + expected = df.loc[(df.index > df.index[3]) | (df.string == 'bar')] + tm.assert_frame_equal(result, expected) + + result = store.select('df', '(index>df.index[3] & ' + 'index<=df.index[6]) | string="bar"') + expected = df.loc[((df.index > df.index[3]) & ( + df.index <= df.index[6])) | (df.string == 'bar')] + tm.assert_frame_equal(result, expected) + + # invert + result = store.select('df', 'string!="bar"') + expected = df.loc[df.string != 'bar'] + tm.assert_frame_equal(result, expected) + + # invert not implemented in numexpr :( + pytest.raises(NotImplementedError, + store.select, 'df', '~(string="bar")') + + # invert ok for filters + result = store.select('df', "~(columns=['A','B'])") + expected = df.loc[:, df.columns.difference(['A', 'B'])] + tm.assert_frame_equal(result, expected) + + # in + result = store.select( + 'df', "index>df.index[3] & columns in ['A','B']") + expected = df.loc[df.index > df.index[3]].reindex(columns=[ + 'A', 'B']) + tm.assert_frame_equal(result, expected) + + +def test_start_stop_table(): + with ensure_clean_store() as store: + # table + df = DataFrame(dict(A=np.random.rand(20), B=np.random.rand(20))) + store.append('df', df) + + result = store.select( + 'df', "columns=['A']", start=0, stop=5) + expected = df.loc[0:4, ['A']] + tm.assert_frame_equal(result, expected) + + # out of range + result = store.select( + 'df', "columns=['A']", start=30, stop=40) + assert len(result) == 0 + expected = df.loc[30:40, ['A']] + tm.assert_frame_equal(result, expected) + + +def test_start_stop_multiple(): + # GH 16209 + with ensure_clean_store() as store: + df = DataFrame({"foo": [1, 2], "bar": [1, 2]}) + + store.append_to_multiple({'selector': ['foo'], 'data': None}, df, + selector='selector') + result = store.select_as_multiple(['selector', 'data'], + selector='selector', start=0, + stop=1) + expected = df.loc[[0], ['foo', 'bar']] + tm.assert_frame_equal(result, expected) + + +def test_start_stop_fixed(): + with ensure_clean_store() as store: + # fixed, GH 8287 + df = DataFrame(dict(A=np.random.rand(20), + B=np.random.rand(20)), + index=pd.date_range('20130101', periods=20)) + store.put('df', df) + + result = store.select( + 'df', start=0, stop=5) + expected = df.iloc[0:5, :] + tm.assert_frame_equal(result, expected) + + result = store.select( + 'df', start=5, stop=10) + expected = df.iloc[5:10, :] + 
tm.assert_frame_equal(result, expected) + + # out of range + result = store.select( + 'df', start=30, stop=40) + expected = df.iloc[30:40, :] + tm.assert_frame_equal(result, expected) + + # series + s = df.A + store.put('s', s) + result = store.select('s', start=0, stop=5) + expected = s.iloc[0:5] + tm.assert_series_equal(result, expected) + + result = store.select('s', start=5, stop=10) + expected = s.iloc[5:10] + tm.assert_series_equal(result, expected) + + # sparse; not implemented + df = tm.makeDataFrame() + df.iloc[3:5, 1:3] = np.nan + df.iloc[8:10, -2] = np.nan + dfs = df.to_sparse() + store.put('dfs', dfs) + with pytest.raises(NotImplementedError): + store.select('dfs', start=0, stop=5) + + +def test_contains(): + with ensure_clean_store() as store: + store['a'] = tm.makeTimeSeries() + store['b'] = tm.makeDataFrame() + store['foo/bar'] = tm.makeDataFrame() + assert 'a' in store + assert 'b' in store + assert 'c' not in store + assert 'foo/bar' in store + assert '/foo/bar' in store + assert '/foo/b' not in store + assert 'bar' not in store + + # gh-2694: tables.NaturalNameWarning + with catch_warnings(record=True): + store['node())'] = tm.makeDataFrame() + assert 'node())' in store + + +def test_get(): + with ensure_clean_store() as store: + store['a'] = tm.makeTimeSeries() + left = store.get('a') + right = store['a'] + tm.assert_series_equal(left, right) + + left = store.get('/a') + right = store['/a'] + tm.assert_series_equal(left, right) + + pytest.raises(KeyError, store.get, 'b') + + +def test_query_with_nested_special_character(): + df = DataFrame({'a': ['a', 'a', 'c', 'b', + 'test & test', 'c', 'b', 'e'], + 'b': [1, 2, 3, 4, 5, 6, 7, 8]}) + expected = df[df.a == 'test & test'] + with ensure_clean_store() as store: + store.append('test', df, format='table', data_columns=True) + result = store.select('test', 'a = "test & test"') + tm.assert_frame_equal(expected, result) + + +def test_query_long_float_literal(): + # GH 14241 + df = pd.DataFrame({'A': [1000000000.0009, + 1000000000.0011, + 1000000000.0015]}) + + with ensure_clean_store() as store: + store.append('test', df, format='table', data_columns=True) + + cutoff = 1000000000.0006 + result = store.select('test', "A < %.4f" % cutoff) + assert result.empty + + cutoff = 1000000000.0010 + result = store.select('test', "A > %.4f" % cutoff) + expected = df.loc[[1, 2], :] + tm.assert_frame_equal(expected, result) + + exact = 1000000000.0011 + result = store.select('test', 'A == %.4f' % exact) + expected = df.loc[[1], :] + tm.assert_frame_equal(expected, result) + + +def test_query_compare_column_type(): + # GH 15492 + df = pd.DataFrame({'date': ['2014-01-01', '2014-01-02'], + 'real_date': date_range('2014-01-01', periods=2), + 'float': [1.1, 1.2], + 'int': [1, 2]}, + columns=['date', 'real_date', 'float', 'int']) + + with ensure_clean_store() as store: + store.append('test', df, format='table', data_columns=True) + + ts = pd.Timestamp('2014-01-01') # noqa + result = store.select('test', where='real_date > ts') + expected = df.loc[[1], :] + tm.assert_frame_equal(expected, result) + + for op in ['<', '>', '==']: + # non strings to string column always fail + for v in [2.1, True, pd.Timestamp('2014-01-01'), + pd.Timedelta(1, 's')]: + query = 'date {op} v'.format(op=op) + with pytest.raises(TypeError): + result = store.select('test', where=query) + + # strings to other columns must be convertible to type + v = 'a' + for col in ['int', 'float', 'real_date']: + query = '{col} {op} v'.format(op=op, col=col) + with 
pytest.raises(ValueError): + result = store.select('test', where=query) + + for v, col in zip(['1', '1.1', '2014-01-01'], + ['int', 'float', 'real_date']): + query = '{col} {op} v'.format(op=op, col=col) + result = store.select('test', where=query) + + if op == '==': + expected = df.loc[[0], :] + elif op == '>': + expected = df.loc[[1], :] + else: + expected = df.loc[[], :] + tm.assert_frame_equal(expected, result) + + +def test_string_select(): + # GH 2973 + with ensure_clean_store() as store: + df = tm.makeTimeDataFrame() + + # test string ==/!= + df['x'] = 'none' + df.loc[2:7, 'x'] = '' + + store.append('df', df, data_columns=['x']) + + result = store.select('df', 'x=none') + expected = df[df.x == 'none'] + assert_frame_equal(result, expected) + + try: + result = store.select('df', 'x!=none') + expected = df[df.x != 'none'] + assert_frame_equal(result, expected) + except Exception as detail: + pprint_thing("[{0}]".format(detail)) + pprint_thing(store) + pprint_thing(expected) + + df2 = df.copy() + df2.loc[df2.x == '', 'x'] = np.nan + + store.append('df2', df2, data_columns=['x']) + result = store.select('df2', 'x!=none') + expected = df2[isna(df2.x)] + assert_frame_equal(result, expected) + + # int ==/!= + df['int'] = 1 + df.loc[2:7, 'int'] = 2 + + store.append('df3', df, data_columns=['int']) + + result = store.select('df3', 'int=2') + expected = df[df.int == 2] + assert_frame_equal(result, expected) + + result = store.select('df3', 'int!=2') + expected = df[df.int != 2] + assert_frame_equal(result, expected) + + +@pytest.mark.skipif( + LooseVersion(tables.__version__) < LooseVersion('3.1.0'), + reason=("tables version does not support fix for nan selection " + "bug: GH 4858")) +def test_nan_selection_bug_4858(): + with ensure_clean_store() as store: + df = DataFrame(dict(cols=range(6), values=range(6)), + dtype='float64') + df['cols'] = (df['cols'] + 10).apply(str) + df.iloc[0] = np.nan + + expected = DataFrame(dict(cols=['13.0', '14.0', '15.0'], values=[ + 3., 4., 5.]), index=[3, 4, 5]) + + # write w/o the index on that particular column + store.append('df', df, data_columns=True, index=['cols']) + result = store.select('df', where='values>2.0') + assert_frame_equal(result, expected) diff --git a/pandas/tests/io/pytables/test_sparse.py b/pandas/tests/io/pytables/test_sparse.py new file mode 100644 index 0000000000000..244f9dc73e7ce --- /dev/null +++ b/pandas/tests/io/pytables/test_sparse.py @@ -0,0 +1,64 @@ +import numpy as np + +from pandas import DataFrame +import pandas.util.testing as tm +from .common import _check_roundtrip, _check_double_roundtrip + + +def test_sparse_series(): + s = tm.makeStringSeries() + s.iloc[3:5] = np.nan + ss = s.to_sparse() + _check_roundtrip(ss, tm.assert_series_equal, check_series_type=True) + + ss2 = s.to_sparse(kind='integer') + _check_roundtrip(ss2, tm.assert_series_equal, check_series_type=True) + + ss3 = s.to_sparse(fill_value=0) + _check_roundtrip(ss3, tm.assert_series_equal, check_series_type=True) + + +def test_sparse_frame(): + s = tm.makeDataFrame() + s.iloc[3:5, 1:3] = np.nan + s.iloc[8:10, -2] = np.nan + ss = s.to_sparse() + + _check_double_roundtrip(ss, tm.assert_frame_equal, check_frame_type=True) + + ss2 = s.to_sparse(kind='integer') + _check_double_roundtrip(ss2, tm.assert_frame_equal, check_frame_type=True) + + ss3 = s.to_sparse(fill_value=0) + _check_double_roundtrip(ss3, tm.assert_frame_equal, check_frame_type=True) + + +def test_sparse_with_compression(): + # GH 2931 + # make sparse dataframe + arr = np.random.binomial(n=1, p=.01, 
size=(1000, 10)) + df = DataFrame(arr).to_sparse(fill_value=0) + + # case 1: store uncompressed + _check_double_roundtrip(df, tm.assert_frame_equal, + compression=False, + check_frame_type=True) + + # case 2: store compressed (works) + _check_double_roundtrip(df, tm.assert_frame_equal, + compression='zlib', + check_frame_type=True) + + # set one series to be completely sparse + df[0] = np.zeros(1000) + + # case 3: store df with completely sparse series uncompressed + _check_double_roundtrip(df, tm.assert_frame_equal, + compression=False, + check_frame_type=True) + + # case 4: try storing df with completely sparse series compressed + # (fails) + _check_double_roundtrip(df, tm.assert_frame_equal, + compression='zlib', + check_frame_type=True) diff --git a/pandas/tests/io/test_pytables.py b/pandas/tests/io/test_pytables.py deleted file mode 100644 index 04da6da74059b..0000000000000 --- a/pandas/tests/io/test_pytables.py +++ /dev/null @@ -1,5523 +0,0 @@ -import pytest -import os -import tempfile -from contextlib import contextmanager -from warnings import catch_warnings -from distutils.version import LooseVersion - -import datetime -from datetime import timedelta - -import numpy as np - -import pandas as pd -from pandas import (Series, DataFrame, Panel, MultiIndex, Int64Index, - RangeIndex, Categorical, bdate_range, - date_range, timedelta_range, Index, DatetimeIndex, - isna, compat, concat, Timestamp) - -import pandas.util.testing as tm -import pandas.util._test_decorators as td -from pandas.util.testing import (assert_panel_equal, - assert_frame_equal, - assert_series_equal, - set_timezone) - -from pandas.compat import (is_platform_windows, is_platform_little_endian, - PY35, PY36, BytesIO, text_type, - range, lrange, u) -from pandas.io.formats.printing import pprint_thing -from pandas.core.dtypes.common import is_categorical_dtype - -tables = pytest.importorskip('tables') -from pandas.io import pytables as pytables # noqa:E402 -from pandas.io.pytables import (TableIterator, # noqa:E402 - HDFStore, get_store, Term, read_hdf, - PossibleDataLossError, ClosedFileError) - - -_default_compressor = ('blosc' if LooseVersion(tables.__version__) >= - LooseVersion('2.2') else 'zlib') - - -# contextmanager to ensure the file cleanup - - -def safe_remove(path): - if path is not None: - try: - os.remove(path) - except: - pass - - -def safe_close(store): - try: - if store is not None: - store.close() - except: - pass - - -def create_tempfile(path): - """ create an unopened named temporary file """ - return os.path.join(tempfile.gettempdir(), path) - - -@contextmanager -def ensure_clean_store(path, mode='a', complevel=None, complib=None, - fletcher32=False): - - try: - - # put in the temporary path if we don't have one already - if not len(os.path.dirname(path)): - path = create_tempfile(path) - - store = HDFStore(path, mode=mode, complevel=complevel, - complib=complib, fletcher32=False) - yield store - finally: - safe_close(store) - if mode == 'w' or mode == 'a': - safe_remove(path) - - -@contextmanager -def ensure_clean_path(path): - """ - return essentially a named temporary file that is not opened - and deleted on existing; if path is a list, then create and - return list of filenames - """ - try: - if isinstance(path, list): - filenames = [create_tempfile(p) for p in path] - yield filenames - else: - filenames = [create_tempfile(path)] - yield filenames[0] - finally: - for f in filenames: - safe_remove(f) - - -# set these parameters so we don't have file sharing -tables.parameters.MAX_NUMEXPR_THREADS = 
1 -tables.parameters.MAX_BLOSC_THREADS = 1 -tables.parameters.MAX_THREADS = 1 - - -def _maybe_remove(store, key): - """For tests using tables, try removing the table to be sure there is - no content from previous tests using the same table name.""" - try: - store.remove(key) - except: - pass - - -class Base(object): - - @classmethod - def setup_class(cls): - - # Pytables 3.0.0 deprecates lots of things - tm.reset_testing_mode() - - @classmethod - def teardown_class(cls): - - # Pytables 3.0.0 deprecates lots of things - tm.set_testing_mode() - - def setup_method(self, method): - self.path = 'tmp.__%s__.h5' % tm.rands(10) - - def teardown_method(self, method): - pass - - -@pytest.mark.single -class TestHDFStore(Base): - - def test_factory_fun(self): - path = create_tempfile(self.path) - try: - with catch_warnings(record=True): - with get_store(path) as tbl: - raise ValueError('blah') - except ValueError: - pass - finally: - safe_remove(path) - - try: - with catch_warnings(record=True): - with get_store(path) as tbl: - tbl['a'] = tm.makeDataFrame() - - with catch_warnings(record=True): - with get_store(path) as tbl: - assert len(tbl) == 1 - assert type(tbl['a']) == DataFrame - finally: - safe_remove(self.path) - - def test_context(self): - path = create_tempfile(self.path) - try: - with HDFStore(path) as tbl: - raise ValueError('blah') - except ValueError: - pass - finally: - safe_remove(path) - - try: - with HDFStore(path) as tbl: - tbl['a'] = tm.makeDataFrame() - - with HDFStore(path) as tbl: - assert len(tbl) == 1 - assert type(tbl['a']) == DataFrame - finally: - safe_remove(path) - - def test_conv_read_write(self): - path = create_tempfile(self.path) - try: - def roundtrip(key, obj, **kwargs): - obj.to_hdf(path, key, **kwargs) - return read_hdf(path, key) - - o = tm.makeTimeSeries() - assert_series_equal(o, roundtrip('series', o)) - - o = tm.makeStringSeries() - assert_series_equal(o, roundtrip('string_series', o)) - - o = tm.makeDataFrame() - assert_frame_equal(o, roundtrip('frame', o)) - - with catch_warnings(record=True): - - o = tm.makePanel() - assert_panel_equal(o, roundtrip('panel', o)) - - # table - df = DataFrame(dict(A=lrange(5), B=lrange(5))) - df.to_hdf(path, 'table', append=True) - result = read_hdf(path, 'table', where=['index>2']) - assert_frame_equal(df[df.index > 2], result) - - finally: - safe_remove(path) - - def test_long_strings(self): - - # GH6166 - # unconversion of long strings was being chopped in earlier - # versions of numpy < 1.7.2 - df = DataFrame({'a': tm.rands_array(100, size=10)}, - index=tm.rands_array(100, size=10)) - - with ensure_clean_store(self.path) as store: - store.append('df', df, data_columns=['a']) - - result = store.select('df') - assert_frame_equal(df, result) - - def test_api(self): - - # GH4584 - # API issue when to_hdf doesn't acdept append AND format args - with ensure_clean_path(self.path) as path: - - df = tm.makeDataFrame() - df.iloc[:10].to_hdf(path, 'df', append=True, format='table') - df.iloc[10:].to_hdf(path, 'df', append=True, format='table') - assert_frame_equal(read_hdf(path, 'df'), df) - - # append to False - df.iloc[:10].to_hdf(path, 'df', append=False, format='table') - df.iloc[10:].to_hdf(path, 'df', append=True, format='table') - assert_frame_equal(read_hdf(path, 'df'), df) - - with ensure_clean_path(self.path) as path: - - df = tm.makeDataFrame() - df.iloc[:10].to_hdf(path, 'df', append=True) - df.iloc[10:].to_hdf(path, 'df', append=True, format='table') - assert_frame_equal(read_hdf(path, 'df'), df) - - # append to False - 
-            df.iloc[:10].to_hdf(path, 'df', append=False, format='table')
-            df.iloc[10:].to_hdf(path, 'df', append=True)
-            assert_frame_equal(read_hdf(path, 'df'), df)
-
-        with ensure_clean_path(self.path) as path:
-
-            df = tm.makeDataFrame()
-            df.to_hdf(path, 'df', append=False, format='fixed')
-            assert_frame_equal(read_hdf(path, 'df'), df)
-
-            df.to_hdf(path, 'df', append=False, format='f')
-            assert_frame_equal(read_hdf(path, 'df'), df)
-
-            df.to_hdf(path, 'df', append=False)
-            assert_frame_equal(read_hdf(path, 'df'), df)
-
-            df.to_hdf(path, 'df')
-            assert_frame_equal(read_hdf(path, 'df'), df)
-
-        with ensure_clean_store(self.path) as store:
-
-            path = store._path
-            df = tm.makeDataFrame()
-
-            _maybe_remove(store, 'df')
-            store.append('df', df.iloc[:10], append=True, format='table')
-            store.append('df', df.iloc[10:], append=True, format='table')
-            assert_frame_equal(store.select('df'), df)
-
-            # append to False
-            _maybe_remove(store, 'df')
-            store.append('df', df.iloc[:10], append=False, format='table')
-            store.append('df', df.iloc[10:], append=True, format='table')
-            assert_frame_equal(store.select('df'), df)
-
-            # formats
-            _maybe_remove(store, 'df')
-            store.append('df', df.iloc[:10], append=False, format='table')
-            store.append('df', df.iloc[10:], append=True, format='table')
-            assert_frame_equal(store.select('df'), df)
-
-            _maybe_remove(store, 'df')
-            store.append('df', df.iloc[:10], append=False, format='table')
-            store.append('df', df.iloc[10:], append=True, format=None)
-            assert_frame_equal(store.select('df'), df)
-
-        with ensure_clean_path(self.path) as path:
-
-            # invalid
-            df = tm.makeDataFrame()
-            pytest.raises(ValueError, df.to_hdf, path,
-                          'df', append=True, format='f')
-            pytest.raises(ValueError, df.to_hdf, path,
-                          'df', append=True, format='fixed')
-
-            pytest.raises(TypeError, df.to_hdf, path,
-                          'df', append=True, format='foo')
-            pytest.raises(TypeError, df.to_hdf, path,
-                          'df', append=False, format='bar')
-
-            # File path doesn't exist
-            path = ""
-            pytest.raises(compat.FileNotFoundError,
-                          read_hdf, path, 'df')
-
-    def test_api_default_format(self):
-
-        # default_format option
-        with ensure_clean_store(self.path) as store:
-            df = tm.makeDataFrame()
-
-            pd.set_option('io.hdf.default_format', 'fixed')
-            _maybe_remove(store, 'df')
-            store.put('df', df)
-            assert not store.get_storer('df').is_table
-            pytest.raises(ValueError, store.append, 'df2', df)
-
-            pd.set_option('io.hdf.default_format', 'table')
-            _maybe_remove(store, 'df')
-            store.put('df', df)
-            assert store.get_storer('df').is_table
-            _maybe_remove(store, 'df2')
-            store.append('df2', df)
-            assert store.get_storer('df').is_table
-
-            pd.set_option('io.hdf.default_format', None)
-
-        with ensure_clean_path(self.path) as path:
-
-            df = tm.makeDataFrame()
-
-            pd.set_option('io.hdf.default_format', 'fixed')
-            df.to_hdf(path, 'df')
-            with HDFStore(path) as store:
-                assert not store.get_storer('df').is_table
-            pytest.raises(ValueError, df.to_hdf, path, 'df2', append=True)
-
-            pd.set_option('io.hdf.default_format', 'table')
-            df.to_hdf(path, 'df3')
-            with HDFStore(path) as store:
-                assert store.get_storer('df3').is_table
-            df.to_hdf(path, 'df4', append=True)
-            with HDFStore(path) as store:
-                assert store.get_storer('df4').is_table
-
-            pd.set_option('io.hdf.default_format', None)
-
-    def test_keys(self):
-
-        with ensure_clean_store(self.path) as store:
-            store['a'] = tm.makeTimeSeries()
-            store['b'] = tm.makeStringSeries()
-            store['c'] = tm.makeDataFrame()
-            with catch_warnings(record=True):
-                store['d'] = tm.makePanel()
-                store['foo/bar'] = tm.makePanel()
-            assert len(store) == 5
-            expected = set(['/a', '/b', '/c', '/d', '/foo/bar'])
-            assert set(store.keys()) == expected
-            assert set(store) == expected
-
-    def test_iter_empty(self):
-
-        with ensure_clean_store(self.path) as store:
-            # GH 12221
-            assert list(store) == []
-
-    def test_repr(self):
-
-        with ensure_clean_store(self.path) as store:
-            repr(store)
-            store.info()
-            store['a'] = tm.makeTimeSeries()
-            store['b'] = tm.makeStringSeries()
-            store['c'] = tm.makeDataFrame()
-
-            with catch_warnings(record=True):
-                store['d'] = tm.makePanel()
-                store['foo/bar'] = tm.makePanel()
-                store.append('e', tm.makePanel())
-
-            df = tm.makeDataFrame()
-            df['obj1'] = 'foo'
-            df['obj2'] = 'bar'
-            df['bool1'] = df['A'] > 0
-            df['bool2'] = df['B'] > 0
-            df['bool3'] = True
-            df['int1'] = 1
-            df['int2'] = 2
-            df['timestamp1'] = Timestamp('20010102')
-            df['timestamp2'] = Timestamp('20010103')
-            df['datetime1'] = datetime.datetime(2001, 1, 2, 0, 0)
-            df['datetime2'] = datetime.datetime(2001, 1, 3, 0, 0)
-            df.loc[3:6, ['obj1']] = np.nan
-            df = df._consolidate()._convert(datetime=True)
-
-            # PerformanceWarning
-            with catch_warnings(record=True):
-                store['df'] = df
-
-            # make a random group in hdf space
-            store._handle.create_group(store._handle.root, 'bah')
-
-            assert store.filename in repr(store)
-            assert store.filename in str(store)
-            store.info()
-
-        # storers
-        with ensure_clean_store(self.path) as store:
-
-            df = tm.makeDataFrame()
-            store.append('df', df)
-
-            s = store.get_storer('df')
-            repr(s)
-            str(s)
-
-    def test_contains(self):
-
-        with ensure_clean_store(self.path) as store:
-            store['a'] = tm.makeTimeSeries()
-            store['b'] = tm.makeDataFrame()
-            store['foo/bar'] = tm.makeDataFrame()
-            assert 'a' in store
-            assert 'b' in store
-            assert 'c' not in store
-            assert 'foo/bar' in store
-            assert '/foo/bar' in store
-            assert '/foo/b' not in store
-            assert 'bar' not in store
-
-            # gh-2694: tables.NaturalNameWarning
-            with catch_warnings(record=True):
-                store['node())'] = tm.makeDataFrame()
-            assert 'node())' in store
-
-    def test_versioning(self):
-
-        with ensure_clean_store(self.path) as store:
-            store['a'] = tm.makeTimeSeries()
-            store['b'] = tm.makeDataFrame()
-            df = tm.makeTimeDataFrame()
-            _maybe_remove(store, 'df1')
-            store.append('df1', df[:10])
-            store.append('df1', df[10:])
-            assert store.root.a._v_attrs.pandas_version == '0.15.2'
-            assert store.root.b._v_attrs.pandas_version == '0.15.2'
-            assert store.root.df1._v_attrs.pandas_version == '0.15.2'
-
-            # write a file and wipe its versioning
-            _maybe_remove(store, 'df2')
-            store.append('df2', df)
-
-            # this is an error because its table_type is appendable, but no
-            # version info
-            store.get_node('df2')._v_attrs.pandas_version = None
-            pytest.raises(Exception, store.select, 'df2')
-
-    def test_mode(self):
-
-        df = tm.makeTimeDataFrame()
-
-        def check(mode):
-
-            with ensure_clean_path(self.path) as path:
-
-                # constructor
-                if mode in ['r', 'r+']:
-                    pytest.raises(IOError, HDFStore, path, mode=mode)
-
-                else:
-                    store = HDFStore(path, mode=mode)
-                    assert store._handle.mode == mode
-                    store.close()
-
-            with ensure_clean_path(self.path) as path:
-
-                # context
-                if mode in ['r', 'r+']:
-                    def f():
-                        with HDFStore(path, mode=mode) as store:  # noqa
-                            pass
-                    pytest.raises(IOError, f)
-                else:
-                    with HDFStore(path, mode=mode) as store:
-                        assert store._handle.mode == mode
-
-            with ensure_clean_path(self.path) as path:
-
-                # conv write
-                if mode in ['r', 'r+']:
-                    pytest.raises(IOError, df.to_hdf,
-                                  path, 'df', mode=mode)
-                    df.to_hdf(path, 'df', mode='w')
-                else:
-                    df.to_hdf(path, 'df', mode=mode)
-
-                # conv read
-                if mode in ['w']:
-                    pytest.raises(ValueError, read_hdf,
-                                  path, 'df', mode=mode)
-                else:
-                    result = read_hdf(path, 'df', mode=mode)
-                    assert_frame_equal(result, df)
-
-        def check_default_mode():
-
-            # read_hdf uses default mode
-            with ensure_clean_path(self.path) as path:
-                df.to_hdf(path, 'df', mode='w')
-                result = read_hdf(path, 'df')
-                assert_frame_equal(result, df)
-
-        check('r')
-        check('r+')
-        check('a')
-        check('w')
-        check_default_mode()
-
-    def test_reopen_handle(self):
-
-        with ensure_clean_path(self.path) as path:
-
-            store = HDFStore(path, mode='a')
-            store['a'] = tm.makeTimeSeries()
-
-            # invalid mode change
-            pytest.raises(PossibleDataLossError, store.open, 'w')
-            store.close()
-            assert not store.is_open
-
-            # truncation ok here
-            store.open('w')
-            assert store.is_open
-            assert len(store) == 0
-            store.close()
-            assert not store.is_open
-
-            store = HDFStore(path, mode='a')
-            store['a'] = tm.makeTimeSeries()
-
-            # reopen as read
-            store.open('r')
-            assert store.is_open
-            assert len(store) == 1
-            assert store._mode == 'r'
-            store.close()
-            assert not store.is_open
-
-            # reopen as append
-            store.open('a')
-            assert store.is_open
-            assert len(store) == 1
-            assert store._mode == 'a'
-            store.close()
-            assert not store.is_open
-
-            # reopen as append (again)
-            store.open('a')
-            assert store.is_open
-            assert len(store) == 1
-            assert store._mode == 'a'
-            store.close()
-            assert not store.is_open
-
-    def test_open_args(self):
-
-        with ensure_clean_path(self.path) as path:
-
-            df = tm.makeDataFrame()
-
-            # create an in memory store
-            store = HDFStore(path, mode='a', driver='H5FD_CORE',
-                             driver_core_backing_store=0)
-            store['df'] = df
-            store.append('df2', df)
-
-            tm.assert_frame_equal(store['df'], df)
-            tm.assert_frame_equal(store['df2'], df)
-
-            store.close()
-
-            # the file should not have actually been written
-            assert not os.path.exists(path)
-
-    def test_flush(self):
-
-        with ensure_clean_store(self.path) as store:
-            store['a'] = tm.makeTimeSeries()
-            store.flush()
-            store.flush(fsync=True)
-
-    def test_get(self):
-
-        with ensure_clean_store(self.path) as store:
-            store['a'] = tm.makeTimeSeries()
-            left = store.get('a')
-            right = store['a']
-            tm.assert_series_equal(left, right)
-
-            left = store.get('/a')
-            right = store['/a']
-            tm.assert_series_equal(left, right)
-
-            pytest.raises(KeyError, store.get, 'b')
-
-    def test_getattr(self):
-
-        with ensure_clean_store(self.path) as store:
-
-            s = tm.makeTimeSeries()
-            store['a'] = s
-
-            # test attribute access
-            result = store.a
-            tm.assert_series_equal(result, s)
-            result = getattr(store, 'a')
-            tm.assert_series_equal(result, s)
-
-            df = tm.makeTimeDataFrame()
-            store['df'] = df
-            result = store.df
-            tm.assert_frame_equal(result, df)
-
-            # errors
-            pytest.raises(AttributeError, getattr, store, 'd')
-
-            for x in ['mode', 'path', 'handle', 'complib']:
-                pytest.raises(AttributeError, getattr, store, x)
-
-            # not stores
-            for x in ['mode', 'path', 'handle', 'complib']:
-                getattr(store, "_%s" % x)
-
-    def test_put(self):
-
-        with ensure_clean_store(self.path) as store:
-
-            ts = tm.makeTimeSeries()
-            df = tm.makeTimeDataFrame()
-            store['a'] = ts
-            store['b'] = df[:10]
-            store['foo/bar/bah'] = df[:10]
-            store['foo'] = df[:10]
-            store['/foo'] = df[:10]
-            store.put('c', df[:10], format='table')
-
-            # not OK, not a table
-            pytest.raises(
-                ValueError, store.put, 'b', df[10:], append=True)
-
-            # node does not currently exist, test _is_table_type returns False
-            # in this case
-            # _maybe_remove(store, 'f')
-            # pytest.raises(ValueError, store.put, 'f', df[10:],
-            #               append=True)
-
-            # can't put to a table (use append instead)
-            pytest.raises(ValueError, store.put, 'c', df[10:], append=True)
-
-            # overwrite table
-            store.put('c', df[:10], format='table', append=False)
-            tm.assert_frame_equal(df[:10], store['c'])
-
-    def test_put_string_index(self):
-
-        with ensure_clean_store(self.path) as store:
-
-            index = Index(
-                ["I am a very long string index: %s" % i for i in range(20)])
-            s = Series(np.arange(20), index=index)
-            df = DataFrame({'A': s, 'B': s})
-
-            store['a'] = s
-            tm.assert_series_equal(store['a'], s)
-
-            store['b'] = df
-            tm.assert_frame_equal(store['b'], df)
-
-            # mixed length
-            index = Index(['abcdefghijklmnopqrstuvwxyz1234567890'] +
-                          ["I am a very long string index: %s" % i
-                           for i in range(20)])
-            s = Series(np.arange(21), index=index)
-            df = DataFrame({'A': s, 'B': s})
-            store['a'] = s
-            tm.assert_series_equal(store['a'], s)
-
-            store['b'] = df
-            tm.assert_frame_equal(store['b'], df)
-
-    def test_put_compression(self):
-
-        with ensure_clean_store(self.path) as store:
-            df = tm.makeTimeDataFrame()
-
-            store.put('c', df, format='table', complib='zlib')
-            tm.assert_frame_equal(store['c'], df)
-
-            # can't compress if format='fixed'
-            pytest.raises(ValueError, store.put, 'b', df,
-                          format='fixed', complib='zlib')
-
-    @td.skip_if_windows_python_3
-    def test_put_compression_blosc(self):
-        df = tm.makeTimeDataFrame()
-
-        with ensure_clean_store(self.path) as store:
-
-            # can't compress if format='fixed'
-            pytest.raises(ValueError, store.put, 'b', df,
-                          format='fixed', complib='blosc')
-
-            store.put('c', df, format='table', complib='blosc')
-            tm.assert_frame_equal(store['c'], df)
-
-    def test_complibs_default_settings(self):
-        # GH15943
-        df = tm.makeDataFrame()
-
-        # Set complevel and check if complib is automatically set to
-        # default value
-        with ensure_clean_path(self.path) as tmpfile:
-            df.to_hdf(tmpfile, 'df', complevel=9)
-            result = pd.read_hdf(tmpfile, 'df')
-            tm.assert_frame_equal(result, df)
-
-            with tables.open_file(tmpfile, mode='r') as h5file:
-                for node in h5file.walk_nodes(where='/df', classname='Leaf'):
-                    assert node.filters.complevel == 9
-                    assert node.filters.complib == 'zlib'
-
-        # Set complib and check to see if compression is disabled
-        with ensure_clean_path(self.path) as tmpfile:
-            df.to_hdf(tmpfile, 'df', complib='zlib')
-            result = pd.read_hdf(tmpfile, 'df')
-            tm.assert_frame_equal(result, df)
-
-            with tables.open_file(tmpfile, mode='r') as h5file:
-                for node in h5file.walk_nodes(where='/df', classname='Leaf'):
-                    assert node.filters.complevel == 0
-                    assert node.filters.complib is None
-
-        # Check if not setting complib or complevel results in no compression
-        with ensure_clean_path(self.path) as tmpfile:
-            df.to_hdf(tmpfile, 'df')
-            result = pd.read_hdf(tmpfile, 'df')
-            tm.assert_frame_equal(result, df)
-
-            with tables.open_file(tmpfile, mode='r') as h5file:
-                for node in h5file.walk_nodes(where='/df', classname='Leaf'):
-                    assert node.filters.complevel == 0
-                    assert node.filters.complib is None
-
-        # Check if file-defaults can be overridden on a per table basis
-        with ensure_clean_path(self.path) as tmpfile:
-            store = pd.HDFStore(tmpfile)
-            store.append('dfc', df, complevel=9, complib='blosc')
-            store.append('df', df)
-            store.close()
-
-            with tables.open_file(tmpfile, mode='r') as h5file:
-                for node in h5file.walk_nodes(where='/df', classname='Leaf'):
-                    assert node.filters.complevel == 0
-                    assert node.filters.complib is None
-                for node in h5file.walk_nodes(where='/dfc', classname='Leaf'):
-                    assert node.filters.complevel == 9
-                    assert node.filters.complib == 'blosc'
-
-    def test_complibs(self):
-        # GH14478
-        df = tm.makeDataFrame()
-
-        # Building list of all complibs and complevels tuples
-        all_complibs = tables.filters.all_complibs
-        # Remove lzo if it's not available on this platform
-        if not tables.which_lib_version('lzo'):
-            all_complibs.remove('lzo')
-        # Remove bzip2 if it's not available on this platform
-        if not tables.which_lib_version("bzip2"):
-            all_complibs.remove("bzip2")
-
-        all_levels = range(0, 10)
-        all_tests = [(lib, lvl) for lib in all_complibs for lvl in all_levels]
-
-        for (lib, lvl) in all_tests:
-            with ensure_clean_path(self.path) as tmpfile:
-                gname = 'foo'
-
-                # Write and read file to see if data is consistent
-                df.to_hdf(tmpfile, gname, complib=lib, complevel=lvl)
-                result = pd.read_hdf(tmpfile, gname)
-                tm.assert_frame_equal(result, df)
-
-                # Open file and check metadata
-                # for correct amount of compression
-                h5table = tables.open_file(tmpfile, mode='r')
-                for node in h5table.walk_nodes(where='/' + gname,
-                                               classname='Leaf'):
-                    assert node.filters.complevel == lvl
-                    if lvl == 0:
-                        assert node.filters.complib is None
-                    else:
-                        assert node.filters.complib == lib
-                h5table.close()
-
-    def test_put_integer(self):
-        # non-date, non-string index
-        df = DataFrame(np.random.randn(50, 100))
-        self._check_roundtrip(df, tm.assert_frame_equal)
-
-    def test_put_mixed_type(self):
-        df = tm.makeTimeDataFrame()
-        df['obj1'] = 'foo'
-        df['obj2'] = 'bar'
-        df['bool1'] = df['A'] > 0
-        df['bool2'] = df['B'] > 0
-        df['bool3'] = True
-        df['int1'] = 1
-        df['int2'] = 2
-        df['timestamp1'] = Timestamp('20010102')
-        df['timestamp2'] = Timestamp('20010103')
-        df['datetime1'] = datetime.datetime(2001, 1, 2, 0, 0)
-        df['datetime2'] = datetime.datetime(2001, 1, 3, 0, 0)
-        df.loc[3:6, ['obj1']] = np.nan
-        df = df._consolidate()._convert(datetime=True)
-
-        with ensure_clean_store(self.path) as store:
-            _maybe_remove(store, 'df')
-
-            # PerformanceWarning
-            with catch_warnings(record=True):
-                store.put('df', df)
-
-            expected = store.get('df')
-            tm.assert_frame_equal(expected, df)
-
-    def test_append(self):
-
-        with ensure_clean_store(self.path) as store:
-
-            # this is allowed, but you almost always don't want to do it
-            # tables.NaturalNameWarning):
-            with catch_warnings(record=True):
-
-                df = tm.makeTimeDataFrame()
-                _maybe_remove(store, 'df1')
-                store.append('df1', df[:10])
-                store.append('df1', df[10:])
-                tm.assert_frame_equal(store['df1'], df)
-
-                _maybe_remove(store, 'df2')
-                store.put('df2', df[:10], format='table')
-                store.append('df2', df[10:])
-                tm.assert_frame_equal(store['df2'], df)
-
-                _maybe_remove(store, 'df3')
-                store.append('/df3', df[:10])
-                store.append('/df3', df[10:])
-                tm.assert_frame_equal(store['df3'], df)
-
-                # this is allowed, but you almost always don't want to do it
-                # tables.NaturalNameWarning
-                _maybe_remove(store, '/df3 foo')
-                store.append('/df3 foo', df[:10])
-                store.append('/df3 foo', df[10:])
-                tm.assert_frame_equal(store['df3 foo'], df)
-
-                # panel
-                wp = tm.makePanel()
-                _maybe_remove(store, 'wp1')
-                store.append('wp1', wp.iloc[:, :10, :])
-                store.append('wp1', wp.iloc[:, 10:, :])
-                assert_panel_equal(store['wp1'], wp)
-
-                # test using different order of items on the non-index axes
-                _maybe_remove(store, 'wp1')
-                wp_append1 = wp.iloc[:, :10, :]
-                store.append('wp1', wp_append1)
-                wp_append2 = wp.iloc[:, 10:, :].reindex(items=wp.items[::-1])
-                store.append('wp1', wp_append2)
-                assert_panel_equal(store['wp1'], wp)
-
-                # dtype issues - mixed type in a single object column
-                df = DataFrame(data=[[1, 2], [0, 1], [1, 2], [0, 0]])
-                df['mixed_column'] = 'testing'
-                df.loc[2, 'mixed_column'] = np.nan
-                _maybe_remove(store, 'df')
-                store.append('df', df)
-                tm.assert_frame_equal(store['df'], df)
-
-                # uints - test storage of uints
-                uint_data = DataFrame({
-                    'u08': Series(np.random.randint(0, high=255, size=5),
-                                  dtype=np.uint8),
-                    'u16': Series(np.random.randint(0, high=65535, size=5),
-                                  dtype=np.uint16),
-                    'u32': Series(np.random.randint(0, high=2**30, size=5),
-                                  dtype=np.uint32),
-                    'u64': Series([2**58, 2**59, 2**60, 2**61, 2**62],
-                                  dtype=np.uint64)}, index=np.arange(5))
-                _maybe_remove(store, 'uints')
-                store.append('uints', uint_data)
-                tm.assert_frame_equal(store['uints'], uint_data)
-
-                # uints - test storage of uints in indexable columns
-                _maybe_remove(store, 'uints')
-                # 64-bit indices not yet supported
-                store.append('uints', uint_data, data_columns=[
-                             'u08', 'u16', 'u32'])
-                tm.assert_frame_equal(store['uints'], uint_data)
-
-    def test_append_series(self):
-
-        with ensure_clean_store(self.path) as store:
-
-            # basic
-            ss = tm.makeStringSeries()
-            ts = tm.makeTimeSeries()
-            ns = Series(np.arange(100))
-
-            store.append('ss', ss)
-            result = store['ss']
-            tm.assert_series_equal(result, ss)
-            assert result.name is None
-
-            store.append('ts', ts)
-            result = store['ts']
-            tm.assert_series_equal(result, ts)
-            assert result.name is None
-
-            ns.name = 'foo'
-            store.append('ns', ns)
-            result = store['ns']
-            tm.assert_series_equal(result, ns)
-            assert result.name == ns.name
-
-            # select on the values
-            expected = ns[ns > 60]
-            result = store.select('ns', 'foo>60')
-            tm.assert_series_equal(result, expected)
-
-            # select on the index and values
-            expected = ns[(ns > 70) & (ns.index < 90)]
-            result = store.select('ns', 'foo>70 and index<90')
-            tm.assert_series_equal(result, expected)
-
-            # multi-index
-            mi = DataFrame(np.random.randn(5, 1), columns=['A'])
-            mi['B'] = np.arange(len(mi))
-            mi['C'] = 'foo'
-            mi.loc[3:5, 'C'] = 'bar'
-            mi.set_index(['C', 'B'], inplace=True)
-            s = mi.stack()
-            s.index = s.index.droplevel(2)
-            store.append('mi', s)
-            tm.assert_series_equal(store['mi'], s)
-
-    def test_store_index_types(self):
-        # GH5386
-        # test storing various index types
-
-        with ensure_clean_store(self.path) as store:
-
-            def check(format, index):
-                df = DataFrame(np.random.randn(10, 2), columns=list('AB'))
-                df.index = index(len(df))
-
-                _maybe_remove(store, 'df')
-                store.put('df', df, format=format)
-                assert_frame_equal(df, store['df'])
-
-            for index in [tm.makeFloatIndex, tm.makeStringIndex,
-                          tm.makeIntIndex, tm.makeDateIndex]:
-
-                check('table', index)
-                check('fixed', index)
-
-            # period index currently broken for table
-            # see GH7796 FIXME
-            check('fixed', tm.makePeriodIndex)
-            # check('table',tm.makePeriodIndex)
-
-            # unicode
-            index = tm.makeUnicodeIndex
-            if compat.PY3:
-                check('table', index)
-                check('fixed', index)
-            else:
-
-                # only support for fixed types (and they have a perf warning)
-                pytest.raises(TypeError, check, 'table', index)
-
-                # PerformanceWarning
-                with catch_warnings(record=True):
-                    check('fixed', index)
-
-    @pytest.mark.skipif(not is_platform_little_endian(),
-                        reason="platform is not little endian")
-    def test_encoding(self):
-
-        with ensure_clean_store(self.path) as store:
-            df = DataFrame(dict(A='foo', B='bar'), index=range(5))
-            df.loc[2, 'A'] = np.nan
-            df.loc[3, 'B'] = np.nan
-            _maybe_remove(store, 'df')
-            store.append('df', df, encoding='ascii')
-            tm.assert_frame_equal(store['df'], df)
-
-            expected = df.reindex(columns=['A'])
-            result = store.select('df', Term('columns=A', encoding='ascii'))
-            tm.assert_frame_equal(result, expected)
-
-    def test_latin_encoding(self):
-
-        if compat.PY2:
-            tm.assert_raises_regex(
-                TypeError, r'\[unicode\] is not implemented as a table column')
-            return
-
-        values = [[b'E\xc9, 17', b'', b'a', b'b', b'c'],
-                  [b'E\xc9, 17', b'a', b'b', b'c'],
-                  [b'EE, 17', b'', b'a', b'b', b'c'],
-                  [b'E\xc9, 17', b'\xf8\xfc', b'a', b'b', b'c'],
-                  [b'', b'a', b'b', b'c'],
-                  [b'\xf8\xfc', b'a', b'b', b'c'],
-                  [b'A\xf8\xfc', b'', b'a', b'b', b'c'],
-                  [np.nan, b'', b'b', b'c'],
-                  [b'A\xf8\xfc', np.nan, b'', b'b', b'c']]
-
-        def _try_decode(x, encoding='latin-1'):
-            try:
-                return x.decode(encoding)
-            except AttributeError:
-                return x
-        # not sure how to remove latin-1 from code in python 2 and 3
-        values = [[_try_decode(x) for x in y] for y in values]
-
-        examples = []
-        for dtype in ['category', object]:
-            for val in values:
-                examples.append(pd.Series(val, dtype=dtype))
-
-        def roundtrip(s, key='data', encoding='latin-1', nan_rep=''):
-            with ensure_clean_path(self.path) as store:
-                s.to_hdf(store, key, format='table', encoding=encoding,
-                         nan_rep=nan_rep)
-                retr = read_hdf(store, key)
-                s_nan = s.replace(nan_rep, np.nan)
-                if is_categorical_dtype(s_nan):
-                    assert is_categorical_dtype(retr)
-                    assert_series_equal(s_nan, retr, check_dtype=False,
-                                        check_categorical=False)
-                else:
-                    assert_series_equal(s_nan, retr)
-
-        for s in examples:
-            roundtrip(s)
-
-        # fails:
-        # for x in examples:
-        #     roundtrip(s, nan_rep=b'\xf8\xfc')
-
-    def test_append_some_nans(self):
-
-        with ensure_clean_store(self.path) as store:
-            df = DataFrame({'A': Series(np.random.randn(20)).astype('int32'),
-                            'A1': np.random.randn(20),
-                            'A2': np.random.randn(20),
-                            'B': 'foo', 'C': 'bar',
-                            'D': Timestamp("20010101"),
-                            'E': datetime.datetime(2001, 1, 2, 0, 0)},
-                           index=np.arange(20))
-            # some nans
-            _maybe_remove(store, 'df1')
-            df.loc[0:15, ['A1', 'B', 'D', 'E']] = np.nan
-            store.append('df1', df[:10])
-            store.append('df1', df[10:])
-            tm.assert_frame_equal(store['df1'], df)
-
-            # first column
-            df1 = df.copy()
-            df1.loc[:, 'A1'] = np.nan
-            _maybe_remove(store, 'df1')
-            store.append('df1', df1[:10])
-            store.append('df1', df1[10:])
-            tm.assert_frame_equal(store['df1'], df1)
-
-            # 2nd column
-            df2 = df.copy()
-            df2.loc[:, 'A2'] = np.nan
-            _maybe_remove(store, 'df2')
-            store.append('df2', df2[:10])
-            store.append('df2', df2[10:])
-            tm.assert_frame_equal(store['df2'], df2)
-
-            # datetimes
-            df3 = df.copy()
-            df3.loc[:, 'E'] = np.nan
-            _maybe_remove(store, 'df3')
-            store.append('df3', df3[:10])
-            store.append('df3', df3[10:])
-            tm.assert_frame_equal(store['df3'], df3)
-
-    def test_append_all_nans(self):
-
-        with ensure_clean_store(self.path) as store:
-
-            df = DataFrame({'A1': np.random.randn(20),
-                            'A2': np.random.randn(20)},
-                           index=np.arange(20))
-            df.loc[0:15, :] = np.nan
-
-            # nan some entire rows (dropna=True)
-            _maybe_remove(store, 'df')
-            store.append('df', df[:10], dropna=True)
-            store.append('df', df[10:], dropna=True)
-            tm.assert_frame_equal(store['df'], df[-4:])
-
-            # nan some entire rows (dropna=False)
-            _maybe_remove(store, 'df2')
-            store.append('df2', df[:10], dropna=False)
-            store.append('df2', df[10:], dropna=False)
-            tm.assert_frame_equal(store['df2'], df)
-
-            # tests the option io.hdf.dropna_table
-            pd.set_option('io.hdf.dropna_table', False)
-            _maybe_remove(store, 'df3')
-            store.append('df3', df[:10])
-            store.append('df3', df[10:])
-            tm.assert_frame_equal(store['df3'], df)
-
-            pd.set_option('io.hdf.dropna_table', True)
-            _maybe_remove(store, 'df4')
-            store.append('df4', df[:10])
-            store.append('df4', df[10:])
-            tm.assert_frame_equal(store['df4'], df[-4:])
-
-            # nan some entire rows (strings are still written!)
-            df = DataFrame({'A1': np.random.randn(20),
-                            'A2': np.random.randn(20),
-                            'B': 'foo', 'C': 'bar'},
-                           index=np.arange(20))
-
-            df.loc[0:15, :] = np.nan
-
-            _maybe_remove(store, 'df')
-            store.append('df', df[:10], dropna=True)
-            store.append('df', df[10:], dropna=True)
-            tm.assert_frame_equal(store['df'], df)
-
-            _maybe_remove(store, 'df2')
-            store.append('df2', df[:10], dropna=False)
-            store.append('df2', df[10:], dropna=False)
-            tm.assert_frame_equal(store['df2'], df)
-
-            # nan some entire rows (but since we have dates they are still
-            # written!)
-            df = DataFrame({'A1': np.random.randn(20),
-                            'A2': np.random.randn(20),
-                            'B': 'foo', 'C': 'bar',
-                            'D': Timestamp("20010101"),
-                            'E': datetime.datetime(2001, 1, 2, 0, 0)},
-                           index=np.arange(20))
-
-            df.loc[0:15, :] = np.nan
-
-            _maybe_remove(store, 'df')
-            store.append('df', df[:10], dropna=True)
-            store.append('df', df[10:], dropna=True)
-            tm.assert_frame_equal(store['df'], df)
-
-            _maybe_remove(store, 'df2')
-            store.append('df2', df[:10], dropna=False)
-            store.append('df2', df[10:], dropna=False)
-            tm.assert_frame_equal(store['df2'], df)
-
-        # Test to make sure defaults are to not drop.
-        # Corresponding to Issue 9382
-        df_with_missing = DataFrame(
-            {'col1': [0, np.nan, 2], 'col2': [1, np.nan, np.nan]})
-
-        with ensure_clean_path(self.path) as path:
-            df_with_missing.to_hdf(path, 'df_with_missing', format='table')
-            reloaded = read_hdf(path, 'df_with_missing')
-            tm.assert_frame_equal(df_with_missing, reloaded)
-
-        matrix = [[[np.nan, np.nan, np.nan], [1, np.nan, np.nan]],
-                  [[np.nan, np.nan, np.nan], [np.nan, 5, 6]],
-                  [[np.nan, np.nan, np.nan], [np.nan, 3, np.nan]]]
-
-        with catch_warnings(record=True):
-            panel_with_missing = Panel(matrix,
-                                       items=['Item1', 'Item2', 'Item3'],
-                                       major_axis=[1, 2],
-                                       minor_axis=['A', 'B', 'C'])
-
-        with ensure_clean_path(self.path) as path:
-            panel_with_missing.to_hdf(
-                path, 'panel_with_missing', format='table')
-            reloaded_panel = read_hdf(path, 'panel_with_missing')
-            tm.assert_panel_equal(panel_with_missing, reloaded_panel)
-
-    def test_append_frame_column_oriented(self):
-
-        with ensure_clean_store(self.path) as store:
-
-            # column oriented
-            df = tm.makeTimeDataFrame()
-            _maybe_remove(store, 'df1')
-            store.append('df1', df.iloc[:, :2], axes=['columns'])
-            store.append('df1', df.iloc[:, 2:])
-            tm.assert_frame_equal(store['df1'], df)
-
-            result = store.select('df1', 'columns=A')
-            expected = df.reindex(columns=['A'])
-            tm.assert_frame_equal(expected, result)
-
-            # selection on the non-indexable
-            result = store.select(
-                'df1', ('columns=A', 'index=df.index[0:4]'))
-            expected = df.reindex(columns=['A'], index=df.index[0:4])
-            tm.assert_frame_equal(expected, result)
-
-            # this isn't supported
-            with pytest.raises(TypeError):
-                store.select('df1',
-                             'columns=A and index>df.index[4]')
-
-    def test_append_with_different_block_ordering(self):
-
-        # GH 4096; using same frames, but different block orderings
-        with ensure_clean_store(self.path) as store:
-
-            for i in range(10):
-
-                df = DataFrame(np.random.randn(10, 2), columns=list('AB'))
-                df['index'] = range(10)
-                df['index'] += i * 10
-                df['int64'] = Series([1] * len(df), dtype='int64')
-                df['int16'] = Series([1] * len(df), dtype='int16')
-
-                if i % 2 == 0:
-                    del df['int64']
-                    df['int64'] = Series([1] * len(df), dtype='int64')
-                if i % 3 == 0:
-                    a = df.pop('A')
-                    df['A'] = a
-
-                df.set_index('index', inplace=True)
-
-                store.append('df', df)
-
-        # test a different ordering but with more fields (like an invalid
-        # combination)
-        with ensure_clean_store(self.path) as store:
-
-            df = DataFrame(np.random.randn(10, 2),
-                           columns=list('AB'), dtype='float64')
-            df['int64'] = Series([1] * len(df), dtype='int64')
-            df['int16'] = Series([1] * len(df), dtype='int16')
-            store.append('df', df)
-
-            # store additional fields in different blocks
-            df['int16_2'] = Series([1] * len(df), dtype='int16')
-            pytest.raises(ValueError, store.append, 'df', df)
-
-            # store multiple additional fields in different blocks
-            df['float_3'] = Series([1.] * len(df), dtype='float64')
-            pytest.raises(ValueError, store.append, 'df', df)
-
-    def test_append_with_strings(self):
-
-        with ensure_clean_store(self.path) as store:
-            with catch_warnings(record=True):
-                wp = tm.makePanel()
-                wp2 = wp.rename_axis(
-                    {x: "%s_extra" % x for x in wp.minor_axis}, axis=2)
-
-                def check_col(key, name, size):
-                    assert getattr(store.get_storer(key)
-                                   .table.description, name).itemsize == size
-
-                store.append('s1', wp, min_itemsize=20)
-                store.append('s1', wp2)
-                expected = concat([wp, wp2], axis=2)
-                expected = expected.reindex(
-                    minor_axis=sorted(expected.minor_axis))
-                assert_panel_equal(store['s1'], expected)
-                check_col('s1', 'minor_axis', 20)
-
-                # test dict format
-                store.append('s2', wp, min_itemsize={'minor_axis': 20})
-                store.append('s2', wp2)
-                expected = concat([wp, wp2], axis=2)
-                expected = expected.reindex(
-                    minor_axis=sorted(expected.minor_axis))
-                assert_panel_equal(store['s2'], expected)
-                check_col('s2', 'minor_axis', 20)
-
-                # apply the wrong field (similar to #1)
-                store.append('s3', wp, min_itemsize={'major_axis': 20})
-                pytest.raises(ValueError, store.append, 's3', wp2)
-
-                # test truncation of bigger strings
-                store.append('s4', wp)
-                pytest.raises(ValueError, store.append, 's4', wp2)
-
-                # avoid truncation on elements
-                df = DataFrame([[123, 'asdqwerty'], [345, 'dggnhebbsdfbdfb']])
-                store.append('df_big', df)
-                tm.assert_frame_equal(store.select('df_big'), df)
-                check_col('df_big', 'values_block_1', 15)
-
-                # appending smaller string ok
-                df2 = DataFrame([[124, 'asdqy'], [346, 'dggnhefbdfb']])
-                store.append('df_big', df2)
-                expected = concat([df, df2])
-                tm.assert_frame_equal(store.select('df_big'), expected)
-                check_col('df_big', 'values_block_1', 15)
-
-                # avoid truncation on elements
-                df = DataFrame([[123, 'asdqwerty'], [345, 'dggnhebbsdfbdfb']])
-                store.append('df_big2', df, min_itemsize={'values': 50})
-                tm.assert_frame_equal(store.select('df_big2'), df)
-                check_col('df_big2', 'values_block_1', 50)
-
-                # bigger string on next append
-                store.append('df_new', df)
-                df_new = DataFrame(
-                    [[124, 'abcdefqhij'], [346, 'abcdefghijklmnopqrtsuvwxyz']])
-                pytest.raises(ValueError, store.append, 'df_new', df_new)
-
-                # min_itemsize on Series index (GH 11412)
-                df = tm.makeMixedDataFrame().set_index('C')
-                store.append('ss', df['B'], min_itemsize={'index': 4})
-                tm.assert_series_equal(store.select('ss'), df['B'])
-
-                # same as above, with data_columns=True
-                store.append('ss2', df['B'], data_columns=True,
-                             min_itemsize={'index': 4})
-                tm.assert_series_equal(store.select('ss2'), df['B'])
-
-                # min_itemsize in index without appending (GH 10381)
-                store.put('ss3', df, format='table',
-                          min_itemsize={'index': 6})
-                # just make sure there is a longer string:
-                df2 = df.copy().reset_index().assign(C='longer').set_index('C')
-                store.append('ss3', df2)
-                tm.assert_frame_equal(store.select('ss3'),
-                                      pd.concat([df, df2]))
-
-                # same as above, with a Series
-                store.put('ss4', df['B'], format='table',
-                          min_itemsize={'index': 6})
-                store.append('ss4', df2['B'])
-                tm.assert_series_equal(store.select('ss4'),
-                                       pd.concat([df['B'], df2['B']]))
-
-                # with nans
-                _maybe_remove(store, 'df')
-                df = tm.makeTimeDataFrame()
-                df['string'] = 'foo'
-                df.loc[1:4, 'string'] = np.nan
-                df['string2'] = 'bar'
-                df.loc[4:8, 'string2'] = np.nan
-                df['string3'] = 'bah'
-                df.loc[1:, 'string3'] = np.nan
-                store.append('df', df)
-                result = store.select('df')
-                tm.assert_frame_equal(result, df)
-
-        with ensure_clean_store(self.path) as store:
-
-            def check_col(key, name, size):
-                assert getattr(store.get_storer(key)
-                               .table.description, name).itemsize == size
-
-            df = DataFrame(dict(A='foo', B='bar'), index=range(10))
-
-            # a min_itemsize that creates a data_column
-            _maybe_remove(store, 'df')
-            store.append('df', df, min_itemsize={'A': 200})
-            check_col('df', 'A', 200)
-            assert store.get_storer('df').data_columns == ['A']
-
-            # a min_itemsize that creates a data_column2
-            _maybe_remove(store, 'df')
-            store.append('df', df, data_columns=['B'], min_itemsize={'A': 200})
-            check_col('df', 'A', 200)
-            assert store.get_storer('df').data_columns == ['B', 'A']
-
-            # a min_itemsize that creates a data_column2
-            _maybe_remove(store, 'df')
-            store.append('df', df, data_columns=[
-                         'B'], min_itemsize={'values': 200})
-            check_col('df', 'B', 200)
-            check_col('df', 'values_block_0', 200)
-            assert store.get_storer('df').data_columns == ['B']
-
-            # infer the .typ on subsequent appends
-            _maybe_remove(store, 'df')
-            store.append('df', df[:5], min_itemsize=200)
-            store.append('df', df[5:], min_itemsize=200)
-            tm.assert_frame_equal(store['df'], df)
-
-            # invalid min_itemsize keys
-            df = DataFrame(['foo', 'foo', 'foo', 'barh',
-                            'barh', 'barh'], columns=['A'])
-            _maybe_remove(store, 'df')
-            pytest.raises(ValueError, store.append, 'df',
-                          df, min_itemsize={'foo': 20, 'foobar': 20})
-
-    def test_to_hdf_with_min_itemsize(self):
-
-        with ensure_clean_path(self.path) as path:
-
-            # min_itemsize in index with to_hdf (GH 10381)
-            df = tm.makeMixedDataFrame().set_index('C')
-            df.to_hdf(path, 'ss3', format='table', min_itemsize={'index': 6})
-            # just make sure there is a longer string:
-            df2 = df.copy().reset_index().assign(C='longer').set_index('C')
-            df2.to_hdf(path, 'ss3', append=True, format='table')
-            tm.assert_frame_equal(pd.read_hdf(path, 'ss3'),
-                                  pd.concat([df, df2]))
-
-            # same as above, with a Series
-            df['B'].to_hdf(path, 'ss4', format='table',
-                           min_itemsize={'index': 6})
-            df2['B'].to_hdf(path, 'ss4', append=True, format='table')
-            tm.assert_series_equal(pd.read_hdf(path, 'ss4'),
-                                   pd.concat([df['B'], df2['B']]))
-
-    def test_append_with_data_columns(self):
-
-        with ensure_clean_store(self.path) as store:
-            df = tm.makeTimeDataFrame()
-            df.iloc[0, df.columns.get_loc('B')] = 1.
-            _maybe_remove(store, 'df')
-            store.append('df', df[:2], data_columns=['B'])
-            store.append('df', df[2:])
-            tm.assert_frame_equal(store['df'], df)
-
-            # check that we have indices created
-            assert(store._handle.root.df.table.cols.index.is_indexed is True)
-            assert(store._handle.root.df.table.cols.B.is_indexed is True)
-
-            # data column searching
-            result = store.select('df', 'B>0')
-            expected = df[df.B > 0]
-            tm.assert_frame_equal(result, expected)
-
-            # data column searching (with an indexable and a data_columns)
-            result = store.select(
-                'df', 'B>0 and index>df.index[3]')
-            df_new = df.reindex(index=df.index[4:])
-            expected = df_new[df_new.B > 0]
-            tm.assert_frame_equal(result, expected)
-
-            # data column selection with a string data_column
-            df_new = df.copy()
-            df_new['string'] = 'foo'
-            df_new.loc[1:4, 'string'] = np.nan
-            df_new.loc[5:6, 'string'] = 'bar'
-            _maybe_remove(store, 'df')
-            store.append('df', df_new, data_columns=['string'])
-            result = store.select('df', "string='foo'")
-            expected = df_new[df_new.string == 'foo']
-            tm.assert_frame_equal(result, expected)
-
-        # using min_itemsize and a data column
-        def check_col(key, name, size):
-            assert getattr(store.get_storer(key)
-                           .table.description, name).itemsize == size
-
-        with ensure_clean_store(self.path) as store:
-            _maybe_remove(store, 'df')
-            store.append('df', df_new, data_columns=['string'],
-                         min_itemsize={'string': 30})
-            check_col('df', 'string', 30)
-            _maybe_remove(store, 'df')
-            store.append(
-                'df', df_new, data_columns=['string'], min_itemsize=30)
-            check_col('df', 'string', 30)
-            _maybe_remove(store, 'df')
-            store.append('df', df_new, data_columns=['string'],
-                         min_itemsize={'values': 30})
-            check_col('df', 'string', 30)
-
-        with ensure_clean_store(self.path) as store:
-            df_new['string2'] = 'foobarbah'
-            df_new['string_block1'] = 'foobarbah1'
-            df_new['string_block2'] = 'foobarbah2'
-            _maybe_remove(store, 'df')
-            store.append('df', df_new, data_columns=['string', 'string2'],
-                         min_itemsize={'string': 30, 'string2': 40,
-                                       'values': 50})
-            check_col('df', 'string', 30)
-            check_col('df', 'string2', 40)
-            check_col('df', 'values_block_1', 50)
-
-        with ensure_clean_store(self.path) as store:
-            # multiple data columns
-            df_new = df.copy()
-            df_new.iloc[0, df_new.columns.get_loc('A')] = 1.
-            df_new.iloc[0, df_new.columns.get_loc('B')] = -1.
-            df_new['string'] = 'foo'
-
-            sl = df_new.columns.get_loc('string')
-            df_new.iloc[1:4, sl] = np.nan
-            df_new.iloc[5:6, sl] = 'bar'
-
-            df_new['string2'] = 'foo'
-            sl = df_new.columns.get_loc('string2')
-            df_new.iloc[2:5, sl] = np.nan
-            df_new.iloc[7:8, sl] = 'bar'
-            _maybe_remove(store, 'df')
-            store.append(
-                'df', df_new, data_columns=['A', 'B', 'string', 'string2'])
-            result = store.select('df',
-                                  "string='foo' and string2='foo'"
-                                  " and A>0 and B<0")
-            expected = df_new[(df_new.string == 'foo') & (
-                df_new.string2 == 'foo') & (df_new.A > 0) & (df_new.B < 0)]
-            tm.assert_frame_equal(result, expected, check_index_type=False)
-
-            # yield an empty frame
-            result = store.select('df', "string='foo' and string2='cool'")
-            expected = df_new[(df_new.string == 'foo') & (
-                df_new.string2 == 'cool')]
-            tm.assert_frame_equal(result, expected, check_index_type=False)
-
-        with ensure_clean_store(self.path) as store:
-            # doc example
-            df_dc = df.copy()
-            df_dc['string'] = 'foo'
-            df_dc.loc[4:6, 'string'] = np.nan
-            df_dc.loc[7:9, 'string'] = 'bar'
-            df_dc['string2'] = 'cool'
-            df_dc['datetime'] = Timestamp('20010102')
-            df_dc = df_dc._convert(datetime=True)
-            df_dc.loc[3:5, ['A', 'B', 'datetime']] = np.nan
-
-            _maybe_remove(store, 'df_dc')
-            store.append('df_dc', df_dc,
-                         data_columns=['B', 'C', 'string',
-                                       'string2', 'datetime'])
-            result = store.select('df_dc', 'B>0')
-
-            expected = df_dc[df_dc.B > 0]
-            tm.assert_frame_equal(result, expected, check_index_type=False)
-
-            result = store.select(
-                'df_dc', ['B > 0', 'C > 0', 'string == foo'])
-            expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (
-                df_dc.string == 'foo')]
-            tm.assert_frame_equal(result, expected, check_index_type=False)
-
-        with ensure_clean_store(self.path) as store:
-            # doc example part 2
-            np.random.seed(1234)
-            index = date_range('1/1/2000', periods=8)
-            df_dc = DataFrame(np.random.randn(8, 3), index=index,
-                              columns=['A', 'B', 'C'])
-            df_dc['string'] = 'foo'
-            df_dc.loc[4:6, 'string'] = np.nan
-            df_dc.loc[7:9, 'string'] = 'bar'
-            df_dc.loc[:, ['B', 'C']] = df_dc.loc[:, ['B', 'C']].abs()
-            df_dc['string2'] = 'cool'
-
-            # on-disk operations
-            store.append('df_dc', df_dc, data_columns=[
-                         'B', 'C', 'string', 'string2'])
-
-            result = store.select('df_dc', 'B>0')
-            expected = df_dc[df_dc.B > 0]
-            tm.assert_frame_equal(result, expected)
-
-            result = store.select(
-                'df_dc', ['B > 0', 'C > 0', 'string == "foo"'])
-            expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) &
-                             (df_dc.string == 'foo')]
-            tm.assert_frame_equal(result, expected)
-
-        with ensure_clean_store(self.path) as store:
-            with catch_warnings(record=True):
-                # panel
-                # GH5717 not handling data_columns
-                np.random.seed(1234)
-                p = tm.makePanel()
-
-                store.append('p1', p)
-                tm.assert_panel_equal(store.select('p1'), p)
-
-                store.append('p2', p, data_columns=True)
-                tm.assert_panel_equal(store.select('p2'), p)
-
-                result = store.select('p2', where='ItemA>0')
-                expected = p.to_frame()
-                expected = expected[expected['ItemA'] > 0]
-                tm.assert_frame_equal(result.to_frame(), expected)
-
-                result = store.select(
-                    'p2', where='ItemA>0 & minor_axis=["A","B"]')
-                expected = p.to_frame()
-                expected = expected[expected['ItemA'] > 0]
-                expected = expected[expected.reset_index(
-                    level=['major']).index.isin(['A', 'B'])]
-                tm.assert_frame_equal(result.to_frame(), expected)
-
-    def test_create_table_index(self):
-
-        with ensure_clean_store(self.path) as store:
-
-            with catch_warnings(record=True):
-                def col(t, column):
-                    return getattr(store.get_storer(t).table.cols, column)
-
-                # index=False
-                wp = tm.makePanel()
-                store.append('p5', wp, index=False)
-                store.create_table_index('p5', columns=['major_axis'])
-                assert(col('p5', 'major_axis').is_indexed is True)
-                assert(col('p5', 'minor_axis').is_indexed is False)
-
-                # index=True
-                store.append('p5i', wp, index=True)
-                assert(col('p5i', 'major_axis').is_indexed is True)
-                assert(col('p5i', 'minor_axis').is_indexed is True)
-
-                # default optlevels
-                store.get_storer('p5').create_index()
-                assert(col('p5', 'major_axis').index.optlevel == 6)
-                assert(col('p5', 'minor_axis').index.kind == 'medium')
-
-                # let's change the indexing scheme
-                store.create_table_index('p5')
-                assert(col('p5', 'major_axis').index.optlevel == 6)
-                assert(col('p5', 'minor_axis').index.kind == 'medium')
-                store.create_table_index('p5', optlevel=9)
-                assert(col('p5', 'major_axis').index.optlevel == 9)
-                assert(col('p5', 'minor_axis').index.kind == 'medium')
-                store.create_table_index('p5', kind='full')
-                assert(col('p5', 'major_axis').index.optlevel == 9)
-                assert(col('p5', 'minor_axis').index.kind == 'full')
-                store.create_table_index('p5', optlevel=1, kind='light')
-                assert(col('p5', 'major_axis').index.optlevel == 1)
-                assert(col('p5', 'minor_axis').index.kind == 'light')
-
-                # data columns
-                df = tm.makeTimeDataFrame()
-                df['string'] = 'foo'
-                df['string2'] = 'bar'
-                store.append('f', df, data_columns=['string', 'string2'])
-                assert(col('f', 'index').is_indexed is True)
-                assert(col('f', 'string').is_indexed is True)
-                assert(col('f', 'string2').is_indexed is True)
-
-                # specify index=columns
-                store.append(
-                    'f2', df, index=['string'],
-                    data_columns=['string', 'string2'])
-                assert(col('f2', 'index').is_indexed is False)
-                assert(col('f2', 'string').is_indexed is True)
-                assert(col('f2', 'string2').is_indexed is False)
-
-                # try to index a non-table
-                _maybe_remove(store, 'f2')
-                store.put('f2', df)
-                pytest.raises(TypeError, store.create_table_index, 'f2')
-
-    def test_append_diff_item_order(self):
-
-        with catch_warnings(record=True):
-            wp = tm.makePanel()
-            wp1 = wp.iloc[:, :10, :]
-            wp2 = wp.iloc[wp.items.get_indexer(['ItemC', 'ItemB', 'ItemA']),
-                          10:, :]
-
-            with ensure_clean_store(self.path) as store:
-                store.put('panel', wp1, format='table')
-                pytest.raises(ValueError, store.put, 'panel', wp2,
-                              append=True)
-
-    def test_append_hierarchical(self):
-        index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
-                                   ['one', 'two', 'three']],
-                           labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
-                                   [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
-                           names=['foo', 'bar'])
-        df = DataFrame(np.random.randn(10, 3), index=index,
-                       columns=['A', 'B', 'C'])
-
-        with ensure_clean_store(self.path) as store:
-            store.append('mi', df)
-            result = store.select('mi')
-            tm.assert_frame_equal(result, df)
-
-            # GH 3748
-            result = store.select('mi', columns=['A', 'B'])
-            expected = df.reindex(columns=['A', 'B'])
-            tm.assert_frame_equal(result, expected)
-
-        with ensure_clean_path('test.hdf') as path:
-            df.to_hdf(path, 'df', format='table')
-            result = read_hdf(path, 'df', columns=['A', 'B'])
-            expected = df.reindex(columns=['A', 'B'])
-            tm.assert_frame_equal(result, expected)
-
-    def test_column_multiindex(self):
-        # GH 4710
-        # recreate multi-indexes properly
-
-        index = MultiIndex.from_tuples([('A', 'a'), ('A', 'b'),
-                                        ('B', 'a'), ('B', 'b')],
-                                       names=['first', 'second'])
-        df = DataFrame(np.arange(12).reshape(3, 4), columns=index)
-        expected = df.copy()
-        if isinstance(expected.index, RangeIndex):
-            expected.index = Int64Index(expected.index)
-
-        with ensure_clean_store(self.path) as store:
-
-            store.put('df', df)
-            tm.assert_frame_equal(store['df'], expected,
-                                  check_index_type=True,
-                                  check_column_type=True)
-
-            store.put('df1', df, format='table')
-            tm.assert_frame_equal(store['df1'], expected,
-                                  check_index_type=True,
-                                  check_column_type=True)
-
-            pytest.raises(ValueError, store.put, 'df2', df,
-                          format='table', data_columns=['A'])
-            pytest.raises(ValueError, store.put, 'df3', df,
-                          format='table', data_columns=True)
-
-        # appending multi-column on existing table (see GH 6167)
-        with ensure_clean_store(self.path) as store:
-            store.append('df2', df)
-            store.append('df2', df)
-
-            tm.assert_frame_equal(store['df2'], concat((df, df)))
-
-        # non_index_axes name
-        df = DataFrame(np.arange(12).reshape(3, 4),
-                       columns=Index(list('ABCD'), name='foo'))
-        expected = df.copy()
-        if isinstance(expected.index, RangeIndex):
-            expected.index = Int64Index(expected.index)
-
-        with ensure_clean_store(self.path) as store:
-
-            store.put('df1', df, format='table')
-            tm.assert_frame_equal(store['df1'], expected,
-                                  check_index_type=True,
-                                  check_column_type=True)
-
-    def test_store_multiindex(self):
-
-        # validate multi-index names
-        # GH 5527
-        with ensure_clean_store(self.path) as store:
-
-            def make_index(names=None):
-                return MultiIndex.from_tuples([(datetime.datetime(2013, 12, d),
-                                                s, t)
-                                               for d in range(1, 3)
-                                               for s in range(2)
-                                               for t in range(3)],
-                                              names=names)
-
-            # no names
-            _maybe_remove(store, 'df')
-            df = DataFrame(np.zeros((12, 2)), columns=[
-                           'a', 'b'], index=make_index())
-            store.append('df', df)
-            tm.assert_frame_equal(store.select('df'), df)
-
-            # partial names
-            _maybe_remove(store, 'df')
-            df = DataFrame(np.zeros((12, 2)), columns=[
-                           'a', 'b'], index=make_index(['date', None, None]))
-            store.append('df', df)
-            tm.assert_frame_equal(store.select('df'), df)
-
-            # series
-            _maybe_remove(store, 's')
-            s = Series(np.zeros(12), index=make_index(['date', None, None]))
-            store.append('s', s)
-            xp = Series(np.zeros(12), index=make_index(
-                ['date', 'level_1', 'level_2']))
-            tm.assert_series_equal(store.select('s'), xp)
-
-            # dup with column
-            _maybe_remove(store, 'df')
-            df = DataFrame(np.zeros((12, 2)), columns=[
-                           'a', 'b'], index=make_index(['date', 'a', 't']))
-            pytest.raises(ValueError, store.append, 'df', df)
-
-            # fully named
-            _maybe_remove(store, 'df')
-            df = DataFrame(np.zeros((12, 2)), columns=[
-                           'a', 'b'], index=make_index(['date', 's', 't']))
-            store.append('df', df)
-            tm.assert_frame_equal(store.select('df'), df)
-
-    def test_select_columns_in_where(self):
-
-        # GH 6169
-        # recreate multi-indexes when columns is passed
-        # in the `where` argument
-        index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
-                                   ['one', 'two', 'three']],
-                           labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
-                                   [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
-                           names=['foo_name', 'bar_name'])
-
-        # With a DataFrame
-        df = DataFrame(np.random.randn(10, 3), index=index,
-                       columns=['A', 'B', 'C'])
-
-        with ensure_clean_store(self.path) as store:
-            store.put('df', df, format='table')
-            expected = df[['A']]
-
-            tm.assert_frame_equal(store.select('df', columns=['A']), expected)
-
-            tm.assert_frame_equal(store.select(
-                'df', where="columns=['A']"), expected)
-
-        # With a Series
-        s = Series(np.random.randn(10), index=index,
-                   name='A')
-        with ensure_clean_store(self.path) as store:
-            store.put('s', s, format='table')
-            tm.assert_series_equal(store.select('s', where="columns=['A']"), s)
-
-    def test_mi_data_columns(self):
-        # GH 14435
-        idx = pd.MultiIndex.from_arrays([date_range('2000-01-01', periods=5),
-                                         range(5)],
-                                        names=['date', 'id'])
-        df = pd.DataFrame({'a': [1.1, 1.2, 1.3, 1.4, 1.5]}, index=idx)
-
-        with ensure_clean_store(self.path) as store:
-            store.append('df', df, data_columns=True)
-
-            actual = store.select('df', where='id == 1')
-            expected = df.iloc[[1], :]
-            tm.assert_frame_equal(actual, expected)
-
-    def test_pass_spec_to_storer(self):
-
-        df = tm.makeDataFrame()
-
-        with ensure_clean_store(self.path) as store:
-            store.put('df', df)
-            pytest.raises(TypeError, store.select, 'df', columns=['A'])
-            pytest.raises(TypeError, store.select,
-                          'df', where=[('columns=A')])
-
-    def test_append_misc(self):
-
-        with ensure_clean_store(self.path) as store:
-            df = tm.makeDataFrame()
-            store.append('df', df, chunksize=1)
-            result = store.select('df')
-            tm.assert_frame_equal(result, df)
-
-            store.append('df1', df, expectedrows=10)
-            result = store.select('df1')
-            tm.assert_frame_equal(result, df)
-
-        # more chunksize in append tests
-        def check(obj, comparator):
-            for c in [10, 200, 1000]:
-                with ensure_clean_store(self.path, mode='w') as store:
-                    store.append('obj', obj, chunksize=c)
-                    result = store.select('obj')
-                    comparator(result, obj)
-
-        df = tm.makeDataFrame()
-        df['string'] = 'foo'
-        df['float322'] = 1.
-        df['float322'] = df['float322'].astype('float32')
-        df['bool'] = df['float322'] > 0
-        df['time1'] = Timestamp('20130101')
-        df['time2'] = Timestamp('20130102')
-        check(df, tm.assert_frame_equal)
-
-        with catch_warnings(record=True):
-            p = tm.makePanel()
-            check(p, assert_panel_equal)
-
-        # empty frame, GH4273
-        with ensure_clean_store(self.path) as store:
-
-            # 0 len
-            df_empty = DataFrame(columns=list('ABC'))
-            store.append('df', df_empty)
-            pytest.raises(KeyError, store.select, 'df')
-
-            # repeated append of 0/non-zero frames
-            df = DataFrame(np.random.rand(10, 3), columns=list('ABC'))
-            store.append('df', df)
-            assert_frame_equal(store.select('df'), df)
-            store.append('df', df_empty)
-            assert_frame_equal(store.select('df'), df)
-
-            # store
-            df = DataFrame(columns=list('ABC'))
-            store.put('df2', df)
-            assert_frame_equal(store.select('df2'), df)
-
-            with catch_warnings(record=True):
-
-                # 0 len
-                p_empty = Panel(items=list('ABC'))
-                store.append('p', p_empty)
-                pytest.raises(KeyError, store.select, 'p')
-
-                # repeated append of 0/non-zero frames
-                p = Panel(np.random.randn(3, 4, 5), items=list('ABC'))
-                store.append('p', p)
-                assert_panel_equal(store.select('p'), p)
-                store.append('p', p_empty)
-                assert_panel_equal(store.select('p'), p)
-
-                # store
-                store.put('p2', p_empty)
-                assert_panel_equal(store.select('p2'), p_empty)
-
-    def test_append_raise(self):
-
-        with ensure_clean_store(self.path) as store:
-
-            # test append with invalid input to get good error messages
-
-            # list in column
-            df = tm.makeDataFrame()
-            df['invalid'] = [['a']] * len(df)
-            assert df.dtypes['invalid'] == np.object_
-            pytest.raises(TypeError, store.append, 'df', df)
-
-            # multiple invalid columns
-            df['invalid2'] = [['a']] * len(df)
-            df['invalid3'] = [['a']] * len(df)
-            pytest.raises(TypeError, store.append, 'df', df)
-
-            # datetime with embedded nans as object
-            df = tm.makeDataFrame()
-            s = Series(datetime.datetime(2001, 1, 2), index=df.index)
-            s = s.astype(object)
-            s[0:5] = np.nan
-            df['invalid'] = s
-            assert df.dtypes['invalid'] == np.object_
-            pytest.raises(TypeError, store.append, 'df', df)
-
-            # directly ndarray
-            pytest.raises(TypeError, store.append, 'df', np.arange(10))
-
-            # series directly
-            pytest.raises(TypeError, store.append,
-                          'df', Series(np.arange(10)))
-
-            # appending an incompatible table
-            df = tm.makeDataFrame()
-            store.append('df', df)
-
-            df['foo'] = 'foo'
-            pytest.raises(ValueError, store.append, 'df', df)
-
-    def test_table_index_incompatible_dtypes(self):
-        df1 = DataFrame({'a': [1, 2, 3]})
-        df2 = DataFrame({'a': [4, 5, 6]},
-                        index=date_range('1/1/2000', periods=3))
-
-        with ensure_clean_store(self.path) as store:
-            store.put('frame', df1, format='table')
-            pytest.raises(TypeError, store.put, 'frame', df2,
-                          format='table', append=True)
-
-    def test_table_values_dtypes_roundtrip(self):
-
-        with ensure_clean_store(self.path) as store:
-            df1 = DataFrame({'a': [1, 2, 3]}, dtype='f8')
-            store.append('df_f8', df1)
-            assert_series_equal(df1.dtypes, store['df_f8'].dtypes)
-
-            df2 = DataFrame({'a': [1, 2, 3]}, dtype='i8')
-            store.append('df_i8', df2)
-            assert_series_equal(df2.dtypes, store['df_i8'].dtypes)
-
-            # incompatible dtype
-            pytest.raises(ValueError, store.append, 'df_i8', df1)
-
-            # check creation/storage/retrieval of float32 (a bit hacky to
-            # actually create them though)
-            df1 = DataFrame(
-                np.array([[1], [2], [3]], dtype='f4'), columns=['A'])
-            store.append('df_f4', df1)
-            assert_series_equal(df1.dtypes, store['df_f4'].dtypes)
-            assert df1.dtypes[0] == 'float32'
-
-            # check with mixed dtypes
-            df1 = DataFrame(dict((c, Series(np.random.randn(5), dtype=c))
-                                 for c in ['float32', 'float64', 'int32',
-                                           'int64', 'int16', 'int8']))
-            df1['string'] = 'foo'
-            df1['float322'] = 1.
-            df1['float322'] = df1['float322'].astype('float32')
-            df1['bool'] = df1['float32'] > 0
-            df1['time1'] = Timestamp('20130101')
-            df1['time2'] = Timestamp('20130102')
-
-            store.append('df_mixed_dtypes1', df1)
-            result = store.select('df_mixed_dtypes1').get_dtype_counts()
-            expected = Series({'float32': 2, 'float64': 1, 'int32': 1,
-                               'bool': 1, 'int16': 1, 'int8': 1,
-                               'int64': 1, 'object': 1, 'datetime64[ns]': 2})
-            result = result.sort_index()
-            expected = expected.sort_index()
-            tm.assert_series_equal(result, expected)
-
-    def test_table_mixed_dtypes(self):
-
-        # frame
-        df = tm.makeDataFrame()
-        df['obj1'] = 'foo'
-        df['obj2'] = 'bar'
-        df['bool1'] = df['A'] > 0
-        df['bool2'] = df['B'] > 0
-        df['bool3'] = True
-        df['int1'] = 1
-        df['int2'] = 2
-        df['timestamp1'] = Timestamp('20010102')
-        df['timestamp2'] = Timestamp('20010103')
-        df['datetime1'] = datetime.datetime(2001, 1, 2, 0, 0)
-        df['datetime2'] = datetime.datetime(2001, 1, 3, 0, 0)
-        df.loc[3:6, ['obj1']] = np.nan
-        df = df._consolidate()._convert(datetime=True)
-
-        with ensure_clean_store(self.path) as store:
-            store.append('df1_mixed', df)
-            tm.assert_frame_equal(store.select('df1_mixed'), df)
-
-        with catch_warnings(record=True):
-
-            # panel
-            wp = tm.makePanel()
-            wp['obj1'] = 'foo'
-            wp['obj2'] = 'bar'
-            wp['bool1'] = wp['ItemA'] > 0
-            wp['bool2'] = wp['ItemB'] > 0
-            wp['int1'] = 1
-            wp['int2'] = 2
-            wp = wp._consolidate()
-
-        with catch_warnings(record=True):
-
-            with ensure_clean_store(self.path) as store:
-                store.append('p1_mixed', wp)
-                assert_panel_equal(store.select('p1_mixed'), wp)
-
-    def test_unimplemented_dtypes_table_columns(self):
-
-        with ensure_clean_store(self.path) as store:
-
-            l = [('date', datetime.date(2001, 1, 2))]
-
-            # py3 ok for unicode
-            if not compat.PY3:
-                l.append(('unicode', u('\\u03c3')))
-
-            # currently not supported dtypes ####
-            for n, f in l:
-                df = tm.makeDataFrame()
-                df[n] = f
-                pytest.raises(
-                    TypeError, store.append, 'df1_%s' % n, df)
-
-        # frame
-        df = tm.makeDataFrame()
-        df['obj1'] = 'foo'
-        df['obj2'] = 'bar'
-        df['datetime1'] = datetime.date(2001, 1, 2)
-        df = df._consolidate()._convert(datetime=True)
-
-        with ensure_clean_store(self.path) as store:
-            # this fails because we have a date in the object block......
-            pytest.raises(TypeError, store.append, 'df_unimplemented', df)
-
-    def test_calendar_roundtrip_issue(self):
-
-        # 8591
-        # doc example from tseries holiday section
-        weekmask_egypt = 'Sun Mon Tue Wed Thu'
-        holidays = ['2012-05-01',
-                    datetime.datetime(2013, 5, 1), np.datetime64('2014-05-01')]
-        bday_egypt = pd.offsets.CustomBusinessDay(
-            holidays=holidays, weekmask=weekmask_egypt)
-        dt = datetime.datetime(2013, 4, 30)
-        dts = date_range(dt, periods=5, freq=bday_egypt)
-
-        s = (Series(dts.weekday, dts).map(
-            Series('Mon Tue Wed Thu Fri Sat Sun'.split())))
-
-        with ensure_clean_store(self.path) as store:
-
-            store.put('fixed', s)
-            result = store.select('fixed')
-            assert_series_equal(result, s)
-
-            store.append('table', s)
-            result = store.select('table')
-            assert_series_equal(result, s)
-
-    def test_roundtrip_tz_aware_index(self):
-        # GH 17618
-        time = pd.Timestamp('2000-01-01 01:00:00', tz='US/Eastern')
-        df = pd.DataFrame(data=[0], index=[time])
-
-        with ensure_clean_store(self.path) as store:
-            store.put('frame', df, format='fixed')
-            recons = store['frame']
-            tm.assert_frame_equal(recons, df)
-            assert recons.index[0].value == 946706400000000000
-
-    def test_append_with_timedelta(self):
-        # GH 3577
-        # append timedelta
-
-        df = DataFrame(dict(A=Timestamp('20130101'), B=[Timestamp(
-            '20130101') + timedelta(days=i, seconds=10) for i in range(10)]))
-        df['C'] = df['A'] - df['B']
-        df.loc[3:5, 'C'] = np.nan
-
-        with ensure_clean_store(self.path) as store:
-
-            # table
-            _maybe_remove(store, 'df')
-            store.append('df', df, data_columns=True)
-            result = store.select('df')
-            assert_frame_equal(result, df)
-
-            result = store.select('df', where="C<100000")
-            assert_frame_equal(result, df)
-
-            result = store.select('df', where="C<pd.Timedelta('-3D')")
-            assert_frame_equal(result, df.iloc[3:])
-
-            result = store.select('df', "C<'-3D'")
-            assert_frame_equal(result, df.iloc[3:])
-
-            # a bit hacky here as we don't really deal with the NaT properly
-
-            result = store.select('df', "C<'-500000s'")
-            result = result.dropna(subset=['C'])
-            assert_frame_equal(result, df.iloc[6:])
-
-            result = store.select('df', "C<'-3.5D'")
-            result = result.iloc[1:]
-            assert_frame_equal(result, df.iloc[4:])
-
-            # fixed
-            _maybe_remove(store, 'df2')
-            store.put('df2', df)
-            result = store.select('df2')
-            assert_frame_equal(result, df)
-
-    def test_remove(self):
-
-        with ensure_clean_store(self.path) as store:
-
-            ts = tm.makeTimeSeries()
-            df = tm.makeDataFrame()
-            store['a'] = ts
-            store['b'] = df
-            _maybe_remove(store, 'a')
-            assert len(store) == 1
-            tm.assert_frame_equal(df, store['b'])
-
-            _maybe_remove(store, 'b')
-            assert len(store) == 0
-
-            # nonexistence
-            pytest.raises(KeyError, store.remove, 'a_nonexistent_store')
-
-            # pathing
-            store['a'] = ts
-            store['b/foo'] = df
-            _maybe_remove(store, 'foo')
-            _maybe_remove(store, 'b/foo')
-            assert len(store) == 1
-
-            store['a'] = ts
-            store['b/foo'] = df
-            _maybe_remove(store, 'b')
-            assert len(store) == 1
-
-            # __delitem__
-            store['a'] = ts
-            store['b'] = df
-            del store['a']
-            del store['b']
-            assert len(store) == 0
-
-    def test_remove_where(self):
-
-        with ensure_clean_store(self.path) as store:
-
-            with catch_warnings(record=True):
-
-                # non-existence
-                crit1 = 'index>foo'
-                pytest.raises(KeyError, store.remove, 'a', [crit1])
-
-                # try to remove non-table (with crit)
-                # non-table ok (where = None)
tm.makePanel(30) - store.put('wp', wp, format='table') - store.remove('wp', ["minor_axis=['A', 'D']"]) - rs = store.select('wp') - expected = wp.reindex(minor_axis=['B', 'C']) - assert_panel_equal(rs, expected) - - # empty where - _maybe_remove(store, 'wp') - store.put('wp', wp, format='table') - - # deleted number (entire table) - n = store.remove('wp', []) - assert n == 120 - - # non - empty where - _maybe_remove(store, 'wp') - store.put('wp', wp, format='table') - pytest.raises(ValueError, store.remove, - 'wp', ['foo']) - - def test_remove_startstop(self): - # GH #4835 and #6177 - - with ensure_clean_store(self.path) as store: - - with catch_warnings(record=True): - wp = tm.makePanel(30) - - # start - _maybe_remove(store, 'wp1') - store.put('wp1', wp, format='t') - n = store.remove('wp1', start=32) - assert n == 120 - 32 - result = store.select('wp1') - expected = wp.reindex(major_axis=wp.major_axis[:32 // 4]) - assert_panel_equal(result, expected) - - _maybe_remove(store, 'wp2') - store.put('wp2', wp, format='t') - n = store.remove('wp2', start=-32) - assert n == 32 - result = store.select('wp2') - expected = wp.reindex(major_axis=wp.major_axis[:-32 // 4]) - assert_panel_equal(result, expected) - - # stop - _maybe_remove(store, 'wp3') - store.put('wp3', wp, format='t') - n = store.remove('wp3', stop=32) - assert n == 32 - result = store.select('wp3') - expected = wp.reindex(major_axis=wp.major_axis[32 // 4:]) - assert_panel_equal(result, expected) - - _maybe_remove(store, 'wp4') - store.put('wp4', wp, format='t') - n = store.remove('wp4', stop=-32) - assert n == 120 - 32 - result = store.select('wp4') - expected = wp.reindex(major_axis=wp.major_axis[-32 // 4:]) - assert_panel_equal(result, expected) - - # start n stop - _maybe_remove(store, 'wp5') - store.put('wp5', wp, format='t') - n = store.remove('wp5', start=16, stop=-16) - assert n == 120 - 32 - result = store.select('wp5') - expected = wp.reindex( - major_axis=(wp.major_axis[:16 // 4] - .union(wp.major_axis[-16 // 4:]))) - assert_panel_equal(result, expected) - - _maybe_remove(store, 'wp6') - store.put('wp6', wp, format='t') - n = store.remove('wp6', start=16, stop=16) - assert n == 0 - result = store.select('wp6') - expected = wp.reindex(major_axis=wp.major_axis) - assert_panel_equal(result, expected) - - # with where - _maybe_remove(store, 'wp7') - - # TODO: unused? 
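The start/stop removal tests above assert that HDFStore.remove returns the number of rows deleted. A sketch of the same contract on a plain frame, assuming table format (`remove.h5` is an arbitrary file name):

```python
import numpy as np
import pandas as pd

df = pd.DataFrame({'A': np.arange(10.)},
                  index=pd.date_range('2000-01-01', periods=10))

with pd.HDFStore('remove.h5', mode='w') as store:  # arbitrary file name
    store.put('df', df, format='table')
    # delete the first five stored rows; the return value is the
    # number of rows removed
    n = store.remove('df', start=0, stop=5)
    assert n == 5
    assert len(store.select('df')) == 5
```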
- date = wp.major_axis.take(np.arange(0, 30, 3)) # noqa - - crit = 'major_axis=date' - store.put('wp7', wp, format='t') - n = store.remove('wp7', where=[crit], stop=80) - assert n == 28 - result = store.select('wp7') - expected = wp.reindex(major_axis=wp.major_axis.difference( - wp.major_axis[np.arange(0, 20, 3)])) - assert_panel_equal(result, expected) - - def test_remove_crit(self): - - with ensure_clean_store(self.path) as store: - - with catch_warnings(record=True): - wp = tm.makePanel(30) - - # group row removal - _maybe_remove(store, 'wp3') - date4 = wp.major_axis.take([0, 1, 2, 4, 5, 6, 8, 9, 10]) - crit4 = 'major_axis=date4' - store.put('wp3', wp, format='t') - n = store.remove('wp3', where=[crit4]) - assert n == 36 - - result = store.select('wp3') - expected = wp.reindex( - major_axis=wp.major_axis.difference(date4)) - assert_panel_equal(result, expected) - - # upper half - _maybe_remove(store, 'wp') - store.put('wp', wp, format='table') - date = wp.major_axis[len(wp.major_axis) // 2] - - crit1 = 'major_axis>date' - crit2 = "minor_axis=['A', 'D']" - n = store.remove('wp', where=[crit1]) - assert n == 56 - - n = store.remove('wp', where=[crit2]) - assert n == 32 - - result = store['wp'] - expected = wp.truncate(after=date).reindex(minor=['B', 'C']) - assert_panel_equal(result, expected) - - # individual row elements - _maybe_remove(store, 'wp2') - store.put('wp2', wp, format='table') - - date1 = wp.major_axis[1:3] - crit1 = 'major_axis=date1' - store.remove('wp2', where=[crit1]) - result = store.select('wp2') - expected = wp.reindex( - major_axis=wp.major_axis.difference(date1)) - assert_panel_equal(result, expected) - - date2 = wp.major_axis[5] - crit2 = 'major_axis=date2' - store.remove('wp2', where=[crit2]) - result = store['wp2'] - expected = wp.reindex( - major_axis=(wp.major_axis - .difference(date1) - .difference(Index([date2])) - )) - assert_panel_equal(result, expected) - - date3 = [wp.major_axis[7], wp.major_axis[9]] - crit3 = 'major_axis=date3' - store.remove('wp2', where=[crit3]) - result = store['wp2'] - expected = wp.reindex(major_axis=wp.major_axis - .difference(date1) - .difference(Index([date2])) - .difference(Index(date3))) - assert_panel_equal(result, expected) - - # corners - _maybe_remove(store, 'wp4') - store.put('wp4', wp, format='table') - n = store.remove( - 'wp4', where="major_axis>wp.major_axis[-1]") - result = store.select('wp4') - assert_panel_equal(result, wp) - - def test_invalid_terms(self): - - with ensure_clean_store(self.path) as store: - - with catch_warnings(record=True): - - df = tm.makeTimeDataFrame() - df['string'] = 'foo' - df.loc[0:4, 'string'] = 'bar' - wp = tm.makePanel() - - store.put('df', df, format='table') - store.put('wp', wp, format='table') - - # some invalid terms - pytest.raises(ValueError, store.select, - 'wp', "minor=['A', 'B']") - pytest.raises(ValueError, store.select, - 'wp', ["index=['20121114']"]) - pytest.raises(ValueError, store.select, 'wp', [ - "index=['20121114', '20121114']"]) - pytest.raises(TypeError, Term) - - # more invalid - pytest.raises( - ValueError, store.select, 'df', 'df.index[3]') - pytest.raises(SyntaxError, store.select, 'df', 'index>') - pytest.raises( - ValueError, store.select, 'wp', - "major_axis<'20000108' & minor_axis['A', 'B']") - - # from the docs - with ensure_clean_path(self.path) as path: - dfq = DataFrame(np.random.randn(10, 4), columns=list( - 'ABCD'), index=date_range('20130101', periods=10)) - dfq.to_hdf(path, 'dfq', format='table', data_columns=True) - - # check ok - read_hdf(path, 
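test_invalid_terms draws the line between query strings the Term grammar accepts and ones it rejects; the "from the docs" round trip it references looks roughly like this (file names are arbitrary assumptions):

```python
import numpy as np
import pandas as pd

dfq = pd.DataFrame(np.random.randn(10, 4), columns=list('ABCD'),
                   index=pd.date_range('20130101', periods=10))

# data_columns=True makes the value columns individually queryable
dfq.to_hdf('query.h5', 'dfq', format='table', data_columns=True)
ok = pd.read_hdf('query.h5', 'dfq',
                 where="index>Timestamp('20130104') & columns=['A', 'B']")

# without data_columns, only the index is queryable: 'A>0' raises ValueError
dfq.to_hdf('query2.h5', 'dfq', format='table')
try:
    pd.read_hdf('query2.h5', 'dfq', where='A>0')
except ValueError:
    pass
```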
'dfq', - where="index>Timestamp('20130104') & columns=['A', 'B']") - read_hdf(path, 'dfq', where="A>0 or C>0") - - # catch the invalid reference - with ensure_clean_path(self.path) as path: - dfq = DataFrame(np.random.randn(10, 4), columns=list( - 'ABCD'), index=date_range('20130101', periods=10)) - dfq.to_hdf(path, 'dfq', format='table') - - pytest.raises(ValueError, read_hdf, path, - 'dfq', where="A>0 or C>0") - - def test_terms(self): - - with ensure_clean_store(self.path) as store: - - with catch_warnings(record=True): - - wp = tm.makePanel() - wpneg = Panel.fromDict({-1: tm.makeDataFrame(), - 0: tm.makeDataFrame(), - 1: tm.makeDataFrame()}) - - store.put('wp', wp, format='table') - store.put('wpneg', wpneg, format='table') - - # panel - result = store.select( - 'wp', - "major_axis<'20000108' and minor_axis=['A', 'B']") - expected = wp.truncate( - after='20000108').reindex(minor=['A', 'B']) - assert_panel_equal(result, expected) - - # with deprecation - result = store.select( - 'wp', where=("major_axis<'20000108' " - "and minor_axis=['A', 'B']")) - expected = wp.truncate( - after='20000108').reindex(minor=['A', 'B']) - tm.assert_panel_equal(result, expected) - - with catch_warnings(record=True): - - # valid terms - terms = [('major_axis=20121114'), - ('major_axis>20121114'), - (("major_axis=['20121114', '20121114']"),), - ('major_axis=datetime.datetime(2012, 11, 14)'), - 'major_axis> 20121114', - 'major_axis >20121114', - 'major_axis > 20121114', - (("minor_axis=['A', 'B']"),), - (("minor_axis=['A', 'B']"),), - ((("minor_axis==['A', 'B']"),),), - (("items=['ItemA', 'ItemB']"),), - ('items=ItemA'), - ] - - for t in terms: - store.select('wp', t) - - with tm.assert_raises_regex( - TypeError, 'Only named functions are supported'): - store.select( - 'wp', - 'major_axis == (lambda x: x)("20130101")') - - with catch_warnings(record=True): - # check USub node parsing - res = store.select('wpneg', 'items == -1') - expected = Panel({-1: wpneg[-1]}) - tm.assert_panel_equal(res, expected) - - with tm.assert_raises_regex(NotImplementedError, - 'Unary addition ' - 'not supported'): - store.select('wpneg', 'items == +1') - - def test_term_compat(self): - with ensure_clean_store(self.path) as store: - - with catch_warnings(record=True): - wp = Panel(np.random.randn(2, 5, 4), items=['Item1', 'Item2'], - major_axis=date_range('1/1/2000', periods=5), - minor_axis=['A', 'B', 'C', 'D']) - store.append('wp', wp) - - result = store.select( - 'wp', where=("major_axis>20000102 " - "and minor_axis=['A', 'B']")) - expected = wp.loc[:, wp.major_axis > - Timestamp('20000102'), ['A', 'B']] - assert_panel_equal(result, expected) - - store.remove('wp', 'major_axis>20000103') - result = store.select('wp') - expected = wp.loc[:, wp.major_axis <= Timestamp('20000103'), :] - assert_panel_equal(result, expected) - - with ensure_clean_store(self.path) as store: - - with catch_warnings(record=True): - wp = Panel(np.random.randn(2, 5, 4), - items=['Item1', 'Item2'], - major_axis=date_range('1/1/2000', periods=5), - minor_axis=['A', 'B', 'C', 'D']) - store.append('wp', wp) - - # stringified datetimes - result = store.select( - 'wp', 'major_axis>datetime.datetime(2000, 1, 2)') - expected = wp.loc[:, wp.major_axis > Timestamp('20000102')] - assert_panel_equal(result, expected) - - result = store.select( - 'wp', 'major_axis>datetime.datetime(2000, 1, 2)') - expected = wp.loc[:, wp.major_axis > Timestamp('20000102')] - assert_panel_equal(result, expected) - - result = store.select( - 'wp', - "major_axis=[datetime.datetime(2000, 
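The term-compatibility tests above accept several spellings for datetime comparisons. A hedged sketch showing three forms that should select the same rows (`terms.h5` is an arbitrary file name):

```python
import datetime
import numpy as np
import pandas as pd

df = pd.DataFrame({'A': np.arange(5.)},
                  index=pd.date_range('2000-01-01', periods=5))

with pd.HDFStore('terms.h5', mode='w') as store:  # arbitrary file name
    store.append('df', df)
    # three spellings of the same comparison; the query scope resolves
    # Timestamp itself and picks up `datetime` from the calling frame
    r1 = store.select('df', "index>'2000-01-02'")
    r2 = store.select('df', "index>datetime.datetime(2000, 1, 2)")
    r3 = store.select('df', "index>Timestamp('2000-01-02')")

assert r1.equals(r2) and r2.equals(r3)
```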
1, 2, 0, 0), " - "datetime.datetime(2000, 1, 3, 0, 0)]") - expected = wp.loc[:, [Timestamp('20000102'), - Timestamp('20000103')]] - assert_panel_equal(result, expected) - - result = store.select( - 'wp', "minor_axis=['A', 'B']") - expected = wp.loc[:, :, ['A', 'B']] - assert_panel_equal(result, expected) - - def test_same_name_scoping(self): - - with ensure_clean_store(self.path) as store: - - import pandas as pd - df = DataFrame(np.random.randn(20, 2), - index=pd.date_range('20130101', periods=20)) - store.put('df', df, format='table') - expected = df[df.index > pd.Timestamp('20130105')] - - import datetime # noqa - result = store.select('df', 'index>datetime.datetime(2013,1,5)') - assert_frame_equal(result, expected) - - from datetime import datetime # noqa - - # technically an error, but allow it - result = store.select('df', 'index>datetime.datetime(2013,1,5)') - assert_frame_equal(result, expected) - - result = store.select('df', 'index>datetime(2013,1,5)') - assert_frame_equal(result, expected) - - def test_series(self): - - s = tm.makeStringSeries() - self._check_roundtrip(s, tm.assert_series_equal) - - ts = tm.makeTimeSeries() - self._check_roundtrip(ts, tm.assert_series_equal) - - ts2 = Series(ts.index, Index(ts.index, dtype=object)) - self._check_roundtrip(ts2, tm.assert_series_equal) - - ts3 = Series(ts.values, Index(np.asarray(ts.index, dtype=object), - dtype=object)) - self._check_roundtrip(ts3, tm.assert_series_equal, - check_index_type=False) - - def test_sparse_series(self): - - s = tm.makeStringSeries() - s.iloc[3:5] = np.nan - ss = s.to_sparse() - self._check_roundtrip(ss, tm.assert_series_equal, - check_series_type=True) - - ss2 = s.to_sparse(kind='integer') - self._check_roundtrip(ss2, tm.assert_series_equal, - check_series_type=True) - - ss3 = s.to_sparse(fill_value=0) - self._check_roundtrip(ss3, tm.assert_series_equal, - check_series_type=True) - - def test_sparse_frame(self): - - s = tm.makeDataFrame() - s.iloc[3:5, 1:3] = np.nan - s.iloc[8:10, -2] = np.nan - ss = s.to_sparse() - - self._check_double_roundtrip(ss, tm.assert_frame_equal, - check_frame_type=True) - - ss2 = s.to_sparse(kind='integer') - self._check_double_roundtrip(ss2, tm.assert_frame_equal, - check_frame_type=True) - - ss3 = s.to_sparse(fill_value=0) - self._check_double_roundtrip(ss3, tm.assert_frame_equal, - check_frame_type=True) - - def test_float_index(self): - - # GH #454 - index = np.random.randn(10) - s = Series(np.random.randn(10), index=index) - self._check_roundtrip(s, tm.assert_series_equal) - - def test_tuple_index(self): - - # GH #492 - col = np.arange(10) - idx = [(0., 1.), (2., 3.), (4., 5.)] - data = np.random.randn(30).reshape((3, 10)) - DF = DataFrame(data, index=idx, columns=col) - - with catch_warnings(record=True): - self._check_roundtrip(DF, tm.assert_frame_equal) - - def test_index_types(self): - - with catch_warnings(record=True): - values = np.random.randn(2) - - func = lambda l, r: tm.assert_series_equal(l, r, - check_dtype=True, - check_index_type=True, - check_series_type=True) - - with catch_warnings(record=True): - ser = Series(values, [0, 'y']) - self._check_roundtrip(ser, func) - - with catch_warnings(record=True): - ser = Series(values, [datetime.datetime.today(), 0]) - self._check_roundtrip(ser, func) - - with catch_warnings(record=True): - ser = Series(values, ['y', 0]) - self._check_roundtrip(ser, func) - - with catch_warnings(record=True): - ser = Series(values, [datetime.date.today(), 'a']) - self._check_roundtrip(ser, func) - - with catch_warnings(record=True): 
- - ser = Series(values, [0, 'y']) - self._check_roundtrip(ser, func) - - ser = Series(values, [datetime.datetime.today(), 0]) - self._check_roundtrip(ser, func) - - ser = Series(values, ['y', 0]) - self._check_roundtrip(ser, func) - - ser = Series(values, [datetime.date.today(), 'a']) - self._check_roundtrip(ser, func) - - ser = Series(values, [1.23, 'b']) - self._check_roundtrip(ser, func) - - ser = Series(values, [1, 1.53]) - self._check_roundtrip(ser, func) - - ser = Series(values, [1, 5]) - self._check_roundtrip(ser, func) - - ser = Series(values, [datetime.datetime( - 2012, 1, 1), datetime.datetime(2012, 1, 2)]) - self._check_roundtrip(ser, func) - - def test_timeseries_preepoch(self): - - dr = bdate_range('1/1/1940', '1/1/1960') - ts = Series(np.random.randn(len(dr)), index=dr) - try: - self._check_roundtrip(ts, tm.assert_series_equal) - except OverflowError: - pytest.skip('known failure on some windows platforms') - - @pytest.mark.parametrize("compression", [ - False, pytest.param(True, marks=td.skip_if_windows_python_3) - ]) - def test_frame(self, compression): - - df = tm.makeDataFrame() - - # put in some random NAs - df.values[0, 0] = np.nan - df.values[5, 3] = np.nan - - self._check_roundtrip_table(df, tm.assert_frame_equal, - compression=compression) - self._check_roundtrip(df, tm.assert_frame_equal, - compression=compression) - - tdf = tm.makeTimeDataFrame() - self._check_roundtrip(tdf, tm.assert_frame_equal, - compression=compression) - - with ensure_clean_store(self.path) as store: - # not consolidated - df['foo'] = np.random.randn(len(df)) - store['df'] = df - recons = store['df'] - assert recons._data.is_consolidated() - - # empty - self._check_roundtrip(df[:0], tm.assert_frame_equal) - - def test_empty_series_frame(self): - s0 = Series() - s1 = Series(name='myseries') - df0 = DataFrame() - df1 = DataFrame(index=['a', 'b', 'c']) - df2 = DataFrame(columns=['d', 'e', 'f']) - - self._check_roundtrip(s0, tm.assert_series_equal) - self._check_roundtrip(s1, tm.assert_series_equal) - self._check_roundtrip(df0, tm.assert_frame_equal) - self._check_roundtrip(df1, tm.assert_frame_equal) - self._check_roundtrip(df2, tm.assert_frame_equal) - - def test_empty_series(self): - for dtype in [np.int64, np.float64, np.object, 'm8[ns]', 'M8[ns]']: - s = Series(dtype=dtype) - self._check_roundtrip(s, tm.assert_series_equal) - - def test_can_serialize_dates(self): - - rng = [x.date() for x in bdate_range('1/1/2000', '1/30/2000')] - frame = DataFrame(np.random.randn(len(rng), 4), index=rng) - - self._check_roundtrip(frame, tm.assert_frame_equal) - - def test_store_hierarchical(self): - index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'], - ['one', 'two', 'three']], - labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], - [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]], - names=['foo', 'bar']) - frame = DataFrame(np.random.randn(10, 3), index=index, - columns=['A', 'B', 'C']) - - self._check_roundtrip(frame, tm.assert_frame_equal) - self._check_roundtrip(frame.T, tm.assert_frame_equal) - self._check_roundtrip(frame['A'], tm.assert_series_equal) - - # check that the names are stored - with ensure_clean_store(self.path) as store: - store['frame'] = frame - recons = store['frame'] - tm.assert_frame_equal(recons, frame) - - def test_store_index_name(self): - df = tm.makeDataFrame() - df.index.name = 'foo' - - with ensure_clean_store(self.path) as store: - store['frame'] = df - recons = store['frame'] - tm.assert_frame_equal(recons, df) - - def test_store_index_name_with_tz(self): - # GH 13884 - df = pd.DataFrame({'A': [1, 
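test_store_hierarchical checks that hierarchical indexes, including their names, survive storage. A compact version of the same round trip (`mi.h5` is an arbitrary file name):

```python
import numpy as np
import pandas as pd

index = pd.MultiIndex.from_product([['foo', 'bar'], ['one', 'two']],
                                   names=['first', 'second'])
frame = pd.DataFrame(np.random.randn(4, 2), index=index, columns=['A', 'B'])

with pd.HDFStore('mi.h5', mode='w') as store:  # arbitrary file name
    store['frame'] = frame
    recons = store['frame']

# values and index names both survive the round trip
assert list(recons.index.names) == ['first', 'second']
assert recons.equals(frame)
```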
2]}) - df.index = pd.DatetimeIndex([1234567890123456787, 1234567890123456788]) - df.index = df.index.tz_localize('UTC') - df.index.name = 'foo' - - with ensure_clean_store(self.path) as store: - store.put('frame', df, format='table') - recons = store['frame'] - tm.assert_frame_equal(recons, df) - - @pytest.mark.parametrize('table_format', ['table', 'fixed']) - def test_store_index_name_numpy_str(self, table_format): - # GH #13492 - idx = pd.Index(pd.to_datetime([datetime.date(2000, 1, 1), - datetime.date(2000, 1, 2)]), - name=u('cols\u05d2')) - idx1 = pd.Index(pd.to_datetime([datetime.date(2010, 1, 1), - datetime.date(2010, 1, 2)]), - name=u('rows\u05d0')) - df = pd.DataFrame(np.arange(4).reshape(2, 2), columns=idx, index=idx1) - - # This used to fail, returning numpy strings instead of python strings. - with ensure_clean_path(self.path) as path: - df.to_hdf(path, 'df', format=table_format) - df2 = read_hdf(path, 'df') - - assert_frame_equal(df, df2, check_names=True) - - assert type(df2.index.name) == text_type - assert type(df2.columns.name) == text_type - - def test_store_series_name(self): - df = tm.makeDataFrame() - series = df['A'] - - with ensure_clean_store(self.path) as store: - store['series'] = series - recons = store['series'] - tm.assert_series_equal(recons, series) - - @pytest.mark.parametrize("compression", [ - False, pytest.param(True, marks=td.skip_if_windows_python_3) - ]) - def test_store_mixed(self, compression): - - def _make_one(): - df = tm.makeDataFrame() - df['obj1'] = 'foo' - df['obj2'] = 'bar' - df['bool1'] = df['A'] > 0 - df['bool2'] = df['B'] > 0 - df['int1'] = 1 - df['int2'] = 2 - return df._consolidate() - - df1 = _make_one() - df2 = _make_one() - - self._check_roundtrip(df1, tm.assert_frame_equal) - self._check_roundtrip(df2, tm.assert_frame_equal) - - with ensure_clean_store(self.path) as store: - store['obj'] = df1 - tm.assert_frame_equal(store['obj'], df1) - store['obj'] = df2 - tm.assert_frame_equal(store['obj'], df2) - - # check that can store Series of all of these types - self._check_roundtrip(df1['obj1'], tm.assert_series_equal, - compression=compression) - self._check_roundtrip(df1['bool1'], tm.assert_series_equal, - compression=compression) - self._check_roundtrip(df1['int1'], tm.assert_series_equal, - compression=compression) - - def test_wide(self): - - with catch_warnings(record=True): - wp = tm.makePanel() - self._check_roundtrip(wp, assert_panel_equal) - - def test_select_with_dups(self): - - # single dtypes - df = DataFrame(np.random.randn(10, 4), columns=['A', 'A', 'B', 'B']) - df.index = date_range('20130101 9:30', periods=10, freq='T') - - with ensure_clean_store(self.path) as store: - store.append('df', df) - - result = store.select('df') - expected = df - assert_frame_equal(result, expected, by_blocks=True) - - result = store.select('df', columns=df.columns) - expected = df - assert_frame_equal(result, expected, by_blocks=True) - - result = store.select('df', columns=['A']) - expected = df.loc[:, ['A']] - assert_frame_equal(result, expected) - - # dups across dtypes - df = concat([DataFrame(np.random.randn(10, 4), - columns=['A', 'A', 'B', 'B']), - DataFrame(np.random.randint(0, 10, size=20) - .reshape(10, 2), - columns=['A', 'C'])], - axis=1) - df.index = date_range('20130101 9:30', periods=10, freq='T') - - with ensure_clean_store(self.path) as store: - store.append('df', df) - - result = store.select('df') - expected = df - assert_frame_equal(result, expected, by_blocks=True) - - result = store.select('df', columns=df.columns) - 
expected = df - assert_frame_equal(result, expected, by_blocks=True) - - expected = df.loc[:, ['A']] - result = store.select('df', columns=['A']) - assert_frame_equal(result, expected, by_blocks=True) - - expected = df.loc[:, ['B', 'A']] - result = store.select('df', columns=['B', 'A']) - assert_frame_equal(result, expected, by_blocks=True) - - # duplicates on both index and columns - with ensure_clean_store(self.path) as store: - store.append('df', df) - store.append('df', df) - - expected = df.loc[:, ['B', 'A']] - expected = concat([expected, expected]) - result = store.select('df', columns=['B', 'A']) - assert_frame_equal(result, expected, by_blocks=True) - - def test_wide_table_dups(self): - with ensure_clean_store(self.path) as store: - with catch_warnings(record=True): - - wp = tm.makePanel() - store.put('panel', wp, format='table') - store.put('panel', wp, format='table', append=True) - - recons = store['panel'] - - assert_panel_equal(recons, wp) - - def test_long(self): - def _check(left, right): - assert_panel_equal(left.to_panel(), right.to_panel()) - - with catch_warnings(record=True): - wp = tm.makePanel() - self._check_roundtrip(wp.to_frame(), _check) - - def test_longpanel(self): - pass - - def test_overwrite_node(self): - - with ensure_clean_store(self.path) as store: - store['a'] = tm.makeTimeDataFrame() - ts = tm.makeTimeSeries() - store['a'] = ts - - tm.assert_series_equal(store['a'], ts) - - def test_sparse_with_compression(self): - - # GH 2931 - - # make sparse dataframe - arr = np.random.binomial(n=1, p=.01, size=(1000, 10)) - df = DataFrame(arr).to_sparse(fill_value=0) - - # case 1: store uncompressed - self._check_double_roundtrip(df, tm.assert_frame_equal, - compression=False, - check_frame_type=True) - - # case 2: store compressed (works) - self._check_double_roundtrip(df, tm.assert_frame_equal, - compression='zlib', - check_frame_type=True) - - # set one series to be completely sparse - df[0] = np.zeros(1000) - - # case 3: store df with completely sparse series uncompressed - self._check_double_roundtrip(df, tm.assert_frame_equal, - compression=False, - check_frame_type=True) - - # case 4: try storing df with completely sparse series compressed - # (fails) - self._check_double_roundtrip(df, tm.assert_frame_equal, - compression='zlib', - check_frame_type=True) - - def test_select(self): - - with ensure_clean_store(self.path) as store: - - with catch_warnings(record=True): - wp = tm.makePanel() - - # put/select ok - _maybe_remove(store, 'wp') - store.put('wp', wp, format='table') - store.select('wp') - - # non-table ok (where = None) - _maybe_remove(store, 'wp') - store.put('wp2', wp) - store.select('wp2') - - # selection on the non-indexable with a large number of columns - wp = Panel(np.random.randn(100, 100, 100), - items=['Item%03d' % i for i in range(100)], - major_axis=date_range('1/1/2000', periods=100), - minor_axis=['E%03d' % i for i in range(100)]) - - _maybe_remove(store, 'wp') - store.append('wp', wp) - items = ['Item%03d' % i for i in range(80)] - result = store.select('wp', 'items=items') - expected = wp.reindex(items=items) - assert_panel_equal(expected, result) - - # selecting non-table with a where - # pytest.raises(ValueError, store.select, - # 'wp2', ('column', ['A', 'D'])) - - # select with columns= - df = tm.makeTimeDataFrame() - _maybe_remove(store, 'df') - store.append('df', df) - result = store.select('df', columns=['A', 'B']) - expected = df.reindex(columns=['A', 'B']) - tm.assert_frame_equal(expected, result) - - # equivalently - result = 
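The compression cases above go through _check_double_roundtrip; stated directly, complevel/complib set on the store apply to everything written through it (`comp.h5` is an arbitrary file name):

```python
import numpy as np
import pandas as pd

df = pd.DataFrame(np.random.randn(1000, 4), columns=list('ABCD'))

# complevel/complib on the store compress everything written through it
with pd.HDFStore('comp.h5', mode='w',
                 complevel=9, complib='zlib') as store:
    store.put('df', df, format='table')
    back = store['df']

assert back.equals(df)
```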
store.select('df', [("columns=['A', 'B']")]) - expected = df.reindex(columns=['A', 'B']) - tm.assert_frame_equal(expected, result) - - # with a data column - _maybe_remove(store, 'df') - store.append('df', df, data_columns=['A']) - result = store.select('df', ['A > 0'], columns=['A', 'B']) - expected = df[df.A > 0].reindex(columns=['A', 'B']) - tm.assert_frame_equal(expected, result) - - # all a data columns - _maybe_remove(store, 'df') - store.append('df', df, data_columns=True) - result = store.select('df', ['A > 0'], columns=['A', 'B']) - expected = df[df.A > 0].reindex(columns=['A', 'B']) - tm.assert_frame_equal(expected, result) - - # with a data column, but different columns - _maybe_remove(store, 'df') - store.append('df', df, data_columns=['A']) - result = store.select('df', ['A > 0'], columns=['C', 'D']) - expected = df[df.A > 0].reindex(columns=['C', 'D']) - tm.assert_frame_equal(expected, result) - - def test_select_dtypes(self): - - with ensure_clean_store(self.path) as store: - # with a Timestamp data column (GH #2637) - df = DataFrame(dict( - ts=bdate_range('2012-01-01', periods=300), - A=np.random.randn(300))) - _maybe_remove(store, 'df') - store.append('df', df, data_columns=['ts', 'A']) - - result = store.select('df', "ts>=Timestamp('2012-02-01')") - expected = df[df.ts >= Timestamp('2012-02-01')] - tm.assert_frame_equal(expected, result) - - # bool columns (GH #2849) - df = DataFrame(np.random.randn(5, 2), columns=['A', 'B']) - df['object'] = 'foo' - df.loc[4:5, 'object'] = 'bar' - df['boolv'] = df['A'] > 0 - _maybe_remove(store, 'df') - store.append('df', df, data_columns=True) - - expected = (df[df.boolv == True] # noqa - .reindex(columns=['A', 'boolv'])) - for v in [True, 'true', 1]: - result = store.select('df', 'boolv == %s' % str(v), - columns=['A', 'boolv']) - tm.assert_frame_equal(expected, result) - - expected = (df[df.boolv == False] # noqa - .reindex(columns=['A', 'boolv'])) - for v in [False, 'false', 0]: - result = store.select( - 'df', 'boolv == %s' % str(v), columns=['A', 'boolv']) - tm.assert_frame_equal(expected, result) - - # integer index - df = DataFrame(dict(A=np.random.rand(20), B=np.random.rand(20))) - _maybe_remove(store, 'df_int') - store.append('df_int', df) - result = store.select( - 'df_int', "index<10 and columns=['A']") - expected = df.reindex(index=list(df.index)[0:10], columns=['A']) - tm.assert_frame_equal(expected, result) - - # float index - df = DataFrame(dict(A=np.random.rand( - 20), B=np.random.rand(20), index=np.arange(20, dtype='f8'))) - _maybe_remove(store, 'df_float') - store.append('df_float', df) - result = store.select( - 'df_float', "index<10.0 and columns=['A']") - expected = df.reindex(index=list(df.index)[0:10], columns=['A']) - tm.assert_frame_equal(expected, result) - - with ensure_clean_store(self.path) as store: - - # floats w/o NaN - df = DataFrame( - dict(cols=range(11), values=range(11)), dtype='float64') - df['cols'] = (df['cols'] + 10).apply(str) - - store.append('df1', df, data_columns=True) - result = store.select( - 'df1', where='values>2.0') - expected = df[df['values'] > 2.0] - tm.assert_frame_equal(expected, result) - - # floats with NaN - df.iloc[0] = np.nan - expected = df[df['values'] > 2.0] - - store.append('df2', df, data_columns=True, index=False) - result = store.select( - 'df2', where='values>2.0') - tm.assert_frame_equal(expected, result) - - # https://github.com/PyTables/PyTables/issues/282 - # bug in selection when 0th row has a np.nan and an index - # store.append('df3',df,data_columns=True) - # 
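The select tests above hinge on data_columns: only columns declared there (plus the index) can appear in a where clause. A minimal sketch (`dc.h5` is an arbitrary file name):

```python
import numpy as np
import pandas as pd

df = pd.DataFrame({'ts': pd.bdate_range('2012-01-01', periods=300),
                   'A': np.random.randn(300)})

with pd.HDFStore('dc.h5', mode='w') as store:  # arbitrary file name
    # only columns named in data_columns (plus the index) are queryable
    store.append('df', df, data_columns=['ts', 'A'])
    result = store.select('df', "ts>=Timestamp('2012-02-01')")

assert (result['ts'] >= pd.Timestamp('2012-02-01')).all()
```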
result = store.select( - # 'df3', where='values>2.0') - # tm.assert_frame_equal(expected, result) - - # not in first position float with NaN ok too - df = DataFrame( - dict(cols=range(11), values=range(11)), dtype='float64') - df['cols'] = (df['cols'] + 10).apply(str) - - df.iloc[1] = np.nan - expected = df[df['values'] > 2.0] - - store.append('df4', df, data_columns=True) - result = store.select( - 'df4', where='values>2.0') - tm.assert_frame_equal(expected, result) - - # test selection with comparison against numpy scalar - # GH 11283 - with ensure_clean_store(self.path) as store: - df = tm.makeDataFrame() - - expected = df[df['A'] > 0] - - store.append('df', df, data_columns=True) - np_zero = np.float64(0) # noqa - result = store.select('df', where=["A>np_zero"]) - tm.assert_frame_equal(expected, result) - - def test_select_with_many_inputs(self): - - with ensure_clean_store(self.path) as store: - - df = DataFrame(dict(ts=bdate_range('2012-01-01', periods=300), - A=np.random.randn(300), - B=range(300), - users=['a'] * 50 + ['b'] * 50 + ['c'] * 100 + - ['a%03d' % i for i in range(100)])) - _maybe_remove(store, 'df') - store.append('df', df, data_columns=['ts', 'A', 'B', 'users']) - - # regular select - result = store.select('df', "ts>=Timestamp('2012-02-01')") - expected = df[df.ts >= Timestamp('2012-02-01')] - tm.assert_frame_equal(expected, result) - - # small selector - result = store.select( - 'df', - "ts>=Timestamp('2012-02-01') & users=['a','b','c']") - expected = df[(df.ts >= Timestamp('2012-02-01')) & - df.users.isin(['a', 'b', 'c'])] - tm.assert_frame_equal(expected, result) - - # big selector along the columns - selector = ['a', 'b', 'c'] + ['a%03d' % i for i in range(60)] - result = store.select( - 'df', - "ts>=Timestamp('2012-02-01') and users=selector") - expected = df[(df.ts >= Timestamp('2012-02-01')) & - df.users.isin(selector)] - tm.assert_frame_equal(expected, result) - - selector = range(100, 200) - result = store.select('df', 'B=selector') - expected = df[df.B.isin(selector)] - tm.assert_frame_equal(expected, result) - assert len(result) == 100 - - # big selector along the index - selector = Index(df.ts[0:100].values) - result = store.select('df', 'ts=selector') - expected = df[df.ts.isin(selector.values)] - tm.assert_frame_equal(expected, result) - assert len(result) == 100 - - def test_select_iterator(self): - - # single table - with ensure_clean_store(self.path) as store: - - df = tm.makeTimeDataFrame(500) - _maybe_remove(store, 'df') - store.append('df', df) - - expected = store.select('df') - - results = [s for s in store.select('df', iterator=True)] - result = concat(results) - tm.assert_frame_equal(expected, result) - - results = [s for s in store.select('df', chunksize=100)] - assert len(results) == 5 - result = concat(results) - tm.assert_frame_equal(expected, result) - - results = [s for s in store.select('df', chunksize=150)] - result = concat(results) - tm.assert_frame_equal(result, expected) - - with ensure_clean_path(self.path) as path: - - df = tm.makeTimeDataFrame(500) - df.to_hdf(path, 'df_non_table') - pytest.raises(TypeError, read_hdf, path, - 'df_non_table', chunksize=100) - pytest.raises(TypeError, read_hdf, path, - 'df_non_table', iterator=True) - - with ensure_clean_path(self.path) as path: - - df = tm.makeTimeDataFrame(500) - df.to_hdf(path, 'df', format='table') - - results = [s for s in read_hdf(path, 'df', chunksize=100)] - result = concat(results) - - assert len(results) == 5 - tm.assert_frame_equal(result, df) - 
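test_select_iterator, which follows, verifies that chunked reads reassemble into the full frame; the same idea via read_hdf (`iter.h5` is an arbitrary file name):

```python
import pandas as pd
import pandas.util.testing as tm

df = tm.makeTimeDataFrame(500)
df.to_hdf('iter.h5', 'df', format='table')  # iteration needs table format

# chunksize yields successive row blocks; concat rebuilds the frame
chunks = list(pd.read_hdf('iter.h5', 'df', chunksize=100))
assert len(chunks) == 5
tm.assert_frame_equal(pd.concat(chunks), df)
```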
tm.assert_frame_equal(result, read_hdf(path, 'df')) - - # multiple - - with ensure_clean_store(self.path) as store: - - df1 = tm.makeTimeDataFrame(500) - store.append('df1', df1, data_columns=True) - df2 = tm.makeTimeDataFrame(500).rename( - columns=lambda x: "%s_2" % x) - df2['foo'] = 'bar' - store.append('df2', df2) - - df = concat([df1, df2], axis=1) - - # full selection - expected = store.select_as_multiple( - ['df1', 'df2'], selector='df1') - results = [s for s in store.select_as_multiple( - ['df1', 'df2'], selector='df1', chunksize=150)] - result = concat(results) - tm.assert_frame_equal(expected, result) - - def test_select_iterator_complete_8014(self): - - # GH 8014 - # using iterator and where clause - chunksize = 1e4 - - # no iterator - with ensure_clean_store(self.path) as store: - - expected = tm.makeTimeDataFrame(100064, 'S') - _maybe_remove(store, 'df') - store.append('df', expected) - - beg_dt = expected.index[0] - end_dt = expected.index[-1] - - # select w/o iteration and no where clause works - result = store.select('df') - tm.assert_frame_equal(expected, result) - - # select w/o iterator and where clause, single term, begin - # of range, works - where = "index >= '%s'" % beg_dt - result = store.select('df', where=where) - tm.assert_frame_equal(expected, result) - - # select w/o iterator and where clause, single term, end - # of range, works - where = "index <= '%s'" % end_dt - result = store.select('df', where=where) - tm.assert_frame_equal(expected, result) - - # select w/o iterator and where clause, inclusive range, - # works - where = "index >= '%s' & index <= '%s'" % (beg_dt, end_dt) - result = store.select('df', where=where) - tm.assert_frame_equal(expected, result) - - # with iterator, full range - with ensure_clean_store(self.path) as store: - - expected = tm.makeTimeDataFrame(100064, 'S') - _maybe_remove(store, 'df') - store.append('df', expected) - - beg_dt = expected.index[0] - end_dt = expected.index[-1] - - # select w/iterator and no where clause works - results = [s for s in store.select('df', chunksize=chunksize)] - result = concat(results) - tm.assert_frame_equal(expected, result) - - # select w/iterator and where clause, single term, begin of range - where = "index >= '%s'" % beg_dt - results = [s for s in store.select( - 'df', where=where, chunksize=chunksize)] - result = concat(results) - tm.assert_frame_equal(expected, result) - - # select w/iterator and where clause, single term, end of range - where = "index <= '%s'" % end_dt - results = [s for s in store.select( - 'df', where=where, chunksize=chunksize)] - result = concat(results) - tm.assert_frame_equal(expected, result) - - # select w/iterator and where clause, inclusive range - where = "index >= '%s' & index <= '%s'" % (beg_dt, end_dt) - results = [s for s in store.select( - 'df', where=where, chunksize=chunksize)] - result = concat(results) - tm.assert_frame_equal(expected, result) - - def test_select_iterator_non_complete_8014(self): - - # GH 8014 - # using iterator and where clause - chunksize = 1e4 - - # with iterator, non complete range - with ensure_clean_store(self.path) as store: - - expected = tm.makeTimeDataFrame(100064, 'S') - _maybe_remove(store, 'df') - store.append('df', expected) - - beg_dt = expected.index[1] - end_dt = expected.index[-2] - - # select w/iterator and where clause, single term, begin of range - where = "index >= '%s'" % beg_dt - results = [s for s in store.select( - 'df', where=where, chunksize=chunksize)] - result = concat(results) - rexpected = 
expected[expected.index >= beg_dt] - tm.assert_frame_equal(rexpected, result) - - # select w/iterator and where clause, single term, end of range - where = "index <= '%s'" % end_dt - results = [s for s in store.select( - 'df', where=where, chunksize=chunksize)] - result = concat(results) - rexpected = expected[expected.index <= end_dt] - tm.assert_frame_equal(rexpected, result) - - # select w/iterator and where clause, inclusive range - where = "index >= '%s' & index <= '%s'" % (beg_dt, end_dt) - results = [s for s in store.select( - 'df', where=where, chunksize=chunksize)] - result = concat(results) - rexpected = expected[(expected.index >= beg_dt) & - (expected.index <= end_dt)] - tm.assert_frame_equal(rexpected, result) - - # with iterator, empty where - with ensure_clean_store(self.path) as store: - - expected = tm.makeTimeDataFrame(100064, 'S') - _maybe_remove(store, 'df') - store.append('df', expected) - - end_dt = expected.index[-1] - - # select w/iterator and where clause, single term, begin of range - where = "index > '%s'" % end_dt - results = [s for s in store.select( - 'df', where=where, chunksize=chunksize)] - assert 0 == len(results) - - def test_select_iterator_many_empty_frames(self): - - # GH 8014 - # using iterator and where clause can return many empty - # frames. - chunksize = int(1e4) - - # with iterator, range limited to the first chunk - with ensure_clean_store(self.path) as store: - - expected = tm.makeTimeDataFrame(100000, 'S') - _maybe_remove(store, 'df') - store.append('df', expected) - - beg_dt = expected.index[0] - end_dt = expected.index[chunksize - 1] - - # select w/iterator and where clause, single term, begin of range - where = "index >= '%s'" % beg_dt - results = [s for s in store.select( - 'df', where=where, chunksize=chunksize)] - result = concat(results) - rexpected = expected[expected.index >= beg_dt] - tm.assert_frame_equal(rexpected, result) - - # select w/iterator and where clause, single term, end of range - where = "index <= '%s'" % end_dt - results = [s for s in store.select( - 'df', where=where, chunksize=chunksize)] - - assert len(results) == 1 - result = concat(results) - rexpected = expected[expected.index <= end_dt] - tm.assert_frame_equal(rexpected, result) - - # select w/iterator and where clause, inclusive range - where = "index >= '%s' & index <= '%s'" % (beg_dt, end_dt) - results = [s for s in store.select( - 'df', where=where, chunksize=chunksize)] - - # should be 1, is 10 - assert len(results) == 1 - result = concat(results) - rexpected = expected[(expected.index >= beg_dt) & - (expected.index <= end_dt)] - tm.assert_frame_equal(rexpected, result) - - # select w/iterator and where clause which selects - # *nothing*. - # - # To be consistent with Python idiom I suggest this should - # return [] e.g. `for e in []: print True` never prints - # True. 
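Consistent with the comment above, a where clause that selects nothing should produce no chunks at all; a sketch (`empty.h5` is an arbitrary file name):

```python
import pandas as pd
import pandas.util.testing as tm

df = tm.makeTimeDataFrame(100)

with pd.HDFStore('empty.h5', mode='w') as store:  # arbitrary file name
    store.append('df', df)
    # a clause beyond the last index value matches nothing ...
    where = "index > '%s'" % df.index[-1]
    # ... and the chunked iterator then yields no chunks at all
    chunks = [c for c in store.select('df', where=where, chunksize=50)]

assert len(chunks) == 0
```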
- - where = "index <= '%s' & index >= '%s'" % (beg_dt, end_dt) - results = [s for s in store.select( - 'df', where=where, chunksize=chunksize)] - - # should be [] - assert len(results) == 0 - - def test_retain_index_attributes(self): - - # GH 3499, losing frequency info on index recreation - df = DataFrame(dict( - A=Series(lrange(3), - index=date_range('2000-1-1', periods=3, freq='H')))) - - with ensure_clean_store(self.path) as store: - _maybe_remove(store, 'data') - store.put('data', df, format='table') - - result = store.get('data') - tm.assert_frame_equal(df, result) - - for attr in ['freq', 'tz', 'name']: - for idx in ['index', 'columns']: - assert (getattr(getattr(df, idx), attr, None) == - getattr(getattr(result, idx), attr, None)) - - # try to append a table with a different frequency - with catch_warnings(record=True): - df2 = DataFrame(dict( - A=Series(lrange(3), - index=date_range('2002-1-1', - periods=3, freq='D')))) - store.append('data', df2) - - assert store.get_storer('data').info['index']['freq'] is None - - # this is ok - _maybe_remove(store, 'df2') - df2 = DataFrame(dict( - A=Series(lrange(3), - index=[Timestamp('20010101'), Timestamp('20010102'), - Timestamp('20020101')]))) - store.append('df2', df2) - df3 = DataFrame(dict( - A=Series(lrange(3), - index=date_range('2002-1-1', periods=3, - freq='D')))) - store.append('df2', df3) - - def test_retain_index_attributes2(self): - with ensure_clean_path(self.path) as path: - - with catch_warnings(record=True): - - df = DataFrame(dict( - A=Series(lrange(3), - index=date_range('2000-1-1', - periods=3, freq='H')))) - df.to_hdf(path, 'data', mode='w', append=True) - df2 = DataFrame(dict( - A=Series(lrange(3), - index=date_range('2002-1-1', periods=3, - freq='D')))) - df2.to_hdf(path, 'data', append=True) - - idx = date_range('2000-1-1', periods=3, freq='H') - idx.name = 'foo' - df = DataFrame(dict(A=Series(lrange(3), index=idx))) - df.to_hdf(path, 'data', mode='w', append=True) - - assert read_hdf(path, 'data').index.name == 'foo' - - with catch_warnings(record=True): - - idx2 = date_range('2001-1-1', periods=3, freq='H') - idx2.name = 'bar' - df2 = DataFrame(dict(A=Series(lrange(3), index=idx2))) - df2.to_hdf(path, 'data', append=True) - - assert read_hdf(path, 'data').index.name is None - - def test_panel_select(self): - - with ensure_clean_store(self.path) as store: - - with catch_warnings(record=True): - - wp = tm.makePanel() - - store.put('wp', wp, format='table') - date = wp.major_axis[len(wp.major_axis) // 2] - - crit1 = ('major_axis>=date') - crit2 = ("minor_axis=['A', 'D']") - - result = store.select('wp', [crit1, crit2]) - expected = wp.truncate(before=date).reindex(minor=['A', 'D']) - assert_panel_equal(result, expected) - - result = store.select( - 'wp', ['major_axis>="20000124"', - ("minor_axis=['A', 'B']")]) - expected = wp.truncate( - before='20000124').reindex(minor=['A', 'B']) - assert_panel_equal(result, expected) - - def test_frame_select(self): - - df = tm.makeTimeDataFrame() - - with ensure_clean_store(self.path) as store: - store.put('frame', df, format='table') - date = df.index[len(df) // 2] - - crit1 = Term('index>=date') - assert crit1.env.scope['date'] == date - - crit2 = ("columns=['A', 'D']") - crit3 = ('columns=A') - - result = store.select('frame', [crit1, crit2]) - expected = df.loc[date:, ['A', 'D']] - tm.assert_frame_equal(result, expected) - - result = store.select('frame', [crit3]) - expected = df.loc[:, ['A']] - tm.assert_frame_equal(result, expected) - - # invalid terms - df = 
tm.makeTimeDataFrame() - store.append('df_time', df) - pytest.raises( - ValueError, store.select, 'df_time', "index>0") - - # can't select if not written as table - # store['frame'] = df - # pytest.raises(ValueError, store.select, - # 'frame', [crit1, crit2]) - - def test_frame_select_complex(self): - # select via complex criteria - - df = tm.makeTimeDataFrame() - df['string'] = 'foo' - df.loc[df.index[0:4], 'string'] = 'bar' - - with ensure_clean_store(self.path) as store: - store.put('df', df, format='table', data_columns=['string']) - - # empty - result = store.select('df', 'index>df.index[3] & string="bar"') - expected = df.loc[(df.index > df.index[3]) & (df.string == 'bar')] - tm.assert_frame_equal(result, expected) - - result = store.select('df', 'index>df.index[3] & string="foo"') - expected = df.loc[(df.index > df.index[3]) & (df.string == 'foo')] - tm.assert_frame_equal(result, expected) - - # or - result = store.select('df', 'index>df.index[3] | string="bar"') - expected = df.loc[(df.index > df.index[3]) | (df.string == 'bar')] - tm.assert_frame_equal(result, expected) - - result = store.select('df', '(index>df.index[3] & ' - 'index<=df.index[6]) | string="bar"') - expected = df.loc[((df.index > df.index[3]) & ( - df.index <= df.index[6])) | (df.string == 'bar')] - tm.assert_frame_equal(result, expected) - - # invert - result = store.select('df', 'string!="bar"') - expected = df.loc[df.string != 'bar'] - tm.assert_frame_equal(result, expected) - - # invert not implemented in numexpr :( - pytest.raises(NotImplementedError, - store.select, 'df', '~(string="bar")') - - # invert ok for filters - result = store.select('df', "~(columns=['A','B'])") - expected = df.loc[:, df.columns.difference(['A', 'B'])] - tm.assert_frame_equal(result, expected) - - # in - result = store.select( - 'df', "index>df.index[3] & columns in ['A','B']") - expected = df.loc[df.index > df.index[3]].reindex(columns=[ - 'A', 'B']) - tm.assert_frame_equal(result, expected) - - def test_frame_select_complex2(self): - - with ensure_clean_path(['parms.hdf', 'hist.hdf']) as paths: - - pp, hh = paths - - # use non-trivial selection criteria - parms = DataFrame({'A': [1, 1, 2, 2, 3]}) - parms.to_hdf(pp, 'df', mode='w', - format='table', data_columns=['A']) - - selection = read_hdf(pp, 'df', where='A=[2,3]') - hist = DataFrame(np.random.randn(25, 1), - columns=['data'], - index=MultiIndex.from_tuples( - [(i, j) for i in range(5) - for j in range(5)], - names=['l1', 'l2'])) - - hist.to_hdf(hh, 'df', mode='w', format='table') - - expected = read_hdf(hh, 'df', where='l1=[2, 3, 4]') - - # scope with list-like - l = selection.index.tolist() # noqa - store = HDFStore(hh) - result = store.select('df', where='l1=l') - assert_frame_equal(result, expected) - store.close() - - result = read_hdf(hh, 'df', where='l1=l') - assert_frame_equal(result, expected) - - # index - index = selection.index # noqa - result = read_hdf(hh, 'df', where='l1=index') - assert_frame_equal(result, expected) - - result = read_hdf(hh, 'df', where='l1=selection.index') - assert_frame_equal(result, expected) - - result = read_hdf(hh, 'df', where='l1=selection.index.tolist()') - assert_frame_equal(result, expected) - - result = read_hdf(hh, 'df', where='l1=list(selection.index)') - assert_frame_equal(result, expected) - - # scope with index - store = HDFStore(hh) - - result = store.select('df', where='l1=index') - assert_frame_equal(result, expected) - - result = store.select('df', where='l1=selection.index') - assert_frame_equal(result, expected) - - 
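test_frame_select_complex2 above leans on where-clause scoping: plain Python variables in the calling frame can be referenced by name inside the query string. In isolation (`scope.h5` is an arbitrary file name, `wanted` an arbitrary variable):

```python
import pandas as pd

df = pd.DataFrame({'A': [1, 1, 2, 2, 3]})
df.to_hdf('scope.h5', 'df', format='table', data_columns=['A'])

wanted = [2, 3]  # a plain local variable, referenced by name below
result = pd.read_hdf('scope.h5', 'df', where='A=wanted')

assert set(result['A']) == {2, 3}
```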
result = store.select('df', where='l1=selection.index.tolist()') - assert_frame_equal(result, expected) - - result = store.select('df', where='l1=list(selection.index)') - assert_frame_equal(result, expected) - - store.close() - - def test_invalid_filtering(self): - - # can't use more than one filter (atm) - - df = tm.makeTimeDataFrame() - - with ensure_clean_store(self.path) as store: - store.put('df', df, format='table') - - # not implemented - pytest.raises(NotImplementedError, store.select, - 'df', "columns=['A'] | columns=['B']") - - # in theory we could deal with this - pytest.raises(NotImplementedError, store.select, - 'df', "columns=['A','B'] & columns=['C']") - - def test_string_select(self): - # GH 2973 - with ensure_clean_store(self.path) as store: - - df = tm.makeTimeDataFrame() - - # test string ==/!= - df['x'] = 'none' - df.loc[2:7, 'x'] = '' - - store.append('df', df, data_columns=['x']) - - result = store.select('df', 'x=none') - expected = df[df.x == 'none'] - assert_frame_equal(result, expected) - - try: - result = store.select('df', 'x!=none') - expected = df[df.x != 'none'] - assert_frame_equal(result, expected) - except Exception as detail: - pprint_thing("[{0}]".format(detail)) - pprint_thing(store) - pprint_thing(expected) - - df2 = df.copy() - df2.loc[df2.x == '', 'x'] = np.nan - - store.append('df2', df2, data_columns=['x']) - result = store.select('df2', 'x!=none') - expected = df2[isna(df2.x)] - assert_frame_equal(result, expected) - - # int ==/!= - df['int'] = 1 - df.loc[2:7, 'int'] = 2 - - store.append('df3', df, data_columns=['int']) - - result = store.select('df3', 'int=2') - expected = df[df.int == 2] - assert_frame_equal(result, expected) - - result = store.select('df3', 'int!=2') - expected = df[df.int != 2] - assert_frame_equal(result, expected) - - def test_read_column(self): - - df = tm.makeTimeDataFrame() - - with ensure_clean_store(self.path) as store: - _maybe_remove(store, 'df') - store.append('df', df) - - # error - pytest.raises(KeyError, store.select_column, 'df', 'foo') - - def f(): - store.select_column('df', 'index', where=['index>5']) - pytest.raises(Exception, f) - - # valid - result = store.select_column('df', 'index') - tm.assert_almost_equal(result.values, Series(df.index).values) - assert isinstance(result, Series) - - # not a data indexable column - pytest.raises( - ValueError, store.select_column, 'df', 'values_block_0') - - # a data column - df2 = df.copy() - df2['string'] = 'foo' - store.append('df2', df2, data_columns=['string']) - result = store.select_column('df2', 'string') - tm.assert_almost_equal(result.values, df2['string'].values) - - # a data column with NaNs, result excludes the NaNs - df3 = df.copy() - df3['string'] = 'foo' - df3.loc[4:6, 'string'] = np.nan - store.append('df3', df3, data_columns=['string']) - result = store.select_column('df3', 'string') - tm.assert_almost_equal(result.values, df3['string'].values) - - # start/stop - result = store.select_column('df3', 'string', start=2) - tm.assert_almost_equal(result.values, df3['string'].values[2:]) - - result = store.select_column('df3', 'string', start=-2) - tm.assert_almost_equal(result.values, df3['string'].values[-2:]) - - result = store.select_column('df3', 'string', stop=2) - tm.assert_almost_equal(result.values, df3['string'].values[:2]) - - result = store.select_column('df3', 'string', stop=-2) - tm.assert_almost_equal(result.values, df3['string'].values[:-2]) - - result = store.select_column('df3', 'string', start=2, stop=-2) - 
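select_column, exercised below, pulls a single stored column without materialising the rest of the table, honouring start/stop row bounds (`col.h5` is an arbitrary file name):

```python
import pandas as pd
import pandas.util.testing as tm

df = tm.makeTimeDataFrame()
df['string'] = 'foo'

with pd.HDFStore('col.h5', mode='w') as store:  # arbitrary file name
    store.append('df', df, data_columns=['string'])
    # fetch one stored column, honouring start/stop row bounds
    col = store.select_column('df', 'string', start=2, stop=-2)

tm.assert_almost_equal(col.values, df['string'].values[2:-2])
```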
tm.assert_almost_equal(result.values, df3['string'].values[2:-2]) - - result = store.select_column('df3', 'string', start=-2, stop=2) - tm.assert_almost_equal(result.values, df3['string'].values[-2:2]) - - # GH 10392 - make sure column name is preserved - df4 = DataFrame({'A': np.random.randn(10), 'B': 'foo'}) - store.append('df4', df4, data_columns=True) - expected = df4['B'] - result = store.select_column('df4', 'B') - tm.assert_series_equal(result, expected) - - def test_coordinates(self): - df = tm.makeTimeDataFrame() - - with ensure_clean_store(self.path) as store: - - _maybe_remove(store, 'df') - store.append('df', df) - - # all - c = store.select_as_coordinates('df') - assert((c.values == np.arange(len(df.index))).all()) - - # get coordinates back & test vs frame - _maybe_remove(store, 'df') - - df = DataFrame(dict(A=lrange(5), B=lrange(5))) - store.append('df', df) - c = store.select_as_coordinates('df', ['index<3']) - assert((c.values == np.arange(3)).all()) - result = store.select('df', where=c) - expected = df.loc[0:2, :] - tm.assert_frame_equal(result, expected) - - c = store.select_as_coordinates('df', ['index>=3', 'index<=4']) - assert((c.values == np.arange(2) + 3).all()) - result = store.select('df', where=c) - expected = df.loc[3:4, :] - tm.assert_frame_equal(result, expected) - assert isinstance(c, Index) - - # multiple tables - _maybe_remove(store, 'df1') - _maybe_remove(store, 'df2') - df1 = tm.makeTimeDataFrame() - df2 = tm.makeTimeDataFrame().rename(columns=lambda x: "%s_2" % x) - store.append('df1', df1, data_columns=['A', 'B']) - store.append('df2', df2) - - c = store.select_as_coordinates('df1', ['A>0', 'B>0']) - df1_result = store.select('df1', c) - df2_result = store.select('df2', c) - result = concat([df1_result, df2_result], axis=1) - - expected = concat([df1, df2], axis=1) - expected = expected[(expected.A > 0) & (expected.B > 0)] - tm.assert_frame_equal(result, expected) - - # pass array/mask as the coordinates - with ensure_clean_store(self.path) as store: - - df = DataFrame(np.random.randn(1000, 2), - index=date_range('20000101', periods=1000)) - store.append('df', df) - c = store.select_column('df', 'index') - where = c[DatetimeIndex(c).month == 5].index - expected = df.iloc[where] - - # locations - result = store.select('df', where=where) - tm.assert_frame_equal(result, expected) - - # boolean - result = store.select('df', where=where) - tm.assert_frame_equal(result, expected) - - # invalid - pytest.raises(ValueError, store.select, 'df', - where=np.arange(len(df), dtype='float64')) - pytest.raises(ValueError, store.select, 'df', - where=np.arange(len(df) + 1)) - pytest.raises(ValueError, store.select, 'df', - where=np.arange(len(df)), start=5) - pytest.raises(ValueError, store.select, 'df', - where=np.arange(len(df)), start=5, stop=10) - - # selection with filter - selection = date_range('20000101', periods=500) - result = store.select('df', where='index in selection') - expected = df[df.index.isin(selection)] - tm.assert_frame_equal(result, expected) - - # list - df = DataFrame(np.random.randn(10, 2)) - store.append('df2', df) - result = store.select('df2', where=[0, 3, 5]) - expected = df.iloc[[0, 3, 5]] - tm.assert_frame_equal(result, expected) - - # boolean - where = [True] * 10 - where[-2] = False - result = store.select('df2', where=where) - expected = df.loc[where] - tm.assert_frame_equal(result, expected) - - # start/stop - result = store.select('df2', start=5, stop=10) - expected = df[5:10] - tm.assert_frame_equal(result, expected) - - def 
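The coordinates tests above resolve a query once into row positions and reuse them; a condensed sketch (`coords.h5` is an arbitrary file name):

```python
import numpy as np
import pandas as pd

df = pd.DataFrame({'A': np.arange(5), 'B': np.arange(5)})

with pd.HDFStore('coords.h5', mode='w') as store:  # arbitrary file name
    store.append('df', df)
    # resolve the query once into row coordinates ...
    c = store.select_as_coordinates('df', ['index<3'])
    assert (c.values == np.arange(3)).all()
    # ... then reuse the coordinates as a where clause
    result = store.select('df', where=c)

assert len(result) == 3
```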
test_append_to_multiple(self): - df1 = tm.makeTimeDataFrame() - df2 = tm.makeTimeDataFrame().rename(columns=lambda x: "%s_2" % x) - df2['foo'] = 'bar' - df = concat([df1, df2], axis=1) - - with ensure_clean_store(self.path) as store: - - # exceptions - pytest.raises(ValueError, store.append_to_multiple, - {'df1': ['A', 'B'], 'df2': None}, df, - selector='df3') - pytest.raises(ValueError, store.append_to_multiple, - {'df1': None, 'df2': None}, df, selector='df3') - pytest.raises( - ValueError, store.append_to_multiple, 'df1', df, 'df1') - - # regular operation - store.append_to_multiple( - {'df1': ['A', 'B'], 'df2': None}, df, selector='df1') - result = store.select_as_multiple( - ['df1', 'df2'], where=['A>0', 'B>0'], selector='df1') - expected = df[(df.A > 0) & (df.B > 0)] - tm.assert_frame_equal(result, expected) - - def test_append_to_multiple_dropna(self): - df1 = tm.makeTimeDataFrame() - df2 = tm.makeTimeDataFrame().rename(columns=lambda x: "%s_2" % x) - df1.iloc[1, df1.columns.get_indexer(['A', 'B'])] = np.nan - df = concat([df1, df2], axis=1) - - with ensure_clean_store(self.path) as store: - - # dropna=True should guarantee rows are synchronized - store.append_to_multiple( - {'df1': ['A', 'B'], 'df2': None}, df, selector='df1', - dropna=True) - result = store.select_as_multiple(['df1', 'df2']) - expected = df.dropna() - tm.assert_frame_equal(result, expected) - tm.assert_index_equal(store.select('df1').index, - store.select('df2').index) - - @pytest.mark.xfail(run=False, - reason="append_to_multiple_dropna_false " - "is not raising as failed") - def test_append_to_multiple_dropna_false(self): - df1 = tm.makeTimeDataFrame() - df2 = tm.makeTimeDataFrame().rename(columns=lambda x: "%s_2" % x) - df1.iloc[1, df1.columns.get_indexer(['A', 'B'])] = np.nan - df = concat([df1, df2], axis=1) - - with ensure_clean_store(self.path) as store: - - # dropna=False shouldn't synchronize row indexes - store.append_to_multiple( - {'df1a': ['A', 'B'], 'df2a': None}, df, selector='df1a', - dropna=False) - - with pytest.raises(ValueError): - store.select_as_multiple(['df1a', 'df2a']) - - assert not store.select('df1a').index.equals( - store.select('df2a').index) - - def test_select_as_multiple(self): - - df1 = tm.makeTimeDataFrame() - df2 = tm.makeTimeDataFrame().rename(columns=lambda x: "%s_2" % x) - df2['foo'] = 'bar' - - with ensure_clean_store(self.path) as store: - - # no tables stored - pytest.raises(Exception, store.select_as_multiple, - None, where=['A>0', 'B>0'], selector='df1') - - store.append('df1', df1, data_columns=['A', 'B']) - store.append('df2', df2) - - # exceptions - pytest.raises(Exception, store.select_as_multiple, - None, where=['A>0', 'B>0'], selector='df1') - pytest.raises(Exception, store.select_as_multiple, - [None], where=['A>0', 'B>0'], selector='df1') - pytest.raises(KeyError, store.select_as_multiple, - ['df1', 'df3'], where=['A>0', 'B>0'], - selector='df1') - pytest.raises(KeyError, store.select_as_multiple, - ['df3'], where=['A>0', 'B>0'], selector='df1') - pytest.raises(KeyError, store.select_as_multiple, - ['df1', 'df2'], where=['A>0', 'B>0'], - selector='df4') - - # default select - result = store.select('df1', ['A>0', 'B>0']) - expected = store.select_as_multiple( - ['df1'], where=['A>0', 'B>0'], selector='df1') - tm.assert_frame_equal(result, expected) - expected = store.select_as_multiple( - 'df1', where=['A>0', 'B>0'], selector='df1') - tm.assert_frame_equal(result, expected) - - # multiple - result = store.select_as_multiple( - ['df1', 'df2'], where=['A>0', 'B>0'], 
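append_to_multiple/select_as_multiple, tested above, shard one wide frame across several tables and query them through a selector table; a minimal sketch (`multi.h5` is an arbitrary file name):

```python
import pandas as pd
import pandas.util.testing as tm

df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns=lambda x: '%s_2' % x)
df = pd.concat([df1, df2], axis=1)

with pd.HDFStore('multi.h5', mode='w') as store:  # arbitrary file name
    # shard the wide frame over two tables; 'df1' acts as the selector
    store.append_to_multiple({'df1': ['A', 'B'], 'df2': None},
                             df, selector='df1')
    result = store.select_as_multiple(['df1', 'df2'],
                                      where=['A>0', 'B>0'],
                                      selector='df1')

expected = df[(df.A > 0) & (df.B > 0)]
tm.assert_frame_equal(result, expected)
```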
selector='df1') - expected = concat([df1, df2], axis=1) - expected = expected[(expected.A > 0) & (expected.B > 0)] - tm.assert_frame_equal(result, expected) - - # multiple (diff selector) - result = store.select_as_multiple( - ['df1', 'df2'], where='index>df2.index[4]', selector='df2') - expected = concat([df1, df2], axis=1) - expected = expected[5:] - tm.assert_frame_equal(result, expected) - - # test exception for diff rows - store.append('df3', tm.makeTimeDataFrame(nper=50)) - pytest.raises(ValueError, store.select_as_multiple, - ['df1', 'df3'], where=['A>0', 'B>0'], - selector='df1') - - @pytest.mark.skipif( - LooseVersion(tables.__version__) < LooseVersion('3.1.0'), - reason=("tables version does not support fix for nan selection " - "bug: GH 4858")) - def test_nan_selection_bug_4858(self): - - with ensure_clean_store(self.path) as store: - - df = DataFrame(dict(cols=range(6), values=range(6)), - dtype='float64') - df['cols'] = (df['cols'] + 10).apply(str) - df.iloc[0] = np.nan - - expected = DataFrame(dict(cols=['13.0', '14.0', '15.0'], values=[ - 3., 4., 5.]), index=[3, 4, 5]) - - # write w/o the index on that particular column - store.append('df', df, data_columns=True, index=['cols']) - result = store.select('df', where='values>2.0') - assert_frame_equal(result, expected) - - def test_start_stop_table(self): - - with ensure_clean_store(self.path) as store: - - # table - df = DataFrame(dict(A=np.random.rand(20), B=np.random.rand(20))) - store.append('df', df) - - result = store.select( - 'df', "columns=['A']", start=0, stop=5) - expected = df.loc[0:4, ['A']] - tm.assert_frame_equal(result, expected) - - # out of range - result = store.select( - 'df', "columns=['A']", start=30, stop=40) - assert len(result) == 0 - expected = df.loc[30:40, ['A']] - tm.assert_frame_equal(result, expected) - - def test_start_stop_multiple(self): - - # GH 16209 - with ensure_clean_store(self.path) as store: - - df = DataFrame({"foo": [1, 2], "bar": [1, 2]}) - - store.append_to_multiple({'selector': ['foo'], 'data': None}, df, - selector='selector') - result = store.select_as_multiple(['selector', 'data'], - selector='selector', start=0, - stop=1) - expected = df.loc[[0], ['foo', 'bar']] - tm.assert_frame_equal(result, expected) - - def test_start_stop_fixed(self): - - with ensure_clean_store(self.path) as store: - - # fixed, GH 8287 - df = DataFrame(dict(A=np.random.rand(20), - B=np.random.rand(20)), - index=pd.date_range('20130101', periods=20)) - store.put('df', df) - - result = store.select( - 'df', start=0, stop=5) - expected = df.iloc[0:5, :] - tm.assert_frame_equal(result, expected) - - result = store.select( - 'df', start=5, stop=10) - expected = df.iloc[5:10, :] - tm.assert_frame_equal(result, expected) - - # out of range - result = store.select( - 'df', start=30, stop=40) - expected = df.iloc[30:40, :] - tm.assert_frame_equal(result, expected) - - # series - s = df.A - store.put('s', s) - result = store.select('s', start=0, stop=5) - expected = s.iloc[0:5] - tm.assert_series_equal(result, expected) - - result = store.select('s', start=5, stop=10) - expected = s.iloc[5:10] - tm.assert_series_equal(result, expected) - - # sparse; not implemented - df = tm.makeDataFrame() - df.iloc[3:5, 1:3] = np.nan - df.iloc[8:10, -2] = np.nan - dfs = df.to_sparse() - store.put('dfs', dfs) - with pytest.raises(NotImplementedError): - store.select('dfs', start=0, stop=5) - - def test_select_filter_corner(self): - - df = DataFrame(np.random.randn(50, 100)) - df.index = ['%.3d' % c for c in df.index] - df.columns 
= ['%.3d' % c for c in df.columns] - - with ensure_clean_store(self.path) as store: - store.put('frame', df, format='table') - - crit = 'columns=df.columns[:75]' - result = store.select('frame', [crit]) - tm.assert_frame_equal(result, df.loc[:, df.columns[:75]]) - - crit = 'columns=df.columns[:75:2]' - result = store.select('frame', [crit]) - tm.assert_frame_equal(result, df.loc[:, df.columns[:75:2]]) - - def test_path_pathlib(self): - df = tm.makeDataFrame() - - result = tm.round_trip_pathlib( - lambda p: df.to_hdf(p, 'df'), - lambda p: pd.read_hdf(p, 'df')) - tm.assert_frame_equal(df, result) - - @pytest.mark.parametrize('start, stop', [(0, 2), (1, 2), (None, None)]) - def test_contiguous_mixed_data_table(self, start, stop): - # GH 17021 - # ValueError when reading a contiguous mixed-data table ft. VLArray - df = DataFrame({'a': Series([20111010, 20111011, 20111012]), - 'b': Series(['ab', 'cd', 'ab'])}) - - with ensure_clean_store(self.path) as store: - store.append('test_dataset', df) - - result = store.select('test_dataset', start=start, stop=stop) - assert_frame_equal(df[start:stop], result) - - def test_path_pathlib_hdfstore(self): - df = tm.makeDataFrame() - - def writer(path): - with pd.HDFStore(path) as store: - df.to_hdf(store, 'df') - - def reader(path): - with pd.HDFStore(path) as store: - return pd.read_hdf(store, 'df') - - result = tm.round_trip_pathlib(writer, reader) - tm.assert_frame_equal(df, result) - - def test_pickle_path_localpath(self): - df = tm.makeDataFrame() - result = tm.round_trip_pathlib( - lambda p: df.to_hdf(p, 'df'), - lambda p: pd.read_hdf(p, 'df')) - tm.assert_frame_equal(df, result) - - def test_path_localpath_hdfstore(self): - df = tm.makeDataFrame() - - def writer(path): - with pd.HDFStore(path) as store: - df.to_hdf(store, 'df') - - def reader(path): - with pd.HDFStore(path) as store: - return pd.read_hdf(store, 'df') - - result = tm.round_trip_localpath(writer, reader) - tm.assert_frame_equal(df, result) - - def _check_roundtrip(self, obj, comparator, compression=False, **kwargs): - - options = {} - if compression: - options['complib'] = _default_compressor - - with ensure_clean_store(self.path, 'w', **options) as store: - store['obj'] = obj - retrieved = store['obj'] - comparator(retrieved, obj, **kwargs) - - def _check_double_roundtrip(self, obj, comparator, compression=False, - **kwargs): - options = {} - if compression: - options['complib'] = compression or _default_compressor - - with ensure_clean_store(self.path, 'w', **options) as store: - store['obj'] = obj - retrieved = store['obj'] - comparator(retrieved, obj, **kwargs) - store['obj'] = retrieved - again = store['obj'] - comparator(again, obj, **kwargs) - - def _check_roundtrip_table(self, obj, comparator, compression=False): - options = {} - if compression: - options['complib'] = _default_compressor - - with ensure_clean_store(self.path, 'w', **options) as store: - store.put('obj', obj, format='table') - retrieved = store['obj'] - - comparator(retrieved, obj) - - def test_multiple_open_close(self): - # gh-4409: open & close multiple times - - with ensure_clean_path(self.path) as path: - - df = tm.makeDataFrame() - df.to_hdf(path, 'df', mode='w', format='table') - - # single - store = HDFStore(path) - assert 'CLOSED' not in store.info() - assert store.is_open - - store.close() - assert 'CLOSED' in store.info() - assert not store.is_open - - with ensure_clean_path(self.path) as path: - - if pytables._table_file_open_policy_is_strict: - - # multiples - store1 = HDFStore(path) - - def f(): - 
HDFStore(path) - pytest.raises(ValueError, f) - store1.close() - - else: - - # multiples - store1 = HDFStore(path) - store2 = HDFStore(path) - - assert 'CLOSED' not in store1.info() - assert 'CLOSED' not in store2.info() - assert store1.is_open - assert store2.is_open - - store1.close() - assert 'CLOSED' in store1.info() - assert not store1.is_open - assert 'CLOSED' not in store2.info() - assert store2.is_open - - store2.close() - assert 'CLOSED' in store1.info() - assert 'CLOSED' in store2.info() - assert not store1.is_open - assert not store2.is_open - - # nested close - store = HDFStore(path, mode='w') - store.append('df', df) - - store2 = HDFStore(path) - store2.append('df2', df) - store2.close() - assert 'CLOSED' in store2.info() - assert not store2.is_open - - store.close() - assert 'CLOSED' in store.info() - assert not store.is_open - - # double closing - store = HDFStore(path, mode='w') - store.append('df', df) - - store2 = HDFStore(path) - store.close() - assert 'CLOSED' in store.info() - assert not store.is_open - - store2.close() - assert 'CLOSED' in store2.info() - assert not store2.is_open - - # ops on a closed store - with ensure_clean_path(self.path) as path: - - df = tm.makeDataFrame() - df.to_hdf(path, 'df', mode='w', format='table') - - store = HDFStore(path) - store.close() - - pytest.raises(ClosedFileError, store.keys) - pytest.raises(ClosedFileError, lambda: 'df' in store) - pytest.raises(ClosedFileError, lambda: len(store)) - pytest.raises(ClosedFileError, lambda: store['df']) - pytest.raises(AttributeError, lambda: store.df) - pytest.raises(ClosedFileError, store.select, 'df') - pytest.raises(ClosedFileError, store.get, 'df') - pytest.raises(ClosedFileError, store.append, 'df2', df) - pytest.raises(ClosedFileError, store.put, 'df3', df) - pytest.raises(ClosedFileError, store.get_storer, 'df2') - pytest.raises(ClosedFileError, store.remove, 'df2') - - def f(): - store.select('df') - tm.assert_raises_regex(ClosedFileError, 'file is not open', f) - - def test_pytables_native_read(self): - - with ensure_clean_store( - tm.get_data_path('legacy_hdf/pytables_native.h5'), - mode='r') as store: - d2 = store['detector/readout'] - assert isinstance(d2, DataFrame) - - @pytest.mark.skipif(PY35 and is_platform_windows(), - reason="native2 read fails oddly on windows / 3.5") - def test_pytables_native2_read(self): - with ensure_clean_store( - tm.get_data_path('legacy_hdf/pytables_native2.h5'), - mode='r') as store: - str(store) - d1 = store['detector'] - assert isinstance(d1, DataFrame) - - def test_legacy_table_read(self): - # legacy table types - with ensure_clean_store( - tm.get_data_path('legacy_hdf/legacy_table.h5'), - mode='r') as store: - - with catch_warnings(record=True): - store.select('df1') - store.select('df2') - store.select('wp1') - - # force the frame - store.select('df2', typ='legacy_frame') - - # old version warning - pytest.raises( - Exception, store.select, 'wp1', 'minor_axis=B') - - df2 = store.select('df2') - result = store.select('df2', 'index>df2.index[2]') - expected = df2[df2.index > df2.index[2]] - assert_frame_equal(expected, result) - - def test_copy(self): - - with catch_warnings(record=True): - - def do_copy(f, new_f=None, keys=None, - propindexes=True, **kwargs): - try: - store = HDFStore(f, 'r') - - if new_f is None: - import tempfile - fd, new_f = tempfile.mkstemp() - - tstore = store.copy( - new_f, keys=keys, propindexes=propindexes, **kwargs) - - # check keys - if keys is None: - keys = store.keys() - assert set(keys) == set(tstore.keys()) - - # 
check indicies & nrows - for k in tstore.keys(): - if tstore.get_storer(k).is_table: - new_t = tstore.get_storer(k) - orig_t = store.get_storer(k) - - assert orig_t.nrows == new_t.nrows - - # check propindixes - if propindexes: - for a in orig_t.axes: - if a.is_indexed: - assert new_t[a.name].is_indexed - - finally: - safe_close(store) - safe_close(tstore) - try: - os.close(fd) - except: - pass - safe_remove(new_f) - - # new table - df = tm.makeDataFrame() - - try: - path = create_tempfile(self.path) - st = HDFStore(path) - st.append('df', df, data_columns=['A']) - st.close() - do_copy(f=path) - do_copy(f=path, propindexes=False) - finally: - safe_remove(path) - - def test_store_datetime_fractional_secs(self): - - with ensure_clean_store(self.path) as store: - dt = datetime.datetime(2012, 1, 2, 3, 4, 5, 123456) - series = Series([0], [dt]) - store['a'] = series - assert store['a'].index[0] == dt - - def test_tseries_indices_series(self): - - with ensure_clean_store(self.path) as store: - idx = tm.makeDateIndex(10) - ser = Series(np.random.randn(len(idx)), idx) - store['a'] = ser - result = store['a'] - - tm.assert_series_equal(result, ser) - assert result.index.freq == ser.index.freq - tm.assert_class_equal(result.index, ser.index, obj="series index") - - idx = tm.makePeriodIndex(10) - ser = Series(np.random.randn(len(idx)), idx) - store['a'] = ser - result = store['a'] - - tm.assert_series_equal(result, ser) - assert result.index.freq == ser.index.freq - tm.assert_class_equal(result.index, ser.index, obj="series index") - - def test_tseries_indices_frame(self): - - with ensure_clean_store(self.path) as store: - idx = tm.makeDateIndex(10) - df = DataFrame(np.random.randn(len(idx), 3), index=idx) - store['a'] = df - result = store['a'] - - assert_frame_equal(result, df) - assert result.index.freq == df.index.freq - tm.assert_class_equal(result.index, df.index, - obj="dataframe index") - - idx = tm.makePeriodIndex(10) - df = DataFrame(np.random.randn(len(idx), 3), idx) - store['a'] = df - result = store['a'] - - assert_frame_equal(result, df) - assert result.index.freq == df.index.freq - tm.assert_class_equal(result.index, df.index, - obj="dataframe index") - - def test_unicode_index(self): - - unicode_values = [u('\u03c3'), u('\u03c3\u03c3')] - - # PerformanceWarning - with catch_warnings(record=True): - s = Series(np.random.randn(len(unicode_values)), unicode_values) - self._check_roundtrip(s, tm.assert_series_equal) - - def test_unicode_longer_encoded(self): - # GH 11234 - char = '\u0394' - df = pd.DataFrame({'A': [char]}) - with ensure_clean_store(self.path) as store: - store.put('df', df, format='table', encoding='utf-8') - result = store.get('df') - tm.assert_frame_equal(result, df) - - df = pd.DataFrame({'A': ['a', char], 'B': ['b', 'b']}) - with ensure_clean_store(self.path) as store: - store.put('df', df, format='table', encoding='utf-8') - result = store.get('df') - tm.assert_frame_equal(result, df) - - def test_store_datetime_mixed(self): - - df = DataFrame( - {'a': [1, 2, 3], 'b': [1., 2., 3.], 'c': ['a', 'b', 'c']}) - ts = tm.makeTimeSeries() - df['d'] = ts.index[:3] - self._check_roundtrip(df, tm.assert_frame_equal) - - # def test_cant_write_multiindex_table(self): - # # for now, #1848 - # df = DataFrame(np.random.randn(10, 4), - # index=[np.arange(5).repeat(2), - # np.tile(np.arange(2), 5)]) - - # pytest.raises(Exception, store.put, 'foo', df, format='table') - - def test_append_with_diff_col_name_types_raises_value_error(self): - df = DataFrame(np.random.randn(10, 1)) - df2 = 
DataFrame({'a': np.random.randn(10)}) - df3 = DataFrame({(1, 2): np.random.randn(10)}) - df4 = DataFrame({('1', 2): np.random.randn(10)}) - df5 = DataFrame({('1', 2, object): np.random.randn(10)}) - - with ensure_clean_store(self.path) as store: - name = 'df_%s' % tm.rands(10) - store.append(name, df) - - for d in (df2, df3, df4, df5): - with pytest.raises(ValueError): - store.append(name, d) - - def test_query_with_nested_special_character(self): - df = DataFrame({'a': ['a', 'a', 'c', 'b', - 'test & test', 'c', 'b', 'e'], - 'b': [1, 2, 3, 4, 5, 6, 7, 8]}) - expected = df[df.a == 'test & test'] - with ensure_clean_store(self.path) as store: - store.append('test', df, format='table', data_columns=True) - result = store.select('test', 'a = "test & test"') - tm.assert_frame_equal(expected, result) - - def test_categorical(self): - - with ensure_clean_store(self.path) as store: - - # Basic - _maybe_remove(store, 's') - s = Series(Categorical(['a', 'b', 'b', 'a', 'a', 'c'], categories=[ - 'a', 'b', 'c', 'd'], ordered=False)) - store.append('s', s, format='table') - result = store.select('s') - tm.assert_series_equal(s, result) - - _maybe_remove(store, 's_ordered') - s = Series(Categorical(['a', 'b', 'b', 'a', 'a', 'c'], categories=[ - 'a', 'b', 'c', 'd'], ordered=True)) - store.append('s_ordered', s, format='table') - result = store.select('s_ordered') - tm.assert_series_equal(s, result) - - _maybe_remove(store, 'df') - - df = DataFrame({"s": s, "vals": [1, 2, 3, 4, 5, 6]}) - store.append('df', df, format='table') - result = store.select('df') - tm.assert_frame_equal(result, df) - - # Dtypes - s = Series([1, 1, 2, 2, 3, 4, 5]).astype('category') - store.append('si', s) - result = store.select('si') - tm.assert_series_equal(result, s) - - s = Series([1, 1, np.nan, 2, 3, 4, 5]).astype('category') - store.append('si2', s) - result = store.select('si2') - tm.assert_series_equal(result, s) - - # Multiple - df2 = df.copy() - df2['s2'] = Series(list('abcdefg')).astype('category') - store.append('df2', df2) - result = store.select('df2') - tm.assert_frame_equal(result, df2) - - # Make sure the metadata is OK - info = store.info() - assert '/df2 ' in info - # assert '/df2/meta/values_block_0/meta' in info - assert '/df2/meta/values_block_1/meta' in info - - # unordered - s = Series(Categorical(['a', 'b', 'b', 'a', 'a', 'c'], categories=[ - 'a', 'b', 'c', 'd'], ordered=False)) - store.append('s2', s, format='table') - result = store.select('s2') - tm.assert_series_equal(result, s) - - # Query - store.append('df3', df, data_columns=['s']) - expected = df[df.s.isin(['b', 'c'])] - result = store.select('df3', where=['s in ["b","c"]']) - tm.assert_frame_equal(result, expected) - - expected = df[df.s.isin(['b', 'c'])] - result = store.select('df3', where=['s = ["b","c"]']) - tm.assert_frame_equal(result, expected) - - expected = df[df.s.isin(['d'])] - result = store.select('df3', where=['s in ["d"]']) - tm.assert_frame_equal(result, expected) - - expected = df[df.s.isin(['f'])] - result = store.select('df3', where=['s in ["f"]']) - tm.assert_frame_equal(result, expected) - - # Appending with same categories is ok - store.append('df3', df) - - df = concat([df, df]) - expected = df[df.s.isin(['b', 'c'])] - result = store.select('df3', where=['s in ["b","c"]']) - tm.assert_frame_equal(result, expected) - - # Appending must have the same categories - df3 = df.copy() - df3['s'].cat.remove_unused_categories(inplace=True) - - with pytest.raises(ValueError): - store.append('df3', df3) - - # Remove, and make sure meta 
data is removed (its a recursive - # removal so should be). - result = store.select('df3/meta/s/meta') - assert result is not None - store.remove('df3') - - with pytest.raises(KeyError): - store.select('df3/meta/s/meta') - - def test_categorical_conversion(self): - - # GH13322 - # Check that read_hdf with categorical columns doesn't return rows if - # where criteria isn't met. - obsids = ['ESP_012345_6789', 'ESP_987654_3210'] - imgids = ['APF00006np', 'APF0001imm'] - data = [4.3, 9.8] - - # Test without categories - df = DataFrame(dict(obsids=obsids, imgids=imgids, data=data)) - - # We are expecting an empty DataFrame matching types of df - expected = df.iloc[[], :] - with ensure_clean_path(self.path) as path: - df.to_hdf(path, 'df', format='table', data_columns=True) - result = read_hdf(path, 'df', where='obsids=B') - tm.assert_frame_equal(result, expected) - - # Test with categories - df.obsids = df.obsids.astype('category') - df.imgids = df.imgids.astype('category') - - # We are expecting an empty DataFrame matching types of df - expected = df.iloc[[], :] - with ensure_clean_path(self.path) as path: - df.to_hdf(path, 'df', format='table', data_columns=True) - result = read_hdf(path, 'df', where='obsids=B') - tm.assert_frame_equal(result, expected) - - def test_categorical_nan_only_columns(self): - # GH18413 - # Check that read_hdf with categorical columns with NaN-only values can - # be read back. - df = pd.DataFrame({ - 'a': ['a', 'b', 'c', np.nan], - 'b': [np.nan, np.nan, np.nan, np.nan], - 'c': [1, 2, 3, 4], - 'd': pd.Series([None] * 4, dtype=object) - }) - df['a'] = df.a.astype('category') - df['b'] = df.b.astype('category') - df['d'] = df.b.astype('category') - expected = df - with ensure_clean_path(self.path) as path: - df.to_hdf(path, 'df', format='table', data_columns=True) - result = read_hdf(path, 'df') - tm.assert_frame_equal(result, expected) - - def test_duplicate_column_name(self): - df = DataFrame(columns=["a", "a"], data=[[0, 0]]) - - with ensure_clean_path(self.path) as path: - pytest.raises(ValueError, df.to_hdf, - path, 'df', format='fixed') - - df.to_hdf(path, 'df', format='table') - other = read_hdf(path, 'df') - - tm.assert_frame_equal(df, other) - assert df.equals(other) - assert other.equals(df) - - def test_round_trip_equals(self): - # GH 9330 - df = DataFrame({"B": [1, 2], "A": ["x", "y"]}) - - with ensure_clean_path(self.path) as path: - df.to_hdf(path, 'df', format='table') - other = read_hdf(path, 'df') - tm.assert_frame_equal(df, other) - assert df.equals(other) - assert other.equals(df) - - def test_preserve_timedeltaindex_type(self): - # GH9635 - # Storing TimedeltaIndexed DataFrames in fixed stores did not preserve - # the type of the index. - df = DataFrame(np.random.normal(size=(10, 5))) - df.index = timedelta_range( - start='0s', periods=10, freq='1s', name='example') - - with ensure_clean_store(self.path) as store: - - store['df'] = df - assert_frame_equal(store['df'], df) - - def test_columns_multiindex_modified(self): - # BUG: 7212 - # read_hdf store.select modified the passed columns parameters - # when multi-indexed. 
- - df = DataFrame(np.random.rand(4, 5), - index=list('abcd'), - columns=list('ABCDE')) - df.index.name = 'letters' - df = df.set_index(keys='E', append=True) - - data_columns = df.index.names + df.columns.tolist() - with ensure_clean_path(self.path) as path: - df.to_hdf(path, 'df', - mode='a', - append=True, - data_columns=data_columns, - index=False) - cols2load = list('BCD') - cols2load_original = list(cols2load) - df_loaded = read_hdf(path, 'df', columns=cols2load) # noqa - assert cols2load_original == cols2load - - def test_to_hdf_with_object_column_names(self): - # GH9057 - # Writing HDF5 table format should only work for string-like - # column types - - types_should_fail = [tm.makeIntIndex, tm.makeFloatIndex, - tm.makeDateIndex, tm.makeTimedeltaIndex, - tm.makePeriodIndex] - types_should_run = [tm.makeStringIndex, tm.makeCategoricalIndex] - - if compat.PY3: - types_should_run.append(tm.makeUnicodeIndex) - else: - types_should_fail.append(tm.makeUnicodeIndex) - - for index in types_should_fail: - df = DataFrame(np.random.randn(10, 2), columns=index(2)) - with ensure_clean_path(self.path) as path: - with catch_warnings(record=True): - with pytest.raises( - ValueError, msg=("cannot have non-object label " - "DataIndexableCol")): - df.to_hdf(path, 'df', format='table', - data_columns=True) - - for index in types_should_run: - df = DataFrame(np.random.randn(10, 2), columns=index(2)) - with ensure_clean_path(self.path) as path: - with catch_warnings(record=True): - df.to_hdf(path, 'df', format='table', data_columns=True) - result = pd.read_hdf( - path, 'df', where="index = [{0}]".format(df.index[0])) - assert(len(result)) - - def test_read_hdf_open_store(self): - # GH10330 - # No check for non-string path_or-buf, and no test of open store - df = DataFrame(np.random.rand(4, 5), - index=list('abcd'), - columns=list('ABCDE')) - df.index.name = 'letters' - df = df.set_index(keys='E', append=True) - - with ensure_clean_path(self.path) as path: - df.to_hdf(path, 'df', mode='w') - direct = read_hdf(path, 'df') - store = HDFStore(path, mode='r') - indirect = read_hdf(store, 'df') - tm.assert_frame_equal(direct, indirect) - assert store.is_open - store.close() - - def test_read_hdf_iterator(self): - df = DataFrame(np.random.rand(4, 5), - index=list('abcd'), - columns=list('ABCDE')) - df.index.name = 'letters' - df = df.set_index(keys='E', append=True) - - with ensure_clean_path(self.path) as path: - df.to_hdf(path, 'df', mode='w', format='t') - direct = read_hdf(path, 'df') - iterator = read_hdf(path, 'df', iterator=True) - assert isinstance(iterator, TableIterator) - indirect = next(iterator.__iter__()) - tm.assert_frame_equal(direct, indirect) - iterator.store.close() - - def test_read_hdf_errors(self): - df = DataFrame(np.random.rand(4, 5), - index=list('abcd'), - columns=list('ABCDE')) - - with ensure_clean_path(self.path) as path: - pytest.raises(IOError, read_hdf, path, 'key') - df.to_hdf(path, 'df') - store = HDFStore(path, mode='r') - store.close() - pytest.raises(IOError, read_hdf, store, 'df') - - def test_read_hdf_generic_buffer_errors(self): - pytest.raises(NotImplementedError, read_hdf, BytesIO(b''), 'df') - - def test_invalid_complib(self): - df = DataFrame(np.random.rand(4, 5), - index=list('abcd'), - columns=list('ABCDE')) - with ensure_clean_path(self.path) as path: - with pytest.raises(ValueError): - df.to_hdf(path, 'df', complib='foolib') - # GH10443 - - def test_read_nokey(self): - df = DataFrame(np.random.rand(4, 5), - index=list('abcd'), - columns=list('ABCDE')) - - # 
Categorical dtype not supported for "fixed" format. So no need - # to test with that dtype in the dataframe here. - with ensure_clean_path(self.path) as path: - df.to_hdf(path, 'df', mode='a') - reread = read_hdf(path) - assert_frame_equal(df, reread) - df.to_hdf(path, 'df2', mode='a') - pytest.raises(ValueError, read_hdf, path) - - def test_read_nokey_table(self): - # GH13231 - df = DataFrame({'i': range(5), - 'c': Series(list('abacd'), dtype='category')}) - - with ensure_clean_path(self.path) as path: - df.to_hdf(path, 'df', mode='a', format='table') - reread = read_hdf(path) - assert_frame_equal(df, reread) - df.to_hdf(path, 'df2', mode='a', format='table') - pytest.raises(ValueError, read_hdf, path) - - def test_read_nokey_empty(self): - with ensure_clean_path(self.path) as path: - store = HDFStore(path) - store.close() - pytest.raises(ValueError, read_hdf, path) - - @td.skip_if_no('pathlib') - def test_read_from_pathlib_path(self): - - # GH11773 - from pathlib import Path - - expected = DataFrame(np.random.rand(4, 5), - index=list('abcd'), - columns=list('ABCDE')) - with ensure_clean_path(self.path) as filename: - path_obj = Path(filename) - - expected.to_hdf(path_obj, 'df', mode='a') - actual = read_hdf(path_obj, 'df') - - tm.assert_frame_equal(expected, actual) - - @td.skip_if_no('py.path') - def test_read_from_py_localpath(self): - - # GH11773 - from py.path import local as LocalPath - - expected = DataFrame(np.random.rand(4, 5), - index=list('abcd'), - columns=list('ABCDE')) - with ensure_clean_path(self.path) as filename: - path_obj = LocalPath(filename) - - expected.to_hdf(path_obj, 'df', mode='a') - actual = read_hdf(path_obj, 'df') - - tm.assert_frame_equal(expected, actual) - - def test_query_long_float_literal(self): - # GH 14241 - df = pd.DataFrame({'A': [1000000000.0009, - 1000000000.0011, - 1000000000.0015]}) - - with ensure_clean_store(self.path) as store: - store.append('test', df, format='table', data_columns=True) - - cutoff = 1000000000.0006 - result = store.select('test', "A < %.4f" % cutoff) - assert result.empty - - cutoff = 1000000000.0010 - result = store.select('test', "A > %.4f" % cutoff) - expected = df.loc[[1, 2], :] - tm.assert_frame_equal(expected, result) - - exact = 1000000000.0011 - result = store.select('test', 'A == %.4f' % exact) - expected = df.loc[[1], :] - tm.assert_frame_equal(expected, result) - - def test_query_compare_column_type(self): - # GH 15492 - df = pd.DataFrame({'date': ['2014-01-01', '2014-01-02'], - 'real_date': date_range('2014-01-01', periods=2), - 'float': [1.1, 1.2], - 'int': [1, 2]}, - columns=['date', 'real_date', 'float', 'int']) - - with ensure_clean_store(self.path) as store: - store.append('test', df, format='table', data_columns=True) - - ts = pd.Timestamp('2014-01-01') # noqa - result = store.select('test', where='real_date > ts') - expected = df.loc[[1], :] - tm.assert_frame_equal(expected, result) - - for op in ['<', '>', '==']: - # non strings to string column always fail - for v in [2.1, True, pd.Timestamp('2014-01-01'), - pd.Timedelta(1, 's')]: - query = 'date {op} v'.format(op=op) - with pytest.raises(TypeError): - result = store.select('test', where=query) - - # strings to other columns must be convertible to type - v = 'a' - for col in ['int', 'float', 'real_date']: - query = '{col} {op} v'.format(op=op, col=col) - with pytest.raises(ValueError): - result = store.select('test', where=query) - - for v, col in zip(['1', '1.1', '2014-01-01'], - ['int', 'float', 'real_date']): - query = '{col} {op} v'.format(op=op, 
col=col) - result = store.select('test', where=query) - - if op == '==': - expected = df.loc[[0], :] - elif op == '>': - expected = df.loc[[1], :] - else: - expected = df.loc[[], :] - tm.assert_frame_equal(expected, result) - - @pytest.mark.parametrize('format', ['fixed', 'table']) - def test_read_hdf_series_mode_r(self, format): - # GH 16583 - # Tests that reading a Series saved to an HDF file - # still works if a mode='r' argument is supplied - series = tm.makeFloatSeries() - with ensure_clean_path(self.path) as path: - series.to_hdf(path, key='data', format=format) - result = pd.read_hdf(path, key='data', mode='r') - tm.assert_series_equal(result, series) - - @pytest.mark.skipif(not PY36, reason="Need python 3.6") - def test_fspath(self): - with tm.ensure_clean('foo.h5') as path: - with pd.HDFStore(path) as store: - assert os.fspath(store) == str(path) - - def test_read_py2_hdf_file_in_py3(self): - # GH 16781 - - # tests reading a PeriodIndex DataFrame written in Python2 in Python3 - - # the file was generated in Python 2.7 like so: - # - # df = pd.DataFrame([1.,2,3], index=pd.PeriodIndex( - # ['2015-01-01', '2015-01-02', '2015-01-05'], freq='B')) - # df.to_hdf('periodindex_0.20.1_x86_64_darwin_2.7.13.h5', 'p') - - expected = pd.DataFrame([1., 2, 3], index=pd.PeriodIndex( - ['2015-01-01', '2015-01-02', '2015-01-05'], freq='B')) - - with ensure_clean_store( - tm.get_data_path( - 'legacy_hdf/periodindex_0.20.1_x86_64_darwin_2.7.13.h5'), - mode='r') as store: - result = store['p'] - assert_frame_equal(result, expected) - - -class TestHDFComplexValues(Base): - # GH10447 - - def test_complex_fixed(self): - df = DataFrame(np.random.rand(4, 5).astype(np.complex64), - index=list('abcd'), - columns=list('ABCDE')) - - with ensure_clean_path(self.path) as path: - df.to_hdf(path, 'df') - reread = read_hdf(path, 'df') - assert_frame_equal(df, reread) - - df = DataFrame(np.random.rand(4, 5).astype(np.complex128), - index=list('abcd'), - columns=list('ABCDE')) - with ensure_clean_path(self.path) as path: - df.to_hdf(path, 'df') - reread = read_hdf(path, 'df') - assert_frame_equal(df, reread) - - def test_complex_table(self): - df = DataFrame(np.random.rand(4, 5).astype(np.complex64), - index=list('abcd'), - columns=list('ABCDE')) - - with ensure_clean_path(self.path) as path: - df.to_hdf(path, 'df', format='table') - reread = read_hdf(path, 'df') - assert_frame_equal(df, reread) - - df = DataFrame(np.random.rand(4, 5).astype(np.complex128), - index=list('abcd'), - columns=list('ABCDE')) - - with ensure_clean_path(self.path) as path: - df.to_hdf(path, 'df', format='table', mode='w') - reread = read_hdf(path, 'df') - assert_frame_equal(df, reread) - - def test_complex_mixed_fixed(self): - complex64 = np.array([1.0 + 1.0j, 1.0 + 1.0j, - 1.0 + 1.0j, 1.0 + 1.0j], dtype=np.complex64) - complex128 = np.array([1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j], - dtype=np.complex128) - df = DataFrame({'A': [1, 2, 3, 4], - 'B': ['a', 'b', 'c', 'd'], - 'C': complex64, - 'D': complex128, - 'E': [1.0, 2.0, 3.0, 4.0]}, - index=list('abcd')) - with ensure_clean_path(self.path) as path: - df.to_hdf(path, 'df') - reread = read_hdf(path, 'df') - assert_frame_equal(df, reread) - - def test_complex_mixed_table(self): - complex64 = np.array([1.0 + 1.0j, 1.0 + 1.0j, - 1.0 + 1.0j, 1.0 + 1.0j], dtype=np.complex64) - complex128 = np.array([1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j], - dtype=np.complex128) - df = DataFrame({'A': [1, 2, 3, 4], - 'B': ['a', 'b', 'c', 'd'], - 'C': complex64, - 'D': complex128, - 'E': [1.0, 
2.0, 3.0, 4.0]}, - index=list('abcd')) - - with ensure_clean_store(self.path) as store: - store.append('df', df, data_columns=['A', 'B']) - result = store.select('df', where='A>2') - assert_frame_equal(df.loc[df.A > 2], result) - - with ensure_clean_path(self.path) as path: - df.to_hdf(path, 'df', format='table') - reread = read_hdf(path, 'df') - assert_frame_equal(df, reread) - - def test_complex_across_dimensions_fixed(self): - with catch_warnings(record=True): - complex128 = np.array( - [1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j]) - s = Series(complex128, index=list('abcd')) - df = DataFrame({'A': s, 'B': s}) - p = Panel({'One': df, 'Two': df}) - - objs = [s, df, p] - comps = [tm.assert_series_equal, tm.assert_frame_equal, - tm.assert_panel_equal] - for obj, comp in zip(objs, comps): - with ensure_clean_path(self.path) as path: - obj.to_hdf(path, 'obj', format='fixed') - reread = read_hdf(path, 'obj') - comp(obj, reread) - - def test_complex_across_dimensions(self): - complex128 = np.array([1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j]) - s = Series(complex128, index=list('abcd')) - df = DataFrame({'A': s, 'B': s}) - - with catch_warnings(record=True): - p = Panel({'One': df, 'Two': df}) - - objs = [df, p] - comps = [tm.assert_frame_equal, tm.assert_panel_equal] - for obj, comp in zip(objs, comps): - with ensure_clean_path(self.path) as path: - obj.to_hdf(path, 'obj', format='table') - reread = read_hdf(path, 'obj') - comp(obj, reread) - - def test_complex_indexing_error(self): - complex128 = np.array([1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j], - dtype=np.complex128) - df = DataFrame({'A': [1, 2, 3, 4], - 'B': ['a', 'b', 'c', 'd'], - 'C': complex128}, - index=list('abcd')) - with ensure_clean_store(self.path) as store: - pytest.raises(TypeError, store.append, - 'df', df, data_columns=['C']) - - def test_complex_series_error(self): - complex128 = np.array([1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j]) - s = Series(complex128, index=list('abcd')) - - with ensure_clean_path(self.path) as path: - pytest.raises(TypeError, s.to_hdf, path, 'obj', format='t') - - with ensure_clean_path(self.path) as path: - s.to_hdf(path, 'obj', format='t', index=False) - reread = read_hdf(path, 'obj') - tm.assert_series_equal(s, reread) - - def test_complex_append(self): - df = DataFrame({'a': np.random.randn(100).astype(np.complex128), - 'b': np.random.randn(100)}) - - with ensure_clean_store(self.path) as store: - store.append('df', df, data_columns=['b']) - store.append('df', df) - result = store.select('df') - assert_frame_equal(pd.concat([df, df], 0), result) - - -class TestTimezones(Base): - - def _compare_with_tz(self, a, b): - tm.assert_frame_equal(a, b) - - # compare the zones on each element - for c in a.columns: - for i in a.index: - a_e = a.loc[i, c] - b_e = b.loc[i, c] - if not (a_e == b_e and a_e.tz == b_e.tz): - raise AssertionError( - "invalid tz comparison [%s] [%s]" % (a_e, b_e)) - - def test_append_with_timezones_dateutil(self): - - from datetime import timedelta - - # use maybe_get_tz instead of dateutil.tz.gettz to handle the windows - # filename issues. 
- from pandas._libs.tslibs.timezones import maybe_get_tz - gettz = lambda x: maybe_get_tz('dateutil/' + x) - - # as columns - with ensure_clean_store(self.path) as store: - - _maybe_remove(store, 'df_tz') - df = DataFrame(dict(A=[Timestamp('20130102 2:00:00', tz=gettz( - 'US/Eastern')) + timedelta(hours=1) * i for i in range(5)])) - - store.append('df_tz', df, data_columns=['A']) - result = store['df_tz'] - self._compare_with_tz(result, df) - assert_frame_equal(result, df) - - # select with tz aware - expected = df[df.A >= df.A[3]] - result = store.select('df_tz', where='A>=df.A[3]') - self._compare_with_tz(result, expected) - - # ensure we include dates in DST and STD time here. - _maybe_remove(store, 'df_tz') - df = DataFrame(dict(A=Timestamp('20130102', - tz=gettz('US/Eastern')), - B=Timestamp('20130603', - tz=gettz('US/Eastern'))), - index=range(5)) - store.append('df_tz', df) - result = store['df_tz'] - self._compare_with_tz(result, df) - assert_frame_equal(result, df) - - df = DataFrame(dict(A=Timestamp('20130102', - tz=gettz('US/Eastern')), - B=Timestamp('20130102', tz=gettz('EET'))), - index=range(5)) - pytest.raises(ValueError, store.append, 'df_tz', df) - - # this is ok - _maybe_remove(store, 'df_tz') - store.append('df_tz', df, data_columns=['A', 'B']) - result = store['df_tz'] - self._compare_with_tz(result, df) - assert_frame_equal(result, df) - - # can't append with diff timezone - df = DataFrame(dict(A=Timestamp('20130102', - tz=gettz('US/Eastern')), - B=Timestamp('20130102', tz=gettz('CET'))), - index=range(5)) - pytest.raises(ValueError, store.append, 'df_tz', df) - - # as index - with ensure_clean_store(self.path) as store: - - # GH 4098 example - df = DataFrame(dict(A=Series(lrange(3), index=date_range( - '2000-1-1', periods=3, freq='H', tz=gettz('US/Eastern'))))) - - _maybe_remove(store, 'df') - store.put('df', df) - result = store.select('df') - assert_frame_equal(result, df) - - _maybe_remove(store, 'df') - store.append('df', df) - result = store.select('df') - assert_frame_equal(result, df) - - def test_append_with_timezones_pytz(self): - - from datetime import timedelta - - # as columns - with ensure_clean_store(self.path) as store: - - _maybe_remove(store, 'df_tz') - df = DataFrame(dict(A=[Timestamp('20130102 2:00:00', - tz='US/Eastern') + - timedelta(hours=1) * i - for i in range(5)])) - store.append('df_tz', df, data_columns=['A']) - result = store['df_tz'] - self._compare_with_tz(result, df) - assert_frame_equal(result, df) - - # select with tz aware - self._compare_with_tz(store.select( - 'df_tz', where='A>=df.A[3]'), df[df.A >= df.A[3]]) - - _maybe_remove(store, 'df_tz') - # ensure we include dates in DST and STD time here. 
- df = DataFrame(dict(A=Timestamp('20130102', tz='US/Eastern'), - B=Timestamp('20130603', tz='US/Eastern')), - index=range(5)) - store.append('df_tz', df) - result = store['df_tz'] - self._compare_with_tz(result, df) - assert_frame_equal(result, df) - - df = DataFrame(dict(A=Timestamp('20130102', tz='US/Eastern'), - B=Timestamp('20130102', tz='EET')), - index=range(5)) - pytest.raises(ValueError, store.append, 'df_tz', df) - - # this is ok - _maybe_remove(store, 'df_tz') - store.append('df_tz', df, data_columns=['A', 'B']) - result = store['df_tz'] - self._compare_with_tz(result, df) - assert_frame_equal(result, df) - - # can't append with diff timezone - df = DataFrame(dict(A=Timestamp('20130102', tz='US/Eastern'), - B=Timestamp('20130102', tz='CET')), - index=range(5)) - pytest.raises(ValueError, store.append, 'df_tz', df) - - # as index - with ensure_clean_store(self.path) as store: - - # GH 4098 example - df = DataFrame(dict(A=Series(lrange(3), index=date_range( - '2000-1-1', periods=3, freq='H', tz='US/Eastern')))) - - _maybe_remove(store, 'df') - store.put('df', df) - result = store.select('df') - assert_frame_equal(result, df) - - _maybe_remove(store, 'df') - store.append('df', df) - result = store.select('df') - assert_frame_equal(result, df) - - def test_tseries_select_index_column(self): - # GH7777 - # selecting a UTC datetimeindex column did - # not preserve UTC tzinfo set before storing - - # check that no tz still works - rng = date_range('1/1/2000', '1/30/2000') - frame = DataFrame(np.random.randn(len(rng), 4), index=rng) - - with ensure_clean_store(self.path) as store: - store.append('frame', frame) - result = store.select_column('frame', 'index') - assert rng.tz == DatetimeIndex(result.values).tz - - # check utc - rng = date_range('1/1/2000', '1/30/2000', tz='UTC') - frame = DataFrame(np.random.randn(len(rng), 4), index=rng) - - with ensure_clean_store(self.path) as store: - store.append('frame', frame) - result = store.select_column('frame', 'index') - assert rng.tz == result.dt.tz - - # double check non-utc - rng = date_range('1/1/2000', '1/30/2000', tz='US/Eastern') - frame = DataFrame(np.random.randn(len(rng), 4), index=rng) - - with ensure_clean_store(self.path) as store: - store.append('frame', frame) - result = store.select_column('frame', 'index') - assert rng.tz == result.dt.tz - - def test_timezones_fixed(self): - with ensure_clean_store(self.path) as store: - - # index - rng = date_range('1/1/2000', '1/30/2000', tz='US/Eastern') - df = DataFrame(np.random.randn(len(rng), 4), index=rng) - store['df'] = df - result = store['df'] - assert_frame_equal(result, df) - - # as data - # GH11411 - _maybe_remove(store, 'df') - df = DataFrame({'A': rng, - 'B': rng.tz_convert('UTC').tz_localize(None), - 'C': rng.tz_convert('CET'), - 'D': range(len(rng))}, index=rng) - store['df'] = df - result = store['df'] - assert_frame_equal(result, df) - - def test_fixed_offset_tz(self): - rng = date_range('1/1/2000 00:00:00-07:00', '1/30/2000 00:00:00-07:00') - frame = DataFrame(np.random.randn(len(rng), 4), index=rng) - - with ensure_clean_store(self.path) as store: - store['frame'] = frame - recons = store['frame'] - tm.assert_index_equal(recons.index, rng) - assert rng.tz == recons.index.tz - - @td.skip_if_windows - def test_store_timezone(self): - # GH2852 - # issue storing datetime.date with a timezone as it resets when read - # back in a new timezone - - # original method - with ensure_clean_store(self.path) as store: - - today = datetime.date(2013, 9, 10) - df = DataFrame([1, 2, 
3], index=[today, today, today]) - store['obj1'] = df - result = store['obj1'] - assert_frame_equal(result, df) - - # with tz setting - with ensure_clean_store(self.path) as store: - - with set_timezone('EST5EDT'): - today = datetime.date(2013, 9, 10) - df = DataFrame([1, 2, 3], index=[today, today, today]) - store['obj1'] = df - - with set_timezone('CST6CDT'): - result = store['obj1'] - - assert_frame_equal(result, df) - - def test_legacy_datetimetz_object(self): - # legacy from < 0.17.0 - # 8260 - expected = DataFrame(dict(A=Timestamp('20130102', tz='US/Eastern'), - B=Timestamp('20130603', tz='CET')), - index=range(5)) - with ensure_clean_store( - tm.get_data_path('legacy_hdf/datetimetz_object.h5'), - mode='r') as store: - result = store['df'] - assert_frame_equal(result, expected) - - def test_dst_transitions(self): - # make sure we are not failing on transaitions - with ensure_clean_store(self.path) as store: - times = pd.date_range("2013-10-26 23:00", "2013-10-27 01:00", - tz="Europe/London", - freq="H", - ambiguous='infer') - - for i in [times, times + pd.Timedelta('10min')]: - _maybe_remove(store, 'df') - df = DataFrame({'A': range(len(i)), 'B': i}, index=i) - store.append('df', df) - result = store.select('df') - assert_frame_equal(result, df)
- [x] closes #18498
- [x] tests moved / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry

Moved the test classes for pytables into sub-modules, and moved the HDF test data files they need into the sub-module. I did not change any tests, just broke the classes up into separate modules.
https://api.github.com/repos/pandas-dev/pandas/pulls/19735
2018-02-17T01:25:10Z
2018-07-28T14:36:46Z
null
2018-07-28T14:36:46Z
CLN: Removed Unnecessary Conditionals in groupby_helper
diff --git a/pandas/_libs/groupby_helper.pxi.in b/pandas/_libs/groupby_helper.pxi.in index fe4d31516d839..93fbb4477e2d0 100644 --- a/pandas/_libs/groupby_helper.pxi.in +++ b/pandas/_libs/groupby_helper.pxi.in @@ -56,36 +56,19 @@ def group_add_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, with nogil: - if K > 1: - - for i in range(N): - lab = labels[i] - if lab < 0: - continue - - counts[lab] += 1 - for j in range(K): - val = values[i, j] - - # not nan - if val == val: - nobs[lab, j] += 1 - sumx[lab, j] += val - - else: - - for i in range(N): - lab = labels[i] - if lab < 0: - continue + for i in range(N): + lab = labels[i] + if lab < 0: + continue - counts[lab] += 1 - val = values[i, 0] + counts[lab] += 1 + for j in range(K): + val = values[i, j] # not nan if val == val: - nobs[lab, 0] += 1 - sumx[lab, 0] += val + nobs[lab, j] += 1 + sumx[lab, j] += val for i in range(ncounts): for j in range(K): @@ -119,33 +102,19 @@ def group_prod_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, N, K = (<object> values).shape with nogil: - if K > 1: - for i in range(N): - lab = labels[i] - if lab < 0: - continue - - counts[lab] += 1 - for j in range(K): - val = values[i, j] - - # not nan - if val == val: - nobs[lab, j] += 1 - prodx[lab, j] *= val - else: - for i in range(N): - lab = labels[i] - if lab < 0: - continue + for i in range(N): + lab = labels[i] + if lab < 0: + continue - counts[lab] += 1 - val = values[i, 0] + counts[lab] += 1 + for j in range(K): + val = values[i, j] # not nan if val == val: - nobs[lab, 0] += 1 - prodx[lab, 0] *= val + nobs[lab, j] += 1 + prodx[lab, j] *= val for i in range(ncounts): for j in range(K): @@ -231,31 +200,18 @@ def group_mean_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, N, K = (<object> values).shape with nogil: - if K > 1: - for i in range(N): - lab = labels[i] - if lab < 0: - continue - - counts[lab] += 1 - for j in range(K): - val = values[i, j] - # not nan - if val == val: - nobs[lab, j] += 1 - sumx[lab, j] += val - else: - for i in range(N): - lab = labels[i] - if lab < 0: - continue + for i in range(N): + lab = labels[i] + if lab < 0: + continue - counts[lab] += 1 - val = values[i, 0] + counts[lab] += 1 + for j in range(K): + val = values[i, j] # not nan if val == val: - nobs[lab, 0] += 1 - sumx[lab, 0] += val + nobs[lab, j] += 1 + sumx[lab, j] += val for i in range(ncounts): for j in range(K): @@ -670,33 +626,14 @@ def group_max_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, N, K = (<object> values).shape with nogil: - if K > 1: - for i in range(N): - lab = labels[i] - if lab < 0: - continue - - counts[lab] += 1 - for j in range(K): - val = values[i, j] - - # not nan - {{if name == 'int64'}} - if val != {{nan_val}}: - {{else}} - if val == val and val != {{nan_val}}: - {{endif}} - nobs[lab, j] += 1 - if val > maxx[lab, j]: - maxx[lab, j] = val - else: - for i in range(N): - lab = labels[i] - if lab < 0: - continue + for i in range(N): + lab = labels[i] + if lab < 0: + continue - counts[lab] += 1 - val = values[i, 0] + counts[lab] += 1 + for j in range(K): + val = values[i, j] # not nan {{if name == 'int64'}} @@ -704,9 +641,9 @@ def group_max_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, {{else}} if val == val and val != {{nan_val}}: {{endif}} - nobs[lab, 0] += 1 - if val > maxx[lab, 0]: - maxx[lab, 0] = val + nobs[lab, j] += 1 + if val > maxx[lab, j]: + maxx[lab, j] = val for i in range(ncounts): for j in range(K): @@ -744,33 +681,14 @@ def group_min_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, N, K = (<object> values).shape with nogil: - if K > 1: - for i in 
range(N): - lab = labels[i] - if lab < 0: - continue - - counts[lab] += 1 - for j in range(K): - val = values[i, j] - - # not nan - {{if name == 'int64'}} - if val != {{nan_val}}: - {{else}} - if val == val and val != {{nan_val}}: - {{endif}} - nobs[lab, j] += 1 - if val < minx[lab, j]: - minx[lab, j] = val - else: - for i in range(N): - lab = labels[i] - if lab < 0: - continue + for i in range(N): + lab = labels[i] + if lab < 0: + continue - counts[lab] += 1 - val = values[i, 0] + counts[lab] += 1 + for j in range(K): + val = values[i, j] # not nan {{if name == 'int64'}} @@ -778,9 +696,9 @@ def group_min_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, {{else}} if val == val and val != {{nan_val}}: {{endif}} - nobs[lab, 0] += 1 - if val < minx[lab, 0]: - minx[lab, 0] = val + nobs[lab, j] += 1 + if val < minx[lab, j]: + minx[lab, j] = val for i in range(ncounts): for j in range(K):
- [X] closes #19724
- [X] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry

ASV results are pretty inconsistent, but I couldn't find anything directly related to this change. Of the marks below, only `max` was touched by this change, and it doesn't appear consistently in the results:

```bash
       before           after         ratio
     [2fdf1e25]       [458210c1]
+       155±1μs          173±4μs     1.12  groupby.GroupByMethods.time_method('float', 'max')
+       324±1μs          361±5μs     1.12  groupby.GroupByMethods.time_method('float', 'median')
-       722±5μs          638±8μs     0.88  groupby.GroupByMethods.time_method('int', 'cumprod')
-      540±10μs          463±1μs     0.86  groupby.GroupByMethods.time_method('int', 'cummax')
-      551±20μs          459±1μs     0.83  groupby.GroupByMethods.time_method('int', 'cummin')
-    1.17±0.1ms          475±7μs     0.40  groupby.GroupByMethods.time_method('float', 'sem')
```
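For reference, a minimal pure-Python sketch of what the collapsed kernel computes (the real code is the templated Cython in the diff above; the names here are illustrative only). The point of the cleanup is that the old `K == 1` branch was just the general per-column loop with a single iteration:

```python
import numpy as np

def group_add(values, labels, ngroups):
    """Pure-Python analogue of the group_add kernel after the cleanup."""
    N, K = values.shape
    counts = np.zeros(ngroups, dtype=np.int64)
    nobs = np.zeros((ngroups, K))
    sumx = np.zeros((ngroups, K))
    for i in range(N):
        lab = labels[i]
        if lab < 0:           # label -1 means the row belongs to no group
            continue
        counts[lab] += 1
        for j in range(K):    # one loop handles K == 1 and K > 1 alike
            val = values[i, j]
            if val == val:    # NaN check: NaN != NaN
                nobs[lab, j] += 1
                sumx[lab, j] += val
    sumx[nobs == 0] = np.nan  # groups with no observations yield NaN
    return counts, sumx
```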
https://api.github.com/repos/pandas-dev/pandas/pulls/19734
2018-02-17T00:48:42Z
2018-02-18T16:39:50Z
2018-02-18T16:39:49Z
2018-02-18T18:40:56Z
BUG: Avoid Timedelta rounding when specifying unit and integer (#12690)
diff --git a/doc/source/whatsnew/v0.23.1.txt b/doc/source/whatsnew/v0.23.1.txt index 5a553264e828b..5e26c0ac4433a 100644 --- a/doc/source/whatsnew/v0.23.1.txt +++ b/doc/source/whatsnew/v0.23.1.txt @@ -53,7 +53,10 @@ Strings ^^^^^^^ - Bug in :meth:`Series.str.replace()` where the method throws `TypeError` on Python 3.5.2 (:issue: `21078`) -- + +Timedelta +^^^^^^^^^ +- Bug in :class:`Timedelta`: where passing a float with a unit would prematurely round the float precision (:issue: `14156`) Categorical ^^^^^^^^^^^ diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx index f7bb6c1dbb304..22f9d3327f575 100644 --- a/pandas/_libs/tslibs/timedeltas.pyx +++ b/pandas/_libs/tslibs/timedeltas.pyx @@ -202,22 +202,22 @@ cpdef inline int64_t cast_from_unit(object ts, object unit) except? -1: if unit == 'D' or unit == 'd': m = 1000000000L * 86400 - p = 6 + p = 9 elif unit == 'h': m = 1000000000L * 3600 - p = 6 + p = 9 elif unit == 'm': m = 1000000000L * 60 - p = 6 + p = 9 elif unit == 's': m = 1000000000L - p = 6 + p = 9 elif unit == 'ms': m = 1000000L - p = 3 + p = 6 elif unit == 'us': m = 1000L - p = 0 + p = 3 elif unit == 'ns' or unit is None: m = 1L p = 0 @@ -231,10 +231,10 @@ cpdef inline int64_t cast_from_unit(object ts, object unit) except? -1: # cast the unit, multiply base/frace separately # to avoid precision issues from float -> int base = <int64_t> ts - frac = ts -base + frac = ts - base if p: frac = round(frac, p) - return <int64_t> (base *m) + <int64_t> (frac *m) + return <int64_t> (base * m) + <int64_t> (frac * m) cdef inline _decode_if_necessary(object ts): diff --git a/pandas/tests/indexes/datetimes/test_tools.py b/pandas/tests/indexes/datetimes/test_tools.py index 45be3974dad63..8b0514764b0c0 100644 --- a/pandas/tests/indexes/datetimes/test_tools.py +++ b/pandas/tests/indexes/datetimes/test_tools.py @@ -650,6 +650,14 @@ def test_unit_mixed(self, cache): with pytest.raises(ValueError): pd.to_datetime(arr, errors='raise', cache=cache) + @pytest.mark.parametrize('cache', [True, False]) + def test_unit_rounding(self, cache): + # GH 14156: argument will incur floating point errors but no + # premature rounding + result = pd.to_datetime(1434743731.8770001, unit='s', cache=cache) + expected = pd.Timestamp('2015-06-19 19:55:31.877000093') + assert result == expected + @pytest.mark.parametrize('cache', [True, False]) def test_dataframe(self, cache): diff --git a/pandas/tests/io/sas/test_sas7bdat.py b/pandas/tests/io/sas/test_sas7bdat.py index 5da347e47957c..b80263021c269 100644 --- a/pandas/tests/io/sas/test_sas7bdat.py +++ b/pandas/tests/io/sas/test_sas7bdat.py @@ -182,6 +182,8 @@ def test_date_time(): fname = os.path.join(dirpath, "datetime.csv") df0 = pd.read_csv(fname, parse_dates=['Date1', 'Date2', 'DateTime', 'DateTimeHi', 'Taiw']) + # GH 19732: Timestamps imported from sas will incur floating point errors + df.iloc[:, 3] = df.iloc[:, 3].dt.round('us') tm.assert_frame_equal(df, df0) diff --git a/pandas/tests/scalar/timedelta/test_timedelta.py b/pandas/tests/scalar/timedelta/test_timedelta.py index 3fdc2aa71bfc0..205fdf49d3e91 100644 --- a/pandas/tests/scalar/timedelta/test_timedelta.py +++ b/pandas/tests/scalar/timedelta/test_timedelta.py @@ -106,6 +106,16 @@ def test_compare_timedelta_ndarray(self): class TestTimedeltas(object): + @pytest.mark.parametrize("unit, value, expected", [ + ('us', 9.999, 9999), ('ms', 9.999999, 9999999), + ('s', 9.999999999, 9999999999)]) + def test_rounding_on_int_unit_construction(self, unit, value, expected): + # GH 12690 + 
result = Timedelta(value, unit=unit) + assert result.value == expected + result = Timedelta(str(value) + unit) + assert result.value == expected + def test_total_seconds_scalar(self): # see gh-10939 rng = Timedelta('1 days, 10:11:12.100123456') diff --git a/pandas/tests/scalar/timestamp/test_timestamp.py b/pandas/tests/scalar/timestamp/test_timestamp.py index b022b327de57c..ab87d98fca8eb 100644 --- a/pandas/tests/scalar/timestamp/test_timestamp.py +++ b/pandas/tests/scalar/timestamp/test_timestamp.py @@ -621,10 +621,51 @@ def test_basics_nanos(self): assert stamp.microsecond == 145224 assert stamp.nanosecond == 192 - def test_unit(self): - - def check(val, unit=None, h=1, s=1, us=0): - stamp = Timestamp(val, unit=unit) + @pytest.mark.parametrize('value, check_kwargs', [ + [946688461000000000, {}], + [946688461000000000 / long(1000), dict(unit='us')], + [946688461000000000 / long(1000000), dict(unit='ms')], + [946688461000000000 / long(1000000000), dict(unit='s')], + [10957, dict(unit='D', h=0)], + pytest.param((946688461000000000 + 500000) / long(1000000000), + dict(unit='s', us=499, ns=964), + marks=pytest.mark.skipif(not PY3, + reason='using truediv, so these' + ' are like floats')), + pytest.param((946688461000000000 + 500000000) / long(1000000000), + dict(unit='s', us=500000), + marks=pytest.mark.skipif(not PY3, + reason='using truediv, so these' + ' are like floats')), + pytest.param((946688461000000000 + 500000) / long(1000000), + dict(unit='ms', us=500), + marks=pytest.mark.skipif(not PY3, + reason='using truediv, so these' + ' are like floats')), + pytest.param((946688461000000000 + 500000) / long(1000000000), + dict(unit='s'), + marks=pytest.mark.skipif(PY3, + reason='get chopped in py2')), + pytest.param((946688461000000000 + 500000000) / long(1000000000), + dict(unit='s'), + marks=pytest.mark.skipif(PY3, + reason='get chopped in py2')), + pytest.param((946688461000000000 + 500000) / long(1000000), + dict(unit='ms'), + marks=pytest.mark.skipif(PY3, + reason='get chopped in py2')), + [(946688461000000000 + 500000) / long(1000), dict(unit='us', us=500)], + [(946688461000000000 + 500000000) / long(1000000), + dict(unit='ms', us=500000)], + [946688461000000000 / 1000.0 + 5, dict(unit='us', us=5)], + [946688461000000000 / 1000.0 + 5000, dict(unit='us', us=5000)], + [946688461000000000 / 1000000.0 + 0.5, dict(unit='ms', us=500)], + [946688461000000000 / 1000000.0 + 0.005, dict(unit='ms', us=5, ns=5)], + [946688461000000000 / 1000000000.0 + 0.5, dict(unit='s', us=500000)], + [10957 + 0.5, dict(unit='D', h=12)]]) + def test_unit(self, value, check_kwargs): + def check(value, unit=None, h=1, s=1, us=0, ns=0): + stamp = Timestamp(value, unit=unit) assert stamp.year == 2000 assert stamp.month == 1 assert stamp.day == 1 @@ -637,41 +678,9 @@ def check(val, unit=None, h=1, s=1, us=0): assert stamp.minute == 0 assert stamp.second == 0 assert stamp.microsecond == 0 - assert stamp.nanosecond == 0 - - ts = Timestamp('20000101 01:01:01') - val = ts.value - days = (ts - Timestamp('1970-01-01')).days - - check(val) - check(val / long(1000), unit='us') - check(val / long(1000000), unit='ms') - check(val / long(1000000000), unit='s') - check(days, unit='D', h=0) + assert stamp.nanosecond == ns - # using truediv, so these are like floats - if PY3: - check((val + 500000) / long(1000000000), unit='s', us=500) - check((val + 500000000) / long(1000000000), unit='s', us=500000) - check((val + 500000) / long(1000000), unit='ms', us=500) - - # get chopped in py2 - else: - check((val + 500000) / 
long(1000000000), unit='s') - check((val + 500000000) / long(1000000000), unit='s') - check((val + 500000) / long(1000000), unit='ms') - - # ok - check((val + 500000) / long(1000), unit='us', us=500) - check((val + 500000000) / long(1000000), unit='ms', us=500000) - - # floats - check(val / 1000.0 + 5, unit='us', us=5) - check(val / 1000.0 + 5000, unit='us', us=5000) - check(val / 1000000.0 + 0.5, unit='ms', us=500) - check(val / 1000000.0 + 0.005, unit='ms', us=5) - check(val / 1000000000.0 + 0.5, unit='s', us=500000) - check(days + 0.5, unit='D', h=12) + check(value, **check_kwargs) def test_roundtrip(self):
- [x] closes #14156
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
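A quick illustration of the behavior being fixed, using the values from the new tests above (on a build that includes this patch; the old code rounded the fractional part one digit too early):

```python
import pandas as pd

# Sub-unit precision is now preserved instead of being rounded away
assert pd.Timedelta(9.999, unit='us').value == 9999          # .value is nanoseconds
assert pd.Timedelta(9.999999, unit='ms').value == 9999999
assert pd.Timedelta(9.999999999, unit='s').value == 9999999999

# The same path is hit by to_datetime with a unit (GH 14156)
ts = pd.to_datetime(1434743731.8770001, unit='s')
assert ts == pd.Timestamp('2015-06-19 19:55:31.877000093')
```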
https://api.github.com/repos/pandas-dev/pandas/pulls/19732
2018-02-16T20:28:14Z
2018-05-21T11:06:04Z
2018-05-21T11:06:04Z
2018-06-08T17:12:29Z
BUG: drop_duplicates not raising KeyError on missing key
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index a2198d9103528..a4924417ff2b7 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -767,6 +767,8 @@ Indexing - Bug in :class:`IntervalIndex` where empty and purely NA data was constructed inconsistently depending on the construction method (:issue:`18421`) - Bug in :func:`IntervalIndex.symmetric_difference` where the symmetric difference with a non-``IntervalIndex`` did not raise (:issue:`18475`) - Bug in :class:`IntervalIndex` where set operations that returned an empty ``IntervalIndex`` had the wrong dtype (:issue:`19101`) +- Bug in :meth:`DataFrame.drop_duplicates` where no ``KeyError`` is raised when passing in columns that don't exist on the ``DataFrame`` (issue:`19726`) + MultiIndex ^^^^^^^^^^ diff --git a/pandas/core/frame.py b/pandas/core/frame.py index a001037b573d4..b4db770e6bb74 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -3655,6 +3655,13 @@ def f(vals): isinstance(subset, tuple) and subset in self.columns): subset = subset, + # Verify all columns in subset exist in the queried dataframe + # Otherwise, raise a KeyError, same as if you try to __getitem__ with a + # key that doesn't exist. + diff = Index(subset).difference(self.columns) + if not diff.empty: + raise KeyError(diff) + vals = (col.values for name, col in self.iteritems() if name in subset) labels, shape = map(list, zip(*map(f, vals))) diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_analytics.py index b9275fc69e7ff..f2b8387072c8d 100644 --- a/pandas/tests/frame/test_analytics.py +++ b/pandas/tests/frame/test_analytics.py @@ -1492,6 +1492,19 @@ def test_drop_duplicates(self): for keep in ['first', 'last', False]: assert df.duplicated(keep=keep).sum() == 0 + @pytest.mark.parametrize('subset', ['a', ['a'], ['a', 'B']]) + def test_duplicated_with_misspelled_column_name(self, subset): + # GH 19730 + df = pd.DataFrame({'A': [0, 0, 1], + 'B': [0, 0, 1], + 'C': [0, 0, 1]}) + + with pytest.raises(KeyError): + df.duplicated(subset) + + with pytest.raises(KeyError): + df.drop_duplicates(subset) + def test_drop_duplicates_with_duplicate_column_names(self): # GH17836 df = DataFrame([
The fix for #17879 introduced an error by iterating over the columns in the dataframe, not the columns in the subset. This meant that passing in a column name missing from the dataframe would no longer raise a `KeyError` like it had previously. This fix checks the subset first, before pulling the necessary columns from the dataframe, and raises a `KeyError` when a given column doesn't exist.

- [x] closes #19726
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
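A small usage sketch of the fixed behavior, mirroring the new test above (assuming a pandas build with this patch):

```python
import pandas as pd

df = pd.DataFrame({'A': [0, 0, 1],
                   'B': [0, 0, 1],
                   'C': [0, 0, 1]})

df.drop_duplicates(['A', 'B'])    # fine: both columns exist

try:
    df.drop_duplicates('a')       # 'a' is not a column -> KeyError again
except KeyError as err:
    print('raised as expected:', err)
```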
https://api.github.com/repos/pandas-dev/pandas/pulls/19730
2018-02-16T18:14:12Z
2018-02-21T11:40:22Z
2018-02-21T11:40:21Z
2018-02-21T14:16:45Z
BUG: fix index op names and pinning
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 59fe4bba649d3..c343126db0ea1 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -1,11 +1,11 @@ -import datetime +from datetime import datetime, timedelta import warnings import operator import numpy as np from pandas._libs import (lib, index as libindex, tslib as libts, algos as libalgos, join as libjoin, - Timestamp, Timedelta) + Timedelta) from pandas._libs.lib import is_datetime_array from pandas.compat import range, u, set_function_name @@ -47,6 +47,7 @@ from pandas.core.base import PandasObject, IndexOpsMixin import pandas.core.common as com import pandas.core.base as base +from pandas.core import ops from pandas.util._decorators import ( Appender, Substitution, cache_readonly, deprecate_kwarg) from pandas.core.indexes.frozen import FrozenList @@ -55,7 +56,7 @@ import pandas.core.algorithms as algos import pandas.core.sorting as sorting from pandas.io.formats.printing import pprint_thing -from pandas.core.ops import _comp_method_OBJECT_ARRAY, make_invalid_op +from pandas.core.ops import make_invalid_op from pandas.core.config import get_option from pandas.core.strings import StringMethods @@ -82,6 +83,74 @@ def _try_get_item(x): return x +def _make_comparison_op(op, cls): + def cmp_method(self, other): + if isinstance(other, (np.ndarray, Index, ABCSeries)): + if other.ndim > 0 and len(self) != len(other): + raise ValueError('Lengths must match to compare') + + # we may need to directly compare underlying + # representations + if needs_i8_conversion(self) and needs_i8_conversion(other): + return self._evaluate_compare(other, op) + + if is_object_dtype(self) and self.nlevels == 1: + # don't pass MultiIndex + with np.errstate(all='ignore'): + result = ops._comp_method_OBJECT_ARRAY(op, self.values, other) + else: + with np.errstate(all='ignore'): + result = op(self.values, np.asarray(other)) + + # technically we could support bool dtyped Index + # for now just return the indexing array directly + if is_bool_dtype(result): + return result + try: + return Index(result) + except TypeError: + return result + + name = '__{name}__'.format(name=op.__name__) + # TODO: docstring? + return set_function_name(cmp_method, name, cls) + + +def _make_arithmetic_op(op, cls): + def index_arithmetic_method(self, other): + if isinstance(other, (ABCSeries, ABCDataFrame)): + return NotImplemented + elif isinstance(other, ABCTimedeltaIndex): + # Defer to subclass implementation + return NotImplemented + + other = self._validate_for_numeric_binop(other, op) + + # handle time-based others + if isinstance(other, (ABCDateOffset, np.timedelta64, timedelta)): + return self._evaluate_with_timedelta_like(other, op) + elif isinstance(other, (datetime, np.datetime64)): + return self._evaluate_with_datetime_like(other, op) + + values = self.values + with np.errstate(all='ignore'): + result = op(values, other) + + result = missing.dispatch_missing(op, values, other, result) + + attrs = self._get_attributes_dict() + attrs = self._maybe_update_attributes(attrs) + if op is divmod: + result = (Index(result[0], **attrs), Index(result[1], **attrs)) + else: + result = Index(result, **attrs) + return result + + name = '__{name}__'.format(name=op.__name__) + # TODO: docstring? 
+ return set_function_name(index_arithmetic_method, name, cls) + + class InvalidIndexError(Exception): pass @@ -2175,11 +2244,13 @@ def __add__(self, other): def __radd__(self, other): return Index(other + np.array(self)) - __iadd__ = __add__ + def __iadd__(self, other): + # alias for __add__ + return self + other def __sub__(self, other): raise TypeError("cannot perform __sub__ with this index type: " - "{typ}".format(typ=type(self))) + "{typ}".format(typ=type(self).__name__)) def __and__(self, other): return self.intersection(other) @@ -3917,13 +3988,11 @@ def dropna(self, how='any'): return self._shallow_copy(self.values[~self._isnan]) return self._shallow_copy() - def _evaluate_with_timedelta_like(self, other, op, opstr, reversed=False): + def _evaluate_with_timedelta_like(self, other, op): # Timedelta knows how to operate with np.array, so dispatch to that # operation and then wrap the results other = Timedelta(other) values = self.values - if reversed: - values, other = other, values with np.errstate(all='ignore'): result = op(values, other) @@ -3934,7 +4003,7 @@ def _evaluate_with_timedelta_like(self, other, op, opstr, reversed=False): return Index(result[0], **attrs), Index(result[1], **attrs) return Index(result, **attrs) - def _evaluate_with_datetime_like(self, other, op, opstr): + def _evaluate_with_datetime_like(self, other, op): raise TypeError("can only perform ops with datetime like values") def _evaluate_compare(self, other, op): @@ -3943,64 +4012,39 @@ def _evaluate_compare(self, other, op): @classmethod def _add_comparison_methods(cls): """ add in comparison methods """ - - def _make_compare(op): - def _evaluate_compare(self, other): - if isinstance(other, (np.ndarray, Index, ABCSeries)): - if other.ndim > 0 and len(self) != len(other): - raise ValueError('Lengths must match to compare') - - # we may need to directly compare underlying - # representations - if needs_i8_conversion(self) and needs_i8_conversion(other): - return self._evaluate_compare(other, op) - - if (is_object_dtype(self) and - self.nlevels == 1): - - # don't pass MultiIndex - with np.errstate(all='ignore'): - result = _comp_method_OBJECT_ARRAY( - op, self.values, other) - else: - with np.errstate(all='ignore'): - result = op(self.values, np.asarray(other)) - - # technically we could support bool dtyped Index - # for now just return the indexing array directly - if is_bool_dtype(result): - return result - try: - return Index(result) - except TypeError: - return result - - name = '__{name}__'.format(name=op.__name__) - return set_function_name(_evaluate_compare, name, cls) - - cls.__eq__ = _make_compare(operator.eq) - cls.__ne__ = _make_compare(operator.ne) - cls.__lt__ = _make_compare(operator.lt) - cls.__gt__ = _make_compare(operator.gt) - cls.__le__ = _make_compare(operator.le) - cls.__ge__ = _make_compare(operator.ge) + cls.__eq__ = _make_comparison_op(operator.eq, cls) + cls.__ne__ = _make_comparison_op(operator.ne, cls) + cls.__lt__ = _make_comparison_op(operator.lt, cls) + cls.__gt__ = _make_comparison_op(operator.gt, cls) + cls.__le__ = _make_comparison_op(operator.le, cls) + cls.__ge__ = _make_comparison_op(operator.ge, cls) @classmethod def _add_numeric_methods_add_sub_disabled(cls): """ add in the numeric add/sub methods to disable """ - cls.__add__ = cls.__radd__ = __iadd__ = make_invalid_op('__add__') # noqa - cls.__sub__ = __isub__ = make_invalid_op('__sub__') # noqa + cls.__add__ = make_invalid_op('__add__') + cls.__radd__ = make_invalid_op('__radd__') + cls.__iadd__ = 
make_invalid_op('__iadd__') + cls.__sub__ = make_invalid_op('__sub__') + cls.__rsub__ = make_invalid_op('__rsub__') + cls.__isub__ = make_invalid_op('__isub__') @classmethod def _add_numeric_methods_disabled(cls): """ add in numeric methods to disable other than add/sub """ cls.__pow__ = make_invalid_op('__pow__') cls.__rpow__ = make_invalid_op('__rpow__') - cls.__mul__ = cls.__rmul__ = make_invalid_op('__mul__') - cls.__floordiv__ = cls.__rfloordiv__ = make_invalid_op('__floordiv__') - cls.__truediv__ = cls.__rtruediv__ = make_invalid_op('__truediv__') + cls.__mul__ = make_invalid_op('__mul__') + cls.__rmul__ = make_invalid_op('__rmul__') + cls.__floordiv__ = make_invalid_op('__floordiv__') + cls.__rfloordiv__ = make_invalid_op('__rfloordiv__') + cls.__truediv__ = make_invalid_op('__truediv__') + cls.__rtruediv__ = make_invalid_op('__rtruediv__') if not compat.PY3: - cls.__div__ = cls.__rdiv__ = make_invalid_op('__div__') + cls.__div__ = make_invalid_op('__div__') + cls.__rdiv__ = make_invalid_op('__rdiv__') + cls.__mod__ = make_invalid_op('__mod__') + cls.__divmod__ = make_invalid_op('__divmod__') cls.__neg__ = make_invalid_op('__neg__') cls.__pos__ = make_invalid_op('__pos__') cls.__abs__ = make_invalid_op('__abs__') @@ -4015,34 +4059,29 @@ def _validate_for_numeric_unaryop(self, op, opstr): if not self._is_numeric_dtype: raise TypeError("cannot evaluate a numeric op " - "{opstr} for type: {typ}".format( - opstr=opstr, - typ=type(self)) - ) + "{opstr} for type: {typ}" + .format(opstr=opstr, typ=type(self).__name__)) - def _validate_for_numeric_binop(self, other, op, opstr): + def _validate_for_numeric_binop(self, other, op): """ return valid other, evaluate or raise TypeError if we are not of the appropriate type internal method called by ops """ + opstr = '__{opname}__'.format(opname=op.__name__) # if we are an inheritor of numeric, # but not actually numeric (e.g. 
DatetimeIndex/PeriodIndex) if not self._is_numeric_dtype: raise TypeError("cannot evaluate a numeric op {opstr} " - "for type: {typ}".format( - opstr=opstr, - typ=type(self)) - ) + "for type: {typ}" + .format(opstr=opstr, typ=type(self).__name__)) if isinstance(other, Index): if not other._is_numeric_dtype: raise TypeError("cannot evaluate a numeric op " - "{opstr} with type: {typ}".format( - opstr=type(self), - typ=type(other)) - ) + "{opstr} with type: {typ}" + .format(opstr=opstr, typ=type(other))) elif isinstance(other, np.ndarray) and not other.ndim: other = other.item() @@ -4054,11 +4093,10 @@ def _validate_for_numeric_binop(self, other, op, opstr): if other.dtype.kind not in ['f', 'i', 'u']: raise TypeError("cannot evaluate a numeric op " "with a non-numeric dtype") - elif isinstance(other, (ABCDateOffset, np.timedelta64, - datetime.timedelta)): + elif isinstance(other, (ABCDateOffset, np.timedelta64, timedelta)): # higher up to handle pass - elif isinstance(other, (Timestamp, np.datetime64)): + elif isinstance(other, (datetime, np.datetime64)): # higher up to handle pass else: @@ -4070,73 +4108,24 @@ def _validate_for_numeric_binop(self, other, op, opstr): @classmethod def _add_numeric_methods_binary(cls): """ add in numeric methods """ - - def _make_evaluate_binop(op, opstr, reversed=False, constructor=Index): - def _evaluate_numeric_binop(self, other): - if isinstance(other, (ABCSeries, ABCDataFrame)): - return NotImplemented - elif isinstance(other, ABCTimedeltaIndex): - # Defer to subclass implementation - return NotImplemented - - other = self._validate_for_numeric_binop(other, op, opstr) - - # handle time-based others - if isinstance(other, (ABCDateOffset, np.timedelta64, - datetime.timedelta)): - return self._evaluate_with_timedelta_like(other, op, opstr, - reversed) - elif isinstance(other, (Timestamp, np.datetime64)): - return self._evaluate_with_datetime_like(other, op, opstr) - - # if we are a reversed non-commutative op - values = self.values - if reversed: - values, other = other, values - - attrs = self._get_attributes_dict() - attrs = self._maybe_update_attributes(attrs) - with np.errstate(all='ignore'): - result = op(values, other) - - result = missing.dispatch_missing(op, values, other, result) - return constructor(result, **attrs) - - return _evaluate_numeric_binop - - cls.__add__ = cls.__radd__ = _make_evaluate_binop( - operator.add, '__add__') - cls.__sub__ = _make_evaluate_binop( - operator.sub, '__sub__') - cls.__rsub__ = _make_evaluate_binop( - operator.sub, '__sub__', reversed=True) - cls.__mul__ = cls.__rmul__ = _make_evaluate_binop( - operator.mul, '__mul__') - cls.__rpow__ = _make_evaluate_binop( - operator.pow, '__pow__', reversed=True) - cls.__pow__ = _make_evaluate_binop( - operator.pow, '__pow__') - cls.__mod__ = _make_evaluate_binop( - operator.mod, '__mod__') - cls.__floordiv__ = _make_evaluate_binop( - operator.floordiv, '__floordiv__') - cls.__rfloordiv__ = _make_evaluate_binop( - operator.floordiv, '__floordiv__', reversed=True) - cls.__truediv__ = _make_evaluate_binop( - operator.truediv, '__truediv__') - cls.__rtruediv__ = _make_evaluate_binop( - operator.truediv, '__truediv__', reversed=True) + cls.__add__ = _make_arithmetic_op(operator.add, cls) + cls.__radd__ = _make_arithmetic_op(ops.radd, cls) + cls.__sub__ = _make_arithmetic_op(operator.sub, cls) + cls.__rsub__ = _make_arithmetic_op(ops.rsub, cls) + cls.__mul__ = _make_arithmetic_op(operator.mul, cls) + cls.__rmul__ = _make_arithmetic_op(ops.rmul, cls) + cls.__rpow__ = 
_make_arithmetic_op(ops.rpow, cls) + cls.__pow__ = _make_arithmetic_op(operator.pow, cls) + cls.__mod__ = _make_arithmetic_op(operator.mod, cls) + cls.__floordiv__ = _make_arithmetic_op(operator.floordiv, cls) + cls.__rfloordiv__ = _make_arithmetic_op(ops.rfloordiv, cls) + cls.__truediv__ = _make_arithmetic_op(operator.truediv, cls) + cls.__rtruediv__ = _make_arithmetic_op(ops.rtruediv, cls) if not compat.PY3: - cls.__div__ = _make_evaluate_binop( - operator.div, '__div__') - cls.__rdiv__ = _make_evaluate_binop( - operator.div, '__div__', reversed=True) + cls.__div__ = _make_arithmetic_op(operator.div, cls) + cls.__rdiv__ = _make_arithmetic_op(ops.rdiv, cls) - cls.__divmod__ = _make_evaluate_binop( - divmod, - '__divmod__', - constructor=lambda result, **attrs: (Index(result[0], **attrs), - Index(result[1], **attrs))) + cls.__divmod__ = _make_arithmetic_op(divmod, cls) @classmethod def _add_numeric_methods_unary(cls): @@ -4153,8 +4142,8 @@ def _evaluate_numeric_unary(self): return _evaluate_numeric_unary - cls.__neg__ = _make_evaluate_unary(lambda x: -x, '__neg__') - cls.__pos__ = _make_evaluate_unary(lambda x: x, '__pos__') + cls.__neg__ = _make_evaluate_unary(operator.neg, '__neg__') + cls.__pos__ = _make_evaluate_unary(operator.pos, '__pos__') cls.__abs__ = _make_evaluate_unary(np.abs, '__abs__') cls.__inv__ = _make_evaluate_unary(lambda x: -x, '__inv__') diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index 187f9fcf52dd4..ac75e5ae5e2a0 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -669,6 +669,7 @@ def __add__(self, other): result = self._add_offset_array(other) elif isinstance(self, TimedeltaIndex) and isinstance(other, Index): if hasattr(other, '_add_delta'): + # i.e. DatetimeIndex, TimedeltaIndex, or PeriodIndex result = other._add_delta(self) else: raise TypeError("cannot add TimedeltaIndex and {typ}" @@ -693,7 +694,11 @@ def __add__(self, other): return result cls.__add__ = __add__ - cls.__radd__ = __add__ + + def __radd__(self, other): + # alias for __add__ + return self.__add__(other) + cls.__radd__ = __radd__ def __sub__(self, other): from pandas.core.index import Index @@ -712,10 +717,10 @@ def __sub__(self, other): # Array/Index of DateOffset objects result = self._sub_offset_array(other) elif isinstance(self, TimedeltaIndex) and isinstance(other, Index): - if not isinstance(other, TimedeltaIndex): - raise TypeError("cannot subtract TimedeltaIndex and {typ}" - .format(typ=type(other).__name__)) - result = self._add_delta(-other) + # We checked above for timedelta64_dtype(other) so this + # must be invalid. 
+ raise TypeError("cannot subtract TimedeltaIndex and {typ}" + .format(typ=type(other).__name__)) elif isinstance(other, DatetimeIndex): result = self._sub_datelike(other) elif is_integer(other): @@ -747,8 +752,15 @@ def __rsub__(self, other): return -(self - other) cls.__rsub__ = __rsub__ - cls.__iadd__ = __add__ - cls.__isub__ = __sub__ + def __iadd__(self, other): + # alias for __add__ + return self.__add__(other) + cls.__iadd__ = __iadd__ + + def __isub__(self, other): + # alias for __sub__ + return self.__sub__(other) + cls.__isub__ = __isub__ def _add_delta(self, other): return NotImplemented diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index debeabf9bae23..17f92339e4205 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -100,10 +100,11 @@ def f(self): return property(f) -def _dt_index_cmp(opname, cls, nat_result=False): +def _dt_index_cmp(opname, cls): """ Wrap comparison operations to convert datetime-like to datetime64 """ + nat_result = True if opname == '__ne__' else False def wrapper(self, other): func = getattr(super(DatetimeIndex, self), opname) @@ -291,7 +292,7 @@ def _join_i8_wrapper(joinf, **kwargs): def _add_comparison_methods(cls): """ add in comparison methods """ cls.__eq__ = _dt_index_cmp('__eq__', cls) - cls.__ne__ = _dt_index_cmp('__ne__', cls, nat_result=True) + cls.__ne__ = _dt_index_cmp('__ne__', cls) cls.__lt__ = _dt_index_cmp('__lt__', cls) cls.__gt__ = _dt_index_cmp('__gt__', cls) cls.__le__ = _dt_index_cmp('__le__', cls) diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index 88f9297652ebf..4c14cbffcd813 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -76,26 +76,25 @@ def dt64arr_to_periodarr(data, freq, tz): _DIFFERENT_FREQ_INDEX = period._DIFFERENT_FREQ_INDEX -def _period_index_cmp(opname, cls, nat_result=False): +def _period_index_cmp(opname, cls): """ - Wrap comparison operations to convert datetime-like to datetime64 + Wrap comparison operations to convert Period-like to PeriodDtype """ + nat_result = True if opname == '__ne__' else False def wrapper(self, other): + op = getattr(self._ndarray_values, opname) if isinstance(other, Period): - func = getattr(self._ndarray_values, opname) - other_base, _ = _gfc(other.freq) if other.freq != self.freq: msg = _DIFFERENT_FREQ_INDEX.format(self.freqstr, other.freqstr) raise IncompatibleFrequency(msg) - result = func(other.ordinal) + result = op(other.ordinal) elif isinstance(other, PeriodIndex): if other.freq != self.freq: msg = _DIFFERENT_FREQ_INDEX.format(self.freqstr, other.freqstr) raise IncompatibleFrequency(msg) - op = getattr(self._ndarray_values, opname) result = op(other._ndarray_values) mask = self._isnan | other._isnan @@ -108,8 +107,7 @@ def wrapper(self, other): result.fill(nat_result) else: other = Period(other, freq=self.freq) - func = getattr(self._ndarray_values, opname) - result = func(other.ordinal) + result = op(other.ordinal) if self.hasnans: result[self._isnan] = nat_result @@ -231,7 +229,7 @@ class PeriodIndex(DatelikeOps, DatetimeIndexOpsMixin, Int64Index): def _add_comparison_methods(cls): """ add in comparison methods """ cls.__eq__ = _period_index_cmp('__eq__', cls) - cls.__ne__ = _period_index_cmp('__ne__', cls, nat_result=True) + cls.__ne__ = _period_index_cmp('__ne__', cls) cls.__lt__ = _period_index_cmp('__lt__', cls) cls.__gt__ = _period_index_cmp('__gt__', cls) cls.__le__ = _period_index_cmp('__le__', cls) diff --git a/pandas/core/indexes/range.py 
b/pandas/core/indexes/range.py index 0ac415ee0b701..9d770cffb0059 100644 --- a/pandas/core/indexes/range.py +++ b/pandas/core/indexes/range.py @@ -16,6 +16,7 @@ from pandas.compat.numpy import function as nv import pandas.core.common as com +from pandas.core import ops from pandas.core.indexes.base import Index, _index_shared_docs from pandas.util._decorators import Appender, cache_readonly import pandas.core.dtypes.concat as _concat @@ -570,16 +571,12 @@ def __floordiv__(self, other): def _add_numeric_methods_binary(cls): """ add in numeric methods, specialized to RangeIndex """ - def _make_evaluate_binop(op, opstr, reversed=False, step=False): + def _make_evaluate_binop(op, step=False): """ Parameters ---------- op : callable that accepts 2 parms perform the binary op - opstr : string - string name of ops - reversed : boolean, default False - if this is a reversed op, e.g. radd step : callable, optional, default to False op to apply to the step parm if not None if False, use the existing step @@ -594,17 +591,13 @@ def _evaluate_numeric_binop(self, other): elif isinstance(other, (timedelta, np.timedelta64)): # GH#19333 is_integer evaluated True on timedelta64, # so we need to catch these explicitly - if reversed: - return op(other, self._int64index) return op(self._int64index, other) - other = self._validate_for_numeric_binop(other, op, opstr) + other = self._validate_for_numeric_binop(other, op) attrs = self._get_attributes_dict() attrs = self._maybe_update_attributes(attrs) left, right = self, other - if reversed: - left, right = right, left try: # apply if we have an override @@ -638,43 +631,26 @@ def _evaluate_numeric_binop(self, other): return result - except (ValueError, TypeError, AttributeError, - ZeroDivisionError): + except (ValueError, TypeError, ZeroDivisionError): # Defer to Int64Index implementation - if reversed: - return op(other, self._int64index) return op(self._int64index, other) + # TODO: Do attrs get handled reliably? 
return _evaluate_numeric_binop - cls.__add__ = cls.__radd__ = _make_evaluate_binop( - operator.add, '__add__') - cls.__sub__ = _make_evaluate_binop(operator.sub, '__sub__') - cls.__rsub__ = _make_evaluate_binop( - operator.sub, '__sub__', reversed=True) - cls.__mul__ = cls.__rmul__ = _make_evaluate_binop( - operator.mul, - '__mul__', - step=operator.mul) - cls.__truediv__ = _make_evaluate_binop( - operator.truediv, - '__truediv__', - step=operator.truediv) - cls.__rtruediv__ = _make_evaluate_binop( - operator.truediv, - '__truediv__', - reversed=True, - step=operator.truediv) + cls.__add__ = _make_evaluate_binop(operator.add) + cls.__radd__ = _make_evaluate_binop(ops.radd) + cls.__sub__ = _make_evaluate_binop(operator.sub) + cls.__rsub__ = _make_evaluate_binop(ops.rsub) + cls.__mul__ = _make_evaluate_binop(operator.mul, step=operator.mul) + cls.__rmul__ = _make_evaluate_binop(ops.rmul, step=ops.rmul) + cls.__truediv__ = _make_evaluate_binop(operator.truediv, + step=operator.truediv) + cls.__rtruediv__ = _make_evaluate_binop(ops.rtruediv, + step=ops.rtruediv) if not compat.PY3: - cls.__div__ = _make_evaluate_binop( - operator.div, - '__div__', - step=operator.div) - cls.__rdiv__ = _make_evaluate_binop( - operator.div, - '__div__', - reversed=True, - step=operator.div) + cls.__div__ = _make_evaluate_binop(operator.div, step=operator.div) + cls.__rdiv__ = _make_evaluate_binop(ops.rdiv, step=ops.rdiv) RangeIndex._add_numeric_methods() diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py index 6b61db53d9a11..3542a24290f89 100644 --- a/pandas/core/indexes/timedeltas.py +++ b/pandas/core/indexes/timedeltas.py @@ -53,10 +53,11 @@ def f(self): return property(f) -def _td_index_cmp(opname, cls, nat_result=False): +def _td_index_cmp(opname, cls): """ Wrap comparison operations to convert timedelta-like to timedelta64 """ + nat_result = True if opname == '__ne__' else False def wrapper(self, other): msg = "cannot compare a TimedeltaIndex with type {0}" @@ -184,7 +185,7 @@ def _join_i8_wrapper(joinf, **kwargs): def _add_comparison_methods(cls): """ add in comparison methods """ cls.__eq__ = _td_index_cmp('__eq__', cls) - cls.__ne__ = _td_index_cmp('__ne__', cls, nat_result=True) + cls.__ne__ = _td_index_cmp('__ne__', cls) cls.__lt__ = _td_index_cmp('__lt__', cls) cls.__gt__ = _td_index_cmp('__gt__', cls) cls.__le__ = _td_index_cmp('__le__', cls) @@ -383,11 +384,12 @@ def _add_delta(self, delta): return TimedeltaIndex(new_values, freq='infer') - def _evaluate_with_timedelta_like(self, other, op, opstr, reversed=False): + def _evaluate_with_timedelta_like(self, other, op): if isinstance(other, ABCSeries): # GH#19042 return NotImplemented + opstr = '__{opname}__'.format(opname=op.__name__).replace('__r', '__') # allow division by a timedelta if opstr in ['__div__', '__truediv__', '__floordiv__']: if _is_convertible_to_td(other): @@ -398,11 +400,9 @@ def _evaluate_with_timedelta_like(self, other, op, opstr, reversed=False): i8 = self.asi8 left, right = i8, other.value - if reversed: - left, right = right, left if opstr in ['__floordiv__']: - result = left // right + result = op(left, right) else: result = op(left, np.float64(right)) result = self._maybe_mask_results(result, convert='float64') diff --git a/pandas/tests/indexes/common.py b/pandas/tests/indexes/common.py index 1162662bf9a08..8f51dbabd5b71 100644 --- a/pandas/tests/indexes/common.py +++ b/pandas/tests/indexes/common.py @@ -127,16 +127,17 @@ def test_numeric_compat(self): idx = self.create_index() 
tm.assert_raises_regex(TypeError, "cannot perform __mul__", lambda: idx * 1) - tm.assert_raises_regex(TypeError, "cannot perform __mul__", + tm.assert_raises_regex(TypeError, "cannot perform __rmul__", lambda: 1 * idx) div_err = "cannot perform __truediv__" if PY3 \ else "cannot perform __div__" tm.assert_raises_regex(TypeError, div_err, lambda: idx / 1) + div_err = div_err.replace(' __', ' __r') tm.assert_raises_regex(TypeError, div_err, lambda: 1 / idx) tm.assert_raises_regex(TypeError, "cannot perform __floordiv__", lambda: idx // 1) - tm.assert_raises_regex(TypeError, "cannot perform __floordiv__", + tm.assert_raises_regex(TypeError, "cannot perform __rfloordiv__", lambda: 1 // idx) def test_logical_compat(self): diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py index 90edcb526bb2e..d7f185853ca45 100644 --- a/pandas/tests/indexes/test_base.py +++ b/pandas/tests/indexes/test_base.py @@ -7,6 +7,7 @@ from collections import defaultdict import pandas.util.testing as tm +from pandas.core.dtypes.generic import ABCIndex from pandas.core.dtypes.common import is_unsigned_integer_dtype from pandas.core.indexes.api import Index, MultiIndex from pandas.tests.indexes.common import Base @@ -1988,6 +1989,17 @@ def test_addsub_arithmetic(self, dtype, delta): tm.assert_index_equal(idx - idx, 0 * idx) assert not (idx - idx).empty + def test_iadd_preserves_name(self): + # GH#17067, GH#19723 __iadd__ and __isub__ should preserve index name + ser = pd.Series([1, 2, 3]) + ser.index.name = 'foo' + + ser.index += 1 + assert ser.index.name == "foo" + + ser.index -= 1 + assert ser.index.name == "foo" + class TestMixedIntIndex(Base): # Mostly the tests from common.py for which the results differ @@ -2301,9 +2313,17 @@ def test_ensure_index_from_sequences(self, data, names, expected): tm.assert_index_equal(result, expected) -@pytest.mark.parametrize('opname', ['eq', 'ne', 'le', 'lt', 'ge', 'gt']) +@pytest.mark.parametrize('opname', ['eq', 'ne', 'le', 'lt', 'ge', 'gt', + 'add', 'radd', 'sub', 'rsub', + 'mul', 'rmul', 'truediv', 'rtruediv', + 'floordiv', 'rfloordiv', + 'pow', 'rpow', 'mod', 'divmod']) def test_generated_op_names(opname, indices): index = indices + if isinstance(index, ABCIndex) and opname == 'rsub': + # pd.Index.__rsub__ does not exist; though the method does exist + # for subclasses. see GH#19723 + return opname = '__{name}__'.format(name=opname) method = getattr(index, opname) assert method.__name__ == opname
This is a moderately big diff, but it changes very little. It fixes incorrect Index op names and pins several methods that were defined but never attached to the class (#19716). It avoids the need to pass `reversed` and `str_rep` in a number of places and cleans up some of the Index classmethods. It also removes the no-longer-needed catching of `AttributeError` in RangeIndex ops. The part that may be controversial (and almost certainly needs new tests) is that `Index.__rsub__` is implemented, since it doesn't currently exist. This implements it in "the obvious way", but the fact that `Index.__sub__` isn't defined in the obvious way suggests this _might_ not be desired. Comments welcome. - [x] closes #19716 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
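A short sketch of what the renamed and re-pinned methods change in practice, mirroring the tests added in this diff:

```python
import pandas as pd

ser = pd.Series([1, 2, 3])
ser.index.name = 'foo'

# __iadd__ / __isub__ now delegate to __add__ / __sub__, so the
# index name survives in-place arithmetic (GH#17067, GH#19723)
ser.index += 1
assert ser.index.name == 'foo'

# Reversed ops are now generated under their own names
idx = pd.Index([1.0, 2.0, 3.0])
assert type(idx).__rmul__.__name__ == '__rmul__'
```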
https://api.github.com/repos/pandas-dev/pandas/pulls/19723
2018-02-16T02:09:12Z
2018-02-23T11:36:43Z
2018-02-23T11:36:42Z
2018-06-22T03:31:15Z
Cythonized GroupBy any
diff --git a/asv_bench/benchmarks/groupby.py b/asv_bench/benchmarks/groupby.py index c347442784d41..3e7e5c821b14c 100644 --- a/asv_bench/benchmarks/groupby.py +++ b/asv_bench/benchmarks/groupby.py @@ -11,6 +11,13 @@ from .pandas_vb_common import setup # noqa +method_blacklist = { + 'object': {'median', 'prod', 'sem', 'cumsum', 'sum', 'cummin', 'mean', + 'max', 'skew', 'cumprod', 'cummax', 'rank', 'pct_change', 'min', + 'var', 'mad', 'describe', 'std'} +} + + class ApplyDictReturn(object): goal_time = 0.2 @@ -153,6 +160,7 @@ def time_frame_nth_any(self, df): def time_frame_nth(self, df): df.groupby(0).nth(0) + def time_series_nth_any(self, df): df[1].groupby(df[0]).nth(0, dropna='any') @@ -369,7 +377,7 @@ class GroupByMethods(object): goal_time = 0.2 param_names = ['dtype', 'method'] - params = [['int', 'float'], + params = [['int', 'float', 'object'], ['all', 'any', 'bfill', 'count', 'cumcount', 'cummax', 'cummin', 'cumprod', 'cumsum', 'describe', 'ffill', 'first', 'head', 'last', 'mad', 'max', 'min', 'median', 'mean', 'nunique', @@ -377,15 +385,19 @@ class GroupByMethods(object): 'std', 'sum', 'tail', 'unique', 'value_counts', 'var']] def setup(self, dtype, method): + if method in method_blacklist.get(dtype, {}): + raise NotImplementedError # skip benchmark ngroups = 1000 size = ngroups * 2 rng = np.arange(ngroups) values = rng.take(np.random.randint(0, ngroups, size=size)) if dtype == 'int': key = np.random.randint(0, size, size=size) - else: + elif dtype == 'float': key = np.concatenate([np.random.random(ngroups) * 0.1, np.random.random(ngroups) * 10.0]) + elif dtype == 'object': + key = ['foo'] * size df = DataFrame({'values': values, 'key': key}) self.df_groupby_method = getattr(df.groupby('key')['values'], method) diff --git a/doc/source/api.rst b/doc/source/api.rst index 0e47499a03f3a..a5e26bc948a70 100644 --- a/doc/source/api.rst +++ b/doc/source/api.rst @@ -2179,8 +2179,12 @@ Computations / Descriptive Stats .. autosummary:: :toctree: generated/ + GroupBy.all + GroupBy.any + GroupBy.bfill GroupBy.count GroupBy.cumcount + GroupBy.ffill GroupBy.first GroupBy.head GroupBy.last @@ -2192,6 +2196,7 @@ Computations / Descriptive Stats GroupBy.nth GroupBy.ohlc GroupBy.prod + GroupBy.rank GroupBy.size GroupBy.sem GroupBy.std diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 6865428c352c1..887063d526d16 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -697,9 +697,10 @@ Performance Improvements - Improved performance of :func:`DataFrame.median` with ``axis=1`` when bottleneck is not installed (:issue:`16468`) - Improved performance of :func:`MultiIndex.get_loc` for large indexes, at the cost of a reduction in performance for small ones (:issue:`18519`) - Improved performance of pairwise ``.rolling()`` and ``.expanding()`` with ``.cov()`` and ``.corr()`` operations (:issue:`17917`) -- Improved performance of :func:`DataFrameGroupBy.rank` (:issue:`15779`) +- Improved performance of :func:`pandas.core.groupby.GroupBy.rank` (:issue:`15779`) - Improved performance of variable ``.rolling()`` on ``.min()`` and ``.max()`` (:issue:`19521`) -- Improved performance of ``GroupBy.ffill`` and ``GroupBy.bfill`` (:issue:`11296`) +- Improved performance of :func:`pandas.core.groupby.GroupBy.ffill` and :func:`pandas.core.groupby.GroupBy.bfill` (:issue:`11296`) +- Improved performance of :func:`pandas.core.groupby.GroupBy.any` and :func:`pandas.core.groupby.GroupBy.all` (:issue:`15435`) .. 
_whatsnew_0230.docs: diff --git a/pandas/_libs/groupby.pyx b/pandas/_libs/groupby.pyx index e3d208a915225..d3fcd84e5f38d 100644 --- a/pandas/_libs/groupby.pyx +++ b/pandas/_libs/groupby.pyx @@ -310,5 +310,62 @@ def group_fillna_indexer(ndarray[int64_t] out, ndarray[int64_t] labels, filled_vals = 0 +@cython.boundscheck(False) +@cython.wraparound(False) +def group_any_all(ndarray[uint8_t] out, + ndarray[int64_t] labels, + ndarray[uint8_t] values, + ndarray[uint8_t] mask, + object val_test, + bint skipna): + """Aggregated boolean values to show truthfulness of group elements + + Parameters + ---------- + out : array of values which this method will write its results to + labels : array containing unique label for each group, with its + ordering matching up to the corresponding record in `values` + values : array containing the truth value of each element + mask : array indicating whether a value is na or not + val_test : str {'any', 'all'} + String object dictating whether to use any or all truth testing + skipna : boolean + Flag to ignore nan values during truth testing + + Notes + ----- + This method modifies the `out` parameter rather than returning an object. + The returned values will either be 0 or 1 (False or True, respectively). + """ + cdef: + Py_ssize_t i, N=len(labels) + int64_t lab + uint8_t flag_val + + if val_test == 'all': + # Because the 'all' value of an empty iterable in Python is True we can + # start with an array full of ones and set to zero when a False value + # is encountered + flag_val = 0 + elif val_test == 'any': + # Because the 'any' value of an empty iterable in Python is False we + # can start with an array full of zeros and set to one only if any + # value encountered is True + flag_val = 1 + else: + raise ValueError("'bool_func' must be either 'any' or 'all'!") + + out.fill(1 - flag_val) + + with nogil: + for i in range(N): + lab = labels[i] + if lab < 0 or (skipna and mask[i]): + continue + + if values[i] == flag_val: + out[lab] = flag_val + + # generated from template include "groupby_helper.pxi" diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index 00643614e8803..b8ca104c4b2c7 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -1219,6 +1219,53 @@ class GroupBy(_GroupBy): """ _apply_whitelist = _common_apply_whitelist + def _bool_agg(self, val_test, skipna): + """Shared func to call any / all Cython GroupBy implementations""" + + def objs_to_bool(vals): + try: + vals = vals.astype(np.bool) + except ValueError: # for objects + vals = np.array([bool(x) for x in vals]) + + return vals.view(np.uint8) + + def result_to_bool(result): + return result.astype(np.bool, copy=False) + + return self._get_cythonized_result('group_any_all', self.grouper, + aggregate=True, + cython_dtype=np.uint8, + needs_values=True, + needs_mask=True, + pre_processing=objs_to_bool, + post_processing=result_to_bool, + val_test=val_test, skipna=skipna) + + @Substitution(name='groupby') + @Appender(_doc_template) + def any(self, skipna=True): + """Returns True if any value in the group is truthful, else False + + Parameters + ---------- + skipna : bool, default True + Flag to ignore nan values during truth testing + """ + return self._bool_agg('any', skipna) + + @Substitution(name='groupby') + @Appender(_doc_template) + def all(self, skipna=True): + """Returns True if all values in the group are truthful, else False + + Parameters + ---------- + skipna : bool, default True + Flag to ignore nan values during truth testing + """ + return self._bool_agg('all', 
skipna) + @Substitution(name='groupby') @Appender(_doc_template) def count(self): @@ -1485,6 +1532,8 @@ def _fill(self, direction, limit=None): return self._get_cythonized_result('group_fillna_indexer', self.grouper, needs_mask=True, + cython_dtype=np.int64, + result_is_index=True, direction=direction, limit=limit) @Substitution(name='groupby') @@ -1873,18 +1922,40 @@ def cummax(self, axis=0, **kwargs): return self._cython_transform('cummax', numeric_only=False) - def _get_cythonized_result(self, how, grouper, needs_mask=False, - needs_ngroups=False, **kwargs): + def _get_cythonized_result(self, how, grouper, aggregate=False, + cython_dtype=None, needs_values=False, + needs_mask=False, needs_ngroups=False, + result_is_index=False, + pre_processing=None, post_processing=None, + **kwargs): """Get result for Cythonized functions Parameters ---------- how : str, Cythonized function name to be called grouper : Grouper object containing pertinent group info + aggregate : bool, default False + Whether the result should be aggregated to match the number of + groups + cython_dtype : default None + Type of the array that will be modified by the Cython call. If + `None`, the type will be inferred from the values of each slice + needs_values : bool, default False + Whether the values should be a part of the Cython call + signature needs_mask : bool, default False - Whether boolean mask needs to be part of the Cython call signature + Whether boolean mask needs to be part of the Cython call + signature needs_ngroups : bool, default False - Whether number of groups part of the Cython call signature + Whether number of groups is part of the Cython call signature + result_is_index : bool, default False + Whether the result of the Cython operation is an index of + values to be retrieved, instead of the actual values themselves + pre_processing : function, default None + Function to be applied to `values` prior to passing to Cython + Raises if `needs_values` is False + post_processing : function, default None + Function to be applied to result of Cython function **kwargs : dict Extra arguments to be passed back to Cython funcs @@ -1892,14 +1963,40 @@ def _get_cythonized_result(self, how, grouper, needs_mask=False, ------- `Series` or `DataFrame` with filled values """ + if result_is_index and aggregate: + raise ValueError("'result_is_index' and 'aggregate' cannot both " + "be True!") + if post_processing: + if not callable(pre_processing): + raise ValueError("'post_processing' must be a callable!") + if pre_processing: + if not callable(pre_processing): + raise ValueError("'pre_processing' must be a callable!") + if not needs_values: + raise ValueError("Cannot use 'pre_processing' without " + "specifying 'needs_values'!") labels, _, ngroups = grouper.group_info output = collections.OrderedDict() base_func = getattr(libgroupby, how) for name, obj in self._iterate_slices(): - indexer = np.zeros_like(labels, dtype=np.int64) - func = partial(base_func, indexer, labels) + if aggregate: + result_sz = ngroups + else: + result_sz = len(obj.values) + + if not cython_dtype: + cython_dtype = obj.values.dtype + + result = np.zeros(result_sz, dtype=cython_dtype) + func = partial(base_func, result, labels) + if needs_values: + vals = obj.values + if pre_processing: + vals = pre_processing(vals) + func = partial(func, vals) + if needs_mask: mask = isnull(obj.values).view(np.uint8) func = partial(func, mask) @@ -1908,9 +2005,19 @@ def _get_cythonized_result(self, how, grouper, needs_mask=False, func = partial(func, ngroups) 
func(**kwargs) # Call func to modify indexer values in place - output[name] = algorithms.take_nd(obj.values, indexer) - return self._wrap_transformed_output(output) + if result_is_index: + result = algorithms.take_nd(obj.values, result) + + if post_processing: + result = post_processing(result) + + output[name] = result + + if aggregate: + return self._wrap_aggregated_output(output) + else: + return self._wrap_transformed_output(output) @Substitution(name='groupby') @Appender(_doc_template) @@ -1930,7 +2037,9 @@ def shift(self, periods=1, freq=None, axis=0): return self.apply(lambda x: x.shift(periods, freq, axis)) return self._get_cythonized_result('group_shift_indexer', - self.grouper, needs_ngroups=True, + self.grouper, cython_dtype=np.int64, + needs_ngroups=True, + result_is_index=True, periods=periods) @Substitution(name='groupby') diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index 2429e9975fc8e..0561b3a1d8592 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -9,6 +9,7 @@ from pandas import (date_range, bdate_range, Timestamp, Index, MultiIndex, DataFrame, Series, concat, Panel, DatetimeIndex, read_csv) +from pandas.core.dtypes.missing import isna from pandas.errors import UnsupportedFunctionCall, PerformanceWarning from pandas.util.testing import (assert_frame_equal, assert_index_equal, assert_series_equal, assert_almost_equal) @@ -2116,6 +2117,30 @@ def interweave(list_obj): exp = DataFrame({'key': keys, 'val': _exp_vals}) assert_frame_equal(result, exp) + @pytest.mark.parametrize("agg_func", ['any', 'all']) + @pytest.mark.parametrize("skipna", [True, False]) + @pytest.mark.parametrize("vals", [ + ['foo', 'bar', 'baz'], ['foo', '', ''], ['', '', ''], + [1, 2, 3], [1, 0, 0], [0, 0, 0], + [1., 2., 3.], [1., 0., 0.], [0., 0., 0.], + [True, True, True], [True, False, False], [False, False, False], + [np.nan, np.nan, np.nan] + ]) + def test_groupby_bool_aggs(self, agg_func, skipna, vals): + df = DataFrame({'key': ['a'] * 3 + ['b'] * 3, 'val': vals * 2}) + + # Figure out expectation using Python builtin + exp = getattr(compat.builtins, agg_func)(vals) + + # edge case for missing data with skipna and 'any' + if skipna and all(isna(vals)) and agg_func == 'any': + exp = False + + exp_df = DataFrame([exp] * 2, columns=['val'], index=pd.Index( + ['a', 'b'], name='key')) + result = getattr(df.groupby('key'), agg_func)(skipna=skipna) + assert_frame_equal(result, exp_df) + def test_dont_clobber_name_column(self): df = DataFrame({'key': ['a', 'a', 'a', 'b', 'b', 'b'], 'name': ['foo', 'bar', 'baz'] * 2})
- [X] closes #15435 - [X] tests added / passed - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [X] whatsnew entry

```bash
       before           after         ratio
     [2fdf1e25]      [a2223dbf]
-         1.21s         956±3ms      0.79  groupby.GroupByMethods.time_method('float', 'mad')
-       141±2ms      57.8±0.9μs      0.00  groupby.GroupByMethods.time_method('int', 'any')
-       213±7ms        56.6±1μs      0.00  groupby.GroupByMethods.time_method('float', 'any')

SOME BENCHMARKS HAVE CHANGED SIGNIFICANTLY.
```
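A quick usage sketch of the newly Cythonized reductions, following the semantics exercised by the tests in this diff (hypothetical data):

```python
import numpy as np
import pandas as pd

df = pd.DataFrame({'key': ['a', 'a', 'b', 'b'],
                   'val': [1.0, 0.0, np.nan, np.nan]})

grouped = df.groupby('key')
print(grouped.any())              # NaNs skipped: all-NaN group 'b' -> False
print(grouped.all(skipna=False))  # NaNs treated as truthy values
```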
https://api.github.com/repos/pandas-dev/pandas/pulls/19722
2018-02-16T02:06:07Z
2018-03-01T01:30:19Z
2018-03-01T01:30:19Z
2018-03-01T03:40:46Z
Remove redundant parentheses from if conditions
diff --git a/pandas/plotting/_timeseries.py b/pandas/plotting/_timeseries.py index 56b5311326e98..1f99ce0918b86 100644 --- a/pandas/plotting/_timeseries.py +++ b/pandas/plotting/_timeseries.py @@ -108,8 +108,8 @@ def _upsample_others(ax, freq, kwargs): lines.extend(rlines) labels.extend(rlabels) - if (legend is not None and kwargs.get('legend', True) and - len(lines) > 0): + if legend is not None and kwargs.get('legend', True) and \ + len(lines) > 0: title = legend.get_title().get_text() if title == 'None': title = None @@ -234,7 +234,7 @@ def _use_dynamic_x(ax, data): if isinstance(data.index, DatetimeIndex): base = frequencies.get_freq(freq) x = data.index - if (base <= frequencies.FreqGroup.FR_DAY): + if base <= frequencies.FreqGroup.FR_DAY: return x[:1].is_normalized return Period(x[0], freq).to_timestamp(tz=x.tz) == x[0] return True
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/19719
2018-02-15T20:40:44Z
2018-02-15T20:46:51Z
null
2018-02-15T20:46:51Z
BUG: fix Series constructor for scalar and Categorical dtype
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 8d6a3dc72163e..c963a3f403868 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -709,7 +709,8 @@ Categorical ``self`` but in a different order (:issue:`19551`) - Bug in :meth:`Index.astype` with a categorical dtype where the resultant index is not converted to a :class:`CategoricalIndex` for all types of index (:issue:`18630`) - Bug in :meth:`Series.astype` and ``Categorical.astype()`` where an existing categorical data does not get updated (:issue:`10696`, :issue:`18593`) -- Bug in :class:`Index` constructor with ``dtype=CategoricalDtype(...)`` where ``categories`` and ``ordered`` are not maintained (:issue:`19032`) +- Bug in :class:`Index` constructor with ``dtype=CategoricalDtype(...)`` where ``categories`` and ``ordered`` are not maintained (issue:`19032`) +- Bug in :class:`Series` constructor with scalar and ``dtype=CategoricalDtype(...)`` where ``categories`` and ``ordered`` are not maintained (issue:`19565`) Datetimelike ^^^^^^^^^^^^ diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index 55919fb2bea0d..352ce29f5c37b 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -1178,7 +1178,7 @@ def construct_1d_arraylike_from_scalar(value, length, dtype): subarr = DatetimeIndex([value] * length, dtype=dtype) elif is_categorical_dtype(dtype): from pandas import Categorical - subarr = Categorical([value] * length) + subarr = Categorical([value] * length, dtype=dtype) else: if not isinstance(dtype, (np.dtype, type(np.dtype))): dtype = dtype.dtype diff --git a/pandas/tests/dtypes/test_cast.py b/pandas/tests/dtypes/test_cast.py index d13d781f03117..31bd962b67afb 100644 --- a/pandas/tests/dtypes/test_cast.py +++ b/pandas/tests/dtypes/test_cast.py @@ -22,7 +22,8 @@ maybe_convert_string_to_object, maybe_convert_scalar, find_common_type, - construct_1d_object_array_from_listlike) + construct_1d_object_array_from_listlike, + construct_1d_arraylike_from_scalar) from pandas.core.dtypes.dtypes import ( CategoricalDtype, DatetimeTZDtype, @@ -422,3 +423,15 @@ def test_cast_1d_array(self, datum1, datum2): @pytest.mark.parametrize('val', [1, 2., None]) def test_cast_1d_array_invalid_scalar(self, val): pytest.raises(TypeError, construct_1d_object_array_from_listlike, val) + + def test_cast_1d_arraylike_from_scalar_categorical(self): + # GH 19565 - Categorical result from scalar did not maintain categories + # and ordering of the passed dtype + cats = ['a', 'b', 'c'] + cat_type = CategoricalDtype(categories=cats, ordered=False) + expected = pd.Categorical(['a', 'a'], categories=cats) + result = construct_1d_arraylike_from_scalar('a', len(expected), + cat_type) + tm.assert_categorical_equal(result, expected, + check_category_order=True, + check_dtype=True) diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py index 33737387edffa..77f9dfcce686d 100644 --- a/pandas/tests/series/test_constructors.py +++ b/pandas/tests/series/test_constructors.py @@ -270,6 +270,13 @@ def test_constructor_categorical_dtype(self): tm.assert_index_equal(result.cat.categories, pd.Index(['b', 'a'])) assert result.cat.ordered is False + # GH 19565 - Check broadcasting of scalar with Categorical dtype + result = Series('a', index=[0, 1], + dtype=CategoricalDtype(['a', 'b'], ordered=True)) + expected = Series(['a', 'a'], index=[0, 1], + dtype=CategoricalDtype(['a', 'b'], ordered=True)) + tm.assert_series_equal(result, expected, 
check_categorical=True) + def test_categorical_sideeffects_free(self): # Passing a categorical to a Series and then changing values in either # the series or the categorical should not change the values in the
The categories and ordering of the resulting Series were not the same as those of the passed `CategoricalDtype`. Closes #19565 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
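A minimal sketch of the fixed behavior, taken from the test added in this diff:

```python
import pandas as pd
from pandas.api.types import CategoricalDtype

dtype = CategoricalDtype(categories=['a', 'b'], ordered=True)
ser = pd.Series('a', index=[0, 1], dtype=dtype)

# The dtype's categories and ordering are now preserved on broadcast
assert list(ser.cat.categories) == ['a', 'b']
assert ser.cat.ordered
```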
https://api.github.com/repos/pandas-dev/pandas/pulls/19717
2018-02-15T18:49:12Z
2018-02-24T14:58:41Z
2018-02-24T14:58:41Z
2018-02-24T14:58:45Z
BUG: Fix Series constructor for Categorical with index
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index fb22dc40e335f..5330f7e7e998b 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -919,6 +919,7 @@ Reshaping - Comparisons between :class:`Series` and :class:`Index` would return a ``Series`` with an incorrect name, ignoring the ``Index``'s name attribute (:issue:`19582`) - Bug in :func:`qcut` where datetime and timedelta data with ``NaT`` present raised a ``ValueError`` (:issue:`19768`) - Bug in :func:`DataFrame.iterrows`, which would infers strings not compliant to `ISO8601 <https://en.wikipedia.org/wiki/ISO_8601>`_ to datetimes (:issue:`19671`) +- Bug in :class:`Series` constructor with ``Categorical`` where a ```ValueError`` is not raised when an index of different length is given (:issue:`19342`) Other ^^^^^ diff --git a/pandas/core/series.py b/pandas/core/series.py index 26b7fd552b062..8053651a4877a 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -212,7 +212,6 @@ def __init__(self, data=None, index=None, dtype=None, name=None, 'be False.') elif is_extension_array_dtype(data) and dtype is not None: - # GH12574: Allow dtype=category only, otherwise error if not data.dtype.is_dtype(dtype): raise ValueError("Cannot specify a dtype '{}' with an " "extension array of a different " @@ -235,6 +234,18 @@ def __init__(self, data=None, index=None, dtype=None, name=None, if not is_list_like(data): data = [data] index = com._default_index(len(data)) + elif is_list_like(data): + + # a scalar numpy array is list-like but doesn't + # have a proper length + try: + if len(index) != len(data): + raise ValueError( + 'Length of passed values is {val}, ' + 'index implies {ind}' + .format(val=len(data), ind=len(index))) + except TypeError: + pass # create/copy the manager if isinstance(data, SingleBlockManager): diff --git a/pandas/tests/io/formats/test_style.py b/pandas/tests/io/formats/test_style.py index bedb11d4fc4ae..adf8e14b756c2 100644 --- a/pandas/tests/io/formats/test_style.py +++ b/pandas/tests/io/formats/test_style.py @@ -24,7 +24,7 @@ def setup_method(self, method): def h(x, foo='bar'): return pd.Series( - ['color: {foo}'.format(foo=foo)], index=x.index, name=x.name) + 'color: {foo}'.format(foo=foo), index=x.index, name=x.name) self.h = h self.styler = Styler(self.df) diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py index 77f9dfcce686d..25f425ffa0021 100644 --- a/pandas/tests/series/test_constructors.py +++ b/pandas/tests/series/test_constructors.py @@ -400,6 +400,34 @@ def test_constructor_default_index(self): s = Series([0, 1, 2]) tm.assert_index_equal(s.index, pd.Index(np.arange(3))) + @pytest.mark.parametrize('input', [[1, 2, 3], + (1, 2, 3), + list(range(3)), + pd.Categorical(['a', 'b', 'a']), + (i for i in range(3)), + map(lambda x: x, range(3))]) + def test_constructor_index_mismatch(self, input): + # GH 19342 + # test that construction of a Series with an index of different length + # raises an error + msg = 'Length of passed values is 3, index implies 4' + with pytest.raises(ValueError, message=msg): + Series(input, index=np.arange(4)) + + def test_constructor_numpy_scalar(self): + # GH 19342 + # construction with a numpy scalar + # should not raise + result = Series(np.array(100), index=np.arange(4), dtype='int64') + expected = Series(100, index=np.arange(4), dtype='int64') + tm.assert_series_equal(result, expected) + + def test_constructor_broadcast_list(self): + # GH 19342 + # construction with 
single-element container and index + # should raise + pytest.raises(ValueError, Series, ['foo'], index=['a', 'b', 'c']) + def test_constructor_corner(self): df = tm.makeTimeDataFrame() objs = [df, df]
Fixes the Series constructor so that a `ValueError` is raised when a Categorical and an index of mismatched length are given. Closes #19342 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
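A sketch of the new validation, following the tests in this diff (a length-mismatched list-like raises, while a zero-dimensional numpy scalar still broadcasts):

```python
import numpy as np
import pandas as pd

try:
    pd.Series(pd.Categorical(['a', 'b', 'a']), index=np.arange(4))
except ValueError as err:
    print(err)  # Length of passed values is 3, index implies 4

# A scalar numpy array is still broadcast over the index
print(pd.Series(np.array(100), index=np.arange(4), dtype='int64'))
```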
https://api.github.com/repos/pandas-dev/pandas/pulls/19714
2018-02-15T14:04:56Z
2018-02-27T01:13:01Z
2018-02-27T01:13:01Z
2018-02-27T01:45:16Z
Implement maybe_cache for compat between immutable/mutable classes
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 932618ba1df21..2a0e6f9ad4702 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -848,3 +848,4 @@ Other ^^^^^ - Improved error message when attempting to use a Python keyword as an identifier in a ``numexpr`` backed query (:issue:`18221`) +- Bug in :func:`Series.hasnans` that could be incorrectly cached and return incorrect answers if null elements are introduced after an initial call (:issue:`19700`) diff --git a/pandas/_libs/properties.pyx b/pandas/_libs/properties.pyx index 4beb24f07c21c..ef1b4654209cd 100644 --- a/pandas/_libs/properties.pyx +++ b/pandas/_libs/properties.pyx @@ -9,22 +9,30 @@ from cpython cimport ( cdef class cache_readonly(object): cdef readonly: - object func, name, allow_setting + object func, name, allow_setting, __doc__ def __init__(self, func=None, allow_setting=False): if func is not None: self.func = func self.name = func.__name__ + self.__doc__ = func.__doc__ self.allow_setting = allow_setting def __call__(self, func, doc=None): self.func = func self.name = func.__name__ + if doc is not None: + self.__doc__ = doc + else: + self.__doc__ = func.__doc__ return self def __get__(self, obj, typ): - # Get the cache or set a default one if needed + if obj is None: + # accessed on the class, not the instance + return self + # Get the cache or set a default one if needed cache = getattr(obj, '_cache', None) if cache is None: try: @@ -55,6 +63,66 @@ cdef class cache_readonly(object): PyDict_SetItem(cache, self.name, value) + +cdef class maybe_cache(object): + """ + A configurable property-like class that acts like a cache_readonly + if an "_immutable" flag is True and a like property otherwise + """ + + cdef readonly: + object func, name, __doc__ + + def __init__(self, func=None): + if func is not None: + self.func = func + self.name = func.__name__ + self.__doc__ = func.__doc__ + + def __call__(self, func, doc=None): + self.func = func + self.name = func.__name__ + if doc is not None: + self.__doc__ = doc + else: + self.__doc__ = func.__doc__ + return self + + def __get__(self, obj, typ): + cdef: + bint immutable + + if obj is None: + # accessed on the class, not the instance + return self + + # Default to non-caching + immutable = getattr(typ, '_immutable', False) + if not immutable: + # behave like a property + val = self.func(obj) + return val + + # Get the cache or set a default one if needed + cache = getattr(obj, '_cache', None) + if cache is None: + try: + cache = obj._cache = {} + except AttributeError: + return + + if PyDict_Contains(cache, self.name): + # not necessary to Py_INCREF + val = <object> PyDict_GetItem(cache, self.name) + else: + val = self.func(obj) + PyDict_SetItem(cache, self.name, val) + return val + + def __set__(self, obj, value): + raise Exception("cannot set values for [%s]" % self.name) + + cdef class AxisProperty(object): cdef: Py_ssize_t axis diff --git a/pandas/core/series.py b/pandas/core/series.py index 655eaa5373f5a..bb94a637ae7db 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -145,6 +145,8 @@ class Series(base.IndexOpsMixin, generic.NDFrame): ['asobject', 'sortlevel', 'reshape', 'get_value', 'set_value', 'from_csv', 'valid']) + hasnans = property(base.IndexOpsMixin.hasnans.func) + def __init__(self, data=None, index=None, dtype=None, name=None, copy=False, fastpath=False): diff --git a/pandas/tests/series/test_internals.py b/pandas/tests/series/test_internals.py index 79e23459ac992..fff43c8a7cd7e 
100644 --- a/pandas/tests/series/test_internals.py +++ b/pandas/tests/series/test_internals.py @@ -8,6 +8,7 @@ from numpy import nan import numpy as np +import pandas as pd from pandas import Series from pandas.core.indexes.datetimes import Timestamp import pandas._libs.lib as lib @@ -309,3 +310,16 @@ def test_convert_preserve_all_bool(self): r = s._convert(datetime=True, numeric=True) e = Series([False, True, False, False], dtype=bool) tm.assert_series_equal(r, e) + + +def test_hasnans_unchached_for_series(): + # GH#19700 + idx = pd.Index([0, 1]) + assert not idx.hasnans + assert 'hasnans' in idx._cache + ser = idx.to_series() + assert not ser.hasnans + assert not hasattr(ser, '_cache') + ser.iloc[-1] = np.nan + assert ser.hasnans + assert 'enables' in pd.Series.hasnans.__doc__ diff --git a/pandas/tests/test_lib.py b/pandas/tests/test_lib.py index 502f0c3bced61..919262be6dbaf 100644 --- a/pandas/tests/test_lib.py +++ b/pandas/tests/test_lib.py @@ -5,6 +5,7 @@ import numpy as np from pandas._libs import lib, writers as libwriters import pandas.util.testing as tm +import pandas as pd class TestMisc(object): @@ -198,3 +199,10 @@ def test_get_reverse_indexer(self): result = lib.get_reverse_indexer(indexer, 5) expected = np.array([4, 2, 3, 6, 7], dtype=np.int64) tm.assert_numpy_array_equal(result, expected) + + +def test_cache_readonly_preserve_docstrings(): + # GH#19700 accessing class attributes that are cache_readonly + # should a) not return None and b) reflect original docstrings + assert "perf" in pd.Index.hasnans.__doc__ + assert "dtype str " in pd.Index.dtype_str.__doc__
Also fixes lookups/docstrings for class-level access to cache_readonly attributes. - [x] closes #19700 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry Related: https://github.com/pandas-dev/pandas2/issues/27
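A brief sketch of the behavior change, adapted from the new test (the immutable Index keeps caching `hasnans`, while the mutable Series recomputes it):

```python
import numpy as np
import pandas as pd

idx = pd.Index([0, 1])
assert not idx.hasnans      # cached on the immutable Index

ser = idx.to_series()
assert not ser.hasnans
ser.iloc[-1] = np.nan
assert ser.hasnans          # recomputed, not served from a stale cache
```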
https://api.github.com/repos/pandas-dev/pandas/pulls/19709
2018-02-15T06:35:41Z
2018-02-16T03:36:37Z
null
2020-04-05T17:39:43Z
Fix the non cython build for cpp extensions
diff --git a/setup.py b/setup.py index c66979dd19ef0..c7784260d79ca 100755 --- a/setup.py +++ b/setup.py @@ -311,7 +311,6 @@ class CheckSDist(sdist_class): 'pandas/_libs/missing.pyx', 'pandas/_libs/reduction.pyx', 'pandas/_libs/testing.pyx', - 'pandas/_libs/window.pyx', 'pandas/_libs/skiplist.pyx', 'pandas/_libs/sparse.pyx', 'pandas/_libs/parsers.pyx', @@ -331,6 +330,10 @@ class CheckSDist(sdist_class): 'pandas/_libs/writers.pyx', 'pandas/io/sas/sas.pyx'] + _cpp_pyxfiles = ['pandas/_libs/window.pyx', + 'pandas/io/msgpack/_packer.pyx', + 'pandas/io/msgpack/_unpacker.pyx'] + def initialize_options(self): sdist_class.initialize_options(self) @@ -338,12 +341,17 @@ def run(self): if 'cython' in cmdclass: self.run_command('cython') else: - for pyxfile in self._pyxfiles: - cfile = pyxfile[:-3] + 'c' - msg = ("C-source file '{source}' not found.\n" - "Run 'setup.py cython' before sdist.".format( - source=cfile)) - assert os.path.isfile(cfile), msg + # If we are not running cython then + # compile the extensions correctly + pyx_files = [(self._pyxfiles, 'c'), (self._cpp_pyxfiles, 'cpp')] + + for pyxfiles, extension in pyx_files: + for pyxfile in pyxfiles: + sourcefile = pyxfile[:-3] + extension + msg = ("{extension}-source file '{source}' not found.\n" + "Run 'setup.py cython' before sdist.".format( + source=sourcefile, extension=extension)) + assert os.path.isfile(sourcefile), msg sdist_class.run(self) @@ -417,6 +425,11 @@ def get_tag(self): cmdclass['build_src'] = DummyBuildSrc cmdclass['build_ext'] = CheckingBuildExt +if sys.byteorder == 'big': + endian_macro = [('__BIG_ENDIAN__', '1')] +else: + endian_macro = [('__LITTLE_ENDIAN__', '1')] + lib_depends = ['inference'] @@ -453,6 +466,7 @@ def pxd(name): 'pandas/_libs/src/datetime/np_datetime_strings.h'] np_datetime_sources = ['pandas/_libs/src/datetime/np_datetime.c', 'pandas/_libs/src/datetime/np_datetime_strings.c'] + tseries_depends = np_datetime_headers + ['pandas/_libs/tslibs/np_datetime.pxd'] # some linux distros require it @@ -618,17 +632,42 @@ def pxd(name): '_libs.window': { 'pyxfile': '_libs/window', 'pxdfiles': ['_libs/skiplist', '_libs/src/util'], - 'language': 'c++'}, + 'language': 'c++', + 'suffix': '.cpp'}, '_libs.writers': { 'pyxfile': '_libs/writers', 'pxdfiles': ['_libs/src/util']}, 'io.sas._sas': { - 'pyxfile': 'io/sas/sas'}} + 'pyxfile': 'io/sas/sas'}, + 'io.msgpack._packer': { + 'macros': endian_macro, + 'depends': ['pandas/_libs/src/msgpack/pack.h', + 'pandas/_libs/src/msgpack/pack_template.h'], + 'include': ['pandas/_libs/src/msgpack'] + common_include, + 'language': 'c++', + 'suffix': '.cpp', + 'pyxfile': 'io/msgpack/_packer', + 'subdir': 'io/msgpack'}, + 'io.msgpack._unpacker': { + 'depends': ['pandas/_libs/src/msgpack/unpack.h', + 'pandas/_libs/src/msgpack/unpack_define.h', + 'pandas/_libs/src/msgpack/unpack_template.h'], + 'macros': endian_macro, + 'include': ['pandas/_libs/src/msgpack'] + common_include, + 'language': 'c++', + 'suffix': '.cpp', + 'pyxfile': 'io/msgpack/_unpacker', + 'subdir': 'io/msgpack' + } +} extensions = [] for name, data in ext_data.items(): - sources = [srcpath(data['pyxfile'], suffix=suffix, subdir='')] + source_suffix = suffix if suffix == '.pyx' else data.get('suffix', '.c') + + sources = [srcpath(data['pyxfile'], suffix=source_suffix, subdir='')] + pxds = [pxd(x) for x in data.get('pxdfiles', [])] if suffix == '.pyx' and pxds: sources.extend(pxds) @@ -642,46 +681,11 @@ def pxd(name): depends=data.get('depends', []), include_dirs=include, language=data.get('language', 'c'), + 
define_macros=data.get('macros', []), extra_compile_args=extra_compile_args) extensions.append(obj) -# ---------------------------------------------------------------------- -# msgpack - -if sys.byteorder == 'big': - macros = [('__BIG_ENDIAN__', '1')] -else: - macros = [('__LITTLE_ENDIAN__', '1')] - -msgpack_include = ['pandas/_libs/src/msgpack'] + common_include -msgpack_suffix = suffix if suffix == '.pyx' else '.cpp' -unpacker_depends = ['pandas/_libs/src/msgpack/unpack.h', - 'pandas/_libs/src/msgpack/unpack_define.h', - 'pandas/_libs/src/msgpack/unpack_template.h'] - -packer_ext = Extension('pandas.io.msgpack._packer', - depends=['pandas/_libs/src/msgpack/pack.h', - 'pandas/_libs/src/msgpack/pack_template.h'], - sources=[srcpath('_packer', - suffix=msgpack_suffix, - subdir='io/msgpack')], - language='c++', - include_dirs=msgpack_include, - define_macros=macros, - extra_compile_args=extra_compile_args) -unpacker_ext = Extension('pandas.io.msgpack._unpacker', - depends=unpacker_depends, - sources=[srcpath('_unpacker', - suffix=msgpack_suffix, - subdir='io/msgpack')], - language='c++', - include_dirs=msgpack_include, - define_macros=macros, - extra_compile_args=extra_compile_args) -extensions.append(packer_ext) -extensions.append(unpacker_ext) - # ---------------------------------------------------------------------- # ujson
- [ ] closes #19668 - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry cc @TomAugspurger @jreback This _should_ fix the problem with building in a non-Cython environment. The problem is that we assume extensions are C everywhere, and I didn't realize that this was a tested code path. Also, setup.py might be in need of a rethink; there are a lot of special conditions that I think could be cleaned up with some better structure.
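The heart of the fix is pairing each group of `.pyx` files with the source suffix Cython translates them to, so the sdist check looks for `window.cpp` rather than `window.c`. A minimal standalone sketch of that check (the file lists here are hypothetical stand-ins, not the real ones from setup.py):

```python
import os

# Hypothetical stand-ins for the real lists in setup.py.
c_pyxfiles = ['pandas/_libs/algos.pyx']
cpp_pyxfiles = ['pandas/_libs/window.pyx']

# Pair each group of .pyx files with the suffix Cython translates them to,
# then verify the translated source exists before building the sdist.
for pyxfiles, extension in [(c_pyxfiles, 'c'), (cpp_pyxfiles, 'cpp')]:
    for pyxfile in pyxfiles:
        sourcefile = pyxfile[:-3] + extension   # 'window.pyx' -> 'window.cpp'
        if not os.path.isfile(sourcefile):
            print("{}-source file '{}' not found; run 'setup.py cython' "
                  "before sdist".format(extension, sourcefile))
```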
https://api.github.com/repos/pandas-dev/pandas/pulls/19707
2018-02-15T00:57:10Z
2018-02-19T23:38:43Z
2018-02-19T23:38:43Z
2018-02-19T23:38:47Z
DOC: Adding guide for the pandas documentation sprint
diff --git a/doc/source/contributing.rst b/doc/source/contributing.rst
index 258ab874cafcf..d0c5032079288 100644
--- a/doc/source/contributing.rst
+++ b/doc/source/contributing.rst
@@ -292,12 +292,9 @@ Some other important things to know about the docs:
   overviews per topic together with some other information (what's new,
   installation, etc).

-- The docstrings follow the **Numpy Docstring Standard**, which is used widely
-  in the Scientific Python community. This standard specifies the format of
-  the different sections of the docstring. See `this document
-  <https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt>`_
-  for a detailed explanation, or look at some of the existing functions to
-  extend it in a similar manner.
+- The docstrings follow a pandas convention, based on the **Numpy Docstring
+  Standard**. Follow the :ref:`pandas docstring guide <docstring>` for detailed
+  instructions on how to write a correct docstring.

 - The tutorials make heavy use of the `ipython directive
   <http://matplotlib.org/sampledoc/ipython_directive.html>`_ sphinx extension.
diff --git a/doc/source/contributing_docstring.rst b/doc/source/contributing_docstring.rst
new file mode 100644
index 0000000000000..cd56b76fa891b
--- /dev/null
+++ b/doc/source/contributing_docstring.rst
@@ -0,0 +1,910 @@
+.. _docstring:
+
+======================
+pandas docstring guide
+======================
+
+.. note::
+    `Video tutorial: Pandas docstring guide
+    <https://www.youtube.com/watch?v=EOA0lUeW4NI>`_ by Frank Akogun.
+
+About docstrings and standards
+------------------------------
+
+A Python docstring is a string used to document a Python module, class,
+function or method, so programmers can understand what it does without having
+to read the details of the implementation.
+
+Also, it is a common practice to generate online (html) documentation
+automatically from docstrings. `Sphinx <http://www.sphinx-doc.org>`_ serves
+this purpose.
+
+The following example gives an idea of what a docstring looks like:
+
+.. code-block:: python
+
+    def add(num1, num2):
+        """
+        Add up two integer numbers.
+
+        This function simply wraps the `+` operator, and does not
+        do anything interesting, except for illustrating what
+        the docstring of a very simple function looks like.
+
+        Parameters
+        ----------
+        num1 : int
+            First number to add.
+        num2 : int
+            Second number to add.
+
+        Returns
+        -------
+        int
+            The sum of `num1` and `num2`.
+
+        See Also
+        --------
+        subtract : Subtract one integer from another.
+
+        Examples
+        --------
+        >>> add(2, 2)
+        4
+        >>> add(25, 0)
+        25
+        >>> add(10, -10)
+        0
+        """
+        return num1 + num2
+
+Some standards exist for docstrings, so they are easier to read, and they can
+be exported to other formats such as html or pdf.
+
+The first conventions every Python docstring should follow are defined in
+`PEP-257 <https://www.python.org/dev/peps/pep-0257/>`_.
+
+As PEP-257 is quite open, some other standards exist on top of it. In the
+case of pandas, the numpy docstring convention is followed. The convention is
+explained in this document:
+
+- `numpydoc docstring guide <http://numpydoc.readthedocs.io/en/latest/format.html>`_
+  (which is based on the original `Guide to NumPy/SciPy documentation
+  <https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt>`_)
+
+numpydoc is a Sphinx extension to support the numpy docstring convention.
+
+The standard uses reStructuredText (reST). reStructuredText is a markup
+language that allows encoding styles in plain text files.
Documentation
+about reStructuredText can be found in:
+
+- `Sphinx reStructuredText primer <http://www.sphinx-doc.org/en/stable/rest.html>`_
+- `Quick reStructuredText reference <http://docutils.sourceforge.net/docs/user/rst/quickref.html>`_
+- `Full reStructuredText specification <http://docutils.sourceforge.net/docs/ref/rst/restructuredtext.html>`_
+
+The rest of this document will summarize all the above guides, and will
+provide additional conventions specific to the pandas project.
+
+.. _docstring.tutorial:
+
+Writing a docstring
+-------------------
+
+.. _docstring.general:
+
+General rules
+~~~~~~~~~~~~~
+
+Docstrings must be defined with three double-quotes. No blank lines should be
+left before or after the docstring. The text starts in the next line after the
+opening quotes. The closing quotes have their own line
+(meaning that they are not at the end of the last sentence).
+
+On rare occasions reST styles like bold text or italics will be used in
+docstrings, but it is common to have inline code, which is presented between
+backticks. The following are considered inline code:
+
+- The name of a parameter
+- Python code, a module, function, built-in, type, literal... (e.g. ``os``,
+  ``list``, ``numpy.abs``, ``datetime.date``, ``True``)
+- A pandas class (in the form ``:class:`~pandas.Series```)
+- A pandas method (in the form ``:meth:`pandas.Series.sum```)
+- A pandas function (in the form ``:func:`pandas.to_datetime```)
+
+**Good:**
+
+.. code-block:: python
+
+    def add_values(arr):
+        """
+        Add the values in `arr`.
+
+        This is equivalent to Python `sum` or :meth:`pandas.Series.sum`.
+
+        Some sections are omitted here for simplicity.
+        """
+        return sum(arr)
+
+**Bad:**
+
+.. code-block:: python
+
+    def func():
+
+        """Some function.
+
+        With several mistakes in the docstring.
+
+        It has a blank line after the signature `def func():`.
+
+        The text 'Some function' should go in the line after the
+        opening quotes of the docstring, not in the same line.
+
+        There is a blank line between the docstring and the first line
+        of code `foo = 1`.
+
+        The closing quotes should be in the next line, not in this one."""
+
+        foo = 1
+        bar = 2
+        return foo + bar
+
+.. _docstring.short_summary:
+
+Section 1: Short summary
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+The short summary is a single sentence that expresses what the function does in
+a concise way.
+
+The short summary must start with a capital letter, end with a dot, and fit in
+a single line. It needs to express what the object does without providing
+details. For functions and methods, the short summary must start with an
+infinitive verb.
+
+**Good:**
+
+.. code-block:: python
+
+    def astype(dtype):
+        """
+        Cast Series type.
+
+        This section will provide further details.
+        """
+        pass
+
+**Bad:**
+
+.. code-block:: python
+
+    def astype(dtype):
+        """
+        Casts Series type.
+
+        Verb in third-person of the present simple, should be infinitive.
+        """
+        pass
+
+    def astype(dtype):
+        """
+        Method to cast Series type.
+
+        Does not start with a verb.
+        """
+        pass
+
+    def astype(dtype):
+        """
+        Cast Series type
+
+        Missing dot at the end.
+        """
+        pass
+
+    def astype(dtype):
+        """
+        Cast Series type from its current type to the new type defined in
+        the parameter dtype.
+
+        Summary is too verbose and doesn't fit in a single line.
+        """
+        pass
+
+.. _docstring.extended_summary:
+
+Section 2: Extended summary
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The extended summary provides details on what the function does.
It should not
+go into the details of the parameters, or discuss implementation notes, which
+go in other sections.
+
+A blank line is left between the short summary and the extended summary, and
+every paragraph in the extended summary ends with a dot.
+
+The extended summary should provide details on why the function is useful and
+its use cases, if it is not too generic.
+
+.. code-block:: python
+
+    def unstack():
+        """
+        Pivot a row index to columns.
+
+        When using a multi-index, a level can be pivoted so each value in
+        the index becomes a column. This is especially useful when a subindex
+        is repeated for the main index, and data is easier to visualize as a
+        pivot table.
+
+        The index level will be automatically removed from the index when added
+        as columns.
+        """
+        pass
+
+.. _docstring.parameters:
+
+Section 3: Parameters
+~~~~~~~~~~~~~~~~~~~~~
+
+The details of the parameters will be added in this section. This section has
+the title "Parameters", followed by a line with a hyphen under each letter of
+the word "Parameters". A blank line is left before the section title, but not
+after, and not between the line with the word "Parameters" and the one with
+the hyphens.
+
+After the title, each parameter in the signature must be documented, including
+`*args` and `**kwargs`, but not `self`.
+
+The parameters are defined by their name, followed by a space, a colon, another
+space, and the type (or types). Note that the space between the name and the
+colon is important. Types are not defined for `*args` and `**kwargs`, but must
+be defined for all other parameters. After the parameter definition, it is
+required to have a line with the parameter description, which is indented, and
+can have multiple lines. The description must start with a capital letter, and
+finish with a dot.
+
+For keyword arguments with a default value, the default will be listed after a
+comma at the end of the type. The exact form of the type in this case will be
+"int, default 0". In some cases it may be useful to explain what the default
+argument means, which can be added after a comma "int, default -1, meaning all
+cpus".
+
+In cases where the default value is `None`, meaning that the value will not be
+used, it is preferred to write "str, optional" instead of "str, default None".
+When `None` is a value being used, we will keep the form "str, default None".
+For example, in `df.to_csv(compression=None)`, `None` is not a value being used,
+but means that compression is optional, and no compression is being used if not
+provided. In this case we will use `str, optional`. Only in cases like
+`func(value=None)`, where `None` is being used in the same way as `0` or `foo`
+would be, do we specify "str, int or None, default None".
+
+**Good:**
+
+.. code-block:: python
+
+    class Series:
+        def plot(self, kind, color='blue', **kwargs):
+            """
+            Generate a plot.
+
+            Render the data in the Series as a matplotlib plot of the
+            specified kind.
+
+            Parameters
+            ----------
+            kind : str
+                Kind of matplotlib plot.
+            color : str, default 'blue'
+                Color name or rgb code.
+            **kwargs
+                These parameters will be passed to the matplotlib plotting
+                function.
+            """
+            pass
+
+**Bad:**
+
+.. code-block:: python
+
+    class Series:
+        def plot(self, kind, **kwargs):
+            """
+            Generate a plot.
+
+            Render the data in the Series as a matplotlib plot of the
+            specified kind.
+
+            Note the blank line between the parameters title and the first
+            parameter.
Also, note that after the name of the parameter `kind`
+            and before the colon, a space is missing.
+
+            Also, note that the parameter descriptions do not start with a
+            capital letter, and do not finish with a dot.
+
+            Finally, the `**kwargs` parameter is missing.
+
+            Parameters
+            ----------
+
+            kind: str
+                kind of matplotlib plot
+            """
+            pass
+
+.. _docstring.parameter_types:
+
+Parameter types
+^^^^^^^^^^^^^^^
+
+When specifying the parameter types, Python built-in data types can be used
+directly (the Python type is preferred to the more verbose string, integer,
+boolean, etc):
+
+- int
+- float
+- str
+- bool
+
+For complex types, define the subtypes. For `dict` and `tuple`, as more than
+one type is present, we use the brackets to help read the type (curly brackets
+for `dict` and normal brackets for `tuple`):
+
+- list of int
+- dict of {str : int}
+- tuple of (str, int, int)
+- tuple of (str,)
+- set of str
+
+In cases where only a set of values is allowed, list them in curly
+brackets, separated by commas (followed by a space). If the values are
+ordinal, list them in that order. Otherwise, list
+the default value first, if there is one:
+
+- {0, 10, 25}
+- {'simple', 'advanced'}
+- {'low', 'medium', 'high'}
+- {'cat', 'dog', 'bird'}
+
+If the type is defined in a Python module, the module must be specified:
+
+- datetime.date
+- datetime.datetime
+- decimal.Decimal
+
+If the type is in a package, the module must also be specified:
+
+- numpy.ndarray
+- scipy.sparse.coo_matrix
+
+If the type is a pandas type, also specify pandas except for Series and
+DataFrame:
+
+- Series
+- DataFrame
+- pandas.Index
+- pandas.Categorical
+- pandas.SparseArray
+
+If the exact type is not relevant, but must be compatible with a numpy
+array, array-like can be specified. If any type that can be iterated is
+accepted, iterable can be used:
+
+- array-like
+- iterable
+
+If more than one type is accepted, separate them by commas, except the
+last two types, which need to be separated by the word 'or':
+
+- int or float
+- float, decimal.Decimal or None
+- str or list of str
+
+If ``None`` is one of the accepted values, it always needs to be the last in
+the list.
+
+For axis, the convention is to use something like:
+
+- axis : {0 or 'index', 1 or 'columns', None}, default None
+
+.. _docstring.returns:
+
+Section 4: Returns or Yields
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+If the method returns a value, it will be documented in this section, and also
+if the method yields its output.
+
+The title of the section is defined in the same way as "Parameters",
+with the name "Returns" or "Yields" followed by a line with as many hyphens
+as there are letters in the preceding word.
+
+The documentation of the return value is similar to that of the parameters, but
+in this case no name will be provided, unless the method returns or yields more
+than one value (a tuple of values).
+
+The types for "Returns" and "Yields" are the same as the ones for the
+"Parameters". Also, the description must finish with a dot.
+
+For example, with a single value:
+
+.. code-block:: python
+
+    def sample():
+        """
+        Generate and return a random number.
+
+        The value is sampled from a continuous uniform distribution between
+        0 and 1.
+
+        Returns
+        -------
+        float
+            Random number generated.
+        """
+        return random.random()
+
+With more than one value:
+
+.. code-block:: python
+
+    def random_letters():
+        """
+        Generate and return a sequence of random letters.
+
+        The length of the returned string is also random, and is also
+        returned.
+
+        Returns
+        -------
+        length : int
+            Length of the returned string.
+        letters : str
+            String of random letters.
+        """
+        length = random.randint(1, 10)
+        letters = ''.join(random.choice(string.ascii_lowercase)
+                          for i in range(length))
+        return length, letters
+
+If the method yields its value:
+
+.. code-block:: python
+
+    def sample_values():
+        """
+        Generate an infinite sequence of random numbers.
+
+        The values are sampled from a continuous uniform distribution between
+        0 and 1.
+
+        Yields
+        ------
+        float
+            Random number generated.
+        """
+        while True:
+            yield random.random()
+
+.. _docstring.see_also:
+
+Section 5: See Also
+~~~~~~~~~~~~~~~~~~~
+
+This section is used to let users know about pandas functionality
+related to the one being documented. In rare cases, if no related methods
+or functions can be found at all, this section can be skipped.
+
+An obvious example would be the `head()` and `tail()` methods. As `tail()` does
+the equivalent of `head()` but at the end of the `Series` or `DataFrame`
+instead of at the beginning, it is good to let the users know about it.
+
+To give an intuition on what can be considered related, here are some
+examples:
+
+* ``loc`` and ``iloc``, as they do the same, but in one case providing indices
+  and in the other positions
+* ``max`` and ``min``, as they do the opposite
+* ``iterrows``, ``itertuples`` and ``iteritems``, as a user
+  looking for the method to iterate over columns can easily end up at the
+  method to iterate over rows, and vice-versa
+* ``fillna`` and ``dropna``, as both methods are used to handle missing values
+* ``read_csv`` and ``to_csv``, as they are complementary
+* ``merge`` and ``join``, as one is a generalization of the other
+* ``astype`` and ``pandas.to_datetime``, as users may be reading the
+  documentation of ``astype`` to know how to cast as a date, and the way to do
+  it is with ``pandas.to_datetime``
+* ``where`` is related to ``numpy.where``, as its functionality is based on it
+
+When deciding what is related, you should mainly use your common sense and
+think about what can be useful for the users reading the documentation,
+especially the less experienced ones.
+
+When relating to other libraries (mainly ``numpy``), use the name of the module
+first (not an alias like ``np``). If the function is in a module which is not
+the main one, like ``scipy.sparse``, list the full module (e.g.
+``scipy.sparse.coo_matrix``).
+
+This section, like the previous ones, also has a header, "See Also" (note the
+capital S and A), followed by the line with hyphens and preceded by a blank
+line.
+
+After the header, we will add a line for each related method or function,
+followed by a space, a colon, another space, and a short description that
+illustrates what this method or function does, why it is relevant in this
+context, and what the key differences are between the documented function and
+the one being referenced. The description must also finish with a dot.
+
+Note that in "Returns" and "Yields", the description is located on the line
+following the type, but in this section it is located on the same line, with
+a colon in between. If the description does not fit on the same line, it can
+continue onto the next ones, but it has to be indented in them.
+
+For example:
+
+.. code-block:: python
+
+    class Series:
+        def head(self):
+            """
+            Return the first 5 elements of the Series.
+
+            This function is mainly useful to preview the values of the
+            Series without displaying the whole of it.
+
+            Returns
+            -------
+            Series
+                Subset of the original series with the 5 first values.
+
+            See Also
+            --------
+            Series.tail : Return the last 5 elements of the Series.
+            Series.iloc : Return a slice of the elements in the Series,
+                which can also be used to return the first or last n.
+            """
+            return self.iloc[:5]
+
+.. _docstring.notes:
+
+Section 6: Notes
+~~~~~~~~~~~~~~~~
+
+This is an optional section used for notes about the implementation of the
+algorithm, or to document technical aspects of the function's behavior.
+
+Feel free to skip it, unless you are familiar with the implementation of the
+algorithm, or you discover some counter-intuitive behavior while writing the
+examples for the function.
+
+This section follows the same format as the extended summary section.
+
+.. _docstring.examples:
+
+Section 7: Examples
+~~~~~~~~~~~~~~~~~~~
+
+This is one of the most important sections of a docstring, even if it is
+placed in the last position, as people often understand concepts better
+with examples than with precise explanations.
+
+Examples in docstrings, besides illustrating the usage of the function or
+method, must be valid Python code that returns the presented output in a
+deterministic way, and that can be copied and run by users.
+
+They are presented as a session in the Python terminal. `>>>` is used to
+present code. `...` is used for code continuing from the previous line.
+Output is presented immediately after the last line of code generating the
+output (no blank lines in between). Comments describing the examples can
+be added with blank lines before and after them.
+
+The way to present examples is as follows:
+
+1. Import required libraries (except ``numpy`` and ``pandas``)
+
+2. Create the data required for the example
+
+3. Show a very basic example that gives an idea of the most common use case
+
+4. Add examples with explanations that illustrate how the parameters can be
+   used for extended functionality
+
+A simple example could be:
+
+.. code-block:: python
+
+    class Series:
+        def head(self, n=5):
+            """
+            Return the first elements of the Series.
+
+            This function is mainly useful to preview the values of the
+            Series without displaying the whole of it.
+
+            Parameters
+            ----------
+            n : int
+                Number of values to return.
+
+            Returns
+            -------
+            pandas.Series
+                Subset of the original series with the n first values.
+
+            See Also
+            --------
+            tail : Return the last n elements of the Series.
+
+            Examples
+            --------
+            >>> s = pd.Series(['Ant', 'Bear', 'Cow', 'Dog', 'Falcon',
+            ...                'Lion', 'Monkey', 'Rabbit', 'Zebra'])
+            >>> s.head()
+            0    Ant
+            1    Bear
+            2    Cow
+            3    Dog
+            4    Falcon
+            dtype: object
+
+            With the `n` parameter, we can change the number of returned rows:
+
+            >>> s.head(n=3)
+            0    Ant
+            1    Bear
+            2    Cow
+            dtype: object
+            """
+            return self.iloc[:n]
+
+The examples should be as concise as possible. In cases where the complexity of
+the function requires long examples, it is recommended to use blocks with
+headers in bold. Use double star ``**`` to make a text bold, like in
+``**this example**``.
+
+.. _docstring.example_conventions:
+
+Conventions for the examples
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Code in examples is assumed to always start with these two lines which are not
+shown:
+
+..
code-block:: python
+
+    import numpy as np
+    import pandas as pd
+
+
+Any other module used in the examples must be explicitly imported, one per line (as
+recommended in `PEP-8 <https://www.python.org/dev/peps/pep-0008/#imports>`_),
+avoiding aliases. Avoid excessive imports, but if needed, imports from
+the standard library go first, followed by third-party libraries (like
+matplotlib).
+
+When illustrating examples with a single ``Series`` use the name ``s``, and if
+illustrating with a single ``DataFrame`` use the name ``df``. For indices,
+``idx`` is the preferred name. If a set of homogeneous ``Series`` or
+``DataFrame`` is used, name them ``s1``, ``s2``, ``s3``... or ``df1``,
+``df2``, ``df3``... If the data is not homogeneous, and more than one structure
+is needed, name them with something meaningful, for example ``df_main`` and
+``df_to_join``.
+
+Data used in the example should be as compact as possible. The number of rows
+is recommended to be around 4, but make it a number that makes sense for the
+specific example. For the ``head`` method, for example, it needs to be higher
+than 5, to show the example with the default values. If doing the ``mean``, we
+could use something like ``[1, 2, 3]``, so it is easy to see that the value
+returned is the mean.
+
+For more complex examples (grouping, for example), avoid using data without
+interpretation, like a matrix of random numbers with columns A, B, C, D...,
+and instead use a meaningful example, which makes it easier to understand the
+concept. Unless required by the example, use names of animals and their
+numerical properties, to keep examples consistent.
+
+When calling the method, keyword arguments ``head(n=3)`` are preferred to
+positional arguments ``head(3)``.
+
+**Good:**
+
+.. code-block:: python
+
+    class Series:
+        def mean(self):
+            """
+            Compute the mean of the input.
+
+            Examples
+            --------
+            >>> s = pd.Series([1, 2, 3])
+            >>> s.mean()
+            2
+            """
+            pass
+
+
+        def fillna(self, value):
+            """
+            Replace missing values by `value`.
+
+            Examples
+            --------
+            >>> s = pd.Series([1, np.nan, 3])
+            >>> s.fillna(0)
+            [1, 0, 3]
+            """
+            pass
+
+        def groupby_mean(self):
+            """
+            Group by index and return mean.
+
+            Examples
+            --------
+            >>> s = pd.Series([380., 370., 24., 26],
+            ...               name='max_speed',
+            ...               index=['falcon', 'falcon', 'parrot', 'parrot'])
+            >>> s.groupby_mean()
+            index
+            falcon    375.0
+            parrot     25.0
+            Name: max_speed, dtype: float64
+            """
+            pass
+
+        def contains(self, pattern, case_sensitive=True, na=np.nan):
+            """
+            Return whether each value contains `pattern`.
+
+            In this case, we are illustrating how to use sections, even
+            if the example is simple enough and does not require them.
+
+            Examples
+            --------
+            >>> s = pd.Series(['Antelope', 'Lion', 'Zebra', np.nan])
+            >>> s.contains(pattern='a')
+            0    False
+            1    False
+            2    True
+            3    NaN
+            dtype: bool
+
+            **Case sensitivity**
+
+            With `case_sensitive` set to `False` we can match `a` with both
+            `a` and `A`:
+
+            >>> s.contains(pattern='a', case_sensitive=False)
+            0    True
+            1    False
+            2    True
+            3    NaN
+            dtype: bool
+
+            **Missing values**
+
+            We can fill missing values in the output using the `na` parameter:
+
+            >>> s.contains(pattern='a', na=False)
+            0    False
+            1    False
+            2    True
+            3    False
+            dtype: bool
+            """
+            pass
+
+**Bad:**
+
+.. code-block:: python
+
+    def method(foo=None, bar=None):
+        """
+        A sample DataFrame method.
+
+        Do not import numpy and pandas.
+
+        Try to use meaningful data, when it makes the example easier
+        to understand.
+
+        Try to avoid positional arguments like in `df.method(1)`. They
+        can be all right if previously defined with a meaningful name,
+        like in `present_value(interest_rate)`, but avoid them otherwise.
+
+        When presenting the behavior with different parameters, do not place
+        all the calls one next to the other. Instead, add a short sentence
+        explaining what the example shows.
+
+        Examples
+        --------
+        >>> import numpy as np
+        >>> import pandas as pd
+        >>> df = pd.DataFrame(np.random.randn(3, 3),
+        ...                   columns=('a', 'b', 'c'))
+        >>> df.method(1)
+        21
+        >>> df.method(bar=14)
+        123
+        """
+        pass
+
+
+.. _docstring.doctest_tips:
+
+Tips for getting your examples to pass the doctests
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Getting the examples to pass the doctests in the validation script can
+sometimes be tricky. Here are some attention points:
+
+* Import all needed libraries (except for pandas and numpy, those are already
+  imported as ``import pandas as pd`` and ``import numpy as np``) and define
+  all variables you use in the example.
+
+* Try to avoid using random data. However, random data might be OK in some
+  cases, like if the function you are documenting deals with probability
+  distributions, or if the amount of data needed to make the function result
+  meaningful is too much, such that creating it manually is very cumbersome.
+  In those cases, always use a fixed random seed to make the generated examples
+  predictable. Example::
+
+    >>> np.random.seed(42)
+    >>> df = pd.DataFrame({'normal': np.random.normal(100, 5, 20)})
+
+* If you have a code snippet that wraps multiple lines, you need to use '...'
+  on the continued lines: ::
+
+    >>> df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], index=['a', 'b', 'c'],
+    ...                   columns=['A', 'B'])
+
+* If you want to show a case where an exception is raised, you can do::
+
+    >>> pd.to_datetime(["712-01-01"])
+    Traceback (most recent call last):
+    OutOfBoundsDatetime: Out of bounds nanosecond timestamp: 712-01-01 00:00:00
+
+  It is essential to include the "Traceback (most recent call last):", but for
+  the actual error only the error name is sufficient.
+
+* If there is a small part of the result that can vary (e.g. a hash in an object
+  representation), you can use ``...`` to represent this part.
+
+  If you want to show that ``s.plot()`` returns a matplotlib AxesSubplot object,
+  this will fail the doctest ::
+
+    >>> s.plot()
+    <matplotlib.axes._subplots.AxesSubplot at 0x7efd0c0b0690>
+
+  However, you can do (notice the comment that needs to be added) ::
+
+    >>> s.plot()  # doctest: +ELLIPSIS
+    <matplotlib.axes._subplots.AxesSubplot at ...>
+
+
+.. _docstring.example_plots:
+
+Plots in examples
+^^^^^^^^^^^^^^^^^
+
+Some methods in pandas return plots. To render the plots generated
+by the examples in the documentation, the ``.. plot::`` directive exists.
+
+To use it, place the following code after the "Examples" header as shown
+below. The plot will be generated automatically when building the
+documentation.
+
+.. code-block:: python
+
+    class Series:
+        def plot(self):
+            """
+            Generate a plot with the `Series` data.
+
+            Examples
+            --------
+
+            .. plot::
+                :context: close-figs
+
+                >>> s = pd.Series([1, 2, 3])
+                >>> s.plot()
+            """
+            pass
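For the doctest tips above, the standard library `doctest` module is a quick way to exercise a single docstring before running the full validation script. A minimal sketch, using a made-up `add` function rather than any pandas API:

```python
import doctest

def add(num1, num2):
    """
    Add up two integer numbers.

    Examples
    --------
    >>> add(2, 2)
    4
    """
    return num1 + num2

# Run just this docstring's examples and report any mismatching output,
# which is essentially what the validation script checks per docstring.
doctest.run_docstring_examples(add, {'add': add}, verbose=True)
```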
This PR makes it easier to review the proposed docstring guide for the pandas documentation sprint, as discussed on pandas-dev.
https://api.github.com/repos/pandas-dev/pandas/pulls/19704
2018-02-14T22:06:34Z
2018-03-12T17:32:30Z
2018-03-12T17:32:30Z
2018-03-12T17:44:34Z
Try matplotlib RC
diff --git a/ci/requirements-3.6.sh b/ci/requirements-3.6.sh index f5c3dbf59a29d..4cdd8f90ec278 100644 --- a/ci/requirements-3.6.sh +++ b/ci/requirements-3.6.sh @@ -5,3 +5,5 @@ source activate pandas echo "[install 3.6 downstream deps]" conda install -n pandas -c conda-forge pandas-datareader xarray geopandas seaborn statsmodels scikit-learn dask + +conda install -c conda-forge/label/rc matplotlib -c conda-forge diff --git a/ci/script_multi.sh b/ci/script_multi.sh index 2b2d4d5488b91..18d328ab83788 100755 --- a/ci/script_multi.sh +++ b/ci/script_multi.sh @@ -31,6 +31,8 @@ elif [ "$COVERAGE" ]; then pytest -s -n 2 -m "not single" --cov=pandas --cov-report xml:/tmp/cov-multiple.xml --junitxml=/tmp/multiple.xml --strict $TEST_ARGS pandas elif [ "$SLOW" ]; then + conda install -c conda-forge/label/rc matplotlib -c conda-forge + TEST_ARGS="--only-slow --skip-network" echo pytest -r xX -m "not single and slow" -v --junitxml=/tmp/multiple.xml --strict $TEST_ARGS pandas pytest -r xX -m "not single and slow" -v --junitxml=/tmp/multiple.xml --strict $TEST_ARGS pandas
Don't want to merge this.
https://api.github.com/repos/pandas-dev/pandas/pulls/19702
2018-02-14T21:44:34Z
2018-03-12T20:02:40Z
null
2018-03-12T20:02:44Z
GroupBy Rank SegFault Fix - astype instead of view
diff --git a/pandas/_libs/groupby_helper.pxi.in b/pandas/_libs/groupby_helper.pxi.in index 1d77a373bb7dd..fe4d31516d839 100644 --- a/pandas/_libs/groupby_helper.pxi.in +++ b/pandas/_libs/groupby_helper.pxi.in @@ -531,7 +531,7 @@ def group_rank_{{name}}(ndarray[float64_t, ndim=2] out, # each label corresponds to a different group value, # the mask helps you differentiate missing values before # performing sort on the actual values - _as = np.lexsort(order).view(dtype=np.int64) + _as = np.lexsort(order).astype(np.int64, copy=False) if not ascending: _as = _as[::-1]
xref #19679 and #19481
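The reason `astype` is the right call here is that `np.lexsort` returns a platform-sized integer (`np.intp`, which is 32-bit on 32-bit builds), so reinterpreting the buffer with `.view(np.int64)` corrupts or rejects the indexer there, while `.astype` converts values and `copy=False` keeps it free on 64-bit builds. A small sketch of the difference:

```python
import numpy as np

order = (np.array([0, 0, 1]), np.array([2, 1, 0]))
indexer = np.lexsort(order)   # dtype is np.intp: int32 on 32-bit builds

# astype converts element by element, so values survive on every platform;
# copy=False avoids a copy when the dtype already matches (64-bit builds).
safe = indexer.astype(np.int64, copy=False)

# view only reinterprets the raw buffer: if indexer were int32, an
# odd-length result would raise and an even-length one would fuse pairs
# of 4-byte values into bogus int64s.
unsafe = indexer.view(np.int64)
```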
https://api.github.com/repos/pandas-dev/pandas/pulls/19701
2018-02-14T19:46:29Z
2018-02-14T23:12:15Z
2018-02-14T23:12:15Z
2018-02-14T23:22:37Z
Move conda build and ASV check to cron job
diff --git a/.travis.yml b/.travis.yml index 4cbe7f86bd2fa..b1168f18315c3 100644 --- a/.travis.yml +++ b/.travis.yml @@ -52,7 +52,7 @@ matrix: # In allow_failures - dist: trusty env: - - JOB="3.5_CONDA_BUILD_TEST" TEST_ARGS="--skip-slow --skip-network" CONDA_BUILD_TEST=true + - JOB="3.5" TEST_ARGS="--skip-slow --skip-network" - dist: trusty env: - JOB="3.6" TEST_ARGS="--skip-slow --skip-network" PANDAS_TESTING_MODE="deprecate" CONDA_FORGE=true COVERAGE=true @@ -73,17 +73,13 @@ matrix: env: - JOB="3.6_NUMPY_DEV" TEST_ARGS="--skip-slow --skip-network" PANDAS_TESTING_MODE="deprecate" # In allow_failures - - dist: trusty - env: - - JOB="3.6_ASV" ASV=true - # In allow_failures - dist: trusty env: - JOB="3.6_DOC" DOC=true allow_failures: - dist: trusty env: - - JOB="3.5_CONDA_BUILD_TEST" TEST_ARGS="--skip-slow --skip-network" CONDA_BUILD_TEST=true + - JOB="3.5" TEST_ARGS="--skip-slow --skip-network" - dist: trusty env: - JOB="2.7_SLOW" SLOW=true @@ -97,9 +93,6 @@ matrix: - dist: trusty env: - JOB="3.6_NUMPY_DEV" TEST_ARGS="--skip-slow --skip-network" PANDAS_TESTING_MODE="deprecate" - - dist: trusty - env: - - JOB="3.6_ASV" ASV=true - dist: trusty env: - JOB="3.6_DOC" DOC=true @@ -135,7 +128,6 @@ script: - ci/script_single.sh - ci/script_multi.sh - ci/lint.sh - - ci/asv.sh - echo "checking imports" - source activate pandas && python ci/check_imports.py - echo "script done" diff --git a/ci/asv.sh b/ci/asv.sh deleted file mode 100755 index 1e9a8d6380eb5..0000000000000 --- a/ci/asv.sh +++ /dev/null @@ -1,35 +0,0 @@ -#!/bin/bash - -echo "inside $0" - -source activate pandas - -RET=0 - -if [ "$ASV" ]; then - echo "Check for failed asv benchmarks" - - cd asv_bench - - asv machine --yes - - time asv dev | tee failed_asv.txt - - echo "The following asvs benchmarks (if any) failed." - - cat failed_asv.txt | grep "failed" failed_asv.txt - - if [ $? = "0" ]; then - RET=1 - fi - - echo "DONE displaying failed asvs benchmarks." 
- - rm failed_asv.txt - - echo "Check for failed asv benchmarks DONE" -else - echo "NOT checking for failed asv benchmarks" -fi - -exit $RET diff --git a/ci/install_travis.sh b/ci/install_travis.sh index 6e270519e60c3..458ff083b65eb 100755 --- a/ci/install_travis.sh +++ b/ci/install_travis.sh @@ -50,12 +50,6 @@ conda config --set ssl_verify false || exit 1 conda config --set quiet true --set always_yes true --set changeps1 false || exit 1 conda update -q conda -if [ "$CONDA_BUILD_TEST" ]; then - echo - echo "[installing conda-build]" - conda install conda-build -fi - echo echo "[add channels]" conda config --remove channels defaults || exit 1 @@ -122,7 +116,7 @@ if [ "$COVERAGE" ]; then fi echo -if [ -z "$PIP_BUILD_TEST" ] && [ -z "$CONDA_BUILD_TEST" ]; then +if [ -z "$PIP_BUILD_TEST" ] ; then # build but don't install echo "[build em]" @@ -177,15 +171,6 @@ if [ "$PIP_BUILD_TEST" ]; then conda uninstall -y cython time pip install dist/*tar.gz || exit 1 -elif [ "$CONDA_BUILD_TEST" ]; then - - # build & install testing - echo "[building conda recipe]" - time conda build ./conda.recipe --python 3.5 -q --no-test || exit 1 - - echo "[installing]" - conda install pandas --use-local || exit 1 - else # install our pandas diff --git a/ci/requirements-3.5_CONDA_BUILD_TEST.build b/ci/requirements-3.5.build similarity index 100% rename from ci/requirements-3.5_CONDA_BUILD_TEST.build rename to ci/requirements-3.5.build diff --git a/ci/requirements-3.5_CONDA_BUILD_TEST.pip b/ci/requirements-3.5.pip similarity index 100% rename from ci/requirements-3.5_CONDA_BUILD_TEST.pip rename to ci/requirements-3.5.pip diff --git a/ci/requirements-3.5_CONDA_BUILD_TEST.run b/ci/requirements-3.5.run similarity index 100% rename from ci/requirements-3.5_CONDA_BUILD_TEST.run rename to ci/requirements-3.5.run diff --git a/ci/requirements-3.5_CONDA_BUILD_TEST.sh b/ci/requirements-3.5.sh similarity index 86% rename from ci/requirements-3.5_CONDA_BUILD_TEST.sh rename to ci/requirements-3.5.sh index 093fdbcf21d78..529e1e8742722 100644 --- a/ci/requirements-3.5_CONDA_BUILD_TEST.sh +++ b/ci/requirements-3.5.sh @@ -2,7 +2,7 @@ source activate pandas -echo "install 35 CONDA_BUILD_TEST" +echo "install 35" # pip install python-dateutil to get latest conda remove -n pandas python-dateutil --force diff --git a/ci/requirements-3.6_ASV.build b/ci/requirements-3.6_ASV.build deleted file mode 100644 index bc72eed2a0d4e..0000000000000 --- a/ci/requirements-3.6_ASV.build +++ /dev/null @@ -1,5 +0,0 @@ -python=3.6* -python-dateutil -pytz -numpy=1.13* -cython diff --git a/ci/requirements-3.6_ASV.run b/ci/requirements-3.6_ASV.run deleted file mode 100644 index 6c45e3371e9cf..0000000000000 --- a/ci/requirements-3.6_ASV.run +++ /dev/null @@ -1,25 +0,0 @@ -ipython -ipykernel -ipywidgets -sphinx=1.5* -nbconvert -nbformat -notebook -matplotlib -seaborn -scipy -lxml -beautifulsoup4 -html5lib -pytables -python-snappy -openpyxl -xlrd -xlwt -xlsxwriter -sqlalchemy -numexpr -bottleneck -statsmodels -xarray -pyqt diff --git a/ci/requirements-3.6_ASV.sh b/ci/requirements-3.6_ASV.sh deleted file mode 100755 index 8a46f85dbb6bc..0000000000000 --- a/ci/requirements-3.6_ASV.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash - -source activate pandas - -echo "[install ASV_BUILD deps]" - -pip install git+https://github.com/spacetelescope/asv diff --git a/ci/script_multi.sh b/ci/script_multi.sh index 766e51625fbe6..6c354fc4cab0b 100755 --- a/ci/script_multi.sh +++ b/ci/script_multi.sh @@ -18,7 +18,7 @@ fi export PYTHONHASHSEED=$(python -c 'import random; 
print(random.randint(1, 4294967295))') echo PYTHONHASHSEED=$PYTHONHASHSEED -if [ "$PIP_BUILD_TEST" ] || [ "$CONDA_BUILD_TEST" ]; then +if [ "$PIP_BUILD_TEST" ] ; then echo "[build-test]" echo "[env]" @@ -37,9 +37,6 @@ if [ "$PIP_BUILD_TEST" ] || [ "$CONDA_BUILD_TEST" ]; then elif [ "$DOC" ]; then echo "We are not running pytest as this is a doc-build" -elif [ "$ASV" ]; then - echo "We are not running pytest as this is an asv-build" - elif [ "$COVERAGE" ]; then echo pytest -s -n 2 -m "not single" --cov=pandas --cov-report xml:/tmp/cov-multiple.xml --junitxml=/tmp/multiple.xml --strict $TEST_ARGS pandas pytest -s -n 2 -m "not single" --cov=pandas --cov-report xml:/tmp/cov-multiple.xml --junitxml=/tmp/multiple.xml --strict $TEST_ARGS pandas diff --git a/ci/script_single.sh b/ci/script_single.sh index 153847ab2e8c9..74b0e897f1d73 100755 --- a/ci/script_single.sh +++ b/ci/script_single.sh @@ -16,15 +16,12 @@ if [ "$SLOW" ]; then TEST_ARGS="--only-slow --skip-network" fi -if [ "$PIP_BUILD_TEST" ] || [ "$CONDA_BUILD_TEST" ]; then +if [ "$PIP_BUILD_TEST" ]; then echo "We are not running pytest as this is a build test." elif [ "$DOC" ]; then echo "We are not running pytest as this is a doc-build" -elif [ "$ASV" ]; then - echo "We are not running pytest as this is an asv-build" - elif [ "$COVERAGE" ]; then echo pytest -s -m "single" --strict --cov=pandas --cov-report xml:/tmp/cov-single.xml --junitxml=/tmp/single.xml $TEST_ARGS pandas pytest -s -m "single" --strict --cov=pandas --cov-report xml:/tmp/cov-single.xml --junitxml=/tmp/single.xml $TEST_ARGS pandas
xref #19695 (not closing until pip build test is moved or we decide to keep that here) https://travis-ci.org/pandas-dev/pandas-ci and http://github.com/pandas-dev/pandas-ci
https://api.github.com/repos/pandas-dev/pandas/pulls/19698
2018-02-14T16:22:16Z
2018-02-14T19:56:52Z
2018-02-14T19:56:52Z
2018-02-14T19:58:11Z
remove usages of _get_na_value
diff --git a/pandas/core/indexes/api.py b/pandas/core/indexes/api.py index b7af533f96ddc..2e5ec8b554ce7 100644 --- a/pandas/core/indexes/api.py +++ b/pandas/core/indexes/api.py @@ -2,7 +2,6 @@ _new_Index, _ensure_index, _ensure_index_from_sequences, - _get_na_value, InvalidIndexError) # noqa from pandas.core.indexes.category import CategoricalIndex # noqa from pandas.core.indexes.multi import MultiIndex # noqa @@ -25,7 +24,7 @@ 'InvalidIndexError', 'TimedeltaIndex', 'PeriodIndex', 'DatetimeIndex', '_new_Index', 'NaT', - '_ensure_index', '_ensure_index_from_sequences', '_get_na_value', + '_ensure_index', '_ensure_index_from_sequences', '_get_combined_index', '_get_objs_combined_axis', '_union_indexes', '_get_consensus_names', diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index be7c1624936bf..1a74eb28e3f03 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -2098,7 +2098,7 @@ def asof(self, label): try: loc = self.get_loc(label, method='pad') except KeyError: - return _get_na_value(self.dtype) + return self._na_value else: if isinstance(loc, slice): loc = loc.indices(len(self))[-1] @@ -4316,12 +4316,6 @@ def _ensure_index(index_like, copy=False): return Index(index_like) -def _get_na_value(dtype): - if is_datetime64_any_dtype(dtype) or is_timedelta64_dtype(dtype): - return libts.NaT - return np.nan - - def _ensure_has_len(seq): """If seq is an iterator, put its values into a list.""" try: diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index 94dbd8b884e47..73f4aee1c4880 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -34,7 +34,7 @@ from pandas.core.indexes.base import ( Index, _ensure_index, - _get_na_value, InvalidIndexError, + InvalidIndexError, _index_shared_docs) from pandas.core.indexes.frozen import ( FrozenNDArray, FrozenList, _ensure_frozen) @@ -804,7 +804,7 @@ def values(self): elif box: taken = algos.take_1d(lev._box_values(lev._ndarray_values), lab, - fill_value=_get_na_value(lev.dtype.type)) + fill_value=lev._na_value) else: taken = algos.take_1d(np.asarray(lev._values), lab) values.append(taken) diff --git a/pandas/core/reshape/reshape.py b/pandas/core/reshape/reshape.py index c8bca476c65f2..3ef152d091b24 100644 --- a/pandas/core/reshape/reshape.py +++ b/pandas/core/reshape/reshape.py @@ -29,7 +29,7 @@ import pandas.core.algorithms as algos from pandas._libs import algos as _algos, reshape as _reshape -from pandas.core.index import Index, MultiIndex, _get_na_value +from pandas.core.index import Index, MultiIndex class _Unstacker(object): @@ -260,7 +260,7 @@ def get_new_columns(self): return self.removed_level lev = self.removed_level - return lev.insert(0, _get_na_value(lev.dtype.type)) + return lev.insert(0, lev._na_value) stride = len(self.removed_level) + self.lift width = len(self.value_columns) @@ -299,7 +299,7 @@ def get_new_index(self): if len(self.new_index_levels) == 1: lev, lab = self.new_index_levels[0], result_labels[0] if (lab == -1).any(): - lev = lev.insert(len(lev), _get_na_value(lev.dtype.type)) + lev = lev.insert(len(lev), lev._na_value) return lev.take(lab) return MultiIndex(levels=self.new_index_levels, labels=result_labels, diff --git a/pandas/core/series.py b/pandas/core/series.py index 655eaa5373f5a..90dc14836ab55 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -1261,8 +1261,6 @@ def count(self, level=None): ------- nobs : int or Series (if level specified) """ - from pandas.core.index import _get_na_value - if level is None: return 
notna(com._values_from_object(self)).sum() @@ -1275,7 +1273,7 @@ def count(self, level=None): mask = lab == -1 if mask.any(): lab[mask] = cnt = len(lev) - lev = lev.insert(cnt, _get_na_value(lev.dtype.type)) + lev = lev.insert(cnt, lev._na_value) obs = lab[notna(self.values)] out = np.bincount(obs, minlength=len(lev) or None)
I think the function might also be incorrectly excluding period_dtype.
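For context, a rough mirror of the dispatch the deleted helper performed shows why a period dtype fell through to `np.nan` rather than `NaT`; the `_na_value` attribute on each index class sidesteps this by letting the class declare its own sentinel. This is an illustrative sketch, not pandas internals:

```python
import numpy as np
import pandas as pd

def get_na_value(dtype):
    # Mirror of the removed helper: only datetime64/timedelta64 map to NaT,
    # so a period dtype incorrectly falls through to np.nan.
    if (pd.api.types.is_datetime64_any_dtype(dtype)
            or pd.api.types.is_timedelta64_dtype(dtype)):
        return pd.NaT
    return np.nan

print(get_na_value(np.dtype('datetime64[ns]')))          # NaT
print(get_na_value(pd.PeriodIndex([], freq='D').dtype))  # nan, arguably wrong
```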
https://api.github.com/repos/pandas-dev/pandas/pulls/19692
2018-02-14T03:53:07Z
2018-02-16T18:21:52Z
2018-02-16T18:21:52Z
2018-02-18T18:22:02Z
add missing args, make kwarg explicit
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index be7c1624936bf..81b6b28d3927e 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -3943,8 +3943,8 @@ def _evaluate_with_timedelta_like(self, other, op, opstr, reversed=False): def _evaluate_with_datetime_like(self, other, op, opstr): raise TypeError("can only perform ops with datetime like values") - def _evaluate_compare(self, op): - raise base.AbstractMethodError(self) + def _evaluate_compare(self, other, op): + raise com.AbstractMethodError(self) @classmethod def _add_comparison_methods(cls): diff --git a/pandas/core/internals.py b/pandas/core/internals.py index f553e1a02c9d6..dd5feefc49fe3 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -3569,8 +3569,8 @@ def reduction(self, f, axis=0, consolidate=True, transposed=False, placement=np.arange(len(values)))], axes[0]) - def isna(self, **kwargs): - return self.apply('apply', **kwargs) + def isna(self, func, **kwargs): + return self.apply('apply', func=func, **kwargs) def where(self, **kwargs): return self.apply('where', **kwargs)
Index._evaluate_compare is never hit, but if it were it would raise twice: once for not having the right signature and again for referencing the non-existent `base.AbstractMethodError`. `BlockManager.isna` is entirely non-obvious because there is a required kwarg that isn't in its signature. This makes it explicit.
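The pattern behind the `isna` change is simply lifting a keyword the method cannot work without out of `**kwargs` and into the signature, so a missing argument fails loudly at the call site. A generic sketch of the before/after (not the actual pandas internals):

```python
class Manager:
    def apply(self, action, func=None, **kwargs):
        # Deep inside, 'func' is required; without it we fail obscurely here.
        assert func is not None, "'func' is required for action %r" % action
        return [func(v) for v in kwargs.get('values', [])]

    def isna_implicit(self, **kwargs):
        # 'func' is required but invisible to callers reading the signature.
        return self.apply('apply', **kwargs)

    def isna_explicit(self, func, **kwargs):
        # The requirement is now part of the signature, so omitting 'func'
        # raises a clear TypeError immediately.
        return self.apply('apply', func=func, **kwargs)
```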
https://api.github.com/repos/pandas-dev/pandas/pulls/19691
2018-02-14T03:10:36Z
2018-02-15T12:31:12Z
2018-02-15T12:31:12Z
2018-06-22T03:32:32Z
DOC/BLD: unpin sphinx to use sphinx 1.7
diff --git a/ci/requirements-3.6_DOC.run b/ci/requirements-3.6_DOC.run index 6c45e3371e9cf..084f38ce17eb2 100644 --- a/ci/requirements-3.6_DOC.run +++ b/ci/requirements-3.6_DOC.run @@ -1,7 +1,7 @@ ipython ipykernel ipywidgets -sphinx=1.5* +sphinx nbconvert nbformat notebook diff --git a/ci/requirements_dev.txt b/ci/requirements_dev.txt index a474658fa2922..82f8de277c57b 100644 --- a/ci/requirements_dev.txt +++ b/ci/requirements_dev.txt @@ -7,4 +7,4 @@ pytest>=3.1 python-dateutil>=2.5.0 pytz setuptools>=3.3 -sphinx=1.5* +sphinx diff --git a/doc/sphinxext/numpydoc/numpydoc.py b/doc/sphinxext/numpydoc/numpydoc.py index 2bc2d1e91ed3f..4861aa90edce1 100755 --- a/doc/sphinxext/numpydoc/numpydoc.py +++ b/doc/sphinxext/numpydoc/numpydoc.py @@ -26,7 +26,6 @@ raise RuntimeError("Sphinx 1.0.1 or newer is required") from .docscrape_sphinx import get_doc_object, SphinxDocString -from sphinx.util.compat import Directive if sys.version_info[0] >= 3: sixu = lambda s: s
xref https://github.com/pandas-dev/pandas/issues/18147, closes https://github.com/pandas-dev/pandas/issues/16705 Sphinx 1.7 is released, so let's see whether we can now remove the pinning of sphinx to 1.5.x.
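For context, `sphinx.util.compat.Directive` was removed in Sphinx 1.7, which is why the import is deleted outright: docutils has long provided the class directly. Extensions that still need to support very old Sphinx typically hedge with a fallback import, along these lines (a generic sketch, not the numpydoc code):

```python
try:
    # docutils has been the canonical home of Directive for years.
    from docutils.parsers.rst import Directive
except ImportError:
    # Only reachable on very old environments; removed in Sphinx 1.7.
    from sphinx.util.compat import Directive
```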
https://api.github.com/repos/pandas-dev/pandas/pulls/19687
2018-02-13T21:53:27Z
2018-02-26T08:46:26Z
2018-02-26T08:46:26Z
2018-02-26T08:46:29Z
API: Validate keyword arguments to fillna
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index a4b943f995a33..54fbbb12c2917 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -556,6 +556,7 @@ Datetimelike API Changes - Subtracting ``NaT`` from a :class:`Series` with ``dtype='datetime64[ns]'`` returns a ``Series`` with ``dtype='timedelta64[ns]'`` instead of ``dtype='datetime64[ns]'`` (:issue:`18808`) - Operations between a :class:`Series` with dtype ``dtype='datetime64[ns]'`` and a :class:`PeriodIndex` will correctly raises ``TypeError`` (:issue:`18850`) - Subtraction of :class:`Series` with timezone-aware ``dtype='datetime64[ns]'`` with mis-matched timezones will raise ``TypeError`` instead of ``ValueError`` (:issue:`18817`) +- :func:`pandas.merge` provides a more informative error message when trying to merge on timezone-aware and timezone-naive columns (:issue:`15800`) .. _whatsnew_0230.api.other: @@ -575,7 +576,6 @@ Other API Changes - :func:`Series.fillna` now raises a ``TypeError`` instead of a ``ValueError`` when passed a list, tuple or DataFrame as a ``value`` (:issue:`18293`) - :func:`pandas.DataFrame.merge` no longer casts a ``float`` column to ``object`` when merging on ``int`` and ``float`` columns (:issue:`16572`) - :func:`pandas.merge` now raises a ``ValueError`` when trying to merge on incompatible data types (:issue:`9780`) -- :func:`pandas.merge` provides a more informative error message when trying to merge on timezone-aware and timezone-naive columns (:issue:`15800`) - The default NA value for :class:`UInt64Index` has changed from 0 to ``NaN``, which impacts methods that mask with NA, such as ``UInt64Index.where()`` (:issue:`18398`) - Refactored ``setup.py`` to use ``find_packages`` instead of explicitly listing out all subpackages (:issue:`18535`) - Rearranged the order of keyword arguments in :func:`read_excel()` to align with :func:`read_csv()` (:issue:`16672`) @@ -589,6 +589,7 @@ Other API Changes - :func:`Series.to_csv` now accepts a ``compression`` argument that works in the same way as the ``compression`` argument in :func:`DataFrame.to_csv` (:issue:`18958`) - Set operations (union, difference...) on :class:`IntervalIndex` with incompatible index types will now raise a ``TypeError`` rather than a ``ValueError`` (:issue:`19329`) - :class:`DateOffset` objects render more simply, e.g. ``<DateOffset: days=1>`` instead of ``<DateOffset: kwds={'days': 1}>`` (:issue:`19403`) +- ``Categorical.fillna`` now validates its ``value`` and ``method`` keyword arguments. It now raises when both or none are specified, matching the behavior of :meth:`Series.fillna` (:issue:`19682`) .. 
_whatsnew_0230.deprecations: diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index 7354115f8295e..493b2e5bd899b 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -40,7 +40,7 @@ Appender, cache_readonly, deprecate_kwarg, Substitution) from pandas.io.formats.terminal import get_terminal_size -from pandas.util._validators import validate_bool_kwarg +from pandas.util._validators import validate_bool_kwarg, validate_fillna_kwargs from pandas.core.config import get_option from .base import ExtensionArray @@ -1610,6 +1610,9 @@ def fillna(self, value=None, method=None, limit=None): ------- filled : Categorical with NA/NaN filled """ + value, method = validate_fillna_kwargs( + value, method, validate_scalar_dict_value=False + ) if value is None: value = np.nan diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 297450417e3cf..8034cf89cf8b7 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -54,7 +54,7 @@ import pandas.core.nanops as nanops from pandas.util._decorators import (Appender, Substitution, deprecate_kwarg) -from pandas.util._validators import validate_bool_kwarg +from pandas.util._validators import validate_bool_kwarg, validate_fillna_kwargs from pandas.core import config # goal is to be able to define the docs close to function, while still being @@ -4697,10 +4697,8 @@ def infer_objects(self): def fillna(self, value=None, method=None, axis=None, inplace=False, limit=None, downcast=None): inplace = validate_bool_kwarg(inplace, 'inplace') + value, method = validate_fillna_kwargs(value, method) - if isinstance(value, (list, tuple)): - raise TypeError('"value" parameter must be a scalar or dict, but ' - 'you passed a "{0}"'.format(type(value).__name__)) self._consolidate_inplace() # set the default here, so functions examining the signaure @@ -4711,8 +4709,7 @@ def fillna(self, value=None, method=None, axis=None, inplace=False, method = missing.clean_fill_method(method) from pandas import DataFrame if value is None: - if method is None: - raise ValueError('must specify a fill method or value') + if self._is_mixed_type and axis == 1: if inplace: raise NotImplementedError() @@ -4746,9 +4743,6 @@ def fillna(self, value=None, method=None, axis=None, inplace=False, coerce=True, downcast=downcast) else: - if method is not None: - raise ValueError('cannot specify both a fill method and value') - if len(self._get_axis(axis)) == 0: return self diff --git a/pandas/tests/categorical/test_missing.py b/pandas/tests/categorical/test_missing.py index 79758dee5cfda..fca5573547071 100644 --- a/pandas/tests/categorical/test_missing.py +++ b/pandas/tests/categorical/test_missing.py @@ -1,6 +1,7 @@ # -*- coding: utf-8 -*- import numpy as np +import pytest import pandas.util.testing as tm from pandas import (Categorical, Index, isna) @@ -53,3 +54,18 @@ def test_set_item_nan(self): exp = Categorical([1, np.nan, 3], categories=[1, 2, 3]) tm.assert_categorical_equal(cat, exp) + + @pytest.mark.parametrize('fillna_kwargs, msg', [ + (dict(value=1, method='ffill'), + "Cannot specify both 'value' and 'method'."), + (dict(), + "Must specify a fill 'value' or 'method'."), + (dict(method='bad'), + "Invalid fill method. 
Expecting .* bad"), + ]) + def test_fillna_raises(self, fillna_kwargs, msg): + # https://github.com/pandas-dev/pandas/issues/19682 + cat = Categorical([1, 2, 3]) + + with tm.assert_raises_regex(ValueError, msg): + cat.fillna(**fillna_kwargs) diff --git a/pandas/util/_validators.py b/pandas/util/_validators.py index b30ffc7416f92..a96563051e7de 100644 --- a/pandas/util/_validators.py +++ b/pandas/util/_validators.py @@ -320,3 +320,39 @@ def validate_axis_style_args(data, args, kwargs, arg_name, method_name): msg = "Cannot specify all of '{}', 'index', 'columns'." raise TypeError(msg.format(arg_name)) return out + + +def validate_fillna_kwargs(value, method, validate_scalar_dict_value=True): + """Validate the keyword arguments to 'fillna'. + + This checks that exactly one of 'value' and 'method' is specified. + If 'method' is specified, this validates that it's a valid method. + + Parameters + ---------- + value, method : object + The 'value' and 'method' keyword arguments for 'fillna'. + validate_scalar_dict_value : bool, default True + Whether to validate that 'value' is a scalar or dict. Specifically, + validate that it is not a list or tuple. + + Returns + ------- + value, method : object + """ + from pandas.core.missing import clean_fill_method + + if value is None and method is None: + raise ValueError("Must specify a fill 'value' or 'method'.") + elif value is None and method is not None: + method = clean_fill_method(method) + + elif value is not None and method is None: + if validate_scalar_dict_value and isinstance(value, (list, tuple)): + raise TypeError('"value" parameter must be a scalar or dict, but ' + 'you passed a "{0}"'.format(type(value).__name__)) + + elif value is not None and method is not None: + raise ValueError("Cannot specify both 'value' and 'method'.") + + return value, method
Closes https://github.com/pandas-dev/pandas/issues/19682
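With the validator in place, every `fillna` implementation reduces to the same one-line preamble. A sketch of how a caller uses it, based on the validator added in this diff (a private pandas helper, so the import path may change):

```python
from pandas.util._validators import validate_fillna_kwargs

# Exactly one of value/method must be given; method names are normalized
# via clean_fill_method ('ffill' -> 'pad').
value, method = validate_fillna_kwargs(None, 'ffill')   # -> (None, 'pad')
value, method = validate_fillna_kwargs(0, None)         # -> (0, None)

try:
    validate_fillna_kwargs(0, 'ffill')
except ValueError as err:
    print(err)   # Cannot specify both 'value' and 'method'.
```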
https://api.github.com/repos/pandas-dev/pandas/pulls/19684
2018-02-13T20:13:08Z
2018-02-22T00:20:58Z
2018-02-22T00:20:58Z
2018-05-02T13:09:54Z
DOC: fix IPython spelling.
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 2782ee7b9d201..bc045d74cee52 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -1059,7 +1059,7 @@ def to_gbq(self, destination_table, project_id, chunksize=10000, private_key : str (optional) Service account private key in JSON format. Can be file path or string contents. This is useful for remote server - authentication (eg. jupyter iPython notebook on remote host) + authentication (eg. Jupyter/IPython notebook on remote host) """ from pandas.io import gbq diff --git a/pandas/io/gbq.py b/pandas/io/gbq.py index b452b0cf5ddd4..f9bc6ae1a5451 100644 --- a/pandas/io/gbq.py +++ b/pandas/io/gbq.py @@ -65,7 +65,7 @@ def read_gbq(query, project_id=None, index_col=None, col_order=None, private_key : str (optional) Service account private key in JSON format. Can be file path or string contents. This is useful for remote server - authentication (eg. jupyter iPython notebook on remote host) + authentication (eg. Jupyter/IPython notebook on remote host) dialect : {'legacy', 'standard'}, default 'legacy' 'legacy' : Use BigQuery's legacy SQL dialect.
It's uppercase I and P (or all lowercase).
https://api.github.com/repos/pandas-dev/pandas/pulls/19683
2018-02-13T19:53:57Z
2018-02-13T21:41:15Z
2018-02-13T21:41:15Z
2018-02-13T21:41:59Z
Explicitly set dtype of np.lexsort in group_rank
diff --git a/pandas/_libs/groupby_helper.pxi.in b/pandas/_libs/groupby_helper.pxi.in index 48dac7bf10362..1d77a373bb7dd 100644 --- a/pandas/_libs/groupby_helper.pxi.in +++ b/pandas/_libs/groupby_helper.pxi.in @@ -531,7 +531,7 @@ def group_rank_{{name}}(ndarray[float64_t, ndim=2] out, # each label corresponds to a different group value, # the mask helps you differentiate missing values before # performing sort on the actual values - _as = np.lexsort(order) + _as = np.lexsort(order).view(dtype=np.int64) if not ascending: _as = _as[::-1]
xref #19481 I didn't see any other instance in `group_rank` where the dtype was open to interpretation, so I suspect `np.lexsort` is returning a platform-sized integer on 32-bit systems. Hoping that explicitly viewing the `np.lexsort` result as the dtype of `_as` will resolve the test failures.
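The suspicion is easy to confirm interactively: `np.lexsort` returns indices with dtype `np.intp`, whose width depends on the platform. A quick check:

```python
import numpy as np

keys = (np.array([1, 0, 1, 0]), np.array([3, 1, 2, 0]))
result = np.lexsort(keys)

# intp is 8 bytes on 64-bit builds but 4 bytes on 32-bit builds, which is
# exactly the ambiguity the explicit dtype in the diff removes.
print(result.dtype, np.dtype(np.intp).itemsize)
```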
https://api.github.com/repos/pandas-dev/pandas/pulls/19679
2018-02-13T18:19:44Z
2018-02-13T23:56:32Z
2018-02-13T23:56:32Z
2018-02-14T19:23:22Z
DOC: Clarify and add fill_value example in arithmetic ops
diff --git a/pandas/core/ops.py b/pandas/core/ops.py index 4c234ccb4dd47..5a715f3dceb76 100644 --- a/pandas/core/ops.py +++ b/pandas/core/ops.py @@ -255,8 +255,10 @@ def _get_frame_op_default_axis(name): ---------- other : Series or scalar value fill_value : None or float value, default None (NaN) - Fill missing (NaN) values with this value. If both Series are - missing, the result will be missing + Fill existing missing (NaN) values, and any new element needed for + successful Series alignment, with this value before computation. + If data in both corresponding Series locations is missing + the result will be missing level : int or name Broadcast across a level, matching Index values on the passed MultiIndex level @@ -265,6 +267,30 @@ def _get_frame_op_default_axis(name): ------- result : Series +Examples +-------- +>>> a = pd.Series([1, 1, 1, np.nan], index=['a', 'b', 'c', 'd']) +>>> a +a 1.0 +b 1.0 +c 1.0 +d NaN +dtype: float64 +>>> b = pd.Series([1, np.nan, 1, np.nan], index=['a', 'b', 'd', 'e']) +>>> b +a 1.0 +b NaN +d 1.0 +e NaN +dtype: float64 +>>> a.add(b, fill_value=0) +a 2.0 +b 1.0 +c 1.0 +d 1.0 +e NaN +dtype: float64 + See also -------- Series.{reverse} @@ -280,8 +306,10 @@ def _get_frame_op_default_axis(name): axis : {0, 1, 'index', 'columns'} For Series input, axis to match Series index on fill_value : None or float value, default None - Fill missing (NaN) values with this value. If both DataFrame locations are - missing, the result will be missing + Fill existing missing (NaN) values, and any new element needed for + successful DataFrame alignment, with this value before computation. + If data in both corresponding DataFrame locations is missing + the result will be missing level : int or name Broadcast across a level, matching Index values on the passed MultiIndex level @@ -293,6 +321,33 @@ def _get_frame_op_default_axis(name): Returns ------- result : DataFrame + +Examples +-------- +>>> a = pd.DataFrame([1, 1, 1, np.nan], index=['a', 'b', 'c', 'd'], + columns=['one']) +>>> a + one +a 1.0 +b 1.0 +c 1.0 +d NaN +>>> b = pd.DataFrame(dict(one=[1, np.nan, 1, np.nan], + two=[np.nan, 2, np.nan, 2]), + index=['a', 'b', 'd', 'e']) +>>> b + one two +a 1.0 NaN +b NaN 2.0 +d 1.0 NaN +e NaN 2.0 +>>> a.add(b, fill_value=0) + one two +a 2.0 NaN +b 1.0 2.0 +c 1.0 NaN +d 1.0 NaN +e NaN 2.0 """ _flex_doc_FRAME = """ @@ -307,8 +362,10 @@ def _get_frame_op_default_axis(name): axis : {{0, 1, 'index', 'columns'}} For Series input, axis to match Series index on fill_value : None or float value, default None - Fill missing (NaN) values with this value. If both DataFrame - locations are missing, the result will be missing + Fill existing missing (NaN) values, and any new element needed for + successful DataFrame alignment, with this value before computation. 
+ If data in both corresponding DataFrame locations is missing + the result will be missing level : int or name Broadcast across a level, matching Index values on the passed MultiIndex level @@ -321,6 +378,33 @@ def _get_frame_op_default_axis(name): ------- result : DataFrame +Examples +-------- +>>> a = pd.DataFrame([1, 1, 1, np.nan], index=['a', 'b', 'c', 'd'], + columns=['one']) +>>> a + one +a 1.0 +b 1.0 +c 1.0 +d NaN +>>> b = pd.DataFrame(dict(one=[1, np.nan, 1, np.nan], + two=[np.nan, 2, np.nan, 2]), + index=['a', 'b', 'd', 'e']) +>>> b + one two +a 1.0 NaN +b NaN 2.0 +d 1.0 NaN +e NaN 2.0 +>>> a.add(b, fill_value=0) + one two +a 2.0 NaN +b 1.0 2.0 +c 1.0 NaN +d 1.0 NaN +e NaN 2.0 + See also -------- DataFrame.{reverse} @@ -392,7 +476,6 @@ def _make_flex_doc(op_name, typ): base_doc = _flex_doc_PANEL else: raise AssertionError('Invalid typ argument.') - doc = base_doc.format(desc=op_desc['desc'], op_name=op_name, equiv=equiv, reverse=op_desc['reverse']) return doc
- [ ] closes #19653 - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
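A quick runnable check of the documented behavior (it mirrors the docstring example in the diff above): positions missing on only one side are filled before the add, while positions missing on both sides stay `NaN`.

```python
import numpy as np
import pandas as pd

a = pd.Series([1, 1, 1, np.nan], index=['a', 'b', 'c', 'd'])
b = pd.Series([1, np.nan, 1, np.nan], index=['a', 'b', 'd', 'e'])

result = a.add(b, fill_value=0)
# 'b', 'c', 'd': missing on one side only, so 0 is substituted first.
# 'e': missing in both (b is NaN, a has no 'e'), so the result is NaN.
print(result)
```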
https://api.github.com/repos/pandas-dev/pandas/pulls/19675
2018-02-13T14:32:36Z
2018-02-22T00:16:22Z
2018-02-22T00:16:22Z
2018-02-22T00:16:25Z
Cythonized GroupBy Fill
diff --git a/asv_bench/benchmarks/groupby.py b/asv_bench/benchmarks/groupby.py index 61db39528a5fb..c347442784d41 100644 --- a/asv_bench/benchmarks/groupby.py +++ b/asv_bench/benchmarks/groupby.py @@ -370,11 +370,11 @@ class GroupByMethods(object): param_names = ['dtype', 'method'] params = [['int', 'float'], - ['all', 'any', 'count', 'cumcount', 'cummax', 'cummin', - 'cumprod', 'cumsum', 'describe', 'first', 'head', 'last', 'mad', - 'max', 'min', 'median', 'mean', 'nunique', 'pct_change', 'prod', - 'rank', 'sem', 'shift', 'size', 'skew', 'std', 'sum', 'tail', - 'unique', 'value_counts', 'var']] + ['all', 'any', 'bfill', 'count', 'cumcount', 'cummax', 'cummin', + 'cumprod', 'cumsum', 'describe', 'ffill', 'first', 'head', + 'last', 'mad', 'max', 'min', 'median', 'mean', 'nunique', + 'pct_change', 'prod', 'rank', 'sem', 'shift', 'size', 'skew', + 'std', 'sum', 'tail', 'unique', 'value_counts', 'var']] def setup(self, dtype, method): ngroups = 1000 diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index fd3c3a5a7a301..fcaf46b1c3d71 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -689,6 +689,7 @@ Performance Improvements - Improved performance of pairwise ``.rolling()`` and ``.expanding()`` with ``.cov()`` and ``.corr()`` operations (:issue:`17917`) - Improved performance of :func:`DataFrameGroupBy.rank` (:issue:`15779`) - Improved performance of variable ``.rolling()`` on ``.min()`` and ``.max()`` (:issue:`19521`) +- Improved performance of ``GroupBy.ffill`` and ``GroupBy.bfill`` (:issue:`11296`) .. _whatsnew_0230.docs: diff --git a/pandas/_libs/groupby.pyx b/pandas/_libs/groupby.pyx index 866683ce378ab..e3d208a915225 100644 --- a/pandas/_libs/groupby.pyx +++ b/pandas/_libs/groupby.pyx @@ -94,5 +94,221 @@ cdef inline float64_t kth_smallest_c(float64_t* a, return a[k] +@cython.boundscheck(False) +@cython.wraparound(False) +def group_median_float64(ndarray[float64_t, ndim=2] out, + ndarray[int64_t] counts, + ndarray[float64_t, ndim=2] values, + ndarray[int64_t] labels, + Py_ssize_t min_count=-1): + """ + Only aggregates on axis=0 + """ + cdef: + Py_ssize_t i, j, N, K, ngroups, size + ndarray[int64_t] _counts + ndarray data + float64_t* ptr + + assert min_count == -1, "'min_count' only used in add and prod" + + ngroups = len(counts) + N, K = (<object> values).shape + + indexer, _counts = groupsort_indexer(labels, ngroups) + counts[:] = _counts[1:] + + data = np.empty((K, N), dtype=np.float64) + ptr = <float64_t*> data.data + + take_2d_axis1_float64_float64(values.T, indexer, out=data) + + with nogil: + + for i in range(K): + # exclude NA group + ptr += _counts[0] + for j in range(ngroups): + size = _counts[j + 1] + out[j, i] = median_linear(ptr, size) + ptr += size + + +@cython.boundscheck(False) +@cython.wraparound(False) +def group_cumprod_float64(float64_t[:, :] out, + float64_t[:, :] values, + int64_t[:] labels, + bint is_datetimelike): + """ + Only transforms on axis=0 + """ + cdef: + Py_ssize_t i, j, N, K, size + float64_t val + float64_t[:, :] accum + int64_t lab + + N, K = (<object> values).shape + accum = np.ones_like(values) + + with nogil: + for i in range(N): + lab = labels[i] + + if lab < 0: + continue + for j in range(K): + val = values[i, j] + if val == val: + accum[lab, j] *= val + out[i, j] = accum[lab, j] + + +@cython.boundscheck(False) +@cython.wraparound(False) +def group_cumsum(numeric[:, :] out, + numeric[:, :] values, + int64_t[:] labels, + is_datetimelike): + """ + Only transforms on axis=0 + """ + cdef: + 
Py_ssize_t i, j, N, K, size + numeric val + numeric[:, :] accum + int64_t lab + + N, K = (<object> values).shape + accum = np.zeros_like(values) + + with nogil: + for i in range(N): + lab = labels[i] + + if lab < 0: + continue + for j in range(K): + val = values[i, j] + + if numeric == float32_t or numeric == float64_t: + if val == val: + accum[lab, j] += val + out[i, j] = accum[lab, j] + else: + accum[lab, j] += val + out[i, j] = accum[lab, j] + + +@cython.boundscheck(False) +@cython.wraparound(False) +def group_shift_indexer(ndarray[int64_t] out, ndarray[int64_t] labels, + int ngroups, int periods): + cdef: + Py_ssize_t N, i, j, ii + int offset, sign + int64_t lab, idxer, idxer_slot + int64_t[:] label_seen = np.zeros(ngroups, dtype=np.int64) + int64_t[:, :] label_indexer + + N, = (<object> labels).shape + + if periods < 0: + periods = -periods + offset = N - 1 + sign = -1 + elif periods > 0: + offset = 0 + sign = 1 + + if periods == 0: + with nogil: + for i in range(N): + out[i] = i + else: + # array of each previous indexer seen + label_indexer = np.zeros((ngroups, periods), dtype=np.int64) + with nogil: + for i in range(N): + ## reverse iterator if shifting backwards + ii = offset + sign * i + lab = labels[ii] + + # Skip null keys + if lab == -1: + out[ii] = -1 + continue + + label_seen[lab] += 1 + + idxer_slot = label_seen[lab] % periods + idxer = label_indexer[lab, idxer_slot] + + if label_seen[lab] > periods: + out[ii] = idxer + else: + out[ii] = -1 + + label_indexer[lab, idxer_slot] = ii + + +@cython.wraparound(False) +@cython.boundscheck(False) +def group_fillna_indexer(ndarray[int64_t] out, ndarray[int64_t] labels, + ndarray[uint8_t] mask, object direction, + int64_t limit): + """Indexes how to fill values forwards or backwards within a group + + Parameters + ---------- + out : array of int64_t values which this method will write its results to + Missing values will be written to with a value of -1 + labels : array containing unique label for each group, with its ordering + matching up to the corresponding record in `values` + mask : array of int64_t values where a 1 indicates a missing value + direction : {'ffill', 'bfill'} + Direction for fill to be applied (forwards or backwards, respectively) + limit : Consecutive values to fill before stopping, or -1 for no limit + + Notes + ----- + This method modifies the `out` parameter rather than returning an object + """ + cdef: + Py_ssize_t i, N + ndarray[int64_t] sorted_labels + int64_t idx, curr_fill_idx=-1, filled_vals=0 + + N = len(out) + + # Make sure all arrays are the same size + assert N == len(labels) == len(mask) + + sorted_labels = np.argsort(labels).astype(np.int64, copy=False) + if direction == 'bfill': + sorted_labels = sorted_labels[::-1] + + with nogil: + for i in range(N): + idx = sorted_labels[i] + if mask[idx] == 1: # is missing + # Stop filling once we've hit the limit + if filled_vals >= limit and limit != -1: + curr_fill_idx = -1 + filled_vals += 1 + else: # reset items when not missing + filled_vals = 0 + curr_fill_idx = idx + + out[idx] = curr_fill_idx + + # If we move to the next group, reset + # the fill_idx and counter + if i == N - 1 or labels[idx] != labels[sorted_labels[i+1]]: + curr_fill_idx = -1 + filled_vals = 0 + + # generated from template include "groupby_helper.pxi" diff --git a/pandas/_libs/groupby_helper.pxi.in b/pandas/_libs/groupby_helper.pxi.in index e03e3af65755b..de802f4a72277 100644 --- a/pandas/_libs/groupby_helper.pxi.in +++ b/pandas/_libs/groupby_helper.pxi.in @@ -791,166 +791,3 @@ def 
group_cummax_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, out[i, j] = mval {{endfor}} - -#---------------------------------------------------------------------- -# other grouping functions not needing a template -#---------------------------------------------------------------------- - - -@cython.boundscheck(False) -@cython.wraparound(False) -def group_median_float64(ndarray[float64_t, ndim=2] out, - ndarray[int64_t] counts, - ndarray[float64_t, ndim=2] values, - ndarray[int64_t] labels, - Py_ssize_t min_count=-1): - """ - Only aggregates on axis=0 - """ - cdef: - Py_ssize_t i, j, N, K, ngroups, size - ndarray[int64_t] _counts - ndarray data - float64_t* ptr - - assert min_count == -1, "'min_count' only used in add and prod" - - ngroups = len(counts) - N, K = (<object> values).shape - - indexer, _counts = groupsort_indexer(labels, ngroups) - counts[:] = _counts[1:] - - data = np.empty((K, N), dtype=np.float64) - ptr = <float64_t*> data.data - - take_2d_axis1_float64_float64(values.T, indexer, out=data) - - with nogil: - - for i in range(K): - # exclude NA group - ptr += _counts[0] - for j in range(ngroups): - size = _counts[j + 1] - out[j, i] = median_linear(ptr, size) - ptr += size - - -@cython.boundscheck(False) -@cython.wraparound(False) -def group_cumprod_float64(float64_t[:, :] out, - float64_t[:, :] values, - int64_t[:] labels, - bint is_datetimelike): - """ - Only transforms on axis=0 - """ - cdef: - Py_ssize_t i, j, N, K, size - float64_t val - float64_t[:, :] accum - int64_t lab - - N, K = (<object> values).shape - accum = np.ones_like(values) - - with nogil: - for i in range(N): - lab = labels[i] - - if lab < 0: - continue - for j in range(K): - val = values[i, j] - if val == val: - accum[lab, j] *= val - out[i, j] = accum[lab, j] - - -@cython.boundscheck(False) -@cython.wraparound(False) -def group_cumsum(numeric[:, :] out, - numeric[:, :] values, - int64_t[:] labels, - is_datetimelike): - """ - Only transforms on axis=0 - """ - cdef: - Py_ssize_t i, j, N, K, size - numeric val - numeric[:, :] accum - int64_t lab - - N, K = (<object> values).shape - accum = np.zeros_like(values) - - with nogil: - for i in range(N): - lab = labels[i] - - if lab < 0: - continue - for j in range(K): - val = values[i, j] - - if numeric == float32_t or numeric == float64_t: - if val == val: - accum[lab, j] += val - out[i, j] = accum[lab, j] - else: - accum[lab, j] += val - out[i, j] = accum[lab, j] - - -@cython.boundscheck(False) -@cython.wraparound(False) -def group_shift_indexer(int64_t[:] out, int64_t[:] labels, - int ngroups, int periods): - cdef: - Py_ssize_t N, i, j, ii - int offset, sign - int64_t lab, idxer, idxer_slot - int64_t[:] label_seen = np.zeros(ngroups, dtype=np.int64) - int64_t[:, :] label_indexer - - N, = (<object> labels).shape - - if periods < 0: - periods = -periods - offset = N - 1 - sign = -1 - elif periods > 0: - offset = 0 - sign = 1 - - if periods == 0: - with nogil: - for i in range(N): - out[i] = i - else: - # array of each previous indexer seen - label_indexer = np.zeros((ngroups, periods), dtype=np.int64) - with nogil: - for i in range(N): - ## reverse iterator if shifting backwards - ii = offset + sign * i - lab = labels[ii] - - # Skip null keys - if lab == -1: - out[ii] = -1 - continue - - label_seen[lab] += 1 - - idxer_slot = label_seen[lab] % periods - idxer = label_indexer[lab, idxer_slot] - - if label_seen[lab] > periods: - out[ii] = idxer - else: - out[ii] = -1 - - label_indexer[lab, idxer_slot] = ii diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py 
index b1615f720368d..852ad04cd8a2e 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -1,5 +1,5 @@ import types -from functools import wraps +from functools import wraps, partial import numpy as np import datetime import collections @@ -38,7 +38,7 @@ _ensure_float) from pandas.core.dtypes.cast import maybe_downcast_to_dtype from pandas.core.dtypes.generic import ABCSeries -from pandas.core.dtypes.missing import isna, notna, _maybe_fill +from pandas.core.dtypes.missing import isna, isnull, notna, _maybe_fill from pandas.core.base import (PandasObject, SelectionMixin, GroupByError, DataError, SpecificationError) @@ -1457,6 +1457,36 @@ def expanding(self, *args, **kwargs): from pandas.core.window import ExpandingGroupby return ExpandingGroupby(self, *args, **kwargs) + def _fill(self, direction, limit=None): + """Shared function for `pad` and `backfill` to call Cython method + + Parameters + ---------- + direction : {'ffill', 'bfill'} + Direction passed to underlying Cython function. `bfill` will cause + values to be filled backwards. `ffill` and any other values will + default to a forward fill + limit : int, default None + Maximum number of consecutive values to fill. If `None`, this + method will convert to -1 prior to passing to Cython + + Returns + ------- + `Series` or `DataFrame` with filled values + + See Also + -------- + pad + backfill + """ + # Need int value for Cython + if limit is None: + limit = -1 + + return self._get_cythonized_result('group_fillna_indexer', + self.grouper, needs_mask=True, + direction=direction, limit=limit) + @Substitution(name='groupby') def pad(self, limit=None): """ @@ -1474,7 +1504,7 @@ def pad(self, limit=None): Series.fillna DataFrame.fillna """ - return self.apply(lambda x: x.ffill(limit=limit)) + return self._fill('ffill', limit=limit) ffill = pad @Substitution(name='groupby') @@ -1494,7 +1524,7 @@ def backfill(self, limit=None): Series.fillna DataFrame.fillna """ - return self.apply(lambda x: x.bfill(limit=limit)) + return self._fill('bfill', limit=limit) bfill = backfill @Substitution(name='groupby') @@ -1843,6 +1873,45 @@ def cummax(self, axis=0, **kwargs): return self._cython_transform('cummax', numeric_only=False) + def _get_cythonized_result(self, how, grouper, needs_mask=False, + needs_ngroups=False, **kwargs): + """Get result for Cythonized functions + + Parameters + ---------- + how : str, Cythonized function name to be called + grouper : Grouper object containing pertinent group info + needs_mask : bool, default False + Whether boolean mask needs to be part of the Cython call signature + needs_ngroups : bool, default False + Whether number of groups part of the Cython call signature + **kwargs : dict + Extra arguments to be passed back to Cython funcs + + Returns + ------- + `Series` or `DataFrame` with filled values + """ + + labels, _, ngroups = grouper.group_info + output = collections.OrderedDict() + base_func = getattr(libgroupby, how) + + for name, obj in self._iterate_slices(): + indexer = np.zeros_like(labels, dtype=np.int64) + func = partial(base_func, indexer, labels) + if needs_mask: + mask = isnull(obj.values).view(np.uint8) + func = partial(func, mask) + + if needs_ngroups: + func = partial(func, ngroups) + + func(**kwargs) # Call func to modify indexer values in place + output[name] = algorithms.take_nd(obj.values, indexer) + + return self._wrap_transformed_output(output) + @Substitution(name='groupby') @Appender(_doc_template) def shift(self, periods=1, freq=None, axis=0): @@ -1860,17 +1929,9 @@ def 
shift(self, periods=1, freq=None, axis=0): if freq is not None or axis != 0: return self.apply(lambda x: x.shift(periods, freq, axis)) - labels, _, ngroups = self.grouper.group_info - - # filled in by Cython - indexer = np.zeros_like(labels) - libgroupby.group_shift_indexer(indexer, labels, ngroups, periods) - - output = {} - for name, obj in self._iterate_slices(): - output[name] = algorithms.take_nd(obj.values, indexer) - - return self._wrap_transformed_output(output) + return self._get_cythonized_result('group_shift_indexer', + self.grouper, needs_ngroups=True, + periods=periods) @Substitution(name='groupby') @Appender(_doc_template) @@ -3577,7 +3638,6 @@ def describe(self, **kwargs): def value_counts(self, normalize=False, sort=True, ascending=False, bins=None, dropna=True): - from functools import partial from pandas.core.reshape.tile import cut from pandas.core.reshape.merge import _get_join_indexers @@ -4585,9 +4645,17 @@ def _apply_to_column_groupbys(self, func): in self._iterate_column_groupbys()), keys=self._selected_obj.columns, axis=1) + def _fill(self, direction, limit=None): + """Overriden method to join grouped columns in output""" + res = super(DataFrameGroupBy, self)._fill(direction, limit=limit) + output = collections.OrderedDict( + (grp.name, grp.grouper) for grp in self.grouper.groupings) + + from pandas import concat + return concat((self._wrap_transformed_output(output), res), axis=1) + def count(self): """ Compute count of group, excluding missing values """ - from functools import partial from pandas.core.dtypes.missing import _isna_ndarraylike as isna data, _ = self._get_data_to_aggregate() diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index 129ac6b06205c..2429e9975fc8e 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -2061,6 +2061,61 @@ def test_rank_object_raises(self, ties_method, ascending, na_option, ascending=ascending, na_option=na_option, pct=pct) + @pytest.mark.parametrize("mix_groupings", [True, False]) + @pytest.mark.parametrize("as_series", [True, False]) + @pytest.mark.parametrize("val1,val2", [ + ('foo', 'bar'), (1, 2), (1., 2.)]) + @pytest.mark.parametrize("fill_method,limit,exp_vals", [ + ("ffill", None, + [np.nan, np.nan, 'val1', 'val1', 'val1', 'val2', 'val2', 'val2']), + ("ffill", 1, + [np.nan, np.nan, 'val1', 'val1', np.nan, 'val2', 'val2', np.nan]), + ("bfill", None, + ['val1', 'val1', 'val1', 'val2', 'val2', 'val2', np.nan, np.nan]), + ("bfill", 1, + [np.nan, 'val1', 'val1', np.nan, 'val2', 'val2', np.nan, np.nan]) + ]) + def test_group_fill_methods(self, mix_groupings, as_series, val1, val2, + fill_method, limit, exp_vals): + vals = [np.nan, np.nan, val1, np.nan, np.nan, val2, np.nan, np.nan] + _exp_vals = list(exp_vals) + # Overwrite placeholder values + for index, exp_val in enumerate(_exp_vals): + if exp_val == 'val1': + _exp_vals[index] = val1 + elif exp_val == 'val2': + _exp_vals[index] = val2 + + # Need to modify values and expectations depending on the + # Series / DataFrame that we ultimately want to generate + if mix_groupings: # ['a', 'b', 'a, 'b', ...] + keys = ['a', 'b'] * len(vals) + + def interweave(list_obj): + temp = list() + for x in list_obj: + temp.extend([x, x]) + + return temp + + _exp_vals = interweave(_exp_vals) + vals = interweave(vals) + else: # ['a', 'a', 'a', ... 
'b', 'b', 'b'] + keys = ['a'] * len(vals) + ['b'] * len(vals) + _exp_vals = _exp_vals * 2 + vals = vals * 2 + + df = DataFrame({'key': keys, 'val': vals}) + if as_series: + result = getattr( + df.groupby('key')['val'], fill_method)(limit=limit) + exp = Series(_exp_vals, name='val') + assert_series_equal(result, exp) + else: + result = getattr(df.groupby('key'), fill_method)(limit=limit) + exp = DataFrame({'key': keys, 'val': _exp_vals}) + assert_frame_equal(result, exp) + def test_dont_clobber_name_column(self): df = DataFrame({'key': ['a', 'a', 'a', 'b', 'b', 'b'], 'name': ['foo', 'bar', 'baz'] * 2})
- [X] closes #11296 - [X] tests added / passed - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry I am not a fan of how I've implemented this in `groupby.py`, but I think this change further highlights the need for some refactoring of that module, specifically in how Cython transformations are dispatched. This works in the meantime, but I'm open to any feedback on how the methods are wired back to the Cython layer. Below are ASVs for the change ```bash before after ratio [d9551c8e] [5e007f86] + 81.6±0.3μs 96.7±2μs 1.19 groupby.GroupByMethods.time_method('float', 'count') + 455±3μs 516±20μs 1.13 groupby.GroupByMethods.time_method('float', 'cummin') + 306±0.6μs 340±5μs 1.11 groupby.GroupByMethods.time_method('float', 'prod') - 142±0.4ms 280±6μs 0.00 groupby.GroupByMethods.time_method('int', 'bfill') - 230±10ms 327±7μs 0.00 groupby.GroupByMethods.time_method('float', 'bfill') - 135±0.8ms 189±0.3μs 0.00 groupby.GroupByMethods.time_method('int', 'ffill') - 227±2ms 184±0.6μs 0.00 groupby.GroupByMethods.time_method('float', 'ffill') ```
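A rough pure-Python sketch of the indexer approach `group_fillna_indexer` takes, for the `ffill` case only (simplified; the real routine iterates argsorted labels under `nogil`): record the last valid position per group, emit -1 where nothing can be filled, then take values through the indexer.

```python
import numpy as np

def ffill_indexer(labels, mask, limit=-1):
    """Sketch of group_fillna_indexer, direction='ffill' only.

    labels: group codes; mask: True where a value is missing;
    limit: max consecutive fills (-1 for no limit).
    Returns an int64 indexer with -1 marking still-missing slots.
    """
    out = np.empty(len(labels), dtype=np.int64)
    last_valid = {}  # group -> index of last non-missing value
    filled = {}      # group -> consecutive fills since last valid
    for i, lab in enumerate(labels):
        if mask[i]:
            if limit != -1 and filled.get(lab, 0) >= limit:
                out[i] = -1  # limit reached: leave missing
            else:
                out[i] = last_valid.get(lab, -1)
            filled[lab] = filled.get(lab, 0) + 1
        else:
            out[i] = i
            last_valid[lab] = i
            filled[lab] = 0
    return out

vals = np.array([np.nan, 1.0, np.nan, np.nan, 2.0, np.nan])
idx = ffill_indexer(np.array([0, 0, 0, 1, 1, 1]), np.isnan(vals))
print(np.where(idx == -1, np.nan, vals[np.maximum(idx, 0)]))
# [nan  1.  1. nan  2.  2.]
```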
https://api.github.com/repos/pandas-dev/pandas/pulls/19673
2018-02-13T07:36:11Z
2018-02-25T16:05:27Z
2018-02-25T16:05:27Z
2018-02-25T20:38:08Z
Fix docstrings, error messages
diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index c98f8ceea0ffa..f37f6d545f343 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -627,9 +627,9 @@ def _convert_scalar_indexer(self, key, kind=None): ._convert_scalar_indexer(key, kind=kind)) def _add_datelike(self, other): - raise TypeError("cannot add {0} and {1}" - .format(type(self).__name__, - type(other).__name__)) + raise TypeError("cannot add {cls} and {typ}" + .format(cls=type(self).__name__, + typ=type(other).__name__)) def _sub_datelike(self, other): raise com.AbstractMethodError(self) @@ -670,8 +670,9 @@ def __add__(self, other): elif isinstance(self, TimedeltaIndex) and isinstance(other, Index): if hasattr(other, '_add_delta'): return other._add_delta(self) - raise TypeError("cannot add TimedeltaIndex and {typ}" - .format(typ=type(other))) + raise TypeError("cannot add {cls} and {typ}" + .format(cls=type(self).__name__, + typ=type(other).__name__)) elif is_integer(other): return self.shift(other) elif isinstance(other, (datetime, np.datetime64)): @@ -705,8 +706,9 @@ def __sub__(self, other): return self._sub_offset_array(other) elif isinstance(self, TimedeltaIndex) and isinstance(other, Index): if not isinstance(other, TimedeltaIndex): - raise TypeError("cannot subtract TimedeltaIndex and {typ}" - .format(typ=type(other).__name__)) + raise TypeError("cannot subtract {cls} and {typ}" + .format(cls=type(self).__name__, + typ=type(other).__name__)) return self._add_delta(-other) elif isinstance(other, DatetimeIndex): return self._sub_datelike(other) @@ -794,7 +796,7 @@ def isin(self, values): def shift(self, n, freq=None): """ - Specialized shift which produces a DatetimeIndex + Specialized shift which produces a %(klass)s Parameters ---------- @@ -804,7 +806,7 @@ def shift(self, n, freq=None): Returns ------- - shifted : DatetimeIndex + shifted : %(klass)s """ if freq is not None and freq != self.freq: if isinstance(freq, compat.string_types): diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index cc9ce1f3fd5eb..a422d5b4c3851 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -106,7 +106,7 @@ def _dt_index_cmp(opname, cls, nat_result=False): """ def wrapper(self, other): - func = getattr(super(DatetimeIndex, self), opname) + binop = getattr(Index, opname) if isinstance(other, (datetime, compat.string_types)): if isinstance(other, datetime): @@ -114,7 +114,7 @@ def wrapper(self, other): self._assert_tzawareness_compat(other) other = _to_m8(other, tz=self.tz) - result = func(other) + result = binop(self, other) if isna(other): result.fill(nat_result) else: @@ -134,7 +134,7 @@ def wrapper(self, other): if is_datetimelike(other): self._assert_tzawareness_compat(other) - result = func(np.asarray(other)) + result = binop(self, np.asarray(other)) result = com._values_from_object(result) if isinstance(other, Index): @@ -856,9 +856,9 @@ def _add_datelike(self, other): # adding a timedeltaindex to a datetimelike if other is libts.NaT: return self._nat_new(box=True) - raise TypeError("cannot add {0} and {1}" - .format(type(self).__name__, - type(other).__name__)) + raise TypeError("cannot add {cls} and {typ}" + .format(cls=type(self).__name__, + typ=type(other).__name__)) def _sub_datelike(self, other): # subtract a datetime from myself, yielding a TimedeltaIndex @@ -866,8 +866,9 @@ def _sub_datelike(self, other): if isinstance(other, DatetimeIndex): # require tz compat if not 
self._has_same_tz(other): - raise TypeError("DatetimeIndex subtraction must have the same " - "timezones or no timezones") + raise TypeError("{cls} subtraction must have the same " + "timezones or no timezones" + .format(cls=type(self).__name__)) result = self._sub_datelike_dti(other) elif isinstance(other, (datetime, np.datetime64)): other = Timestamp(other) @@ -884,8 +885,9 @@ def _sub_datelike(self, other): result = self._maybe_mask_results(result, fill_value=libts.iNaT) else: - raise TypeError("cannot subtract DatetimeIndex and {typ}" - .format(typ=type(other).__name__)) + raise TypeError("cannot subtract {cls} and {typ}" + .format(cls=type(self).__name__, + typ=type(other).__name__)) return TimedeltaIndex(result, name=self.name, copy=False) def _sub_datelike_dti(self, other): @@ -901,6 +903,11 @@ def _sub_datelike_dti(self, other): new_values[mask] = libts.iNaT return new_values.view('i8') + @Substitution(klass='DatetimeIndex') + @Appender(DatetimeIndexOpsMixin.shift.__doc__) + def shift(self, n, freq=None): + return super(DatetimeIndex, self).shift(n, freq) + def _maybe_update_attributes(self, attrs): """ Update Index attributes (e.g. freq) depending on op """ freq = attrs.get('freq', None) @@ -948,8 +955,8 @@ def _add_offset(self, offset): return result except NotImplementedError: - warnings.warn("Non-vectorized DateOffset being applied to Series " - "or DatetimeIndex", PerformanceWarning) + warnings.warn("Non-vectorized DateOffset being applied to {cls}" + .format(cls=type(self).__name__), PerformanceWarning) return self.astype('O') + offset def _add_offset_array(self, other): @@ -960,8 +967,8 @@ def _add_offset_array(self, other): return self + other[0] else: warnings.warn("Adding/subtracting array of DateOffsets to " - "{} not vectorized".format(type(self)), - PerformanceWarning) + "{cls} not vectorized" + .format(cls=type(self).__name__), PerformanceWarning) return self.astype('O') + np.array(other) # TODO: This works for __add__ but loses dtype in __sub__ @@ -973,8 +980,8 @@ def _sub_offset_array(self, other): return self - other[0] else: warnings.warn("Adding/subtracting array of DateOffsets to " - "{} not vectorized".format(type(self)), - PerformanceWarning) + "{cls} not vectorized" + .format(cls=type(self).__name__), PerformanceWarning) res_values = self.astype('O').values - np.array(other) return self.__class__(res_values, freq='infer') diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py index 4b543262fc485..cd9f08a3e7f16 100644 --- a/pandas/core/indexes/timedeltas.py +++ b/pandas/core/indexes/timedeltas.py @@ -59,23 +59,25 @@ def _td_index_cmp(opname, cls, nat_result=False): """ def wrapper(self, other): - msg = "cannot compare a TimedeltaIndex with type {0}" - func = getattr(super(TimedeltaIndex, self), opname) + msg = "cannot compare a {cls} with type {typ}" + binop = getattr(Index, opname) if _is_convertible_to_td(other) or other is NaT: try: other = _to_m8(other) except ValueError: # failed to parse as timedelta - raise TypeError(msg.format(type(other))) - result = func(other) + raise TypeError(msg.format(cls=type(self).__name__, + typ=type(other).__name__)) + result = binop(self, other) if isna(other): result.fill(nat_result) else: if not is_list_like(other): - raise TypeError(msg.format(type(other))) + raise TypeError(msg.format(cls=type(self).__name__, + typ=type(other).__name__)) other = TimedeltaIndex(other).values - result = func(other) + result = binop(self, other) result = com._values_from_object(result) if isinstance(other, 
Index): @@ -364,8 +366,9 @@ def _add_delta(self, delta): # update name when delta is index name = com._maybe_match_name(self, delta) else: - raise TypeError("cannot add the type {0} to a TimedeltaIndex" - .format(type(delta))) + raise TypeError("cannot add the type {typ} to a {cls}" + .format(typ=type(delta).__name__, + cls=type(self).__name__)) result = TimedeltaIndex(new_values, freq='infer', name=name) return result @@ -409,7 +412,7 @@ def _add_datelike(self, other): result = checked_add_with_arr(i8, other.value, arr_mask=self._isnan) result = self._maybe_mask_results(result, fill_value=iNaT) - return DatetimeIndex(result, name=self.name, copy=False) + return DatetimeIndex(result, name=self.name, copy=False) def _sub_datelike(self, other): # GH#19124 Timedelta - datetime is not in general well-defined. @@ -418,7 +421,8 @@ def _sub_datelike(self, other): if other is NaT: return self._nat_new() else: - raise TypeError("cannot subtract a datelike from a TimedeltaIndex") + raise TypeError("cannot subtract a datelike from a {cls}" + .format(cls=type(self).__name__)) def _add_offset_array(self, other): # Array/Index of DateOffset objects @@ -433,12 +437,14 @@ def _add_offset_array(self, other): else: from pandas.errors import PerformanceWarning warnings.warn("Adding/subtracting array of DateOffsets to " - "{} not vectorized".format(type(self)), + "{cls} not vectorized" + .format(cls=type(self).__name__), PerformanceWarning) return self.astype('O') + np.array(other) # TODO: This works for __add__ but loses dtype in __sub__ except AttributeError: - raise TypeError("Cannot add non-tick DateOffset to TimedeltaIndex") + raise TypeError("Cannot add non-tick DateOffset to {cls}" + .format(cls=type(self).__name__)) def _sub_offset_array(self, other): # Array/Index of DateOffset objects @@ -453,13 +459,19 @@ def _sub_offset_array(self, other): else: from pandas.errors import PerformanceWarning warnings.warn("Adding/subtracting array of DateOffsets to " - "{} not vectorized".format(type(self)), + "{cls} not vectorized" + .format(cls=type(self).__name__), PerformanceWarning) res_values = self.astype('O').values - np.array(other) return self.__class__(res_values, freq='infer') except AttributeError: - raise TypeError("Cannot subtrack non-tick DateOffset from" - " TimedeltaIndex") + raise TypeError("Cannot subtrack non-tick DateOffset " + "from {cls}".format(cls=type(self).__name__)) + + @Substitution(klass='TimedeltaIndex') + @Appender(DatetimeIndexOpsMixin.shift.__doc__) + def shift(self, n, freq=None): + return super(TimedeltaIndex, self).shift(n, freq) def _format_native_types(self, na_rep=u('NaT'), date_format=None, **kwargs): @@ -928,7 +940,7 @@ def insert(self, loc, item): def delete(self, loc): """ - Make a new DatetimeIndex with passed location(s) deleted. + Make a new TimedeltaIndex with passed location(s) deleted. Parameters ----------
The discussion in #19558 makes me think getting PeriodArray and DatetimeArray implemented sooner rather than later will make things easier for other goals. In preparation for that, this PR goes through (most) of the methods that will need to be ported and adapts their error messages to be class-agnostic, and to use `str.format` syntax. Also fixes a couple of copy/paste typos in docstrings.
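The pattern in a nutshell: building messages from `type(self).__name__` means a method inherited by a future array class reports the right class name without being rewritten. A small illustration with hypothetical classes (not the real pandas types):

```python
class DatetimeLikeMixin(object):
    def _add_datelike(self, other):
        # type(self).__name__ keeps the message accurate in subclasses
        raise TypeError("cannot add {cls} and {typ}"
                        .format(cls=type(self).__name__,
                                typ=type(other).__name__))

class PeriodArray(DatetimeLikeMixin):  # hypothetical subclass
    pass

try:
    PeriodArray()._add_datelike("2018-01-01")
except TypeError as exc:
    print(exc)  # cannot add PeriodArray and str
```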
https://api.github.com/repos/pandas-dev/pandas/pulls/19672
2018-02-13T05:43:38Z
2018-02-19T02:40:48Z
null
2018-06-22T03:32:18Z
ENH-19629: Adding numpy nansum/nanmean etc. to _cython_table
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 43e384b01ad2c..1b7cc3d5b235e 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -1098,6 +1098,7 @@ Numeric - Bug in :class:`DataFrame` flex arithmetic (e.g. ``df.add(other, fill_value=foo)``) with a ``fill_value`` other than ``None`` failed to raise ``NotImplementedError`` in corner cases where either the frame or ``other`` has length zero (:issue:`19522`) - Multiplication and division of numeric-dtyped :class:`Index` objects with timedelta-like scalars returns ``TimedeltaIndex`` instead of raising ``TypeError`` (:issue:`19333`) - Bug where ``NaN`` was returned instead of 0 by :func:`Series.pct_change` and :func:`DataFrame.pct_change` when ``fill_method`` is not ``None`` (:issue:`19873`) +- :meth:`~DataFrame.agg` now correctly handles numpy NaN-aware methods like :meth:`numpy.nansum` (:issue:`19629`) Strings ^^^^^^^ diff --git a/pandas/core/base.py b/pandas/core/base.py index 0d55fa8b97aae..ed3bdb8dc45fb 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -22,7 +22,8 @@ from pandas.core import common as com, algorithms import pandas.core.nanops as nanops import pandas._libs.lib as lib -from pandas.compat.numpy import function as nv +from pandas.compat.numpy import (function as nv, _np_version_under1p10, + _np_version_under1p12) from pandas.compat import PYPY from pandas.util._decorators import (Appender, cache_readonly, deprecate_kwarg, Substitution) @@ -191,17 +192,31 @@ class SelectionMixin(object): np.all: 'all', np.any: 'any', np.sum: 'sum', + np.nansum: 'sum', np.mean: 'mean', + np.nanmean: 'mean', np.prod: 'prod', np.std: 'std', + np.nanstd: 'std', np.var: 'var', + np.nanvar: 'var', np.median: 'median', + np.nanmedian: 'median', np.max: 'max', + np.nanmax: 'max', np.min: 'min', + np.nanmin: 'min', np.cumprod: 'cumprod', - np.cumsum: 'cumsum' + np.cumsum: 'cumsum', } + if not _np_version_under1p10: + _cython_table[np.nanprod] = 'prod' + + if not _np_version_under1p12: + _cython_table[np.nancumprod] = 'cumprod' + _cython_table[np.nancumsum] = 'cumsum' + @property def _selection_name(self): """ diff --git a/pandas/tests/test_nanops.py b/pandas/tests/test_nanops.py index a70ee80aee180..5d90a67ca7e3b 100644 --- a/pandas/tests/test_nanops.py +++ b/pandas/tests/test_nanops.py @@ -994,6 +994,49 @@ def prng(self): return np.random.RandomState(1234) +@pytest.fixture(params=[ + pd.Series([1, 2, 3, 4, 5, 6]), + pd.DataFrame([[1, 2, 3], [4, 5, 6]]) +]) +def nan_test_object(request): + return request.param + + +@pytest.mark.parametrize("standard, nan_method", [ + (np.sum, np.nansum), + (np.mean, np.nanmean), + (np.std, np.nanstd), + (np.var, np.nanvar), + (np.median, np.nanmedian), + (np.max, np.nanmax), + (np.min, np.nanmin) +]) +def test_np_nan_functions(standard, nan_method, nan_test_object): + tm.assert_almost_equal(nan_test_object.agg(standard), + nan_test_object.agg(nan_method), + check_exact=True) + + +@td.skip_if_no("numpy", min_version="1.10.0") +def test_np_nanprod(nan_test_object): + tm.assert_almost_equal(nan_test_object.agg(np.prod), + nan_test_object.agg(np.nanprod), + check_exact=True) + + +@td.skip_if_no("numpy", min_version="1.12.0") +def test_np_nancumprod(nan_test_object): + # Not using pytest params for methods as will fail at build time + methods = [ + (np.cumprod, np.nancumprod), + (np.cumsum, np.nancumsum) + ] + for standard, nan_method in methods: + tm.assert_almost_equal(nan_test_object.agg(standard), + nan_test_object.agg(nan_method), + 
check_exact=True) + + def test_use_bottleneck(): if nanops._BOTTLENECK_INSTALLED:
- [x] closes #19629 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry As per the issue, here's proof of the solution: ```python Python 3.6.4 |Anaconda, Inc.| (default, Jan 16 2018, 12:04:33) [GCC 4.2.1 Compatible Clang 4.0.1 (tags/RELEASE_401/final)] on darwin Type "help", "copyright", "credits" or "license" for more information. >>> import pandas as pd >>> import numpy as np >>> pd.Series([1,2,3,4]).agg(np.sum) 10 >>> pd.Series([1,2,3,4]).agg(np.nansum) 10 ``` Where should I add tests for these changes? I wasn't sure where they fit best or how to test them most effectively. Also, is a whatsnew entry needed here? If so, any guidance on what it should say?
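With the table entries in place, both variants should resolve to the same NaN-skipping pandas path even when missing data is present (assuming a numpy recent enough for the version-guarded entries):

```python
import numpy as np
import pandas as pd

s = pd.Series([1.0, np.nan, 3.0])
# Both now map to the Cython 'sum', which skips NaN:
print(s.agg(np.sum), s.agg(np.nansum))  # 4.0 4.0
```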
https://api.github.com/repos/pandas-dev/pandas/pulls/19670
2018-02-13T00:45:49Z
2018-05-07T21:26:32Z
null
2018-05-07T21:26:32Z
DOC: Ambiguous description in to_parquet engine documentation
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 2782ee7b9d201..02d9cc5cbdf30 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -1678,9 +1678,10 @@ def to_parquet(self, fname, engine='auto', compression='snappy', fname : str string file path engine : {'auto', 'pyarrow', 'fastparquet'}, default 'auto' - Parquet reader library to use. If 'auto', then the option - 'io.parquet.engine' is used. If 'auto', then the first - library to be installed is used. + Parquet library to use. If 'auto', then the option + ``io.parquet.engine`` is used. The default ``io.parquet.engine`` + behavior is to try 'pyarrow', falling back to 'fastparquet' if + 'pyarrow' is unavailable. compression : {'snappy', 'gzip', 'brotli', None}, default 'snappy' Name of the compression to use. Use ``None`` for no compression. kwargs diff --git a/pandas/io/parquet.py b/pandas/io/parquet.py index 1c22a305c089d..a99014f07a6b3 100644 --- a/pandas/io/parquet.py +++ b/pandas/io/parquet.py @@ -244,9 +244,10 @@ def to_parquet(df, path, engine='auto', compression='snappy', **kwargs): path : string File path engine : {'auto', 'pyarrow', 'fastparquet'}, default 'auto' - Parquet reader library to use. If 'auto', then the option - 'io.parquet.engine' is used. If 'auto', then the first - library to be installed is used. + Parquet library to use. If 'auto', then the option + ``io.parquet.engine`` is used. The default ``io.parquet.engine`` + behavior is to try 'pyarrow', falling back to 'fastparquet' if + 'pyarrow' is unavailable. compression : {'snappy', 'gzip', 'brotli', None}, default 'snappy' Name of the compression to use. Use ``None`` for no compression. kwargs @@ -271,9 +272,10 @@ def read_parquet(path, engine='auto', columns=None, **kwargs): .. versionadded 0.21.1 engine : {'auto', 'pyarrow', 'fastparquet'}, default 'auto' - Parquet reader library to use. If 'auto', then the option - 'io.parquet.engine' is used. If 'auto', then the first - library to be installed is used. + Parquet library to use. If 'auto', then the option + ``io.parquet.engine`` is used. The default ``io.parquet.engine`` + behavior is to try 'pyarrow', falling back to 'fastparquet' if + 'pyarrow' is unavailable. kwargs are passed to the engine Returns
Corrects an ambiguous description in the documentation for `pandas.DataFrame.to_parquet`. - [x] closes #19662 - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] tests added / passed - [x] whatsnew entry
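Usage under the clarified wording, as a quick sketch (assumes pyarrow or fastparquet is installed):

```python
import pandas as pd

df = pd.DataFrame({"a": [1, 2, 3]})
# engine='auto' consults the io.parquet.engine option, whose default
# tries pyarrow first and falls back to fastparquet if unavailable.
df.to_parquet("example.parquet", engine="auto", compression="snappy")
back = pd.read_parquet("example.parquet", engine="auto")
```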
https://api.github.com/repos/pandas-dev/pandas/pulls/19669
2018-02-12T23:17:59Z
2018-02-15T08:17:32Z
2018-02-15T08:17:32Z
2018-02-15T08:17:32Z
TST: split tests for windows into sub-modules
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 1c5cf87d6b39b..9cbd20e35463c 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -2633,7 +2633,7 @@ def _ensure_valid_index(self, value): if not len(self.index) and is_list_like(value): try: value = Series(value) - except: + except ValueError: raise ValueError('Cannot set a frame with no defined index ' 'and a value that cannot be converted to a ' 'Series') diff --git a/pandas/tests/windows/__init__.py b/pandas/tests/windows/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/pandas/tests/windows/test_ewma.py b/pandas/tests/windows/test_ewma.py new file mode 100644 index 0000000000000..199508b856448 --- /dev/null +++ b/pandas/tests/windows/test_ewma.py @@ -0,0 +1,97 @@ +from datetime import datetime + +import numpy as np +import pytest +from numpy.random import randn + +import pandas.core.window as rwindow +import pandas.util.testing as tm +from pandas import DataFrame, Series, bdate_range +from pandas.errors import UnsupportedFunctionCall + +N, K = 100, 10 + + +def assert_equal(left, right): + if isinstance(left, Series): + tm.assert_series_equal(left, right) + else: + tm.assert_frame_equal(left, right) + + +class Base(object): + + _nan_locs = np.arange(20, 40) + _inf_locs = np.array([]) + + def _create_data(self): + arr = randn(N) + arr[self._nan_locs] = np.NaN + + self.arr = arr + self.rng = bdate_range(datetime(2009, 1, 1), periods=N) + self.series = Series(arr.copy(), index=self.rng) + self.frame = DataFrame(randn(N, K), index=self.rng, + columns=np.arange(K)) + + +class TestEWM(Base): + + def setup_method(self, method): + self._create_data() + + def test_doc_string(self): + + df = DataFrame({'B': [0, 1, 2, np.nan, 4]}) + df + df.ewm(com=0.5).mean() + + def test_constructor(self): + for o in [self.series, self.frame]: + c = o.ewm + + # valid + c(com=0.5) + c(span=1.5) + c(alpha=0.5) + c(halflife=0.75) + c(com=0.5, span=None) + c(alpha=0.5, com=None) + c(halflife=0.75, alpha=None) + + # not valid: mutually exclusive + with pytest.raises(ValueError): + c(com=0.5, alpha=0.5) + with pytest.raises(ValueError): + c(span=1.5, halflife=0.75) + with pytest.raises(ValueError): + c(alpha=0.5, span=1.5) + + # not valid: com < 0 + with pytest.raises(ValueError): + c(com=-0.5) + + # not valid: span < 1 + with pytest.raises(ValueError): + c(span=0.5) + + # not valid: halflife <= 0 + with pytest.raises(ValueError): + c(halflife=0) + + # not valid: alpha <= 0 or alpha > 1 + for alpha in (-0.5, 1.5): + with pytest.raises(ValueError): + c(alpha=alpha) + + def test_numpy_compat(self): + # see gh-12811 + e = rwindow.EWM(Series([2, 4, 6]), alpha=0.5) + + msg = "numpy operations are not valid with window objects" + + for func in ('std', 'mean', 'var'): + tm.assert_raises_regex(UnsupportedFunctionCall, msg, + getattr(e, func), 1, 2, 3) + tm.assert_raises_regex(UnsupportedFunctionCall, msg, + getattr(e, func), dtype=np.float64) diff --git a/pandas/tests/windows/test_expanding.py b/pandas/tests/windows/test_expanding.py new file mode 100644 index 0000000000000..1fbb800092747 --- /dev/null +++ b/pandas/tests/windows/test_expanding.py @@ -0,0 +1,111 @@ +from datetime import datetime + +import numpy as np +import pytest +from numpy.random import randn + +import pandas as pd +import pandas.core.window as rwindow +import pandas.util.testing as tm +from pandas import DataFrame, Series, bdate_range +from pandas.errors import UnsupportedFunctionCall + +N, K = 100, 10 + + +def assert_equal(left, right): + if 
isinstance(left, Series): + tm.assert_series_equal(left, right) + else: + tm.assert_frame_equal(left, right) + + +class Base(object): + + _nan_locs = np.arange(20, 40) + _inf_locs = np.array([]) + + def _create_data(self): + arr = randn(N) + arr[self._nan_locs] = np.NaN + + self.arr = arr + self.rng = bdate_range(datetime(2009, 1, 1), periods=N) + self.series = Series(arr.copy(), index=self.rng) + self.frame = DataFrame(randn(N, K), index=self.rng, + columns=np.arange(K)) + + +class TestExpanding(Base): + + def setup_method(self, method): + self._create_data() + + def test_doc_string(self): + + df = DataFrame({'B': [0, 1, 2, np.nan, 4]}) + df + df.expanding(2).sum() + + def test_constructor(self): + # GH 12669 + + for o in [self.series, self.frame]: + c = o.expanding + + # valid + c(min_periods=1) + c(min_periods=1, center=True) + c(min_periods=1, center=False) + + # not valid + for w in [2., 'foo', np.array([2])]: + with pytest.raises(ValueError): + c(min_periods=w) + with pytest.raises(ValueError): + c(min_periods=1, center=w) + + def test_numpy_compat(self): + # see gh-12811 + e = rwindow.Expanding(Series([2, 4, 6]), window=2) + + msg = "numpy operations are not valid with window objects" + + for func in ('std', 'mean', 'sum', 'max', 'min', 'var'): + tm.assert_raises_regex(UnsupportedFunctionCall, msg, + getattr(e, func), 1, 2, 3) + tm.assert_raises_regex(UnsupportedFunctionCall, msg, + getattr(e, func), dtype=np.float64) + + @pytest.mark.parametrize( + 'expander', + [1, pytest.param('ls', marks=pytest.mark.xfail( + reason='GH 16425 expanding with ' + 'offset not supported'))]) + def test_empty_df_expanding(self, expander): + # GH 15819 Verifies that datetime and integer expanding windows can be + # applied to empty DataFrames + + expected = DataFrame() + result = DataFrame().expanding(expander).sum() + tm.assert_frame_equal(result, expected) + + # Verifies that datetime and integer expanding windows can be applied + # to empty DataFrames with datetime index + expected = DataFrame(index=pd.DatetimeIndex([])) + result = DataFrame( + index=pd.DatetimeIndex([])).expanding(expander).sum() + tm.assert_frame_equal(result, expected) + + def test_missing_minp_zero(self): + # https://github.com/pandas-dev/pandas/pull/18921 + # minp=0 + x = pd.Series([np.nan]) + result = x.expanding(min_periods=0).sum() + expected = pd.Series([0.0]) + tm.assert_series_equal(result, expected) + + # minp=1 + result = x.expanding(min_periods=1).sum() + expected = pd.Series([np.nan]) + tm.assert_series_equal(result, expected) diff --git a/pandas/tests/windows/test_rolling.py b/pandas/tests/windows/test_rolling.py new file mode 100644 index 0000000000000..e7816684a9498 --- /dev/null +++ b/pandas/tests/windows/test_rolling.py @@ -0,0 +1,824 @@ +import pytest + +from datetime import datetime, timedelta +from numpy.random import randn +import numpy as np + +import pandas as pd +from pandas import (Series, DataFrame, bdate_range, + Timestamp, Index) +import pandas.core.window as rwindow +from pandas.errors import UnsupportedFunctionCall +import pandas.util.testing as tm +import pandas.util._test_decorators as td +from pandas.compat import range + +N, K = 100, 10 + + +def assert_equal(left, right): + if isinstance(left, Series): + tm.assert_series_equal(left, right) + else: + tm.assert_frame_equal(left, right) + + +class Base(object): + + _nan_locs = np.arange(20, 40) + _inf_locs = np.array([]) + + def _create_data(self): + arr = randn(N) + arr[self._nan_locs] = np.NaN + + self.arr = arr + self.rng = 
bdate_range(datetime(2009, 1, 1), periods=N) + self.series = Series(arr.copy(), index=self.rng) + self.frame = DataFrame(randn(N, K), index=self.rng, + columns=np.arange(K)) + + +class TestRolling(Base): + + def setup_method(self, method): + self._create_data() + + def test_doc_string(self): + + df = DataFrame({'B': [0, 1, 2, np.nan, 4]}) + df + df.rolling(2).sum() + df.rolling(2, min_periods=1).sum() + + def test_constructor(self): + # GH 12669 + + for o in [self.series, self.frame]: + c = o.rolling + + # valid + c(window=2) + c(window=2, min_periods=1) + c(window=2, min_periods=1, center=True) + c(window=2, min_periods=1, center=False) + + # GH 13383 + c(0) + with pytest.raises(ValueError): + c(-1) + + # not valid + for w in [2., 'foo', np.array([2])]: + with pytest.raises(ValueError): + c(window=w) + with pytest.raises(ValueError): + c(window=2, min_periods=w) + with pytest.raises(ValueError): + c(window=2, min_periods=1, center=w) + + @td.skip_if_no_scipy + def test_constructor_with_win_type(self): + # GH 13383 + for o in [self.series, self.frame]: + c = o.rolling + c(0, win_type='boxcar') + with pytest.raises(ValueError): + c(-1, win_type='boxcar') + + def test_constructor_with_timedelta_window(self): + # GH 15440 + n = 10 + df = DataFrame({'value': np.arange(n)}, + index=pd.date_range('2015-12-24', periods=n, freq="D")) + expected_data = np.append([0., 1.], np.arange(3., 27., 3)) + for window in [timedelta(days=3), pd.Timedelta(days=3)]: + result = df.rolling(window=window).sum() + expected = DataFrame({'value': expected_data}, + index=pd.date_range('2015-12-24', periods=n, + freq="D")) + tm.assert_frame_equal(result, expected) + expected = df.rolling('3D').sum() + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + 'window', [timedelta(days=3), pd.Timedelta(days=3), '3D']) + def test_constructor_with_timedelta_window_and_minperiods(self, window): + # GH 15305 + n = 10 + df = DataFrame({'value': np.arange(n)}, + index=pd.date_range('2017-08-08', periods=n, freq="D")) + expected = DataFrame( + {'value': np.append([np.NaN, 1.], np.arange(3., 27., 3))}, + index=pd.date_range('2017-08-08', periods=n, freq="D")) + result_roll_sum = df.rolling(window=window, min_periods=2).sum() + result_roll_generic = df.rolling(window=window, + min_periods=2).apply(sum) + tm.assert_frame_equal(result_roll_sum, expected) + tm.assert_frame_equal(result_roll_generic, expected) + + def test_numpy_compat(self): + # see gh-12811 + r = rwindow.Rolling(Series([2, 4, 6]), window=2) + + msg = "numpy operations are not valid with window objects" + + for func in ('std', 'mean', 'sum', 'max', 'min', 'var'): + tm.assert_raises_regex(UnsupportedFunctionCall, msg, + getattr(r, func), 1, 2, 3) + tm.assert_raises_regex(UnsupportedFunctionCall, msg, + getattr(r, func), dtype=np.float64) + + def test_closed(self): + df = DataFrame({'A': [0, 1, 2, 3, 4]}) + # closed only allowed for datetimelike + with pytest.raises(ValueError): + df.rolling(window=3, closed='neither') + + @pytest.mark.parametrize('roller', ['1s', 1]) + def tests_empty_df_rolling(self, roller): + # GH 15819 Verifies that datetime and integer rolling windows can be + # applied to empty DataFrames + expected = DataFrame() + result = DataFrame().rolling(roller).sum() + tm.assert_frame_equal(result, expected) + + # Verifies that datetime and integer rolling windows can be applied to + # empty DataFrames with datetime index + expected = DataFrame(index=pd.DatetimeIndex([])) + result = DataFrame(index=pd.DatetimeIndex([])).rolling(roller).sum() 
+ tm.assert_frame_equal(result, expected) + + def test_missing_minp_zero(self): + # https://github.com/pandas-dev/pandas/pull/18921 + # minp=0 + x = pd.Series([np.nan]) + result = x.rolling(1, min_periods=0).sum() + expected = pd.Series([0.0]) + tm.assert_series_equal(result, expected) + + # minp=1 + result = x.rolling(1, min_periods=1).sum() + expected = pd.Series([np.nan]) + tm.assert_series_equal(result, expected) + + def test_missing_minp_zero_variable(self): + # https://github.com/pandas-dev/pandas/pull/18921 + x = pd.Series([np.nan] * 4, + index=pd.DatetimeIndex(['2017-01-01', '2017-01-04', + '2017-01-06', '2017-01-07'])) + result = x.rolling(pd.Timedelta("2d"), min_periods=0).sum() + expected = pd.Series(0.0, index=x.index) + tm.assert_series_equal(result, expected) + + def test_multi_index_names(self): + + # GH 16789, 16825 + cols = pd.MultiIndex.from_product([['A', 'B'], ['C', 'D', 'E']], + names=['1', '2']) + df = DataFrame(np.ones((10, 6)), columns=cols) + result = df.rolling(3).cov() + + tm.assert_index_equal(result.columns, df.columns) + assert result.index.names == [None, '1', '2'] + + +class TestRollingTS(object): + + # rolling time-series friendly + # xref GH13327 + + def setup_method(self, method): + + self.regular = DataFrame({'A': pd.date_range('20130101', + periods=5, + freq='s'), + 'B': range(5)}).set_index('A') + + self.ragged = DataFrame({'B': range(5)}) + self.ragged.index = [Timestamp('20130101 09:00:00'), + Timestamp('20130101 09:00:02'), + Timestamp('20130101 09:00:03'), + Timestamp('20130101 09:00:05'), + Timestamp('20130101 09:00:06')] + + def test_doc_string(self): + + df = DataFrame({'B': [0, 1, 2, np.nan, 4]}, + index=[Timestamp('20130101 09:00:00'), + Timestamp('20130101 09:00:02'), + Timestamp('20130101 09:00:03'), + Timestamp('20130101 09:00:05'), + Timestamp('20130101 09:00:06')]) + df + df.rolling('2s').sum() + + def test_valid(self): + + df = self.regular + + # not a valid freq + with pytest.raises(ValueError): + df.rolling(window='foobar') + + # not a datetimelike index + with pytest.raises(ValueError): + df.reset_index().rolling(window='foobar') + + # non-fixed freqs + for freq in ['2MS', pd.offsets.MonthBegin(2)]: + with pytest.raises(ValueError): + df.rolling(window=freq) + + for freq in ['1D', pd.offsets.Day(2), '2ms']: + df.rolling(window=freq) + + # non-integer min_periods + for minp in [1.0, 'foo', np.array([1, 2, 3])]: + with pytest.raises(ValueError): + df.rolling(window='1D', min_periods=minp) + + # center is not implemented + with pytest.raises(NotImplementedError): + df.rolling(window='1D', center=True) + + def test_on(self): + + df = self.regular + + # not a valid column + with pytest.raises(ValueError): + df.rolling(window='2s', on='foobar') + + # column is valid + df = df.copy() + df['C'] = pd.date_range('20130101', periods=len(df)) + df.rolling(window='2d', on='C').sum() + + # invalid columns + with pytest.raises(ValueError): + df.rolling(window='2d', on='B') + + # ok even though on non-selected + df.rolling(window='2d', on='C').B.sum() + + def test_monotonic_on(self): + + # on/index must be monotonic + df = DataFrame({'A': pd.date_range('20130101', + periods=5, + freq='s'), + 'B': range(5)}) + + assert df.A.is_monotonic + df.rolling('2s', on='A').sum() + + df = df.set_index('A') + assert df.index.is_monotonic + df.rolling('2s').sum() + + # non-monotonic + df.index = reversed(df.index.tolist()) + assert not df.index.is_monotonic + + with pytest.raises(ValueError): + df.rolling('2s').sum() + + df = df.reset_index() + with 
pytest.raises(ValueError): + df.rolling('2s', on='A').sum() + + def test_frame_on(self): + + df = DataFrame({'B': range(5), + 'C': pd.date_range('20130101 09:00:00', + periods=5, + freq='3s')}) + + df['A'] = [Timestamp('20130101 09:00:00'), + Timestamp('20130101 09:00:02'), + Timestamp('20130101 09:00:03'), + Timestamp('20130101 09:00:05'), + Timestamp('20130101 09:00:06')] + + # we are doing simulating using 'on' + expected = (df.set_index('A') + .rolling('2s') + .B + .sum() + .reset_index(drop=True) + ) + + result = (df.rolling('2s', on='A') + .B + .sum() + ) + tm.assert_series_equal(result, expected) + + # test as a frame + # we should be ignoring the 'on' as an aggregation column + # note that the expected is setting, computing, and resetting + # so the columns need to be switched compared + # to the actual result where they are ordered as in the + # original + expected = (df.set_index('A') + .rolling('2s')[['B']] + .sum() + .reset_index()[['B', 'A']] + ) + + result = (df.rolling('2s', on='A')[['B']] + .sum() + ) + tm.assert_frame_equal(result, expected) + + def test_frame_on2(self): + + # using multiple aggregation columns + df = DataFrame({'A': [0, 1, 2, 3, 4], + 'B': [0, 1, 2, np.nan, 4], + 'C': Index([Timestamp('20130101 09:00:00'), + Timestamp('20130101 09:00:02'), + Timestamp('20130101 09:00:03'), + Timestamp('20130101 09:00:05'), + Timestamp('20130101 09:00:06')])}, + columns=['A', 'C', 'B']) + + expected1 = DataFrame({'A': [0., 1, 3, 3, 7], + 'B': [0, 1, 3, np.nan, 4], + 'C': df['C']}, + columns=['A', 'C', 'B']) + + result = df.rolling('2s', on='C').sum() + expected = expected1 + tm.assert_frame_equal(result, expected) + + expected = Series([0, 1, 3, np.nan, 4], name='B') + result = df.rolling('2s', on='C').B.sum() + tm.assert_series_equal(result, expected) + + expected = expected1[['A', 'B', 'C']] + result = df.rolling('2s', on='C')[['A', 'B', 'C']].sum() + tm.assert_frame_equal(result, expected) + + def test_basic_regular(self): + + df = self.regular.copy() + + df.index = pd.date_range('20130101', periods=5, freq='D') + expected = df.rolling(window=1, min_periods=1).sum() + result = df.rolling(window='1D').sum() + tm.assert_frame_equal(result, expected) + + df.index = pd.date_range('20130101', periods=5, freq='2D') + expected = df.rolling(window=1, min_periods=1).sum() + result = df.rolling(window='2D', min_periods=1).sum() + tm.assert_frame_equal(result, expected) + + expected = df.rolling(window=1, min_periods=1).sum() + result = df.rolling(window='2D', min_periods=1).sum() + tm.assert_frame_equal(result, expected) + + expected = df.rolling(window=1).sum() + result = df.rolling(window='2D').sum() + tm.assert_frame_equal(result, expected) + + def test_min_periods(self): + + # compare for min_periods + df = self.regular + + # these slightly different + expected = df.rolling(2, min_periods=1).sum() + result = df.rolling('2s').sum() + tm.assert_frame_equal(result, expected) + + expected = df.rolling(2, min_periods=1).sum() + result = df.rolling('2s', min_periods=1).sum() + tm.assert_frame_equal(result, expected) + + def test_closed(self): + + # xref GH13965 + + df = DataFrame({'A': [1] * 5}, + index=[Timestamp('20130101 09:00:01'), + Timestamp('20130101 09:00:02'), + Timestamp('20130101 09:00:03'), + Timestamp('20130101 09:00:04'), + Timestamp('20130101 09:00:06')]) + + # closed must be 'right', 'left', 'both', 'neither' + with pytest.raises(ValueError): + self.regular.rolling(window='2s', closed="blabla") + + expected = df.copy() + expected["A"] = [1.0, 2, 2, 2, 1] + result = 
df.rolling('2s', closed='right').sum() + tm.assert_frame_equal(result, expected) + + # default should be 'right' + result = df.rolling('2s').sum() + tm.assert_frame_equal(result, expected) + + expected = df.copy() + expected["A"] = [1.0, 2, 3, 3, 2] + result = df.rolling('2s', closed='both').sum() + tm.assert_frame_equal(result, expected) + + expected = df.copy() + expected["A"] = [np.nan, 1.0, 2, 2, 1] + result = df.rolling('2s', closed='left').sum() + tm.assert_frame_equal(result, expected) + + expected = df.copy() + expected["A"] = [np.nan, 1.0, 1, 1, np.nan] + result = df.rolling('2s', closed='neither').sum() + tm.assert_frame_equal(result, expected) + + def test_ragged_sum(self): + + df = self.ragged + result = df.rolling(window='1s', min_periods=1).sum() + expected = df.copy() + expected['B'] = [0.0, 1, 2, 3, 4] + tm.assert_frame_equal(result, expected) + + result = df.rolling(window='2s', min_periods=1).sum() + expected = df.copy() + expected['B'] = [0.0, 1, 3, 3, 7] + tm.assert_frame_equal(result, expected) + + result = df.rolling(window='2s', min_periods=2).sum() + expected = df.copy() + expected['B'] = [np.nan, np.nan, 3, np.nan, 7] + tm.assert_frame_equal(result, expected) + + result = df.rolling(window='3s', min_periods=1).sum() + expected = df.copy() + expected['B'] = [0.0, 1, 3, 5, 7] + tm.assert_frame_equal(result, expected) + + result = df.rolling(window='3s').sum() + expected = df.copy() + expected['B'] = [0.0, 1, 3, 5, 7] + tm.assert_frame_equal(result, expected) + + result = df.rolling(window='4s', min_periods=1).sum() + expected = df.copy() + expected['B'] = [0.0, 1, 3, 6, 9] + tm.assert_frame_equal(result, expected) + + result = df.rolling(window='4s', min_periods=3).sum() + expected = df.copy() + expected['B'] = [np.nan, np.nan, 3, 6, 9] + tm.assert_frame_equal(result, expected) + + result = df.rolling(window='5s', min_periods=1).sum() + expected = df.copy() + expected['B'] = [0.0, 1, 3, 6, 10] + tm.assert_frame_equal(result, expected) + + def test_ragged_mean(self): + + df = self.ragged + result = df.rolling(window='1s', min_periods=1).mean() + expected = df.copy() + expected['B'] = [0.0, 1, 2, 3, 4] + tm.assert_frame_equal(result, expected) + + result = df.rolling(window='2s', min_periods=1).mean() + expected = df.copy() + expected['B'] = [0.0, 1, 1.5, 3.0, 3.5] + tm.assert_frame_equal(result, expected) + + def test_ragged_median(self): + + df = self.ragged + result = df.rolling(window='1s', min_periods=1).median() + expected = df.copy() + expected['B'] = [0.0, 1, 2, 3, 4] + tm.assert_frame_equal(result, expected) + + result = df.rolling(window='2s', min_periods=1).median() + expected = df.copy() + expected['B'] = [0.0, 1, 1.5, 3.0, 3.5] + tm.assert_frame_equal(result, expected) + + def test_ragged_quantile(self): + + df = self.ragged + result = df.rolling(window='1s', min_periods=1).quantile(0.5) + expected = df.copy() + expected['B'] = [0.0, 1, 2, 3, 4] + tm.assert_frame_equal(result, expected) + + result = df.rolling(window='2s', min_periods=1).quantile(0.5) + expected = df.copy() + expected['B'] = [0.0, 1, 1.5, 3.0, 3.5] + tm.assert_frame_equal(result, expected) + + def test_ragged_std(self): + + df = self.ragged + result = df.rolling(window='1s', min_periods=1).std(ddof=0) + expected = df.copy() + expected['B'] = [0.0] * 5 + tm.assert_frame_equal(result, expected) + + result = df.rolling(window='1s', min_periods=1).std(ddof=1) + expected = df.copy() + expected['B'] = [np.nan] * 5 + tm.assert_frame_equal(result, expected) + + result = df.rolling(window='3s', 
min_periods=1).std(ddof=0) + expected = df.copy() + expected['B'] = [0.0] + [0.5] * 4 + tm.assert_frame_equal(result, expected) + + result = df.rolling(window='5s', min_periods=1).std(ddof=1) + expected = df.copy() + expected['B'] = [np.nan, 0.707107, 1.0, 1.0, 1.290994] + tm.assert_frame_equal(result, expected) + + def test_ragged_var(self): + + df = self.ragged + result = df.rolling(window='1s', min_periods=1).var(ddof=0) + expected = df.copy() + expected['B'] = [0.0] * 5 + tm.assert_frame_equal(result, expected) + + result = df.rolling(window='1s', min_periods=1).var(ddof=1) + expected = df.copy() + expected['B'] = [np.nan] * 5 + tm.assert_frame_equal(result, expected) + + result = df.rolling(window='3s', min_periods=1).var(ddof=0) + expected = df.copy() + expected['B'] = [0.0] + [0.25] * 4 + tm.assert_frame_equal(result, expected) + + result = df.rolling(window='5s', min_periods=1).var(ddof=1) + expected = df.copy() + expected['B'] = [np.nan, 0.5, 1.0, 1.0, 1 + 2 / 3.] + tm.assert_frame_equal(result, expected) + + def test_ragged_skew(self): + + df = self.ragged + result = df.rolling(window='3s', min_periods=1).skew() + expected = df.copy() + expected['B'] = [np.nan] * 5 + tm.assert_frame_equal(result, expected) + + result = df.rolling(window='5s', min_periods=1).skew() + expected = df.copy() + expected['B'] = [np.nan] * 2 + [0.0, 0.0, 0.0] + tm.assert_frame_equal(result, expected) + + def test_ragged_kurt(self): + + df = self.ragged + result = df.rolling(window='3s', min_periods=1).kurt() + expected = df.copy() + expected['B'] = [np.nan] * 5 + tm.assert_frame_equal(result, expected) + + result = df.rolling(window='5s', min_periods=1).kurt() + expected = df.copy() + expected['B'] = [np.nan] * 4 + [-1.2] + tm.assert_frame_equal(result, expected) + + def test_ragged_count(self): + + df = self.ragged + result = df.rolling(window='1s', min_periods=1).count() + expected = df.copy() + expected['B'] = [1.0, 1, 1, 1, 1] + tm.assert_frame_equal(result, expected) + + df = self.ragged + result = df.rolling(window='1s').count() + tm.assert_frame_equal(result, expected) + + result = df.rolling(window='2s', min_periods=1).count() + expected = df.copy() + expected['B'] = [1.0, 1, 2, 1, 2] + tm.assert_frame_equal(result, expected) + + result = df.rolling(window='2s', min_periods=2).count() + expected = df.copy() + expected['B'] = [np.nan, np.nan, 2, np.nan, 2] + tm.assert_frame_equal(result, expected) + + def test_regular_min(self): + + df = DataFrame({'A': pd.date_range('20130101', + periods=5, + freq='s'), + 'B': [0.0, 1, 2, 3, 4]}).set_index('A') + result = df.rolling('1s').min() + expected = df.copy() + expected['B'] = [0.0, 1, 2, 3, 4] + tm.assert_frame_equal(result, expected) + + df = DataFrame({'A': pd.date_range('20130101', + periods=5, + freq='s'), + 'B': [5, 4, 3, 4, 5]}).set_index('A') + + tm.assert_frame_equal(result, expected) + result = df.rolling('2s').min() + expected = df.copy() + expected['B'] = [5.0, 4, 3, 3, 4] + tm.assert_frame_equal(result, expected) + + result = df.rolling('5s').min() + expected = df.copy() + expected['B'] = [5.0, 4, 3, 3, 3] + tm.assert_frame_equal(result, expected) + + def test_ragged_min(self): + + df = self.ragged + + result = df.rolling(window='1s', min_periods=1).min() + expected = df.copy() + expected['B'] = [0.0, 1, 2, 3, 4] + tm.assert_frame_equal(result, expected) + + result = df.rolling(window='2s', min_periods=1).min() + expected = df.copy() + expected['B'] = [0.0, 1, 1, 3, 3] + tm.assert_frame_equal(result, expected) + + result = 
df.rolling(window='5s', min_periods=1).min() + expected = df.copy() + expected['B'] = [0.0, 0, 0, 1, 1] + tm.assert_frame_equal(result, expected) + + def test_perf_min(self): + + N = 10000 + + dfp = DataFrame({'B': np.random.randn(N)}, + index=pd.date_range('20130101', + periods=N, + freq='s')) + expected = dfp.rolling(2, min_periods=1).min() + result = dfp.rolling('2s').min() + assert ((result - expected) < 0.01).all().bool() + + expected = dfp.rolling(200, min_periods=1).min() + result = dfp.rolling('200s').min() + assert ((result - expected) < 0.01).all().bool() + + def test_ragged_max(self): + + df = self.ragged + + result = df.rolling(window='1s', min_periods=1).max() + expected = df.copy() + expected['B'] = [0.0, 1, 2, 3, 4] + tm.assert_frame_equal(result, expected) + + result = df.rolling(window='2s', min_periods=1).max() + expected = df.copy() + expected['B'] = [0.0, 1, 2, 3, 4] + tm.assert_frame_equal(result, expected) + + result = df.rolling(window='5s', min_periods=1).max() + expected = df.copy() + expected['B'] = [0.0, 1, 2, 3, 4] + tm.assert_frame_equal(result, expected) + + def test_ragged_apply(self): + + df = self.ragged + + f = lambda x: 1 + result = df.rolling(window='1s', min_periods=1).apply(f) + expected = df.copy() + expected['B'] = 1. + tm.assert_frame_equal(result, expected) + + result = df.rolling(window='2s', min_periods=1).apply(f) + expected = df.copy() + expected['B'] = 1. + tm.assert_frame_equal(result, expected) + + result = df.rolling(window='5s', min_periods=1).apply(f) + expected = df.copy() + expected['B'] = 1. + tm.assert_frame_equal(result, expected) + + def test_all(self): + + # simple comparison of integer vs time-based windowing + df = self.regular * 2 + er = df.rolling(window=1) + r = df.rolling(window='1s') + + for f in ['sum', 'mean', 'count', 'median', 'std', + 'var', 'kurt', 'skew', 'min', 'max']: + + result = getattr(r, f)() + expected = getattr(er, f)() + tm.assert_frame_equal(result, expected) + + result = r.quantile(0.5) + expected = er.quantile(0.5) + tm.assert_frame_equal(result, expected) + + result = r.apply(lambda x: 1) + expected = er.apply(lambda x: 1) + tm.assert_frame_equal(result, expected) + + def test_all2(self): + + # more sophisticated comparison of integer vs. 
+ # time-based windowing + df = DataFrame({'B': np.arange(50)}, + index=pd.date_range('20130101', + periods=50, freq='H') + ) + # in-range data + dft = df.between_time("09:00", "16:00") + + r = dft.rolling(window='5H') + + for f in ['sum', 'mean', 'count', 'median', 'std', + 'var', 'kurt', 'skew', 'min', 'max']: + + result = getattr(r, f)() + + # we need to roll the days separately + # to compare with a time-based roll + # finally groupby-apply will return a multi-index + # so we need to drop the day + def agg_by_day(x): + x = x.between_time("09:00", "16:00") + return getattr(x.rolling(5, min_periods=1), f)() + expected = df.groupby(df.index.day).apply( + agg_by_day).reset_index(level=0, drop=True) + + tm.assert_frame_equal(result, expected) + + def test_groupby_monotonic(self): + + # GH 15130 + # we don't need to validate monotonicity when grouping + + data = [ + ['David', '1/1/2015', 100], ['David', '1/5/2015', 500], + ['David', '5/30/2015', 50], ['David', '7/25/2015', 50], + ['Ryan', '1/4/2014', 100], ['Ryan', '1/19/2015', 500], + ['Ryan', '3/31/2016', 50], ['Joe', '7/1/2015', 100], + ['Joe', '9/9/2015', 500], ['Joe', '10/15/2015', 50]] + + df = DataFrame(data=data, columns=['name', 'date', 'amount']) + df['date'] = pd.to_datetime(df['date']) + + expected = df.set_index('date').groupby('name').apply( + lambda x: x.rolling('180D')['amount'].sum()) + result = df.groupby('name').rolling('180D', on='date')['amount'].sum() + tm.assert_series_equal(result, expected) + + def test_non_monotonic(self): + # GH 13966 (similar to #15130, closed by #15175) + + dates = pd.date_range(start='2016-01-01 09:30:00', + periods=20, freq='s') + df = DataFrame({'A': [1] * 20 + [2] * 12 + [3] * 8, + 'B': np.concatenate((dates, dates)), + 'C': np.arange(40)}) + + result = df.groupby('A').rolling('4s', on='B').C.mean() + expected = df.set_index('B').groupby('A').apply( + lambda x: x.rolling('4s')['C'].mean()) + tm.assert_series_equal(result, expected) + + df2 = df.sort_values('B') + result = df2.groupby('A').rolling('4s', on='B').C.mean() + tm.assert_series_equal(result, expected) + + def test_rolling_cov_offset(self): + # GH16058 + + idx = pd.date_range('2017-01-01', periods=24, freq='1h') + ss = Series(np.arange(len(idx)), index=idx) + + result = ss.rolling('2h').cov() + expected = Series([np.nan] + [0.5] * (len(idx) - 1), index=idx) + tm.assert_series_equal(result, expected) + + expected2 = ss.rolling(2, min_periods=1).cov() + tm.assert_series_equal(result, expected2) + + result = ss.rolling('3h').cov() + expected = Series([np.nan, 0.5] + [1.0] * (len(idx) - 2), index=idx) + tm.assert_series_equal(result, expected) + + expected2 = ss.rolling(3, min_periods=1).cov() + tm.assert_series_equal(result, expected2) diff --git a/pandas/tests/test_window.py b/pandas/tests/windows/test_window.py similarity index 93% rename from pandas/tests/test_window.py rename to pandas/tests/windows/test_window.py index dabdb1e8e689c..7b3796503369d 100644 --- a/pandas/tests/test_window.py +++ b/pandas/tests/windows/test_window.py @@ -3,7 +3,7 @@ import warnings from warnings import catch_warnings -from datetime import datetime, timedelta +from datetime import datetime from numpy.random import randn import numpy as np @@ -328,287 +328,6 @@ def test_numpy_compat(self): getattr(w, func), dtype=np.float64) -class TestRolling(Base): - - def setup_method(self, method): - self._create_data() - - def test_doc_string(self): - - df = DataFrame({'B': [0, 1, 2, np.nan, 4]}) - df - df.rolling(2).sum() - df.rolling(2, min_periods=1).sum() - - def 
test_constructor(self): - # GH 12669 - - for o in [self.series, self.frame]: - c = o.rolling - - # valid - c(window=2) - c(window=2, min_periods=1) - c(window=2, min_periods=1, center=True) - c(window=2, min_periods=1, center=False) - - # GH 13383 - c(0) - with pytest.raises(ValueError): - c(-1) - - # not valid - for w in [2., 'foo', np.array([2])]: - with pytest.raises(ValueError): - c(window=w) - with pytest.raises(ValueError): - c(window=2, min_periods=w) - with pytest.raises(ValueError): - c(window=2, min_periods=1, center=w) - - @td.skip_if_no_scipy - def test_constructor_with_win_type(self): - # GH 13383 - for o in [self.series, self.frame]: - c = o.rolling - c(0, win_type='boxcar') - with pytest.raises(ValueError): - c(-1, win_type='boxcar') - - def test_constructor_with_timedelta_window(self): - # GH 15440 - n = 10 - df = DataFrame({'value': np.arange(n)}, - index=pd.date_range('2015-12-24', periods=n, freq="D")) - expected_data = np.append([0., 1.], np.arange(3., 27., 3)) - for window in [timedelta(days=3), pd.Timedelta(days=3)]: - result = df.rolling(window=window).sum() - expected = DataFrame({'value': expected_data}, - index=pd.date_range('2015-12-24', periods=n, - freq="D")) - tm.assert_frame_equal(result, expected) - expected = df.rolling('3D').sum() - tm.assert_frame_equal(result, expected) - - @pytest.mark.parametrize( - 'window', [timedelta(days=3), pd.Timedelta(days=3), '3D']) - def test_constructor_with_timedelta_window_and_minperiods(self, window): - # GH 15305 - n = 10 - df = DataFrame({'value': np.arange(n)}, - index=pd.date_range('2017-08-08', periods=n, freq="D")) - expected = DataFrame( - {'value': np.append([np.NaN, 1.], np.arange(3., 27., 3))}, - index=pd.date_range('2017-08-08', periods=n, freq="D")) - result_roll_sum = df.rolling(window=window, min_periods=2).sum() - result_roll_generic = df.rolling(window=window, - min_periods=2).apply(sum) - tm.assert_frame_equal(result_roll_sum, expected) - tm.assert_frame_equal(result_roll_generic, expected) - - def test_numpy_compat(self): - # see gh-12811 - r = rwindow.Rolling(Series([2, 4, 6]), window=2) - - msg = "numpy operations are not valid with window objects" - - for func in ('std', 'mean', 'sum', 'max', 'min', 'var'): - tm.assert_raises_regex(UnsupportedFunctionCall, msg, - getattr(r, func), 1, 2, 3) - tm.assert_raises_regex(UnsupportedFunctionCall, msg, - getattr(r, func), dtype=np.float64) - - def test_closed(self): - df = DataFrame({'A': [0, 1, 2, 3, 4]}) - # closed only allowed for datetimelike - with pytest.raises(ValueError): - df.rolling(window=3, closed='neither') - - @pytest.mark.parametrize('roller', ['1s', 1]) - def tests_empty_df_rolling(self, roller): - # GH 15819 Verifies that datetime and integer rolling windows can be - # applied to empty DataFrames - expected = DataFrame() - result = DataFrame().rolling(roller).sum() - tm.assert_frame_equal(result, expected) - - # Verifies that datetime and integer rolling windows can be applied to - # empty DataFrames with datetime index - expected = DataFrame(index=pd.DatetimeIndex([])) - result = DataFrame(index=pd.DatetimeIndex([])).rolling(roller).sum() - tm.assert_frame_equal(result, expected) - - def test_missing_minp_zero(self): - # https://github.com/pandas-dev/pandas/pull/18921 - # minp=0 - x = pd.Series([np.nan]) - result = x.rolling(1, min_periods=0).sum() - expected = pd.Series([0.0]) - tm.assert_series_equal(result, expected) - - # minp=1 - result = x.rolling(1, min_periods=1).sum() - expected = pd.Series([np.nan]) - tm.assert_series_equal(result, 
expected) - - def test_missing_minp_zero_variable(self): - # https://github.com/pandas-dev/pandas/pull/18921 - x = pd.Series([np.nan] * 4, - index=pd.DatetimeIndex(['2017-01-01', '2017-01-04', - '2017-01-06', '2017-01-07'])) - result = x.rolling(pd.Timedelta("2d"), min_periods=0).sum() - expected = pd.Series(0.0, index=x.index) - tm.assert_series_equal(result, expected) - - def test_multi_index_names(self): - - # GH 16789, 16825 - cols = pd.MultiIndex.from_product([['A', 'B'], ['C', 'D', 'E']], - names=['1', '2']) - df = DataFrame(np.ones((10, 6)), columns=cols) - result = df.rolling(3).cov() - - tm.assert_index_equal(result.columns, df.columns) - assert result.index.names == [None, '1', '2'] - - -class TestExpanding(Base): - - def setup_method(self, method): - self._create_data() - - def test_doc_string(self): - - df = DataFrame({'B': [0, 1, 2, np.nan, 4]}) - df - df.expanding(2).sum() - - def test_constructor(self): - # GH 12669 - - for o in [self.series, self.frame]: - c = o.expanding - - # valid - c(min_periods=1) - c(min_periods=1, center=True) - c(min_periods=1, center=False) - - # not valid - for w in [2., 'foo', np.array([2])]: - with pytest.raises(ValueError): - c(min_periods=w) - with pytest.raises(ValueError): - c(min_periods=1, center=w) - - def test_numpy_compat(self): - # see gh-12811 - e = rwindow.Expanding(Series([2, 4, 6]), window=2) - - msg = "numpy operations are not valid with window objects" - - for func in ('std', 'mean', 'sum', 'max', 'min', 'var'): - tm.assert_raises_regex(UnsupportedFunctionCall, msg, - getattr(e, func), 1, 2, 3) - tm.assert_raises_regex(UnsupportedFunctionCall, msg, - getattr(e, func), dtype=np.float64) - - @pytest.mark.parametrize( - 'expander', - [1, pytest.param('ls', marks=pytest.mark.xfail( - reason='GH 16425 expanding with ' - 'offset not supported'))]) - def test_empty_df_expanding(self, expander): - # GH 15819 Verifies that datetime and integer expanding windows can be - # applied to empty DataFrames - - expected = DataFrame() - result = DataFrame().expanding(expander).sum() - tm.assert_frame_equal(result, expected) - - # Verifies that datetime and integer expanding windows can be applied - # to empty DataFrames with datetime index - expected = DataFrame(index=pd.DatetimeIndex([])) - result = DataFrame( - index=pd.DatetimeIndex([])).expanding(expander).sum() - tm.assert_frame_equal(result, expected) - - def test_missing_minp_zero(self): - # https://github.com/pandas-dev/pandas/pull/18921 - # minp=0 - x = pd.Series([np.nan]) - result = x.expanding(min_periods=0).sum() - expected = pd.Series([0.0]) - tm.assert_series_equal(result, expected) - - # minp=1 - result = x.expanding(min_periods=1).sum() - expected = pd.Series([np.nan]) - tm.assert_series_equal(result, expected) - - -class TestEWM(Base): - - def setup_method(self, method): - self._create_data() - - def test_doc_string(self): - - df = DataFrame({'B': [0, 1, 2, np.nan, 4]}) - df - df.ewm(com=0.5).mean() - - def test_constructor(self): - for o in [self.series, self.frame]: - c = o.ewm - - # valid - c(com=0.5) - c(span=1.5) - c(alpha=0.5) - c(halflife=0.75) - c(com=0.5, span=None) - c(alpha=0.5, com=None) - c(halflife=0.75, alpha=None) - - # not valid: mutually exclusive - with pytest.raises(ValueError): - c(com=0.5, alpha=0.5) - with pytest.raises(ValueError): - c(span=1.5, halflife=0.75) - with pytest.raises(ValueError): - c(alpha=0.5, span=1.5) - - # not valid: com < 0 - with pytest.raises(ValueError): - c(com=-0.5) - - # not valid: span < 1 - with pytest.raises(ValueError): - 
c(span=0.5) - - # not valid: halflife <= 0 - with pytest.raises(ValueError): - c(halflife=0) - - # not valid: alpha <= 0 or alpha > 1 - for alpha in (-0.5, 1.5): - with pytest.raises(ValueError): - c(alpha=alpha) - - def test_numpy_compat(self): - # see gh-12811 - e = rwindow.EWM(Series([2, 4, 6]), alpha=0.5) - - msg = "numpy operations are not valid with window objects" - - for func in ('std', 'mean', 'var'): - tm.assert_raises_regex(UnsupportedFunctionCall, msg, - getattr(e, func), 1, 2, 3) - tm.assert_raises_regex(UnsupportedFunctionCall, msg, - getattr(e, func), dtype=np.float64) - - # gh-12373 : rolling functions error on float32 data # make sure rolling functions works for different dtypes #
Splitting the test_window.py file into the files test_rolling.py, test_expanding.py and test_ewma.py.

- [x] closes #19228
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/19667
2018-02-12T20:41:23Z
2018-07-08T16:08:25Z
null
2018-07-08T16:08:25Z
enable multivalues insert
diff --git a/doc/source/io.rst b/doc/source/io.rst
index 0b9a610b50d7d..93f5c5bea53b4 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -4711,6 +4711,12 @@ writes ``data`` to the database in batches of 1000 rows at a time:
 
     data.to_sql('data_chunked', engine, chunksize=1000)
 
+.. note::
+
+    The function :func:`~pandas.DataFrame.to_sql` will perform a multivalue
+    insert if the engine dialect ``supports_multivalues_insert``. This will
+    greatly speed up the insert in some cases.
+
 SQL data types
 ++++++++++++++
 
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt
index feca90aae6237..233816600ec0f 100644
--- a/doc/source/whatsnew/v0.23.0.txt
+++ b/doc/source/whatsnew/v0.23.0.txt
@@ -341,6 +341,8 @@ Other Enhancements
 - :func:`DataFrame.replace` now supports the ``method`` parameter, which can be used to specify the replacement method when ``to_replace`` is a scalar, list or tuple and ``value`` is ``None`` (:issue:`19632`)
 - :meth:`Timestamp.month_name`, :meth:`DatetimeIndex.month_name`, and :meth:`Series.dt.month_name` are now available (:issue:`12805`)
 - :meth:`Timestamp.day_name` and :meth:`DatetimeIndex.day_name` are now available to return day names with a specified locale (:issue:`12806`)
+- :meth:`DataFrame.to_sql` now performs a multivalue insert if the underlying connection supports it rather than inserting row by row.
+  ``SQLAlchemy`` dialects supporting multivalue inserts include: ``mysql``, ``postgresql``, ``sqlite`` and any dialect with ``supports_multivalues_insert``. (:issue:`14315`, :issue:`8953`)
 
 .. _whatsnew_0230.api_breaking:
 
diff --git a/pandas/io/sql.py b/pandas/io/sql.py
index a582d32741ae9..ccb8d2d99d734 100644
--- a/pandas/io/sql.py
+++ b/pandas/io/sql.py
@@ -572,8 +572,29 @@ def create(self):
         else:
             self._execute_create()
 
-    def insert_statement(self):
-        return self.table.insert()
+    def insert_statement(self, data, conn):
+        """
+        Generate tuple of SQLAlchemy insert statement and any arguments
+        to be executed by connection (via `_execute_insert`).
+
+        Parameters
+        ----------
+        data : list of dict
+            The data to be inserted
+        conn : SQLAlchemy connectable(engine/connection)
+            Connection to receive the data
+
+        Returns
+        -------
+        SQLAlchemy statement
+            insert statement
+        *, optional
+            Additional parameters to be passed when executing insert statement
+        """
+        dialect = getattr(conn, 'dialect', None)
+        if dialect and getattr(dialect, 'supports_multivalues_insert', False):
+            return self.table.insert(data),
+        return self.table.insert(), data
 
     def insert_data(self):
         if self.index is not None:
@@ -612,8 +633,9 @@ def insert_data(self):
         return column_names, data_list
 
     def _execute_insert(self, conn, keys, data_iter):
+        """Insert data into this table with database connection"""
         data = [{k: v for k, v in zip(keys, row)} for row in data_iter]
-        conn.execute(self.insert_statement(), data)
+        conn.execute(*self.insert_statement(data, conn))
 
     def insert(self, chunksize=None):
         keys, data_list = self.insert_data()
diff --git a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py
index f3ab74d37a2bc..4530cc9d2fba9 100644
--- a/pandas/tests/io/test_sql.py
+++ b/pandas/tests/io/test_sql.py
@@ -1665,6 +1665,29 @@ class Temporary(Base):
 
         tm.assert_frame_equal(df, expected)
 
+    def test_insert_multivalues(self):
+        # issues addressed
+        # https://github.com/pandas-dev/pandas/issues/14315
+        # https://github.com/pandas-dev/pandas/issues/8953
+
+        db = sql.SQLDatabase(self.conn)
+        df = DataFrame({'A': [1, 0, 0], 'B': [1.1, 0.2, 4.3]})
+        table = sql.SQLTable("test_table", db, frame=df)
+        data = [
+            {'A': 1, 'B': 0.46},
+            {'A': 0, 'B': -2.06}
+        ]
+        statement = table.insert_statement(data, conn=self.conn)[0]
+
+        if self.supports_multivalues_insert:
+            assert statement.parameters == data, (
+                'insert statement should be multivalues'
+            )
+        else:
+            assert statement.parameters is None, (
+                'insert statement should not be multivalues'
+            )
+
 
 class _TestSQLAlchemyConn(_EngineToConnMixin, _TestSQLAlchemy):
 
@@ -1679,6 +1702,7 @@ class _TestSQLiteAlchemy(object):
     """
 
     flavor = 'sqlite'
+    supports_multivalues_insert = True
 
     @classmethod
     def connect(cls):
@@ -1727,6 +1751,7 @@ class _TestMySQLAlchemy(object):
     """
 
     flavor = 'mysql'
+    supports_multivalues_insert = True
 
     @classmethod
     def connect(cls):
@@ -1796,6 +1821,7 @@ class _TestPostgreSQLAlchemy(object):
     """
 
     flavor = 'postgresql'
+    supports_multivalues_insert = True
 
     @classmethod
     def connect(cls):
### Summary

Currently when pushing a dataframe to a database, rows are inserted one by one. This change enables multivalues inserts where the database dialect supports them; a sketch of the idea follows the references below.

TODO

- [x] release note
- [ ] address chunksize behavior

### Reference

http://docs.sqlalchemy.org/en/rel_0_9/core/dml.html?highlight=insert%20values#sqlalchemy.sql.expression.Insert.values
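For illustration, here is a minimal standalone sketch of the same dispatch outside of pandas (assuming SQLAlchemy 1.x, where `table.insert(values)` still accepts the rows positionally; the in-memory `demo` table and its columns are made up for this example):

```python
import sqlalchemy as sa

# Throwaway in-memory table, just to have something to insert into.
engine = sa.create_engine('sqlite://')  # the sqlite dialect supports multivalues inserts
meta = sa.MetaData()
table = sa.Table('demo', meta,
                 sa.Column('A', sa.Integer),
                 sa.Column('B', sa.Float))
meta.create_all(engine)

data = [{'A': 1, 'B': 0.46}, {'A': 0, 'B': -2.06}]

with engine.connect() as conn:
    if getattr(conn.dialect, 'supports_multivalues_insert', False):
        # Single INSERT ... VALUES (...), (...) statement carrying all rows.
        conn.execute(table.insert(data))
    else:
        # Fallback: one parameterized INSERT executed per row (executemany).
        conn.execute(table.insert(), data)
```

Returning either `(statement,)` or `(statement, data)` from `insert_statement` is what lets `_execute_insert` stay a single `conn.execute(*...)` call for both paths.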
https://api.github.com/repos/pandas-dev/pandas/pulls/19664
2018-02-12T19:47:31Z
2018-03-07T21:54:46Z
2018-03-07T21:54:46Z
2018-05-17T11:28:07Z
DOC: ignore Panel deprecation warnings during doc build
diff --git a/doc/source/conf.py b/doc/source/conf.py index c188f83f80250..7c4edd0486636 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -15,6 +15,8 @@ import re import inspect import importlib +import warnings + from pandas.compat import u, PY3 try: @@ -375,6 +377,13 @@ 'wiki': ('https://github.com/pandas-dev/pandas/wiki/%s', 'wiki ')} + +# ignore all deprecation warnings from Panel during doc build +# (to avoid the need to add :okwarning: in many places) +warnings.filterwarnings("ignore", message="\nPanel is deprecated", + category=FutureWarning) + + ipython_exec_lines = [ 'import numpy as np', 'import pandas as pd',
The doc build log is currently full of Panel deprecation warnings. To avoid having to put `:okwarning:` on every code block that uses Panel, I put a general `filterwarnings` call for this warning in the conf.py file, which hopefully works. (An alternative would be to put this at the top of the .rst file itself, in a suppressed ipython code block.)
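As a quick standalone sanity check (not part of this PR), the stdlib behaviour the conf.py change relies on can be exercised directly; the warning text below is abbreviated from the real Panel message:

```python
import warnings

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    # Same filter as added to doc/source/conf.py; `message` is a regex
    # that must match the start of the warning text.
    warnings.filterwarnings("ignore", message="\nPanel is deprecated",
                            category=FutureWarning)
    warnings.warn("\nPanel is deprecated and will be removed in a "
                  "future version", FutureWarning)

assert caught == []  # the Panel warning was swallowed
```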
https://api.github.com/repos/pandas-dev/pandas/pulls/19663
2018-02-12T19:26:48Z
2018-02-13T19:01:43Z
2018-02-13T19:01:43Z
2018-02-13T19:01:46Z
collect index formatting tests
diff --git a/pandas/tests/indexes/datetimes/test_formats.py b/pandas/tests/indexes/datetimes/test_formats.py index ea2731f66f0ef..0d1a9e65ce6c6 100644 --- a/pandas/tests/indexes/datetimes/test_formats.py +++ b/pandas/tests/indexes/datetimes/test_formats.py @@ -1,6 +1,10 @@ -from pandas import DatetimeIndex +from datetime import datetime +from pandas import DatetimeIndex, Series import numpy as np +import dateutil.tz +import pytz +import pytest import pandas.util.testing as tm import pandas as pd @@ -45,3 +49,172 @@ def test_to_native_types(): result = index.to_native_types(na_rep='pandas') tm.assert_numpy_array_equal(result, expected) + + +class TestDatetimeIndexRendering(object): + def test_dti_repr_short(self): + dr = pd.date_range(start='1/1/2012', periods=1) + repr(dr) + + dr = pd.date_range(start='1/1/2012', periods=2) + repr(dr) + + dr = pd.date_range(start='1/1/2012', periods=3) + repr(dr) + + @pytest.mark.parametrize('method', ['__repr__', '__unicode__', '__str__']) + def test_dti_representation(self, method): + idxs = [] + idxs.append(DatetimeIndex([], freq='D')) + idxs.append(DatetimeIndex(['2011-01-01'], freq='D')) + idxs.append(DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')) + idxs.append(DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], + freq='D')) + idxs.append(DatetimeIndex( + ['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00' + ], freq='H', tz='Asia/Tokyo')) + idxs.append(DatetimeIndex( + ['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='US/Eastern')) + idxs.append(DatetimeIndex( + ['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='UTC')) + + exp = [] + exp.append("""DatetimeIndex([], dtype='datetime64[ns]', freq='D')""") + exp.append("DatetimeIndex(['2011-01-01'], dtype='datetime64[ns]', " + "freq='D')") + exp.append("DatetimeIndex(['2011-01-01', '2011-01-02'], " + "dtype='datetime64[ns]', freq='D')") + exp.append("DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], " + "dtype='datetime64[ns]', freq='D')") + exp.append("DatetimeIndex(['2011-01-01 09:00:00+09:00', " + "'2011-01-01 10:00:00+09:00', '2011-01-01 11:00:00+09:00']" + ", dtype='datetime64[ns, Asia/Tokyo]', freq='H')") + exp.append("DatetimeIndex(['2011-01-01 09:00:00-05:00', " + "'2011-01-01 10:00:00-05:00', 'NaT'], " + "dtype='datetime64[ns, US/Eastern]', freq=None)") + exp.append("DatetimeIndex(['2011-01-01 09:00:00+00:00', " + "'2011-01-01 10:00:00+00:00', 'NaT'], " + "dtype='datetime64[ns, UTC]', freq=None)""") + + with pd.option_context('display.width', 300): + for indx, expected in zip(idxs, exp): + result = getattr(indx, method)() + assert result == expected + + def test_dti_representation_to_series(self): + idx1 = DatetimeIndex([], freq='D') + idx2 = DatetimeIndex(['2011-01-01'], freq='D') + idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D') + idx4 = DatetimeIndex( + ['2011-01-01', '2011-01-02', '2011-01-03'], freq='D') + idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', + '2011-01-01 11:00'], freq='H', tz='Asia/Tokyo') + idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], + tz='US/Eastern') + idx7 = DatetimeIndex(['2011-01-01 09:00', '2011-01-02 10:15']) + + exp1 = """Series([], dtype: datetime64[ns])""" + + exp2 = ("0 2011-01-01\n" + "dtype: datetime64[ns]") + + exp3 = ("0 2011-01-01\n" + "1 2011-01-02\n" + "dtype: datetime64[ns]") + + exp4 = ("0 2011-01-01\n" + "1 2011-01-02\n" + "2 2011-01-03\n" + "dtype: datetime64[ns]") + + exp5 = ("0 2011-01-01 09:00:00+09:00\n" + "1 2011-01-01 10:00:00+09:00\n" + "2 2011-01-01 
11:00:00+09:00\n" + "dtype: datetime64[ns, Asia/Tokyo]") + + exp6 = ("0 2011-01-01 09:00:00-05:00\n" + "1 2011-01-01 10:00:00-05:00\n" + "2 NaT\n" + "dtype: datetime64[ns, US/Eastern]") + + exp7 = ("0 2011-01-01 09:00:00\n" + "1 2011-01-02 10:15:00\n" + "dtype: datetime64[ns]") + + with pd.option_context('display.width', 300): + for idx, expected in zip([idx1, idx2, idx3, idx4, + idx5, idx6, idx7], + [exp1, exp2, exp3, exp4, + exp5, exp6, exp7]): + result = repr(Series(idx)) + assert result == expected + + def test_dti_summary(self): + # GH#9116 + idx1 = DatetimeIndex([], freq='D') + idx2 = DatetimeIndex(['2011-01-01'], freq='D') + idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D') + idx4 = DatetimeIndex( + ['2011-01-01', '2011-01-02', '2011-01-03'], freq='D') + idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', + '2011-01-01 11:00'], + freq='H', tz='Asia/Tokyo') + idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], + tz='US/Eastern') + + exp1 = ("DatetimeIndex: 0 entries\n" + "Freq: D") + + exp2 = ("DatetimeIndex: 1 entries, 2011-01-01 to 2011-01-01\n" + "Freq: D") + + exp3 = ("DatetimeIndex: 2 entries, 2011-01-01 to 2011-01-02\n" + "Freq: D") + + exp4 = ("DatetimeIndex: 3 entries, 2011-01-01 to 2011-01-03\n" + "Freq: D") + + exp5 = ("DatetimeIndex: 3 entries, 2011-01-01 09:00:00+09:00 " + "to 2011-01-01 11:00:00+09:00\n" + "Freq: H") + + exp6 = """DatetimeIndex: 3 entries, 2011-01-01 09:00:00-05:00 to NaT""" + + for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6], + [exp1, exp2, exp3, exp4, exp5, exp6]): + result = idx.summary() + assert result == expected + + def test_dti_business_repr(self): + # only really care that it works + repr(pd.bdate_range(datetime(2009, 1, 1), datetime(2010, 1, 1))) + + def test_dti_business_summary(self): + rng = pd.bdate_range(datetime(2009, 1, 1), datetime(2010, 1, 1)) + rng.summary() + rng[2:2].summary() + + def test_dti_business_summary_pytz(self): + pd.bdate_range('1/1/2005', '1/1/2009', tz=pytz.utc).summary() + + def test_dti_business_summary_dateutil(self): + pd.bdate_range('1/1/2005', '1/1/2009', + tz=dateutil.tz.tzutc()).summary() + + def test_dti_custom_business_repr(self): + # only really care that it works + repr(pd.bdate_range(datetime(2009, 1, 1), datetime(2010, 1, 1), + freq='C')) + + def test_dti_custom_business_summary(self): + rng = pd.bdate_range(datetime(2009, 1, 1), datetime(2010, 1, 1), + freq='C') + rng.summary() + rng[2:2].summary() + + def test_dti_custom_business_summary_pytz(self): + pd.bdate_range('1/1/2005', '1/1/2009', freq='C', tz=pytz.utc).summary() + + def test_dti_custom_business_summary_dateutil(self): + pd.bdate_range('1/1/2005', '1/1/2009', freq='C', + tz=dateutil.tz.tzutc()).summary() diff --git a/pandas/tests/indexes/datetimes/test_misc.py b/pandas/tests/indexes/datetimes/test_misc.py index 4a46c3b04bbad..2013b5e6cd6dd 100644 --- a/pandas/tests/indexes/datetimes/test_misc.py +++ b/pandas/tests/indexes/datetimes/test_misc.py @@ -83,16 +83,6 @@ def test_range_edges(self): '1970-01-03', '1970-01-04']) tm.assert_index_equal(idx, exp) - def test_datetimeindex_repr_short(self): - dr = date_range(start='1/1/2012', periods=1) - repr(dr) - - dr = date_range(start='1/1/2012', periods=2) - repr(dr) - - dr = date_range(start='1/1/2012', periods=3) - repr(dr) - class TestDatetime64(object): diff --git a/pandas/tests/indexes/datetimes/test_ops.py b/pandas/tests/indexes/datetimes/test_ops.py index bc43b427fe0aa..b42cd454803b8 100644 --- a/pandas/tests/indexes/datetimes/test_ops.py +++ 
b/pandas/tests/indexes/datetimes/test_ops.py @@ -1,6 +1,4 @@ -import pytz import pytest -import dateutil import warnings import numpy as np from datetime import datetime @@ -153,130 +151,6 @@ def test_repeat(self): tm.assert_raises_regex(ValueError, msg, np.repeat, rng, reps, axis=1) - def test_representation(self): - - idx = [] - idx.append(DatetimeIndex([], freq='D')) - idx.append(DatetimeIndex(['2011-01-01'], freq='D')) - idx.append(DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')) - idx.append(DatetimeIndex( - ['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')) - idx.append(DatetimeIndex( - ['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00' - ], freq='H', tz='Asia/Tokyo')) - idx.append(DatetimeIndex( - ['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='US/Eastern')) - idx.append(DatetimeIndex( - ['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='UTC')) - - exp = [] - exp.append("""DatetimeIndex([], dtype='datetime64[ns]', freq='D')""") - exp.append("DatetimeIndex(['2011-01-01'], dtype='datetime64[ns]', " - "freq='D')") - exp.append("DatetimeIndex(['2011-01-01', '2011-01-02'], " - "dtype='datetime64[ns]', freq='D')") - exp.append("DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], " - "dtype='datetime64[ns]', freq='D')") - exp.append("DatetimeIndex(['2011-01-01 09:00:00+09:00', " - "'2011-01-01 10:00:00+09:00', '2011-01-01 11:00:00+09:00']" - ", dtype='datetime64[ns, Asia/Tokyo]', freq='H')") - exp.append("DatetimeIndex(['2011-01-01 09:00:00-05:00', " - "'2011-01-01 10:00:00-05:00', 'NaT'], " - "dtype='datetime64[ns, US/Eastern]', freq=None)") - exp.append("DatetimeIndex(['2011-01-01 09:00:00+00:00', " - "'2011-01-01 10:00:00+00:00', 'NaT'], " - "dtype='datetime64[ns, UTC]', freq=None)""") - - with pd.option_context('display.width', 300): - for indx, expected in zip(idx, exp): - for func in ['__repr__', '__unicode__', '__str__']: - result = getattr(indx, func)() - assert result == expected - - def test_representation_to_series(self): - idx1 = DatetimeIndex([], freq='D') - idx2 = DatetimeIndex(['2011-01-01'], freq='D') - idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D') - idx4 = DatetimeIndex( - ['2011-01-01', '2011-01-02', '2011-01-03'], freq='D') - idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', - '2011-01-01 11:00'], freq='H', tz='Asia/Tokyo') - idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], - tz='US/Eastern') - idx7 = DatetimeIndex(['2011-01-01 09:00', '2011-01-02 10:15']) - - exp1 = """Series([], dtype: datetime64[ns])""" - - exp2 = ("0 2011-01-01\n" - "dtype: datetime64[ns]") - - exp3 = ("0 2011-01-01\n" - "1 2011-01-02\n" - "dtype: datetime64[ns]") - - exp4 = ("0 2011-01-01\n" - "1 2011-01-02\n" - "2 2011-01-03\n" - "dtype: datetime64[ns]") - - exp5 = ("0 2011-01-01 09:00:00+09:00\n" - "1 2011-01-01 10:00:00+09:00\n" - "2 2011-01-01 11:00:00+09:00\n" - "dtype: datetime64[ns, Asia/Tokyo]") - - exp6 = ("0 2011-01-01 09:00:00-05:00\n" - "1 2011-01-01 10:00:00-05:00\n" - "2 NaT\n" - "dtype: datetime64[ns, US/Eastern]") - - exp7 = ("0 2011-01-01 09:00:00\n" - "1 2011-01-02 10:15:00\n" - "dtype: datetime64[ns]") - - with pd.option_context('display.width', 300): - for idx, expected in zip([idx1, idx2, idx3, idx4, - idx5, idx6, idx7], - [exp1, exp2, exp3, exp4, - exp5, exp6, exp7]): - result = repr(Series(idx)) - assert result == expected - - def test_summary(self): - # GH9116 - idx1 = DatetimeIndex([], freq='D') - idx2 = DatetimeIndex(['2011-01-01'], freq='D') - idx3 = DatetimeIndex(['2011-01-01', 
'2011-01-02'], freq='D') - idx4 = DatetimeIndex( - ['2011-01-01', '2011-01-02', '2011-01-03'], freq='D') - idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', - '2011-01-01 11:00'], - freq='H', tz='Asia/Tokyo') - idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], - tz='US/Eastern') - - exp1 = ("DatetimeIndex: 0 entries\n" - "Freq: D") - - exp2 = ("DatetimeIndex: 1 entries, 2011-01-01 to 2011-01-01\n" - "Freq: D") - - exp3 = ("DatetimeIndex: 2 entries, 2011-01-01 to 2011-01-02\n" - "Freq: D") - - exp4 = ("DatetimeIndex: 3 entries, 2011-01-01 to 2011-01-03\n" - "Freq: D") - - exp5 = ("DatetimeIndex: 3 entries, 2011-01-01 09:00:00+09:00 " - "to 2011-01-01 11:00:00+09:00\n" - "Freq: H") - - exp6 = """DatetimeIndex: 3 entries, 2011-01-01 09:00:00-05:00 to NaT""" - - for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6], - [exp1, exp2, exp3, exp4, exp5, exp6]): - result = idx.summary() - assert result == expected - def test_resolution(self): for freq, expected in zip(['A', 'Q', 'M', 'D', 'H', 'T', 'S', 'L', 'U'], @@ -544,10 +418,6 @@ def test_copy(self): repr(cp) tm.assert_index_equal(cp, self.rng) - def test_repr(self): - # only really care that it works - repr(self.rng) - def test_shift(self): shifted = self.rng.shift(5) assert shifted[0] == self.rng[5] @@ -565,16 +435,6 @@ def test_shift(self): shifted = rng.shift(1, freq=BDay()) assert shifted[0] == rng[0] + BDay() - def test_summary(self): - self.rng.summary() - self.rng[2:2].summary() - - def test_summary_pytz(self): - bdate_range('1/1/2005', '1/1/2009', tz=pytz.utc).summary() - - def test_summary_dateutil(self): - bdate_range('1/1/2005', '1/1/2009', tz=dateutil.tz.tzutc()).summary() - def test_equals(self): assert not self.rng.equals(list(self.rng)) @@ -612,10 +472,6 @@ def test_copy(self): repr(cp) tm.assert_index_equal(cp, self.rng) - def test_repr(self): - # only really care that it works - repr(self.rng) - def test_shift(self): shifted = self.rng.shift(5) @@ -640,16 +496,5 @@ def test_pickle_unpickle(self): unpickled = tm.round_trip_pickle(self.rng) assert unpickled.offset is not None - def test_summary(self): - self.rng.summary() - self.rng[2:2].summary() - - def test_summary_pytz(self): - bdate_range('1/1/2005', '1/1/2009', freq='C', tz=pytz.utc).summary() - - def test_summary_dateutil(self): - bdate_range('1/1/2005', '1/1/2009', freq='C', - tz=dateutil.tz.tzutc()).summary() - def test_equals(self): assert not self.rng.equals(list(self.rng)) diff --git a/pandas/tests/indexes/period/test_formats.py b/pandas/tests/indexes/period/test_formats.py index 533481ce051f7..b1a1060bf86c4 100644 --- a/pandas/tests/indexes/period/test_formats.py +++ b/pandas/tests/indexes/period/test_formats.py @@ -1,6 +1,7 @@ from pandas import PeriodIndex import numpy as np +import pytest import pandas.util.testing as tm import pandas as pd @@ -46,3 +47,163 @@ def test_to_native_types(): result = index.to_native_types(na_rep='pandas') tm.assert_numpy_array_equal(result, expected) + + +class TestPeriodIndexRendering(object): + @pytest.mark.parametrize('method', ['__repr__', '__unicode__', '__str__']) + def test_representation(self, method): + # GH#7601 + idx1 = PeriodIndex([], freq='D') + idx2 = PeriodIndex(['2011-01-01'], freq='D') + idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D') + idx4 = PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'], + freq='D') + idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A') + idx6 = PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00', 'NaT'], + freq='H') + idx7 = 
pd.period_range('2013Q1', periods=1, freq="Q") + idx8 = pd.period_range('2013Q1', periods=2, freq="Q") + idx9 = pd.period_range('2013Q1', periods=3, freq="Q") + idx10 = PeriodIndex(['2011-01-01', '2011-02-01'], freq='3D') + + exp1 = """PeriodIndex([], dtype='period[D]', freq='D')""" + + exp2 = """PeriodIndex(['2011-01-01'], dtype='period[D]', freq='D')""" + + exp3 = ("PeriodIndex(['2011-01-01', '2011-01-02'], dtype='period[D]', " + "freq='D')") + + exp4 = ("PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'], " + "dtype='period[D]', freq='D')") + + exp5 = ("PeriodIndex(['2011', '2012', '2013'], dtype='period[A-DEC]', " + "freq='A-DEC')") + + exp6 = ("PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00', 'NaT'], " + "dtype='period[H]', freq='H')") + + exp7 = ("PeriodIndex(['2013Q1'], dtype='period[Q-DEC]', " + "freq='Q-DEC')") + + exp8 = ("PeriodIndex(['2013Q1', '2013Q2'], dtype='period[Q-DEC]', " + "freq='Q-DEC')") + + exp9 = ("PeriodIndex(['2013Q1', '2013Q2', '2013Q3'], " + "dtype='period[Q-DEC]', freq='Q-DEC')") + + exp10 = ("PeriodIndex(['2011-01-01', '2011-02-01'], " + "dtype='period[3D]', freq='3D')") + + for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, + idx6, idx7, idx8, idx9, idx10], + [exp1, exp2, exp3, exp4, exp5, + exp6, exp7, exp8, exp9, exp10]): + result = getattr(idx, method)() + assert result == expected + + def test_representation_to_series(self): + # GH#10971 + idx1 = PeriodIndex([], freq='D') + idx2 = PeriodIndex(['2011-01-01'], freq='D') + idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D') + idx4 = PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'], + freq='D') + idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A') + idx6 = PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00', 'NaT'], + freq='H') + + idx7 = pd.period_range('2013Q1', periods=1, freq="Q") + idx8 = pd.period_range('2013Q1', periods=2, freq="Q") + idx9 = pd.period_range('2013Q1', periods=3, freq="Q") + + exp1 = """Series([], dtype: object)""" + + exp2 = """0 2011-01-01 +dtype: object""" + + exp3 = """0 2011-01-01 +1 2011-01-02 +dtype: object""" + + exp4 = """0 2011-01-01 +1 2011-01-02 +2 2011-01-03 +dtype: object""" + + exp5 = """0 2011 +1 2012 +2 2013 +dtype: object""" + + exp6 = """0 2011-01-01 09:00 +1 2012-02-01 10:00 +2 NaT +dtype: object""" + + exp7 = """0 2013Q1 +dtype: object""" + + exp8 = """0 2013Q1 +1 2013Q2 +dtype: object""" + + exp9 = """0 2013Q1 +1 2013Q2 +2 2013Q3 +dtype: object""" + + for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, + idx6, idx7, idx8, idx9], + [exp1, exp2, exp3, exp4, exp5, + exp6, exp7, exp8, exp9]): + result = repr(pd.Series(idx)) + assert result == expected + + def test_summary(self): + # GH#9116 + idx1 = PeriodIndex([], freq='D') + idx2 = PeriodIndex(['2011-01-01'], freq='D') + idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D') + idx4 = PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'], + freq='D') + idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A') + idx6 = PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00', 'NaT'], + freq='H') + + idx7 = pd.period_range('2013Q1', periods=1, freq="Q") + idx8 = pd.period_range('2013Q1', periods=2, freq="Q") + idx9 = pd.period_range('2013Q1', periods=3, freq="Q") + + exp1 = """PeriodIndex: 0 entries +Freq: D""" + + exp2 = """PeriodIndex: 1 entries, 2011-01-01 to 2011-01-01 +Freq: D""" + + exp3 = """PeriodIndex: 2 entries, 2011-01-01 to 2011-01-02 +Freq: D""" + + exp4 = """PeriodIndex: 3 entries, 2011-01-01 to 2011-01-03 +Freq: D""" + + exp5 = """PeriodIndex: 3 entries, 2011 to 2013 +Freq: 
A-DEC""" + + exp6 = """PeriodIndex: 3 entries, 2011-01-01 09:00 to NaT +Freq: H""" + + exp7 = """PeriodIndex: 1 entries, 2013Q1 to 2013Q1 +Freq: Q-DEC""" + + exp8 = """PeriodIndex: 2 entries, 2013Q1 to 2013Q2 +Freq: Q-DEC""" + + exp9 = """PeriodIndex: 3 entries, 2013Q1 to 2013Q3 +Freq: Q-DEC""" + + for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, + idx6, idx7, idx8, idx9], + [exp1, exp2, exp3, exp4, exp5, + exp6, exp7, exp8, exp9]): + result = idx.summary() + assert result == expected diff --git a/pandas/tests/indexes/period/test_indexing.py b/pandas/tests/indexes/period/test_indexing.py index 6cb4226dffc5a..b913934195260 100644 --- a/pandas/tests/indexes/period/test_indexing.py +++ b/pandas/tests/indexes/period/test_indexing.py @@ -14,9 +14,6 @@ class TestGetItem(object): - def setup_method(self, method): - pass - def test_getitem(self): idx1 = pd.period_range('2011-01-01', '2011-01-31', freq='D', name='idx') diff --git a/pandas/tests/indexes/period/test_ops.py b/pandas/tests/indexes/period/test_ops.py index 8745de0c2a7aa..f3cc554c192a8 100644 --- a/pandas/tests/indexes/period/test_ops.py +++ b/pandas/tests/indexes/period/test_ops.py @@ -115,164 +115,6 @@ def test_numpy_minmax(self): tm.assert_raises_regex( ValueError, errmsg, np.argmax, pr, out=0) - def test_representation(self): - # GH 7601 - idx1 = PeriodIndex([], freq='D') - idx2 = PeriodIndex(['2011-01-01'], freq='D') - idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D') - idx4 = PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'], - freq='D') - idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A') - idx6 = PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00', - 'NaT'], freq='H') - idx7 = pd.period_range('2013Q1', periods=1, freq="Q") - idx8 = pd.period_range('2013Q1', periods=2, freq="Q") - idx9 = pd.period_range('2013Q1', periods=3, freq="Q") - idx10 = PeriodIndex(['2011-01-01', '2011-02-01'], freq='3D') - - exp1 = """PeriodIndex([], dtype='period[D]', freq='D')""" - - exp2 = """PeriodIndex(['2011-01-01'], dtype='period[D]', freq='D')""" - - exp3 = ("PeriodIndex(['2011-01-01', '2011-01-02'], dtype='period[D]', " - "freq='D')") - - exp4 = ("PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'], " - "dtype='period[D]', freq='D')") - - exp5 = ("PeriodIndex(['2011', '2012', '2013'], dtype='period[A-DEC]', " - "freq='A-DEC')") - - exp6 = ("PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00', 'NaT'], " - "dtype='period[H]', freq='H')") - - exp7 = ("PeriodIndex(['2013Q1'], dtype='period[Q-DEC]', " - "freq='Q-DEC')") - - exp8 = ("PeriodIndex(['2013Q1', '2013Q2'], dtype='period[Q-DEC]', " - "freq='Q-DEC')") - - exp9 = ("PeriodIndex(['2013Q1', '2013Q2', '2013Q3'], " - "dtype='period[Q-DEC]', freq='Q-DEC')") - - exp10 = ("PeriodIndex(['2011-01-01', '2011-02-01'], " - "dtype='period[3D]', freq='3D')") - - for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, - idx6, idx7, idx8, idx9, idx10], - [exp1, exp2, exp3, exp4, exp5, - exp6, exp7, exp8, exp9, exp10]): - for func in ['__repr__', '__unicode__', '__str__']: - result = getattr(idx, func)() - assert result == expected - - def test_representation_to_series(self): - # GH 10971 - idx1 = PeriodIndex([], freq='D') - idx2 = PeriodIndex(['2011-01-01'], freq='D') - idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D') - idx4 = PeriodIndex(['2011-01-01', '2011-01-02', - '2011-01-03'], freq='D') - idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A') - idx6 = PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00', - 'NaT'], freq='H') - - idx7 = pd.period_range('2013Q1', 
periods=1, freq="Q") - idx8 = pd.period_range('2013Q1', periods=2, freq="Q") - idx9 = pd.period_range('2013Q1', periods=3, freq="Q") - - exp1 = """Series([], dtype: object)""" - - exp2 = """0 2011-01-01 -dtype: object""" - - exp3 = """0 2011-01-01 -1 2011-01-02 -dtype: object""" - - exp4 = """0 2011-01-01 -1 2011-01-02 -2 2011-01-03 -dtype: object""" - - exp5 = """0 2011 -1 2012 -2 2013 -dtype: object""" - - exp6 = """0 2011-01-01 09:00 -1 2012-02-01 10:00 -2 NaT -dtype: object""" - - exp7 = """0 2013Q1 -dtype: object""" - - exp8 = """0 2013Q1 -1 2013Q2 -dtype: object""" - - exp9 = """0 2013Q1 -1 2013Q2 -2 2013Q3 -dtype: object""" - - for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, - idx6, idx7, idx8, idx9], - [exp1, exp2, exp3, exp4, exp5, - exp6, exp7, exp8, exp9]): - result = repr(pd.Series(idx)) - assert result == expected - - def test_summary(self): - # GH9116 - idx1 = PeriodIndex([], freq='D') - idx2 = PeriodIndex(['2011-01-01'], freq='D') - idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D') - idx4 = PeriodIndex( - ['2011-01-01', '2011-01-02', '2011-01-03'], freq='D') - idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A') - idx6 = PeriodIndex( - ['2011-01-01 09:00', '2012-02-01 10:00', 'NaT'], freq='H') - - idx7 = pd.period_range('2013Q1', periods=1, freq="Q") - idx8 = pd.period_range('2013Q1', periods=2, freq="Q") - idx9 = pd.period_range('2013Q1', periods=3, freq="Q") - - exp1 = """PeriodIndex: 0 entries -Freq: D""" - - exp2 = """PeriodIndex: 1 entries, 2011-01-01 to 2011-01-01 -Freq: D""" - - exp3 = """PeriodIndex: 2 entries, 2011-01-01 to 2011-01-02 -Freq: D""" - - exp4 = """PeriodIndex: 3 entries, 2011-01-01 to 2011-01-03 -Freq: D""" - - exp5 = """PeriodIndex: 3 entries, 2011 to 2013 -Freq: A-DEC""" - - exp6 = """PeriodIndex: 3 entries, 2011-01-01 09:00 to NaT -Freq: H""" - - exp7 = """PeriodIndex: 1 entries, 2013Q1 to 2013Q1 -Freq: Q-DEC""" - - exp8 = """PeriodIndex: 2 entries, 2013Q1 to 2013Q2 -Freq: Q-DEC""" - - exp9 = """PeriodIndex: 3 entries, 2013Q1 to 2013Q3 -Freq: Q-DEC""" - - for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, - idx6, idx7, idx8, idx9], - [exp1, exp2, exp3, exp4, exp5, - exp6, exp7, exp8, exp9]): - result = idx.summary() - assert result == expected - def test_resolution(self): for freq, expected in zip(['A', 'Q', 'M', 'D', 'H', 'T', 'S', 'L', 'U'], diff --git a/pandas/tests/indexes/timedeltas/test_formats.py b/pandas/tests/indexes/timedeltas/test_formats.py new file mode 100644 index 0000000000000..a8375459d74e4 --- /dev/null +++ b/pandas/tests/indexes/timedeltas/test_formats.py @@ -0,0 +1,96 @@ +# -*- coding: utf-8 -*- + +import pytest + +import pandas as pd +from pandas import TimedeltaIndex + + +class TestTimedeltaIndexRendering(object): + @pytest.mark.parametrize('method', ['__repr__', '__unicode__', '__str__']) + def test_representation(self, method): + idx1 = TimedeltaIndex([], freq='D') + idx2 = TimedeltaIndex(['1 days'], freq='D') + idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D') + idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D') + idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days']) + + exp1 = """TimedeltaIndex([], dtype='timedelta64[ns]', freq='D')""" + + exp2 = ("TimedeltaIndex(['1 days'], dtype='timedelta64[ns]', " + "freq='D')") + + exp3 = ("TimedeltaIndex(['1 days', '2 days'], " + "dtype='timedelta64[ns]', freq='D')") + + exp4 = ("TimedeltaIndex(['1 days', '2 days', '3 days'], " + "dtype='timedelta64[ns]', freq='D')") + + exp5 = ("TimedeltaIndex(['1 days 00:00:01', '2 days 00:00:00', " + 
"'3 days 00:00:00'], dtype='timedelta64[ns]', freq=None)") + + with pd.option_context('display.width', 300): + for idx, expected in zip([idx1, idx2, idx3, idx4, idx5], + [exp1, exp2, exp3, exp4, exp5]): + result = getattr(idx, method)() + assert result == expected + + def test_representation_to_series(self): + idx1 = TimedeltaIndex([], freq='D') + idx2 = TimedeltaIndex(['1 days'], freq='D') + idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D') + idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D') + idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days']) + + exp1 = """Series([], dtype: timedelta64[ns])""" + + exp2 = ("0 1 days\n" + "dtype: timedelta64[ns]") + + exp3 = ("0 1 days\n" + "1 2 days\n" + "dtype: timedelta64[ns]") + + exp4 = ("0 1 days\n" + "1 2 days\n" + "2 3 days\n" + "dtype: timedelta64[ns]") + + exp5 = ("0 1 days 00:00:01\n" + "1 2 days 00:00:00\n" + "2 3 days 00:00:00\n" + "dtype: timedelta64[ns]") + + with pd.option_context('display.width', 300): + for idx, expected in zip([idx1, idx2, idx3, idx4, idx5], + [exp1, exp2, exp3, exp4, exp5]): + result = repr(pd.Series(idx)) + assert result == expected + + def test_summary(self): + # GH#9116 + idx1 = TimedeltaIndex([], freq='D') + idx2 = TimedeltaIndex(['1 days'], freq='D') + idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D') + idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D') + idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days']) + + exp1 = ("TimedeltaIndex: 0 entries\n" + "Freq: D") + + exp2 = ("TimedeltaIndex: 1 entries, 1 days to 1 days\n" + "Freq: D") + + exp3 = ("TimedeltaIndex: 2 entries, 1 days to 2 days\n" + "Freq: D") + + exp4 = ("TimedeltaIndex: 3 entries, 1 days to 3 days\n" + "Freq: D") + + exp5 = ("TimedeltaIndex: 3 entries, 1 days 00:00:01 to 3 days " + "00:00:00") + + for idx, expected in zip([idx1, idx2, idx3, idx4, idx5], + [exp1, exp2, exp3, exp4, exp5]): + result = idx.summary() + assert result == expected diff --git a/pandas/tests/indexes/timedeltas/test_ops.py b/pandas/tests/indexes/timedeltas/test_ops.py index d154aa2172ef7..690ba66b6f5ef 100644 --- a/pandas/tests/indexes/timedeltas/test_ops.py +++ b/pandas/tests/indexes/timedeltas/test_ops.py @@ -73,94 +73,6 @@ def test_numpy_minmax(self): tm.assert_raises_regex( ValueError, errmsg, np.argmax, td, out=0) - def test_representation(self): - idx1 = TimedeltaIndex([], freq='D') - idx2 = TimedeltaIndex(['1 days'], freq='D') - idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D') - idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D') - idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days']) - - exp1 = """TimedeltaIndex([], dtype='timedelta64[ns]', freq='D')""" - - exp2 = ("TimedeltaIndex(['1 days'], dtype='timedelta64[ns]', " - "freq='D')") - - exp3 = ("TimedeltaIndex(['1 days', '2 days'], " - "dtype='timedelta64[ns]', freq='D')") - - exp4 = ("TimedeltaIndex(['1 days', '2 days', '3 days'], " - "dtype='timedelta64[ns]', freq='D')") - - exp5 = ("TimedeltaIndex(['1 days 00:00:01', '2 days 00:00:00', " - "'3 days 00:00:00'], dtype='timedelta64[ns]', freq=None)") - - with pd.option_context('display.width', 300): - for idx, expected in zip([idx1, idx2, idx3, idx4, idx5], - [exp1, exp2, exp3, exp4, exp5]): - for func in ['__repr__', '__unicode__', '__str__']: - result = getattr(idx, func)() - assert result == expected - - def test_representation_to_series(self): - idx1 = TimedeltaIndex([], freq='D') - idx2 = TimedeltaIndex(['1 days'], freq='D') - idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D') - 
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D') - idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days']) - - exp1 = """Series([], dtype: timedelta64[ns])""" - - exp2 = """0 1 days -dtype: timedelta64[ns]""" - - exp3 = """0 1 days -1 2 days -dtype: timedelta64[ns]""" - - exp4 = """0 1 days -1 2 days -2 3 days -dtype: timedelta64[ns]""" - - exp5 = """0 1 days 00:00:01 -1 2 days 00:00:00 -2 3 days 00:00:00 -dtype: timedelta64[ns]""" - - with pd.option_context('display.width', 300): - for idx, expected in zip([idx1, idx2, idx3, idx4, idx5], - [exp1, exp2, exp3, exp4, exp5]): - result = repr(pd.Series(idx)) - assert result == expected - - def test_summary(self): - # GH9116 - idx1 = TimedeltaIndex([], freq='D') - idx2 = TimedeltaIndex(['1 days'], freq='D') - idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D') - idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D') - idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days']) - - exp1 = ("TimedeltaIndex: 0 entries\n" - "Freq: D") - - exp2 = ("TimedeltaIndex: 1 entries, 1 days to 1 days\n" - "Freq: D") - - exp3 = ("TimedeltaIndex: 2 entries, 1 days to 2 days\n" - "Freq: D") - - exp4 = ("TimedeltaIndex: 3 entries, 1 days to 3 days\n" - "Freq: D") - - exp5 = ("TimedeltaIndex: 3 entries, 1 days 00:00:01 to 3 days " - "00:00:00") - - for idx, expected in zip([idx1, idx2, idx3, idx4, idx5], - [exp1, exp2, exp3, exp4, exp5]): - result = idx.summary() - assert result == expected - def test_value_counts_unique(self): # GH 7735
It looks like PeriodIndex/TDI/DTI tests are using different rules for what constitutes test_indexing/test_setops. Double-checking before making a PR:

- test_get_loc_* --> test_indexing
- test_get_indexer_* --> test_indexing
- test_insert_* --> test_indexing
- test_where_* --> test_indexing
- test_join_* --> test_setops
- test_append_* --> test_setops
- test_difference_* --> test_setops

Many of these at the moment are in e.g. test_ops or test_period/test_datetime.
https://api.github.com/repos/pandas-dev/pandas/pulls/19661
2018-02-12T17:51:03Z
2018-02-18T16:09:45Z
2018-02-18T16:09:45Z
2018-02-18T18:21:44Z
DOC: spellchecked io.rst
diff --git a/doc/source/io.rst b/doc/source/io.rst index 1785de54b7dd6..7bb34e4d232dd 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -28,8 +28,11 @@ IO Tools (Text, CSV, HDF5, ...) =============================== -The pandas I/O API is a set of top level ``reader`` functions accessed like ``pd.read_csv()`` that generally return a ``pandas`` -object. The corresponding ``writer`` functions are object methods that are accessed like ``df.to_csv()`` +The pandas I/O API is a set of top level ``reader`` functions accessed like +:func:`pandas.read_csv` that generally return a pandas object. The corresponding +``writer`` functions are object methods that are accessed like +:meth:`DataFrame.to_csv`. Below is a table containing available ``readers`` and +``writers``. .. csv-table:: :header: "Format Type", "Data Description", "Reader", "Writer" @@ -65,13 +68,14 @@ CSV & Text files The two workhorse functions for reading text files (a.k.a. flat files) are :func:`read_csv` and :func:`read_table`. They both use the same parsing code to -intelligently convert tabular data into a DataFrame object. See the +intelligently convert tabular data into a ``DataFrame`` object. See the :ref:`cookbook<cookbook.csv>` for some advanced strategies. Parsing options ''''''''''''''' -:func:`read_csv` and :func:`read_table` accept the following arguments: +The functions :func:`read_csv` and :func:`read_table` accept the following +common arguments: Basic +++++ @@ -94,7 +98,7 @@ delimiter : str, default ``None`` delim_whitespace : boolean, default False Specifies whether or not whitespace (e.g. ``' '`` or ``'\t'``) will be used as the delimiter. Equivalent to setting ``sep='\s+'``. - If this option is set to True, nothing should be passed in for the + If this option is set to ``True``, nothing should be passed in for the ``delimiter`` parameter. .. versionadded:: 0.18.1 support for the Python parser. @@ -122,7 +126,7 @@ names : array-like, default ``None`` explicitly pass ``header=None``. Duplicates in this list will cause a ``UserWarning`` to be issued. index_col : int or sequence or ``False``, default ``None`` - Column to use as the row labels of the DataFrame. If a sequence is given, a + Column to use as the row labels of the ``DataFrame``. If a sequence is given, a MultiIndex is used. If you have a malformed file with delimiters at the end of each line, you might consider ``index_col=False`` to force pandas to *not* use the first column as the index (row names). @@ -131,8 +135,8 @@ usecols : array-like or callable, default ``None`` be positional (i.e. integer indices into the document columns) or strings that correspond to column names provided either by the user in `names` or inferred from the document header row(s). For example, a valid array-like - `usecols` parameter would be [0, 1, 2] or ['foo', 'bar', 'baz']. Element - order is ignored, so usecols=[0,1] is the same as [1, 0]. + `usecols` parameter would be ``[0, 1, 2]`` or ``['foo', 'bar', 'baz']``. + Element order is ignored, so ``usecols=[0, 1]`` is the same as ``[1, 0]``. If callable, the callable function will be evaluated against the column names, returning names where the callable function evaluates to True: @@ -145,12 +149,12 @@ usecols : array-like or callable, default ``None`` Using this parameter results in much faster parsing time and lower memory usage. squeeze : boolean, default ``False`` - If the parsed data only contains one column then return a Series. + If the parsed data only contains one column then return a ``Series``. 
prefix : str, default ``None`` Prefix to add to column numbers when no header, e.g. 'X' for X0, X1, ... mangle_dupe_cols : boolean, default ``True`` Duplicate columns will be specified as 'X', 'X.1'...'X.N', rather than 'X'...'X'. - Passing in False will cause data to be overwritten if there are duplicate + Passing in ``False`` will cause data to be overwritten if there are duplicate names in the columns. General Parsing Configuration @@ -197,7 +201,7 @@ low_memory : boolean, default ``True`` Internally process the file in chunks, resulting in lower memory use while parsing, but possibly mixed type inference. To ensure no mixed types either set ``False``, or specify the type with the ``dtype`` parameter. - Note that the entire file is read into a single DataFrame regardless, + Note that the entire file is read into a single ``DataFrame`` regardless, use the ``chunksize`` or ``iterator`` parameter to return the data in chunks. (Only valid with C parser) memory_map : boolean, default False @@ -217,16 +221,16 @@ keep_default_na : boolean, default ``True`` Whether or not to include the default NaN values when parsing the data. Depending on whether `na_values` is passed in, the behavior is as follows: - * If `keep_default_na` is True, and `na_values` are specified, `na_values` + * If `keep_default_na` is ``True``, and `na_values` are specified, `na_values` is appended to the default NaN values used for parsing. - * If `keep_default_na` is True, and `na_values` are not specified, only + * If `keep_default_na` is ``True``, and `na_values` are not specified, only the default NaN values are used for parsing. - * If `keep_default_na` is False, and `na_values` are specified, only + * If `keep_default_na` is ``False``, and `na_values` are specified, only the NaN values specified `na_values` are used for parsing. - * If `keep_default_na` is False, and `na_values` are not specified, no + * If `keep_default_na` is ``False``, and `na_values` are not specified, no strings will be parsed as NaN. - Note that if `na_filter` is passed in as False, the `keep_default_na` and + Note that if `na_filter` is passed in as ``False``, the `keep_default_na` and `na_values` parameters will be ignored. na_filter : boolean, default ``True`` Detect missing value markers (empty strings and the value of na_values). In @@ -341,9 +345,9 @@ Error Handling error_bad_lines : boolean, default ``True`` Lines with too many fields (e.g. a csv line with too many commas) will by - default cause an exception to be raised, and no DataFrame will be returned. If - ``False``, then these "bad lines" will dropped from the DataFrame that is - returned. See :ref:`bad lines <io.bad_lines>` + default cause an exception to be raised, and no ``DataFrame`` will be + returned. If ``False``, then these "bad lines" will be dropped from the + ``DataFrame`` that is returned. See :ref:`bad lines <io.bad_lines>` below. warn_bad_lines : boolean, default ``True`` If error_bad_lines is ``False``, and warn_bad_lines is ``True``, a warning for @@ -354,8 +358,8 @@ warn_bad_lines : boolean, default ``True`` Specifying column data types '''''''''''''''''''''''''''' -You can indicate the data type for the whole DataFrame or -individual columns: +You can indicate the data type for the whole ``DataFrame`` or individual +columns: ..
ipython:: python @@ -368,11 +372,11 @@ individual columns: df = pd.read_csv(StringIO(data), dtype={'b': object, 'c': np.float64}) df.dtypes -Fortunately, ``pandas`` offers more than one way to ensure that your column(s) +Fortunately, pandas offers more than one way to ensure that your column(s) contain only one ``dtype``. If you're unfamiliar with these concepts, you can see :ref:`here<basics.dtypes>` to learn more about dtypes, and :ref:`here<basics.object_conversion>` to learn more about ``object`` conversion in -``pandas``. +pandas. For instance, you can use the ``converters`` argument @@ -395,7 +399,7 @@ dtypes after reading in the data, df2 df2['col_1'].apply(type).value_counts() -which would convert all valid parsing to floats, leaving the invalid parsing +which will convert all valid parsing to floats, leaving the invalid parsing as ``NaN``. Ultimately, how you deal with reading in columns containing mixed dtypes @@ -407,7 +411,7 @@ worth trying. .. versionadded:: 0.20.0 support for the Python parser. - The ``dtype`` option is supported by the 'python' engine + The ``dtype`` option is supported by the 'python' engine. .. note:: In some cases, reading in abnormal data with columns containing mixed dtypes @@ -453,7 +457,8 @@ Specifying Categorical dtype pd.read_csv(StringIO(data)).dtypes pd.read_csv(StringIO(data), dtype='category').dtypes -Individual columns can be parsed as a ``Categorical`` using a dict specification +Individual columns can be parsed as a ``Categorical`` using a dict +specification: .. ipython:: python @@ -551,17 +556,18 @@ If the header is in a row other than the first, pass the row number to Duplicate names parsing ''''''''''''''''''''''' -If the file or header contains duplicate names, pandas by default will deduplicate -these names so as to prevent data overwrite: +If the file or header contains duplicate names, pandas will by default +distinguish between them so as to prevent overwriting data: .. ipython :: python data = 'a,b,a\n0,1,2\n3,4,5' pd.read_csv(StringIO(data)) -There is no more duplicate data because ``mangle_dupe_cols=True`` by default, which modifies -a series of duplicate columns 'X'...'X' to become 'X', 'X.1',...'X.N'. If ``mangle_dupe_cols -=False``, duplicate data can arise: +There is no more duplicate data because ``mangle_dupe_cols=True`` by default, +which modifies a series of duplicate columns 'X', ..., 'X' to become +'X', 'X.1', ..., 'X.N'. If ``mangle_dupe_cols=False``, duplicate data can +arise: .. code-block :: python @@ -716,7 +722,7 @@ result in byte strings being decoded to unicode in the result: Some formats which encode all characters as multiple bytes, like UTF-16, won't parse correctly at all without specifying the encoding. `Full list of Python standard encodings -<https://docs.python.org/3/library/codecs.html#standard-encodings>`_ +<https://docs.python.org/3/library/codecs.html#standard-encodings>`_. .. _io.index_col: @@ -724,7 +730,7 @@ Index columns and trailing delimiters ''''''''''''''''''''''''''''''''''''' If a file has one more column of data than the number of column names, the -first column will be used as the DataFrame's row names: +first column will be used as the ``DataFrame``'s row names: .. ipython:: python @@ -894,30 +900,31 @@ Pandas will try to call the ``date_parser`` function in three different ways. If an exception is raised, the next one is tried: 1. 
``date_parser`` is first called with one or more arrays as arguments, - as defined using `parse_dates` (e.g., ``date_parser(['2013', '2013'], ['1', '2'])``) + as defined using `parse_dates` (e.g., ``date_parser(['2013', '2013'], ['1', '2'])``). 2. If #1 fails, ``date_parser`` is called with all the columns - concatenated row-wise into a single array (e.g., ``date_parser(['2013 1', '2013 2'])``) + concatenated row-wise into a single array (e.g., ``date_parser(['2013 1', '2013 2'])``). 3. If #2 fails, ``date_parser`` is called once for every row with one or more string arguments from the columns indicated with `parse_dates` (e.g., ``date_parser('2013', '1')`` for the first row, ``date_parser('2013', '2')`` - for the second, etc.) + for the second, etc.). Note that performance-wise, you should try these methods of parsing dates in order: -1. Try to infer the format using ``infer_datetime_format=True`` (see section below) +1. Try to infer the format using ``infer_datetime_format=True`` (see section below). 2. If you know the format, use ``pd.to_datetime()``: - ``date_parser=lambda x: pd.to_datetime(x, format=...)`` + ``date_parser=lambda x: pd.to_datetime(x, format=...)``. 3. If you have a really non-standard format, use a custom ``date_parser`` function. For optimal performance, this should be vectorized, i.e., it should accept arrays as arguments. -You can explore the date parsing functionality in ``date_converters.py`` and -add your own. We would love to turn this module into a community supported set -of date/time parsers. To get you started, ``date_converters.py`` contains +You can explore the date parsing functionality in +`date_converters.py <https://github.com/pandas-dev/pandas/blob/master/pandas/io/date_converters.py>`__ +and add your own. We would love to turn this module into a community supported +set of date/time parsers. To get you started, ``date_converters.py`` contains functions to parse dual date and time columns, year/month/day columns, and year/month/day/hour/minute/second columns. It also contains a ``generic_parser`` function so you can curry it with a function that deals with @@ -945,7 +952,7 @@ of strings. So in general, ``infer_datetime_format`` should not have any negative consequences if enabled. Here are some examples of datetime strings that can be guessed (All -representing December 30th, 2011 at 00:00:00) +representing December 30th, 2011 at 00:00:00): - "20111230" - "2011/12/30" @@ -954,7 +961,7 @@ representing December 30th, 2011 at 00:00:00) - "30/Dec/2011 00:00:00" - "30/December/2011 00:00:00" -``infer_datetime_format`` is sensitive to ``dayfirst``. With +Note that ``infer_datetime_format`` is sensitive to ``dayfirst``. With ``dayfirst=True``, it will guess "01/12/2011" to be December 1st. With ``dayfirst=False`` (default) it will guess "01/12/2011" to be January 12th. @@ -1030,7 +1037,7 @@ correctly: with open('tmp.csv', 'w') as fh: fh.write(data) -By default, numbers with a thousands separator will be parsed as strings +By default, numbers with a thousands separator will be parsed as strings: .. ipython:: python @@ -1040,7 +1047,7 @@ By default, numbers with a thousands separator will be parsed as strings df.level.dtype -The ``thousands`` keyword allows integers to be parsed correctly +The ``thousands`` keyword allows integers to be parsed correctly: .. 
ipython:: python @@ -1060,11 +1067,12 @@ The ``thousands`` keyword allows integers to be parsed correctly NA Values ''''''''' -To control which values are parsed as missing values (which are signified by ``NaN``), specify a -string in ``na_values``. If you specify a list of strings, then all values in -it are considered to be missing values. If you specify a number (a ``float``, like ``5.0`` or an ``integer`` like ``5``), -the corresponding equivalent values will also imply a missing value (in this case effectively -``[5.0,5]`` are recognized as ``NaN``. +To control which values are parsed as missing values (which are signified by +``NaN``), specify a string in ``na_values``. If you specify a list of strings, +then all values in it are considered to be missing values. If you specify a +number (a ``float``, like ``5.0`` or an ``integer`` like ``5``), the +corresponding equivalent values will also imply a missing value (in this case +effectively ``[5.0, 5]`` are recognized as ``NaN``). To completely override the default values that are recognized as missing, specify ``keep_default_na=False``. @@ -1073,29 +1081,34 @@ To completely override the default values that are recognized as missing, specif The default ``NaN`` recognized values are ``['-1.#IND', '1.#QNAN', '1.#IND', '-1.#QNAN', '#N/A N/A', '#N/A', 'N/A', 'n/a', 'NA', '#NA', 'NULL', 'null', 'NaN', '-NaN', 'nan', '-nan', '']``. +Let us consider some examples: + .. code-block:: python read_csv(path, na_values=[5]) -the default values, in addition to ``5`` , ``5.0`` when interpreted as numbers are recognized as ``NaN`` +In the example above ``5`` and ``5.0`` will be recognized as ``NaN``, in +addition to the defaults. A string will first be interpreted as a numerical +``5``, then as a ``NaN``. .. code-block:: python read_csv(path, keep_default_na=False, na_values=[""]) -only an empty field will be ``NaN`` +Above, only an empty field will be recognized as ``NaN``. .. code-block:: python read_csv(path, keep_default_na=False, na_values=["NA", "0"]) -only ``NA`` and ``0`` as strings are ``NaN`` +Above, both ``NA`` and ``0`` as strings are ``NaN``. .. code-block:: python read_csv(path, na_values=["Nope"]) -the default values, in addition to the string ``"Nope"`` are recognized as ``NaN`` +The default values, in addition to the string ``"Nope"`` are recognized as +``NaN``. .. _io.infinity: @@ -1143,9 +1156,9 @@ Boolean values '''''''''''''' The common values ``True``, ``False``, ``TRUE``, and ``FALSE`` are all -recognized as boolean. Sometime you would want to recognize some other values -as being boolean. To do this use the ``true_values`` and ``false_values`` -options: +recognized as boolean. Occasionally you might want to recognize other values +as being boolean. To do this, use the ``true_values`` and ``false_values`` +options as follows: .. ipython:: python @@ -1161,7 +1174,7 @@ Handling "bad" lines Some files may have malformed lines with too few fields or too many. Lines with too few fields will have NA values filled in the trailing fields. Lines with -too many will cause an error by default: +too many fields will raise an error by default: .. ipython:: python :suppress: @@ -1228,7 +1241,7 @@ By default, ``read_csv`` uses the Excel dialect and treats the double quote as the quote character, which causes it to fail when it finds a newline before it finds the closing double quote. -We can get around this using ``dialect`` +We can get around this using ``dialect``: .. 
ipython:: python :okwarning: @@ -1253,9 +1266,9 @@ after a delimiter: print(data) pd.read_csv(StringIO(data), skipinitialspace=True) -The parsers make every attempt to "do the right thing" and not be very -fragile. Type inference is a pretty big deal. So if a column can be coerced to -integer dtype without altering the contents, it will do so. Any non-numeric +The parsers make every attempt to "do the right thing" and not be fragile. Type +inference is a pretty big deal. If a column can be coerced to integer dtype +without altering the contents, the parser will do so. Any non-numeric columns will come through as object dtype as with the rest of pandas objects. .. _io.quoting: @@ -1278,7 +1291,7 @@ should pass the ``escapechar`` option: Files with Fixed Width Columns '''''''''''''''''''''''''''''' -While ``read_csv`` reads delimited data, the :func:`read_fwf` function works +While :func:`read_csv` reads delimited data, the :func:`read_fwf` function works with data files that have known and fixed column widths. The function parameters to ``read_fwf`` are largely the same as `read_csv` with two extra parameters, and a different usage of the ``delimiter`` parameter: @@ -1287,7 +1300,7 @@ a different usage of the ``delimiter`` parameter: fixed-width fields of each line as half-open intervals (i.e., [from, to[ ). String value 'infer' can be used to instruct the parser to try detecting the column specifications from the first 100 rows of the data. Default - behaviour, if not specified, is to infer. + behavior, if not specified, is to infer. - ``widths``: A list of field widths which can be used instead of 'colspecs' if the intervals are contiguous. - ``delimiter``: Characters to consider as filler characters in the fixed-width file. @@ -1312,7 +1325,7 @@ Consider a typical fixed-width data file: print(open('bar.csv').read()) -In order to parse this file into a DataFrame, we simply need to supply the +In order to parse this file into a ``DataFrame``, we simply need to supply the column specifications to the `read_fwf` function along with the file name: .. ipython:: python @@ -1383,7 +1396,7 @@ column: print(open('foo.csv').read()) In this special case, ``read_csv`` assumes that the first column is to be used -as the index of the DataFrame: +as the index of the ``DataFrame``: .. ipython:: python @@ -1436,10 +1449,10 @@ rows will skip the intervening rows. .. ipython:: python from pandas.util.testing import makeCustomDataframe as mkdf - df = mkdf(5,3,r_idx_nlevels=2,c_idx_nlevels=4) + df = mkdf(5, 3, r_idx_nlevels=2, c_idx_nlevels=4) df.to_csv('mi.csv') print(open('mi.csv').read()) - pd.read_csv('mi.csv',header=[0,1,2,3],index_col=[0,1]) + pd.read_csv('mi.csv', header=[0, 1, 2, 3], index_col=[0, 1]) ``read_csv`` is also able to interpret a more common format of multi-columns indices. @@ -1448,17 +1461,17 @@ of multi-columns indices. :suppress: data = ",a,a,a,b,c,c\n,q,r,s,t,u,v\none,1,2,3,4,5,6\ntwo,7,8,9,10,11,12" - fh = open('mi2.csv','w') + fh = open('mi2.csv', 'w') fh.write(data) fh.close() .. ipython:: python print(open('mi2.csv').read()) - pd.read_csv('mi2.csv',header=[0,1],index_col=0) + pd.read_csv('mi2.csv', header=[0, 1], index_col=0) Note: If an ``index_col`` is not specified (e.g. you don't have an index, or wrote it -with ``df.to_csv(..., index=False``), then any ``names`` on the columns index will be *lost*. +with ``df.to_csv(..., index=False)``, then any ``names`` on the columns index will be *lost*. .. 
ipython:: python :suppress: @@ -1578,7 +1591,7 @@ Writing out Data Writing to CSV format +++++++++++++++++++++ -The Series and DataFrame objects have an instance method ``to_csv`` which +The ``Series`` and ``DataFrame`` objects have an instance method ``to_csv`` which allows storing the contents of the object as a comma-separated-values file. The function takes a number of arguments. Only the first is required. @@ -1591,7 +1604,7 @@ function takes a number of arguments. Only the first is required. - ``index``: whether to write row (index) names (default True) - ``index_label``: Column label(s) for index column(s) if desired. If None (default), and `header` and `index` are True, then the index names are - used. (A sequence should be given if the DataFrame uses MultiIndex). + used. (A sequence should be given if the ``DataFrame`` uses MultiIndex). - ``mode`` : Python write mode, default 'w' - ``encoding``: a string representing the encoding to use if the contents are non-ASCII, for Python versions prior to 3 @@ -1611,7 +1624,7 @@ Writing a formatted string .. _io.formatting: -The DataFrame object has an instance method ``to_string`` which allows control +The ``DataFrame`` object has an instance method ``to_string`` which allows control over the string representation of the object. All arguments are optional: - ``buf`` default None, for example a StringIO object @@ -1622,8 +1635,8 @@ over the string representation of the object. All arguments are optional: which takes a single argument and returns a formatted string - ``float_format`` default None, a function which takes a single (float) argument and returns a formatted string; to be applied to floats in the - DataFrame. - - ``sparsify`` default True, set to False for a DataFrame with a hierarchical + ``DataFrame``. + - ``sparsify`` default True, set to False for a ``DataFrame`` with a hierarchical index to print every multiindex key at each row. - ``index_names`` default True, will print the names of the indices - ``index`` default True, will print the index (ie, row labels) @@ -1631,7 +1644,7 @@ over the string representation of the object. All arguments are optional: - ``justify`` default ``left``, will print column headers left- or right-justified -The Series object also has a ``to_string`` method, but with only the ``buf``, +The ``Series`` object also has a ``to_string`` method, but with only the ``buf``, ``na_rep``, ``float_format`` arguments. There is also a ``length`` argument which, if set to ``True``, will additionally output the length of the Series. @@ -1654,11 +1667,11 @@ with optional parameters: This can be ``None`` in which case a JSON string is returned - ``orient`` : - Series : + ``Series``: - default is ``index`` - allowed values are {``split``, ``records``, ``index``} - DataFrame + ``DataFrame``: - default is ``columns`` - allowed values are {``split``, ``records``, ``index``, ``columns``, ``values``, ``table``} @@ -1693,7 +1706,7 @@ Orient Options ++++++++++++++ There are a number of different options for the format of the resulting JSON -file / string. Consider the following DataFrame and Series: +file / string. Consider the following ``DataFrame`` and ``Series``: .. ipython:: python @@ -1720,8 +1733,8 @@ but the index labels are now primary: sjo.to_json(orient="index") **Record oriented** serializes the data to a JSON array of column -> value records, -index labels are not included. This is useful for passing DataFrame data to plotting -libraries, for example the JavaScript library d3.js: +index labels are not included. 
This is useful for passing ``DataFrame`` data to plotting +libraries, for example the JavaScript library ``d3.js``: .. ipython:: python @@ -1756,7 +1769,7 @@ preservation of metadata including but not limited to dtypes and index names. Date Handling +++++++++++++ -Writing in ISO date format +Writing in ISO date format: .. ipython:: python @@ -1766,21 +1779,21 @@ Writing in ISO date format json = dfd.to_json(date_format='iso') json -Writing in ISO date format, with microseconds +Writing in ISO date format, with microseconds: .. ipython:: python json = dfd.to_json(date_format='iso', date_unit='us') json -Epoch timestamps, in seconds +Epoch timestamps, in seconds: .. ipython:: python json = dfd.to_json(date_format='epoch', date_unit='s') json -Writing to a file, with a date index and a date column +Writing to a file, with a date index and a date column: .. ipython:: python @@ -1795,7 +1808,8 @@ Writing to a file, with a date index and a date column Fallback Behavior +++++++++++++++++ -If the JSON serializer cannot handle the container contents directly it will fallback in the following manner: +If the JSON serializer cannot handle the container contents directly it will +fall back in the following manner: - if the dtype is unsupported (e.g. ``np.complex``) then the ``default_handler``, if provided, will be called for each value, otherwise an exception is raised. @@ -1864,13 +1878,13 @@ is ``None``. To explicitly force ``Series`` parsing, pass ``typ=series`` ``table``; adhering to the JSON `Table Schema`_ -- ``dtype`` : if True, infer dtypes, if a dict of column to dtype, then use those, if False, then don't infer dtypes at all, default is True, apply only to the data -- ``convert_axes`` : boolean, try to convert the axes to the proper dtypes, default is True -- ``convert_dates`` : a list of columns to parse for dates; If True, then try to parse date-like columns, default is True -- ``keep_default_dates`` : boolean, default True. If parsing dates, then parse the default date-like columns -- ``numpy`` : direct decoding to NumPy arrays. default is False; - Supports numeric data only, although labels may be non-numeric. Also note that the JSON ordering **MUST** be the same for each term if ``numpy=True`` -- ``precise_float`` : boolean, default ``False``. Set to enable usage of higher precision (strtod) function when decoding string to double values. Default (``False``) is to use fast but less precise builtin functionality +- ``dtype`` : if ``True``, infer dtypes, if a dict of column to dtype, then use those, if ``False``, then don't infer dtypes at all, default is ``True``, apply only to the data. +- ``convert_axes`` : boolean, try to convert the axes to the proper dtypes, default is ``True``. +- ``convert_dates`` : a list of columns to parse for dates; If ``True``, then try to parse date-like columns, default is ``True``. +- ``keep_default_dates`` : boolean, default ``True``. If parsing dates, then parse the default date-like columns. +- ``numpy`` : direct decoding to NumPy arrays. default is ``False``; + Supports numeric data only, although labels may be non-numeric. Also note that the JSON ordering **MUST** be the same for each term if ``numpy=True``. +- ``precise_float`` : boolean, default ``False``. Set to enable usage of higher precision (strtod) function when decoding string to double values. Default (``False``) is to use fast but less precise builtin functionality. - ``date_unit`` : string, the timestamp unit to detect if converting dates. Default None.
By default the timestamp precision will be detected, if this is not desired then pass one of 's', 'ms', 'us' or 'ns' to force timestamp precision to @@ -1888,9 +1902,11 @@ overview. Data Conversion +++++++++++++++ -The default of ``convert_axes=True``, ``dtype=True``, and ``convert_dates=True`` will try to parse the axes, and all of the data -into appropriate types, including dates. If you need to override specific dtypes, pass a dict to ``dtype``. ``convert_axes`` should only -be set to ``False`` if you need to preserve string-like numbers (e.g. '1', '2') in an axes. +The default of ``convert_axes=True``, ``dtype=True``, and ``convert_dates=True`` +will try to parse the axes, and all of the data into appropriate types, +including dates. If you need to override specific dtypes, pass a dict to +``dtype``. ``convert_axes`` should only be set to ``False`` if you need to +preserve string-like numbers (e.g. '1', '2') in an axes. .. note:: @@ -2175,7 +2191,7 @@ A few notes on the generated table schema: - Periods are converted to timestamps before serialization, and so have the same behavior of being converted to UTC. In addition, periods will contain - and additional field ``freq`` with the period's frequency, e.g. ``'A-DEC'`` + an additional field ``freq`` with the period's frequency, e.g. ``'A-DEC'``. .. ipython:: python @@ -2184,7 +2200,7 @@ A few notes on the generated table schema: build_table_schema(s_per) - Categoricals use the ``any`` type and an ``enum`` constraint listing - the set of possible values. Additionally, an ``ordered`` field is included + the set of possible values. Additionally, an ``ordered`` field is included: .. ipython:: python @@ -2212,7 +2228,7 @@ A few notes on the generated table schema: + For series, the ``object.name`` is used. If that's none, then the name is ``values`` - + For DataFrames, the stringified version of the column name is used + + For ``DataFrames``, the stringified version of the column name is used + For ``Index`` (not ``MultiIndex``), ``index.name`` is used, with a fallback to ``index`` if that is None. + For ``MultiIndex``, ``mi.names`` is used. If any level has no name, @@ -2268,15 +2284,15 @@ Reading HTML Content below regarding the issues surrounding the BeautifulSoup4/html5lib/lxml parsers. The top-level :func:`~pandas.io.html.read_html` function can accept an HTML -string/file/URL and will parse HTML tables into list of pandas DataFrames. +string/file/URL and will parse HTML tables into a list of pandas ``DataFrames``. Let's look at a few examples. .. note:: ``read_html`` returns a ``list`` of ``DataFrame`` objects, even if there is - only a single table contained in the HTML content + only a single table contained in the HTML content. -Read a URL with no options +Read a URL with no options: .. ipython:: python @@ -2290,7 +2306,7 @@ Read a URL with no options and the data below may be slightly different. Read in the content of the file from the above URL and pass it to ``read_html`` -as a string +as a string: .. ipython:: python :suppress: @@ -2304,7 +2320,7 @@ as a string dfs = pd.read_html(f.read()) dfs -You can even pass in an instance of ``StringIO`` if you so desire +You can even pass in an instance of ``StringIO`` if you so desire: .. ipython:: python @@ -2323,7 +2339,7 @@ You can even pass in an instance of ``StringIO`` if you so desire <http://www.github.com/pandas-dev/pandas/issues>`__. -Read a URL and match a table that contains specific text +Read a URL and match a table that contains specific text: ..
code-block:: python @@ -2339,26 +2355,26 @@ from the data minus the parsed header elements (``<th>`` elements). dfs = pd.read_html(url, header=0) -Specify an index column +Specify an index column: .. code-block:: python dfs = pd.read_html(url, index_col=0) -Specify a number of rows to skip +Specify a number of rows to skip: .. code-block:: python dfs = pd.read_html(url, skiprows=0) Specify a number of rows to skip using a list (``xrange`` (Python 2 only) works -as well) +as well): .. code-block:: python dfs = pd.read_html(url, skiprows=range(2)) -Specify an HTML attribute +Specify an HTML attribute: .. code-block:: python @@ -2366,7 +2382,7 @@ Specify an HTML attribute dfs2 = pd.read_html(url, attrs={'class': 'sortable'}) print(np.array_equal(dfs1[0], dfs2[0])) # Should be True -Specify values that should be converted to NaN +Specify values that should be converted to NaN: .. code-block:: python @@ -2374,7 +2390,7 @@ Specify values that should be converted to NaN .. versionadded:: 0.19 -Specify whether to keep the default set of NaN values +Specify whether to keep the default set of NaN values: .. code-block:: python @@ -2384,7 +2400,7 @@ Specify whether to keep the default set of NaN values Specify converters for columns. This is useful for numerical text data that has leading zeros. By default columns that are numerical are cast to numeric -types and the leading zeros are lost. To avoid this, we can convert these +types and the leading zeros are lost. To avoid this, we can convert these columns to strings. .. code-block:: python @@ -2395,13 +2411,13 @@ columns to strings. .. versionadded:: 0.19 -Use some combination of the above +Use some combination of the above: .. code-block:: python dfs = pd.read_html(url, match='Metcalf Bank', index_col=0) -Read in pandas ``to_html`` output (with some loss of floating point precision) +Read in pandas ``to_html`` output (with some loss of floating point precision): .. code-block:: python @@ -2410,15 +2426,15 @@ Read in pandas ``to_html`` output (with some loss of floating point precision) dfin = pd.read_html(s, index_col=0) The ``lxml`` backend will raise an error on a failed parse if that is the only -parser you provide (if you only have a single parser you can provide just a +parser you provide. If you only have a single parser you can provide just a string, but it is considered good practice to pass a list with one string if, -for example, the function expects a sequence of strings) +for example, the function expects a sequence of strings. You may use: .. code-block:: python dfs = pd.read_html(url, 'Metcalf Bank', index_col=0, flavor=['lxml']) -or +Or you could pass ``flavor='lxml'`` without a list: .. code-block:: python @@ -2472,7 +2488,7 @@ HTML: .. raw:: html :file: _static/basic.html -The ``columns`` argument will limit the columns shown +The ``columns`` argument will limit the columns shown: .. ipython:: python @@ -2489,7 +2505,7 @@ HTML: :file: _static/columns.html ``float_format`` takes a Python callable to control the precision of floating -point values +point values: .. ipython:: python @@ -2506,7 +2522,7 @@ HTML: :file: _static/float_format.html ``bold_rows`` will make the row labels bold by default, but you can turn that -off +off: .. ipython:: python @@ -2579,7 +2595,7 @@ parse HTML tables in the top-level pandas io function ``read_html``. * Benefits - * |lxml|_ is very fast + * |lxml|_ is very fast. * |lxml|_ requires Cython to install correctly. 
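As an aside on the ``converters`` behavior described in the hunk above — a minimal sketch, not part of the patch itself, assuming an HTML parser backend such as lxml or BeautifulSoup4 is installed; the two-row table and its ``zip`` column are made up for illustration:

.. code-block:: python

   import pandas as pd

   # A tiny HTML table whose 'zip' column has leading zeros that a
   # numeric parse would silently strip.
   html = """<table>
     <tr><th>city</th><th>zip</th></tr>
     <tr><td>Boston</td><td>02134</td></tr>
     <tr><td>Albany</td><td>12207</td></tr>
   </table>"""

   # converters={'zip': str} keeps the column as strings, so the leading
   # zero survives: ['02134', '12207'] rather than [2134, 12207].
   dfs = pd.read_html(html, converters={'zip': str})
   print(dfs[0]['zip'].tolist())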
@@ -2652,8 +2668,8 @@ The :func:`~pandas.read_excel` method can read Excel 2003 (``.xls``) and Excel 2007+ (``.xlsx``) files using the ``xlrd`` Python module. The :meth:`~DataFrame.to_excel` instance method is used for saving a ``DataFrame`` to Excel. Generally the semantics are -similar to working with :ref:`csv<io.read_csv_table>` data. See the :ref:`cookbook<cookbook.excel>` for some -advanced strategies +similar to working with :ref:`csv<io.read_csv_table>` data. +See the :ref:`cookbook<cookbook.excel>` for some advanced strategies. .. _io.excel_reader: @@ -2696,7 +2712,7 @@ The ``sheet_names`` property will generate a list of the sheet names in the file. The primary use-case for an ``ExcelFile`` is parsing multiple sheets with -different parameters +different parameters: .. code-block:: python @@ -2725,7 +2741,7 @@ of sheet names can simply be passed to ``read_excel`` with no loss in performanc Specifying Sheets +++++++++++++++++ -.. note :: The second argument is ``sheet_name``, not to be confused with ``ExcelFile.sheet_names`` +.. note :: The second argument is ``sheet_name``, not to be confused with ``ExcelFile.sheet_names``. .. note :: An ExcelFile's attribute ``sheet_names`` provides access to a list of sheets. @@ -2802,12 +2818,12 @@ parameters. df.index = df.index.set_names(['lvl1', 'lvl2']) df.to_excel('path_to_file.xlsx') - df = pd.read_excel('path_to_file.xlsx', index_col=[0,1]) + df = pd.read_excel('path_to_file.xlsx', index_col=[0, 1]) df If the source file has both ``MultiIndex`` index and columns, lists specifying each -should be passed to ``index_col`` and ``header`` +should be passed to ``index_col`` and ``header``: .. ipython:: python @@ -2828,10 +2844,10 @@ Parsing Specific Columns ++++++++++++++++++++++++ It is often the case that users will insert columns to do temporary computations -in Excel and you may not want to read in those columns. `read_excel` takes -a `usecols` keyword to allow you to specify a subset of columns to parse. +in Excel and you may not want to read in those columns. ``read_excel`` takes +a ``usecols`` keyword to allow you to specify a subset of columns to parse. -If `usecols` is an integer, then it is assumed to indicate the last column +If ``usecols`` is an integer, then it is assumed to indicate the last column to be parsed. .. code-block:: python @@ -2840,11 +2856,12 @@ to be parsed. If `usecols` is a list of integers, then it is assumed to be the file column indices to be parsed. + .. code-block:: python read_excel('path_to_file.xls', 'Sheet1', usecols=[0, 2, 3]) -Element order is ignored, so usecols=[0,1] is the same as [1,0]. +Element order is ignored, so ``usecols=[0,1]`` is the same as ``[1,0]``. Parsing Dates +++++++++++++ @@ -2852,7 +2869,7 @@ Parsing Dates Datetime-like values are normally automatically converted to the appropriate dtype when reading the excel file. But if you have a column of strings that *look* like dates (but are not actually formatted as dates in excel), you can -use the `parse_dates` keyword to parse those strings to datetimes: +use the ``parse_dates`` keyword to parse those strings to datetimes: .. code-block:: python @@ -2862,7 +2879,7 @@ use the `parse_dates` keyword to parse those strings to datetimes: Cell Converters +++++++++++++++ -It is possible to transform the contents of Excel cells via the `converters` +It is possible to transform the contents of Excel cells via the ``converters`` option. For instance, to convert a column to boolean: .. 
code-block:: python @@ -2903,11 +2920,11 @@ Writing Excel Files Writing Excel Files to Disk +++++++++++++++++++++++++++ -To write a DataFrame object to a sheet of an Excel file, you can use the +To write a ``DataFrame`` object to a sheet of an Excel file, you can use the ``to_excel`` instance method. The arguments are largely the same as ``to_csv`` described above, the first argument being the name of the excel file, and the -optional second argument the name of the sheet to which the DataFrame should be -written. For example: +optional second argument the name of the sheet to which the ``DataFrame`` should be +written. For example: .. code-block:: python @@ -2917,7 +2934,7 @@ Files with a ``.xls`` extension will be written using ``xlwt`` and those with a ``.xlsx`` extension will be written using ``xlsxwriter`` (if available) or ``openpyxl``. -The DataFrame will be written in a way that tries to mimic the REPL output. +The ``DataFrame`` will be written in a way that tries to mimic the REPL output. The ``index_label`` will be placed in the second row instead of the first. You can place it in the first row by setting the ``merge_cells`` option in ``to_excel()`` to ``False``: @@ -2926,10 +2943,7 @@ row instead of the first. You can place it in the first row by setting the df.to_excel('path_to_file.xlsx', index_label='label', merge_cells=False) -The Panel class also has a ``to_excel`` instance method, -which writes each DataFrame in the Panel to a separate sheet. - -In order to write separate DataFrames to separate sheets in a single Excel file, +In order to write separate ``DataFrames`` to separate sheets in a single Excel file, one can pass an :class:`~pandas.io.excel.ExcelWriter`. .. code-block:: python @@ -2990,13 +3004,13 @@ Pandas supports writing Excel files to buffer-like objects such as ``StringIO`` Excel writer engines '''''''''''''''''''' -``pandas`` chooses an Excel writer via two methods: +Pandas chooses an Excel writer via two methods: 1. the ``engine`` keyword argument 2. the filename extension (via the default specified in config options) -By default, ``pandas`` uses the `XlsxWriter`_ for ``.xlsx`` and `openpyxl`_ -for ``.xlsm`` files and `xlwt`_ for ``.xls`` files. If you have multiple +By default, pandas uses the `XlsxWriter`_ for ``.xlsx``, `openpyxl`_ +for ``.xlsm``, and `xlwt`_ for ``.xls`` files. If you have multiple engines installed, you can set the default engine through :ref:`setting the config options <options>` ``io.excel.xlsx.writer`` and ``io.excel.xls.writer``. pandas will fall back on `openpyxl`_ for ``.xlsx`` @@ -3034,8 +3048,8 @@ Style and Formatting The look and feel of Excel worksheets created from pandas can be modified using the following parameters on the ``DataFrame``'s ``to_excel`` method. -- ``float_format`` : Format string for floating point numbers (default None) -- ``freeze_panes`` : A tuple of two integers representing the bottommost row and rightmost column to freeze. Each of these parameters is one-based, so (1, 1) will freeze the first row and first column (default None) +- ``float_format`` : Format string for floating point numbers (default ``None``). +- ``freeze_panes`` : A tuple of two integers representing the bottommost row and rightmost column to freeze. Each of these parameters is one-based, so (1, 1) will freeze the first row and first column (default ``None``). 
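To see how the ``ExcelWriter``, ``float_format`` and ``freeze_panes`` options covered above fit together — a minimal sketch, not part of the patch, assuming an Excel engine such as openpyxl or XlsxWriter is available; the file and sheet names are placeholders:

.. code-block:: python

   import pandas as pd

   df1 = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})
   df2 = pd.DataFrame({'x': [5.25, 6.5]})

   # Two DataFrames written to separate sheets of one file; the first
   # sheet freezes its header row and index column.
   with pd.ExcelWriter('path_to_file.xlsx') as writer:
       df1.to_excel(writer, sheet_name='Sheet1', freeze_panes=(1, 1))
       df2.to_excel(writer, sheet_name='Sheet2', float_format='%.2f')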
@@ -3044,10 +3058,10 @@ The look and feel of Excel worksheets created from pandas can be modified using Clipboard --------- -A handy way to grab data is to use the ``read_clipboard`` method, which takes -the contents of the clipboard buffer and passes them to the ``read_table`` -method. For instance, you can copy the following -text to the clipboard (CTRL-C on many operating systems): +A handy way to grab data is to use the :func:`~pandas.read_clipboard` function, +which takes the contents of the clipboard buffer and passes them to the +``read_table`` method. For instance, you can copy the following text to the +clipboard (CTRL-C on many operating systems): .. code-block:: python @@ -3056,7 +3070,7 @@ text to the clipboard (CTRL-C on many operating systems): y 2 5 q z 3 6 r -And then import the data directly to a DataFrame by calling: +And then import the data directly to a ``DataFrame`` by calling: .. code-block:: python @@ -3066,10 +3080,11 @@ And then import the data directly to a DataFrame by calling: clipdf -The ``to_clipboard`` method can be used to write the contents of a DataFrame to + +The ``to_clipboard`` method can be used to write the contents of a ``DataFrame`` to the clipboard. Following which you can paste the clipboard contents into other applications (CTRL-V on many operating systems). Here we illustrate writing a -DataFrame into clipboard and reading it back. +``DataFrame`` into clipboard and reading it back. .. ipython:: python @@ -3121,7 +3136,7 @@ any pickled pandas object (or any other pickled object) from file: Several internal refactorings have been done while still preserving compatibility with pickles created with older versions of pandas. However, - for such cases, pickled dataframes, series etc, must be read with + for such cases, pickled ``DataFrames``, ``Series`` etc, must be read with ``pd.read_pickle``, rather than ``pickle.load``. See `here <http://pandas.pydata.org/pandas-docs/stable/whatsnew.html#whatsnew-0130-refactoring>`__ @@ -3139,8 +3154,8 @@ Compressed pickle files :func:`read_pickle`, :meth:`DataFrame.to_pickle` and :meth:`Series.to_pickle` can read and write compressed pickle files. The compression types of ``gzip``, ``bz2``, ``xz`` are supported for reading and writing. -`zip`` file supports read only and must contain only one data file -to be read in. +The ``zip`` file format only supports reading and must contain only one data file +to be read. The compression type can be an explicit parameter or be inferred from the file extension. If 'infer', then use ``gzip``, ``bz2``, ``zip``, or ``xz`` if filename ends in ``'.gz'``, ``'.bz2'``, ``'.zip'``, or @@ -3154,7 +3169,7 @@ If 'infer', then use ``gzip``, ``bz2``, ``zip``, or ``xz`` if filename ends in ` 'C': pd.date_range('20130101', periods=1000, freq='s')}) df -Using an explicit compression type +Using an explicit compression type: .. ipython:: python df.to_pickle("data.pkl.compress", compression="gzip") rt = pd.read_pickle("data.pkl.compress", compression="gzip") rt -Inferring compression type from the extension +Inferring compression type from the extension: .. ipython:: python df.to_pickle("data.pkl.xz", compression="infer") rt = pd.read_pickle("data.pkl.xz", compression="infer") rt -The default is to 'infer +The default is to 'infer': ..
ipython:: python @@ -3221,14 +3236,14 @@ You can pass a list of objects and you will receive them back on deserialization pd.to_msgpack('foo.msg', df, 'foo', np.array([1,2,3]), s) pd.read_msgpack('foo.msg') -You can pass ``iterator=True`` to iterate over the unpacked results +You can pass ``iterator=True`` to iterate over the unpacked results: .. ipython:: python for o in pd.read_msgpack('foo.msg',iterator=True): print(o) -You can pass ``append=True`` to the writer to append to an existing pack +You can pass ``append=True`` to the writer to append to an existing pack: .. ipython:: python @@ -3331,7 +3346,7 @@ In a current or later Python session, you can retrieve stored objects: # dotted (attribute) access provides get as well store.df -Deletion of the object specified by the key +Deletion of the object specified by the key: .. ipython:: python @@ -3340,7 +3355,7 @@ Deletion of the object specified by the key store -Closing a Store, Context Manager +Closing a Store and using a context manager: .. ipython:: python @@ -3348,8 +3363,7 @@ Closing a Store, Context Manager store store.is_open - # Working with, and automatically closing the store with the context - # manager + # Working with, and automatically closing the store using a context manager with pd.HDFStore('store.h5') as store: store.keys() @@ -3449,17 +3463,17 @@ the ``fixed`` format. These types of stores are **not** appendable once written remove them and rewrite). Nor are they **queryable**; they must be retrieved in their entirety. They also do not support dataframes with non-unique column names. The ``fixed`` format stores offer very fast writing and slightly faster reading than ``table`` stores. -This format is specified by default when using ``put`` or ``to_hdf`` or by ``format='fixed'`` or ``format='f'`` +This format is specified by default when using ``put`` or ``to_hdf`` or by ``format='fixed'`` or ``format='f'``. .. warning:: - A ``fixed`` format will raise a ``TypeError`` if you try to retrieve using a ``where`` . + A ``fixed`` format will raise a ``TypeError`` if you try to retrieve using a ``where``: .. code-block:: python - pd.DataFrame(randn(10,2)).to_hdf('test_fixed.h5','df') + pd.DataFrame(randn(10, 2)).to_hdf('test_fixed.h5', 'df') - pd.read_hdf('test_fixed.h5','df',where='index>5') + pd.read_hdf('test_fixed.h5', 'df', where='index>5') TypeError: cannot pass a where specification when reading a fixed format. this store must be selected in its entirety @@ -3472,9 +3486,9 @@ Table Format ``HDFStore`` supports another ``PyTables`` format on disk, the ``table`` format. Conceptually a ``table`` is shaped very much like a DataFrame, with rows and columns. A ``table`` may be appended to in the same or -other sessions. In addition, delete & query type operations are +other sessions. In addition, delete and query type operations are supported. This format is specified by ``format='table'`` or ``format='t'`` -to ``append`` or ``put`` or ``to_hdf`` +to ``append`` or ``put`` or ``to_hdf``. This format can be set as an option as well ``pd.set_option('io.hdf.default_format','table')`` to enable ``put/append/to_hdf`` to by default store in the ``table`` format. @@ -3514,9 +3528,9 @@ Hierarchical Keys Keys to a store can be specified as a string. These can be in a hierarchical path-name like format (e.g. ``foo/bar/bah``), which will generate a hierarchy of sub-stores (or ``Groups`` in PyTables -parlance). Keys can be specified with out the leading '/' and are ALWAYS +parlance). 
Keys can be specified without the leading '/' and are **always** absolute (e.g. 'foo' refers to '/foo'). Removal operations can remove -everything in the sub-store and BELOW, so be *careful*. +everything in the sub-store and **below**, so be *careful*. .. ipython:: python @@ -3547,7 +3561,7 @@ everything in the sub-store and BELOW, so be *careful*. /foo/bar/bah (Group) '' children := ['block0_items' (Array), 'block0_values' (Array), 'axis0' (Array), 'axis1' (Array)] - Instead, use explicit string based keys + Instead, use explicit string based keys: .. ipython:: python @@ -3596,8 +3610,8 @@ defaults to `nan`. Storing Multi-Index DataFrames ++++++++++++++++++++++++++++++ -Storing multi-index dataframes as tables is very similar to -storing/selecting from homogeneous index DataFrames. +Storing multi-index ``DataFrames`` as tables is very similar to +storing/selecting from homogeneous index ``DataFrames``. .. ipython:: python @@ -3632,10 +3646,10 @@ data. A query is specified using the ``Term`` class under the hood, as a boolean expression. -- ``index`` and ``columns`` are supported indexers of a DataFrame +- ``index`` and ``columns`` are supported indexers of a ``DataFrame``. - ``major_axis``, ``minor_axis``, and ``items`` are supported indexers of - the Panel -- if ``data_columns`` are specified, these can be used as additional indexers + the Panel. +- if ``data_columns`` are specified, these can be used as additional indexers. Valid comparison operators are: @@ -3849,7 +3863,7 @@ to perform queries (other than the `indexable` columns, which you can always query). For instance say you want to perform this common operation, on-disk, and return just the frame that matches this query. You can specify ``data_columns = True`` to force all columns to -be data_columns +be ``data_columns``. .. ipython:: python @@ -3879,7 +3893,7 @@ There is some performance degradation by making lots of columns into `data columns`, so it is up to the user to designate these. In addition, you cannot change data columns (nor indexables) after the first append/put operation (Of course you can simply read in the data and -create a new table!) +create a new table!). Iterator ++++++++ @@ -3912,7 +3926,7 @@ chunks. .. ipython:: python - dfeq = pd.DataFrame({'number': np.arange(1,11)}) + dfeq = pd.DataFrame({'number': np.arange(1, 11)}) dfeq store.append('dfeq', dfeq, data_columns=['number']) @@ -3921,9 +3935,9 @@ chunks. return [l[i:i+n] for i in range(0, len(l), n)] evens = [2,4,6,8,10] - coordinates = store.select_as_coordinates('dfeq','number=evens') + coordinates = store.select_as_coordinates('dfeq', 'number=evens') for c in chunks(coordinates, 2): - print(store.select('dfeq',where=c)) + print(store.select('dfeq', where=c)) Advanced Queries ++++++++++++++++ @@ -4005,7 +4019,7 @@ table names to a list of 'columns' you want in that table. If `None` is used in place of a list, that table will have the remaining unspecified columns of the given DataFrame. The argument ``selector`` defines which table is the selector table (which you can make queries from). -The argument ``dropna`` will drop rows from the input DataFrame to ensure +The argument ``dropna`` will drop rows from the input ``DataFrame`` to ensure tables are synchronized. This means that if a row for one of the tables being written to is entirely ``np.NaN``, that row will be dropped from all tables. @@ -4081,7 +4095,7 @@ the table using a ``where`` that selects all but the missing data. automatically.
Thus, repeatedly deleting (or removing nodes) and adding again, **WILL TEND TO INCREASE THE FILE SIZE**. - To *repack and clean* the file, use :ref:`ptrepack <io.hdf5-ptrepack>` + To *repack and clean* the file, use :ref:`ptrepack <io.hdf5-ptrepack>`. .. _io.hdf5-notes: @@ -4464,7 +4478,7 @@ Several caveats. - Non supported types include ``Period`` and actual Python object types. These will raise a helpful error message on an attempt at serialization. -See the `Full Documentation <https://github.com/wesm/feather>`__ +See the `Full Documentation <https://github.com/wesm/feather>`__. .. ipython:: python @@ -4522,8 +4536,8 @@ dtypes, including extension dtypes such as datetime with tz. Several caveats. -- Duplicate column names and non-string columns names are not supported -- Index level names, if specified, must be strings +- Duplicate column names and non-string columns names are not supported. +- Index level names, if specified, must be strings. - Categorical dtypes can be serialized to parquet, but will de-serialize as ``object`` dtype. - Non supported types include ``Period`` and actual Python object types. These will raise a helpful error message on an attempt at serialization. @@ -4532,7 +4546,7 @@ You can specify an ``engine`` to direct the serialization. This can be one of `` If the engine is NOT specified, then the ``pd.options.io.parquet.engine`` option is checked; if this is also ``auto``, then ``pyarrow`` is tried, and falling back to ``fastparquet``. -See the documentation for `pyarrow <http://arrow.apache.org/docs/python/>`__ and `fastparquet <https://fastparquet.readthedocs.io/en/latest/>`__ +See the documentation for `pyarrow <http://arrow.apache.org/docs/python/>`__ and `fastparquet <https://fastparquet.readthedocs.io/en/latest/>`__. .. note:: @@ -4652,7 +4666,7 @@ If you want to manage your own connections you can pass one of those instead: Writing DataFrames '''''''''''''''''' -Assuming the following data is in a DataFrame ``data``, we can insert it into +Assuming the following data is in a ``DataFrame`` ``data``, we can insert it into the database using :func:`~pandas.DataFrame.to_sql`. +-----+------------+-------+-------+-------+ @@ -4738,7 +4752,7 @@ table name and optionally a subset of columns to read. pd.read_sql_table('data', engine) -You can also specify the name of the column as the DataFrame index, +You can also specify the name of the column as the ``DataFrame`` index, and specify a subset of columns to be read. .. ipython:: python @@ -4807,7 +4821,7 @@ Specifying this will return an iterator through chunks of the query result: for chunk in pd.read_sql_query("SELECT * FROM data_chunks", engine, chunksize=5): print(chunk) -You can also run a plain query without creating a dataframe with +You can also run a plain query without creating a ``DataFrame`` with :func:`~pandas.io.sql.execute`. This is useful for queries that don't return values, such as INSERT. This is functionally equivalent to calling ``execute`` on the SQLAlchemy engine or db connection object. Again, you must use the SQL syntax @@ -4923,7 +4937,7 @@ pandas integrates with this external package. if ``pandas-gbq`` is installed, you can use the pandas methods ``pd.read_gbq`` and ``DataFrame.to_gbq``, which will call the respective functions from ``pandas-gbq``. -Full documentation can be found `here <https://pandas-gbq.readthedocs.io/>`__ +Full documentation can be found `here <https://pandas-gbq.readthedocs.io/>`__. ..
_io.stata: @@ -4986,7 +5000,7 @@ Reading from Stata format ''''''''''''''''''''''''' The top-level function ``read_stata`` will read a dta file and return -either a DataFrame or a :class:`~pandas.io.stata.StataReader` that can +either a ``DataFrame`` or a :class:`~pandas.io.stata.StataReader` that can be used to read the file incrementally. .. ipython:: python @@ -5084,7 +5098,7 @@ whether imported ``Categorical`` variables are ordered. .. note:: - *Stata* supports partially labeled series. These series have value labels for + *Stata* supports partially labeled series. These series have value labels for some but not all data values. Importing a partially labeled series will produce a ``Categorical`` with string categories for the values that are labeled and numeric categories for values with no label. @@ -5144,7 +5158,7 @@ into and from pandas, we recommend these packages from the broader community. netCDF '''''' -xarray_ provides data structures inspired by the pandas DataFrame for working +xarray_ provides data structures inspired by the pandas ``DataFrame`` for working with multi-dimensional datasets, with a focus on the netCDF file format and easy conversion to and from pandas. @@ -5173,7 +5187,8 @@ ignored. dtypes: float64(1), int64(1) memory usage: 15.3 MB -Writing +When writing, the top three functions in terms of speed are +``test_pickle_write``, ``test_feather_write`` and ``test_hdf_fixed_write_compress``. .. code-block:: ipython In [32]: %timeit test_pickle_write_compress(df) 3.33 s Β± 55.2 ms per loop (mean Β± std. dev. of 7 runs, 1 loop each) -Reading +When reading, the top three are ``test_feather_read``, ``test_pickle_read`` and +``test_hdf_fixed_read``. .. code-block:: ipython @@ -5249,7 +5265,7 @@ Space on disk (in bytes) 16000848 Aug 21 18:00 test.pkl 7554108 Aug 21 18:00 test.pkl.compress -And here's the code +And here's the code: .. code-block:: python
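   # The benchmark code announced above is cut off in this diff. What follows
   # is only a hedged sketch of timing helpers with the names the section
   # quotes (test_pickle_write and friends); their actual bodies in the docs
   # may differ. The feather/HDF variants assume a feather backend and
   # PyTables are installed.
   import timeit

   import numpy as np
   import pandas as pd

   df = pd.DataFrame({'A': np.random.randn(100000),
                      'B': np.arange(100000)})

   def test_pickle_write(df):
       df.to_pickle('test.pkl')

   def test_feather_write(df):
       df.to_feather('test.fthr')

   def test_hdf_fixed_write_compress(df):
       df.to_hdf('test_fixed_compress.h5', 'test',
                 format='fixed', complevel=9, complib='blosc')

   # Rough stand-in for the %timeit calls shown earlier.
   for fn in (test_pickle_write, test_feather_write,
              test_hdf_fixed_write_compress):
       print(fn.__name__, timeit.timeit(lambda: fn(df), number=3))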
Spellcheck of the docs, specifically of `io.rst`. I did not read everything thoroughly, but I believe the suggested changes will help enforce a greater level of consistency in the docs. * Backticks ` `` ` around Series, DataFrame. * Added some function references, e.g. `` :meth:`~DataFrame.method` ``. * Consistency of booleans, typesetting as ``False`` instead of False or *False*. * Minor rephrasing of sentences, spelling, periods, colons, etc. Cheers!
https://api.github.com/repos/pandas-dev/pandas/pulls/19660
2018-02-12T17:07:07Z
2018-02-14T11:31:20Z
2018-02-14T11:31:20Z
2018-02-14T11:31:39Z