Dataset schema (column summary; every field is a string, with the observed min/max lengths):

| column | min length | max length |
| --- | --- | --- |
| title | 1 | 185 |
| diff | 0 | 32.2M |
| body | 0 | 123k |
| url | 57 | 58 |
| created_at | 20 | 20 |
| closed_at | 20 | 20 |
| merged_at | 20 | 20 |
| updated_at | 20 | 20 |
TST: change pyarrow skips to xfails
diff --git a/pandas/tests/io/parser/common/test_chunksize.py b/pandas/tests/io/parser/common/test_chunksize.py index d407f98029e8d..0f42aa81e4b37 100644 --- a/pandas/tests/io/parser/common/test_chunksize.py +++ b/pandas/tests/io/parser/common/test_chunksize.py @@ -16,9 +16,13 @@ ) import pandas._testing as tm -pytestmark = pytest.mark.usefixtures("pyarrow_skip") +xfail_pyarrow = pytest.mark.usefixtures("pyarrow_xfail") +pytestmark = pytest.mark.filterwarnings( + "ignore:Passing a BlockManager to DataFrame:DeprecationWarning" +) +@xfail_pyarrow # The 'chunksize' option is not supported @pytest.mark.parametrize("index_col", [0, "index"]) def test_read_chunksize_with_index(all_parsers, index_col): parser = all_parsers @@ -51,6 +55,7 @@ def test_read_chunksize_with_index(all_parsers, index_col): tm.assert_frame_equal(chunks[2], expected[4:]) +@xfail_pyarrow # AssertionError: Regex pattern did not match @pytest.mark.parametrize("chunksize", [1.3, "foo", 0]) def test_read_chunksize_bad(all_parsers, chunksize): data = """index,A,B,C,D @@ -69,6 +74,7 @@ def test_read_chunksize_bad(all_parsers, chunksize): pass +@xfail_pyarrow # The 'nrows' option is not supported @pytest.mark.parametrize("chunksize", [2, 8]) def test_read_chunksize_and_nrows(all_parsers, chunksize): # see gh-15755 @@ -88,6 +94,7 @@ def test_read_chunksize_and_nrows(all_parsers, chunksize): tm.assert_frame_equal(concat(reader), expected) +@xfail_pyarrow # The 'chunksize' option is not supported def test_read_chunksize_and_nrows_changing_size(all_parsers): data = """index,A,B,C,D foo,2,3,4,5 @@ -109,6 +116,7 @@ def test_read_chunksize_and_nrows_changing_size(all_parsers): reader.get_chunk(size=3) +@xfail_pyarrow # The 'chunksize' option is not supported def test_get_chunk_passed_chunksize(all_parsers): parser = all_parsers data = """A,B,C @@ -124,6 +132,7 @@ def test_get_chunk_passed_chunksize(all_parsers): tm.assert_frame_equal(result, expected) +@xfail_pyarrow # The 'chunksize' option is not supported @pytest.mark.parametrize("kwargs", [{}, {"index_col": 0}]) def test_read_chunksize_compat(all_parsers, kwargs): # see gh-12185 @@ -141,6 +150,7 @@ def test_read_chunksize_compat(all_parsers, kwargs): tm.assert_frame_equal(concat(reader), result) +@xfail_pyarrow # The 'chunksize' option is not supported def test_read_chunksize_jagged_names(all_parsers): # see gh-23509 parser = all_parsers @@ -171,7 +181,11 @@ def test_chunks_have_consistent_numerical_type(all_parsers, monkeypatch): data = "a\n" + "\n".join(integers + ["1.0", "2.0"] + integers) # Coercions should work without warnings. 
- with tm.assert_produces_warning(None): + warn = None + if parser.engine == "pyarrow": + warn = DeprecationWarning + depr_msg = "Passing a BlockManager to DataFrame" + with tm.assert_produces_warning(warn, match=depr_msg, check_stacklevel=False): with monkeypatch.context() as m: m.setattr(libparsers, "DEFAULT_BUFFER_HEURISTIC", heuristic) result = parser.read_csv(StringIO(data)) @@ -180,6 +194,7 @@ def test_chunks_have_consistent_numerical_type(all_parsers, monkeypatch): assert result.a.dtype == float +@xfail_pyarrow # ValueError: The 'chunksize' option is not supported def test_warn_if_chunks_have_mismatched_type(all_parsers): warning_type = None parser = all_parsers @@ -207,6 +222,7 @@ def test_warn_if_chunks_have_mismatched_type(all_parsers): assert df.a.dtype == object +@xfail_pyarrow # ValueError: The 'chunksize' option is not supported @pytest.mark.parametrize("iterator", [True, False]) def test_empty_with_nrows_chunksize(all_parsers, iterator): # see gh-9535 @@ -225,6 +241,7 @@ def test_empty_with_nrows_chunksize(all_parsers, iterator): tm.assert_frame_equal(result, expected) +@xfail_pyarrow # ValueError: The 'chunksize' option is not supported def test_read_csv_memory_growth_chunksize(all_parsers): # see gh-24805 # @@ -242,6 +259,7 @@ def test_read_csv_memory_growth_chunksize(all_parsers): pass +@xfail_pyarrow # ValueError: The 'chunksize' option is not supported def test_chunksize_with_usecols_second_block_shorter(all_parsers): # GH#21211 parser = all_parsers @@ -267,6 +285,7 @@ def test_chunksize_with_usecols_second_block_shorter(all_parsers): tm.assert_frame_equal(result, expected_frames[i]) +@xfail_pyarrow # ValueError: The 'chunksize' option is not supported def test_chunksize_second_block_shorter(all_parsers): # GH#21211 parser = all_parsers diff --git a/pandas/tests/io/parser/common/test_file_buffer_url.py b/pandas/tests/io/parser/common/test_file_buffer_url.py index 5ee629947db48..5d5814e880f8b 100644 --- a/pandas/tests/io/parser/common/test_file_buffer_url.py +++ b/pandas/tests/io/parser/common/test_file_buffer_url.py @@ -22,9 +22,11 @@ from pandas import DataFrame import pandas._testing as tm -# TODO(1.4) Please xfail individual tests at release time -# instead of skip -pytestmark = pytest.mark.usefixtures("pyarrow_skip") +pytestmark = pytest.mark.filterwarnings( + "ignore:Passing a BlockManager to DataFrame:DeprecationWarning" +) + +xfail_pyarrow = pytest.mark.usefixtures("pyarrow_xfail") @pytest.mark.network @@ -60,6 +62,7 @@ def test_local_file(all_parsers, csv_dir_path): pytest.skip("Failing on: " + " ".join(platform.uname())) +@xfail_pyarrow # AssertionError: DataFrame.index are different def test_path_path_lib(all_parsers): parser = all_parsers df = tm.makeDataFrame() @@ -67,6 +70,7 @@ def test_path_path_lib(all_parsers): tm.assert_frame_equal(df, result) +@xfail_pyarrow # AssertionError: DataFrame.index are different def test_path_local_path(all_parsers): parser = all_parsers df = tm.makeDataFrame() @@ -206,10 +210,14 @@ def test_no_permission(all_parsers): "in-quoted-field", ], ) -def test_eof_states(all_parsers, data, kwargs, expected, msg): +def test_eof_states(all_parsers, data, kwargs, expected, msg, request): # see gh-10728, gh-10548 parser = all_parsers + if parser.engine == "pyarrow" and "\r" not in data: + mark = pytest.mark.xfail(reason="The 'comment' option is not supported") + request.applymarker(mark) + if expected is None: with pytest.raises(ParserError, match=msg): parser.read_csv(StringIO(data), **kwargs) @@ -218,6 +226,7 @@ def 
test_eof_states(all_parsers, data, kwargs, expected, msg): tm.assert_frame_equal(result, expected) +@xfail_pyarrow # ValueError: the 'pyarrow' engine does not support regex separators def test_temporary_file(all_parsers): # see gh-13398 parser = all_parsers @@ -347,6 +356,7 @@ def test_read_csv_file_handle(all_parsers, io_class, encoding): assert not handle.closed +@xfail_pyarrow # ValueError: The 'memory_map' option is not supported def test_memory_map_compression(all_parsers, compression): """ Support memory map for compressed files. @@ -365,6 +375,7 @@ def test_memory_map_compression(all_parsers, compression): ) +@xfail_pyarrow # ValueError: The 'chunksize' option is not supported def test_context_manager(all_parsers, datapath): # make sure that opened files are closed parser = all_parsers @@ -381,6 +392,7 @@ def test_context_manager(all_parsers, datapath): assert reader.handles.handle.closed +@xfail_pyarrow # ValueError: The 'chunksize' option is not supported def test_context_manageri_user_provided(all_parsers, datapath): # make sure that user-provided handles are not closed parser = all_parsers @@ -396,6 +408,7 @@ def test_context_manageri_user_provided(all_parsers, datapath): assert not reader.handles.handle.closed +@xfail_pyarrow # ParserError: Empty CSV file def test_file_descriptor_leak(all_parsers, using_copy_on_write): # GH 31488 parser = all_parsers @@ -404,6 +417,7 @@ def test_file_descriptor_leak(all_parsers, using_copy_on_write): parser.read_csv(path) +@xfail_pyarrow # ValueError: The 'memory_map' option is not supported def test_memory_map(all_parsers, csv_dir_path): mmap_file = os.path.join(csv_dir_path, "test_mmap.csv") parser = all_parsers diff --git a/pandas/tests/io/parser/common/test_float.py b/pandas/tests/io/parser/common/test_float.py index 8ec372420a0f0..63ad3bcb249ea 100644 --- a/pandas/tests/io/parser/common/test_float.py +++ b/pandas/tests/io/parser/common/test_float.py @@ -12,9 +12,13 @@ from pandas import DataFrame import pandas._testing as tm -pytestmark = pytest.mark.usefixtures("pyarrow_skip") +pytestmark = pytest.mark.filterwarnings( + "ignore:Passing a BlockManager to DataFrame:DeprecationWarning" +) +xfail_pyarrow = pytest.mark.usefixtures("pyarrow_xfail") +@xfail_pyarrow # ParserError: CSV parse error: Empty CSV file or block def test_float_parser(all_parsers): # see gh-9565 parser = all_parsers @@ -46,6 +50,7 @@ def test_very_negative_exponent(all_parsers_all_precisions, neg_exp): tm.assert_frame_equal(result, expected) +@xfail_pyarrow @pytest.mark.parametrize("exp", [999999999999999999, -999999999999999999]) def test_too_many_exponent_digits(all_parsers_all_precisions, exp, request): # GH#38753 diff --git a/pandas/tests/io/parser/common/test_iterator.py b/pandas/tests/io/parser/common/test_iterator.py index 58e5886aedd6b..26619857bd231 100644 --- a/pandas/tests/io/parser/common/test_iterator.py +++ b/pandas/tests/io/parser/common/test_iterator.py @@ -12,9 +12,13 @@ ) import pandas._testing as tm -pytestmark = pytest.mark.usefixtures("pyarrow_skip") +pytestmark = pytest.mark.filterwarnings( + "ignore:Passing a BlockManager to DataFrame:DeprecationWarning" +) +xfail_pyarrow = pytest.mark.usefixtures("pyarrow_xfail") +@xfail_pyarrow # ValueError: The 'iterator' option is not supported def test_iterator(all_parsers): # see gh-6607 data = """index,A,B,C,D @@ -37,6 +41,7 @@ def test_iterator(all_parsers): tm.assert_frame_equal(last_chunk, expected[3:]) +@xfail_pyarrow # ValueError: The 'iterator' option is not supported def test_iterator2(all_parsers): 
parser = all_parsers data = """A,B,C @@ -56,6 +61,7 @@ def test_iterator2(all_parsers): tm.assert_frame_equal(result[0], expected) +@xfail_pyarrow # ValueError: The 'chunksize' option is not supported def test_iterator_stop_on_chunksize(all_parsers): # gh-3967: stopping iteration when chunksize is specified parser = all_parsers @@ -77,6 +83,7 @@ def test_iterator_stop_on_chunksize(all_parsers): tm.assert_frame_equal(concat(result), expected) +@xfail_pyarrow # AssertionError: Regex pattern did not match @pytest.mark.parametrize( "kwargs", [{"iterator": True, "chunksize": 1}, {"iterator": True}, {"chunksize": 1}] ) diff --git a/pandas/tests/io/parser/common/test_verbose.py b/pandas/tests/io/parser/common/test_verbose.py index 335065db974dc..bcfb9cd4032ad 100644 --- a/pandas/tests/io/parser/common/test_verbose.py +++ b/pandas/tests/io/parser/common/test_verbose.py @@ -6,9 +6,10 @@ import pytest -pytestmark = pytest.mark.usefixtures("pyarrow_skip") +xfail_pyarrow = pytest.mark.usefixtures("pyarrow_xfail") +@xfail_pyarrow # ValueError: The 'verbose' option is not supported def test_verbose_read(all_parsers, capsys): parser = all_parsers data = """a,b,c,d @@ -32,6 +33,7 @@ def test_verbose_read(all_parsers, capsys): assert captured.out == "Filled 3 NA values in column a\n" +@xfail_pyarrow # ValueError: The 'verbose' option is not supported def test_verbose_read2(all_parsers, capsys): parser = all_parsers data = """a,b,c,d diff --git a/pandas/tests/io/parser/dtypes/test_empty.py b/pandas/tests/io/parser/dtypes/test_empty.py index 1f709a3cd8f28..8759c52485533 100644 --- a/pandas/tests/io/parser/dtypes/test_empty.py +++ b/pandas/tests/io/parser/dtypes/test_empty.py @@ -17,10 +17,10 @@ ) import pandas._testing as tm -# TODO(1.4): Change me into individual xfails at release time -pytestmark = pytest.mark.usefixtures("pyarrow_skip") +xfail_pyarrow = pytest.mark.usefixtures("pyarrow_xfail") +@xfail_pyarrow # CSV parse error: Empty CSV file or block def test_dtype_all_columns_empty(all_parsers): # see gh-12048 parser = all_parsers @@ -30,6 +30,7 @@ def test_dtype_all_columns_empty(all_parsers): tm.assert_frame_equal(result, expected) +@xfail_pyarrow # CSV parse error: Empty CSV file or block def test_empty_pass_dtype(all_parsers): parser = all_parsers @@ -42,6 +43,7 @@ def test_empty_pass_dtype(all_parsers): tm.assert_frame_equal(result, expected) +@xfail_pyarrow # CSV parse error: Empty CSV file or block def test_empty_with_index_pass_dtype(all_parsers): parser = all_parsers @@ -56,6 +58,7 @@ def test_empty_with_index_pass_dtype(all_parsers): tm.assert_frame_equal(result, expected) +@xfail_pyarrow # CSV parse error: Empty CSV file or block def test_empty_with_multi_index_pass_dtype(all_parsers): parser = all_parsers @@ -72,6 +75,7 @@ def test_empty_with_multi_index_pass_dtype(all_parsers): tm.assert_frame_equal(result, expected) +@xfail_pyarrow # CSV parse error: Empty CSV file or block def test_empty_with_mangled_column_pass_dtype_by_names(all_parsers): parser = all_parsers @@ -84,6 +88,7 @@ def test_empty_with_mangled_column_pass_dtype_by_names(all_parsers): tm.assert_frame_equal(result, expected) +@xfail_pyarrow # CSV parse error: Empty CSV file or block def test_empty_with_mangled_column_pass_dtype_by_indexes(all_parsers): parser = all_parsers @@ -96,6 +101,7 @@ def test_empty_with_mangled_column_pass_dtype_by_indexes(all_parsers): tm.assert_frame_equal(result, expected) +@xfail_pyarrow # CSV parse error: Empty CSV file or block def test_empty_with_dup_column_pass_dtype_by_indexes(all_parsers): # 
see gh-9424 parser = all_parsers @@ -165,6 +171,7 @@ def test_empty_with_dup_column_pass_dtype_by_indexes_raises(all_parsers): ), ], ) +@xfail_pyarrow # CSV parse error: Empty CSV file or block def test_empty_dtype(all_parsers, dtype, expected): # see gh-14712 parser = all_parsers diff --git a/pandas/tests/io/parser/test_comment.py b/pandas/tests/io/parser/test_comment.py index 5b738446ea441..1724f3390ea46 100644 --- a/pandas/tests/io/parser/test_comment.py +++ b/pandas/tests/io/parser/test_comment.py @@ -10,9 +10,10 @@ from pandas import DataFrame import pandas._testing as tm -pytestmark = pytest.mark.usefixtures("pyarrow_skip") +xfail_pyarrow = pytest.mark.usefixtures("pyarrow_xfail") +@xfail_pyarrow # ValueError: The 'comment' option is not supported @pytest.mark.parametrize("na_values", [None, ["NaN"]]) def test_comment(all_parsers, na_values): parser = all_parsers @@ -27,6 +28,7 @@ def test_comment(all_parsers, na_values): tm.assert_frame_equal(result, expected) +@xfail_pyarrow # ValueError: The 'comment' option is not supported @pytest.mark.parametrize( "read_kwargs", [{}, {"lineterminator": "*"}, {"delim_whitespace": True}] ) @@ -58,6 +60,7 @@ def test_line_comment(all_parsers, read_kwargs, request): tm.assert_frame_equal(result, expected) +@xfail_pyarrow # ValueError: The 'comment' option is not supported def test_comment_skiprows(all_parsers): parser = all_parsers data = """# empty @@ -76,6 +79,7 @@ def test_comment_skiprows(all_parsers): tm.assert_frame_equal(result, expected) +@xfail_pyarrow # ValueError: The 'comment' option is not supported def test_comment_header(all_parsers): parser = all_parsers data = """# empty @@ -93,6 +97,7 @@ def test_comment_header(all_parsers): tm.assert_frame_equal(result, expected) +@xfail_pyarrow # ValueError: The 'comment' option is not supported def test_comment_skiprows_header(all_parsers): parser = all_parsers data = """# empty @@ -114,6 +119,7 @@ def test_comment_skiprows_header(all_parsers): tm.assert_frame_equal(result, expected) +@xfail_pyarrow # ValueError: The 'comment' option is not supported @pytest.mark.parametrize("comment_char", ["#", "~", "&", "^", "*", "@"]) def test_custom_comment_char(all_parsers, comment_char): parser = all_parsers @@ -126,6 +132,7 @@ def test_custom_comment_char(all_parsers, comment_char): tm.assert_frame_equal(result, expected) +@xfail_pyarrow # ValueError: The 'comment' option is not supported @pytest.mark.parametrize("header", ["infer", None]) def test_comment_first_line(all_parsers, header): # see gh-4623 @@ -141,6 +148,7 @@ def test_comment_first_line(all_parsers, header): tm.assert_frame_equal(result, expected) +@xfail_pyarrow # ValueError: The 'comment' option is not supported def test_comment_char_in_default_value(all_parsers, request): # GH#34002 if all_parsers.engine == "c": diff --git a/pandas/tests/io/parser/test_converters.py b/pandas/tests/io/parser/test_converters.py index 85f3db0398080..b16542bbdecec 100644 --- a/pandas/tests/io/parser/test_converters.py +++ b/pandas/tests/io/parser/test_converters.py @@ -15,9 +15,10 @@ ) import pandas._testing as tm -pytestmark = pytest.mark.usefixtures("pyarrow_skip") +xfail_pyarrow = pytest.mark.usefixtures("pyarrow_xfail") +@xfail_pyarrow # ValueError: The 'converters' option is not supported def test_converters_type_must_be_dict(all_parsers): parser = all_parsers data = """index,A,B,C,D @@ -28,6 +29,7 @@ def test_converters_type_must_be_dict(all_parsers): parser.read_csv(StringIO(data), converters=0) +@xfail_pyarrow # ValueError: The 'converters' option is 
not supported @pytest.mark.parametrize("column", [3, "D"]) @pytest.mark.parametrize( "converter", [parse, lambda x: int(x.split("/")[2])] # Produce integer. @@ -47,6 +49,7 @@ def test_converters(all_parsers, column, converter): tm.assert_frame_equal(result, expected) +@xfail_pyarrow # ValueError: The 'converters' option is not supported def test_converters_no_implicit_conv(all_parsers): # see gh-2184 parser = all_parsers @@ -60,6 +63,7 @@ def test_converters_no_implicit_conv(all_parsers): tm.assert_frame_equal(result, expected) +@xfail_pyarrow # ValueError: The 'converters' option is not supported def test_converters_euro_decimal_format(all_parsers): # see gh-583 converters = {} @@ -85,6 +89,7 @@ def test_converters_euro_decimal_format(all_parsers): tm.assert_frame_equal(result, expected) +@xfail_pyarrow # ValueError: The 'converters' option is not supported def test_converters_corner_with_nans(all_parsers): parser = all_parsers data = """id,score,days @@ -152,6 +157,7 @@ def convert_score(x): tm.assert_frame_equal(results[0], results[1]) +@xfail_pyarrow # ValueError: The 'converters' option is not supported @pytest.mark.parametrize("conv_f", [lambda x: x, str]) def test_converter_index_col_bug(all_parsers, conv_f): # see gh-1835 , GH#40589 @@ -166,6 +172,7 @@ def test_converter_index_col_bug(all_parsers, conv_f): tm.assert_frame_equal(rs, xp) +@xfail_pyarrow # ValueError: The 'converters' option is not supported def test_converter_identity_object(all_parsers): # GH#40589 parser = all_parsers @@ -177,6 +184,7 @@ def test_converter_identity_object(all_parsers): tm.assert_frame_equal(rs, xp) +@xfail_pyarrow # ValueError: The 'converters' option is not supported def test_converter_multi_index(all_parsers): # GH 42446 parser = all_parsers diff --git a/pandas/tests/io/parser/test_dialect.py b/pandas/tests/io/parser/test_dialect.py index 7d2bb6c083cda..fbea895435699 100644 --- a/pandas/tests/io/parser/test_dialect.py +++ b/pandas/tests/io/parser/test_dialect.py @@ -13,7 +13,10 @@ from pandas import DataFrame import pandas._testing as tm -pytestmark = pytest.mark.usefixtures("pyarrow_skip") +pytestmark = pytest.mark.filterwarnings( + "ignore:Passing a BlockManager to DataFrame:DeprecationWarning" +) +xfail_pyarrow = pytest.mark.usefixtures("pyarrow_xfail") @pytest.fixture @@ -30,6 +33,7 @@ def custom_dialect(): return dialect_name, dialect_kwargs +@xfail_pyarrow # ValueError: The 'dialect' option is not supported def test_dialect(all_parsers): parser = all_parsers data = """\ @@ -52,6 +56,7 @@ def test_dialect(all_parsers): tm.assert_frame_equal(df, exp) +@xfail_pyarrow # ValueError: The 'dialect' option is not supported def test_dialect_str(all_parsers): dialect_name = "mydialect" parser = all_parsers @@ -79,6 +84,7 @@ class InvalidDialect: parser.read_csv(StringIO(data), dialect=InvalidDialect) +@xfail_pyarrow # ValueError: The 'dialect' option is not supported @pytest.mark.parametrize( "arg", [None, "doublequote", "escapechar", "skipinitialspace", "quotechar", "quoting"], @@ -118,6 +124,7 @@ def test_dialect_conflict_except_delimiter(all_parsers, custom_dialect, arg, val tm.assert_frame_equal(result, expected) +@xfail_pyarrow # ValueError: The 'dialect' option is not supported @pytest.mark.parametrize( "kwargs,warning_klass", [ diff --git a/pandas/tests/io/parser/test_multi_thread.py b/pandas/tests/io/parser/test_multi_thread.py index c5b757d619e7a..da9b9bddd30cd 100644 --- a/pandas/tests/io/parser/test_multi_thread.py +++ b/pandas/tests/io/parser/test_multi_thread.py @@ -13,15 +13,17 @@ from 
pandas import DataFrame import pandas._testing as tm +xfail_pyarrow = pytest.mark.usefixtures("pyarrow_xfail") + # We'll probably always skip these for pyarrow # Maybe we'll add our own tests for pyarrow too pytestmark = [ pytest.mark.single_cpu, pytest.mark.slow, - pytest.mark.usefixtures("pyarrow_skip"), ] +@xfail_pyarrow # ValueError: Found non-unique column index def test_multi_thread_string_io_read_csv(all_parsers): # see gh-11786 parser = all_parsers @@ -116,6 +118,7 @@ def reader(arg): return final_dataframe +@xfail_pyarrow # ValueError: The 'nrows' option is not supported def test_multi_thread_path_multipart_read_csv(all_parsers): # see gh-11786 num_tasks = 4 diff --git a/pandas/tests/io/parser/test_quoting.py b/pandas/tests/io/parser/test_quoting.py index b8b05af609aa2..a677d9caa4b19 100644 --- a/pandas/tests/io/parser/test_quoting.py +++ b/pandas/tests/io/parser/test_quoting.py @@ -14,7 +14,10 @@ from pandas import DataFrame import pandas._testing as tm -pytestmark = pytest.mark.usefixtures("pyarrow_skip") +pytestmark = pytest.mark.filterwarnings( + "ignore:Passing a BlockManager to DataFrame:DeprecationWarning" +) +xfail_pyarrow = pytest.mark.usefixtures("pyarrow_xfail") @pytest.mark.parametrize( @@ -28,6 +31,7 @@ ({"quotechar": 2}, '"quotechar" must be string( or None)?, not int'), ], ) +@xfail_pyarrow # ParserError: CSV parse error: Empty CSV file or block def test_bad_quote_char(all_parsers, kwargs, msg): data = "1,2,3" parser = all_parsers @@ -43,6 +47,7 @@ def test_bad_quote_char(all_parsers, kwargs, msg): (10, 'bad "quoting" value'), # quoting must be in the range [0, 3] ], ) +@xfail_pyarrow # ValueError: The 'quoting' option is not supported def test_bad_quoting(all_parsers, quoting, msg): data = "1,2,3" parser = all_parsers @@ -60,6 +65,7 @@ def test_quote_char_basic(all_parsers): tm.assert_frame_equal(result, expected) +@xfail_pyarrow # ValueError: The 'quoting' option is not supported @pytest.mark.parametrize("quote_char", ["~", "*", "%", "$", "@", "P"]) def test_quote_char_various(all_parsers, quote_char): parser = all_parsers @@ -72,6 +78,7 @@ def test_quote_char_various(all_parsers, quote_char): tm.assert_frame_equal(result, expected) +@xfail_pyarrow # ValueError: The 'quoting' option is not supported @pytest.mark.parametrize("quoting", [csv.QUOTE_MINIMAL, csv.QUOTE_NONE]) @pytest.mark.parametrize("quote_char", ["", None]) def test_null_quote_char(all_parsers, quoting, quote_char): @@ -112,6 +119,7 @@ def test_null_quote_char(all_parsers, quoting, quote_char): ({"quotechar": '"', "quoting": csv.QUOTE_NONNUMERIC}, [[1.0, 2.0, "foo"]]), ], ) +@xfail_pyarrow # ValueError: The 'quoting' option is not supported def test_quoting_various(all_parsers, kwargs, exp_data): data = '1,2,"foo"' parser = all_parsers @@ -125,10 +133,14 @@ def test_quoting_various(all_parsers, kwargs, exp_data): @pytest.mark.parametrize( "doublequote,exp_data", [(True, [[3, '4 " 5']]), (False, [[3, '4 " 5"']])] ) -def test_double_quote(all_parsers, doublequote, exp_data): +def test_double_quote(all_parsers, doublequote, exp_data, request): parser = all_parsers data = 'a,b\n3,"4 "" 5"' + if parser.engine == "pyarrow" and not doublequote: + mark = pytest.mark.xfail(reason="Mismatched result") + request.applymarker(mark) + result = parser.read_csv(StringIO(data), quotechar='"', doublequote=doublequote) expected = DataFrame(exp_data, columns=["a", "b"]) tm.assert_frame_equal(result, expected) @@ -146,11 +158,15 @@ def test_quotechar_unicode(all_parsers, quotechar): @pytest.mark.parametrize("balanced", 
[True, False]) -def test_unbalanced_quoting(all_parsers, balanced): +def test_unbalanced_quoting(all_parsers, balanced, request): # see gh-22789. parser = all_parsers data = 'a,b,c\n1,2,"3' + if parser.engine == "pyarrow" and not balanced: + mark = pytest.mark.xfail(reason="Mismatched result") + request.applymarker(mark) + if balanced: # Re-balance the quoting and read in without errors. expected = DataFrame([[1, 2, 3]], columns=["a", "b", "c"]) diff --git a/pandas/tests/io/parser/test_skiprows.py b/pandas/tests/io/parser/test_skiprows.py index 4b509edc36925..9146af3f969e6 100644 --- a/pandas/tests/io/parser/test_skiprows.py +++ b/pandas/tests/io/parser/test_skiprows.py @@ -17,10 +17,13 @@ ) import pandas._testing as tm -# XFAIL ME PLS once hanging tests issues identified -pytestmark = pytest.mark.usefixtures("pyarrow_skip") +xfail_pyarrow = pytest.mark.usefixtures("pyarrow_xfail") +pytestmark = pytest.mark.filterwarnings( + "ignore:Passing a BlockManager to DataFrame:DeprecationWarning" +) +@xfail_pyarrow # ValueError: skiprows argument must be an integer @pytest.mark.parametrize("skiprows", [list(range(6)), 6]) def test_skip_rows_bug(all_parsers, skiprows): # see gh-505 @@ -48,6 +51,7 @@ def test_skip_rows_bug(all_parsers, skiprows): tm.assert_frame_equal(result, expected) +@xfail_pyarrow # ValueError: skiprows argument must be an integer def test_deep_skip_rows(all_parsers): # see gh-4382 parser = all_parsers @@ -63,6 +67,7 @@ def test_deep_skip_rows(all_parsers): tm.assert_frame_equal(result, condensed_result) +@xfail_pyarrow def test_skip_rows_blank(all_parsers): # see gh-9832 parser = all_parsers @@ -122,6 +127,7 @@ def test_skip_rows_blank(all_parsers): ), ], ) +@xfail_pyarrow # ValueError: skiprows argument must be an integer def test_skip_row_with_newline(all_parsers, data, kwargs, expected): # see gh-12775 and gh-10911 parser = all_parsers @@ -129,6 +135,7 @@ def test_skip_row_with_newline(all_parsers, data, kwargs, expected): tm.assert_frame_equal(result, expected) +@xfail_pyarrow # ValueError: skiprows argument must be an integer def test_skip_row_with_quote(all_parsers): # see gh-12775 and gh-10911 parser = all_parsers @@ -170,6 +177,7 @@ def test_skip_row_with_quote(all_parsers): ), ], ) +@xfail_pyarrow # ValueError: skiprows argument must be an integer def test_skip_row_with_newline_and_quote(all_parsers, data, exp_data): # see gh-12775 and gh-10911 parser = all_parsers @@ -179,6 +187,7 @@ def test_skip_row_with_newline_and_quote(all_parsers, data, exp_data): tm.assert_frame_equal(result, expected) +@xfail_pyarrow # ValueError: The 'delim_whitespace' option is not supported @pytest.mark.parametrize( "lineterminator", ["\n", "\r\n", "\r"] # "LF" # "CRLF" # "CR" ) @@ -216,6 +225,7 @@ def test_skiprows_lineterminator(all_parsers, lineterminator, request): tm.assert_frame_equal(result, expected) +@xfail_pyarrow def test_skiprows_infield_quote(all_parsers): # see gh-14459 parser = all_parsers @@ -226,6 +236,7 @@ def test_skiprows_infield_quote(all_parsers): tm.assert_frame_equal(result, expected) +@xfail_pyarrow # ValueError: skiprows argument must be an integer @pytest.mark.parametrize( "kwargs,expected", [ @@ -241,6 +252,7 @@ def test_skip_rows_callable(all_parsers, kwargs, expected): tm.assert_frame_equal(result, expected) +@xfail_pyarrow # ValueError: skiprows argument must be an integer def test_skip_rows_callable_not_in(all_parsers): parser = all_parsers data = "0,a\n1,b\n2,c\n3,d\n4,e" @@ -252,6 +264,7 @@ def test_skip_rows_callable_not_in(all_parsers): 
tm.assert_frame_equal(result, expected) +@xfail_pyarrow # ValueError: skiprows argument must be an integer def test_skip_rows_skip_all(all_parsers): parser = all_parsers data = "a\n1\n2\n3\n4\n5" @@ -261,6 +274,7 @@ def test_skip_rows_skip_all(all_parsers): parser.read_csv(StringIO(data), skiprows=lambda x: True) +@xfail_pyarrow # ValueError: skiprows argument must be an integer def test_skip_rows_bad_callable(all_parsers): msg = "by zero" parser = all_parsers @@ -270,6 +284,7 @@ def test_skip_rows_bad_callable(all_parsers): parser.read_csv(StringIO(data), skiprows=lambda x: 1 / 0) +@xfail_pyarrow # ValueError: skiprows argument must be an integer def test_skip_rows_and_n_rows(all_parsers): # GH#44021 data = """a,b diff --git a/pandas/tests/io/parser/usecols/test_parse_dates.py b/pandas/tests/io/parser/usecols/test_parse_dates.py index 069ac2a69d224..f223810772225 100644 --- a/pandas/tests/io/parser/usecols/test_parse_dates.py +++ b/pandas/tests/io/parser/usecols/test_parse_dates.py @@ -15,9 +15,15 @@ # TODO(1.4): Change these to xfails whenever parse_dates support(which was # intentionally disable to keep small PR sizes) is added back -pytestmark = pytest.mark.usefixtures("pyarrow_skip") +# pytestmark = pytest.mark.usefixtures("pyarrow_skip") + +pytestmark = pytest.mark.filterwarnings( + "ignore:Passing a BlockManager to DataFrame:DeprecationWarning" +) +xfail_pyarrow = pytest.mark.usefixtures("pyarrow_xfail") +@xfail_pyarrow # TypeError: expected bytes, int found @pytest.mark.parametrize("usecols", [[0, 2, 3], [3, 0, 2]]) def test_usecols_with_parse_dates(all_parsers, usecols): # see gh-9755 @@ -36,6 +42,7 @@ def test_usecols_with_parse_dates(all_parsers, usecols): tm.assert_frame_equal(result, expected) +@xfail_pyarrow # pyarrow.lib.ArrowKeyError: Column 'fdate' in include_columns def test_usecols_with_parse_dates2(all_parsers): # see gh-13604 parser = all_parsers @@ -131,13 +138,19 @@ def test_usecols_with_parse_dates4(all_parsers): list("acd"), # Names span only the selected columns. ], ) -def test_usecols_with_parse_dates_and_names(all_parsers, usecols, names): +def test_usecols_with_parse_dates_and_names(all_parsers, usecols, names, request): # see gh-9755 s = """0,1,2014-01-01,09:00,4 0,1,2014-01-02,10:00,4""" parse_dates = [[1, 2]] parser = all_parsers + if parser.engine == "pyarrow" and not (len(names) == 3 and usecols[0] == 0): + mark = pytest.mark.xfail( + reason="Length mismatch in some cases, UserWarning in other" + ) + request.applymarker(mark) + cols = { "a": [0, 0], "c_d": [Timestamp("2014-01-01 09:00:00"), Timestamp("2014-01-02 10:00:00")],
Orthogonal to #55576. These two together close #49411.
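A minimal pytest sketch (editorial, not from the diff) of the two xfail idioms this PR applies. In the real diff the marker is routed through a `pyarrow_xfail` fixture so only the pyarrow parametrization of `all_parsers` is marked; here a plain `pytest.mark.xfail` and a runtime `request.applymarker` stand in for it:

```python
import pytest

# Static marker: the test still runs, a known failure reports as XFAIL,
# and an unexpected pass reports as XPASS once support lands upstream.
# (A module-level skip, which this PR removes, never runs the test.)
xfail_pyarrow = pytest.mark.xfail(
    reason="ValueError: The 'chunksize' option is not supported"
)


@xfail_pyarrow
def test_read_chunksize_stub():
    # Stand-in for the parser call that raises under the pyarrow engine.
    raise ValueError("The 'chunksize' option is not supported")


@pytest.mark.parametrize("engine", ["c", "pyarrow"])
def test_conditional_xfail(engine, request):
    # Runtime marking, used in the diff when only some parametrizations
    # of a test fail (see test_eof_states / test_double_quote).
    if engine == "pyarrow":
        request.applymarker(pytest.mark.xfail(reason="option not supported"))
    assert engine == "c"
```

Running with `pytest -rxX` shows the XFAIL/XPASS summary, which is the payoff over skips: fixed engines become visible immediately.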
https://api.github.com/repos/pandas-dev/pandas/pulls/55637
2023-10-22T21:00:36Z
2023-10-23T09:26:56Z
2023-10-23T09:26:56Z
2023-10-23T14:57:26Z
DEPR: correct warning class for offsets' deprecated frequency aliases ('M' to 'ME')
diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx index 5c9da24185060..fad41fff6abb6 100644 --- a/pandas/_libs/tslibs/offsets.pyx +++ b/pandas/_libs/tslibs/offsets.pyx @@ -4731,7 +4731,7 @@ cpdef to_offset(freq, bint is_period=False): warnings.warn( f"\'{name}\' will be deprecated, please use " f"\'{c_OFFSET_DEPR_FREQSTR.get(name)}\' instead.", - UserWarning, + FutureWarning, stacklevel=find_stack_level(), ) name = c_OFFSET_DEPR_FREQSTR[name] diff --git a/pandas/tests/frame/methods/test_asfreq.py b/pandas/tests/frame/methods/test_asfreq.py index bc6e74d5b1f00..2faf5beda9fe6 100644 --- a/pandas/tests/frame/methods/test_asfreq.py +++ b/pandas/tests/frame/methods/test_asfreq.py @@ -240,6 +240,6 @@ def test_asfreq_frequency_M_deprecated(self): index = date_range("1/1/2000", periods=4, freq="ME") df = DataFrame({"s": Series([0.0, 1.0, 2.0, 3.0], index=index)}) expected = df.asfreq(freq="5ME") - with tm.assert_produces_warning(UserWarning, match=depr_msg): + with tm.assert_produces_warning(FutureWarning, match=depr_msg): result = df.asfreq(freq="5M") tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/indexes/datetimes/test_date_range.py b/pandas/tests/indexes/datetimes/test_date_range.py index e8113b851fe87..ed2c2b473c2df 100644 --- a/pandas/tests/indexes/datetimes/test_date_range.py +++ b/pandas/tests/indexes/datetimes/test_date_range.py @@ -128,7 +128,7 @@ def test_date_range_frequency_M_deprecated(self): depr_msg = "'M' will be deprecated, please use 'ME' instead." expected = date_range("1/1/2000", periods=4, freq="2ME") - with tm.assert_produces_warning(UserWarning, match=depr_msg): + with tm.assert_produces_warning(FutureWarning, match=depr_msg): result = date_range("1/1/2000", periods=4, freq="2M") tm.assert_index_equal(result, expected) diff --git a/pandas/tests/resample/test_datetime_index.py b/pandas/tests/resample/test_datetime_index.py index e0ba7902a8a6c..53bd4b056cfbd 100644 --- a/pandas/tests/resample/test_datetime_index.py +++ b/pandas/tests/resample/test_datetime_index.py @@ -2015,7 +2015,7 @@ def test_resample_M_deprecated(): s = Series(range(10), index=date_range("20130101", freq="d", periods=10)) expected = s.resample("2ME").mean() - with tm.assert_produces_warning(UserWarning, match=depr_msg): + with tm.assert_produces_warning(FutureWarning, match=depr_msg): result = s.resample("2M").mean() tm.assert_series_equal(result, expected)
xref #52064, #55553. For offsets' deprecated frequency aliases ('M' to 'ME', etc.), this replaces UserWarning with FutureWarning.
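A standalone sketch of the pattern the patch settles on; `resolve_freq` and `_DEPR_FREQSTR` are illustrative names in the spirit of `c_OFFSET_DEPR_FREQSTR`, not pandas API:

```python
import warnings

# Illustrative alias table for deprecated frequency spellings.
_DEPR_FREQSTR = {"M": "ME"}


def resolve_freq(name: str) -> str:
    """Resolve a frequency alias, warning on deprecated spellings."""
    if name in _DEPR_FREQSTR:
        # FutureWarning, not UserWarning: the alias still works today but
        # will change, and FutureWarning is the conventional class for
        # user-visible upcoming behavior changes.
        warnings.warn(
            f"'{name}' will be deprecated, please use "
            f"'{_DEPR_FREQSTR[name]}' instead.",
            FutureWarning,
            stacklevel=2,
        )
        return _DEPR_FREQSTR[name]
    return name


assert resolve_freq("M") == "ME"  # emits the FutureWarning
```

The updated tests then assert both the class and the message via `tm.assert_produces_warning(FutureWarning, match=depr_msg)`.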
https://api.github.com/repos/pandas-dev/pandas/pulls/55636
2023-10-22T20:23:38Z
2023-10-23T07:48:59Z
2023-10-23T07:48:59Z
2023-10-23T07:49:00Z
Backport PR #55537 on branch 2.1.x (BUG: Series inferring new string dtype even if dtype is given for scalar value)
diff --git a/pandas/core/construction.py b/pandas/core/construction.py index e661d590ab330..5903187769f08 100644 --- a/pandas/core/construction.py +++ b/pandas/core/construction.py @@ -540,6 +540,7 @@ def sanitize_array( ------- np.ndarray or ExtensionArray """ + original_dtype = dtype if isinstance(data, ma.MaskedArray): data = sanitize_masked_array(data) @@ -562,7 +563,11 @@ def sanitize_array( if not is_list_like(data): if index is None: raise ValueError("index must be specified when data is not list-like") - if isinstance(data, str) and using_pyarrow_string_dtype(): + if ( + isinstance(data, str) + and using_pyarrow_string_dtype() + and original_dtype is None + ): from pandas.core.arrays.string_ import StringDtype dtype = StringDtype("pyarrow_numpy") diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py index 2c3fdf627788a..8cc891e1ee9c0 100644 --- a/pandas/tests/series/test_constructors.py +++ b/pandas/tests/series/test_constructors.py @@ -2123,6 +2123,14 @@ def test_series_string_inference_storage_definition(self): result = Series(["a", "b"], dtype="string") tm.assert_series_equal(result, expected) + def test_series_constructor_infer_string_scalar(self): + # GH#55537 + with pd.option_context("future.infer_string", True): + ser = Series("a", index=[1, 2], dtype="string[python]") + expected = Series(["a", "a"], index=[1, 2], dtype="string[python]") + tm.assert_series_equal(ser, expected) + assert ser.dtype.storage == "python" + class TestSeriesConstructorIndexCoercion: def test_series_constructor_datetimelike_index_coercion(self):
Backport PR #55537: BUG: Series inferring new string dtype even if dtype is given for scalar value
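The fixed behavior, restated as a small runnable check (this mirrors the test added in the diff and assumes a pandas build containing the backport, i.e. 2.1.2+):

```python
import pandas as pd

with pd.option_context("future.infer_string", True):
    # A scalar is broadcast over the index. Before the fix, the global
    # infer_string option overrode the explicit dtype and produced a
    # pyarrow-backed string array; now the requested storage is kept.
    ser = pd.Series("a", index=[1, 2], dtype="string[python]")

expected = pd.Series(["a", "a"], index=[1, 2], dtype="string[python]")
pd.testing.assert_series_equal(ser, expected)
assert ser.dtype.storage == "python"
```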
https://api.github.com/repos/pandas-dev/pandas/pulls/55635
2023-10-22T19:49:11Z
2023-10-22T22:37:16Z
2023-10-22T22:37:16Z
2023-10-22T22:37:16Z
BUG: indexing raises for ea into numpy series
diff --git a/doc/source/whatsnew/v2.2.0.rst b/doc/source/whatsnew/v2.2.0.rst index c8c27f2f2e178..cf3756eb101be 100644 --- a/doc/source/whatsnew/v2.2.0.rst +++ b/doc/source/whatsnew/v2.2.0.rst @@ -344,9 +344,9 @@ Interval Indexing ^^^^^^^^ +- Bug in :meth:`DataFrame.loc` when setting :class:`Series` with extension dtype into NumPy dtype (:issue:`55604`) - Bug in :meth:`Index.difference` not returning a unique set of values when ``other`` is empty or ``other`` is considered non-comparable (:issue:`55113`) - Bug in setting :class:`Categorical` values into a :class:`DataFrame` with numpy dtypes raising ``RecursionError`` (:issue:`52927`) -- Missing ^^^^^^^ diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index 28e83fd70519c..27625db766862 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -1816,7 +1816,8 @@ def np_can_hold_element(dtype: np.dtype, element: Any) -> Any: if not isinstance(tipo, np.dtype): # i.e. nullable IntegerDtype; we can put this into an ndarray # losslessly iff it has no NAs - if element._hasna: + arr = element._values if isinstance(element, ABCSeries) else element + if arr._hasna: raise LossySetitemError return element diff --git a/pandas/tests/frame/indexing/test_setitem.py b/pandas/tests/frame/indexing/test_setitem.py index ccc1249088f9a..7db8f51afe291 100644 --- a/pandas/tests/frame/indexing/test_setitem.py +++ b/pandas/tests/frame/indexing/test_setitem.py @@ -761,6 +761,17 @@ def test_setitem_frame_midx_columns(self): df[col_name] = df[[col_name]] tm.assert_frame_equal(df, expected) + def test_loc_setitem_ea_dtype(self): + # GH#55604 + df = DataFrame({"a": np.array([10], dtype="i8")}) + df.loc[:, "a"] = Series([11], dtype="Int64") + expected = DataFrame({"a": np.array([11], dtype="i8")}) + tm.assert_frame_equal(df, expected) + + df = DataFrame({"a": np.array([10], dtype="i8")}) + df.iloc[:, 0] = Series([11], dtype="Int64") + tm.assert_frame_equal(df, expected) + class TestSetitemTZAwareValues: @pytest.fixture
- [x] closes #55604 - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
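Restating the fix as a runnable snippet (mirrors `test_loc_setitem_ea_dtype` from the diff; assumes a pandas build containing it):

```python
import numpy as np
import pandas as pd

df = pd.DataFrame({"a": np.array([10], dtype="i8")})

# Setting a nullable Int64 Series into an int64 column is lossless when
# it contains no NA values; before the fix this raised because _hasna
# was looked up on the Series itself rather than its underlying array.
df.loc[:, "a"] = pd.Series([11], dtype="Int64")

expected = pd.DataFrame({"a": np.array([11], dtype="i8")})
pd.testing.assert_frame_equal(df, expected)
```

The diff exercises the same path through `df.iloc[:, 0] = ...` as well.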
https://api.github.com/repos/pandas-dev/pandas/pulls/55633
2023-10-22T19:35:35Z
2023-10-23T18:08:35Z
2023-10-23T18:08:35Z
2023-11-16T16:59:09Z
Backport PR #55620 on branch 2.1.x (BUG: Groupby not keeping object dtype when infer string is set)
diff --git a/doc/source/whatsnew/v2.1.2.rst b/doc/source/whatsnew/v2.1.2.rst index 97a718dd496e9..175c534ce0d04 100644 --- a/doc/source/whatsnew/v2.1.2.rst +++ b/doc/source/whatsnew/v2.1.2.rst @@ -23,6 +23,7 @@ Fixed regressions Bug fixes ~~~~~~~~~ +- Fixed bug in :class:`.DataFrameGroupBy` reductions not preserving object dtype when ``infer_string`` is set (:issue:`55620`) - Fixed bug in :meth:`Categorical.equals` if other has arrow backed string dtype (:issue:`55364`) - Fixed bug in :meth:`DataFrame.__setitem__` not inferring string dtype for zero-dimensional array with ``infer_string=True`` (:issue:`55366`) - Fixed bug in :meth:`DataFrame.idxmin` and :meth:`DataFrame.idxmax` raising for arrow dtypes (:issue:`55368`) diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index 7fa69a70431e4..d9fdf72eeda2b 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -1855,8 +1855,7 @@ def _agg_py_fallback( ser = Series(values, copy=False) else: # We only get here with values.dtype == object - # TODO: special case not needed with ArrayManager - df = DataFrame(values.T) + df = DataFrame(values.T, dtype=values.dtype) # bc we split object blocks in grouped_reduce, we have only 1 col # otherwise we'd have to worry about block-splitting GH#39329 assert df.shape[1] == 1 diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index 78a0497ef611e..7726dbe6994ef 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -5,6 +5,7 @@ import numpy as np import pytest +from pandas.compat import pa_version_under7p0 from pandas.errors import ( PerformanceWarning, SpecificationError, @@ -2513,13 +2514,24 @@ def test_groupby_column_index_name_lost(func): tm.assert_index_equal(result, expected) -def test_groupby_duplicate_columns(): +@pytest.mark.parametrize( + "infer_string", + [ + False, + pytest.param( + True, + marks=pytest.mark.skipif(pa_version_under7p0, reason="arrow not installed"), + ), + ], +) +def test_groupby_duplicate_columns(infer_string): # GH: 31735 df = DataFrame( {"A": ["f", "e", "g", "h"], "B": ["a", "b", "c", "d"], "C": [1, 2, 3, 4]} ).astype(object) df.columns = ["A", "B", "B"] - result = df.groupby([0, 0, 0, 0]).min() + with pd.option_context("future.infer_string", infer_string): + result = df.groupby([0, 0, 0, 0]).min() expected = DataFrame( [["e", "a", 1]], index=np.array([0]), columns=["A", "B", "B"], dtype=object )
Backport PR #55620: BUG: Groupby not keeping object dtype when infer string is set
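Mirroring the regression test from the diff (the `future.infer_string` option requires pyarrow to be installed):

```python
import numpy as np
import pandas as pd

df = pd.DataFrame(
    {"A": ["f", "e", "g", "h"], "B": ["a", "b", "c", "d"], "C": [1, 2, 3, 4]}
).astype(object)
df.columns = ["A", "B", "B"]  # duplicate labels, as in GH#31735

with pd.option_context("future.infer_string", True):
    # Object blocks are split per column in the reduction fallback; the
    # fix pins the rebuilt DataFrame to the original object dtype so
    # string inference cannot kick in mid-reduction.
    result = df.groupby([0, 0, 0, 0]).min()

expected = pd.DataFrame(
    [["e", "a", 1]], index=np.array([0]), columns=["A", "B", "B"], dtype=object
)
pd.testing.assert_frame_equal(result, expected)
```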
https://api.github.com/repos/pandas-dev/pandas/pulls/55629
2023-10-22T15:20:41Z
2023-10-22T17:06:47Z
2023-10-22T17:06:47Z
2023-10-22T17:06:47Z
BUG: value_counts returning incorrect dtype for string dtype
diff --git a/doc/source/whatsnew/v2.1.2.rst b/doc/source/whatsnew/v2.1.2.rst index 8298cc0607512..f9c37206cf4f8 100644 --- a/doc/source/whatsnew/v2.1.2.rst +++ b/doc/source/whatsnew/v2.1.2.rst @@ -24,6 +24,7 @@ Fixed regressions Bug fixes ~~~~~~~~~ - Fixed bug in :class:`.DataFrameGroupBy` reductions not preserving object dtype when ``infer_string`` is set (:issue:`55620`) +- Fixed bug in :meth:`.SeriesGroupBy.value_counts` returning incorrect dtype for string columns (:issue:`55627`) - Fixed bug in :meth:`Categorical.equals` if other has arrow backed string dtype (:issue:`55364`) - Fixed bug in :meth:`DataFrame.__setitem__` not inferring string dtype for zero-dimensional array with ``infer_string=True`` (:issue:`55366`) - Fixed bug in :meth:`DataFrame.idxmin` and :meth:`DataFrame.idxmax` raising for arrow dtypes (:issue:`55368`) diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index f1b97fc5e88f6..5cfb075b807f4 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -108,6 +108,10 @@ class providing the base-class of operations. SparseArray, ) from pandas.core.arrays.string_ import StringDtype +from pandas.core.arrays.string_arrow import ( + ArrowStringArray, + ArrowStringArrayNumpySemantics, +) from pandas.core.base import ( PandasObject, SelectionMixin, @@ -2854,7 +2858,9 @@ def _value_counts( result_series.name = name result_series.index = index.set_names(range(len(columns))) result_frame = result_series.reset_index() - result_frame.columns = columns + [name] + orig_dtype = self.grouper.groupings[0].obj.columns.dtype # type: ignore[union-attr] # noqa: E501 + cols = Index(columns, dtype=orig_dtype).insert(len(columns), name) + result_frame.columns = cols result = result_frame return result.__finalize__(self.obj, method="value_counts") @@ -3006,7 +3012,12 @@ def size(self) -> DataFrame | Series: dtype_backend: None | Literal["pyarrow", "numpy_nullable"] = None if isinstance(self.obj, Series): if isinstance(self.obj.array, ArrowExtensionArray): - dtype_backend = "pyarrow" + if isinstance(self.obj.array, ArrowStringArrayNumpySemantics): + dtype_backend = None + elif isinstance(self.obj.array, ArrowStringArray): + dtype_backend = "numpy_nullable" + else: + dtype_backend = "pyarrow" elif isinstance(self.obj.array, BaseMaskedArray): dtype_backend = "numpy_nullable" # TODO: For DataFrames what if columns are mixed arrow/numpy/masked? 
diff --git a/pandas/tests/groupby/methods/test_size.py b/pandas/tests/groupby/methods/test_size.py index c275db9d1788c..93a4e743d0d71 100644 --- a/pandas/tests/groupby/methods/test_size.py +++ b/pandas/tests/groupby/methods/test_size.py @@ -1,6 +1,8 @@ import numpy as np import pytest +import pandas.util._test_decorators as td + from pandas.core.dtypes.common import is_integer_dtype from pandas import ( @@ -104,3 +106,25 @@ def test_size_series_masked_type_returns_Int64(dtype): result = ser.groupby(level=0).size() expected = Series([2, 1], dtype="Int64", index=["a", "b"]) tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "dtype", + [ + object, + pytest.param("string[pyarrow_numpy]", marks=td.skip_if_no("pyarrow")), + pytest.param("string[pyarrow]", marks=td.skip_if_no("pyarrow")), + ], +) +def test_size_strings(dtype): + # GH#55627 + df = DataFrame({"a": ["a", "a", "b"], "b": "a"}, dtype=dtype) + result = df.groupby("a")["b"].size() + exp_dtype = "Int64" if dtype == "string[pyarrow]" else "int64" + expected = Series( + [2, 1], + index=Index(["a", "b"], name="a", dtype=dtype), + name="b", + dtype=exp_dtype, + ) + tm.assert_series_equal(result, expected) diff --git a/pandas/tests/groupby/methods/test_value_counts.py b/pandas/tests/groupby/methods/test_value_counts.py index c1ee107715b71..37f9d26284f52 100644 --- a/pandas/tests/groupby/methods/test_value_counts.py +++ b/pandas/tests/groupby/methods/test_value_counts.py @@ -9,6 +9,8 @@ import numpy as np import pytest +import pandas.util._test_decorators as td + from pandas import ( Categorical, CategoricalIndex, @@ -369,6 +371,14 @@ def test_against_frame_and_seriesgroupby( tm.assert_frame_equal(result, expected) +@pytest.mark.parametrize( + "dtype", + [ + object, + pytest.param("string[pyarrow_numpy]", marks=td.skip_if_no("pyarrow")), + pytest.param("string[pyarrow]", marks=td.skip_if_no("pyarrow")), + ], +) @pytest.mark.parametrize("normalize", [True, False]) @pytest.mark.parametrize( "sort, ascending, expected_rows, expected_count, expected_group_size", @@ -386,7 +396,10 @@ def test_compound( expected_rows, expected_count, expected_group_size, + dtype, ): + education_df = education_df.astype(dtype) + education_df.columns = education_df.columns.astype(dtype) # Multiple groupby keys and as_index=False gp = education_df.groupby(["country", "gender"], as_index=False, sort=False) result = gp["education"].value_counts( @@ -395,11 +408,17 @@ def test_compound( expected = DataFrame() for column in ["country", "gender", "education"]: expected[column] = [education_df[column][row] for row in expected_rows] + expected = expected.astype(dtype) + expected.columns = expected.columns.astype(dtype) if normalize: expected["proportion"] = expected_count expected["proportion"] /= expected_group_size + if dtype == "string[pyarrow]": + expected["proportion"] = expected["proportion"].convert_dtypes() else: expected["count"] = expected_count + if dtype == "string[pyarrow]": + expected["count"] = expected["count"].convert_dtypes() tm.assert_frame_equal(result, expected) @@ -1146,3 +1165,14 @@ def test_value_counts_time_grouper(utc): ) expected = Series(1, index=index, name="count") tm.assert_series_equal(result, expected) + + +def test_value_counts_integer_columns(): + # GH#55627 + df = DataFrame({1: ["a", "a", "a"], 2: ["a", "a", "d"], 3: ["a", "b", "c"]}) + gp = df.groupby([1, 2], as_index=False, sort=False) + result = gp[3].value_counts() + expected = DataFrame( + {1: ["a", "a", "a"], 2: ["a", "a", "d"], 3: ["a", "b", "c"], "count": 1} + 
) + tm.assert_frame_equal(result, expected)
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
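The two fixes, restated as runnable checks mirroring `test_size_strings` and `test_value_counts_integer_columns` from the diff (the string-dtype case requires pyarrow):

```python
import pandas as pd

# .size() on a "string[pyarrow_numpy]" column should give plain int64
# counts (numpy semantics), not the nullable Int64 that the
# "string[pyarrow]" dtype still produces.
df = pd.DataFrame({"a": ["a", "a", "b"], "b": "a"}, dtype="string[pyarrow_numpy]")
result = df.groupby("a")["b"].size()
expected = pd.Series(
    [2, 1],
    index=pd.Index(["a", "b"], name="a", dtype="string[pyarrow_numpy]"),
    name="b",
    dtype="int64",
)
pd.testing.assert_series_equal(result, expected)

# value_counts should also preserve non-string (here integer) column
# labels instead of coercing them through a default Index.
df2 = pd.DataFrame({1: ["a", "a", "a"], 2: ["a", "a", "d"], 3: ["a", "b", "c"]})
out = df2.groupby([1, 2], as_index=False, sort=False)[3].value_counts()
assert list(out.columns) == [1, 2, 3, "count"]
```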
https://api.github.com/repos/pandas-dev/pandas/pulls/55627
2023-10-22T14:43:53Z
2023-10-25T10:46:27Z
2023-10-25T10:46:27Z
2023-10-25T10:58:35Z
DOC: Change core paths to api.typing in URLs
diff --git a/doc/source/reference/groupby.rst b/doc/source/reference/groupby.rst index 771163ae1b0bc..fe02fa106f941 100644 --- a/doc/source/reference/groupby.rst +++ b/doc/source/reference/groupby.rst @@ -5,7 +5,7 @@ ======= GroupBy ======= -.. currentmodule:: pandas.core.groupby +.. currentmodule:: pandas.api.typing :class:`pandas.api.typing.DataFrameGroupBy` and :class:`pandas.api.typing.SeriesGroupBy` instances are returned by groupby calls :func:`pandas.DataFrame.groupby` and @@ -40,7 +40,7 @@ Function application helper NamedAgg -.. currentmodule:: pandas.core.groupby +.. currentmodule:: pandas.api.typing Function application -------------------- diff --git a/doc/source/reference/resampling.rst b/doc/source/reference/resampling.rst index edbc8090fc849..2b52087cec280 100644 --- a/doc/source/reference/resampling.rst +++ b/doc/source/reference/resampling.rst @@ -5,7 +5,7 @@ ========== Resampling ========== -.. currentmodule:: pandas.core.resample +.. currentmodule:: pandas.api.typing :class:`pandas.api.typing.Resampler` instances are returned by resample calls: :func:`pandas.DataFrame.resample`, :func:`pandas.Series.resample`. diff --git a/doc/source/reference/window.rst b/doc/source/reference/window.rst index 14af2b8a120e0..75faf69583cfe 100644 --- a/doc/source/reference/window.rst +++ b/doc/source/reference/window.rst @@ -17,7 +17,7 @@ calls: :func:`pandas.DataFrame.ewm` and :func:`pandas.Series.ewm`. Rolling window functions ------------------------ -.. currentmodule:: pandas.core.window.rolling +.. currentmodule:: pandas.api.typing .. autosummary:: :toctree: api/ @@ -44,7 +44,7 @@ Rolling window functions Weighted window functions ------------------------- -.. currentmodule:: pandas.core.window.rolling +.. currentmodule:: pandas.api.typing .. autosummary:: :toctree: api/ @@ -58,7 +58,7 @@ Weighted window functions Expanding window functions -------------------------- -.. currentmodule:: pandas.core.window.expanding +.. currentmodule:: pandas.api.typing .. autosummary:: :toctree: api/ @@ -85,7 +85,7 @@ Expanding window functions Exponentially-weighted window functions --------------------------------------- -.. currentmodule:: pandas.core.window.ewm +.. currentmodule:: pandas.api.typing .. autosummary:: :toctree: api/
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature. @phofl pointed out that #27522 would change the URLs in the API docs from `pandas.core` to `pandas._core`. We shouldn't be leaking the locations of the internals via the URLs. While `pandas.api.typing` is perhaps a little odd, it is at least public. cc @jorisvandenbossche @jbrockmendel @mroeschke
https://api.github.com/repos/pandas-dev/pandas/pulls/55626
2023-10-22T14:40:26Z
2023-10-22T19:18:53Z
2023-10-22T19:18:53Z
2023-12-14T20:12:25Z
Backport PR #55518 on branch 2.1.x (CoW: Use exponential backoff when clearing dead references)
diff --git a/doc/source/whatsnew/v2.1.2.rst b/doc/source/whatsnew/v2.1.2.rst index 97a718dd496e9..ef69e874fdaf3 100644 --- a/doc/source/whatsnew/v2.1.2.rst +++ b/doc/source/whatsnew/v2.1.2.rst @@ -17,6 +17,7 @@ Fixed regressions - Fixed regression in :meth:`DataFrame.join` where result has missing values and dtype is arrow backed string (:issue:`55348`) - Fixed regression in :meth:`DataFrame.resample` which was extrapolating back to ``origin`` when ``origin`` was outside its bounds (:issue:`55064`) - Fixed regression in :meth:`DataFrame.sort_index` which was not sorting correctly when the index was a sliced :class:`MultiIndex` (:issue:`55379`) +- Fixed performance regression with wide DataFrames, typically involving methods where all columns were accessed individually (:issue:`55256`, :issue:`55245`) .. --------------------------------------------------------------------------- .. _whatsnew_212.bug_fixes: diff --git a/pandas/_libs/internals.pyx b/pandas/_libs/internals.pyx index 859ebce36eaa8..10b0f4776e0d1 100644 --- a/pandas/_libs/internals.pyx +++ b/pandas/_libs/internals.pyx @@ -944,17 +944,29 @@ cdef class BlockValuesRefs: """ cdef: public list referenced_blocks + public int clear_counter def __cinit__(self, blk: SharedBlock | None = None) -> None: if blk is not None: self.referenced_blocks = [weakref.ref(blk)] else: self.referenced_blocks = [] - - def _clear_dead_references(self) -> None: - self.referenced_blocks = [ - ref for ref in self.referenced_blocks if ref() is not None - ] + self.clear_counter = 500 # set reasonably high + + def _clear_dead_references(self, force=False) -> None: + # Use exponential backoff to decide when we want to clear references + # if force=False. Clearing for every insertion causes slowdowns if + # all these objects stay alive, e.g. df.items() for wide DataFrames + # see GH#55245 and GH#55008 + if force or len(self.referenced_blocks) > self.clear_counter: + self.referenced_blocks = [ + ref for ref in self.referenced_blocks if ref() is not None + ] + nr_of_refs = len(self.referenced_blocks) + if nr_of_refs < self.clear_counter // 2: + self.clear_counter = max(self.clear_counter // 2, 500) + elif nr_of_refs > self.clear_counter: + self.clear_counter = max(self.clear_counter * 2, nr_of_refs) def add_reference(self, blk: SharedBlock) -> None: """Adds a new reference to our reference collection. 
@@ -988,6 +1000,6 @@ cdef class BlockValuesRefs: ------- bool """ - self._clear_dead_references() + self._clear_dead_references(force=True) # Checking for more references than block pointing to itself return len(self.referenced_blocks) > 1 diff --git a/pandas/tests/copy_view/test_internals.py b/pandas/tests/copy_view/test_internals.py index 9180bd5a3a426..a727331307d7e 100644 --- a/pandas/tests/copy_view/test_internals.py +++ b/pandas/tests/copy_view/test_internals.py @@ -119,3 +119,33 @@ def test_iset_splits_blocks_inplace(using_copy_on_write, locs, arr, dtype): else: for col in df.columns: assert not np.shares_memory(get_array(df, col), get_array(df2, col)) + + +def test_exponential_backoff(): + # GH#55518 + df = DataFrame({"a": [1, 2, 3]}) + for i in range(490): + df.copy(deep=False) + + assert len(df._mgr.blocks[0].refs.referenced_blocks) == 491 + + df = DataFrame({"a": [1, 2, 3]}) + dfs = [df.copy(deep=False) for i in range(510)] + + for i in range(20): + df.copy(deep=False) + assert len(df._mgr.blocks[0].refs.referenced_blocks) == 531 + assert df._mgr.blocks[0].refs.clear_counter == 1000 + + for i in range(500): + df.copy(deep=False) + + # Don't reduce since we still have over 500 objects alive + assert df._mgr.blocks[0].refs.clear_counter == 1000 + + dfs = dfs[:300] + for i in range(500): + df.copy(deep=False) + + # Reduce since there are less than 500 objects alive + assert df._mgr.blocks[0].refs.clear_counter == 500
Backport PR #55518: CoW: Use exponential backoff when clearing dead references
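A standalone sketch of the backoff strategy in the patch; the class and method names here are illustrative, not the pandas internals (the real code lives in `BlockValuesRefs` in `pandas/_libs/internals.pyx`):

```python
import weakref


class RefRegistry:
    """Adaptive sweeping of dead weakrefs, as in BlockValuesRefs."""

    def __init__(self):
        self.refs = []
        self.clear_counter = 500  # set reasonably high

    def _clear_dead(self, force=False):
        # Sweep only when forced or when the list outgrows the threshold;
        # sweeping on every insert made repeated df.copy(deep=False)
        # quadratic for wide frames (GH#55245, GH#55008).
        if force or len(self.refs) > self.clear_counter:
            self.refs = [r for r in self.refs if r() is not None]
            n = len(self.refs)
            if n < self.clear_counter // 2:
                # Few survivors: shrink the threshold (never below 500).
                self.clear_counter = max(self.clear_counter // 2, 500)
            elif n > self.clear_counter:
                # Mostly still alive: back off exponentially.
                self.clear_counter = max(self.clear_counter * 2, n)

    def add(self, obj):
        self._clear_dead()
        self.refs.append(weakref.ref(obj))

    def has_references(self):
        self._clear_dead(force=True)  # an exact answer is needed here
        return len(self.refs) > 1
```

The exponential growth keeps amortized insertion cheap when many referrers stay alive, while the forced sweep in `has_references` preserves exact Copy-on-Write semantics at decision points.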
https://api.github.com/repos/pandas-dev/pandas/pulls/55625
2023-10-22T13:42:15Z
2023-10-22T19:18:02Z
2023-10-22T19:18:02Z
2023-10-22T19:18:02Z
BUG: pytables with non-nano dt64
diff --git a/doc/source/whatsnew/v2.2.0.rst b/doc/source/whatsnew/v2.2.0.rst index 9eb5bbc8f07d5..fc199d3fd00f8 100644 --- a/doc/source/whatsnew/v2.2.0.rst +++ b/doc/source/whatsnew/v2.2.0.rst @@ -365,8 +365,10 @@ I/O - Bug in :func:`read_csv` with ``engine="pyarrow"`` where ``usecols`` wasn't working with a csv with no headers (:issue:`54459`) - Bug in :func:`read_excel`, with ``engine="xlrd"`` (``xls`` files) erroring when file contains NaNs/Infs (:issue:`54564`) - Bug in :func:`to_excel`, with ``OdsWriter`` (``ods`` files) writing boolean/string value (:issue:`54994`) +- Bug in :meth:`DataFrame.to_hdf` and :func:`read_hdf` with ``datetime64`` dtypes with non-nanosecond resolution failing to round-trip correctly (:issue:`55622`) - Bug in :meth:`pandas.read_excel` with an ODS file without cached formatted cell for float values (:issue:`55219`) - Bug where :meth:`DataFrame.to_json` would raise an ``OverflowError`` instead of a ``TypeError`` with unsupported NumPy types (:issue:`55403`) +- Period ^^^^^^ diff --git a/pandas/core/computation/pytables.py b/pandas/core/computation/pytables.py index 138a3ee42f686..04a8ad7ef0be6 100644 --- a/pandas/core/computation/pytables.py +++ b/pandas/core/computation/pytables.py @@ -217,7 +217,7 @@ def stringify(value): kind = ensure_decoded(self.kind) meta = ensure_decoded(self.meta) - if kind in ("datetime64", "datetime"): + if kind == "datetime" or (kind and kind.startswith("datetime64")): if isinstance(v, (int, float)): v = stringify(v) v = ensure_decoded(v) diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index c0a27ecfec803..91f7b2afec56c 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -2152,7 +2152,6 @@ def convert( val_kind = _ensure_decoded(self.kind) values = _maybe_convert(values, val_kind, encoding, errors) - kwargs = {} kwargs["name"] = _ensure_decoded(self.index_name) @@ -2577,7 +2576,7 @@ def convert(self, values: np.ndarray, nan_rep, encoding: str, errors: str): dtype = _ensure_decoded(dtype_name) # reverse converts - if dtype == "datetime64": + if dtype.startswith("datetime64"): # recreate with tz if indicated converted = _set_tz(converted, tz, coerce=True) @@ -2870,7 +2869,9 @@ def _get_index_factory(self, attrs): def f(values, freq=None, tz=None): # data are already in UTC, localize and convert if tz present - dta = DatetimeArray._simple_new(values.values, freq=freq) + dta = DatetimeArray._simple_new( + values.values, dtype=values.dtype, freq=freq + ) result = DatetimeIndex._simple_new(dta, name=None) if tz is not None: result = result.tz_localize("UTC").tz_convert(tz) @@ -2961,7 +2962,7 @@ def read_array(self, key: str, start: int | None = None, stop: int | None = None else: ret = node[start:stop] - if dtype == "datetime64": + if dtype and dtype.startswith("datetime64"): # reconstruct a timezone if indicated tz = getattr(attrs, "tz", None) ret = _set_tz(ret, tz, coerce=True) @@ -3170,7 +3171,7 @@ def write_array( elif lib.is_np_dtype(value.dtype, "M"): self._handle.create_array(self.group, key, value.view("i8")) - getattr(self.group, key)._v_attrs.value_type = "datetime64" + getattr(self.group, key)._v_attrs.value_type = str(value.dtype) elif isinstance(value.dtype, DatetimeTZDtype): # store as UTC # with a zone @@ -3185,7 +3186,7 @@ def write_array( # error: Item "ExtensionArray" of "Union[Any, ExtensionArray]" has no # attribute "tz" node._v_attrs.tz = _get_tz(value.tz) # type: ignore[union-attr] - node._v_attrs.value_type = "datetime64" + node._v_attrs.value_type = f"datetime64[{value.dtype.unit}]" elif 
lib.is_np_dtype(value.dtype, "m"): self._handle.create_array(self.group, key, value.view("i8")) getattr(self.group, key)._v_attrs.value_type = "timedelta64" @@ -4689,7 +4690,6 @@ def read( selection = Selection(self, where=where, start=start, stop=stop) # apply the selection filters & axis orderings df = self.process_axes(df, selection=selection, columns=columns) - return df @@ -4932,11 +4932,12 @@ def _set_tz( # call below (which returns an ndarray). So we are only non-lossy # if `tz` matches `values.tz`. assert values.tz is None or values.tz == tz + if values.tz is not None: + return values if tz is not None: if isinstance(values, DatetimeIndex): name = values.name - values = values.asi8 else: name = None values = values.ravel() @@ -5019,8 +5020,12 @@ def _convert_index(name: str, index: Index, encoding: str, errors: str) -> Index def _unconvert_index(data, kind: str, encoding: str, errors: str) -> np.ndarray | Index: index: Index | np.ndarray - if kind == "datetime64": - index = DatetimeIndex(data) + if kind.startswith("datetime64"): + if kind == "datetime64": + # created before we stored resolution information + index = DatetimeIndex(data) + else: + index = DatetimeIndex(data.view(kind)) elif kind == "timedelta64": index = TimedeltaIndex(data) elif kind == "date": @@ -5194,6 +5199,8 @@ def _maybe_convert(values: np.ndarray, val_kind: str, encoding: str, errors: str def _get_converter(kind: str, encoding: str, errors: str): if kind == "datetime64": return lambda x: np.asarray(x, dtype="M8[ns]") + elif "datetime64" in kind: + return lambda x: np.asarray(x, dtype=kind) elif kind == "string": return lambda x: _unconvert_string_array( x, nan_rep=None, encoding=encoding, errors=errors @@ -5203,7 +5210,7 @@ def _get_converter(kind: str, encoding: str, errors: str): def _need_convert(kind: str) -> bool: - if kind in ("datetime64", "string"): + if kind in ("datetime64", "string") or "datetime64" in kind: return True return False @@ -5248,7 +5255,7 @@ def _dtype_to_kind(dtype_str: str) -> str: elif dtype_str.startswith(("int", "uint")): kind = "integer" elif dtype_str.startswith("datetime64"): - kind = "datetime64" + kind = dtype_str elif dtype_str.startswith("timedelta"): kind = "timedelta64" elif dtype_str.startswith("bool"): @@ -5273,8 +5280,11 @@ def _get_data_and_dtype_name(data: ArrayLike): if isinstance(data, Categorical): data = data.codes - # For datetime64tz we need to drop the TZ in tests TODO: why? - dtype_name = data.dtype.name.split("[")[0] + if isinstance(data.dtype, DatetimeTZDtype): + # For datetime64tz we need to drop the TZ in tests TODO: why? 
+ dtype_name = f"datetime64[{data.dtype.unit}]" + else: + dtype_name = data.dtype.name if data.dtype.kind in "mM": data = np.asarray(data.view("i8")) diff --git a/pandas/tests/io/pytables/test_append.py b/pandas/tests/io/pytables/test_append.py index 3ad9abc138ed1..dace8435595ee 100644 --- a/pandas/tests/io/pytables/test_append.py +++ b/pandas/tests/io/pytables/test_append.py @@ -772,7 +772,7 @@ def test_append_raise(setup_path): "dtype->bytes24,kind->string,shape->(1, 30)] " "vs current table " "[name->values_block_1,cname->values_block_1," - "dtype->datetime64,kind->datetime64,shape->None]" + "dtype->datetime64[s],kind->datetime64[s],shape->None]" ) with pytest.raises(ValueError, match=msg): store.append("df", df) diff --git a/pandas/tests/io/pytables/test_errors.py b/pandas/tests/io/pytables/test_errors.py index 4c1f7667873e1..adf42cc7e8d39 100644 --- a/pandas/tests/io/pytables/test_errors.py +++ b/pandas/tests/io/pytables/test_errors.py @@ -49,7 +49,7 @@ def test_table_index_incompatible_dtypes(setup_path): with ensure_clean_store(setup_path) as store: store.put("frame", df1, format="table") - msg = re.escape("incompatible kind in col [integer - datetime64]") + msg = re.escape("incompatible kind in col [integer - datetime64[ns]]") with pytest.raises(TypeError, match=msg): store.put("frame", df2, format="table", append=True) diff --git a/pandas/tests/io/pytables/test_store.py b/pandas/tests/io/pytables/test_store.py index f031ac46c670c..b61b6f0772251 100644 --- a/pandas/tests/io/pytables/test_store.py +++ b/pandas/tests/io/pytables/test_store.py @@ -541,16 +541,22 @@ def test_store_index_name(setup_path): tm.assert_frame_equal(recons, df) +@pytest.mark.parametrize("tz", [None, "US/Pacific"]) +@pytest.mark.parametrize("unit", ["s", "ms", "us", "ns"]) @pytest.mark.parametrize("table_format", ["table", "fixed"]) -def test_store_index_name_numpy_str(tmp_path, table_format, setup_path): +def test_store_index_name_numpy_str(tmp_path, table_format, setup_path, unit, tz): # GH #13492 idx = Index( pd.to_datetime([dt.date(2000, 1, 1), dt.date(2000, 1, 2)]), name="cols\u05d2", - ) - idx1 = Index( - pd.to_datetime([dt.date(2010, 1, 1), dt.date(2010, 1, 2)]), - name="rows\u05d0", + ).tz_localize(tz) + idx1 = ( + Index( + pd.to_datetime([dt.date(2010, 1, 1), dt.date(2010, 1, 2)]), + name="rows\u05d0", + ) + .as_unit(unit) + .tz_localize(tz) ) df = DataFrame(np.arange(4).reshape(2, 2), columns=idx, index=idx1)
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.

Surfaced while implementing #55564
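A minimal repro sketch of the round trip this PR fixes, assuming PyTables is installed; the file name `store.h5` is illustrative:

```python
import pandas as pd

# Build a frame with a non-nanosecond (second-resolution) datetime column.
df = pd.DataFrame(
    {"ts": pd.to_datetime(["2016-01-01", "2016-01-02"]).as_unit("s")}
)
df.to_hdf("store.h5", key="df", mode="w")
result = pd.read_hdf("store.h5", "df")

# With this fix the stored value_type records the full dtype string, so the
# second resolution should survive the round trip.
print(result["ts"].dtype)  # datetime64[s]
```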
https://api.github.com/repos/pandas-dev/pandas/pulls/55622
2023-10-21T21:52:38Z
2023-10-23T16:51:15Z
2023-10-23T16:51:15Z
2023-10-23T16:59:28Z
BUG: mode not sorting values for arrow backed strings
diff --git a/doc/source/whatsnew/v2.1.2.rst b/doc/source/whatsnew/v2.1.2.rst index 8298cc0607512..db65e0b9f4315 100644 --- a/doc/source/whatsnew/v2.1.2.rst +++ b/doc/source/whatsnew/v2.1.2.rst @@ -31,6 +31,7 @@ Bug fixes - Fixed bug in :meth:`Index.insert` raising when inserting ``None`` into :class:`Index` with ``dtype="string[pyarrow_numpy]"`` (:issue:`55365`) - Fixed bug in :meth:`Series.all` and :meth:`Series.any` not treating missing values correctly for ``dtype="string[pyarrow_numpy]"`` (:issue:`55367`) - Fixed bug in :meth:`Series.floordiv` for :class:`ArrowDtype` (:issue:`55561`) +- Fixed bug in :meth:`Series.mode` not sorting values for arrow backed string dtype (:issue:`55621`) - Fixed bug in :meth:`Series.rank` for ``string[pyarrow_numpy]`` dtype (:issue:`55362`) - Fixed bug in :meth:`Series.str.extractall` for :class:`ArrowDtype` dtype being converted to object (:issue:`53846`) - Fixed bug where PDEP-6 warning about setting an item of an incompatible dtype was being shown when creating a new conditional column (:issue:`55025`) diff --git a/pandas/core/arrays/arrow/array.py b/pandas/core/arrays/arrow/array.py index 8f0ad53792855..4bcc03643dac8 100644 --- a/pandas/core/arrays/arrow/array.py +++ b/pandas/core/arrays/arrow/array.py @@ -1919,6 +1919,7 @@ def _mode(self, dropna: bool = True) -> Self: if pa.types.is_temporal(pa_type): most_common = most_common.cast(pa_type) + most_common = most_common.take(pc.array_sort_indices(most_common)) return type(self)(most_common) def _maybe_convert_setitem_value(self, value): diff --git a/pandas/tests/extension/test_arrow.py b/pandas/tests/extension/test_arrow.py index c2f6dc9e579a4..fe6bde65420f7 100644 --- a/pandas/tests/extension/test_arrow.py +++ b/pandas/tests/extension/test_arrow.py @@ -1333,7 +1333,7 @@ def test_quantile(data, interpolation, quantile, request): @pytest.mark.parametrize( "take_idx, exp_idx", - [[[0, 0, 2, 2, 4, 4], [0, 4]], [[0, 0, 0, 2, 4, 4], [0]]], + [[[0, 0, 2, 2, 4, 4], [4, 0]], [[0, 0, 0, 2, 4, 4], [0]]], ids=["multi_mode", "single_mode"], ) def test_mode_dropna_true(data_for_grouping, take_idx, exp_idx): @@ -1351,7 +1351,7 @@ def test_mode_dropna_false_mode_na(data): expected = pd.Series([None], dtype=data.dtype) tm.assert_series_equal(result, expected) - expected = pd.Series([None, data[0]], dtype=data.dtype) + expected = pd.Series([data[0], None], dtype=data.dtype) result = expected.mode(dropna=False) tm.assert_series_equal(result, expected) diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index 9485a84a9d5a8..5992731643d31 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -9,6 +9,7 @@ PerformanceWarning, SpecificationError, ) +import pandas.util._test_decorators as td import pandas as pd from pandas import ( @@ -2541,7 +2542,7 @@ def test_groupby_column_index_name_lost(func): "infer_string", [ False, - True, + pytest.param(True, marks=td.skip_if_no("pyarrow")), ], ) def test_groupby_duplicate_columns(infer_string): @@ -2773,13 +2774,20 @@ def test_rolling_wrong_param_min_period(): test_df.groupby("name")["val"].rolling(window=2, min_period=1).sum() -def test_by_column_values_with_same_starting_value(): +@pytest.mark.parametrize( + "dtype", + [ + object, + pytest.param("string[pyarrow_numpy]", marks=td.skip_if_no("pyarrow")), + ], +) +def test_by_column_values_with_same_starting_value(dtype): # GH29635 df = DataFrame( { "Name": ["Thomas", "Thomas", "Thomas John"], "Credit": [1200, 1300, 900], - "Mood": ["sad", "happy", 
"happy"], + "Mood": Series(["sad", "happy", "happy"], dtype=dtype), } ) aggregate_details = {"Mood": Series.mode, "Credit": "sum"}
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
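A short sketch of the fixed behavior, assuming pyarrow is installed; before this change the modes of an arrow-backed string Series could come back unsorted:

```python
import pandas as pd

ser = pd.Series(["b", "b", "a", "a"], dtype="string[pyarrow]")
# "a" and "b" are both modes; with this fix they are returned in sorted
# order rather than whichever order pyarrow's mode kernel produced.
print(ser.mode().tolist())  # ['a', 'b']
```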
https://api.github.com/repos/pandas-dev/pandas/pulls/55621
2023-10-21T19:42:40Z
2023-10-25T01:31:32Z
2023-10-25T01:31:32Z
2023-10-25T09:38:01Z
BUG: Groupby not keeping object dtype when infer string is set
diff --git a/doc/source/whatsnew/v2.1.2.rst b/doc/source/whatsnew/v2.1.2.rst index 97a718dd496e9..175c534ce0d04 100644 --- a/doc/source/whatsnew/v2.1.2.rst +++ b/doc/source/whatsnew/v2.1.2.rst @@ -23,6 +23,7 @@ Fixed regressions Bug fixes ~~~~~~~~~ +- Fixed bug in :class:`.DataFrameGroupBy` reductions not preserving object dtype when ``infer_string`` is set (:issue:`55620`) - Fixed bug in :meth:`Categorical.equals` if other has arrow backed string dtype (:issue:`55364`) - Fixed bug in :meth:`DataFrame.__setitem__` not inferring string dtype for zero-dimensional array with ``infer_string=True`` (:issue:`55366`) - Fixed bug in :meth:`DataFrame.idxmin` and :meth:`DataFrame.idxmax` raising for arrow dtypes (:issue:`55368`) diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index e33c4b3579c69..f1b97fc5e88f6 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -1904,8 +1904,7 @@ def _agg_py_fallback( ser = Series(values, copy=False) else: # We only get here with values.dtype == object - # TODO: special case not needed with ArrayManager - df = DataFrame(values.T) + df = DataFrame(values.T, dtype=values.dtype) # bc we split object blocks in grouped_reduce, we have only 1 col # otherwise we'd have to worry about block-splitting GH#39329 assert df.shape[1] == 1 diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index 7297d049587e6..0a5a2ed99caae 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -5,6 +5,7 @@ import numpy as np import pytest +from pandas.compat import pa_version_under7p0 from pandas.errors import ( PerformanceWarning, SpecificationError, @@ -2537,13 +2538,24 @@ def test_groupby_column_index_name_lost(func): tm.assert_index_equal(result, expected) -def test_groupby_duplicate_columns(): +@pytest.mark.parametrize( + "infer_string", + [ + False, + pytest.param( + True, + marks=pytest.mark.skipif(pa_version_under7p0, reason="arrow not installed"), + ), + ], +) +def test_groupby_duplicate_columns(infer_string): # GH: 31735 df = DataFrame( {"A": ["f", "e", "g", "h"], "B": ["a", "b", "c", "d"], "C": [1, 2, 3, 4]} ).astype(object) df.columns = ["A", "B", "B"] - result = df.groupby([0, 0, 0, 0]).min() + with pd.option_context("future.infer_string", infer_string): + result = df.groupby([0, 0, 0, 0]).min() expected = DataFrame( [["e", "a", 1]], index=np.array([0]), columns=["A", "B", "B"], dtype=object )
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
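A minimal sketch of the regression, assuming pyarrow is available so `future.infer_string` can take effect; the object-dtype column should stay object through the pure-Python fallback reduction:

```python
import pandas as pd

df = pd.DataFrame({"g": [0, 0], "a": ["x", "y"]}).astype({"a": object})
with pd.option_context("future.infer_string", True):
    result = df.groupby("g").min()

# With this fix the fallback path passes the original dtype through, so the
# result stays object instead of being re-inferred as a string dtype.
print(result["a"].dtype)  # object
```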
https://api.github.com/repos/pandas-dev/pandas/pulls/55620
2023-10-21T19:13:57Z
2023-10-22T15:20:33Z
2023-10-22T15:20:33Z
2023-10-22T15:22:11Z
BUG: Groupby not keeping string dtype for empty objects
diff --git a/doc/source/whatsnew/v2.1.2.rst b/doc/source/whatsnew/v2.1.2.rst index 8863bfa9f3f69..1dd7bc49fad42 100644 --- a/doc/source/whatsnew/v2.1.2.rst +++ b/doc/source/whatsnew/v2.1.2.rst @@ -26,6 +26,7 @@ Fixed regressions Bug fixes ~~~~~~~~~ - Fixed bug in :class:`.DataFrameGroupBy` reductions not preserving object dtype when ``infer_string`` is set (:issue:`55620`) +- Fixed bug in :meth:`.DataFrameGroupBy.min()` and :meth:`.DataFrameGroupBy.max()` not preserving extension dtype for empty object (:issue:`55619`) - Fixed bug in :meth:`.SeriesGroupBy.value_counts` returning incorrect dtype for string columns (:issue:`55627`) - Fixed bug in :meth:`Categorical.equals` if other has arrow backed string dtype (:issue:`55364`) - Fixed bug in :meth:`DataFrame.__setitem__` not inferring string dtype for zero-dimensional array with ``infer_string=True`` (:issue:`55366`) diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py index 05e6fc09a5ef6..3d97711d5f8c3 100644 --- a/pandas/core/arrays/base.py +++ b/pandas/core/arrays/base.py @@ -2352,6 +2352,9 @@ def _groupby_op( # GH#43682 if isinstance(self.dtype, StringDtype): # StringArray + if op.how not in ["any", "all"]: + # Fail early to avoid conversion to object + op._get_cython_function(op.kind, op.how, np.dtype(object), False) npvalues = self.to_numpy(object, na_value=np.nan) else: raise NotImplementedError( diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py index 607059e5183ec..e4cba7ce8f1cd 100644 --- a/pandas/core/groupby/ops.py +++ b/pandas/core/groupby/ops.py @@ -33,6 +33,7 @@ from pandas.errors import AbstractMethodError from pandas.util._decorators import cache_readonly +from pandas.core.dtypes.base import ExtensionDtype from pandas.core.dtypes.cast import ( maybe_cast_pointwise_result, maybe_downcast_to_dtype, @@ -837,10 +838,8 @@ def agg_series( ------- np.ndarray or ExtensionArray """ - # test_groupby_empty_with_category gets here with self.ngroups == 0 - # and len(obj) > 0 - if len(obj) > 0 and not isinstance(obj._values, np.ndarray): + if not isinstance(obj._values, np.ndarray): # we can preserve a little bit more aggressively with EA dtype # because maybe_cast_pointwise_result will do a try/except # with _from_sequence. 
NB we are assuming here that _from_sequence @@ -849,11 +848,18 @@ def agg_series( result = self._aggregate_series_pure_python(obj, func) - npvalues = lib.maybe_convert_objects(result, try_float=False) - if preserve_dtype: - out = maybe_cast_pointwise_result(npvalues, obj.dtype, numeric_only=True) + if len(obj) == 0 and len(result) == 0 and isinstance(obj.dtype, ExtensionDtype): + cls = obj.dtype.construct_array_type() + out = cls._from_sequence(result) + else: - out = npvalues + npvalues = lib.maybe_convert_objects(result, try_float=False) + if preserve_dtype: + out = maybe_cast_pointwise_result( + npvalues, obj.dtype, numeric_only=True + ) + else: + out = npvalues return out @final diff --git a/pandas/tests/groupby/test_reductions.py b/pandas/tests/groupby/test_reductions.py index fdfb211ac2269..35ad8e3f5dc61 100644 --- a/pandas/tests/groupby/test_reductions.py +++ b/pandas/tests/groupby/test_reductions.py @@ -575,6 +575,19 @@ def test_groupby_min_max_categorical(func): tm.assert_frame_equal(result, expected) +@pytest.mark.parametrize("func", ["min", "max"]) +def test_min_empty_string_dtype(func): + # GH#55619 + pytest.importorskip("pyarrow") + dtype = "string[pyarrow_numpy]" + df = DataFrame({"a": ["a"], "b": "a", "c": "a"}, dtype=dtype).iloc[:0] + result = getattr(df.groupby("a"), func)() + expected = DataFrame( + columns=["b", "c"], dtype=dtype, index=pd.Index([], dtype=dtype, name="a") + ) + tm.assert_frame_equal(result, expected) + + def test_max_nan_bug(): raw = """,Date,app,File -04-23,2013-04-23 00:00:00,,log080001.log
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.

cc @rhshadrach
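A sketch adapted from the test added in this PR (requires pyarrow); the point is that an empty groupby reduction keeps the extension dtype:

```python
import pandas as pd

dtype = "string[pyarrow_numpy]"
# Slice to zero rows so the aggregation runs over an empty extension array.
df = pd.DataFrame({"a": ["a"], "b": "a", "c": "a"}, dtype=dtype).iloc[:0]
result = df.groupby("a").min()

# With this fix the empty result columns keep string[pyarrow_numpy] rather
# than falling back to object.
print(result.dtypes)
```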
https://api.github.com/repos/pandas-dev/pandas/pulls/55619
2023-10-21T18:08:25Z
2023-10-26T13:10:49Z
2023-10-26T13:10:49Z
2023-10-26T14:14:54Z
CLN: assorted
diff --git a/pandas/_libs/tslibs/np_datetime.pyx b/pandas/_libs/tslibs/np_datetime.pyx index c3ee68e14a8d4..25249e4f6f225 100644 --- a/pandas/_libs/tslibs/np_datetime.pyx +++ b/pandas/_libs/tslibs/np_datetime.pyx @@ -484,7 +484,6 @@ import operator cdef int op_to_op_code(op): - # TODO: should exist somewhere? if op is operator.eq: return Py_EQ if op is operator.ne: diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index 154d78a8cd85a..9a2fe38fe3ed4 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -2165,7 +2165,7 @@ def __contains__(self, key) -> bool: # Rendering Methods def _formatter(self, boxed: bool = False): - # Defer to CategoricalFormatter's formatter. + # Returning None here will cause format_array to do inference. return None def _repr_categories(self) -> list[str]: diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index 22ffaceeff1bb..037e9dd06bb6e 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -2013,8 +2013,9 @@ def freq(self, value) -> None: self._freq = value + @final @classmethod - def _validate_frequency(cls, index, freq, **kwargs): + def _validate_frequency(cls, index, freq: BaseOffset, **kwargs): """ Validate that a frequency is compatible with the values of a given Datetime Array/Index or Timedelta Array/Index diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index b6eef812ead05..3cf4dde3015c9 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -2266,7 +2266,8 @@ def _sequence_to_dt64ns( elif lib.is_np_dtype(data_dtype, "M"): # tz-naive DatetimeArray or ndarray[datetime64] - data = getattr(data, "_ndarray", data) + if isinstance(data, DatetimeArray): + data = data._ndarray new_dtype = data.dtype data_unit = get_unit_from_dtype(new_dtype) if not is_supported_unit(data_unit): diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py index 9f85f82df666f..08eb0dbb56ec8 100644 --- a/pandas/core/dtypes/dtypes.py +++ b/pandas/core/dtypes/dtypes.py @@ -55,6 +55,7 @@ from pandas.core.dtypes.generic import ( ABCCategoricalIndex, ABCIndex, + ABCRangeIndex, ) from pandas.core.dtypes.inference import ( is_bool, @@ -464,8 +465,7 @@ def __repr__(self) -> str_type: dtype = "None" else: data = self.categories._format_data(name=type(self).__name__) - if data is None: - # self.categories is RangeIndex + if isinstance(self.categories, ABCRangeIndex): data = str(self.categories._range) data = data.rstrip(", ") dtype = self.categories.dtype diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 252e88d7c7d51..da88c55ea814e 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -127,6 +127,7 @@ ABCIntervalIndex, ABCMultiIndex, ABCPeriodIndex, + ABCRangeIndex, ABCSeries, ABCTimedeltaIndex, ) @@ -1292,11 +1293,6 @@ def __repr__(self) -> str_t: attrs_str = [f"{k}={v}" for k, v in attrs] prepr = ", ".join(attrs_str) - # no data provided, just attributes - if data is None: - # i.e. RangeIndex - data = "" - return f"{klass_name}({data}{prepr})" @property @@ -1306,6 +1302,7 @@ def _formatter_func(self): """ return default_pprint + @final def _format_data(self, name=None) -> str_t: """ Return the formatted data as a unicode string. 
@@ -1319,6 +1316,9 @@ def _format_data(self, name=None) -> str_t: self = cast("CategoricalIndex", self) if is_object_dtype(self.categories.dtype): is_justify = False + elif isinstance(self, ABCRangeIndex): + # We will do the relevant formatting via attrs + return "" return format_object_summary( self, @@ -6961,6 +6961,7 @@ def drop( indexer = indexer[~mask] return self.delete(indexer) + @final def infer_objects(self, copy: bool = True) -> Index: """ If we have an object dtype, try to infer a non-object dtype. @@ -6992,6 +6993,7 @@ def infer_objects(self, copy: bool = True) -> Index: result._references.add_index_reference(result) return result + @final def diff(self, periods: int = 1) -> Self: """ Computes the difference between consecutive values in the Index object. @@ -7020,6 +7022,7 @@ def diff(self, periods: int = 1) -> Self: """ return self._constructor(self.to_series().diff(periods)) + @final def round(self, decimals: int = 0) -> Self: """ Round each value in the Index to the given number of decimals. diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 136327e3787f9..73143730085d6 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -417,8 +417,10 @@ def _is_comparable_dtype(self, dtype: DtypeObj) -> bool: # -------------------------------------------------------------------- # Rendering Methods - @property + @cache_readonly def _formatter_func(self): + # Note this is equivalent to the DatetimeIndexOpsMixin method but + # uses the maybe-cached self._is_dates_only instead of re-computing it. from pandas.io.formats.format import get_format_datetime64 formatter = get_format_datetime64(is_dates_only=self._is_dates_only) diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py index bb2ccfe930ab8..30b11aea6317f 100644 --- a/pandas/core/indexes/range.py +++ b/pandas/core/indexes/range.py @@ -240,7 +240,7 @@ def _data(self) -> np.ndarray: # type: ignore[override] """ return np.arange(self.start, self.stop, self.step, dtype=np.int64) - def _get_data_as_items(self): + def _get_data_as_items(self) -> list[tuple[str, int]]: """return a list of tuples of start, stop, step""" rng = self._range return [("start", rng.start), ("stop", rng.stop), ("step", rng.step)] @@ -257,15 +257,11 @@ def _format_attrs(self): """ Return a list of tuples of the (attr, formatted_value) """ - attrs = self._get_data_as_items() + attrs = cast("list[tuple[str, str | int]]", self._get_data_as_items()) if self._name is not None: attrs.append(("name", ibase.default_pprint(self._name))) return attrs - def _format_data(self, name=None): - # we are formatting thru the attributes - return None - def _format_with_header(self, *, header: list[str], na_rep: str) -> list[str]: # Equivalent to Index implementation, but faster if not len(self._range): diff --git a/pandas/core/resample.py b/pandas/core/resample.py index 969748209772a..82f3efce1fdd8 100644 --- a/pandas/core/resample.py +++ b/pandas/core/resample.py @@ -189,6 +189,7 @@ def __init__( else: self.exclusions = frozenset() + @final def __str__(self) -> str: """ Provide a nice str repr of our rolling object. 
@@ -200,6 +201,7 @@ def __str__(self) -> str: ) return f"{type(self).__name__} [{', '.join(attrs)}]" + @final def __getattr__(self, attr: str): if attr in self._internal_names_set: return object.__getattribute__(self, attr) @@ -210,6 +212,7 @@ def __getattr__(self, attr: str): return object.__getattribute__(self, attr) + @final @property def _from_selection(self) -> bool: """ @@ -249,6 +252,7 @@ def _get_binner(self): bin_grouper = BinGrouper(bins, binlabels, indexer=self._indexer) return binner, bin_grouper + @final @Substitution( klass="Resampler", examples=""" @@ -334,6 +338,7 @@ def pipe( """ ) + @final @doc( _shared_docs["aggregate"], see_also=_agg_see_also_doc, @@ -352,6 +357,7 @@ def aggregate(self, func=None, *args, **kwargs): agg = aggregate apply = aggregate + @final def transform(self, arg, *args, **kwargs): """ Call function producing a like-indexed Series on each group. @@ -471,6 +477,7 @@ def _groupby_and_aggregate(self, how, *args, **kwargs): return self._wrap_result(result) + @final def _get_resampler_for_grouping( self, groupby: GroupBy, key, include_groups: bool = True ): @@ -506,6 +513,7 @@ def _wrap_result(self, result): return result + @final def ffill(self, limit: int | None = None): """ Forward fill the values. @@ -574,6 +582,7 @@ def ffill(self, limit: int | None = None): """ return self._upsample("ffill", limit=limit) + @final def nearest(self, limit: int | None = None): """ Resample by using the nearest value. @@ -634,6 +643,7 @@ def nearest(self, limit: int | None = None): """ return self._upsample("nearest", limit=limit) + @final def bfill(self, limit: int | None = None): """ Backward fill the new missing values in the resampled data. @@ -736,6 +746,7 @@ def bfill(self, limit: int | None = None): """ return self._upsample("bfill", limit=limit) + @final def fillna(self, method, limit: int | None = None): """ Fill missing values introduced by upsampling. @@ -903,6 +914,7 @@ def fillna(self, method, limit: int | None = None): ) return self._upsample(method, limit=limit) + @final def interpolate( self, method: InterpolateOptions = "linear", @@ -1084,6 +1096,7 @@ def interpolate( **kwargs, ) + @final def asfreq(self, fill_value=None): """ Return the values at the new freq, essentially a reindex. 
@@ -1122,6 +1135,7 @@ def asfreq(self, fill_value=None): """ return self._upsample("asfreq", fill_value=fill_value) + @final def sum( self, numeric_only: bool = False, @@ -1169,6 +1183,7 @@ def sum( nv.validate_resampler_func("sum", args, kwargs) return self._downsample("sum", numeric_only=numeric_only, min_count=min_count) + @final def prod( self, numeric_only: bool = False, @@ -1216,6 +1231,7 @@ def prod( nv.validate_resampler_func("prod", args, kwargs) return self._downsample("prod", numeric_only=numeric_only, min_count=min_count) + @final def min( self, numeric_only: bool = False, @@ -1250,6 +1266,7 @@ def min( nv.validate_resampler_func("min", args, kwargs) return self._downsample("min", numeric_only=numeric_only, min_count=min_count) + @final def max( self, numeric_only: bool = False, @@ -1283,6 +1300,7 @@ def max( nv.validate_resampler_func("max", args, kwargs) return self._downsample("max", numeric_only=numeric_only, min_count=min_count) + @final @doc(GroupBy.first) def first( self, @@ -1295,6 +1313,7 @@ def first( nv.validate_resampler_func("first", args, kwargs) return self._downsample("first", numeric_only=numeric_only, min_count=min_count) + @final @doc(GroupBy.last) def last( self, @@ -1307,12 +1326,14 @@ def last( nv.validate_resampler_func("last", args, kwargs) return self._downsample("last", numeric_only=numeric_only, min_count=min_count) + @final @doc(GroupBy.median) def median(self, numeric_only: bool = False, *args, **kwargs): maybe_warn_args_and_kwargs(type(self), "median", args, kwargs) nv.validate_resampler_func("median", args, kwargs) return self._downsample("median", numeric_only=numeric_only) + @final def mean( self, numeric_only: bool = False, @@ -1356,6 +1377,7 @@ def mean( nv.validate_resampler_func("mean", args, kwargs) return self._downsample("mean", numeric_only=numeric_only) + @final def std( self, ddof: int = 1, @@ -1403,6 +1425,7 @@ def std( nv.validate_resampler_func("std", args, kwargs) return self._downsample("std", ddof=ddof, numeric_only=numeric_only) + @final def var( self, ddof: int = 1, @@ -1456,6 +1479,7 @@ def var( nv.validate_resampler_func("var", args, kwargs) return self._downsample("var", ddof=ddof, numeric_only=numeric_only) + @final @doc(GroupBy.sem) def sem( self, @@ -1468,6 +1492,7 @@ def sem( nv.validate_resampler_func("sem", args, kwargs) return self._downsample("sem", ddof=ddof, numeric_only=numeric_only) + @final @doc(GroupBy.ohlc) def ohlc( self, @@ -1495,6 +1520,7 @@ def ohlc( return self._downsample("ohlc") + @final @doc(SeriesGroupBy.nunique) def nunique( self, @@ -1505,6 +1531,7 @@ def nunique( nv.validate_resampler_func("nunique", args, kwargs) return self._downsample("nunique") + @final @doc(GroupBy.size) def size(self): result = self._downsample("size") @@ -1524,6 +1551,7 @@ def size(self): result = Series([], index=result.index, dtype="int64", name=name) return result + @final @doc(GroupBy.count) def count(self): result = self._downsample("count") @@ -1541,6 +1569,7 @@ def count(self): return result + @final def quantile(self, q: float | list[float] | AnyArrayLike = 0.5, **kwargs): """ Return value at the given quantile. 
diff --git a/pandas/core/series.py b/pandas/core/series.py index 724a54c85da9b..c6625eefa2249 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -1047,7 +1047,7 @@ def _ixs(self, i: int, axis: AxisInt = 0) -> Any: Returns ------- - scalar (int) or Series (slice, sequence) + scalar """ return self._values[i] diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py index 1a7e4d7a80e13..6b201f6bf3cfe 100644 --- a/pandas/io/formats/format.py +++ b/pandas/io/formats/format.py @@ -69,7 +69,6 @@ from pandas.core.arrays.string_ import StringDtype from pandas.core.base import PandasObject import pandas.core.common as com -from pandas.core.construction import extract_array from pandas.core.indexes.api import ( Index, MultiIndex, @@ -1242,7 +1241,7 @@ def _format(x): # object dtype return str(formatter(x)) - vals = extract_array(self.values, extract_numpy=True) + vals = self.values if not isinstance(vals, np.ndarray): raise TypeError( "ExtensionArray formatting should use _ExtensionArrayFormatter" @@ -1505,12 +1504,10 @@ def _format_strings(self) -> list[str]: """we by definition have DO NOT have a TZ""" values = self.values - dti = DatetimeIndex(values) - - if self.formatter is not None and callable(self.formatter): - return [self.formatter(x) for x in dti] + if self.formatter is not None: + return [self.formatter(x) for x in values] - fmt_values = dti._data._format_native_types( + fmt_values = values._format_native_types( na_rep=self.nat_rep, date_format=self.date_format ) return fmt_values.tolist() @@ -1520,8 +1517,7 @@ class _ExtensionArrayFormatter(_GenericArrayFormatter): values: ExtensionArray def _format_strings(self) -> list[str]: - values = extract_array(self.values, extract_numpy=True) - values = cast(ExtensionArray, values) + values = self.values formatter = self.formatter fallback_formatter = None diff --git a/pandas/io/stata.py b/pandas/io/stata.py index 707ce8d1a4985..70294e8a62cca 100644 --- a/pandas/io/stata.py +++ b/pandas/io/stata.py @@ -237,7 +237,7 @@ # TODO: Add typing. As of January 2020 it is not possible to type this function since # mypy doesn't understand that a Series and an int can be combined using mathematical # operations. (+, -). -def _stata_elapsed_date_to_datetime_vec(dates, fmt) -> Series: +def _stata_elapsed_date_to_datetime_vec(dates: Series, fmt: str) -> Series: """ Convert from SIF to datetime. 
https://www.stata.com/help.cgi?datetime diff --git a/pandas/plotting/_matplotlib/timeseries.py b/pandas/plotting/_matplotlib/timeseries.py index 77de5c1ad7b42..d3cddc81d3cc4 100644 --- a/pandas/plotting/_matplotlib/timeseries.py +++ b/pandas/plotting/_matplotlib/timeseries.py @@ -7,6 +7,7 @@ TYPE_CHECKING, cast, ) +import warnings import numpy as np @@ -282,8 +283,6 @@ def maybe_convert_index(ax: Axes, data): freq_str = _get_period_alias(freq) - import warnings - with warnings.catch_warnings(): # suppress Period[B] deprecation warning # TODO: need to find an alternative to this before the deprecation diff --git a/pandas/tests/arrays/period/test_astype.py b/pandas/tests/arrays/period/test_astype.py index 57634cf07bdb9..bb9b07a113ed3 100644 --- a/pandas/tests/arrays/period/test_astype.py +++ b/pandas/tests/arrays/period/test_astype.py @@ -52,16 +52,16 @@ def test_astype_period(): tm.assert_period_array_equal(result, expected) -@pytest.mark.parametrize("other", ["datetime64[ns]", "timedelta64[ns]"]) -def test_astype_datetime(other): +@pytest.mark.parametrize("dtype", ["datetime64[ns]", "timedelta64[ns]"]) +def test_astype_datetime(dtype): arr = period_array(["2000", "2001", None], freq="D") # slice off the [ns] so that the regex matches. - if other == "timedelta64[ns]": - with pytest.raises(TypeError, match=other[:-4]): - arr.astype(other) + if dtype == "timedelta64[ns]": + with pytest.raises(TypeError, match=dtype[:-4]): + arr.astype(dtype) else: # GH#45038 allow period->dt64 because we allow dt64->period - result = arr.astype(other) + result = arr.astype(dtype) expected = pd.DatetimeIndex(["2000", "2001", pd.NaT])._data tm.assert_datetime_array_equal(result, expected) diff --git a/pandas/tests/extension/list/test_list.py b/pandas/tests/extension/list/test_list.py index 295f08679c3eb..ac396cd3c60d4 100644 --- a/pandas/tests/extension/list/test_list.py +++ b/pandas/tests/extension/list/test_list.py @@ -27,7 +27,7 @@ def data(): def test_to_csv(data): # https://github.com/pandas-dev/pandas/issues/28840 # array with list-likes fail when doing astype(str) on the numpy array - # which was done in to_native_types + # which was done in get_values_for_csv df = pd.DataFrame({"a": data}) res = df.to_csv() assert str(data[0]) in res diff --git a/pandas/tests/indexes/interval/test_formats.py b/pandas/tests/indexes/interval/test_formats.py index 5b509edc9ff88..d080516917892 100644 --- a/pandas/tests/indexes/interval/test_formats.py +++ b/pandas/tests/indexes/interval/test_formats.py @@ -101,7 +101,7 @@ def test_repr_floats(self): ), ], ) - def test_to_native_types(self, tuples, closed, expected_data): + def test_get_values_for_csv(self, tuples, closed, expected_data): # GH 28210 index = IntervalIndex.from_tuples(tuples, closed=closed) result = index._get_values_for_csv(na_rep="NaN") diff --git a/pandas/tests/io/parser/test_parse_dates.py b/pandas/tests/io/parser/test_parse_dates.py index c0977be03adef..d53fc296e1777 100644 --- a/pandas/tests/io/parser/test_parse_dates.py +++ b/pandas/tests/io/parser/test_parse_dates.py @@ -2029,7 +2029,7 @@ def test_date_parser_usecols_thousands(all_parsers): @skip_pyarrow -def test_parse_dates_and_keep_orgin_column(all_parsers): +def test_parse_dates_and_keep_original_column(all_parsers): # GH#13378 parser = all_parsers data = """A diff --git a/pandas/tests/util/test_assert_almost_equal.py b/pandas/tests/util/test_assert_almost_equal.py index 8527efdbf7867..4e692084f7352 100644 --- a/pandas/tests/util/test_assert_almost_equal.py +++ 
b/pandas/tests/util/test_assert_almost_equal.py @@ -340,7 +340,7 @@ def test_mismatched_na_assert_almost_equal_deprecation(left, right): # TODO: to get the same deprecation in assert_numpy_array_equal we need # to change/deprecate the default for strict_nan to become True - # TODO: to get the same deprecateion in assert_index_equal we need to + # TODO: to get the same deprecation in assert_index_equal we need to # change/deprecate array_equivalent_object to be stricter, as # assert_index_equal uses Index.equal which uses array_equivalent. with tm.assert_produces_warning(FutureWarning, match=msg):
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
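One user-visible convention this cleanup leans on (in the `_format_data`/`_format_attrs` changes above) is that a `RangeIndex` renders via its start/stop/step attributes rather than materialized data; a small illustration:

```python
import pandas as pd

idx = pd.RangeIndex(start=0, stop=10, step=2, name="r")
# The repr is built from the range attributes, not from an int64 array.
print(repr(idx))  # RangeIndex(start=0, stop=10, step=2, name='r')
```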
https://api.github.com/repos/pandas-dev/pandas/pulls/55618
2023-10-21T16:08:05Z
2023-10-22T19:25:05Z
2023-10-22T19:25:05Z
2023-10-22T20:31:42Z
REF: implement ParseState
diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx index 4989feaf84006..8fdbd9affa58a 100644 --- a/pandas/_libs/tslib.pyx +++ b/pandas/_libs/tslib.pyx @@ -46,7 +46,10 @@ from pandas._libs.tslibs.np_datetime cimport ( import_pandas_datetime() -from pandas._libs.tslibs.strptime cimport parse_today_now +from pandas._libs.tslibs.strptime cimport ( + DatetimeParseState, + parse_today_now, +) from pandas._libs.util cimport ( is_float_object, is_integer_object, @@ -58,7 +61,6 @@ from pandas._libs.tslibs.conversion cimport ( _TSObject, cast_from_unit, convert_str_to_tsobject, - convert_timezone, get_datetime64_nanos, parse_pydatetime, ) @@ -454,9 +456,9 @@ cpdef array_to_datetime( float tz_offset set out_tzoffset_vals = set() tzinfo tz_out = None - bint found_tz = False, found_naive = False cnp.flatiter it = cnp.PyArray_IterNew(values) NPY_DATETIMEUNIT creso = NPY_FR_ns + DatetimeParseState state = DatetimeParseState() # specify error conditions assert is_raise or is_ignore or is_coerce @@ -474,17 +476,7 @@ cpdef array_to_datetime( iresult[i] = NPY_NAT elif PyDateTime_Check(val): - if val.tzinfo is not None: - found_tz = True - else: - found_naive = True - tz_out = convert_timezone( - val.tzinfo, - tz_out, - found_naive, - found_tz, - utc_convert, - ) + tz_out = state.process_datetime(val, tz_out, utc_convert) iresult[i] = parse_pydatetime(val, &dts, utc_convert, creso=creso) elif PyDate_Check(val): diff --git a/pandas/_libs/tslibs/conversion.pxd b/pandas/_libs/tslibs/conversion.pxd index 3e5a79e833a25..36c57a8b9ce24 100644 --- a/pandas/_libs/tslibs/conversion.pxd +++ b/pandas/_libs/tslibs/conversion.pxd @@ -47,13 +47,6 @@ cpdef (int64_t, int) precision_from_unit(str unit, NPY_DATETIMEUNIT out_reso=*) cdef maybe_localize_tso(_TSObject obj, tzinfo tz, NPY_DATETIMEUNIT reso) -cdef tzinfo convert_timezone( - tzinfo tz_in, - tzinfo tz_out, - bint found_naive, - bint found_tz, - bint utc_convert, -) cdef int64_t parse_pydatetime( datetime val, diff --git a/pandas/_libs/tslibs/conversion.pyx b/pandas/_libs/tslibs/conversion.pyx index a1c59489b3d38..0568576c83870 100644 --- a/pandas/_libs/tslibs/conversion.pyx +++ b/pandas/_libs/tslibs/conversion.pyx @@ -673,59 +673,6 @@ cpdef inline datetime localize_pydatetime(datetime dt, tzinfo tz): return _localize_pydatetime(dt, tz) -cdef tzinfo convert_timezone( - tzinfo tz_in, - tzinfo tz_out, - bint found_naive, - bint found_tz, - bint utc_convert, -): - """ - Validate that ``tz_in`` can be converted/localized to ``tz_out``. - - Parameters - ---------- - tz_in : tzinfo or None - Timezone info of element being processed. - tz_out : tzinfo or None - Timezone info of output. - found_naive : bool - Whether a timezone-naive element has been found so far. - found_tz : bool - Whether a timezone-aware element has been found so far. - utc_convert : bool - Whether to convert/localize to UTC. - - Returns - ------- - tz_info - Timezone info of output. - - Raises - ------ - ValueError - If ``tz_in`` can't be converted/localized to ``tz_out``. 
- """ - if tz_in is not None: - if utc_convert: - pass - elif found_naive: - raise ValueError("Tz-aware datetime.datetime " - "cannot be converted to " - "datetime64 unless utc=True") - elif tz_out is not None and not tz_compare(tz_out, tz_in): - raise ValueError("Tz-aware datetime.datetime " - "cannot be converted to " - "datetime64 unless utc=True") - else: - tz_out = tz_in - else: - if found_tz and not utc_convert: - raise ValueError("Cannot mix tz-aware with " - "tz-naive values") - return tz_out - - cdef int64_t parse_pydatetime( datetime val, npy_datetimestruct *dts, diff --git a/pandas/_libs/tslibs/strptime.pxd b/pandas/_libs/tslibs/strptime.pxd index 175195d4362e4..d09612f4fbf7d 100644 --- a/pandas/_libs/tslibs/strptime.pxd +++ b/pandas/_libs/tslibs/strptime.pxd @@ -1,4 +1,16 @@ +from cpython.datetime cimport ( + datetime, + tzinfo, +) from numpy cimport int64_t cdef bint parse_today_now(str val, int64_t* iresult, bint utc) + + +cdef class DatetimeParseState: + cdef: + bint found_tz + bint found_naive + + cdef tzinfo process_datetime(self, datetime dt, tzinfo tz, bint utc_convert) diff --git a/pandas/_libs/tslibs/strptime.pyx b/pandas/_libs/tslibs/strptime.pyx index 717ec41a918d8..d1c6bab180576 100644 --- a/pandas/_libs/tslibs/strptime.pyx +++ b/pandas/_libs/tslibs/strptime.pyx @@ -48,10 +48,7 @@ from numpy cimport ( ) from pandas._libs.missing cimport checknull_with_nat_and_na -from pandas._libs.tslibs.conversion cimport ( - convert_timezone, - get_datetime64_nanos, -) +from pandas._libs.tslibs.conversion cimport get_datetime64_nanos from pandas._libs.tslibs.nattype cimport ( NPY_NAT, c_nat_strings as nat_strings, @@ -73,6 +70,7 @@ import_pandas_datetime() from pandas._libs.tslibs.np_datetime import OutOfBoundsDatetime from pandas._libs.tslibs.timestamps cimport _Timestamp +from pandas._libs.tslibs.timezones cimport tz_compare from pandas._libs.util cimport ( is_float_object, is_integer_object, @@ -156,6 +154,37 @@ cdef dict _parse_code_table = {"y": 0, "u": 22} +cdef class DatetimeParseState: + def __cinit__(self): + self.found_tz = False + self.found_naive = False + + cdef tzinfo process_datetime(self, datetime dt, tzinfo tz, bint utc_convert): + if dt.tzinfo is not None: + self.found_tz = True + else: + self.found_naive = True + + if dt.tzinfo is not None: + if utc_convert: + pass + elif self.found_naive: + raise ValueError("Tz-aware datetime.datetime " + "cannot be converted to " + "datetime64 unless utc=True") + elif tz is not None and not tz_compare(tz, dt.tzinfo): + raise ValueError("Tz-aware datetime.datetime " + "cannot be converted to " + "datetime64 unless utc=True") + else: + tz = dt.tzinfo + else: + if self.found_tz and not utc_convert: + raise ValueError("Cannot mix tz-aware with " + "tz-naive values") + return tz + + def array_strptime( ndarray[object] values, str fmt, @@ -183,13 +212,12 @@ def array_strptime( bint is_raise = errors=="raise" bint is_ignore = errors=="ignore" bint is_coerce = errors=="coerce" - bint found_naive = False - bint found_tz = False tzinfo tz_out = None bint iso_format = format_is_iso(fmt) NPY_DATETIMEUNIT out_bestunit int out_local = 0, out_tzoffset = 0 bint string_to_dts_succeeded = 0 + DatetimeParseState state = DatetimeParseState() assert is_raise or is_ignore or is_coerce @@ -276,17 +304,7 @@ def array_strptime( iresult[i] = NPY_NAT continue elif PyDateTime_Check(val): - if val.tzinfo is not None: - found_tz = True - else: - found_naive = True - tz_out = convert_timezone( - val.tzinfo, - tz_out, - found_naive, - found_tz, - utc, - ) + 
tz_out = state.process_datetime(val, tz_out, utc) if isinstance(val, _Timestamp): iresult[i] = val.tz_localize(None).as_unit("ns")._value else:
In the near future, the state being tracked in `array_to_datetime` and `array_strptime` will include a resolution (xref #55564). This implements a `DatetimeParseState` object to de-duplicate that tracking, validation, and updating.
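A small illustration of the timezone validation that `DatetimeParseState.process_datetime` centralizes (existing behavior, not a new API): mixing tz-aware and tz-naive datetimes raises unless `utc=True`:

```python
from datetime import datetime, timezone

import pandas as pd

aware = datetime(2020, 1, 1, tzinfo=timezone.utc)
naive = datetime(2020, 1, 2)

try:
    pd.to_datetime([aware, naive])
except ValueError as err:
    print(err)  # Cannot mix tz-aware with tz-naive values

# With utc=True everything is converted/localized to UTC instead.
print(pd.to_datetime([aware, naive], utc=True))
```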
https://api.github.com/repos/pandas-dev/pandas/pulls/55617
2023-10-21T15:54:11Z
2023-10-23T17:27:09Z
2023-10-23T17:27:09Z
2023-10-23T18:46:55Z
REF: Misplaced DatetimeIndex tests
diff --git a/pandas/tests/indexes/datetimes/test_asof.py b/pandas/tests/indexes/datetimes/methods/test_asof.py similarity index 100% rename from pandas/tests/indexes/datetimes/test_asof.py rename to pandas/tests/indexes/datetimes/methods/test_asof.py diff --git a/pandas/tests/indexes/datetimes/methods/test_astype.py b/pandas/tests/indexes/datetimes/methods/test_astype.py index 94390acb35624..e527f6f28cd9d 100644 --- a/pandas/tests/indexes/datetimes/methods/test_astype.py +++ b/pandas/tests/indexes/datetimes/methods/test_astype.py @@ -18,6 +18,25 @@ class TestDatetimeIndex: + @pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"]) + def test_dti_astype_asobject_around_dst_transition(self, tzstr): + # GH#1345 + + # dates around a dst transition + rng = date_range("2/13/2010", "5/6/2010", tz=tzstr) + + objs = rng.astype(object) + for i, x in enumerate(objs): + exval = rng[i] + assert x == exval + assert x.tzinfo == exval.tzinfo + + objs = rng.astype(object) + for i, x in enumerate(objs): + exval = rng[i] + assert x == exval + assert x.tzinfo == exval.tzinfo + def test_astype(self): # GH 13149, GH 13209 idx = DatetimeIndex(["2016-05-16", "NaT", NaT, np.nan], name="idx") diff --git a/pandas/tests/indexes/datetimes/test_delete.py b/pandas/tests/indexes/datetimes/methods/test_delete.py similarity index 52% rename from pandas/tests/indexes/datetimes/test_delete.py rename to pandas/tests/indexes/datetimes/methods/test_delete.py index 90dfdb46cdfa5..620e41bf2d43a 100644 --- a/pandas/tests/indexes/datetimes/test_delete.py +++ b/pandas/tests/indexes/datetimes/methods/test_delete.py @@ -40,28 +40,29 @@ def test_delete(self): # either depending on numpy version idx.delete(5) - for tz in [None, "Asia/Tokyo", "US/Pacific"]: - idx = date_range( - start="2000-01-01 09:00", periods=10, freq="h", name="idx", tz=tz - ) - - expected = date_range( - start="2000-01-01 10:00", periods=9, freq="h", name="idx", tz=tz - ) - result = idx.delete(0) - tm.assert_index_equal(result, expected) - assert result.name == expected.name - assert result.freqstr == "h" - assert result.tz == expected.tz + @pytest.mark.parametrize("tz", [None, "Asia/Tokyo", "US/Pacific"]) + def test_delete2(self, tz): + idx = date_range( + start="2000-01-01 09:00", periods=10, freq="h", name="idx", tz=tz + ) - expected = date_range( - start="2000-01-01 09:00", periods=9, freq="h", name="idx", tz=tz - ) - result = idx.delete(-1) - tm.assert_index_equal(result, expected) - assert result.name == expected.name - assert result.freqstr == "h" - assert result.tz == expected.tz + expected = date_range( + start="2000-01-01 10:00", periods=9, freq="h", name="idx", tz=tz + ) + result = idx.delete(0) + tm.assert_index_equal(result, expected) + assert result.name == expected.name + assert result.freqstr == "h" + assert result.tz == expected.tz + + expected = date_range( + start="2000-01-01 09:00", periods=9, freq="h", name="idx", tz=tz + ) + result = idx.delete(-1) + tm.assert_index_equal(result, expected) + assert result.name == expected.name + assert result.freqstr == "h" + assert result.tz == expected.tz def test_delete_slice(self): idx = date_range(start="2000-01-01", periods=10, freq="D", name="idx") @@ -101,38 +102,40 @@ def test_delete_slice(self): assert result.name == expected.name assert result.freq == expected.freq - for tz in [None, "Asia/Tokyo", "US/Pacific"]: - ts = Series( - 1, - index=date_range( - "2000-01-01 09:00", periods=10, freq="h", name="idx", tz=tz - ), - ) - # preserve freq - result = ts.drop(ts.index[:5]).index - 
expected = date_range( - "2000-01-01 14:00", periods=5, freq="h", name="idx", tz=tz - ) - tm.assert_index_equal(result, expected) - assert result.name == expected.name - assert result.freq == expected.freq - assert result.tz == expected.tz - - # reset freq to None - result = ts.drop(ts.index[[1, 3, 5, 7, 9]]).index - expected = DatetimeIndex( - [ - "2000-01-01 09:00", - "2000-01-01 11:00", - "2000-01-01 13:00", - "2000-01-01 15:00", - "2000-01-01 17:00", - ], - freq=None, - name="idx", - tz=tz, - ) - tm.assert_index_equal(result, expected) - assert result.name == expected.name - assert result.freq == expected.freq - assert result.tz == expected.tz + # TODO: belongs in Series.drop tests? + @pytest.mark.parametrize("tz", [None, "Asia/Tokyo", "US/Pacific"]) + def test_delete_slice2(self, tz): + ts = Series( + 1, + index=date_range( + "2000-01-01 09:00", periods=10, freq="h", name="idx", tz=tz + ), + ) + # preserve freq + result = ts.drop(ts.index[:5]).index + expected = date_range( + "2000-01-01 14:00", periods=5, freq="h", name="idx", tz=tz + ) + tm.assert_index_equal(result, expected) + assert result.name == expected.name + assert result.freq == expected.freq + assert result.tz == expected.tz + + # reset freq to None + result = ts.drop(ts.index[[1, 3, 5, 7, 9]]).index + expected = DatetimeIndex( + [ + "2000-01-01 09:00", + "2000-01-01 11:00", + "2000-01-01 13:00", + "2000-01-01 15:00", + "2000-01-01 17:00", + ], + freq=None, + name="idx", + tz=tz, + ) + tm.assert_index_equal(result, expected) + assert result.name == expected.name + assert result.freq == expected.freq + assert result.tz == expected.tz diff --git a/pandas/tests/indexes/datetimes/methods/test_isocalendar.py b/pandas/tests/indexes/datetimes/methods/test_isocalendar.py index 128a8b3e10eb3..3f5a18675735a 100644 --- a/pandas/tests/indexes/datetimes/methods/test_isocalendar.py +++ b/pandas/tests/indexes/datetimes/methods/test_isocalendar.py @@ -18,3 +18,10 @@ def test_isocalendar_returns_correct_values_close_to_new_year_with_tz(): dtype="UInt32", ) tm.assert_frame_equal(result, expected_data_frame) + + +def test_dti_timestamp_isocalendar_fields(): + idx = tm.makeDateIndex(100) + expected = tuple(idx.isocalendar().iloc[-1].to_list()) + result = idx[-1].isocalendar() + assert result == expected diff --git a/pandas/tests/indexes/datetimes/test_map.py b/pandas/tests/indexes/datetimes/methods/test_map.py similarity index 100% rename from pandas/tests/indexes/datetimes/test_map.py rename to pandas/tests/indexes/datetimes/methods/test_map.py diff --git a/pandas/tests/indexes/datetimes/methods/test_normalize.py b/pandas/tests/indexes/datetimes/methods/test_normalize.py new file mode 100644 index 0000000000000..74711f67e6446 --- /dev/null +++ b/pandas/tests/indexes/datetimes/methods/test_normalize.py @@ -0,0 +1,95 @@ +from dateutil.tz import tzlocal +import numpy as np +import pytest + +import pandas.util._test_decorators as td + +from pandas import ( + DatetimeIndex, + NaT, + Timestamp, + date_range, +) +import pandas._testing as tm + + +class TestNormalize: + def test_normalize(self): + rng = date_range("1/1/2000 9:30", periods=10, freq="D") + + result = rng.normalize() + expected = date_range("1/1/2000", periods=10, freq="D") + tm.assert_index_equal(result, expected) + + arr_ns = np.array([1380585623454345752, 1380585612343234312]).astype( + "datetime64[ns]" + ) + rng_ns = DatetimeIndex(arr_ns) + rng_ns_normalized = rng_ns.normalize() + + arr_ns = np.array([1380585600000000000, 1380585600000000000]).astype( + "datetime64[ns]" + ) + 
expected = DatetimeIndex(arr_ns) + tm.assert_index_equal(rng_ns_normalized, expected) + + assert result.is_normalized + assert not rng.is_normalized + + def test_normalize_nat(self): + dti = DatetimeIndex([NaT, Timestamp("2018-01-01 01:00:00")]) + result = dti.normalize() + expected = DatetimeIndex([NaT, Timestamp("2018-01-01")]) + tm.assert_index_equal(result, expected) + + def test_normalize_tz(self): + rng = date_range("1/1/2000 9:30", periods=10, freq="D", tz="US/Eastern") + + result = rng.normalize() # does not preserve freq + expected = date_range("1/1/2000", periods=10, freq="D", tz="US/Eastern") + tm.assert_index_equal(result, expected._with_freq(None)) + + assert result.is_normalized + assert not rng.is_normalized + + rng = date_range("1/1/2000 9:30", periods=10, freq="D", tz="UTC") + + result = rng.normalize() + expected = date_range("1/1/2000", periods=10, freq="D", tz="UTC") + tm.assert_index_equal(result, expected) + + assert result.is_normalized + assert not rng.is_normalized + + rng = date_range("1/1/2000 9:30", periods=10, freq="D", tz=tzlocal()) + result = rng.normalize() # does not preserve freq + expected = date_range("1/1/2000", periods=10, freq="D", tz=tzlocal()) + tm.assert_index_equal(result, expected._with_freq(None)) + + assert result.is_normalized + assert not rng.is_normalized + + @td.skip_if_windows + @pytest.mark.parametrize( + "timezone", + [ + "US/Pacific", + "US/Eastern", + "UTC", + "Asia/Kolkata", + "Asia/Shanghai", + "Australia/Canberra", + ], + ) + def test_normalize_tz_local(self, timezone): + # GH#13459 + with tm.set_timezone(timezone): + rng = date_range("1/1/2000 9:30", periods=10, freq="D", tz=tzlocal()) + + result = rng.normalize() + expected = date_range("1/1/2000", periods=10, freq="D", tz=tzlocal()) + expected = expected._with_freq(None) + tm.assert_index_equal(result, expected) + + assert result.is_normalized + assert not rng.is_normalized diff --git a/pandas/tests/indexes/datetimes/methods/test_resolution.py b/pandas/tests/indexes/datetimes/methods/test_resolution.py new file mode 100644 index 0000000000000..6336fc8277947 --- /dev/null +++ b/pandas/tests/indexes/datetimes/methods/test_resolution.py @@ -0,0 +1,31 @@ +from dateutil.tz import tzlocal +import pytest + +from pandas.compat import IS64 + +from pandas import date_range + + +@pytest.mark.parametrize( + "freq,expected", + [ + ("Y", "day"), + ("Q", "day"), + ("ME", "day"), + ("D", "day"), + ("h", "hour"), + ("min", "minute"), + ("s", "second"), + ("ms", "millisecond"), + ("us", "microsecond"), + ], +) +def test_dti_resolution(request, tz_naive_fixture, freq, expected): + tz = tz_naive_fixture + if freq == "Y" and not IS64 and isinstance(tz, tzlocal): + request.applymarker( + pytest.mark.xfail(reason="OverflowError inside tzlocal past 2038") + ) + + idx = date_range(start="2013-04-01", periods=30, freq=freq, tz=tz) + assert idx.resolution == expected diff --git a/pandas/tests/indexes/datetimes/methods/test_round.py b/pandas/tests/indexes/datetimes/methods/test_round.py new file mode 100644 index 0000000000000..6c9fb8ded0650 --- /dev/null +++ b/pandas/tests/indexes/datetimes/methods/test_round.py @@ -0,0 +1,213 @@ +import pytest + +from pandas._libs.tslibs import to_offset +from pandas._libs.tslibs.offsets import INVALID_FREQ_ERR_MSG + +from pandas import ( + DatetimeIndex, + Timestamp, + date_range, +) +import pandas._testing as tm + + +class TestDatetimeIndexRound: + def test_round_daily(self): + dti = date_range("20130101 09:10:11", periods=5) + result = dti.round("D") + expected = 
date_range("20130101", periods=5) + tm.assert_index_equal(result, expected) + + dti = dti.tz_localize("UTC").tz_convert("US/Eastern") + result = dti.round("D") + expected = date_range("20130101", periods=5).tz_localize("US/Eastern") + tm.assert_index_equal(result, expected) + + result = dti.round("s") + tm.assert_index_equal(result, dti) + + @pytest.mark.parametrize( + "freq, error_msg", + [ + ("Y", "<YearEnd: month=12> is a non-fixed frequency"), + ("ME", "<MonthEnd> is a non-fixed frequency"), + ("foobar", "Invalid frequency: foobar"), + ], + ) + def test_round_invalid(self, freq, error_msg): + dti = date_range("20130101 09:10:11", periods=5) + dti = dti.tz_localize("UTC").tz_convert("US/Eastern") + with pytest.raises(ValueError, match=error_msg): + dti.round(freq) + + def test_round(self, tz_naive_fixture): + tz = tz_naive_fixture + rng = date_range(start="2016-01-01", periods=5, freq="30Min", tz=tz) + elt = rng[1] + + expected_rng = DatetimeIndex( + [ + Timestamp("2016-01-01 00:00:00", tz=tz), + Timestamp("2016-01-01 00:00:00", tz=tz), + Timestamp("2016-01-01 01:00:00", tz=tz), + Timestamp("2016-01-01 02:00:00", tz=tz), + Timestamp("2016-01-01 02:00:00", tz=tz), + ] + ) + expected_elt = expected_rng[1] + + tm.assert_index_equal(rng.round(freq="h"), expected_rng) + assert elt.round(freq="h") == expected_elt + + msg = INVALID_FREQ_ERR_MSG + with pytest.raises(ValueError, match=msg): + rng.round(freq="foo") + with pytest.raises(ValueError, match=msg): + elt.round(freq="foo") + + msg = "<MonthEnd> is a non-fixed frequency" + with pytest.raises(ValueError, match=msg): + rng.round(freq="ME") + with pytest.raises(ValueError, match=msg): + elt.round(freq="ME") + + # GH#14440 & GH#15578 + index = DatetimeIndex(["2016-10-17 12:00:00.0015"], tz=tz) + result = index.round("ms") + expected = DatetimeIndex(["2016-10-17 12:00:00.002000"], tz=tz) + tm.assert_index_equal(result, expected) + + for freq in ["us", "ns"]: + tm.assert_index_equal(index, index.round(freq)) + + index = DatetimeIndex(["2016-10-17 12:00:00.00149"], tz=tz) + result = index.round("ms") + expected = DatetimeIndex(["2016-10-17 12:00:00.001000"], tz=tz) + tm.assert_index_equal(result, expected) + + index = DatetimeIndex(["2016-10-17 12:00:00.001501031"]) + result = index.round("10ns") + expected = DatetimeIndex(["2016-10-17 12:00:00.001501030"]) + tm.assert_index_equal(result, expected) + + with tm.assert_produces_warning(False): + ts = "2016-10-17 12:00:00.001501031" + DatetimeIndex([ts]).round("1010ns") + + def test_no_rounding_occurs(self, tz_naive_fixture): + # GH 21262 + tz = tz_naive_fixture + rng = date_range(start="2016-01-01", periods=5, freq="2Min", tz=tz) + + expected_rng = DatetimeIndex( + [ + Timestamp("2016-01-01 00:00:00", tz=tz), + Timestamp("2016-01-01 00:02:00", tz=tz), + Timestamp("2016-01-01 00:04:00", tz=tz), + Timestamp("2016-01-01 00:06:00", tz=tz), + Timestamp("2016-01-01 00:08:00", tz=tz), + ] + ) + + tm.assert_index_equal(rng.round(freq="2min"), expected_rng) + + @pytest.mark.parametrize( + "test_input, rounder, freq, expected", + [ + (["2117-01-01 00:00:45"], "floor", "15s", ["2117-01-01 00:00:45"]), + (["2117-01-01 00:00:45"], "ceil", "15s", ["2117-01-01 00:00:45"]), + ( + ["2117-01-01 00:00:45.000000012"], + "floor", + "10ns", + ["2117-01-01 00:00:45.000000010"], + ), + ( + ["1823-01-01 00:00:01.000000012"], + "ceil", + "10ns", + ["1823-01-01 00:00:01.000000020"], + ), + (["1823-01-01 00:00:01"], "floor", "1s", ["1823-01-01 00:00:01"]), + (["1823-01-01 00:00:01"], "ceil", "1s", ["1823-01-01 00:00:01"]), 
+ (["2018-01-01 00:15:00"], "ceil", "15min", ["2018-01-01 00:15:00"]), + (["2018-01-01 00:15:00"], "floor", "15min", ["2018-01-01 00:15:00"]), + (["1823-01-01 03:00:00"], "ceil", "3h", ["1823-01-01 03:00:00"]), + (["1823-01-01 03:00:00"], "floor", "3h", ["1823-01-01 03:00:00"]), + ( + ("NaT", "1823-01-01 00:00:01"), + "floor", + "1s", + ("NaT", "1823-01-01 00:00:01"), + ), + ( + ("NaT", "1823-01-01 00:00:01"), + "ceil", + "1s", + ("NaT", "1823-01-01 00:00:01"), + ), + ], + ) + def test_ceil_floor_edge(self, test_input, rounder, freq, expected): + dt = DatetimeIndex(list(test_input)) + func = getattr(dt, rounder) + result = func(freq) + expected = DatetimeIndex(list(expected)) + assert expected.equals(result) + + @pytest.mark.parametrize( + "start, index_freq, periods", + [("2018-01-01", "12h", 25), ("2018-01-01 0:0:0.124999", "1ns", 1000)], + ) + @pytest.mark.parametrize( + "round_freq", + [ + "2ns", + "3ns", + "4ns", + "5ns", + "6ns", + "7ns", + "250ns", + "500ns", + "750ns", + "1us", + "19us", + "250us", + "500us", + "750us", + "1s", + "2s", + "3s", + "12h", + "1D", + ], + ) + def test_round_int64(self, start, index_freq, periods, round_freq): + dt = date_range(start=start, freq=index_freq, periods=periods) + unit = to_offset(round_freq).nanos + + # test floor + result = dt.floor(round_freq) + diff = dt.asi8 - result.asi8 + mod = result.asi8 % unit + assert (mod == 0).all(), f"floor not a {round_freq} multiple" + assert (0 <= diff).all() and (diff < unit).all(), "floor error" + + # test ceil + result = dt.ceil(round_freq) + diff = result.asi8 - dt.asi8 + mod = result.asi8 % unit + assert (mod == 0).all(), f"ceil not a {round_freq} multiple" + assert (0 <= diff).all() and (diff < unit).all(), "ceil error" + + # test round + result = dt.round(round_freq) + diff = abs(result.asi8 - dt.asi8) + mod = result.asi8 % unit + assert (mod == 0).all(), f"round not a {round_freq} multiple" + assert (diff <= unit // 2).all(), "round error" + if unit % 2 == 0: + assert ( + result.asi8[diff == unit // 2] % 2 == 0 + ).all(), "round half to even error" diff --git a/pandas/tests/indexes/datetimes/methods/test_to_julian_date.py b/pandas/tests/indexes/datetimes/methods/test_to_julian_date.py new file mode 100644 index 0000000000000..fc1f0595c21c5 --- /dev/null +++ b/pandas/tests/indexes/datetimes/methods/test_to_julian_date.py @@ -0,0 +1,45 @@ +import numpy as np + +from pandas import ( + Index, + Timestamp, + date_range, +) +import pandas._testing as tm + + +class TestDateTimeIndexToJulianDate: + def test_1700(self): + dr = date_range(start=Timestamp("1710-10-01"), periods=5, freq="D") + r1 = Index([x.to_julian_date() for x in dr]) + r2 = dr.to_julian_date() + assert isinstance(r2, Index) and r2.dtype == np.float64 + tm.assert_index_equal(r1, r2) + + def test_2000(self): + dr = date_range(start=Timestamp("2000-02-27"), periods=5, freq="D") + r1 = Index([x.to_julian_date() for x in dr]) + r2 = dr.to_julian_date() + assert isinstance(r2, Index) and r2.dtype == np.float64 + tm.assert_index_equal(r1, r2) + + def test_hour(self): + dr = date_range(start=Timestamp("2000-02-27"), periods=5, freq="h") + r1 = Index([x.to_julian_date() for x in dr]) + r2 = dr.to_julian_date() + assert isinstance(r2, Index) and r2.dtype == np.float64 + tm.assert_index_equal(r1, r2) + + def test_minute(self): + dr = date_range(start=Timestamp("2000-02-27"), periods=5, freq="min") + r1 = Index([x.to_julian_date() for x in dr]) + r2 = dr.to_julian_date() + assert isinstance(r2, Index) and r2.dtype == np.float64 + 
tm.assert_index_equal(r1, r2)
+
+    def test_second(self):
+        dr = date_range(start=Timestamp("2000-02-27"), periods=5, freq="s")
+        r1 = Index([x.to_julian_date() for x in dr])
+        r2 = dr.to_julian_date()
+        assert isinstance(r2, Index) and r2.dtype == np.float64
+        tm.assert_index_equal(r1, r2)
diff --git a/pandas/tests/indexes/datetimes/methods/test_to_pydatetime.py b/pandas/tests/indexes/datetimes/methods/test_to_pydatetime.py
new file mode 100644
index 0000000000000..fe97ff0cca8eb
--- /dev/null
+++ b/pandas/tests/indexes/datetimes/methods/test_to_pydatetime.py
@@ -0,0 +1,51 @@
+from datetime import (
+    datetime,
+    timezone,
+)
+
+import dateutil.parser
+import dateutil.tz
+from dateutil.tz import tzlocal
+import numpy as np
+
+from pandas import (
+    DatetimeIndex,
+    date_range,
+    to_datetime,
+)
+import pandas._testing as tm
+from pandas.tests.indexes.datetimes.test_timezones import FixedOffset
+
+fixed_off = FixedOffset(-420, "-07:00")
+
+
+class TestToPyDatetime:
+    def test_dti_to_pydatetime(self):
+        dt = dateutil.parser.parse("2012-06-13T01:39:00Z")
+        dt = dt.replace(tzinfo=tzlocal())
+
+        arr = np.array([dt], dtype=object)
+
+        result = to_datetime(arr, utc=True)
+        assert result.tz is timezone.utc
+
+        rng = date_range("2012-11-03 03:00", "2012-11-05 03:00", tz=tzlocal())
+        arr = rng.to_pydatetime()
+        result = to_datetime(arr, utc=True)
+        assert result.tz is timezone.utc
+
+    def test_dti_to_pydatetime_fixedtz(self):
+        dates = np.array(
+            [
+                datetime(2000, 1, 1, tzinfo=fixed_off),
+                datetime(2000, 1, 2, tzinfo=fixed_off),
+                datetime(2000, 1, 3, tzinfo=fixed_off),
+            ]
+        )
+        dti = DatetimeIndex(dates)
+
+        result = dti.to_pydatetime()
+        tm.assert_numpy_array_equal(dates, result)
+
+        result = dti._mpl_repr()
+        tm.assert_numpy_array_equal(dates, result)
diff --git a/pandas/tests/indexes/datetimes/methods/test_tz_convert.py b/pandas/tests/indexes/datetimes/methods/test_tz_convert.py
new file mode 100644
index 0000000000000..b2cf488ac8313
--- /dev/null
+++ b/pandas/tests/indexes/datetimes/methods/test_tz_convert.py
@@ -0,0 +1,283 @@
+from datetime import datetime
+
+import dateutil.tz
+from dateutil.tz import gettz
+import numpy as np
+import pytest
+import pytz
+
+from pandas._libs.tslibs import timezones
+
+from pandas import (
+    DatetimeIndex,
+    Index,
+    NaT,
+    Timestamp,
+    date_range,
+    offsets,
+)
+import pandas._testing as tm
+
+
+class TestTZConvert:
+    def test_tz_convert_nat(self):
+        # GH#5546
+        dates = [NaT]
+        idx = DatetimeIndex(dates)
+        idx = idx.tz_localize("US/Pacific")
+        tm.assert_index_equal(idx, DatetimeIndex(dates, tz="US/Pacific"))
+        idx = idx.tz_convert("US/Eastern")
+        tm.assert_index_equal(idx, DatetimeIndex(dates, tz="US/Eastern"))
+        idx = idx.tz_convert("UTC")
+        tm.assert_index_equal(idx, DatetimeIndex(dates, tz="UTC"))
+
+        dates = ["2010-12-01 00:00", "2010-12-02 00:00", NaT]
+        idx = DatetimeIndex(dates)
+        idx = idx.tz_localize("US/Pacific")
+        tm.assert_index_equal(idx, DatetimeIndex(dates, tz="US/Pacific"))
+        idx = idx.tz_convert("US/Eastern")
+        expected = ["2010-12-01 03:00", "2010-12-02 03:00", NaT]
+        tm.assert_index_equal(idx, DatetimeIndex(expected, tz="US/Eastern"))
+
+        idx = idx + offsets.Hour(5)
+        expected = ["2010-12-01 08:00", "2010-12-02 08:00", NaT]
+        tm.assert_index_equal(idx, DatetimeIndex(expected, tz="US/Eastern"))
+        idx = idx.tz_convert("US/Pacific")
+        expected = ["2010-12-01 05:00", "2010-12-02 05:00", NaT]
+        tm.assert_index_equal(idx, DatetimeIndex(expected, tz="US/Pacific"))
+
+        idx = idx + np.timedelta64(3, "h")
+        expected = ["2010-12-01 08:00",
"2010-12-02 08:00", NaT] + tm.assert_index_equal(idx, DatetimeIndex(expected, tz="US/Pacific")) + + idx = idx.tz_convert("US/Eastern") + expected = ["2010-12-01 11:00", "2010-12-02 11:00", NaT] + tm.assert_index_equal(idx, DatetimeIndex(expected, tz="US/Eastern")) + + @pytest.mark.parametrize("prefix", ["", "dateutil/"]) + def test_dti_tz_convert_compat_timestamp(self, prefix): + strdates = ["1/1/2012", "3/1/2012", "4/1/2012"] + idx = DatetimeIndex(strdates, tz=prefix + "US/Eastern") + + conv = idx[0].tz_convert(prefix + "US/Pacific") + expected = idx.tz_convert(prefix + "US/Pacific")[0] + + assert conv == expected + + def test_dti_tz_convert_hour_overflow_dst(self): + # Regression test for GH#13306 + + # sorted case US/Eastern -> UTC + ts = ["2008-05-12 09:50:00", "2008-12-12 09:50:35", "2009-05-12 09:50:32"] + tt = DatetimeIndex(ts).tz_localize("US/Eastern") + ut = tt.tz_convert("UTC") + expected = Index([13, 14, 13], dtype=np.int32) + tm.assert_index_equal(ut.hour, expected) + + # sorted case UTC -> US/Eastern + ts = ["2008-05-12 13:50:00", "2008-12-12 14:50:35", "2009-05-12 13:50:32"] + tt = DatetimeIndex(ts).tz_localize("UTC") + ut = tt.tz_convert("US/Eastern") + expected = Index([9, 9, 9], dtype=np.int32) + tm.assert_index_equal(ut.hour, expected) + + # unsorted case US/Eastern -> UTC + ts = ["2008-05-12 09:50:00", "2008-12-12 09:50:35", "2008-05-12 09:50:32"] + tt = DatetimeIndex(ts).tz_localize("US/Eastern") + ut = tt.tz_convert("UTC") + expected = Index([13, 14, 13], dtype=np.int32) + tm.assert_index_equal(ut.hour, expected) + + # unsorted case UTC -> US/Eastern + ts = ["2008-05-12 13:50:00", "2008-12-12 14:50:35", "2008-05-12 13:50:32"] + tt = DatetimeIndex(ts).tz_localize("UTC") + ut = tt.tz_convert("US/Eastern") + expected = Index([9, 9, 9], dtype=np.int32) + tm.assert_index_equal(ut.hour, expected) + + @pytest.mark.parametrize("tz", ["US/Eastern", "dateutil/US/Eastern"]) + def test_dti_tz_convert_hour_overflow_dst_timestamps(self, tz): + # Regression test for GH#13306 + + # sorted case US/Eastern -> UTC + ts = [ + Timestamp("2008-05-12 09:50:00", tz=tz), + Timestamp("2008-12-12 09:50:35", tz=tz), + Timestamp("2009-05-12 09:50:32", tz=tz), + ] + tt = DatetimeIndex(ts) + ut = tt.tz_convert("UTC") + expected = Index([13, 14, 13], dtype=np.int32) + tm.assert_index_equal(ut.hour, expected) + + # sorted case UTC -> US/Eastern + ts = [ + Timestamp("2008-05-12 13:50:00", tz="UTC"), + Timestamp("2008-12-12 14:50:35", tz="UTC"), + Timestamp("2009-05-12 13:50:32", tz="UTC"), + ] + tt = DatetimeIndex(ts) + ut = tt.tz_convert("US/Eastern") + expected = Index([9, 9, 9], dtype=np.int32) + tm.assert_index_equal(ut.hour, expected) + + # unsorted case US/Eastern -> UTC + ts = [ + Timestamp("2008-05-12 09:50:00", tz=tz), + Timestamp("2008-12-12 09:50:35", tz=tz), + Timestamp("2008-05-12 09:50:32", tz=tz), + ] + tt = DatetimeIndex(ts) + ut = tt.tz_convert("UTC") + expected = Index([13, 14, 13], dtype=np.int32) + tm.assert_index_equal(ut.hour, expected) + + # unsorted case UTC -> US/Eastern + ts = [ + Timestamp("2008-05-12 13:50:00", tz="UTC"), + Timestamp("2008-12-12 14:50:35", tz="UTC"), + Timestamp("2008-05-12 13:50:32", tz="UTC"), + ] + tt = DatetimeIndex(ts) + ut = tt.tz_convert("US/Eastern") + expected = Index([9, 9, 9], dtype=np.int32) + tm.assert_index_equal(ut.hour, expected) + + @pytest.mark.parametrize("freq, n", [("h", 1), ("min", 60), ("s", 3600)]) + def test_dti_tz_convert_trans_pos_plus_1__bug(self, freq, n): + # Regression test for tslib.tz_convert(vals, tz1, tz2). 
+        # See GH#4496 for details.
+        idx = date_range(datetime(2011, 3, 26, 23), datetime(2011, 3, 27, 1), freq=freq)
+        idx = idx.tz_localize("UTC")
+        idx = idx.tz_convert("Europe/Moscow")
+
+        expected = np.repeat(np.array([3, 4, 5]), np.array([n, n, 1]))
+        tm.assert_index_equal(idx.hour, Index(expected, dtype=np.int32))
+
+    def test_dti_tz_convert_dst(self):
+        for freq, n in [("h", 1), ("min", 60), ("s", 3600)]:
+            # Start DST
+            idx = date_range(
+                "2014-03-08 23:00", "2014-03-09 09:00", freq=freq, tz="UTC"
+            )
+            idx = idx.tz_convert("US/Eastern")
+            expected = np.repeat(
+                np.array([18, 19, 20, 21, 22, 23, 0, 1, 3, 4, 5]),
+                np.array([n, n, n, n, n, n, n, n, n, n, 1]),
+            )
+            tm.assert_index_equal(idx.hour, Index(expected, dtype=np.int32))
+
+            idx = date_range(
+                "2014-03-08 18:00", "2014-03-09 05:00", freq=freq, tz="US/Eastern"
+            )
+            idx = idx.tz_convert("UTC")
+            expected = np.repeat(
+                np.array([23, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9]),
+                np.array([n, n, n, n, n, n, n, n, n, n, 1]),
+            )
+            tm.assert_index_equal(idx.hour, Index(expected, dtype=np.int32))
+
+            # End DST
+            idx = date_range(
+                "2014-11-01 23:00", "2014-11-02 09:00", freq=freq, tz="UTC"
+            )
+            idx = idx.tz_convert("US/Eastern")
+            expected = np.repeat(
+                np.array([19, 20, 21, 22, 23, 0, 1, 1, 2, 3, 4]),
+                np.array([n, n, n, n, n, n, n, n, n, n, 1]),
+            )
+            tm.assert_index_equal(idx.hour, Index(expected, dtype=np.int32))
+
+            idx = date_range(
+                "2014-11-01 18:00", "2014-11-02 05:00", freq=freq, tz="US/Eastern"
+            )
+            idx = idx.tz_convert("UTC")
+            expected = np.repeat(
+                np.array([22, 23, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]),
+                np.array([n, n, n, n, n, n, n, n, n, n, n, n, 1]),
+            )
+            tm.assert_index_equal(idx.hour, Index(expected, dtype=np.int32))
+
+        # daily
+        # Start DST
+        idx = date_range("2014-03-08 00:00", "2014-03-09 00:00", freq="D", tz="UTC")
+        idx = idx.tz_convert("US/Eastern")
+        tm.assert_index_equal(idx.hour, Index([19, 19], dtype=np.int32))
+
+        idx = date_range(
+            "2014-03-08 00:00", "2014-03-09 00:00", freq="D", tz="US/Eastern"
+        )
+        idx = idx.tz_convert("UTC")
+        tm.assert_index_equal(idx.hour, Index([5, 5], dtype=np.int32))
+
+        # End DST
+        idx = date_range("2014-11-01 00:00", "2014-11-02 00:00", freq="D", tz="UTC")
+        idx = idx.tz_convert("US/Eastern")
+        tm.assert_index_equal(idx.hour, Index([20, 20], dtype=np.int32))
+
+        idx = date_range(
+            "2014-11-01 00:00", "2014-11-02 00:00", freq="D", tz="US/Eastern"
+        )
+        idx = idx.tz_convert("UTC")
+        tm.assert_index_equal(idx.hour, Index([4, 4], dtype=np.int32))
+
+    def test_tz_convert_roundtrip(self, tz_aware_fixture):
+        tz = tz_aware_fixture
+        idx1 = date_range(start="2014-01-01", end="2014-12-31", freq="ME", tz="UTC")
+        exp1 = date_range(start="2014-01-01", end="2014-12-31", freq="ME")
+
+        idx2 = date_range(start="2014-01-01", end="2014-12-31", freq="D", tz="UTC")
+        exp2 = date_range(start="2014-01-01", end="2014-12-31", freq="D")
+
+        idx3 = date_range(start="2014-01-01", end="2014-03-01", freq="h", tz="UTC")
+        exp3 = date_range(start="2014-01-01", end="2014-03-01", freq="h")
+
+        idx4 = date_range(start="2014-08-01", end="2014-10-31", freq="min", tz="UTC")
+        exp4 = date_range(start="2014-08-01", end="2014-10-31", freq="min")
+
+        for idx, expected in [(idx1, exp1), (idx2, exp2), (idx3, exp3), (idx4, exp4)]:
+            converted = idx.tz_convert(tz)
+            reset = converted.tz_convert(None)
+            tm.assert_index_equal(reset, expected)
+            assert reset.tzinfo is None
+            expected = converted.tz_convert("UTC").tz_localize(None)
+            expected = expected._with_freq("infer")
+            tm.assert_index_equal(reset, expected)
+
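+    # Editorial note (assumption about intent): tzlocal resolves to whatever
+    # timezone the host machine is configured with, so the test below asserts
+    # only on the tz-independent i8 values rather than on wall times.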
+    def test_dti_tz_convert_tzlocal(self):
+        # GH#13583
+        # tz_convert doesn't affect the internal values
+        dti = date_range(start="2001-01-01", end="2001-03-01", tz="UTC")
+        dti2 = dti.tz_convert(dateutil.tz.tzlocal())
+        tm.assert_numpy_array_equal(dti2.asi8, dti.asi8)
+
+        dti = date_range(start="2001-01-01", end="2001-03-01", tz=dateutil.tz.tzlocal())
+        dti2 = dti.tz_convert(None)
+        tm.assert_numpy_array_equal(dti2.asi8, dti.asi8)
+
+    @pytest.mark.parametrize(
+        "tz",
+        [
+            "US/Eastern",
+            "dateutil/US/Eastern",
+            pytz.timezone("US/Eastern"),
+            gettz("US/Eastern"),
+        ],
+    )
+    def test_dti_tz_convert_utc_to_local_no_modify(self, tz):
+        rng = date_range("3/11/2012", "3/12/2012", freq="h", tz="utc")
+        rng_eastern = rng.tz_convert(tz)
+
+        # Values are unmodified
+        tm.assert_numpy_array_equal(rng.asi8, rng_eastern.asi8)
+
+        assert timezones.tz_compare(rng_eastern.tz, timezones.maybe_get_tz(tz))
+
+    @pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"])
+    def test_tz_convert_unsorted(self, tzstr):
+        dr = date_range("2012-03-09", freq="h", periods=100, tz="utc")
+        dr = dr.tz_convert(tzstr)
+
+        result = dr[::-1].hour
+        exp = dr.hour[::-1]
+        tm.assert_almost_equal(result, exp)
diff --git a/pandas/tests/indexes/datetimes/methods/test_tz_localize.py b/pandas/tests/indexes/datetimes/methods/test_tz_localize.py
new file mode 100644
index 0000000000000..ca71dde3e6c9e
--- /dev/null
+++ b/pandas/tests/indexes/datetimes/methods/test_tz_localize.py
@@ -0,0 +1,388 @@
+from datetime import (
+    datetime,
+    timedelta,
+)
+
+import dateutil.tz
+from dateutil.tz import gettz
+import numpy as np
+import pytest
+import pytz
+
+from pandas import (
+    DatetimeIndex,
+    Timestamp,
+    bdate_range,
+    date_range,
+    offsets,
+    to_datetime,
+)
+import pandas._testing as tm
+
+try:
+    from zoneinfo import ZoneInfo
+except ImportError:
+    # Cannot assign to a type [misc]
+    ZoneInfo = None  # type: ignore[misc, assignment]
+
+
+class TestTZLocalize:
+    def test_tz_localize_invalidates_freq(self):
+        # we only preserve freq in unambiguous cases
+
+        # if localized to US/Eastern, this crosses a DST transition
+        dti = date_range("2014-03-08 23:00", "2014-03-09 09:00", freq="h")
+        assert dti.freq == "h"
+
+        result = dti.tz_localize(None)  # no-op
+        assert result.freq == "h"
+
+        result = dti.tz_localize("UTC")  # unambiguous freq preservation
+        assert result.freq == "h"
+
+        result = dti.tz_localize("US/Eastern", nonexistent="shift_forward")
+        assert result.freq is None
+        assert result.inferred_freq is None  # i.e.
we are not _too_ strict here + + # Case where we _can_ keep freq because we're length==1 + dti2 = dti[:1] + result = dti2.tz_localize("US/Eastern") + assert result.freq == "h" + + def test_tz_localize_utc_copies(self, utc_fixture): + # GH#46460 + times = ["2015-03-08 01:00", "2015-03-08 02:00", "2015-03-08 03:00"] + index = DatetimeIndex(times) + + res = index.tz_localize(utc_fixture) + assert not tm.shares_memory(res, index) + + res2 = index._data.tz_localize(utc_fixture) + assert not tm.shares_memory(index._data, res2) + + def test_dti_tz_localize_nonexistent_raise_coerce(self): + # GH#13057 + times = ["2015-03-08 01:00", "2015-03-08 02:00", "2015-03-08 03:00"] + index = DatetimeIndex(times) + tz = "US/Eastern" + with pytest.raises(pytz.NonExistentTimeError, match="|".join(times)): + index.tz_localize(tz=tz) + + with pytest.raises(pytz.NonExistentTimeError, match="|".join(times)): + index.tz_localize(tz=tz, nonexistent="raise") + + result = index.tz_localize(tz=tz, nonexistent="NaT") + test_times = ["2015-03-08 01:00-05:00", "NaT", "2015-03-08 03:00-04:00"] + dti = to_datetime(test_times, utc=True) + expected = dti.tz_convert("US/Eastern") + tm.assert_index_equal(result, expected) + + easts = [pytz.timezone("US/Eastern"), gettz("US/Eastern")] + if ZoneInfo is not None: + try: + tz = ZoneInfo("US/Eastern") + except KeyError: + # no tzdata + pass + else: + easts.append(tz) + + @pytest.mark.parametrize("tz", easts) + def test_dti_tz_localize_ambiguous_infer(self, tz): + # November 6, 2011, fall back, repeat 2 AM hour + # With no repeated hours, we cannot infer the transition + dr = date_range(datetime(2011, 11, 6, 0), periods=5, freq=offsets.Hour()) + with pytest.raises(pytz.AmbiguousTimeError, match="Cannot infer dst time"): + dr.tz_localize(tz) + + # With repeated hours, we can infer the transition + dr = date_range(datetime(2011, 11, 6, 0), periods=5, freq=offsets.Hour(), tz=tz) + times = [ + "11/06/2011 00:00", + "11/06/2011 01:00", + "11/06/2011 01:00", + "11/06/2011 02:00", + "11/06/2011 03:00", + ] + di = DatetimeIndex(times) + localized = di.tz_localize(tz, ambiguous="infer") + expected = dr._with_freq(None) + tm.assert_index_equal(expected, localized) + tm.assert_index_equal(expected, DatetimeIndex(times, tz=tz, ambiguous="infer")) + + # When there is no dst transition, nothing special happens + dr = date_range(datetime(2011, 6, 1, 0), periods=10, freq=offsets.Hour()) + localized = dr.tz_localize(tz) + localized_infer = dr.tz_localize(tz, ambiguous="infer") + tm.assert_index_equal(localized, localized_infer) + + @pytest.mark.parametrize("tz", [pytz.timezone("US/Eastern"), gettz("US/Eastern")]) + def test_dti_tz_localize_ambiguous_times(self, tz): + # March 13, 2011, spring forward, skip from 2 AM to 3 AM + dr = date_range(datetime(2011, 3, 13, 1, 30), periods=3, freq=offsets.Hour()) + with pytest.raises(pytz.NonExistentTimeError, match="2011-03-13 02:30:00"): + dr.tz_localize(tz) + + # after dst transition, it works + dr = date_range( + datetime(2011, 3, 13, 3, 30), periods=3, freq=offsets.Hour(), tz=tz + ) + + # November 6, 2011, fall back, repeat 2 AM hour + dr = date_range(datetime(2011, 11, 6, 1, 30), periods=3, freq=offsets.Hour()) + with pytest.raises(pytz.AmbiguousTimeError, match="Cannot infer dst time"): + dr.tz_localize(tz) + + # UTC is OK + dr = date_range( + datetime(2011, 3, 13), periods=48, freq=offsets.Minute(30), tz=pytz.utc + ) + + @pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"]) + def test_dti_tz_localize_pass_dates_to_utc(self, tzstr): + 
strdates = ["1/1/2012", "3/1/2012", "4/1/2012"] + + idx = DatetimeIndex(strdates) + conv = idx.tz_localize(tzstr) + + fromdates = DatetimeIndex(strdates, tz=tzstr) + + assert conv.tz == fromdates.tz + tm.assert_numpy_array_equal(conv.values, fromdates.values) + + @pytest.mark.parametrize("prefix", ["", "dateutil/"]) + def test_dti_tz_localize(self, prefix): + tzstr = prefix + "US/Eastern" + dti = date_range(start="1/1/2005", end="1/1/2005 0:00:30.256", freq="ms") + dti2 = dti.tz_localize(tzstr) + + dti_utc = date_range( + start="1/1/2005 05:00", end="1/1/2005 5:00:30.256", freq="ms", tz="utc" + ) + + tm.assert_numpy_array_equal(dti2.values, dti_utc.values) + + dti3 = dti2.tz_convert(prefix + "US/Pacific") + tm.assert_numpy_array_equal(dti3.values, dti_utc.values) + + dti = date_range(start="11/6/2011 1:59", end="11/6/2011 2:00", freq="ms") + with pytest.raises(pytz.AmbiguousTimeError, match="Cannot infer dst time"): + dti.tz_localize(tzstr) + + dti = date_range(start="3/13/2011 1:59", end="3/13/2011 2:00", freq="ms") + with pytest.raises(pytz.NonExistentTimeError, match="2011-03-13 02:00:00"): + dti.tz_localize(tzstr) + + @pytest.mark.parametrize( + "tz", + [ + "US/Eastern", + "dateutil/US/Eastern", + pytz.timezone("US/Eastern"), + gettz("US/Eastern"), + ], + ) + def test_dti_tz_localize_utc_conversion(self, tz): + # Localizing to time zone should: + # 1) check for DST ambiguities + # 2) convert to UTC + + rng = date_range("3/10/2012", "3/11/2012", freq="30min") + + converted = rng.tz_localize(tz) + expected_naive = rng + offsets.Hour(5) + tm.assert_numpy_array_equal(converted.asi8, expected_naive.asi8) + + # DST ambiguity, this should fail + rng = date_range("3/11/2012", "3/12/2012", freq="30min") + # Is this really how it should fail?? + with pytest.raises(pytz.NonExistentTimeError, match="2012-03-11 02:00:00"): + rng.tz_localize(tz) + + def test_dti_tz_localize_roundtrip(self, tz_aware_fixture): + # note: this tz tests that a tz-naive index can be localized + # and de-localized successfully, when there are no DST transitions + # in the range. 
+ idx = date_range(start="2014-06-01", end="2014-08-30", freq="15min") + tz = tz_aware_fixture + localized = idx.tz_localize(tz) + # can't localize a tz-aware object + with pytest.raises( + TypeError, match="Already tz-aware, use tz_convert to convert" + ): + localized.tz_localize(tz) + reset = localized.tz_localize(None) + assert reset.tzinfo is None + expected = idx._with_freq(None) + tm.assert_index_equal(reset, expected) + + def test_dti_tz_localize_naive(self): + rng = date_range("1/1/2011", periods=100, freq="h") + + conv = rng.tz_localize("US/Pacific") + exp = date_range("1/1/2011", periods=100, freq="h", tz="US/Pacific") + + tm.assert_index_equal(conv, exp._with_freq(None)) + + def test_dti_tz_localize_tzlocal(self): + # GH#13583 + offset = dateutil.tz.tzlocal().utcoffset(datetime(2011, 1, 1)) + offset = int(offset.total_seconds() * 1000000000) + + dti = date_range(start="2001-01-01", end="2001-03-01") + dti2 = dti.tz_localize(dateutil.tz.tzlocal()) + tm.assert_numpy_array_equal(dti2.asi8 + offset, dti.asi8) + + dti = date_range(start="2001-01-01", end="2001-03-01", tz=dateutil.tz.tzlocal()) + dti2 = dti.tz_localize(None) + tm.assert_numpy_array_equal(dti2.asi8 - offset, dti.asi8) + + @pytest.mark.parametrize("tz", [pytz.timezone("US/Eastern"), gettz("US/Eastern")]) + def test_dti_tz_localize_ambiguous_nat(self, tz): + times = [ + "11/06/2011 00:00", + "11/06/2011 01:00", + "11/06/2011 01:00", + "11/06/2011 02:00", + "11/06/2011 03:00", + ] + di = DatetimeIndex(times) + localized = di.tz_localize(tz, ambiguous="NaT") + + times = [ + "11/06/2011 00:00", + np.nan, + np.nan, + "11/06/2011 02:00", + "11/06/2011 03:00", + ] + di_test = DatetimeIndex(times, tz="US/Eastern") + + # left dtype is datetime64[ns, US/Eastern] + # right is datetime64[ns, tzfile('/usr/share/zoneinfo/US/Eastern')] + tm.assert_numpy_array_equal(di_test.values, localized.values) + + @pytest.mark.parametrize("tz", [pytz.timezone("US/Eastern"), gettz("US/Eastern")]) + def test_dti_tz_localize_ambiguous_flags(self, tz): + # November 6, 2011, fall back, repeat 2 AM hour + + # Pass in flags to determine right dst transition + dr = date_range(datetime(2011, 11, 6, 0), periods=5, freq=offsets.Hour(), tz=tz) + times = [ + "11/06/2011 00:00", + "11/06/2011 01:00", + "11/06/2011 01:00", + "11/06/2011 02:00", + "11/06/2011 03:00", + ] + + # Test tz_localize + di = DatetimeIndex(times) + is_dst = [1, 1, 0, 0, 0] + localized = di.tz_localize(tz, ambiguous=is_dst) + expected = dr._with_freq(None) + tm.assert_index_equal(expected, localized) + tm.assert_index_equal(expected, DatetimeIndex(times, tz=tz, ambiguous=is_dst)) + + localized = di.tz_localize(tz, ambiguous=np.array(is_dst)) + tm.assert_index_equal(dr, localized) + + localized = di.tz_localize(tz, ambiguous=np.array(is_dst).astype("bool")) + tm.assert_index_equal(dr, localized) + + # Test constructor + localized = DatetimeIndex(times, tz=tz, ambiguous=is_dst) + tm.assert_index_equal(dr, localized) + + # Test duplicate times where inferring the dst fails + times += times + di = DatetimeIndex(times) + + # When the sizes are incompatible, make sure error is raised + msg = "Length of ambiguous bool-array must be the same size as vals" + with pytest.raises(Exception, match=msg): + di.tz_localize(tz, ambiguous=is_dst) + + # When sizes are compatible and there are repeats ('infer' won't work) + is_dst = np.hstack((is_dst, is_dst)) + localized = di.tz_localize(tz, ambiguous=is_dst) + dr = dr.append(dr) + tm.assert_index_equal(dr, localized) + + # When there is no dst transition, 
nothing special happens
+        dr = date_range(datetime(2011, 6, 1, 0), periods=10, freq=offsets.Hour())
+        is_dst = np.array([1] * 10)
+        localized = dr.tz_localize(tz)
+        localized_is_dst = dr.tz_localize(tz, ambiguous=is_dst)
+        tm.assert_index_equal(localized, localized_is_dst)
+
+    def test_dti_tz_localize_bdate_range(self):
+        dr = bdate_range("1/1/2009", "1/1/2010")
+        dr_utc = bdate_range("1/1/2009", "1/1/2010", tz=pytz.utc)
+        localized = dr.tz_localize(pytz.utc)
+        tm.assert_index_equal(dr_utc, localized)
+
+    @pytest.mark.parametrize(
+        "start_ts, tz, end_ts, shift",
+        [
+            ["2015-03-29 02:20:00", "Europe/Warsaw", "2015-03-29 03:00:00", "forward"],
+            [
+                "2015-03-29 02:20:00",
+                "Europe/Warsaw",
+                "2015-03-29 01:59:59.999999999",
+                "backward",
+            ],
+            [
+                "2015-03-29 02:20:00",
+                "Europe/Warsaw",
+                "2015-03-29 03:20:00",
+                timedelta(hours=1),
+            ],
+            [
+                "2015-03-29 02:20:00",
+                "Europe/Warsaw",
+                "2015-03-29 01:20:00",
+                timedelta(hours=-1),
+            ],
+            ["2018-03-11 02:33:00", "US/Pacific", "2018-03-11 03:00:00", "forward"],
+            [
+                "2018-03-11 02:33:00",
+                "US/Pacific",
+                "2018-03-11 01:59:59.999999999",
+                "backward",
+            ],
+            [
+                "2018-03-11 02:33:00",
+                "US/Pacific",
+                "2018-03-11 03:33:00",
+                timedelta(hours=1),
+            ],
+            [
+                "2018-03-11 02:33:00",
+                "US/Pacific",
+                "2018-03-11 01:33:00",
+                timedelta(hours=-1),
+            ],
+        ],
+    )
+    @pytest.mark.parametrize("tz_type", ["", "dateutil/"])
+    def test_dti_tz_localize_nonexistent_shift(
+        self, start_ts, tz, end_ts, shift, tz_type
+    ):
+        # GH#8917
+        tz = tz_type + tz
+        if isinstance(shift, str):
+            shift = "shift_" + shift
+        dti = DatetimeIndex([Timestamp(start_ts)])
+        result = dti.tz_localize(tz, nonexistent=shift)
+        expected = DatetimeIndex([Timestamp(end_ts)]).tz_localize(tz)
+        tm.assert_index_equal(result, expected)
+
+    @pytest.mark.parametrize("offset", [-1, 1])
+    def test_dti_tz_localize_nonexistent_shift_invalid(self, offset, warsaw):
+        # GH#8917
+        tz = warsaw
+        dti = DatetimeIndex([Timestamp("2015-03-29 02:20:00")])
+        msg = "The provided timedelta will relocalize on a nonexistent time"
+        with pytest.raises(ValueError, match=msg):
+            dti.tz_localize(tz, nonexistent=timedelta(seconds=offset))
diff --git a/pandas/tests/indexes/datetimes/test_unique.py b/pandas/tests/indexes/datetimes/methods/test_unique.py
similarity index 100%
rename from pandas/tests/indexes/datetimes/test_unique.py
rename to pandas/tests/indexes/datetimes/methods/test_unique.py
diff --git a/pandas/tests/indexes/datetimes/test_arithmetic.py b/pandas/tests/indexes/datetimes/test_arithmetic.py
new file mode 100644
index 0000000000000..3a7c418b27de6
--- /dev/null
+++ b/pandas/tests/indexes/datetimes/test_arithmetic.py
@@ -0,0 +1,56 @@
+# Arithmetic tests specific to DatetimeIndex are generally about `freq`
+# retention or inference.
Other arithmetic tests belong in +# tests/arithmetic/test_datetime64.py +import pytest + +from pandas import ( + Timedelta, + TimedeltaIndex, + Timestamp, + date_range, + timedelta_range, +) +import pandas._testing as tm + + +class TestDatetimeIndexArithmetic: + def test_add_timedelta_preserves_freq(self): + # GH#37295 should hold for any DTI with freq=None or Tick freq + tz = "Canada/Eastern" + dti = date_range( + start=Timestamp("2019-03-26 00:00:00-0400", tz=tz), + end=Timestamp("2020-10-17 00:00:00-0400", tz=tz), + freq="D", + ) + result = dti + Timedelta(days=1) + assert result.freq == dti.freq + + def test_sub_datetime_preserves_freq(self, tz_naive_fixture): + # GH#48818 + dti = date_range("2016-01-01", periods=12, tz=tz_naive_fixture) + + res = dti - dti[0] + expected = timedelta_range("0 Days", "11 Days") + tm.assert_index_equal(res, expected) + assert res.freq == expected.freq + + @pytest.mark.xfail( + reason="The inherited freq is incorrect bc dti.freq is incorrect " + "https://github.com/pandas-dev/pandas/pull/48818/files#r982793461" + ) + def test_sub_datetime_preserves_freq_across_dst(self): + # GH#48818 + ts = Timestamp("2016-03-11", tz="US/Pacific") + dti = date_range(ts, periods=4) + + res = dti - dti[0] + expected = TimedeltaIndex( + [ + Timedelta(days=0), + Timedelta(days=1), + Timedelta(days=2), + Timedelta(days=2, hours=23), + ] + ) + tm.assert_index_equal(res, expected) + assert res.freq == expected.freq diff --git a/pandas/tests/indexes/datetimes/test_constructors.py b/pandas/tests/indexes/datetimes/test_constructors.py index 598845471046f..fa58dbb7f8793 100644 --- a/pandas/tests/indexes/datetimes/test_constructors.py +++ b/pandas/tests/indexes/datetimes/test_constructors.py @@ -9,6 +9,8 @@ from operator import attrgetter import dateutil +import dateutil.tz +from dateutil.tz import gettz import numpy as np import pytest import pytz @@ -16,6 +18,7 @@ from pandas._libs.tslibs import ( OutOfBoundsDatetime, astype_overflowsafe, + timezones, ) import pandas as pd @@ -32,6 +35,7 @@ DatetimeArray, period_array, ) +from pandas.tests.indexes.datetimes.test_timezones import FixedOffset class TestDatetimeIndex: @@ -567,6 +571,16 @@ def test_construction_outofbounds(self): # can't create DatetimeIndex DatetimeIndex(dates) + @pytest.mark.parametrize("data", [["1400-01-01"], [datetime(1400, 1, 1)]]) + def test_dti_date_out_of_range(self, data): + # GH#1475 + msg = ( + "^Out of bounds nanosecond timestamp: " + "1400-01-01( 00:00:00)?, at position 0$" + ) + with pytest.raises(OutOfBoundsDatetime, match=msg): + DatetimeIndex(data) + def test_construction_with_ndarray(self): # GH 5152 dates = [datetime(2013, 10, 7), datetime(2013, 10, 8), datetime(2013, 10, 9)] @@ -948,6 +962,60 @@ def test_index_constructor_with_numpy_object_array_and_timestamp_tz_with_nan(sel expected = DatetimeIndex([Timestamp("2019", tz="UTC"), pd.NaT]) tm.assert_index_equal(result, expected) + @pytest.mark.parametrize("tz", [pytz.timezone("US/Eastern"), gettz("US/Eastern")]) + def test_dti_from_tzaware_datetime(self, tz): + d = [datetime(2012, 8, 19, tzinfo=tz)] + + index = DatetimeIndex(d) + assert timezones.tz_compare(index.tz, tz) + + @pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"]) + def test_dti_tz_constructors(self, tzstr): + """Test different DatetimeIndex constructions with timezone + Follow-up of GH#4229 + """ + arr = ["11/10/2005 08:00:00", "11/10/2005 09:00:00"] + + idx1 = to_datetime(arr).tz_localize(tzstr) + idx2 = date_range(start="2005-11-10 08:00:00", freq="h", periods=2, 
tz=tzstr) + idx2 = idx2._with_freq(None) # the others all have freq=None + idx3 = DatetimeIndex(arr, tz=tzstr) + idx4 = DatetimeIndex(np.array(arr), tz=tzstr) + + for other in [idx2, idx3, idx4]: + tm.assert_index_equal(idx1, other) + + def test_dti_construction_univalent(self): + rng = date_range("03/12/2012 00:00", periods=10, freq="W-FRI", tz="US/Eastern") + rng2 = DatetimeIndex(data=rng, tz="US/Eastern") + tm.assert_index_equal(rng, rng2) + + @pytest.mark.parametrize("prefix", ["", "dateutil/"]) + def test_dti_constructor_static_tzinfo(self, prefix): + # it works! + index = DatetimeIndex([datetime(2012, 1, 1)], tz=prefix + "EST") + index.hour + index[0] + + def test_dti_constructor_with_fixed_tz(self): + off = FixedOffset(420, "+07:00") + start = datetime(2012, 3, 11, 5, 0, 0, tzinfo=off) + end = datetime(2012, 6, 11, 5, 0, 0, tzinfo=off) + rng = date_range(start=start, end=end) + assert off == rng.tz + + rng2 = date_range(start, periods=len(rng), tz=off) + tm.assert_index_equal(rng, rng2) + + rng3 = date_range("3/11/2012 05:00:00+07:00", "6/11/2012 05:00:00+07:00") + assert (rng.values == rng3.values).all() + + @pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"]) + def test_dti_convert_datetime_list(self, tzstr): + dr = date_range("2012-06-02", periods=10, tz=tzstr, name="foo") + dr2 = DatetimeIndex(list(dr), name="foo", freq="D") + tm.assert_index_equal(dr, dr2) + class TestTimeSeries: def test_dti_constructor_preserve_dti_freq(self): diff --git a/pandas/tests/indexes/datetimes/test_datetime.py b/pandas/tests/indexes/datetimes/test_datetime.py index 3b0b856d07673..89c4433db1c7b 100644 --- a/pandas/tests/indexes/datetimes/test_datetime.py +++ b/pandas/tests/indexes/datetimes/test_datetime.py @@ -2,7 +2,6 @@ from datetime import date import re -import dateutil import numpy as np import pytest @@ -21,36 +20,6 @@ class TestDatetimeIndex: - def test_sub_datetime_preserves_freq(self, tz_naive_fixture): - # GH#48818 - dti = date_range("2016-01-01", periods=12, tz=tz_naive_fixture) - - res = dti - dti[0] - expected = pd.timedelta_range("0 Days", "11 Days") - tm.assert_index_equal(res, expected) - assert res.freq == expected.freq - - @pytest.mark.xfail( - reason="The inherited freq is incorrect bc dti.freq is incorrect " - "https://github.com/pandas-dev/pandas/pull/48818/files#r982793461" - ) - def test_sub_datetime_preserves_freq_across_dst(self): - # GH#48818 - ts = Timestamp("2016-03-11", tz="US/Pacific") - dti = date_range(ts, periods=4) - - res = dti - dti[0] - expected = pd.TimedeltaIndex( - [ - pd.Timedelta(days=0), - pd.Timedelta(days=1), - pd.Timedelta(days=2), - pd.Timedelta(days=2, hours=23), - ] - ) - tm.assert_index_equal(res, expected) - assert res.freq == expected.freq - def test_time_overflow_for_32bit_machines(self): # GH8943. On some machines NumPy defaults to np.int32 (for example, # 32-bit Linux machines). 
In the function _generate_regular_range @@ -96,51 +65,12 @@ def test_append_nondatetimeindex(self): result = rng.append(idx) assert isinstance(result[0], Timestamp) - def test_iteration_preserves_tz(self): - # see gh-8890 - index = date_range("2012-01-01", periods=3, freq="h", tz="US/Eastern") - - for i, ts in enumerate(index): - result = ts - expected = index[i] # pylint: disable=unnecessary-list-index-lookup - assert result == expected - - index = date_range( - "2012-01-01", periods=3, freq="h", tz=dateutil.tz.tzoffset(None, -28800) - ) - - for i, ts in enumerate(index): - result = ts - expected = index[i] # pylint: disable=unnecessary-list-index-lookup - assert result._repr_base == expected._repr_base - assert result == expected - - # 9100 - index = DatetimeIndex( - ["2014-12-01 03:32:39.987000-08:00", "2014-12-01 04:12:34.987000-08:00"] - ) - for i, ts in enumerate(index): - result = ts - expected = index[i] # pylint: disable=unnecessary-list-index-lookup - assert result._repr_base == expected._repr_base - assert result == expected - - @pytest.mark.parametrize("periods", [0, 9999, 10000, 10001]) - def test_iteration_over_chunksize(self, periods): - # GH21012 - - index = date_range("2000-01-01 00:00:00", periods=periods, freq="min") - num = 0 - for stamp in index: - assert index[num] == stamp - num += 1 - assert num == len(index) - def test_misc_coverage(self): rng = date_range("1/1/2000", periods=5) result = rng.groupby(rng.day) assert isinstance(next(iter(result.values()))[0], Timestamp) + # TODO: belongs in frame groupby tests? def test_groupby_function_tuple_1677(self): df = DataFrame( np.random.default_rng(2).random(100), diff --git a/pandas/tests/indexes/datetimes/test_formats.py b/pandas/tests/indexes/datetimes/test_formats.py index deed5926bca91..a181c38e2102a 100644 --- a/pandas/tests/indexes/datetimes/test_formats.py +++ b/pandas/tests/indexes/datetimes/test_formats.py @@ -59,6 +59,15 @@ def test_get_values_for_csv(): class TestDatetimeIndexRendering: + @pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"]) + def test_dti_with_timezone_repr(self, tzstr): + rng = pd.date_range("4/13/2010", "5/6/2010") + + rng_eastern = rng.tz_localize(tzstr) + + rng_repr = repr(rng_eastern) + assert "2010-04-13 00:00:00" in rng_repr + def test_dti_repr_dates(self): text = str(pd.to_datetime([datetime(2013, 1, 1), datetime(2014, 1, 1)])) assert "['2013-01-01'," in text diff --git a/pandas/tests/indexes/datetimes/test_indexing.py b/pandas/tests/indexes/datetimes/test_indexing.py index 37c580c98b139..da11920ac1cba 100644 --- a/pandas/tests/indexes/datetimes/test_indexing.py +++ b/pandas/tests/indexes/datetimes/test_indexing.py @@ -214,6 +214,14 @@ def test_where_tz(self): class TestTake: + @pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"]) + def test_dti_take_dont_lose_meta(self, tzstr): + rng = date_range("1/1/2000", periods=20, tz=tzstr) + + result = rng.take(range(5)) + assert result.tz == rng.tz + assert result.freq == rng.freq + def test_take_nan_first_datetime(self): index = DatetimeIndex([pd.NaT, Timestamp("20130101"), Timestamp("20130102")]) result = index.take([-1, 0, 1]) diff --git a/pandas/tests/indexes/datetimes/test_iter.py b/pandas/tests/indexes/datetimes/test_iter.py new file mode 100644 index 0000000000000..a1861e6e9447e --- /dev/null +++ b/pandas/tests/indexes/datetimes/test_iter.py @@ -0,0 +1,71 @@ +import dateutil.tz +import numpy as np +import pytest + +from pandas import ( + DatetimeIndex, + date_range, + to_datetime, +) + + +class 
TestDatetimeIndexIteration: + @pytest.mark.parametrize( + "tz", [None, "UTC", "US/Central", dateutil.tz.tzoffset(None, -28800)] + ) + def test_iteration_preserves_nanoseconds(self, tz): + # GH#19603 + index = DatetimeIndex( + ["2018-02-08 15:00:00.168456358", "2018-02-08 15:00:00.168456359"], tz=tz + ) + for i, ts in enumerate(index): + assert ts == index[i] # pylint: disable=unnecessary-list-index-lookup + + def test_iter_readonly(self): + # GH#28055 ints_to_pydatetime with readonly array + arr = np.array([np.datetime64("2012-02-15T12:00:00.000000000")]) + arr.setflags(write=False) + dti = to_datetime(arr) + list(dti) + + def test_iteration_preserves_tz(self): + # see GH#8890 + index = date_range("2012-01-01", periods=3, freq="h", tz="US/Eastern") + + for i, ts in enumerate(index): + result = ts + expected = index[i] # pylint: disable=unnecessary-list-index-lookup + assert result == expected + + def test_iteration_preserves_tz2(self): + index = date_range( + "2012-01-01", periods=3, freq="h", tz=dateutil.tz.tzoffset(None, -28800) + ) + + for i, ts in enumerate(index): + result = ts + expected = index[i] # pylint: disable=unnecessary-list-index-lookup + assert result._repr_base == expected._repr_base + assert result == expected + + def test_iteration_preserves_tz3(self): + # GH#9100 + index = DatetimeIndex( + ["2014-12-01 03:32:39.987000-08:00", "2014-12-01 04:12:34.987000-08:00"] + ) + for i, ts in enumerate(index): + result = ts + expected = index[i] # pylint: disable=unnecessary-list-index-lookup + assert result._repr_base == expected._repr_base + assert result == expected + + @pytest.mark.parametrize("periods", [0, 9999, 10000, 10001]) + def test_iteration_over_chunksize(self, periods): + # GH#21012 + + index = date_range("2000-01-01 00:00:00", periods=periods, freq="min") + num = 0 + for stamp in index: + assert index[num] == stamp + num += 1 + assert num == len(index) diff --git a/pandas/tests/indexes/datetimes/test_misc.py b/pandas/tests/indexes/datetimes/test_misc.py deleted file mode 100644 index d86d78ba47c5b..0000000000000 --- a/pandas/tests/indexes/datetimes/test_misc.py +++ /dev/null @@ -1,305 +0,0 @@ -import calendar -from datetime import datetime -import locale -import unicodedata - -import numpy as np -import pytest - -import pandas as pd -from pandas import ( - DatetimeIndex, - Index, - Timedelta, - Timestamp, - date_range, - offsets, -) -import pandas._testing as tm -from pandas.core.arrays import DatetimeArray - -from pandas.tseries.frequencies import to_offset - - -class TestDatetime64: - def test_no_millisecond_field(self): - msg = "type object 'DatetimeIndex' has no attribute 'millisecond'" - with pytest.raises(AttributeError, match=msg): - DatetimeIndex.millisecond - - msg = "'DatetimeIndex' object has no attribute 'millisecond'" - with pytest.raises(AttributeError, match=msg): - DatetimeIndex([]).millisecond - - def test_datetimeindex_accessors(self): - dti_naive = date_range(freq="D", start=datetime(1998, 1, 1), periods=365) - # GH#13303 - dti_tz = date_range( - freq="D", start=datetime(1998, 1, 1), periods=365, tz="US/Eastern" - ) - for dti in [dti_naive, dti_tz]: - assert dti.year[0] == 1998 - assert dti.month[0] == 1 - assert dti.day[0] == 1 - assert dti.hour[0] == 0 - assert dti.minute[0] == 0 - assert dti.second[0] == 0 - assert dti.microsecond[0] == 0 - assert dti.dayofweek[0] == 3 - - assert dti.dayofyear[0] == 1 - assert dti.dayofyear[120] == 121 - - assert dti.isocalendar().week.iloc[0] == 1 - assert dti.isocalendar().week.iloc[120] == 18 - - assert 
dti.quarter[0] == 1 - assert dti.quarter[120] == 2 - - assert dti.days_in_month[0] == 31 - assert dti.days_in_month[90] == 30 - - assert dti.is_month_start[0] - assert not dti.is_month_start[1] - assert dti.is_month_start[31] - assert dti.is_quarter_start[0] - assert dti.is_quarter_start[90] - assert dti.is_year_start[0] - assert not dti.is_year_start[364] - assert not dti.is_month_end[0] - assert dti.is_month_end[30] - assert not dti.is_month_end[31] - assert dti.is_month_end[364] - assert not dti.is_quarter_end[0] - assert not dti.is_quarter_end[30] - assert dti.is_quarter_end[89] - assert dti.is_quarter_end[364] - assert not dti.is_year_end[0] - assert dti.is_year_end[364] - - assert len(dti.year) == 365 - assert len(dti.month) == 365 - assert len(dti.day) == 365 - assert len(dti.hour) == 365 - assert len(dti.minute) == 365 - assert len(dti.second) == 365 - assert len(dti.microsecond) == 365 - assert len(dti.dayofweek) == 365 - assert len(dti.dayofyear) == 365 - assert len(dti.isocalendar()) == 365 - assert len(dti.quarter) == 365 - assert len(dti.is_month_start) == 365 - assert len(dti.is_month_end) == 365 - assert len(dti.is_quarter_start) == 365 - assert len(dti.is_quarter_end) == 365 - assert len(dti.is_year_start) == 365 - assert len(dti.is_year_end) == 365 - - dti.name = "name" - - # non boolean accessors -> return Index - for accessor in DatetimeArray._field_ops: - res = getattr(dti, accessor) - assert len(res) == 365 - assert isinstance(res, Index) - assert res.name == "name" - - # boolean accessors -> return array - for accessor in DatetimeArray._bool_ops: - res = getattr(dti, accessor) - assert len(res) == 365 - assert isinstance(res, np.ndarray) - - # test boolean indexing - res = dti[dti.is_quarter_start] - exp = dti[[0, 90, 181, 273]] - tm.assert_index_equal(res, exp) - res = dti[dti.is_leap_year] - exp = DatetimeIndex([], freq="D", tz=dti.tz, name="name") - tm.assert_index_equal(res, exp) - - def test_datetimeindex_accessors2(self): - dti = date_range(freq="BQ-FEB", start=datetime(1998, 1, 1), periods=4) - - assert sum(dti.is_quarter_start) == 0 - assert sum(dti.is_quarter_end) == 4 - assert sum(dti.is_year_start) == 0 - assert sum(dti.is_year_end) == 1 - - def test_datetimeindex_accessors3(self): - # Ensure is_start/end accessors throw ValueError for CustomBusinessDay, - bday_egypt = offsets.CustomBusinessDay(weekmask="Sun Mon Tue Wed Thu") - dti = date_range(datetime(2013, 4, 30), periods=5, freq=bday_egypt) - msg = "Custom business days is not supported by is_month_start" - with pytest.raises(ValueError, match=msg): - dti.is_month_start - - def test_datetimeindex_accessors4(self): - dti = DatetimeIndex(["2000-01-01", "2000-01-02", "2000-01-03"]) - - assert dti.is_month_start[0] == 1 - - def test_datetimeindex_accessors5(self): - freq_m = to_offset("ME") - bm = to_offset("BME") - qfeb = to_offset("Q-FEB") - qsfeb = to_offset("QS-FEB") - bq = to_offset("BQ") - bqs_apr = to_offset("BQS-APR") - as_nov = to_offset("YS-NOV") - - tests = [ - (freq_m.is_month_start(Timestamp("2013-06-01")), 1), - (bm.is_month_start(Timestamp("2013-06-01")), 0), - (freq_m.is_month_start(Timestamp("2013-06-03")), 0), - (bm.is_month_start(Timestamp("2013-06-03")), 1), - (qfeb.is_month_end(Timestamp("2013-02-28")), 1), - (qfeb.is_quarter_end(Timestamp("2013-02-28")), 1), - (qfeb.is_year_end(Timestamp("2013-02-28")), 1), - (qfeb.is_month_start(Timestamp("2013-03-01")), 1), - (qfeb.is_quarter_start(Timestamp("2013-03-01")), 1), - (qfeb.is_year_start(Timestamp("2013-03-01")), 1), - 
(qsfeb.is_month_end(Timestamp("2013-03-31")), 1), - (qsfeb.is_quarter_end(Timestamp("2013-03-31")), 0), - (qsfeb.is_year_end(Timestamp("2013-03-31")), 0), - (qsfeb.is_month_start(Timestamp("2013-02-01")), 1), - (qsfeb.is_quarter_start(Timestamp("2013-02-01")), 1), - (qsfeb.is_year_start(Timestamp("2013-02-01")), 1), - (bq.is_month_end(Timestamp("2013-06-30")), 0), - (bq.is_quarter_end(Timestamp("2013-06-30")), 0), - (bq.is_year_end(Timestamp("2013-06-30")), 0), - (bq.is_month_end(Timestamp("2013-06-28")), 1), - (bq.is_quarter_end(Timestamp("2013-06-28")), 1), - (bq.is_year_end(Timestamp("2013-06-28")), 0), - (bqs_apr.is_month_end(Timestamp("2013-06-30")), 0), - (bqs_apr.is_quarter_end(Timestamp("2013-06-30")), 0), - (bqs_apr.is_year_end(Timestamp("2013-06-30")), 0), - (bqs_apr.is_month_end(Timestamp("2013-06-28")), 1), - (bqs_apr.is_quarter_end(Timestamp("2013-06-28")), 1), - (bqs_apr.is_year_end(Timestamp("2013-03-29")), 1), - (as_nov.is_year_start(Timestamp("2013-11-01")), 1), - (as_nov.is_year_end(Timestamp("2013-10-31")), 1), - (Timestamp("2012-02-01").days_in_month, 29), - (Timestamp("2013-02-01").days_in_month, 28), - ] - - for ts, value in tests: - assert ts == value - - def test_datetimeindex_accessors6(self): - # GH 6538: Check that DatetimeIndex and its TimeStamp elements - # return the same weekofyear accessor close to new year w/ tz - dates = ["2013/12/29", "2013/12/30", "2013/12/31"] - dates = DatetimeIndex(dates, tz="Europe/Brussels") - expected = [52, 1, 1] - assert dates.isocalendar().week.tolist() == expected - assert [d.weekofyear for d in dates] == expected - - # GH 12806 - # error: Unsupported operand types for + ("List[None]" and "List[str]") - @pytest.mark.parametrize( - "time_locale", [None] + tm.get_locales() # type: ignore[operator] - ) - def test_datetime_name_accessors(self, time_locale): - # Test Monday -> Sunday and January -> December, in that sequence - if time_locale is None: - # If the time_locale is None, day-name and month_name should - # return the english attributes - expected_days = [ - "Monday", - "Tuesday", - "Wednesday", - "Thursday", - "Friday", - "Saturday", - "Sunday", - ] - expected_months = [ - "January", - "February", - "March", - "April", - "May", - "June", - "July", - "August", - "September", - "October", - "November", - "December", - ] - else: - with tm.set_locale(time_locale, locale.LC_TIME): - expected_days = calendar.day_name[:] - expected_months = calendar.month_name[1:] - - # GH#11128 - dti = date_range(freq="D", start=datetime(1998, 1, 1), periods=365) - english_days = [ - "Monday", - "Tuesday", - "Wednesday", - "Thursday", - "Friday", - "Saturday", - "Sunday", - ] - for day, name, eng_name in zip(range(4, 11), expected_days, english_days): - name = name.capitalize() - assert dti.day_name(locale=time_locale)[day] == name - assert dti.day_name(locale=None)[day] == eng_name - ts = Timestamp(datetime(2016, 4, day)) - assert ts.day_name(locale=time_locale) == name - dti = dti.append(DatetimeIndex([pd.NaT])) - assert np.isnan(dti.day_name(locale=time_locale)[-1]) - ts = Timestamp(pd.NaT) - assert np.isnan(ts.day_name(locale=time_locale)) - - # GH#12805 - dti = date_range(freq="ME", start="2012", end="2013") - result = dti.month_name(locale=time_locale) - expected = Index([month.capitalize() for month in expected_months]) - - # work around different normalization schemes - # https://github.com/pandas-dev/pandas/issues/22342 - result = result.str.normalize("NFD") - expected = expected.str.normalize("NFD") - - tm.assert_index_equal(result, 
expected) - - for date, expected in zip(dti, expected_months): - result = date.month_name(locale=time_locale) - expected = expected.capitalize() - - result = unicodedata.normalize("NFD", result) - expected = unicodedata.normalize("NFD", result) - - assert result == expected - dti = dti.append(DatetimeIndex([pd.NaT])) - assert np.isnan(dti.month_name(locale=time_locale)[-1]) - - def test_nanosecond_field(self): - dti = DatetimeIndex(np.arange(10)) - expected = Index(np.arange(10, dtype=np.int32)) - - tm.assert_index_equal(dti.nanosecond, expected) - - -def test_iter_readonly(): - # GH#28055 ints_to_pydatetime with readonly array - arr = np.array([np.datetime64("2012-02-15T12:00:00.000000000")]) - arr.setflags(write=False) - dti = pd.to_datetime(arr) - list(dti) - - -def test_add_timedelta_preserves_freq(): - # GH#37295 should hold for any DTI with freq=None or Tick freq - tz = "Canada/Eastern" - dti = date_range( - start=Timestamp("2019-03-26 00:00:00-0400", tz=tz), - end=Timestamp("2020-10-17 00:00:00-0400", tz=tz), - freq="D", - ) - result = dti + Timedelta(days=1) - assert result.freq == dti.freq diff --git a/pandas/tests/indexes/datetimes/test_ops.py b/pandas/tests/indexes/datetimes/test_ops.py index c58d55ad6371b..30c510864ce68 100644 --- a/pandas/tests/indexes/datetimes/test_ops.py +++ b/pandas/tests/indexes/datetimes/test_ops.py @@ -1,10 +1,7 @@ from datetime import datetime -from dateutil.tz import tzlocal import pytest -from pandas.compat import IS64 - from pandas import ( DatetimeIndex, Index, @@ -17,30 +14,6 @@ class TestDatetimeIndexOps: - @pytest.mark.parametrize( - "freq,expected", - [ - ("Y", "day"), - ("Q", "day"), - ("ME", "day"), - ("D", "day"), - ("h", "hour"), - ("min", "minute"), - ("s", "second"), - ("ms", "millisecond"), - ("us", "microsecond"), - ], - ) - def test_resolution(self, request, tz_naive_fixture, freq, expected): - tz = tz_naive_fixture - if freq == "Y" and not IS64 and isinstance(tz, tzlocal): - request.applymarker( - pytest.mark.xfail(reason="OverflowError inside tzlocal past 2038") - ) - - idx = date_range(start="2013-04-01", periods=30, freq=freq, tz=tz) - assert idx.resolution == expected - def test_infer_freq(self, freq_sample): # GH 11018 idx = date_range("2011-01-01 09:00:00", freq=freq_sample, periods=10) diff --git a/pandas/tests/indexes/datetimes/test_scalar_compat.py b/pandas/tests/indexes/datetimes/test_scalar_compat.py index 7c7e57b51ccc0..6fbfe27acc0e0 100644 --- a/pandas/tests/indexes/datetimes/test_scalar_compat.py +++ b/pandas/tests/indexes/datetimes/test_scalar_compat.py @@ -1,48 +1,91 @@ """ Tests for DatetimeIndex methods behaving like their Timestamp counterparts """ -from datetime import datetime + +import calendar +from datetime import ( + date, + datetime, + time, +) +import locale +import unicodedata import numpy as np import pytest -from pandas._libs.tslibs import ( - OutOfBoundsDatetime, - to_offset, -) -from pandas._libs.tslibs.offsets import INVALID_FREQ_ERR_MSG +from pandas._libs.tslibs import timezones -import pandas as pd from pandas import ( DatetimeIndex, + Index, + NaT, Timestamp, date_range, + offsets, ) import pandas._testing as tm +from pandas.core.arrays import DatetimeArray class TestDatetimeIndexOps: + def test_dti_no_millisecond_field(self): + msg = "type object 'DatetimeIndex' has no attribute 'millisecond'" + with pytest.raises(AttributeError, match=msg): + DatetimeIndex.millisecond + + msg = "'DatetimeIndex' object has no attribute 'millisecond'" + with pytest.raises(AttributeError, match=msg): + 
DatetimeIndex([]).millisecond + def test_dti_time(self): rng = date_range("1/1/2000", freq="12min", periods=10) - result = pd.Index(rng).time + result = Index(rng).time expected = [t.time() for t in rng] assert (result == expected).all() def test_dti_date(self): rng = date_range("1/1/2000", freq="12h", periods=10) - result = pd.Index(rng).date + result = Index(rng).date expected = [t.date() for t in rng] assert (result == expected).all() - @pytest.mark.parametrize("data", [["1400-01-01"], [datetime(1400, 1, 1)]]) - def test_dti_date_out_of_range(self, data): - # GH#1475 - msg = ( - "^Out of bounds nanosecond timestamp: " - "1400-01-01( 00:00:00)?, at position 0$" - ) - with pytest.raises(OutOfBoundsDatetime, match=msg): - DatetimeIndex(data) + @pytest.mark.parametrize( + "dtype", + [None, "datetime64[ns, CET]", "datetime64[ns, EST]", "datetime64[ns, UTC]"], + ) + def test_dti_date2(self, dtype): + # Regression test for GH#21230 + expected = np.array([date(2018, 6, 4), NaT]) + + index = DatetimeIndex(["2018-06-04 10:00:00", NaT], dtype=dtype) + result = index.date + + tm.assert_numpy_array_equal(result, expected) + + @pytest.mark.parametrize( + "dtype", + [None, "datetime64[ns, CET]", "datetime64[ns, EST]", "datetime64[ns, UTC]"], + ) + def test_dti_time2(self, dtype): + # Regression test for GH#21267 + expected = np.array([time(10, 20, 30), NaT]) + + index = DatetimeIndex(["2018-06-04 10:20:30", NaT], dtype=dtype) + result = index.time + + tm.assert_numpy_array_equal(result, expected) + + def test_dti_timetz(self, tz_naive_fixture): + # GH#21358 + tz = timezones.maybe_get_tz(tz_naive_fixture) + + expected = np.array([time(10, 20, 30, tzinfo=tz), NaT]) + + index = DatetimeIndex(["2018-06-04 10:20:30", NaT], tz=tz) + result = index.timetz + + tm.assert_numpy_array_equal(result, expected) @pytest.mark.parametrize( "field", @@ -69,279 +112,218 @@ def test_dti_timestamp_fields(self, field): result = getattr(Timestamp(idx[-1]), field) assert result == expected - def test_dti_timestamp_isocalendar_fields(self): - idx = tm.makeDateIndex(100) - expected = tuple(idx.isocalendar().iloc[-1].to_list()) - result = idx[-1].isocalendar() - assert result == expected + def test_dti_nanosecond(self): + dti = DatetimeIndex(np.arange(10)) + expected = Index(np.arange(10, dtype=np.int32)) - # ---------------------------------------------------------------- - # DatetimeIndex.round + tm.assert_index_equal(dti.nanosecond, expected) - def test_round_daily(self): - dti = date_range("20130101 09:10:11", periods=5) - result = dti.round("D") - expected = date_range("20130101", periods=5) - tm.assert_index_equal(result, expected) + @pytest.mark.parametrize("prefix", ["", "dateutil/"]) + def test_dti_hour_tzaware(self, prefix): + strdates = ["1/1/2012", "3/1/2012", "4/1/2012"] + rng = DatetimeIndex(strdates, tz=prefix + "US/Eastern") + assert (rng.hour == 0).all() - dti = dti.tz_localize("UTC").tz_convert("US/Eastern") - result = dti.round("D") - expected = date_range("20130101", periods=5).tz_localize("US/Eastern") - tm.assert_index_equal(result, expected) + # a more unusual time zone, GH#1946 + dr = date_range( + "2011-10-02 00:00", freq="h", periods=10, tz=prefix + "America/Atikokan" + ) - result = dti.round("s") - tm.assert_index_equal(result, dti) + expected = Index(np.arange(10, dtype=np.int32)) + tm.assert_index_equal(dr.hour, expected) + # GH#12806 + # error: Unsupported operand types for + ("List[None]" and "List[str]") @pytest.mark.parametrize( - "freq, error_msg", - [ - ("Y", "<YearEnd: month=12> is a 
non-fixed frequency"), - ("ME", "<MonthEnd> is a non-fixed frequency"), - ("foobar", "Invalid frequency: foobar"), - ], + "time_locale", [None] + tm.get_locales() # type: ignore[operator] ) - def test_round_invalid(self, freq, error_msg): - dti = date_range("20130101 09:10:11", periods=5) - dti = dti.tz_localize("UTC").tz_convert("US/Eastern") - with pytest.raises(ValueError, match=error_msg): - dti.round(freq) - - def test_round(self, tz_naive_fixture): - tz = tz_naive_fixture - rng = date_range(start="2016-01-01", periods=5, freq="30Min", tz=tz) - elt = rng[1] - - expected_rng = DatetimeIndex( - [ - Timestamp("2016-01-01 00:00:00", tz=tz), - Timestamp("2016-01-01 00:00:00", tz=tz), - Timestamp("2016-01-01 01:00:00", tz=tz), - Timestamp("2016-01-01 02:00:00", tz=tz), - Timestamp("2016-01-01 02:00:00", tz=tz), + def test_day_name_month_name(self, time_locale): + # Test Monday -> Sunday and January -> December, in that sequence + if time_locale is None: + # If the time_locale is None, day-name and month_name should + # return the english attributes + expected_days = [ + "Monday", + "Tuesday", + "Wednesday", + "Thursday", + "Friday", + "Saturday", + "Sunday", ] - ) - expected_elt = expected_rng[1] - - tm.assert_index_equal(rng.round(freq="h"), expected_rng) - assert elt.round(freq="h") == expected_elt - - msg = INVALID_FREQ_ERR_MSG - with pytest.raises(ValueError, match=msg): - rng.round(freq="foo") - with pytest.raises(ValueError, match=msg): - elt.round(freq="foo") - - msg = "<MonthEnd> is a non-fixed frequency" - with pytest.raises(ValueError, match=msg): - rng.round(freq="ME") - with pytest.raises(ValueError, match=msg): - elt.round(freq="ME") - - # GH#14440 & GH#15578 - index = DatetimeIndex(["2016-10-17 12:00:00.0015"], tz=tz) - result = index.round("ms") - expected = DatetimeIndex(["2016-10-17 12:00:00.002000"], tz=tz) - tm.assert_index_equal(result, expected) - - for freq in ["us", "ns"]: - tm.assert_index_equal(index, index.round(freq)) - - index = DatetimeIndex(["2016-10-17 12:00:00.00149"], tz=tz) - result = index.round("ms") - expected = DatetimeIndex(["2016-10-17 12:00:00.001000"], tz=tz) - tm.assert_index_equal(result, expected) - - index = DatetimeIndex(["2016-10-17 12:00:00.001501031"]) - result = index.round("10ns") - expected = DatetimeIndex(["2016-10-17 12:00:00.001501030"]) - tm.assert_index_equal(result, expected) - - with tm.assert_produces_warning(False): - ts = "2016-10-17 12:00:00.001501031" - DatetimeIndex([ts]).round("1010ns") - - def test_no_rounding_occurs(self, tz_naive_fixture): - # GH 21262 - tz = tz_naive_fixture - rng = date_range(start="2016-01-01", periods=5, freq="2Min", tz=tz) - - expected_rng = DatetimeIndex( - [ - Timestamp("2016-01-01 00:00:00", tz=tz), - Timestamp("2016-01-01 00:02:00", tz=tz), - Timestamp("2016-01-01 00:04:00", tz=tz), - Timestamp("2016-01-01 00:06:00", tz=tz), - Timestamp("2016-01-01 00:08:00", tz=tz), + expected_months = [ + "January", + "February", + "March", + "April", + "May", + "June", + "July", + "August", + "September", + "October", + "November", + "December", ] - ) - - tm.assert_index_equal(rng.round(freq="2min"), expected_rng) - - @pytest.mark.parametrize( - "test_input, rounder, freq, expected", - [ - (["2117-01-01 00:00:45"], "floor", "15s", ["2117-01-01 00:00:45"]), - (["2117-01-01 00:00:45"], "ceil", "15s", ["2117-01-01 00:00:45"]), - ( - ["2117-01-01 00:00:45.000000012"], - "floor", - "10ns", - ["2117-01-01 00:00:45.000000010"], - ), - ( - ["1823-01-01 00:00:01.000000012"], - "ceil", - "10ns", - ["1823-01-01 
00:00:01.000000020"], - ), - (["1823-01-01 00:00:01"], "floor", "1s", ["1823-01-01 00:00:01"]), - (["1823-01-01 00:00:01"], "ceil", "1s", ["1823-01-01 00:00:01"]), - (["2018-01-01 00:15:00"], "ceil", "15min", ["2018-01-01 00:15:00"]), - (["2018-01-01 00:15:00"], "floor", "15min", ["2018-01-01 00:15:00"]), - (["1823-01-01 03:00:00"], "ceil", "3h", ["1823-01-01 03:00:00"]), - (["1823-01-01 03:00:00"], "floor", "3h", ["1823-01-01 03:00:00"]), - ( - ("NaT", "1823-01-01 00:00:01"), - "floor", - "1s", - ("NaT", "1823-01-01 00:00:01"), - ), - ( - ("NaT", "1823-01-01 00:00:01"), - "ceil", - "1s", - ("NaT", "1823-01-01 00:00:01"), - ), - ], - ) - def test_ceil_floor_edge(self, test_input, rounder, freq, expected): - dt = DatetimeIndex(list(test_input)) - func = getattr(dt, rounder) - result = func(freq) - expected = DatetimeIndex(list(expected)) - assert expected.equals(result) - - @pytest.mark.parametrize( - "start, index_freq, periods", - [("2018-01-01", "12h", 25), ("2018-01-01 0:0:0.124999", "1ns", 1000)], - ) - @pytest.mark.parametrize( - "round_freq", - [ - "2ns", - "3ns", - "4ns", - "5ns", - "6ns", - "7ns", - "250ns", - "500ns", - "750ns", - "1us", - "19us", - "250us", - "500us", - "750us", - "1s", - "2s", - "3s", - "12h", - "1D", - ], - ) - def test_round_int64(self, start, index_freq, periods, round_freq): - dt = date_range(start=start, freq=index_freq, periods=periods) - unit = to_offset(round_freq).nanos - - # test floor - result = dt.floor(round_freq) - diff = dt.asi8 - result.asi8 - mod = result.asi8 % unit - assert (mod == 0).all(), f"floor not a {round_freq} multiple" - assert (0 <= diff).all() and (diff < unit).all(), "floor error" - - # test ceil - result = dt.ceil(round_freq) - diff = result.asi8 - dt.asi8 - mod = result.asi8 % unit - assert (mod == 0).all(), f"ceil not a {round_freq} multiple" - assert (0 <= diff).all() and (diff < unit).all(), "ceil error" - - # test round - result = dt.round(round_freq) - diff = abs(result.asi8 - dt.asi8) - mod = result.asi8 % unit - assert (mod == 0).all(), f"round not a {round_freq} multiple" - assert (diff <= unit // 2).all(), "round error" - if unit % 2 == 0: - assert ( - result.asi8[diff == unit // 2] % 2 == 0 - ).all(), "round half to even error" - - # ---------------------------------------------------------------- - # DatetimeIndex.normalize - - def test_normalize(self): - rng = date_range("1/1/2000 9:30", periods=10, freq="D") - - result = rng.normalize() - expected = date_range("1/1/2000", periods=10, freq="D") - tm.assert_index_equal(result, expected) - - arr_ns = np.array([1380585623454345752, 1380585612343234312]).astype( - "datetime64[ns]" - ) - rng_ns = DatetimeIndex(arr_ns) - rng_ns_normalized = rng_ns.normalize() - - arr_ns = np.array([1380585600000000000, 1380585600000000000]).astype( - "datetime64[ns]" - ) - expected = DatetimeIndex(arr_ns) - tm.assert_index_equal(rng_ns_normalized, expected) - - assert result.is_normalized - assert not rng.is_normalized + else: + with tm.set_locale(time_locale, locale.LC_TIME): + expected_days = calendar.day_name[:] + expected_months = calendar.month_name[1:] + + # GH#11128 + dti = date_range(freq="D", start=datetime(1998, 1, 1), periods=365) + english_days = [ + "Monday", + "Tuesday", + "Wednesday", + "Thursday", + "Friday", + "Saturday", + "Sunday", + ] + for day, name, eng_name in zip(range(4, 11), expected_days, english_days): + name = name.capitalize() + assert dti.day_name(locale=time_locale)[day] == name + assert dti.day_name(locale=None)[day] == eng_name + ts = 
Timestamp(datetime(2016, 4, day)) + assert ts.day_name(locale=time_locale) == name + dti = dti.append(DatetimeIndex([NaT])) + assert np.isnan(dti.day_name(locale=time_locale)[-1]) + ts = Timestamp(NaT) + assert np.isnan(ts.day_name(locale=time_locale)) + + # GH#12805 + dti = date_range(freq="ME", start="2012", end="2013") + result = dti.month_name(locale=time_locale) + expected = Index([month.capitalize() for month in expected_months]) + + # work around different normalization schemes GH#22342 + result = result.str.normalize("NFD") + expected = expected.str.normalize("NFD") - def test_normalize_nat(self): - dti = DatetimeIndex([pd.NaT, Timestamp("2018-01-01 01:00:00")]) - result = dti.normalize() - expected = DatetimeIndex([pd.NaT, Timestamp("2018-01-01")]) tm.assert_index_equal(result, expected) - -class TestDateTimeIndexToJulianDate: - def test_1700(self): - dr = date_range(start=Timestamp("1710-10-01"), periods=5, freq="D") - r1 = pd.Index([x.to_julian_date() for x in dr]) - r2 = dr.to_julian_date() - assert isinstance(r2, pd.Index) and r2.dtype == np.float64 - tm.assert_index_equal(r1, r2) - - def test_2000(self): - dr = date_range(start=Timestamp("2000-02-27"), periods=5, freq="D") - r1 = pd.Index([x.to_julian_date() for x in dr]) - r2 = dr.to_julian_date() - assert isinstance(r2, pd.Index) and r2.dtype == np.float64 - tm.assert_index_equal(r1, r2) - - def test_hour(self): - dr = date_range(start=Timestamp("2000-02-27"), periods=5, freq="h") - r1 = pd.Index([x.to_julian_date() for x in dr]) - r2 = dr.to_julian_date() - assert isinstance(r2, pd.Index) and r2.dtype == np.float64 - tm.assert_index_equal(r1, r2) - - def test_minute(self): - dr = date_range(start=Timestamp("2000-02-27"), periods=5, freq="min") - r1 = pd.Index([x.to_julian_date() for x in dr]) - r2 = dr.to_julian_date() - assert isinstance(r2, pd.Index) and r2.dtype == np.float64 - tm.assert_index_equal(r1, r2) - - def test_second(self): - dr = date_range(start=Timestamp("2000-02-27"), periods=5, freq="s") - r1 = pd.Index([x.to_julian_date() for x in dr]) - r2 = dr.to_julian_date() - assert isinstance(r2, pd.Index) and r2.dtype == np.float64 - tm.assert_index_equal(r1, r2) + for item, expected in zip(dti, expected_months): + result = item.month_name(locale=time_locale) + expected = expected.capitalize() + + result = unicodedata.normalize("NFD", result) + expected = unicodedata.normalize("NFD", result) + + assert result == expected + dti = dti.append(DatetimeIndex([NaT])) + assert np.isnan(dti.month_name(locale=time_locale)[-1]) + + def test_dti_week(self): + # GH#6538: Check that DatetimeIndex and its TimeStamp elements + # return the same weekofyear accessor close to new year w/ tz + dates = ["2013/12/29", "2013/12/30", "2013/12/31"] + dates = DatetimeIndex(dates, tz="Europe/Brussels") + expected = [52, 1, 1] + assert dates.isocalendar().week.tolist() == expected + assert [d.weekofyear for d in dates] == expected + + @pytest.mark.parametrize("tz", [None, "US/Eastern"]) + def test_dti_fields(self, tz): + # GH#13303 + dti = date_range(freq="D", start=datetime(1998, 1, 1), periods=365, tz=tz) + assert dti.year[0] == 1998 + assert dti.month[0] == 1 + assert dti.day[0] == 1 + assert dti.hour[0] == 0 + assert dti.minute[0] == 0 + assert dti.second[0] == 0 + assert dti.microsecond[0] == 0 + assert dti.dayofweek[0] == 3 + + assert dti.dayofyear[0] == 1 + assert dti.dayofyear[120] == 121 + + assert dti.isocalendar().week.iloc[0] == 1 + assert dti.isocalendar().week.iloc[120] == 18 + + assert dti.quarter[0] == 1 + assert 
dti.quarter[120] == 2 + + assert dti.days_in_month[0] == 31 + assert dti.days_in_month[90] == 30 + + assert dti.is_month_start[0] + assert not dti.is_month_start[1] + assert dti.is_month_start[31] + assert dti.is_quarter_start[0] + assert dti.is_quarter_start[90] + assert dti.is_year_start[0] + assert not dti.is_year_start[364] + assert not dti.is_month_end[0] + assert dti.is_month_end[30] + assert not dti.is_month_end[31] + assert dti.is_month_end[364] + assert not dti.is_quarter_end[0] + assert not dti.is_quarter_end[30] + assert dti.is_quarter_end[89] + assert dti.is_quarter_end[364] + assert not dti.is_year_end[0] + assert dti.is_year_end[364] + + assert len(dti.year) == 365 + assert len(dti.month) == 365 + assert len(dti.day) == 365 + assert len(dti.hour) == 365 + assert len(dti.minute) == 365 + assert len(dti.second) == 365 + assert len(dti.microsecond) == 365 + assert len(dti.dayofweek) == 365 + assert len(dti.dayofyear) == 365 + assert len(dti.isocalendar()) == 365 + assert len(dti.quarter) == 365 + assert len(dti.is_month_start) == 365 + assert len(dti.is_month_end) == 365 + assert len(dti.is_quarter_start) == 365 + assert len(dti.is_quarter_end) == 365 + assert len(dti.is_year_start) == 365 + assert len(dti.is_year_end) == 365 + + dti.name = "name" + + # non boolean accessors -> return Index + for accessor in DatetimeArray._field_ops: + res = getattr(dti, accessor) + assert len(res) == 365 + assert isinstance(res, Index) + assert res.name == "name" + + # boolean accessors -> return array + for accessor in DatetimeArray._bool_ops: + res = getattr(dti, accessor) + assert len(res) == 365 + assert isinstance(res, np.ndarray) + + # test boolean indexing + res = dti[dti.is_quarter_start] + exp = dti[[0, 90, 181, 273]] + tm.assert_index_equal(res, exp) + res = dti[dti.is_leap_year] + exp = DatetimeIndex([], freq="D", tz=dti.tz, name="name") + tm.assert_index_equal(res, exp) + + def test_dti_is_year_quarter_start(self): + dti = date_range(freq="BQ-FEB", start=datetime(1998, 1, 1), periods=4) + + assert sum(dti.is_quarter_start) == 0 + assert sum(dti.is_quarter_end) == 4 + assert sum(dti.is_year_start) == 0 + assert sum(dti.is_year_end) == 1 + + def test_dti_is_month_start(self): + dti = DatetimeIndex(["2000-01-01", "2000-01-02", "2000-01-03"]) + + assert dti.is_month_start[0] == 1 + + def test_dti_is_month_start_custom(self): + # Ensure is_start/end accessors throw ValueError for CustomBusinessDay, + bday_egypt = offsets.CustomBusinessDay(weekmask="Sun Mon Tue Wed Thu") + dti = date_range(datetime(2013, 4, 30), periods=5, freq=bday_egypt) + msg = "Custom business days is not supported by is_month_start" + with pytest.raises(ValueError, match=msg): + dti.is_month_start diff --git a/pandas/tests/indexes/datetimes/test_timezones.py b/pandas/tests/indexes/datetimes/test_timezones.py index 8590a2542a31d..379c727f4ed0f 100644 --- a/pandas/tests/indexes/datetimes/test_timezones.py +++ b/pandas/tests/indexes/datetimes/test_timezones.py @@ -2,39 +2,25 @@ Tests for DatetimeIndex timezone-related methods """ from datetime import ( - date, datetime, - time, timedelta, timezone, tzinfo, ) -import dateutil -from dateutil.tz import ( - gettz, - tzlocal, -) +from dateutil.tz import gettz import numpy as np import pytest import pytz -try: - from zoneinfo import ZoneInfo -except ImportError: - # Cannot assign to a type [misc] - ZoneInfo = None # type: ignore[misc, assignment] - from pandas._libs.tslibs import ( conversion, timezones, ) -import pandas.util._test_decorators as td import pandas as pd from 
pandas import ( DatetimeIndex, - Index, Timestamp, bdate_range, date_range, @@ -61,773 +47,13 @@ def dst(self, dt): return timedelta(0) -fixed_off = FixedOffset(-420, "-07:00") fixed_off_no_name = FixedOffset(-330, None) class TestDatetimeIndexTimezones: - # ------------------------------------------------------------- - # DatetimeIndex.tz_convert - def test_tz_convert_nat(self): - # GH#5546 - dates = [pd.NaT] - idx = DatetimeIndex(dates) - idx = idx.tz_localize("US/Pacific") - tm.assert_index_equal(idx, DatetimeIndex(dates, tz="US/Pacific")) - idx = idx.tz_convert("US/Eastern") - tm.assert_index_equal(idx, DatetimeIndex(dates, tz="US/Eastern")) - idx = idx.tz_convert("UTC") - tm.assert_index_equal(idx, DatetimeIndex(dates, tz="UTC")) - - dates = ["2010-12-01 00:00", "2010-12-02 00:00", pd.NaT] - idx = DatetimeIndex(dates) - idx = idx.tz_localize("US/Pacific") - tm.assert_index_equal(idx, DatetimeIndex(dates, tz="US/Pacific")) - idx = idx.tz_convert("US/Eastern") - expected = ["2010-12-01 03:00", "2010-12-02 03:00", pd.NaT] - tm.assert_index_equal(idx, DatetimeIndex(expected, tz="US/Eastern")) - - idx = idx + pd.offsets.Hour(5) - expected = ["2010-12-01 08:00", "2010-12-02 08:00", pd.NaT] - tm.assert_index_equal(idx, DatetimeIndex(expected, tz="US/Eastern")) - idx = idx.tz_convert("US/Pacific") - expected = ["2010-12-01 05:00", "2010-12-02 05:00", pd.NaT] - tm.assert_index_equal(idx, DatetimeIndex(expected, tz="US/Pacific")) - - idx = idx + np.timedelta64(3, "h") - expected = ["2010-12-01 08:00", "2010-12-02 08:00", pd.NaT] - tm.assert_index_equal(idx, DatetimeIndex(expected, tz="US/Pacific")) - - idx = idx.tz_convert("US/Eastern") - expected = ["2010-12-01 11:00", "2010-12-02 11:00", pd.NaT] - tm.assert_index_equal(idx, DatetimeIndex(expected, tz="US/Eastern")) - - @pytest.mark.parametrize("prefix", ["", "dateutil/"]) - def test_dti_tz_convert_compat_timestamp(self, prefix): - strdates = ["1/1/2012", "3/1/2012", "4/1/2012"] - idx = DatetimeIndex(strdates, tz=prefix + "US/Eastern") - - conv = idx[0].tz_convert(prefix + "US/Pacific") - expected = idx.tz_convert(prefix + "US/Pacific")[0] - - assert conv == expected - - def test_dti_tz_convert_hour_overflow_dst(self): - # Regression test for: - # https://github.com/pandas-dev/pandas/issues/13306 - - # sorted case US/Eastern -> UTC - ts = ["2008-05-12 09:50:00", "2008-12-12 09:50:35", "2009-05-12 09:50:32"] - tt = DatetimeIndex(ts).tz_localize("US/Eastern") - ut = tt.tz_convert("UTC") - expected = Index([13, 14, 13], dtype=np.int32) - tm.assert_index_equal(ut.hour, expected) - - # sorted case UTC -> US/Eastern - ts = ["2008-05-12 13:50:00", "2008-12-12 14:50:35", "2009-05-12 13:50:32"] - tt = DatetimeIndex(ts).tz_localize("UTC") - ut = tt.tz_convert("US/Eastern") - expected = Index([9, 9, 9], dtype=np.int32) - tm.assert_index_equal(ut.hour, expected) - - # unsorted case US/Eastern -> UTC - ts = ["2008-05-12 09:50:00", "2008-12-12 09:50:35", "2008-05-12 09:50:32"] - tt = DatetimeIndex(ts).tz_localize("US/Eastern") - ut = tt.tz_convert("UTC") - expected = Index([13, 14, 13], dtype=np.int32) - tm.assert_index_equal(ut.hour, expected) - - # unsorted case UTC -> US/Eastern - ts = ["2008-05-12 13:50:00", "2008-12-12 14:50:35", "2008-05-12 13:50:32"] - tt = DatetimeIndex(ts).tz_localize("UTC") - ut = tt.tz_convert("US/Eastern") - expected = Index([9, 9, 9], dtype=np.int32) - tm.assert_index_equal(ut.hour, expected) - - @pytest.mark.parametrize("tz", ["US/Eastern", "dateutil/US/Eastern"]) - def test_dti_tz_convert_hour_overflow_dst_timestamps(self, 
tz): - # Regression test for GH#13306 - - # sorted case US/Eastern -> UTC - ts = [ - Timestamp("2008-05-12 09:50:00", tz=tz), - Timestamp("2008-12-12 09:50:35", tz=tz), - Timestamp("2009-05-12 09:50:32", tz=tz), - ] - tt = DatetimeIndex(ts) - ut = tt.tz_convert("UTC") - expected = Index([13, 14, 13], dtype=np.int32) - tm.assert_index_equal(ut.hour, expected) - - # sorted case UTC -> US/Eastern - ts = [ - Timestamp("2008-05-12 13:50:00", tz="UTC"), - Timestamp("2008-12-12 14:50:35", tz="UTC"), - Timestamp("2009-05-12 13:50:32", tz="UTC"), - ] - tt = DatetimeIndex(ts) - ut = tt.tz_convert("US/Eastern") - expected = Index([9, 9, 9], dtype=np.int32) - tm.assert_index_equal(ut.hour, expected) - - # unsorted case US/Eastern -> UTC - ts = [ - Timestamp("2008-05-12 09:50:00", tz=tz), - Timestamp("2008-12-12 09:50:35", tz=tz), - Timestamp("2008-05-12 09:50:32", tz=tz), - ] - tt = DatetimeIndex(ts) - ut = tt.tz_convert("UTC") - expected = Index([13, 14, 13], dtype=np.int32) - tm.assert_index_equal(ut.hour, expected) - - # unsorted case UTC -> US/Eastern - ts = [ - Timestamp("2008-05-12 13:50:00", tz="UTC"), - Timestamp("2008-12-12 14:50:35", tz="UTC"), - Timestamp("2008-05-12 13:50:32", tz="UTC"), - ] - tt = DatetimeIndex(ts) - ut = tt.tz_convert("US/Eastern") - expected = Index([9, 9, 9], dtype=np.int32) - tm.assert_index_equal(ut.hour, expected) - - @pytest.mark.parametrize("freq, n", [("h", 1), ("min", 60), ("s", 3600)]) - def test_dti_tz_convert_trans_pos_plus_1__bug(self, freq, n): - # Regression test for tslib.tz_convert(vals, tz1, tz2). - # See https://github.com/pandas-dev/pandas/issues/4496 for details. - idx = date_range(datetime(2011, 3, 26, 23), datetime(2011, 3, 27, 1), freq=freq) - idx = idx.tz_localize("UTC") - idx = idx.tz_convert("Europe/Moscow") - - expected = np.repeat(np.array([3, 4, 5]), np.array([n, n, 1])) - tm.assert_index_equal(idx.hour, Index(expected, dtype=np.int32)) - - def test_dti_tz_convert_dst(self): - for freq, n in [("h", 1), ("min", 60), ("s", 3600)]: - # Start DST - idx = date_range( - "2014-03-08 23:00", "2014-03-09 09:00", freq=freq, tz="UTC" - ) - idx = idx.tz_convert("US/Eastern") - expected = np.repeat( - np.array([18, 19, 20, 21, 22, 23, 0, 1, 3, 4, 5]), - np.array([n, n, n, n, n, n, n, n, n, n, 1]), - ) - tm.assert_index_equal(idx.hour, Index(expected, dtype=np.int32)) - - idx = date_range( - "2014-03-08 18:00", "2014-03-09 05:00", freq=freq, tz="US/Eastern" - ) - idx = idx.tz_convert("UTC") - expected = np.repeat( - np.array([23, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9]), - np.array([n, n, n, n, n, n, n, n, n, n, 1]), - ) - tm.assert_index_equal(idx.hour, Index(expected, dtype=np.int32)) - - # End DST - idx = date_range( - "2014-11-01 23:00", "2014-11-02 09:00", freq=freq, tz="UTC" - ) - idx = idx.tz_convert("US/Eastern") - expected = np.repeat( - np.array([19, 20, 21, 22, 23, 0, 1, 1, 2, 3, 4]), - np.array([n, n, n, n, n, n, n, n, n, n, 1]), - ) - tm.assert_index_equal(idx.hour, Index(expected, dtype=np.int32)) - - idx = date_range( - "2014-11-01 18:00", "2014-11-02 05:00", freq=freq, tz="US/Eastern" - ) - idx = idx.tz_convert("UTC") - expected = np.repeat( - np.array([22, 23, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]), - np.array([n, n, n, n, n, n, n, n, n, n, n, n, 1]), - ) - tm.assert_index_equal(idx.hour, Index(expected, dtype=np.int32)) - - # daily - # Start DST - idx = date_range("2014-03-08 00:00", "2014-03-09 00:00", freq="D", tz="UTC") - idx = idx.tz_convert("US/Eastern") - tm.assert_index_equal(idx.hour, Index([19, 19], dtype=np.int32)) - - idx = date_range( - 
"2014-03-08 00:00", "2014-03-09 00:00", freq="D", tz="US/Eastern" - ) - idx = idx.tz_convert("UTC") - tm.assert_index_equal(idx.hour, Index([5, 5], dtype=np.int32)) - - # End DST - idx = date_range("2014-11-01 00:00", "2014-11-02 00:00", freq="D", tz="UTC") - idx = idx.tz_convert("US/Eastern") - tm.assert_index_equal(idx.hour, Index([20, 20], dtype=np.int32)) - - idx = date_range( - "2014-11-01 00:00", "2014-11-02 000:00", freq="D", tz="US/Eastern" - ) - idx = idx.tz_convert("UTC") - tm.assert_index_equal(idx.hour, Index([4, 4], dtype=np.int32)) - - def test_tz_convert_roundtrip(self, tz_aware_fixture): - tz = tz_aware_fixture - idx1 = date_range(start="2014-01-01", end="2014-12-31", freq="ME", tz="UTC") - exp1 = date_range(start="2014-01-01", end="2014-12-31", freq="ME") - - idx2 = date_range(start="2014-01-01", end="2014-12-31", freq="D", tz="UTC") - exp2 = date_range(start="2014-01-01", end="2014-12-31", freq="D") - - idx3 = date_range(start="2014-01-01", end="2014-03-01", freq="h", tz="UTC") - exp3 = date_range(start="2014-01-01", end="2014-03-01", freq="h") - - idx4 = date_range(start="2014-08-01", end="2014-10-31", freq="min", tz="UTC") - exp4 = date_range(start="2014-08-01", end="2014-10-31", freq="min") - - for idx, expected in [(idx1, exp1), (idx2, exp2), (idx3, exp3), (idx4, exp4)]: - converted = idx.tz_convert(tz) - reset = converted.tz_convert(None) - tm.assert_index_equal(reset, expected) - assert reset.tzinfo is None - expected = converted.tz_convert("UTC").tz_localize(None) - expected = expected._with_freq("infer") - tm.assert_index_equal(reset, expected) - - def test_dti_tz_convert_tzlocal(self): - # GH#13583 - # tz_convert doesn't affect to internal - dti = date_range(start="2001-01-01", end="2001-03-01", tz="UTC") - dti2 = dti.tz_convert(dateutil.tz.tzlocal()) - tm.assert_numpy_array_equal(dti2.asi8, dti.asi8) - - dti = date_range(start="2001-01-01", end="2001-03-01", tz=dateutil.tz.tzlocal()) - dti2 = dti.tz_convert(None) - tm.assert_numpy_array_equal(dti2.asi8, dti.asi8) - - @pytest.mark.parametrize( - "tz", - [ - "US/Eastern", - "dateutil/US/Eastern", - pytz.timezone("US/Eastern"), - gettz("US/Eastern"), - ], - ) - def test_dti_tz_convert_utc_to_local_no_modify(self, tz): - rng = date_range("3/11/2012", "3/12/2012", freq="h", tz="utc") - rng_eastern = rng.tz_convert(tz) - - # Values are unmodified - tm.assert_numpy_array_equal(rng.asi8, rng_eastern.asi8) - - assert timezones.tz_compare(rng_eastern.tz, timezones.maybe_get_tz(tz)) - - @pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"]) - def test_tz_convert_unsorted(self, tzstr): - dr = date_range("2012-03-09", freq="h", periods=100, tz="utc") - dr = dr.tz_convert(tzstr) - - result = dr[::-1].hour - exp = dr.hour[::-1] - tm.assert_almost_equal(result, exp) - - # ------------------------------------------------------------- - # DatetimeIndex.tz_localize - - def test_tz_localize_utc_copies(self, utc_fixture): - # GH#46460 - times = ["2015-03-08 01:00", "2015-03-08 02:00", "2015-03-08 03:00"] - index = DatetimeIndex(times) - - res = index.tz_localize(utc_fixture) - assert not tm.shares_memory(res, index) - - res2 = index._data.tz_localize(utc_fixture) - assert not tm.shares_memory(index._data, res2) - - def test_dti_tz_localize_nonexistent_raise_coerce(self): - # GH#13057 - times = ["2015-03-08 01:00", "2015-03-08 02:00", "2015-03-08 03:00"] - index = DatetimeIndex(times) - tz = "US/Eastern" - with pytest.raises(pytz.NonExistentTimeError, match="|".join(times)): - index.tz_localize(tz=tz) - - with 
pytest.raises(pytz.NonExistentTimeError, match="|".join(times)): - index.tz_localize(tz=tz, nonexistent="raise") - - result = index.tz_localize(tz=tz, nonexistent="NaT") - test_times = ["2015-03-08 01:00-05:00", "NaT", "2015-03-08 03:00-04:00"] - dti = to_datetime(test_times, utc=True) - expected = dti.tz_convert("US/Eastern") - tm.assert_index_equal(result, expected) - - easts = [pytz.timezone("US/Eastern"), gettz("US/Eastern")] - if ZoneInfo is not None: - try: - tz = ZoneInfo("US/Eastern") - except KeyError: - # no tzdata - pass - else: - easts.append(tz) - - @pytest.mark.parametrize("tz", easts) - def test_dti_tz_localize_ambiguous_infer(self, tz): - # November 6, 2011, fall back, repeat 2 AM hour - # With no repeated hours, we cannot infer the transition - dr = date_range(datetime(2011, 11, 6, 0), periods=5, freq=pd.offsets.Hour()) - with pytest.raises(pytz.AmbiguousTimeError, match="Cannot infer dst time"): - dr.tz_localize(tz) - - # With repeated hours, we can infer the transition - dr = date_range( - datetime(2011, 11, 6, 0), periods=5, freq=pd.offsets.Hour(), tz=tz - ) - times = [ - "11/06/2011 00:00", - "11/06/2011 01:00", - "11/06/2011 01:00", - "11/06/2011 02:00", - "11/06/2011 03:00", - ] - di = DatetimeIndex(times) - localized = di.tz_localize(tz, ambiguous="infer") - expected = dr._with_freq(None) - tm.assert_index_equal(expected, localized) - tm.assert_index_equal(expected, DatetimeIndex(times, tz=tz, ambiguous="infer")) - - # When there is no dst transition, nothing special happens - dr = date_range(datetime(2011, 6, 1, 0), periods=10, freq=pd.offsets.Hour()) - localized = dr.tz_localize(tz) - localized_infer = dr.tz_localize(tz, ambiguous="infer") - tm.assert_index_equal(localized, localized_infer) - - @pytest.mark.parametrize("tz", [pytz.timezone("US/Eastern"), gettz("US/Eastern")]) - def test_dti_tz_localize_ambiguous_times(self, tz): - # March 13, 2011, spring forward, skip from 2 AM to 3 AM - dr = date_range(datetime(2011, 3, 13, 1, 30), periods=3, freq=pd.offsets.Hour()) - with pytest.raises(pytz.NonExistentTimeError, match="2011-03-13 02:30:00"): - dr.tz_localize(tz) - - # after dst transition, it works - dr = date_range( - datetime(2011, 3, 13, 3, 30), periods=3, freq=pd.offsets.Hour(), tz=tz - ) - - # November 6, 2011, fall back, repeat 2 AM hour - dr = date_range(datetime(2011, 11, 6, 1, 30), periods=3, freq=pd.offsets.Hour()) - with pytest.raises(pytz.AmbiguousTimeError, match="Cannot infer dst time"): - dr.tz_localize(tz) - - # UTC is OK - dr = date_range( - datetime(2011, 3, 13), periods=48, freq=pd.offsets.Minute(30), tz=pytz.utc - ) - - @pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"]) - def test_dti_tz_localize_pass_dates_to_utc(self, tzstr): - strdates = ["1/1/2012", "3/1/2012", "4/1/2012"] - - idx = DatetimeIndex(strdates) - conv = idx.tz_localize(tzstr) - - fromdates = DatetimeIndex(strdates, tz=tzstr) - - assert conv.tz == fromdates.tz - tm.assert_numpy_array_equal(conv.values, fromdates.values) - - @pytest.mark.parametrize("prefix", ["", "dateutil/"]) - def test_dti_tz_localize(self, prefix): - tzstr = prefix + "US/Eastern" - dti = date_range(start="1/1/2005", end="1/1/2005 0:00:30.256", freq="ms") - dti2 = dti.tz_localize(tzstr) - - dti_utc = date_range( - start="1/1/2005 05:00", end="1/1/2005 5:00:30.256", freq="ms", tz="utc" - ) - - tm.assert_numpy_array_equal(dti2.values, dti_utc.values) - - dti3 = dti2.tz_convert(prefix + "US/Pacific") - tm.assert_numpy_array_equal(dti3.values, dti_utc.values) - - dti = 
date_range(start="11/6/2011 1:59", end="11/6/2011 2:00", freq="ms") - with pytest.raises(pytz.AmbiguousTimeError, match="Cannot infer dst time"): - dti.tz_localize(tzstr) - - dti = date_range(start="3/13/2011 1:59", end="3/13/2011 2:00", freq="ms") - with pytest.raises(pytz.NonExistentTimeError, match="2011-03-13 02:00:00"): - dti.tz_localize(tzstr) - - @pytest.mark.parametrize( - "tz", - [ - "US/Eastern", - "dateutil/US/Eastern", - pytz.timezone("US/Eastern"), - gettz("US/Eastern"), - ], - ) - def test_dti_tz_localize_utc_conversion(self, tz): - # Localizing to time zone should: - # 1) check for DST ambiguities - # 2) convert to UTC - - rng = date_range("3/10/2012", "3/11/2012", freq="30min") - - converted = rng.tz_localize(tz) - expected_naive = rng + pd.offsets.Hour(5) - tm.assert_numpy_array_equal(converted.asi8, expected_naive.asi8) - - # DST ambiguity, this should fail - rng = date_range("3/11/2012", "3/12/2012", freq="30min") - # Is this really how it should fail?? - with pytest.raises(pytz.NonExistentTimeError, match="2012-03-11 02:00:00"): - rng.tz_localize(tz) - - def test_dti_tz_localize_roundtrip(self, tz_aware_fixture): - # note: this tz tests that a tz-naive index can be localized - # and de-localized successfully, when there are no DST transitions - # in the range. - idx = date_range(start="2014-06-01", end="2014-08-30", freq="15min") - tz = tz_aware_fixture - localized = idx.tz_localize(tz) - # can't localize a tz-aware object - with pytest.raises( - TypeError, match="Already tz-aware, use tz_convert to convert" - ): - localized.tz_localize(tz) - reset = localized.tz_localize(None) - assert reset.tzinfo is None - expected = idx._with_freq(None) - tm.assert_index_equal(reset, expected) - - def test_dti_tz_localize_naive(self): - rng = date_range("1/1/2011", periods=100, freq="h") - - conv = rng.tz_localize("US/Pacific") - exp = date_range("1/1/2011", periods=100, freq="h", tz="US/Pacific") - - tm.assert_index_equal(conv, exp._with_freq(None)) - - def test_dti_tz_localize_tzlocal(self): - # GH#13583 - offset = dateutil.tz.tzlocal().utcoffset(datetime(2011, 1, 1)) - offset = int(offset.total_seconds() * 1000000000) - - dti = date_range(start="2001-01-01", end="2001-03-01") - dti2 = dti.tz_localize(dateutil.tz.tzlocal()) - tm.assert_numpy_array_equal(dti2.asi8 + offset, dti.asi8) - - dti = date_range(start="2001-01-01", end="2001-03-01", tz=dateutil.tz.tzlocal()) - dti2 = dti.tz_localize(None) - tm.assert_numpy_array_equal(dti2.asi8 - offset, dti.asi8) - - @pytest.mark.parametrize("tz", [pytz.timezone("US/Eastern"), gettz("US/Eastern")]) - def test_dti_tz_localize_ambiguous_nat(self, tz): - times = [ - "11/06/2011 00:00", - "11/06/2011 01:00", - "11/06/2011 01:00", - "11/06/2011 02:00", - "11/06/2011 03:00", - ] - di = DatetimeIndex(times) - localized = di.tz_localize(tz, ambiguous="NaT") - - times = [ - "11/06/2011 00:00", - np.nan, - np.nan, - "11/06/2011 02:00", - "11/06/2011 03:00", - ] - di_test = DatetimeIndex(times, tz="US/Eastern") - - # left dtype is datetime64[ns, US/Eastern] - # right is datetime64[ns, tzfile('/usr/share/zoneinfo/US/Eastern')] - tm.assert_numpy_array_equal(di_test.values, localized.values) - - @pytest.mark.parametrize("tz", [pytz.timezone("US/Eastern"), gettz("US/Eastern")]) - def test_dti_tz_localize_ambiguous_flags(self, tz): - # November 6, 2011, fall back, repeat 2 AM hour - - # Pass in flags to determine right dst transition - dr = date_range( - datetime(2011, 11, 6, 0), periods=5, freq=pd.offsets.Hour(), tz=tz - ) - times = [ - "11/06/2011 
00:00", - "11/06/2011 01:00", - "11/06/2011 01:00", - "11/06/2011 02:00", - "11/06/2011 03:00", - ] - - # Test tz_localize - di = DatetimeIndex(times) - is_dst = [1, 1, 0, 0, 0] - localized = di.tz_localize(tz, ambiguous=is_dst) - expected = dr._with_freq(None) - tm.assert_index_equal(expected, localized) - tm.assert_index_equal(expected, DatetimeIndex(times, tz=tz, ambiguous=is_dst)) - - localized = di.tz_localize(tz, ambiguous=np.array(is_dst)) - tm.assert_index_equal(dr, localized) - - localized = di.tz_localize(tz, ambiguous=np.array(is_dst).astype("bool")) - tm.assert_index_equal(dr, localized) - - # Test constructor - localized = DatetimeIndex(times, tz=tz, ambiguous=is_dst) - tm.assert_index_equal(dr, localized) - - # Test duplicate times where inferring the dst fails - times += times - di = DatetimeIndex(times) - - # When the sizes are incompatible, make sure error is raised - msg = "Length of ambiguous bool-array must be the same size as vals" - with pytest.raises(Exception, match=msg): - di.tz_localize(tz, ambiguous=is_dst) - - # When sizes are compatible and there are repeats ('infer' won't work) - is_dst = np.hstack((is_dst, is_dst)) - localized = di.tz_localize(tz, ambiguous=is_dst) - dr = dr.append(dr) - tm.assert_index_equal(dr, localized) - - # When there is no dst transition, nothing special happens - dr = date_range(datetime(2011, 6, 1, 0), periods=10, freq=pd.offsets.Hour()) - is_dst = np.array([1] * 10) - localized = dr.tz_localize(tz) - localized_is_dst = dr.tz_localize(tz, ambiguous=is_dst) - tm.assert_index_equal(localized, localized_is_dst) - - def test_dti_tz_localize_bdate_range(self): - dr = bdate_range("1/1/2009", "1/1/2010") - dr_utc = bdate_range("1/1/2009", "1/1/2010", tz=pytz.utc) - localized = dr.tz_localize(pytz.utc) - tm.assert_index_equal(dr_utc, localized) - - @pytest.mark.parametrize( - "start_ts, tz, end_ts, shift", - [ - ["2015-03-29 02:20:00", "Europe/Warsaw", "2015-03-29 03:00:00", "forward"], - [ - "2015-03-29 02:20:00", - "Europe/Warsaw", - "2015-03-29 01:59:59.999999999", - "backward", - ], - [ - "2015-03-29 02:20:00", - "Europe/Warsaw", - "2015-03-29 03:20:00", - timedelta(hours=1), - ], - [ - "2015-03-29 02:20:00", - "Europe/Warsaw", - "2015-03-29 01:20:00", - timedelta(hours=-1), - ], - ["2018-03-11 02:33:00", "US/Pacific", "2018-03-11 03:00:00", "forward"], - [ - "2018-03-11 02:33:00", - "US/Pacific", - "2018-03-11 01:59:59.999999999", - "backward", - ], - [ - "2018-03-11 02:33:00", - "US/Pacific", - "2018-03-11 03:33:00", - timedelta(hours=1), - ], - [ - "2018-03-11 02:33:00", - "US/Pacific", - "2018-03-11 01:33:00", - timedelta(hours=-1), - ], - ], - ) - @pytest.mark.parametrize("tz_type", ["", "dateutil/"]) - def test_dti_tz_localize_nonexistent_shift( - self, start_ts, tz, end_ts, shift, tz_type - ): - # GH 8917 - tz = tz_type + tz - if isinstance(shift, str): - shift = "shift_" + shift - dti = DatetimeIndex([Timestamp(start_ts)]) - result = dti.tz_localize(tz, nonexistent=shift) - expected = DatetimeIndex([Timestamp(end_ts)]).tz_localize(tz) - tm.assert_index_equal(result, expected) - - @pytest.mark.parametrize("offset", [-1, 1]) - def test_dti_tz_localize_nonexistent_shift_invalid(self, offset, warsaw): - # GH 8917 - tz = warsaw - dti = DatetimeIndex([Timestamp("2015-03-29 02:20:00")]) - msg = "The provided timedelta will relocalize on a nonexistent time" - with pytest.raises(ValueError, match=msg): - dti.tz_localize(tz, nonexistent=timedelta(seconds=offset)) - - # ------------------------------------------------------------- - # 
DatetimeIndex.normalize - - def test_normalize_tz(self): - rng = date_range("1/1/2000 9:30", periods=10, freq="D", tz="US/Eastern") - - result = rng.normalize() # does not preserve freq - expected = date_range("1/1/2000", periods=10, freq="D", tz="US/Eastern") - tm.assert_index_equal(result, expected._with_freq(None)) - - assert result.is_normalized - assert not rng.is_normalized - - rng = date_range("1/1/2000 9:30", periods=10, freq="D", tz="UTC") - - result = rng.normalize() - expected = date_range("1/1/2000", periods=10, freq="D", tz="UTC") - tm.assert_index_equal(result, expected) - - assert result.is_normalized - assert not rng.is_normalized - - rng = date_range("1/1/2000 9:30", periods=10, freq="D", tz=tzlocal()) - result = rng.normalize() # does not preserve freq - expected = date_range("1/1/2000", periods=10, freq="D", tz=tzlocal()) - tm.assert_index_equal(result, expected._with_freq(None)) - - assert result.is_normalized - assert not rng.is_normalized - - @td.skip_if_windows - @pytest.mark.parametrize( - "timezone", - [ - "US/Pacific", - "US/Eastern", - "UTC", - "Asia/Kolkata", - "Asia/Shanghai", - "Australia/Canberra", - ], - ) - def test_normalize_tz_local(self, timezone): - # GH#13459 - with tm.set_timezone(timezone): - rng = date_range("1/1/2000 9:30", periods=10, freq="D", tz=tzlocal()) - - result = rng.normalize() - expected = date_range("1/1/2000", periods=10, freq="D", tz=tzlocal()) - expected = expected._with_freq(None) - tm.assert_index_equal(result, expected) - - assert result.is_normalized - assert not rng.is_normalized - - # ------------------------------------------------------------ - # DatetimeIndex.__new__ - - @pytest.mark.parametrize("prefix", ["", "dateutil/"]) - def test_dti_constructor_static_tzinfo(self, prefix): - # it works! 
- index = DatetimeIndex([datetime(2012, 1, 1)], tz=prefix + "EST") - index.hour - index[0] - - def test_dti_constructor_with_fixed_tz(self): - off = FixedOffset(420, "+07:00") - start = datetime(2012, 3, 11, 5, 0, 0, tzinfo=off) - end = datetime(2012, 6, 11, 5, 0, 0, tzinfo=off) - rng = date_range(start=start, end=end) - assert off == rng.tz - - rng2 = date_range(start, periods=len(rng), tz=off) - tm.assert_index_equal(rng, rng2) - - rng3 = date_range("3/11/2012 05:00:00+07:00", "6/11/2012 05:00:00+07:00") - assert (rng.values == rng3.values).all() - - @pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"]) - def test_dti_convert_datetime_list(self, tzstr): - dr = date_range("2012-06-02", periods=10, tz=tzstr, name="foo") - dr2 = DatetimeIndex(list(dr), name="foo", freq="D") - tm.assert_index_equal(dr, dr2) - - def test_dti_construction_univalent(self): - rng = date_range("03/12/2012 00:00", periods=10, freq="W-FRI", tz="US/Eastern") - rng2 = DatetimeIndex(data=rng, tz="US/Eastern") - tm.assert_index_equal(rng, rng2) - - @pytest.mark.parametrize("tz", [pytz.timezone("US/Eastern"), gettz("US/Eastern")]) - def test_dti_from_tzaware_datetime(self, tz): - d = [datetime(2012, 8, 19, tzinfo=tz)] - - index = DatetimeIndex(d) - assert timezones.tz_compare(index.tz, tz) - - @pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"]) - def test_dti_tz_constructors(self, tzstr): - """Test different DatetimeIndex constructions with timezone - Follow-up of GH#4229 - """ - arr = ["11/10/2005 08:00:00", "11/10/2005 09:00:00"] - - idx1 = to_datetime(arr).tz_localize(tzstr) - idx2 = date_range(start="2005-11-10 08:00:00", freq="h", periods=2, tz=tzstr) - idx2 = idx2._with_freq(None) # the others all have freq=None - idx3 = DatetimeIndex(arr, tz=tzstr) - idx4 = DatetimeIndex(np.array(arr), tz=tzstr) - - for other in [idx2, idx3, idx4]: - tm.assert_index_equal(idx1, other) - # ------------------------------------------------------------- # Unsorted - @pytest.mark.parametrize( - "dtype", - [None, "datetime64[ns, CET]", "datetime64[ns, EST]", "datetime64[ns, UTC]"], - ) - def test_date_accessor(self, dtype): - # Regression test for GH#21230 - expected = np.array([date(2018, 6, 4), pd.NaT]) - - index = DatetimeIndex(["2018-06-04 10:00:00", pd.NaT], dtype=dtype) - result = index.date - - tm.assert_numpy_array_equal(result, expected) - - @pytest.mark.parametrize( - "dtype", - [None, "datetime64[ns, CET]", "datetime64[ns, EST]", "datetime64[ns, UTC]"], - ) - def test_time_accessor(self, dtype): - # Regression test for GH#21267 - expected = np.array([time(10, 20, 30), pd.NaT]) - - index = DatetimeIndex(["2018-06-04 10:20:30", pd.NaT], dtype=dtype) - result = index.time - - tm.assert_numpy_array_equal(result, expected) - - def test_timetz_accessor(self, tz_naive_fixture): - # GH21358 - tz = timezones.maybe_get_tz(tz_naive_fixture) - - expected = np.array([time(10, 20, 30, tzinfo=tz), pd.NaT]) - - index = DatetimeIndex(["2018-06-04 10:20:30", pd.NaT], tz=tz) - result = index.timetz - - tm.assert_numpy_array_equal(result, expected) - def test_dti_drop_dont_lose_tz(self): # GH#2621 ind = date_range("2012-12-01", periods=10, tz="utc") @@ -946,42 +172,6 @@ def test_dti_tz_nat(self, tzstr): assert isna(idx[1]) assert idx[0].tzinfo is not None - @pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"]) - def test_dti_astype_asobject_tzinfos(self, tzstr): - # GH#1345 - - # dates around a dst transition - rng = date_range("2/13/2010", "5/6/2010", tz=tzstr) - - objs = 
rng.astype(object) - for i, x in enumerate(objs): - exval = rng[i] - assert x == exval - assert x.tzinfo == exval.tzinfo - - objs = rng.astype(object) - for i, x in enumerate(objs): - exval = rng[i] - assert x == exval - assert x.tzinfo == exval.tzinfo - - @pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"]) - def test_dti_with_timezone_repr(self, tzstr): - rng = date_range("4/13/2010", "5/6/2010") - - rng_eastern = rng.tz_localize(tzstr) - - rng_repr = repr(rng_eastern) - assert "2010-04-13 00:00:00" in rng_repr - - @pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"]) - def test_dti_take_dont_lose_meta(self, tzstr): - rng = date_range("1/1/2000", periods=20, tz=tzstr) - - result = rng.take(range(5)) - assert result.tz == rng.tz - assert result.freq == rng.freq - @pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"]) def test_utc_box_timestamp_and_localize(self, tzstr): tz = timezones.maybe_get_tz(tzstr) @@ -1004,36 +194,6 @@ def test_utc_box_timestamp_and_localize(self, tzstr): rng_eastern[0].tzinfo ) - def test_dti_to_pydatetime(self): - dt = dateutil.parser.parse("2012-06-13T01:39:00Z") - dt = dt.replace(tzinfo=tzlocal()) - - arr = np.array([dt], dtype=object) - - result = to_datetime(arr, utc=True) - assert result.tz is timezone.utc - - rng = date_range("2012-11-03 03:00", "2012-11-05 03:00", tz=tzlocal()) - arr = rng.to_pydatetime() - result = to_datetime(arr, utc=True) - assert result.tz is timezone.utc - - def test_dti_to_pydatetime_fizedtz(self): - dates = np.array( - [ - datetime(2000, 1, 1, tzinfo=fixed_off), - datetime(2000, 1, 2, tzinfo=fixed_off), - datetime(2000, 1, 3, tzinfo=fixed_off), - ] - ) - dti = DatetimeIndex(dates) - - result = dti.to_pydatetime() - tm.assert_numpy_array_equal(dates, result) - - result = dti._mpl_repr() - tm.assert_numpy_array_equal(dates, result) - @pytest.mark.parametrize("tz", [pytz.timezone("US/Central"), gettz("US/Central")]) def test_with_tz(self, tz): # just want it to work @@ -1065,20 +225,6 @@ def test_with_tz(self, tz): with pytest.raises(Exception, match=msg): bdate_range(datetime(2005, 1, 1, tzinfo=pytz.utc), "1/1/2009", tz=tz) - @pytest.mark.parametrize("prefix", ["", "dateutil/"]) - def test_field_access_localize(self, prefix): - strdates = ["1/1/2012", "3/1/2012", "4/1/2012"] - rng = DatetimeIndex(strdates, tz=prefix + "US/Eastern") - assert (rng.hour == 0).all() - - # a more unusual time zone, #1946 - dr = date_range( - "2011-10-02 00:00", freq="h", periods=10, tz=prefix + "America/Atikokan" - ) - - expected = Index(np.arange(10, dtype=np.int32)) - tm.assert_index_equal(dr.hour, expected) - @pytest.mark.parametrize("tz", [pytz.timezone("US/Eastern"), gettz("US/Eastern")]) def test_dti_convert_tz_aware_datetime_datetime(self, tz): # GH#1581 @@ -1092,37 +238,3 @@ def test_dti_convert_tz_aware_datetime_datetime(self, tz): ex_vals = np.array([Timestamp(x).as_unit("ns")._value for x in dates_aware]) tm.assert_numpy_array_equal(converted.asi8, ex_vals) assert converted.tz is timezone.utc - - @pytest.mark.parametrize( - "tz", [None, "UTC", "US/Central", dateutil.tz.tzoffset(None, -28800)] - ) - def test_iteration_preserves_nanoseconds(self, tz): - # GH 19603 - index = DatetimeIndex( - ["2018-02-08 15:00:00.168456358", "2018-02-08 15:00:00.168456359"], tz=tz - ) - for i, ts in enumerate(index): - assert ts == index[i] # pylint: disable=unnecessary-list-index-lookup - - -def test_tz_localize_invalidates_freq(): - # we only preserve freq in unambiguous cases - - # if localized to 
US/Eastern, this crosses a DST transition - dti = date_range("2014-03-08 23:00", "2014-03-09 09:00", freq="h") - assert dti.freq == "h" - - result = dti.tz_localize(None) # no-op - assert result.freq == "h" - - result = dti.tz_localize("UTC") # unambiguous freq preservation - assert result.freq == "h" - - result = dti.tz_localize("US/Eastern", nonexistent="shift_forward") - assert result.freq is None - assert result.inferred_freq is None # i.e. we are not _too_ strict here - - # Case where we _can_ keep freq because we're length==1 - dti2 = dti[:1] - result = dti2.tz_localize("US/Eastern") - assert result.freq == "h" diff --git a/pandas/tests/tools/test_to_datetime.py b/pandas/tests/tools/test_to_datetime.py index aefaba1aed058..12ac2cfebfb01 100644 --- a/pandas/tests/tools/test_to_datetime.py +++ b/pandas/tests/tools/test_to_datetime.py @@ -1787,7 +1787,9 @@ def test_to_datetime_utc(self): assert result.tz is timezone.utc def test_to_datetime_fixed_offset(self): - from pandas.tests.indexes.datetimes.test_timezones import fixed_off + from pandas.tests.indexes.datetimes.test_timezones import FixedOffset + + fixed_off = FixedOffset(-420, "-07:00") dates = [ datetime(2000, 1, 1, tzinfo=fixed_off), diff --git a/pandas/tests/tseries/offsets/test_offsets.py b/pandas/tests/tseries/offsets/test_offsets.py index 451033947fc99..31e118ecaf5a7 100644 --- a/pandas/tests/tseries/offsets/test_offsets.py +++ b/pandas/tests/tseries/offsets/test_offsets.py @@ -22,6 +22,7 @@ from pandas._libs.tslibs.offsets import ( _get_offset, _offset_map, + to_offset, ) from pandas._libs.tslibs.period import INVALID_FREQ_ERR_MSG from pandas.errors import PerformanceWarning @@ -1114,3 +1115,51 @@ def test_dateoffset_operations_on_dataframes(): assert frameresult1[0] == expecteddate assert frameresult2[0] == expecteddate + + +def test_is_yqm_start_end(): + freq_m = to_offset("ME") + bm = to_offset("BME") + qfeb = to_offset("Q-FEB") + qsfeb = to_offset("QS-FEB") + bq = to_offset("BQ") + bqs_apr = to_offset("BQS-APR") + as_nov = to_offset("YS-NOV") + + tests = [ + (freq_m.is_month_start(Timestamp("2013-06-01")), 1), + (bm.is_month_start(Timestamp("2013-06-01")), 0), + (freq_m.is_month_start(Timestamp("2013-06-03")), 0), + (bm.is_month_start(Timestamp("2013-06-03")), 1), + (qfeb.is_month_end(Timestamp("2013-02-28")), 1), + (qfeb.is_quarter_end(Timestamp("2013-02-28")), 1), + (qfeb.is_year_end(Timestamp("2013-02-28")), 1), + (qfeb.is_month_start(Timestamp("2013-03-01")), 1), + (qfeb.is_quarter_start(Timestamp("2013-03-01")), 1), + (qfeb.is_year_start(Timestamp("2013-03-01")), 1), + (qsfeb.is_month_end(Timestamp("2013-03-31")), 1), + (qsfeb.is_quarter_end(Timestamp("2013-03-31")), 0), + (qsfeb.is_year_end(Timestamp("2013-03-31")), 0), + (qsfeb.is_month_start(Timestamp("2013-02-01")), 1), + (qsfeb.is_quarter_start(Timestamp("2013-02-01")), 1), + (qsfeb.is_year_start(Timestamp("2013-02-01")), 1), + (bq.is_month_end(Timestamp("2013-06-30")), 0), + (bq.is_quarter_end(Timestamp("2013-06-30")), 0), + (bq.is_year_end(Timestamp("2013-06-30")), 0), + (bq.is_month_end(Timestamp("2013-06-28")), 1), + (bq.is_quarter_end(Timestamp("2013-06-28")), 1), + (bq.is_year_end(Timestamp("2013-06-28")), 0), + (bqs_apr.is_month_end(Timestamp("2013-06-30")), 0), + (bqs_apr.is_quarter_end(Timestamp("2013-06-30")), 0), + (bqs_apr.is_year_end(Timestamp("2013-06-30")), 0), + (bqs_apr.is_month_end(Timestamp("2013-06-28")), 1), + (bqs_apr.is_quarter_end(Timestamp("2013-06-28")), 1), + (bqs_apr.is_year_end(Timestamp("2013-03-29")), 1), + 
(as_nov.is_year_start(Timestamp("2013-11-01")), 1), + (as_nov.is_year_end(Timestamp("2013-10-31")), 1), + (Timestamp("2012-02-01").days_in_month, 29), + (Timestamp("2013-02-01").days_in_month, 28), + ] + + for ts, value in tests: + assert ts == value
https://api.github.com/repos/pandas-dev/pandas/pulls/55614
2023-10-20T23:51:02Z
2023-10-22T19:27:08Z
2023-10-22T19:27:08Z
2023-10-22T20:30:59Z
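For context, a minimal runnable sketch (assuming pandas 2.x; illustrative, not taken from the diff in the record above) of the accessor contract that test_dti_fields asserts: non-boolean field accessors return a named Index, while the boolean is_* accessors return a plain numpy array usable as a mask.

import numpy as np
import pandas as pd

# Daily range over 1998, named so we can check name propagation.
dti = pd.date_range("1998-01-01", periods=365, freq="D", name="name")

# Non-boolean accessor -> Index, with the name preserved.
res = dti.quarter
assert isinstance(res, pd.Index) and res.name == "name"

# Boolean accessor -> plain numpy boolean array.
mask = dti.is_quarter_start
assert isinstance(mask, np.ndarray)

# Boolean indexing keeps the DatetimeIndex metadata; the 1998 quarter
# starts fall on positions 0, 90, 181 and 273 of the range.
assert list(dti[mask]) == list(dti[[0, 90, 181, 273]])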
Backport PR #55611 on branch 2.1.x (DEPR: correct warning message while parsing datetimes with mixed time zones if utc=False)
diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx index 20a18cf56779f..e61ad28c54afb 100644 --- a/pandas/_libs/tslib.pyx +++ b/pandas/_libs/tslib.pyx @@ -671,7 +671,7 @@ cdef _array_to_datetime_object( if len(unique_timezones) > 1: warnings.warn( "In a future version of pandas, parsing datetimes with mixed time " - "zones will raise a warning unless `utc=True`. " + "zones will raise an error unless `utc=True`. " "Please specify `utc=True` to opt in to the new behaviour " "and silence this warning. To create a `Series` with mixed offsets and " "`object` dtype, please use `apply` and `datetime.datetime.strptime`", diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py index ac306229f3111..303136b7d3890 100644 --- a/pandas/core/tools/datetimes.py +++ b/pandas/core/tools/datetimes.py @@ -356,7 +356,7 @@ def _return_parsed_timezone_results( if len(non_na_timezones) > 1: warnings.warn( "In a future version of pandas, parsing datetimes with mixed time " - "zones will raise a warning unless `utc=True`. Please specify `utc=True` " + "zones will raise an error unless `utc=True`. Please specify `utc=True` " "to opt in to the new behaviour and silence this warning. " "To create a `Series` with mixed offsets and `object` dtype, " "please use `apply` and `datetime.datetime.strptime`", @@ -788,7 +788,7 @@ def to_datetime( .. warning:: In a future version of pandas, parsing datetimes with mixed time - zones will raise a warning unless `utc=True`. + zones will raise an error unless `utc=True`. Please specify `utc=True` to opt in to the new behaviour and silence this warning. To create a `Series` with mixed offsets and `object` dtype, please use `apply` and `datetime.datetime.strptime`. @@ -1023,7 +1023,7 @@ def to_datetime( >>> pd.to_datetime(['2020-10-25 02:00 +0200', ... '2020-10-25 04:00 +0100']) # doctest: +SKIP FutureWarning: In a future version of pandas, parsing datetimes with mixed - time zones will raise a warning unless `utc=True`. Please specify `utc=True` + time zones will raise an error unless `utc=True`. Please specify `utc=True` to opt in to the new behaviour and silence this warning. To create a `Series` with mixed offsets and `object` dtype, please use `apply` and `datetime.datetime.strptime`. @@ -1037,7 +1037,7 @@ def to_datetime( >>> pd.to_datetime(["2020-01-01 01:00:00-01:00", ... datetime(2020, 1, 1, 3, 0)]) # doctest: +SKIP FutureWarning: In a future version of pandas, parsing datetimes with mixed - time zones will raise a warning unless `utc=True`. Please specify `utc=True` + time zones will raise an error unless `utc=True`. Please specify `utc=True` to opt in to the new behaviour and silence this warning. To create a `Series` with mixed offsets and `object` dtype, please use `apply` and `datetime.datetime.strptime`. 
diff --git a/pandas/io/json/_json.py b/pandas/io/json/_json.py index 833f4986b6da6..58979a29c97d3 100644 --- a/pandas/io/json/_json.py +++ b/pandas/io/json/_json.py @@ -1316,7 +1316,7 @@ def _try_convert_to_date(self, data): warnings.filterwarnings( "ignore", ".*parsing datetimes with mixed time " - "zones will raise a warning", + "zones will raise an error", category=FutureWarning, ) new_data = to_datetime(new_data, errors="raise", unit=date_unit) diff --git a/pandas/io/parsers/base_parser.py b/pandas/io/parsers/base_parser.py index 60996a7d42187..6b1daa96782a0 100644 --- a/pandas/io/parsers/base_parser.py +++ b/pandas/io/parsers/base_parser.py @@ -1147,7 +1147,7 @@ def converter(*date_cols, col: Hashable): with warnings.catch_warnings(): warnings.filterwarnings( "ignore", - ".*parsing datetimes with mixed time zones will raise a warning", + ".*parsing datetimes with mixed time zones will raise an error", category=FutureWarning, ) result = tools.to_datetime( @@ -1169,7 +1169,7 @@ def converter(*date_cols, col: Hashable): warnings.filterwarnings( "ignore", ".*parsing datetimes with mixed time zones " - "will raise a warning", + "will raise an error", category=FutureWarning, ) result = tools.to_datetime( @@ -1187,7 +1187,7 @@ def converter(*date_cols, col: Hashable): warnings.filterwarnings( "ignore", ".*parsing datetimes with mixed time zones " - "will raise a warning", + "will raise an error", category=FutureWarning, ) return tools.to_datetime( diff --git a/pandas/tests/indexes/datetimes/test_constructors.py b/pandas/tests/indexes/datetimes/test_constructors.py index 733c14f33567a..fc7ce19c42b38 100644 --- a/pandas/tests/indexes/datetimes/test_constructors.py +++ b/pandas/tests/indexes/datetimes/test_constructors.py @@ -300,7 +300,7 @@ def test_construction_index_with_mixed_timezones(self): assert not isinstance(result, DatetimeIndex) msg = "DatetimeIndex has mixed timezones" - msg_depr = "parsing datetimes with mixed time zones will raise a warning" + msg_depr = "parsing datetimes with mixed time zones will raise an error" with pytest.raises(TypeError, match=msg): with tm.assert_produces_warning(FutureWarning, match=msg_depr): DatetimeIndex(["2013-11-02 22:00-05:00", "2013-11-03 22:00-06:00"]) diff --git a/pandas/tests/tools/test_to_datetime.py b/pandas/tests/tools/test_to_datetime.py index 93fe9b05adb4f..580f0605efdbc 100644 --- a/pandas/tests/tools/test_to_datetime.py +++ b/pandas/tests/tools/test_to_datetime.py @@ -518,7 +518,7 @@ def test_to_datetime_parse_tzname_or_tzoffset_utc_false_deprecated( self, fmt, dates, expected_dates ): # GH 13486, 50887 - msg = "parsing datetimes with mixed time zones will raise a warning" + msg = "parsing datetimes with mixed time zones will raise an error" with tm.assert_produces_warning(FutureWarning, match=msg): result = to_datetime(dates, format=fmt) expected = Index(expected_dates) @@ -693,7 +693,7 @@ def test_to_datetime_mixed_datetime_and_string_with_format_mixed_offsets_utc_fal args = ["2000-01-01 01:00:00", "2000-01-01 02:00:00+00:00"] ts1 = constructor(args[0]) ts2 = args[1] - msg = "parsing datetimes with mixed time zones will raise a warning" + msg = "parsing datetimes with mixed time zones will raise an error" expected = Index( [ @@ -734,7 +734,7 @@ def test_to_datetime_mixed_datetime_and_string_with_format_mixed_offsets_utc_fal ) def test_to_datetime_mixed_offsets_with_none_tz(self, fmt, expected): # https://github.com/pandas-dev/pandas/issues/50071 - msg = "parsing datetimes with mixed time zones will raise a warning" + msg = "parsing 
datetimes with mixed time zones will raise an error" with tm.assert_produces_warning(FutureWarning, match=msg): result = to_datetime( @@ -1236,7 +1236,7 @@ def test_to_datetime_different_offsets(self, cache): ts_string_2 = "March 1, 2018 12:00:00+0500" arr = [ts_string_1] * 5 + [ts_string_2] * 5 expected = Index([parse(x) for x in arr]) - msg = "parsing datetimes with mixed time zones will raise a warning" + msg = "parsing datetimes with mixed time zones will raise an error" with tm.assert_produces_warning(FutureWarning, match=msg): result = to_datetime(arr, cache=cache) tm.assert_index_equal(result, expected) @@ -1604,7 +1604,7 @@ def test_to_datetime_coerce(self): "March 1, 2018 12:00:00+0500", "20100240", ] - msg = "parsing datetimes with mixed time zones will raise a warning" + msg = "parsing datetimes with mixed time zones will raise an error" with tm.assert_produces_warning(FutureWarning, match=msg): result = to_datetime(ts_strings, errors="coerce") expected = Index( @@ -1689,7 +1689,7 @@ def test_iso_8601_strings_with_same_offset(self): def test_iso_8601_strings_with_different_offsets(self): # GH 17697, 11736, 50887 ts_strings = ["2015-11-18 15:30:00+05:30", "2015-11-18 16:30:00+06:30", NaT] - msg = "parsing datetimes with mixed time zones will raise a warning" + msg = "parsing datetimes with mixed time zones will raise an error" with tm.assert_produces_warning(FutureWarning, match=msg): result = to_datetime(ts_strings) expected = np.array( @@ -1729,7 +1729,7 @@ def test_mixed_offsets_with_native_datetime_raises(self): now = Timestamp("now") today = Timestamp("today") - msg = "parsing datetimes with mixed time zones will raise a warning" + msg = "parsing datetimes with mixed time zones will raise an error" with tm.assert_produces_warning(FutureWarning, match=msg): mixed = to_datetime(ser) expected = Series( @@ -1810,7 +1810,7 @@ def test_to_datetime_fixed_offset(self): ) def test_to_datetime_mixed_offsets_with_utc_false_deprecated(self, date): # GH 50887 - msg = "parsing datetimes with mixed time zones will raise a warning" + msg = "parsing datetimes with mixed time zones will raise an error" with tm.assert_produces_warning(FutureWarning, match=msg): to_datetime(date, utc=False) @@ -3680,7 +3680,7 @@ def test_to_datetime_with_empty_str_utc_false_format_mixed(): def test_to_datetime_with_empty_str_utc_false_offsets_and_format_mixed(): # GH 50887 - msg = "parsing datetimes with mixed time zones will raise a warning" + msg = "parsing datetimes with mixed time zones will raise an error" with tm.assert_produces_warning(FutureWarning, match=msg): to_datetime( diff --git a/pandas/tests/tslibs/test_array_to_datetime.py b/pandas/tests/tslibs/test_array_to_datetime.py index 435fe5f4b90d8..829bb140e6e96 100644 --- a/pandas/tests/tslibs/test_array_to_datetime.py +++ b/pandas/tests/tslibs/test_array_to_datetime.py @@ -85,7 +85,7 @@ def test_parsing_different_timezone_offsets(): data = ["2015-11-18 15:30:00+05:30", "2015-11-18 15:30:00+06:30"] data = np.array(data, dtype=object) - msg = "parsing datetimes with mixed time zones will raise a warning" + msg = "parsing datetimes with mixed time zones will raise an error" with tm.assert_produces_warning(FutureWarning, match=msg): result, result_tz = tslib.array_to_datetime(data) expected = np.array(
Backport PR #55611: DEPR: correct warning message while parsing datetimes with mixed time zones if utc=False
https://api.github.com/repos/pandas-dev/pandas/pulls/55613
2023-10-20T23:09:02Z
2023-10-21T19:43:32Z
2023-10-21T19:43:32Z
2023-10-21T19:43:32Z
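For context, a minimal sketch (assuming pandas 2.1 with the backport above applied) of the corrected deprecation message: parsing mixed UTC offsets with the default utc=False emits the FutureWarning whose text this PR changes from "raise a warning" to "raise an error", while passing utc=True opts in to the future behavior and silences it. The input strings are the ones used in the to_datetime docstring example in the diff.

import warnings
import pandas as pd

data = ["2020-10-25 02:00 +0200", "2020-10-25 04:00 +0100"]

# utc defaults to False, so mixed offsets trigger the (corrected) warning.
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    pd.to_datetime(data)
assert any(
    "mixed time zones will raise an error" in str(w.message) for w in caught
)

# utc=True converts everything to UTC and emits no mixed-offset warning.
result = pd.to_datetime(data, utc=True)
assert str(result.tz) == "UTC"

The same corrected substring is what the updated `msg = "parsing datetimes with mixed time zones will raise an error"` match patterns in the test portion of the diff assert against.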
REF: separate out _parse_with_format
diff --git a/pandas/_libs/tslibs/strptime.pyx b/pandas/_libs/tslibs/strptime.pyx index 866181246a284..717ec41a918d8 100644 --- a/pandas/_libs/tslibs/strptime.pyx +++ b/pandas/_libs/tslibs/strptime.pyx @@ -179,11 +179,7 @@ def array_strptime( npy_datetimestruct dts int64_t[::1] iresult object[::1] result_timezone - int year, month, day, minute, hour, second, weekday, julian - int week_of_year, week_of_year_start, parse_code, ordinal - int iso_week, iso_year - int64_t us, ns - object val, group_key, ampm, found, tz + object val, tz bint is_raise = errors=="raise" bint is_ignore = errors=="ignore" bint is_coerce = errors=="coerce" @@ -351,173 +347,9 @@ def array_strptime( if not string_to_dts_succeeded and fmt == "ISO8601": raise ValueError(f"Time data {val} is not ISO8601 format") - # exact matching - if exact: - found = format_regex.match(val) - if not found: - raise ValueError( - f"time data \"{val}\" doesn't match format \"{fmt}\"" - ) - if len(val) != found.end(): - raise ValueError( - "unconverted data remains when parsing with " - f"format \"{fmt}\": \"{val[found.end():]}\"" - ) - - # search - else: - found = format_regex.search(val) - if not found: - raise ValueError( - f"time data \"{val}\" doesn't match format \"{fmt}\"" - ) - - iso_year = -1 - year = 1900 - month = day = 1 - hour = minute = second = ns = us = 0 - tz = None - # Default to -1 to signify that values not known; not critical to have, - # though - iso_week = week_of_year = -1 - week_of_year_start = -1 - # weekday and julian defaulted to -1 so as to signal need to calculate - # values - weekday = julian = -1 - found_dict = found.groupdict() - for group_key in found_dict.iterkeys(): - # Directives not explicitly handled below: - # c, x, X - # handled by making out of other directives - # U, W - # worthless without day of the week - parse_code = _parse_code_table[group_key] - - if parse_code == 0: - year = int(found_dict["y"]) - # Open Group specification for strptime() states that a %y - # value in the range of [00, 68] is in the century 2000, while - # [69,99] is in the century 1900 - if year <= 68: - year += 2000 - else: - year += 1900 - elif parse_code == 1: - year = int(found_dict["Y"]) - elif parse_code == 2: - month = int(found_dict["m"]) - # elif group_key == 'B': - elif parse_code == 3: - month = locale_time.f_month.index(found_dict["B"].lower()) - # elif group_key == 'b': - elif parse_code == 4: - month = locale_time.a_month.index(found_dict["b"].lower()) - # elif group_key == 'd': - elif parse_code == 5: - day = int(found_dict["d"]) - # elif group_key == 'H': - elif parse_code == 6: - hour = int(found_dict["H"]) - elif parse_code == 7: - hour = int(found_dict["I"]) - ampm = found_dict.get("p", "").lower() - # If there was no AM/PM indicator, we'll treat this like AM - if ampm in ("", locale_time.am_pm[0]): - # We're in AM so the hour is correct unless we're - # looking at 12 midnight. - # 12 midnight == 12 AM == hour 0 - if hour == 12: - hour = 0 - elif ampm == locale_time.am_pm[1]: - # We're in PM so we need to add 12 to the hour unless - # we're looking at 12 noon. 
- # 12 noon == 12 PM == hour 12 - if hour != 12: - hour += 12 - elif parse_code == 8: - minute = int(found_dict["M"]) - elif parse_code == 9: - second = int(found_dict["S"]) - elif parse_code == 10: - s = found_dict["f"] - # Pad to always return nanoseconds - s += "0" * (9 - len(s)) - us = long(s) - ns = us % 1000 - us = us // 1000 - elif parse_code == 11: - weekday = locale_time.f_weekday.index(found_dict["A"].lower()) - elif parse_code == 12: - weekday = locale_time.a_weekday.index(found_dict["a"].lower()) - elif parse_code == 13: - weekday = int(found_dict["w"]) - if weekday == 0: - weekday = 6 - else: - weekday -= 1 - elif parse_code == 14: - julian = int(found_dict["j"]) - elif parse_code == 15 or parse_code == 16: - week_of_year = int(found_dict[group_key]) - if group_key == "U": - # U starts week on Sunday. - week_of_year_start = 6 - else: - # W starts week on Monday. - week_of_year_start = 0 - elif parse_code == 17: - tz = pytz.timezone(found_dict["Z"]) - elif parse_code == 19: - tz = parse_timezone_directive(found_dict["z"]) - elif parse_code == 20: - iso_year = int(found_dict["G"]) - elif parse_code == 21: - iso_week = int(found_dict["V"]) - elif parse_code == 22: - weekday = int(found_dict["u"]) - weekday -= 1 - - # If we know the wk of the year and what day of that wk, we can figure - # out the Julian day of the year. - if julian == -1 and weekday != -1: - if week_of_year != -1: - week_starts_Mon = week_of_year_start == 0 - julian = _calc_julian_from_U_or_W(year, week_of_year, weekday, - week_starts_Mon) - elif iso_year != -1 and iso_week != -1: - year, julian = _calc_julian_from_V(iso_year, iso_week, - weekday + 1) - # Cannot pre-calculate date() since can change in Julian - # calculation and thus could have different value for the day of the wk - # calculation. - if julian == -1: - # Need to add 1 to result since first day of the year is 1, not - # 0. - ordinal = date(year, month, day).toordinal() - julian = ordinal - date(year, 1, 1).toordinal() + 1 - else: - # Assume that if they bothered to include Julian day it will - # be accurate. 
- datetime_result = date.fromordinal( - (julian - 1) + date(year, 1, 1).toordinal()) - year = datetime_result.year - month = datetime_result.month - day = datetime_result.day - if weekday == -1: - weekday = date(year, month, day).weekday() - - dts.year = year - dts.month = month - dts.day = day - dts.hour = hour - dts.min = minute - dts.sec = second - dts.us = us - dts.ps = ns * 1000 - - iresult[i] = npy_datetimestruct_to_datetime(NPY_FR_ns, &dts) - check_dts_bounds(&dts) - + tz = _parse_with_format( + val, fmt, exact, format_regex, locale_time, &iresult[i] + ) result_timezone[i] = tz except (ValueError, OutOfBoundsDatetime) as ex: @@ -540,6 +372,190 @@ def array_strptime( return result, result_timezone.base +cdef tzinfo _parse_with_format( + str val, str fmt, bint exact, format_regex, locale_time, int64_t* iresult +): + cdef: + npy_datetimestruct dts + int year, month, day, minute, hour, second, weekday, julian + int week_of_year, week_of_year_start, parse_code, ordinal + int iso_week, iso_year + int64_t us, ns + object found + tzinfo tz + dict found_dict + str group_key, ampm + + if exact: + # exact matching + found = format_regex.match(val) + if not found: + raise ValueError( + f"time data \"{val}\" doesn't match format \"{fmt}\"" + ) + if len(val) != found.end(): + raise ValueError( + "unconverted data remains when parsing with " + f"format \"{fmt}\": \"{val[found.end():]}\"" + ) + + else: + # search + found = format_regex.search(val) + if not found: + raise ValueError( + f"time data \"{val}\" doesn't match format \"{fmt}\"" + ) + + iso_year = -1 + year = 1900 + month = day = 1 + hour = minute = second = ns = us = 0 + tz = None + # Default to -1 to signify that values not known; not critical to have, + # though + iso_week = week_of_year = -1 + week_of_year_start = -1 + # weekday and julian defaulted to -1 so as to signal need to calculate + # values + weekday = julian = -1 + found_dict = found.groupdict() + for group_key in found_dict.iterkeys(): + # Directives not explicitly handled below: + # c, x, X + # handled by making out of other directives + # U, W + # worthless without day of the week + parse_code = _parse_code_table[group_key] + + if parse_code == 0: + year = int(found_dict["y"]) + # Open Group specification for strptime() states that a %y + # value in the range of [00, 68] is in the century 2000, while + # [69,99] is in the century 1900 + if year <= 68: + year += 2000 + else: + year += 1900 + elif parse_code == 1: + year = int(found_dict["Y"]) + elif parse_code == 2: + month = int(found_dict["m"]) + # elif group_key == 'B': + elif parse_code == 3: + month = locale_time.f_month.index(found_dict["B"].lower()) + # elif group_key == 'b': + elif parse_code == 4: + month = locale_time.a_month.index(found_dict["b"].lower()) + # elif group_key == 'd': + elif parse_code == 5: + day = int(found_dict["d"]) + # elif group_key == 'H': + elif parse_code == 6: + hour = int(found_dict["H"]) + elif parse_code == 7: + hour = int(found_dict["I"]) + ampm = found_dict.get("p", "").lower() + # If there was no AM/PM indicator, we'll treat this like AM + if ampm in ("", locale_time.am_pm[0]): + # We're in AM so the hour is correct unless we're + # looking at 12 midnight. + # 12 midnight == 12 AM == hour 0 + if hour == 12: + hour = 0 + elif ampm == locale_time.am_pm[1]: + # We're in PM so we need to add 12 to the hour unless + # we're looking at 12 noon. 
+ # 12 noon == 12 PM == hour 12 + if hour != 12: + hour += 12 + elif parse_code == 8: + minute = int(found_dict["M"]) + elif parse_code == 9: + second = int(found_dict["S"]) + elif parse_code == 10: + s = found_dict["f"] + # Pad to always return nanoseconds + s += "0" * (9 - len(s)) + us = long(s) + ns = us % 1000 + us = us // 1000 + elif parse_code == 11: + weekday = locale_time.f_weekday.index(found_dict["A"].lower()) + elif parse_code == 12: + weekday = locale_time.a_weekday.index(found_dict["a"].lower()) + elif parse_code == 13: + weekday = int(found_dict["w"]) + if weekday == 0: + weekday = 6 + else: + weekday -= 1 + elif parse_code == 14: + julian = int(found_dict["j"]) + elif parse_code == 15 or parse_code == 16: + week_of_year = int(found_dict[group_key]) + if group_key == "U": + # U starts week on Sunday. + week_of_year_start = 6 + else: + # W starts week on Monday. + week_of_year_start = 0 + elif parse_code == 17: + tz = pytz.timezone(found_dict["Z"]) + elif parse_code == 19: + tz = parse_timezone_directive(found_dict["z"]) + elif parse_code == 20: + iso_year = int(found_dict["G"]) + elif parse_code == 21: + iso_week = int(found_dict["V"]) + elif parse_code == 22: + weekday = int(found_dict["u"]) + weekday -= 1 + + # If we know the wk of the year and what day of that wk, we can figure + # out the Julian day of the year. + if julian == -1 and weekday != -1: + if week_of_year != -1: + week_starts_Mon = week_of_year_start == 0 + julian = _calc_julian_from_U_or_W(year, week_of_year, weekday, + week_starts_Mon) + elif iso_year != -1 and iso_week != -1: + year, julian = _calc_julian_from_V(iso_year, iso_week, + weekday + 1) + # Cannot pre-calculate date() since can change in Julian + # calculation and thus could have different value for the day of the wk + # calculation. + if julian == -1: + # Need to add 1 to result since first day of the year is 1, not + # 0. + ordinal = date(year, month, day).toordinal() + julian = ordinal - date(year, 1, 1).toordinal() + 1 + else: + # Assume that if they bothered to include Julian day it will + # be accurate. + datetime_result = date.fromordinal( + (julian - 1) + date(year, 1, 1).toordinal()) + year = datetime_result.year + month = datetime_result.month + day = datetime_result.day + if weekday == -1: + weekday = date(year, month, day).weekday() + + dts.year = year + dts.month = month + dts.day = day + dts.hour = hour + dts.min = minute + dts.sec = second + dts.us = us + dts.ps = ns * 1000 + + iresult[0] = npy_datetimestruct_to_datetime(NPY_FR_ns, &dts) + check_dts_bounds(&dts) + + return tz + + class TimeRE(_TimeRE): """ Handle conversion from format directives to regexes.
The non-string paths in array_strptime look a lot like the non-string paths in array_to_datetime, and I'd like to share more of that code between the two. This is a step in that direction.
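A rough pure-Python analogue of the refactor's shape (not the Cython code itself; `datetime.strptime` stands in here for the regex-driven parser): the per-element logic moves into a helper that parses one value and reports its timezone, so the ~20 loop-local parsing variables live in the helper's scope and the outer loop only fills the result containers.

```python
from __future__ import annotations

from datetime import datetime, tzinfo


def _parse_with_format(val: str, fmt: str) -> tuple[datetime, tzinfo | None]:
    # Parse a single string with the given format; hand back the naive
    # datetime and its timezone separately, mirroring the Cython helper
    # (which writes an i8 value into the result buffer and returns tzinfo).
    parsed = datetime.strptime(val, fmt)
    return parsed.replace(tzinfo=None), parsed.tzinfo


def array_strptime(values: list[str], fmt: str):
    # The loop no longer carries any parsing state; it delegates per element.
    results, timezones = [], []
    for val in values:
        dt, tz = _parse_with_format(val, fmt)
        results.append(dt)
        timezones.append(tz)
    return results, timezones


print(array_strptime(["2003-01-01 12:00:00+05:30"], "%Y-%m-%d %H:%M:%S%z"))
```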
https://api.github.com/repos/pandas-dev/pandas/pulls/55612
2023-10-20T22:32:39Z
2023-10-22T19:30:00Z
2023-10-22T19:30:00Z
2023-10-22T20:24:22Z
DEPR: correct warning message while parsing datetimes with mixed time zones if utc=False
diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx index 3694a358cfb56..4989feaf84006 100644 --- a/pandas/_libs/tslib.pyx +++ b/pandas/_libs/tslib.pyx @@ -668,7 +668,7 @@ cdef _array_to_datetime_object( if len(unique_timezones) > 1: warnings.warn( "In a future version of pandas, parsing datetimes with mixed time " - "zones will raise a warning unless `utc=True`. " + "zones will raise an error unless `utc=True`. " "Please specify `utc=True` to opt in to the new behaviour " "and silence this warning. To create a `Series` with mixed offsets and " "`object` dtype, please use `apply` and `datetime.datetime.strptime`", diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py index e23bf6269893d..86063293eee25 100644 --- a/pandas/core/tools/datetimes.py +++ b/pandas/core/tools/datetimes.py @@ -356,7 +356,7 @@ def _return_parsed_timezone_results( if len(non_na_timezones) > 1: warnings.warn( "In a future version of pandas, parsing datetimes with mixed time " - "zones will raise a warning unless `utc=True`. Please specify `utc=True` " + "zones will raise an error unless `utc=True`. Please specify `utc=True` " "to opt in to the new behaviour and silence this warning. " "To create a `Series` with mixed offsets and `object` dtype, " "please use `apply` and `datetime.datetime.strptime`", @@ -788,7 +788,7 @@ def to_datetime( .. warning:: In a future version of pandas, parsing datetimes with mixed time - zones will raise a warning unless `utc=True`. + zones will raise an error unless `utc=True`. Please specify `utc=True` to opt in to the new behaviour and silence this warning. To create a `Series` with mixed offsets and `object` dtype, please use `apply` and `datetime.datetime.strptime`. @@ -1023,7 +1023,7 @@ def to_datetime( >>> pd.to_datetime(['2020-10-25 02:00 +0200', ... '2020-10-25 04:00 +0100']) # doctest: +SKIP FutureWarning: In a future version of pandas, parsing datetimes with mixed - time zones will raise a warning unless `utc=True`. Please specify `utc=True` + time zones will raise an error unless `utc=True`. Please specify `utc=True` to opt in to the new behaviour and silence this warning. To create a `Series` with mixed offsets and `object` dtype, please use `apply` and `datetime.datetime.strptime`. @@ -1037,7 +1037,7 @@ def to_datetime( >>> pd.to_datetime(["2020-01-01 01:00:00-01:00", ... datetime(2020, 1, 1, 3, 0)]) # doctest: +SKIP FutureWarning: In a future version of pandas, parsing datetimes with mixed - time zones will raise a warning unless `utc=True`. Please specify `utc=True` + time zones will raise an error unless `utc=True`. Please specify `utc=True` to opt in to the new behaviour and silence this warning. To create a `Series` with mixed offsets and `object` dtype, please use `apply` and `datetime.datetime.strptime`. 
diff --git a/pandas/io/json/_json.py b/pandas/io/json/_json.py index d596891197aaa..e17fcea0aae71 100644 --- a/pandas/io/json/_json.py +++ b/pandas/io/json/_json.py @@ -1326,7 +1326,7 @@ def _try_convert_to_date(self, data): warnings.filterwarnings( "ignore", ".*parsing datetimes with mixed time " - "zones will raise a warning", + "zones will raise an error", category=FutureWarning, ) new_data = to_datetime(new_data, errors="raise", unit=date_unit) diff --git a/pandas/io/parsers/base_parser.py b/pandas/io/parsers/base_parser.py index 60996a7d42187..6b1daa96782a0 100644 --- a/pandas/io/parsers/base_parser.py +++ b/pandas/io/parsers/base_parser.py @@ -1147,7 +1147,7 @@ def converter(*date_cols, col: Hashable): with warnings.catch_warnings(): warnings.filterwarnings( "ignore", - ".*parsing datetimes with mixed time zones will raise a warning", + ".*parsing datetimes with mixed time zones will raise an error", category=FutureWarning, ) result = tools.to_datetime( @@ -1169,7 +1169,7 @@ def converter(*date_cols, col: Hashable): warnings.filterwarnings( "ignore", ".*parsing datetimes with mixed time zones " - "will raise a warning", + "will raise an error", category=FutureWarning, ) result = tools.to_datetime( @@ -1187,7 +1187,7 @@ def converter(*date_cols, col: Hashable): warnings.filterwarnings( "ignore", ".*parsing datetimes with mixed time zones " - "will raise a warning", + "will raise an error", category=FutureWarning, ) return tools.to_datetime( diff --git a/pandas/tests/indexes/datetimes/test_constructors.py b/pandas/tests/indexes/datetimes/test_constructors.py index 598845471046f..624d990313dff 100644 --- a/pandas/tests/indexes/datetimes/test_constructors.py +++ b/pandas/tests/indexes/datetimes/test_constructors.py @@ -300,7 +300,7 @@ def test_construction_index_with_mixed_timezones(self): assert not isinstance(result, DatetimeIndex) msg = "DatetimeIndex has mixed timezones" - msg_depr = "parsing datetimes with mixed time zones will raise a warning" + msg_depr = "parsing datetimes with mixed time zones will raise an error" with pytest.raises(TypeError, match=msg): with tm.assert_produces_warning(FutureWarning, match=msg_depr): DatetimeIndex(["2013-11-02 22:00-05:00", "2013-11-03 22:00-06:00"]) diff --git a/pandas/tests/tools/test_to_datetime.py b/pandas/tests/tools/test_to_datetime.py index aefaba1aed058..b634265a06fb5 100644 --- a/pandas/tests/tools/test_to_datetime.py +++ b/pandas/tests/tools/test_to_datetime.py @@ -518,7 +518,7 @@ def test_to_datetime_parse_tzname_or_tzoffset_utc_false_deprecated( self, fmt, dates, expected_dates ): # GH 13486, 50887 - msg = "parsing datetimes with mixed time zones will raise a warning" + msg = "parsing datetimes with mixed time zones will raise an error" with tm.assert_produces_warning(FutureWarning, match=msg): result = to_datetime(dates, format=fmt) expected = Index(expected_dates) @@ -693,7 +693,7 @@ def test_to_datetime_mixed_datetime_and_string_with_format_mixed_offsets_utc_fal args = ["2000-01-01 01:00:00", "2000-01-01 02:00:00+00:00"] ts1 = constructor(args[0]) ts2 = args[1] - msg = "parsing datetimes with mixed time zones will raise a warning" + msg = "parsing datetimes with mixed time zones will raise an error" expected = Index( [ @@ -734,7 +734,7 @@ def test_to_datetime_mixed_datetime_and_string_with_format_mixed_offsets_utc_fal ) def test_to_datetime_mixed_offsets_with_none_tz(self, fmt, expected): # https://github.com/pandas-dev/pandas/issues/50071 - msg = "parsing datetimes with mixed time zones will raise a warning" + msg = "parsing 
datetimes with mixed time zones will raise an error" with tm.assert_produces_warning(FutureWarning, match=msg): result = to_datetime( @@ -1236,7 +1236,7 @@ def test_to_datetime_different_offsets(self, cache): ts_string_2 = "March 1, 2018 12:00:00+0500" arr = [ts_string_1] * 5 + [ts_string_2] * 5 expected = Index([parse(x) for x in arr]) - msg = "parsing datetimes with mixed time zones will raise a warning" + msg = "parsing datetimes with mixed time zones will raise an error" with tm.assert_produces_warning(FutureWarning, match=msg): result = to_datetime(arr, cache=cache) tm.assert_index_equal(result, expected) @@ -1604,7 +1604,7 @@ def test_to_datetime_coerce(self): "March 1, 2018 12:00:00+0500", "20100240", ] - msg = "parsing datetimes with mixed time zones will raise a warning" + msg = "parsing datetimes with mixed time zones will raise an error" with tm.assert_produces_warning(FutureWarning, match=msg): result = to_datetime(ts_strings, errors="coerce") expected = Index( @@ -1689,7 +1689,7 @@ def test_iso_8601_strings_with_same_offset(self): def test_iso_8601_strings_with_different_offsets(self): # GH 17697, 11736, 50887 ts_strings = ["2015-11-18 15:30:00+05:30", "2015-11-18 16:30:00+06:30", NaT] - msg = "parsing datetimes with mixed time zones will raise a warning" + msg = "parsing datetimes with mixed time zones will raise an error" with tm.assert_produces_warning(FutureWarning, match=msg): result = to_datetime(ts_strings) expected = np.array( @@ -1729,7 +1729,7 @@ def test_mixed_offsets_with_native_datetime_raises(self): now = Timestamp("now") today = Timestamp("today") - msg = "parsing datetimes with mixed time zones will raise a warning" + msg = "parsing datetimes with mixed time zones will raise an error" with tm.assert_produces_warning(FutureWarning, match=msg): mixed = to_datetime(ser) expected = Series( @@ -1810,7 +1810,7 @@ def test_to_datetime_fixed_offset(self): ) def test_to_datetime_mixed_offsets_with_utc_false_deprecated(self, date): # GH 50887 - msg = "parsing datetimes with mixed time zones will raise a warning" + msg = "parsing datetimes with mixed time zones will raise an error" with tm.assert_produces_warning(FutureWarning, match=msg): to_datetime(date, utc=False) @@ -3680,7 +3680,7 @@ def test_to_datetime_with_empty_str_utc_false_format_mixed(): def test_to_datetime_with_empty_str_utc_false_offsets_and_format_mixed(): # GH 50887 - msg = "parsing datetimes with mixed time zones will raise a warning" + msg = "parsing datetimes with mixed time zones will raise an error" with tm.assert_produces_warning(FutureWarning, match=msg): to_datetime( diff --git a/pandas/tests/tslibs/test_array_to_datetime.py b/pandas/tests/tslibs/test_array_to_datetime.py index 435fe5f4b90d8..829bb140e6e96 100644 --- a/pandas/tests/tslibs/test_array_to_datetime.py +++ b/pandas/tests/tslibs/test_array_to_datetime.py @@ -85,7 +85,7 @@ def test_parsing_different_timezone_offsets(): data = ["2015-11-18 15:30:00+05:30", "2015-11-18 15:30:00+06:30"] data = np.array(data, dtype=object) - msg = "parsing datetimes with mixed time zones will raise a warning" + msg = "parsing datetimes with mixed time zones will raise an error" with tm.assert_produces_warning(FutureWarning, match=msg): result, result_tz = tslib.array_to_datetime(data) expected = np.array(
xref #54014: replaced "warning" with "error" in the warning message emitted when parsing datetimes with mixed time zones and `utc=False`
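Assuming a pandas build that includes this change (2.1.2 through the 2.x line; later versions raise instead of warning), the reworded message can be checked directly by recording the warning:

```python
import warnings

import pandas as pd

# Trigger the deprecation with mixed offsets and utc left as False,
# then assert the reworded message is the one emitted.
with warnings.catch_warnings(record=True) as recorded:
    warnings.simplefilter("always")
    pd.to_datetime(["2020-01-01 00:00+01:00", "2020-01-01 00:00+02:00"])

assert any(
    "mixed time zones will raise an error" in str(w.message) for w in recorded
)
```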
https://api.github.com/repos/pandas-dev/pandas/pulls/55611
2023-10-20T19:13:41Z
2023-10-20T23:07:55Z
2023-10-20T23:07:55Z
2023-10-20T23:08:01Z
BUG: inferred_freq with non-nano
diff --git a/doc/source/whatsnew/v2.2.0.rst b/doc/source/whatsnew/v2.2.0.rst index c8c27f2f2e178..679845eb0098c 100644 --- a/doc/source/whatsnew/v2.2.0.rst +++ b/doc/source/whatsnew/v2.2.0.rst @@ -410,6 +410,7 @@ Styler Other ^^^^^ - Bug in :func:`cut` incorrectly allowing cutting of timezone-aware datetimes with timezone-naive bins (:issue:`54964`) +- Bug in :func:`infer_freq` and :meth:`DatetimeIndex.inferred_freq` with weekly frequencies and non-nanosecond resolutions (:issue:`55609`) - Bug in :meth:`DataFrame.apply` where passing ``raw=True`` ignored ``args`` passed to the applied function (:issue:`55009`) - Bug in rendering ``inf`` values inside a a :class:`DataFrame` with the ``use_inf_as_na`` option enabled (:issue:`55483`) - Bug in rendering a :class:`Series` with a :class:`MultiIndex` when one of the index level's names is 0 not having that name displayed (:issue:`55415`) diff --git a/pandas/tests/tseries/frequencies/test_inference.py b/pandas/tests/tseries/frequencies/test_inference.py index aec9915d69e3c..867363e1a03bc 100644 --- a/pandas/tests/tseries/frequencies/test_inference.py +++ b/pandas/tests/tseries/frequencies/test_inference.py @@ -229,10 +229,11 @@ def test_infer_freq_index(freq, expected): }.items() ), ) -def test_infer_freq_tz(tz_naive_fixture, expected, dates): - # see gh-7310 +@pytest.mark.parametrize("unit", ["s", "ms", "us", "ns"]) +def test_infer_freq_tz(tz_naive_fixture, expected, dates, unit): + # see gh-7310, GH#55609 tz = tz_naive_fixture - idx = DatetimeIndex(dates, tz=tz) + idx = DatetimeIndex(dates, tz=tz).as_unit(unit) assert idx.inferred_freq == expected diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py index 4bd558a30c92f..67958a03c3969 100644 --- a/pandas/tseries/frequencies.py +++ b/pandas/tseries/frequencies.py @@ -276,7 +276,7 @@ def fields(self) -> np.ndarray: # structured array of fields @cache_readonly def rep_stamp(self) -> Timestamp: - return Timestamp(self.i8values[0]) + return Timestamp(self.i8values[0], unit=self.index.unit) def month_position_check(self) -> str | None: return month_position_check(self.fields, self.index.dayofweek)
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
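A minimal reproduction sketch of the fix (dates chosen for illustration; assumes pandas ≥ 2.0 for `as_unit`): weekly frequency inference anchors on the first stamp's weekday, and before `unit=self.index.unit` was forwarded, `rep_stamp` read the first i8 value as nanoseconds regardless of the index's actual resolution.

```python
import pandas as pd

# 2016-01-03 is a Sunday; with the unit forwarded to rep_stamp, weekly
# frequency inference also works at "s"/"ms"/"us" resolutions.
idx = pd.date_range("2016-01-03", periods=5, freq="W-SUN").as_unit("s")
print(idx.inferred_freq)   # "W-SUN"
print(pd.infer_freq(idx))  # same result via the top-level helper
```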
https://api.github.com/repos/pandas-dev/pandas/pulls/55609
2023-10-20T16:22:18Z
2023-10-22T19:30:49Z
2023-10-22T19:30:49Z
2023-10-22T20:30:43Z
BUG: BusinessDay addition with non-nano
diff --git a/doc/source/whatsnew/v2.2.0.rst b/doc/source/whatsnew/v2.2.0.rst index b74f93942c411..412dd6ccb27f3 100644 --- a/doc/source/whatsnew/v2.2.0.rst +++ b/doc/source/whatsnew/v2.2.0.rst @@ -324,6 +324,7 @@ Datetimelike - Bug in :meth:`DatetimeIndex.union` returning object dtype for tz-aware indexes with the same timezone but different units (:issue:`55238`) - Bug in :meth:`Tick.delta` with very large ticks raising ``OverflowError`` instead of ``OutOfBoundsTimedelta`` (:issue:`55503`) - Bug in adding or subtracting a :class:`Week` offset to a ``datetime64`` :class:`Series`, :class:`Index`, or :class:`DataFrame` column with non-nanosecond resolution returning incorrect results (:issue:`55583`) +- Bug in addition or subtraction of :class:`BusinessDay` offset with ``offset`` attribute to non-nanosecond :class:`Index`, :class:`Series`, or :class:`DataFrame` column giving incorrect results (:issue:`55608`) - Bug in addition or subtraction of :class:`DateOffset` objects with microsecond components to ``datetime64`` :class:`Index`, :class:`Series`, or :class:`DataFrame` columns with non-nanosecond resolution (:issue:`55595`) - Bug in addition or subtraction of very large :class:`Tick` objects with :class:`Timestamp` or :class:`Timedelta` objects raising ``OverflowError`` instead of ``OutOfBoundsTimedelta`` (:issue:`55503`) - diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx index 45188c536d985..c23bccdea3a8a 100644 --- a/pandas/_libs/tslibs/offsets.pyx +++ b/pandas/_libs/tslibs/offsets.pyx @@ -1850,7 +1850,6 @@ cdef class BusinessDay(BusinessMixin): res = self._shift_bdays(i8other, reso=reso) if self.offset: res = res.view(dtarr.dtype) + Timedelta(self.offset) - res = res.view("i8") return res def is_on_offset(self, dt: datetime) -> bool: diff --git a/pandas/tests/tseries/offsets/test_business_hour.py b/pandas/tests/tseries/offsets/test_business_hour.py index f0d065f8bb7ef..2ecea6df16162 100644 --- a/pandas/tests/tseries/offsets/test_business_hour.py +++ b/pandas/tests/tseries/offsets/test_business_hour.py @@ -984,9 +984,17 @@ def test_short_datetimeindex_creation(self): expected4 = DatetimeIndex(["2014-07-01 10:00"], freq="bh") tm.assert_index_equal(idx4, expected4) - def test_bday_ignores_timedeltas(self): - idx = date_range("2010/02/01", "2010/02/10", freq="12h") - t1 = idx + BDay(offset=Timedelta(3, unit="h")) + @pytest.mark.parametrize("td_unit", ["s", "ms", "us", "ns"]) + @pytest.mark.parametrize("unit", ["s", "ms", "us", "ns"]) + def test_bday_ignores_timedeltas(self, unit, td_unit): + # GH#55608 + idx = date_range("2010/02/01", "2010/02/10", freq="12h", unit=unit) + td = Timedelta(3, unit="h").as_unit(td_unit) + off = BDay(offset=td) + t1 = idx + off + + exp_reso = max(td._creso, idx._data._creso) + exp_unit = {7: "s", 8: "ms", 9: "us", 10: "ns"}[exp_reso] expected = DatetimeIndex( [ @@ -1011,9 +1019,22 @@ def test_bday_ignores_timedeltas(self): "2010-02-11 03:00:00", ], freq=None, - ) + ).as_unit(exp_unit) tm.assert_index_equal(t1, expected) + # TODO(GH#55564): as_unit will be unnecessary + pointwise = DatetimeIndex([x + off for x in idx]).as_unit(exp_unit) + tm.assert_index_equal(pointwise, expected) + + def test_add_bday_offset_nanos(self): + # GH#55608 + idx = date_range("2010/02/01", "2010/02/10", freq="12h", unit="ns") + off = BDay(offset=Timedelta(3, unit="ns")) + + result = idx + off + expected = DatetimeIndex([x + off for x in idx]) + tm.assert_index_equal(result, expected) + class TestOpeningTimes: # opening time should be affected by sign of 
n, not by n's value and end
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
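A short sketch adapted from `test_bday_ignores_timedeltas` in the diff above (assumes pandas ≥ 2.0 for the `unit` argument of `date_range`): with the stray `res = res.view("i8")` removed, vectorized addition matches element-wise addition. Per the TODO referencing GH#55564, the result's resolution can still differ from the input's, so the check below compares element values rather than whole indexes.

```python
import pandas as pd
from pandas.tseries.offsets import BDay

# Second-resolution index plus a BusinessDay carrying a Timedelta offset.
idx = pd.date_range("2010-02-01", periods=4, freq="12h", unit="s")
off = BDay(offset=pd.Timedelta(3, unit="h"))

result = idx + off
pointwise = [ts + off for ts in idx]
assert list(result) == pointwise  # Timestamp equality is unit-independent
print(result)
```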
https://api.github.com/repos/pandas-dev/pandas/pulls/55608
2023-10-20T16:12:19Z
2023-10-25T17:15:09Z
2023-10-25T17:15:09Z
2023-10-25T17:38:12Z
TST: move misplaced tests
diff --git a/pandas/tests/arrays/test_datetimes.py b/pandas/tests/arrays/test_datetimes.py index fc46e5a372806..c2d68a79f32d4 100644 --- a/pandas/tests/arrays/test_datetimes.py +++ b/pandas/tests/arrays/test_datetimes.py @@ -746,14 +746,6 @@ def test_iter_zoneinfo_fold(self, tz): assert str(left) == str(right2) assert left.utcoffset() == right2.utcoffset() - def test_date_range_frequency_M_deprecated(self): - depr_msg = "'M' will be deprecated, please use 'ME' instead." - - expected = pd.date_range("1/1/2000", periods=4, freq="2ME") - with tm.assert_produces_warning(UserWarning, match=depr_msg): - result = pd.date_range("1/1/2000", periods=4, freq="2M") - tm.assert_index_equal(result, expected) - def test_factorize_sort_without_freq(): dta = DatetimeArray._from_sequence([0, 2, 1]) diff --git a/pandas/tests/frame/methods/test_map.py b/pandas/tests/frame/methods/test_map.py index 0de88114af199..03681c3df844e 100644 --- a/pandas/tests/frame/methods/test_map.py +++ b/pandas/tests/frame/methods/test_map.py @@ -12,6 +12,8 @@ ) import pandas._testing as tm +from pandas.tseries.offsets import BDay + def test_map(float_frame): result = float_frame.map(lambda x: x * 2) @@ -158,8 +160,6 @@ def test_map_box(): def test_frame_map_dont_convert_datetime64(): - from pandas.tseries.offsets import BDay - df = DataFrame({"x1": [datetime(1996, 1, 1)]}) df = df.map(lambda x: x + BDay()) diff --git a/pandas/tests/groupby/test_grouping.py b/pandas/tests/groupby/test_grouping.py index 76a543050097d..8c2b95ba631ee 100644 --- a/pandas/tests/groupby/test_grouping.py +++ b/pandas/tests/groupby/test_grouping.py @@ -245,6 +245,7 @@ def test_grouper_creation_bug(self): expected = expected.loc[:, ["A", "B"]] tm.assert_frame_equal(result, expected) + def test_grouper_creation_bug2(self): # GH14334 # Grouper(key=...) 
may be passed in a list df = DataFrame( @@ -275,15 +276,16 @@ def test_grouper_creation_bug(self): result = g.sum() tm.assert_frame_equal(result, expected) + def test_grouper_creation_bug3(self): # GH8866 - s = Series( + ser = Series( np.arange(8, dtype="int64"), index=MultiIndex.from_product( [list("ab"), range(2), date_range("20130101", periods=2)], names=["one", "two", "three"], ), ) - result = s.groupby(Grouper(level="three", freq="ME")).sum() + result = ser.groupby(Grouper(level="three", freq="ME")).sum() expected = Series( [28], index=pd.DatetimeIndex([Timestamp("2013-01-31")], freq="ME", name="three"), @@ -291,8 +293,8 @@ def test_grouper_creation_bug(self): tm.assert_series_equal(result, expected) # just specifying a level breaks - result = s.groupby(Grouper(level="one")).sum() - expected = s.groupby(level="one").sum() + result = ser.groupby(Grouper(level="one")).sum() + expected = ser.groupby(level="one").sum() tm.assert_series_equal(result, expected) def test_grouper_column_and_index(self): diff --git a/pandas/tests/indexes/datetimes/test_constructors.py b/pandas/tests/indexes/datetimes/test_constructors.py index 077b4fa5a0696..598845471046f 100644 --- a/pandas/tests/indexes/datetimes/test_constructors.py +++ b/pandas/tests/indexes/datetimes/test_constructors.py @@ -1110,9 +1110,3 @@ def test_pass_datetimeindex_to_index(self): expected = Index(rng.to_pydatetime(), dtype=object) tm.assert_numpy_array_equal(idx.values, expected.values) - - def test_date_range_tuple_freq_raises(self): - # GH#34703 - edate = datetime(2000, 1, 1) - with pytest.raises(TypeError, match="pass as a string instead"): - date_range(end=edate, freq=("D", 5), periods=20) diff --git a/pandas/tests/indexes/datetimes/test_date_range.py b/pandas/tests/indexes/datetimes/test_date_range.py index a74d31747fbb0..e8113b851fe87 100644 --- a/pandas/tests/indexes/datetimes/test_date_range.py +++ b/pandas/tests/indexes/datetimes/test_date_range.py @@ -124,6 +124,20 @@ def test_date_range_timestamp_equiv_preserve_frequency(self): class TestDateRanges: + def test_date_range_frequency_M_deprecated(self): + depr_msg = "'M' will be deprecated, please use 'ME' instead." 
+ + expected = date_range("1/1/2000", periods=4, freq="2ME") + with tm.assert_produces_warning(UserWarning, match=depr_msg): + result = date_range("1/1/2000", periods=4, freq="2M") + tm.assert_index_equal(result, expected) + + def test_date_range_tuple_freq_raises(self): + # GH#34703 + edate = datetime(2000, 1, 1) + with pytest.raises(TypeError, match="pass as a string instead"): + date_range(end=edate, freq=("D", 5), periods=20) + @pytest.mark.parametrize("freq", ["ns", "us", "ms", "min", "s", "h", "D"]) def test_date_range_edges(self, freq): # GH#13672 @@ -911,6 +925,45 @@ def test_date_range_with_tz(self, tzstr): assert stamp == rng[1] + @pytest.mark.parametrize("tz", ["Europe/London", "dateutil/Europe/London"]) + def test_date_range_ambiguous_endpoint(self, tz): + # construction with an ambiguous end-point + # GH#11626 + + with pytest.raises(pytz.AmbiguousTimeError, match="Cannot infer dst time"): + date_range( + "2013-10-26 23:00", "2013-10-27 01:00", tz="Europe/London", freq="h" + ) + + times = date_range( + "2013-10-26 23:00", "2013-10-27 01:00", freq="h", tz=tz, ambiguous="infer" + ) + assert times[0] == Timestamp("2013-10-26 23:00", tz=tz) + assert times[-1] == Timestamp("2013-10-27 01:00:00+0000", tz=tz) + + @pytest.mark.parametrize( + "tz, option, expected", + [ + ["US/Pacific", "shift_forward", "2019-03-10 03:00"], + ["dateutil/US/Pacific", "shift_forward", "2019-03-10 03:00"], + ["US/Pacific", "shift_backward", "2019-03-10 01:00"], + ["dateutil/US/Pacific", "shift_backward", "2019-03-10 01:00"], + ["US/Pacific", timedelta(hours=1), "2019-03-10 03:00"], + ], + ) + def test_date_range_nonexistent_endpoint(self, tz, option, expected): + # construction with an nonexistent end-point + + with pytest.raises(pytz.NonExistentTimeError, match="2019-03-10 02:00:00"): + date_range( + "2019-03-10 00:00", "2019-03-10 02:00", tz="US/Pacific", freq="h" + ) + + times = date_range( + "2019-03-10 00:00", "2019-03-10 02:00", freq="h", tz=tz, nonexistent=option + ) + assert times[-1] == Timestamp(expected, tz=tz) + class TestGenRangeGeneration: def test_generate(self): diff --git a/pandas/tests/indexes/datetimes/test_datetimelike.py b/pandas/tests/indexes/datetimes/test_datetimelike.py deleted file mode 100644 index a012a2985b41c..0000000000000 --- a/pandas/tests/indexes/datetimes/test_datetimelike.py +++ /dev/null @@ -1,13 +0,0 @@ -""" generic tests from the Datetimelike class """ -from pandas import date_range -import pandas._testing as tm - - -class TestDatetimeIndex: - def test_format(self): - # GH35439 - idx = date_range("20130101", periods=5) - expected = [f"{x:%Y-%m-%d}" for x in idx] - msg = r"DatetimeIndex\.format is deprecated" - with tm.assert_produces_warning(FutureWarning, match=msg): - assert idx.format() == expected diff --git a/pandas/tests/indexes/datetimes/test_formats.py b/pandas/tests/indexes/datetimes/test_formats.py index 6f75ac1b569c0..deed5926bca91 100644 --- a/pandas/tests/indexes/datetimes/test_formats.py +++ b/pandas/tests/indexes/datetimes/test_formats.py @@ -8,6 +8,7 @@ import pandas as pd from pandas import ( DatetimeIndex, + NaT, Series, ) import pandas._testing as tm @@ -33,7 +34,7 @@ def test_get_values_for_csv(): tm.assert_numpy_array_equal(result, expected) # NULL object handling should work - index = DatetimeIndex(["2017-01-01", pd.NaT, "2017-01-03"]) + index = DatetimeIndex(["2017-01-01", NaT, "2017-01-03"]) expected = np.array(["2017-01-01", "NaT", "2017-01-03"], dtype=object) result = index._get_values_for_csv(na_rep="NaT") @@ -58,6 +59,20 @@ def 
test_get_values_for_csv(): class TestDatetimeIndexRendering: + def test_dti_repr_dates(self): + text = str(pd.to_datetime([datetime(2013, 1, 1), datetime(2014, 1, 1)])) + assert "['2013-01-01'," in text + assert ", '2014-01-01']" in text + + def test_dti_repr_mixed(self): + text = str( + pd.to_datetime( + [datetime(2013, 1, 1), datetime(2014, 1, 1, 12), datetime(2014, 1, 1)] + ) + ) + assert "'2013-01-01 00:00:00'," in text + assert "'2014-01-01 00:00:00']" in text + def test_dti_repr_short(self): dr = pd.date_range(start="1/1/2012", periods=1) repr(dr) @@ -114,11 +129,11 @@ def test_dti_representation(self, method): ) idxs.append( DatetimeIndex( - ["2011-01-01 09:00", "2011-01-01 10:00", pd.NaT], tz="US/Eastern" + ["2011-01-01 09:00", "2011-01-01 10:00", NaT], tz="US/Eastern" ) ) idxs.append( - DatetimeIndex(["2011-01-01 09:00", "2011-01-01 10:00", pd.NaT], tz="UTC") + DatetimeIndex(["2011-01-01 09:00", "2011-01-01 10:00", NaT], tz="UTC") ) exp = [] @@ -165,7 +180,7 @@ def test_dti_representation_to_series(self): tz="Asia/Tokyo", ) idx6 = DatetimeIndex( - ["2011-01-01 09:00", "2011-01-01 10:00", pd.NaT], tz="US/Eastern" + ["2011-01-01 09:00", "2011-01-01 10:00", NaT], tz="US/Eastern" ) idx7 = DatetimeIndex(["2011-01-01 09:00", "2011-01-02 10:15"]) @@ -222,7 +237,7 @@ def test_dti_summary(self): tz="Asia/Tokyo", ) idx6 = DatetimeIndex( - ["2011-01-01 09:00", "2011-01-01 10:00", pd.NaT], tz="US/Eastern" + ["2011-01-01 09:00", "2011-01-01 10:00", NaT], tz="US/Eastern" ) exp1 = "DatetimeIndex: 0 entries\nFreq: D" @@ -281,6 +296,14 @@ def test_dti_custom_business_summary_dateutil(self): class TestFormat: + def test_format(self): + # GH#35439 + idx = pd.date_range("20130101", periods=5) + expected = [f"{x:%Y-%m-%d}" for x in idx] + msg = r"DatetimeIndex\.format is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + assert idx.format() == expected + def test_format_with_name_time_info(self): # bug I fixed 12/20/2011 dates = pd.date_range("2011-01-01 04:00:00", periods=10, name="something") @@ -299,3 +322,37 @@ def test_format_datetime_with_time(self): expected = ["2012-02-07 00:00:00", "2012-02-07 23:00:00"] assert len(result) == 2 assert result == expected + + def test_format_datetime(self): + msg = "DatetimeIndex.format is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + formatted = pd.to_datetime([datetime(2003, 1, 1, 12), NaT]).format() + assert formatted[0] == "2003-01-01 12:00:00" + assert formatted[1] == "NaT" + + def test_format_date(self): + msg = "DatetimeIndex.format is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + formatted = pd.to_datetime([datetime(2003, 1, 1), NaT]).format() + assert formatted[0] == "2003-01-01" + assert formatted[1] == "NaT" + + def test_format_date_tz(self): + dti = pd.to_datetime([datetime(2013, 1, 1)], utc=True) + msg = "DatetimeIndex.format is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + formatted = dti.format() + assert formatted[0] == "2013-01-01 00:00:00+00:00" + + dti = pd.to_datetime([datetime(2013, 1, 1), NaT], utc=True) + with tm.assert_produces_warning(FutureWarning, match=msg): + formatted = dti.format() + assert formatted[0] == "2013-01-01 00:00:00+00:00" + + def test_format_date_explicit_date_format(self): + dti = pd.to_datetime([datetime(2003, 2, 1), NaT]) + msg = "DatetimeIndex.format is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + formatted = dti.format(date_format="%m-%d-%Y", na_rep="UT") + assert 
formatted[0] == "02-01-2003" + assert formatted[1] == "UT" diff --git a/pandas/tests/indexes/datetimes/test_setops.py b/pandas/tests/indexes/datetimes/test_setops.py index 6071c7fa8319b..4f86e3c721aab 100644 --- a/pandas/tests/indexes/datetimes/test_setops.py +++ b/pandas/tests/indexes/datetimes/test_setops.py @@ -1,4 +1,7 @@ -from datetime import datetime +from datetime import ( + datetime, + timezone, +) import numpy as np import pytest @@ -12,6 +15,7 @@ DatetimeIndex, Index, Series, + Timestamp, bdate_range, date_range, ) @@ -416,6 +420,52 @@ def test_intersection_non_tick_no_fastpath(self): expected = dti[:0] tm.assert_index_equal(result, expected) + def test_dti_intersection(self): + rng = date_range("1/1/2011", periods=100, freq="h", tz="utc") + + left = rng[10:90][::-1] + right = rng[20:80][::-1] + + assert left.tz == rng.tz + result = left.intersection(right) + assert result.tz == left.tz + + # Note: not difference, as there is no symmetry requirement there + @pytest.mark.parametrize("setop", ["union", "intersection", "symmetric_difference"]) + def test_dti_setop_aware(self, setop): + # non-overlapping + # GH#39328 as of 2.0 we cast these to UTC instead of object + rng = date_range("2012-11-15 00:00:00", periods=6, freq="h", tz="US/Central") + + rng2 = date_range("2012-11-15 12:00:00", periods=6, freq="h", tz="US/Eastern") + + result = getattr(rng, setop)(rng2) + + left = rng.tz_convert("UTC") + right = rng2.tz_convert("UTC") + expected = getattr(left, setop)(right) + tm.assert_index_equal(result, expected) + assert result.tz == left.tz + if len(result): + assert result[0].tz is timezone.utc + assert result[-1].tz is timezone.utc + + def test_dti_union_mixed(self): + # GH#21671 + rng = DatetimeIndex([Timestamp("2011-01-01"), pd.NaT]) + rng2 = DatetimeIndex(["2012-01-01", "2012-01-02"], tz="Asia/Tokyo") + result = rng.union(rng2) + expected = Index( + [ + Timestamp("2011-01-01"), + pd.NaT, + Timestamp("2012-01-01", tz="Asia/Tokyo"), + Timestamp("2012-01-02", tz="Asia/Tokyo"), + ], + dtype=object, + ) + tm.assert_index_equal(result, expected) + class TestBusinessDatetimeIndex: def test_union(self, sort): @@ -500,15 +550,13 @@ def test_intersection_bug(self): def test_intersection_list(self): # GH#35876 # values is not an Index -> no name -> retain "a" - values = [pd.Timestamp("2020-01-01"), pd.Timestamp("2020-02-01")] + values = [Timestamp("2020-01-01"), Timestamp("2020-02-01")] idx = DatetimeIndex(values, name="a") res = idx.intersection(values) tm.assert_index_equal(res, idx) def test_month_range_union_tz_pytz(self, sort): - from pytz import timezone - - tz = timezone("US/Eastern") + tz = pytz.timezone("US/Eastern") early_start = datetime(2011, 1, 1) early_end = datetime(2011, 3, 1) @@ -543,13 +591,13 @@ def test_intersection_duplicates(self, sort): # GH#38196 idx1 = Index( [ - pd.Timestamp("2019-12-13"), - pd.Timestamp("2019-12-12"), - pd.Timestamp("2019-12-12"), + Timestamp("2019-12-13"), + Timestamp("2019-12-12"), + Timestamp("2019-12-12"), ] ) result = idx1.intersection(idx1, sort=sort) - expected = Index([pd.Timestamp("2019-12-13"), pd.Timestamp("2019-12-12")]) + expected = Index([Timestamp("2019-12-13"), Timestamp("2019-12-12")]) tm.assert_index_equal(result, expected) diff --git a/pandas/tests/indexes/datetimes/test_timezones.py b/pandas/tests/indexes/datetimes/test_timezones.py index eb54ea8e4316f..8590a2542a31d 100644 --- a/pandas/tests/indexes/datetimes/test_timezones.py +++ b/pandas/tests/indexes/datetimes/test_timezones.py @@ -605,46 +605,6 @@ def 
test_dti_tz_localize_ambiguous_flags(self, tz): localized_is_dst = dr.tz_localize(tz, ambiguous=is_dst) tm.assert_index_equal(localized, localized_is_dst) - # TODO: belongs outside tz_localize tests? - @pytest.mark.parametrize("tz", ["Europe/London", "dateutil/Europe/London"]) - def test_dti_construction_ambiguous_endpoint(self, tz): - # construction with an ambiguous end-point - # GH#11626 - - with pytest.raises(pytz.AmbiguousTimeError, match="Cannot infer dst time"): - date_range( - "2013-10-26 23:00", "2013-10-27 01:00", tz="Europe/London", freq="h" - ) - - times = date_range( - "2013-10-26 23:00", "2013-10-27 01:00", freq="h", tz=tz, ambiguous="infer" - ) - assert times[0] == Timestamp("2013-10-26 23:00", tz=tz) - assert times[-1] == Timestamp("2013-10-27 01:00:00+0000", tz=tz) - - @pytest.mark.parametrize( - "tz, option, expected", - [ - ["US/Pacific", "shift_forward", "2019-03-10 03:00"], - ["dateutil/US/Pacific", "shift_forward", "2019-03-10 03:00"], - ["US/Pacific", "shift_backward", "2019-03-10 01:00"], - ["dateutil/US/Pacific", "shift_backward", "2019-03-10 01:00"], - ["US/Pacific", timedelta(hours=1), "2019-03-10 03:00"], - ], - ) - def test_dti_construction_nonexistent_endpoint(self, tz, option, expected): - # construction with an nonexistent end-point - - with pytest.raises(pytz.NonExistentTimeError, match="2019-03-10 02:00:00"): - date_range( - "2019-03-10 00:00", "2019-03-10 02:00", tz="US/Pacific", freq="h" - ) - - times = date_range( - "2019-03-10 00:00", "2019-03-10 02:00", freq="h", tz=tz, nonexistent=option - ) - assert times[-1] == Timestamp(expected, tz=tz) - def test_dti_tz_localize_bdate_range(self): dr = bdate_range("1/1/2009", "1/1/2010") dr_utc = bdate_range("1/1/2009", "1/1/2010", tz=pytz.utc) @@ -973,16 +933,6 @@ def test_timestamp_equality_different_timezones(self): assert (utc_range == berlin_range).all() assert (berlin_range == eastern_range).all() - def test_dti_intersection(self): - rng = date_range("1/1/2011", periods=100, freq="h", tz="utc") - - left = rng[10:90][::-1] - right = rng[20:80][::-1] - - assert left.tz == rng.tz - result = left.intersection(right) - assert result.tz == left.tz - def test_dti_equals_with_tz(self): left = date_range("1/1/2011", periods=100, freq="h", tz="utc") right = date_range("1/1/2011", periods=100, freq="h", tz="US/Eastern") @@ -1143,42 +1093,6 @@ def test_dti_convert_tz_aware_datetime_datetime(self, tz): tm.assert_numpy_array_equal(converted.asi8, ex_vals) assert converted.tz is timezone.utc - # Note: not difference, as there is no symmetry requirement there - @pytest.mark.parametrize("setop", ["union", "intersection", "symmetric_difference"]) - def test_dti_setop_aware(self, setop): - # non-overlapping - # GH#39328 as of 2.0 we cast these to UTC instead of object - rng = date_range("2012-11-15 00:00:00", periods=6, freq="h", tz="US/Central") - - rng2 = date_range("2012-11-15 12:00:00", periods=6, freq="h", tz="US/Eastern") - - result = getattr(rng, setop)(rng2) - - left = rng.tz_convert("UTC") - right = rng2.tz_convert("UTC") - expected = getattr(left, setop)(right) - tm.assert_index_equal(result, expected) - assert result.tz == left.tz - if len(result): - assert result[0].tz is timezone.utc - assert result[-1].tz is timezone.utc - - def test_dti_union_mixed(self): - # GH 21671 - rng = DatetimeIndex([Timestamp("2011-01-01"), pd.NaT]) - rng2 = DatetimeIndex(["2012-01-01", "2012-01-02"], tz="Asia/Tokyo") - result = rng.union(rng2) - expected = Index( - [ - Timestamp("2011-01-01"), - pd.NaT, - Timestamp("2012-01-01", 
tz="Asia/Tokyo"), - Timestamp("2012-01-02", tz="Asia/Tokyo"), - ], - dtype=object, - ) - tm.assert_index_equal(result, expected) - @pytest.mark.parametrize( "tz", [None, "UTC", "US/Central", dateutil.tz.tzoffset(None, -28800)] ) diff --git a/pandas/tests/indexes/multi/test_setops.py b/pandas/tests/indexes/multi/test_setops.py index c951403fb2654..2b4107acee096 100644 --- a/pandas/tests/indexes/multi/test_setops.py +++ b/pandas/tests/indexes/multi/test_setops.py @@ -204,7 +204,6 @@ def test_difference_sort_special(): def test_difference_sort_special_true(): - # TODO(GH#25151): decide on True behaviour idx = MultiIndex.from_product([[1, 0], ["a", "b"]]) result = idx.difference([], sort=True) expected = MultiIndex.from_product([[0, 1], ["a", "b"]]) @@ -366,8 +365,6 @@ def test_union_sort_other_empty(slice_): def test_union_sort_other_empty_sort(): - # TODO(GH#25151): decide on True behaviour - # # sort=True idx = MultiIndex.from_product([[1, 0], ["a", "b"]]) other = idx[:0] result = idx.union(other, sort=True) diff --git a/pandas/tests/indexes/period/methods/test_factorize.py b/pandas/tests/indexes/period/methods/test_factorize.py index ac3d09d76157b..1239eae6091b8 100644 --- a/pandas/tests/indexes/period/methods/test_factorize.py +++ b/pandas/tests/indexes/period/methods/test_factorize.py @@ -1,14 +1,11 @@ import numpy as np -from pandas import ( - PeriodIndex, - factorize, -) +from pandas import PeriodIndex import pandas._testing as tm class TestFactorize: - def test_factorize(self): + def test_factorize_period(self): idx1 = PeriodIndex( ["2014-01", "2014-01", "2014-02", "2014-02", "2014-03", "2014-03"], freq="M", @@ -25,10 +22,12 @@ def test_factorize(self): tm.assert_numpy_array_equal(arr, exp_arr) tm.assert_index_equal(idx, exp_idx) + def test_factorize_period_nonmonotonic(self): idx2 = PeriodIndex( ["2014-03", "2014-03", "2014-02", "2014-01", "2014-03", "2014-01"], freq="M", ) + exp_idx = PeriodIndex(["2014-01", "2014-02", "2014-03"], freq="M") exp_arr = np.array([2, 2, 1, 0, 2, 0], dtype=np.intp) arr, idx = idx2.factorize(sort=True) @@ -40,17 +39,3 @@ def test_factorize(self): arr, idx = idx2.factorize() tm.assert_numpy_array_equal(arr, exp_arr) tm.assert_index_equal(idx, exp_idx) - - def test_factorize_complex(self): # TODO: WTF is this test doing here?s - # GH 17927 - array = [1, 2, 2 + 1j] - msg = "factorize with argument that is not not a Series" - with tm.assert_produces_warning(FutureWarning, match=msg): - labels, uniques = factorize(array) - - expected_labels = np.array([0, 1, 2], dtype=np.intp) - tm.assert_numpy_array_equal(labels, expected_labels) - - # Should return a complex dtype in the future - expected_uniques = np.array([(1 + 0j), (2 + 0j), (2 + 1j)], dtype=object) - tm.assert_numpy_array_equal(uniques, expected_uniques) diff --git a/pandas/tests/indexes/period/test_formats.py b/pandas/tests/indexes/period/test_formats.py index 7245c6a7116fc..723777fa826c5 100644 --- a/pandas/tests/indexes/period/test_formats.py +++ b/pandas/tests/indexes/period/test_formats.py @@ -1,3 +1,10 @@ +from contextlib import nullcontext +from datetime import ( + datetime, + time, +) +import locale + import numpy as np import pytest @@ -9,7 +16,14 @@ import pandas._testing as tm -def test_to_native_types(): +def get_local_am_pm(): + """Return the AM and PM strings returned by strftime in current locale.""" + am_local = time(1).strftime("%p") + pm_local = time(13).strftime("%p") + return am_local, pm_local + + +def test_get_values_for_csv(): index = PeriodIndex(["2017-01-01", "2017-01-02", 
"2017-01-03"], freq="D") # First, with no arguments. @@ -197,3 +211,136 @@ def test_summary(self): ): result = idx._summary() assert result == expected + + +class TestPeriodIndexFormat: + def test_period_format_and_strftime_default(self): + per = PeriodIndex([datetime(2003, 1, 1, 12), None], freq="h") + + # Default formatting + msg = "PeriodIndex.format is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + formatted = per.format() + assert formatted[0] == "2003-01-01 12:00" # default: minutes not shown + assert formatted[1] == "NaT" + # format is equivalent to strftime(None)... + assert formatted[0] == per.strftime(None)[0] + assert per.strftime(None)[1] is np.nan # ...except for NaTs + + # Same test with nanoseconds freq + per = pd.period_range("2003-01-01 12:01:01.123456789", periods=2, freq="ns") + with tm.assert_produces_warning(FutureWarning, match=msg): + formatted = per.format() + assert (formatted == per.strftime(None)).all() + assert formatted[0] == "2003-01-01 12:01:01.123456789" + assert formatted[1] == "2003-01-01 12:01:01.123456790" + + def test_period_custom(self): + # GH#46252 custom formatting directives %l (ms) and %u (us) + msg = "PeriodIndex.format is deprecated" + + # 3 digits + per = pd.period_range("2003-01-01 12:01:01.123", periods=2, freq="ms") + with tm.assert_produces_warning(FutureWarning, match=msg): + formatted = per.format(date_format="%y %I:%M:%S (ms=%l us=%u ns=%n)") + assert formatted[0] == "03 12:01:01 (ms=123 us=123000 ns=123000000)" + assert formatted[1] == "03 12:01:01 (ms=124 us=124000 ns=124000000)" + + # 6 digits + per = pd.period_range("2003-01-01 12:01:01.123456", periods=2, freq="us") + with tm.assert_produces_warning(FutureWarning, match=msg): + formatted = per.format(date_format="%y %I:%M:%S (ms=%l us=%u ns=%n)") + assert formatted[0] == "03 12:01:01 (ms=123 us=123456 ns=123456000)" + assert formatted[1] == "03 12:01:01 (ms=123 us=123457 ns=123457000)" + + # 9 digits + per = pd.period_range("2003-01-01 12:01:01.123456789", periods=2, freq="ns") + with tm.assert_produces_warning(FutureWarning, match=msg): + formatted = per.format(date_format="%y %I:%M:%S (ms=%l us=%u ns=%n)") + assert formatted[0] == "03 12:01:01 (ms=123 us=123456 ns=123456789)" + assert formatted[1] == "03 12:01:01 (ms=123 us=123456 ns=123456790)" + + def test_period_tz(self): + # Formatting periods created from a datetime with timezone. 
+ msg = r"PeriodIndex\.format is deprecated" + # This timestamp is in 2013 in Europe/Paris but is 2012 in UTC + dt = pd.to_datetime(["2013-01-01 00:00:00+01:00"], utc=True) + + # Converting to a period looses the timezone information + # Since tz is currently set as utc, we'll see 2012 + with tm.assert_produces_warning(UserWarning, match="will drop timezone"): + per = dt.to_period(freq="h") + with tm.assert_produces_warning(FutureWarning, match=msg): + assert per.format()[0] == "2012-12-31 23:00" + + # If tz is currently set as paris before conversion, we'll see 2013 + dt = dt.tz_convert("Europe/Paris") + with tm.assert_produces_warning(UserWarning, match="will drop timezone"): + per = dt.to_period(freq="h") + with tm.assert_produces_warning(FutureWarning, match=msg): + assert per.format()[0] == "2013-01-01 00:00" + + @pytest.mark.parametrize( + "locale_str", + [ + pytest.param(None, id=str(locale.getlocale())), + "it_IT.utf8", + "it_IT", # Note: encoding will be 'ISO8859-1' + "zh_CN.utf8", + "zh_CN", # Note: encoding will be 'gb2312' + ], + ) + def test_period_non_ascii_fmt(self, locale_str): + # GH#46468 non-ascii char in input format string leads to wrong output + + # Skip if locale cannot be set + if locale_str is not None and not tm.can_set_locale(locale_str, locale.LC_ALL): + pytest.skip(f"Skipping as locale '{locale_str}' cannot be set on host.") + + # Change locale temporarily for this test. + with tm.set_locale(locale_str, locale.LC_ALL) if locale_str else nullcontext(): + # Scalar + per = pd.Period("2018-03-11 13:00", freq="h") + assert per.strftime("%y é") == "18 é" + + # Index + per = pd.period_range("2003-01-01 01:00:00", periods=2, freq="12h") + msg = "PeriodIndex.format is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + formatted = per.format(date_format="%y é") + assert formatted[0] == "03 é" + assert formatted[1] == "03 é" + + @pytest.mark.parametrize( + "locale_str", + [ + pytest.param(None, id=str(locale.getlocale())), + "it_IT.utf8", + "it_IT", # Note: encoding will be 'ISO8859-1' + "zh_CN.utf8", + "zh_CN", # Note: encoding will be 'gb2312' + ], + ) + def test_period_custom_locale_directive(self, locale_str): + # GH#46319 locale-specific directive leads to non-utf8 c strftime char* result + + # Skip if locale cannot be set + if locale_str is not None and not tm.can_set_locale(locale_str, locale.LC_ALL): + pytest.skip(f"Skipping as locale '{locale_str}' cannot be set on host.") + + # Change locale temporarily for this test. 
+ with tm.set_locale(locale_str, locale.LC_ALL) if locale_str else nullcontext(): + # Get locale-specific reference + am_local, pm_local = get_local_am_pm() + + # Scalar + per = pd.Period("2018-03-11 13:00", freq="h") + assert per.strftime("%p") == pm_local + + # Index + per = pd.period_range("2003-01-01 01:00:00", periods=2, freq="12h") + msg = "PeriodIndex.format is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + formatted = per.format(date_format="%y %I:%M:%S%p") + assert formatted[0] == f"03 01:00:00{am_local}" + assert formatted[1] == f"03 01:00:00{pm_local}" diff --git a/pandas/tests/indexing/test_coercion.py b/pandas/tests/indexing/test_coercion.py index 82368c67dc6d4..c743166030048 100644 --- a/pandas/tests/indexing/test_coercion.py +++ b/pandas/tests/indexing/test_coercion.py @@ -442,8 +442,8 @@ def test_where_complex128(self, index_or_series, fill_val, exp_dtype): "fill_val,exp_dtype", [(1, object), (1.1, object), (1 + 1j, object), (True, np.bool_)], ) - def test_where_series_bool(self, fill_val, exp_dtype): - klass = pd.Series # TODO: use index_or_series once we have Index[bool] + def test_where_series_bool(self, index_or_series, fill_val, exp_dtype): + klass = index_or_series obj = klass([True, False, True, False]) assert obj.dtype == np.bool_ diff --git a/pandas/tests/io/formats/test_format.py b/pandas/tests/io/formats/test_format.py index d18c333d79244..231beb4abd8ba 100644 --- a/pandas/tests/io/formats/test_format.py +++ b/pandas/tests/io/formats/test_format.py @@ -1,26 +1,20 @@ """ Test output formatting for Series/DataFrame, including to_string & reprs """ -from contextlib import nullcontext from datetime import ( datetime, - time, timedelta, ) from io import StringIO import itertools -import locale -from operator import methodcaller from pathlib import Path import re from shutil import get_terminal_size import sys import textwrap -import dateutil import numpy as np import pytest -import pytz import pandas as pd from pandas import ( @@ -42,13 +36,6 @@ import pandas.io.formats.format as fmt -def get_local_am_pm(): - """Return the AM and PM strings returned by strftime in current locale.""" - am_local = time(1).strftime("%p") - pm_local = time(13).strftime("%p") - return am_local, pm_local - - @pytest.fixture(params=["string", "pathlike", "buffer"]) def filepath_or_buffer_id(request): """ @@ -3320,243 +3307,6 @@ def test_datetime64formatter_tz_ms(self): assert result[1].strip() == "2999-01-02 00:00:00-08:00" -class TestNaTFormatting: - def test_repr(self): - assert repr(NaT) == "NaT" - - def test_str(self): - assert str(NaT) == "NaT" - - -class TestPeriodIndexFormat: - def test_period_format_and_strftime_default(self): - per = pd.PeriodIndex([datetime(2003, 1, 1, 12), None], freq="h") - - # Default formatting - msg = "PeriodIndex.format is deprecated" - with tm.assert_produces_warning(FutureWarning, match=msg): - formatted = per.format() - assert formatted[0] == "2003-01-01 12:00" # default: minutes not shown - assert formatted[1] == "NaT" - # format is equivalent to strftime(None)... 
- assert formatted[0] == per.strftime(None)[0] - assert per.strftime(None)[1] is np.nan # ...except for NaTs - - # Same test with nanoseconds freq - per = pd.period_range("2003-01-01 12:01:01.123456789", periods=2, freq="ns") - with tm.assert_produces_warning(FutureWarning, match=msg): - formatted = per.format() - assert (formatted == per.strftime(None)).all() - assert formatted[0] == "2003-01-01 12:01:01.123456789" - assert formatted[1] == "2003-01-01 12:01:01.123456790" - - def test_period_custom(self): - # GH#46252 custom formatting directives %l (ms) and %u (us) - msg = "PeriodIndex.format is deprecated" - - # 3 digits - per = pd.period_range("2003-01-01 12:01:01.123", periods=2, freq="ms") - with tm.assert_produces_warning(FutureWarning, match=msg): - formatted = per.format(date_format="%y %I:%M:%S (ms=%l us=%u ns=%n)") - assert formatted[0] == "03 12:01:01 (ms=123 us=123000 ns=123000000)" - assert formatted[1] == "03 12:01:01 (ms=124 us=124000 ns=124000000)" - - # 6 digits - per = pd.period_range("2003-01-01 12:01:01.123456", periods=2, freq="us") - with tm.assert_produces_warning(FutureWarning, match=msg): - formatted = per.format(date_format="%y %I:%M:%S (ms=%l us=%u ns=%n)") - assert formatted[0] == "03 12:01:01 (ms=123 us=123456 ns=123456000)" - assert formatted[1] == "03 12:01:01 (ms=123 us=123457 ns=123457000)" - - # 9 digits - per = pd.period_range("2003-01-01 12:01:01.123456789", periods=2, freq="ns") - with tm.assert_produces_warning(FutureWarning, match=msg): - formatted = per.format(date_format="%y %I:%M:%S (ms=%l us=%u ns=%n)") - assert formatted[0] == "03 12:01:01 (ms=123 us=123456 ns=123456789)" - assert formatted[1] == "03 12:01:01 (ms=123 us=123456 ns=123456790)" - - def test_period_tz(self): - # Formatting periods created from a datetime with timezone. - msg = r"PeriodIndex\.format is deprecated" - # This timestamp is in 2013 in Europe/Paris but is 2012 in UTC - dt = pd.to_datetime(["2013-01-01 00:00:00+01:00"], utc=True) - - # Converting to a period looses the timezone information - # Since tz is currently set as utc, we'll see 2012 - with tm.assert_produces_warning(UserWarning, match="will drop timezone"): - per = dt.to_period(freq="h") - with tm.assert_produces_warning(FutureWarning, match=msg): - assert per.format()[0] == "2012-12-31 23:00" - - # If tz is currently set as paris before conversion, we'll see 2013 - dt = dt.tz_convert("Europe/Paris") - with tm.assert_produces_warning(UserWarning, match="will drop timezone"): - per = dt.to_period(freq="h") - with tm.assert_produces_warning(FutureWarning, match=msg): - assert per.format()[0] == "2013-01-01 00:00" - - @pytest.mark.parametrize( - "locale_str", - [ - pytest.param(None, id=str(locale.getlocale())), - "it_IT.utf8", - "it_IT", # Note: encoding will be 'ISO8859-1' - "zh_CN.utf8", - "zh_CN", # Note: encoding will be 'gb2312' - ], - ) - def test_period_non_ascii_fmt(self, locale_str): - # GH#46468 non-ascii char in input format string leads to wrong output - - # Skip if locale cannot be set - if locale_str is not None and not tm.can_set_locale(locale_str, locale.LC_ALL): - pytest.skip(f"Skipping as locale '{locale_str}' cannot be set on host.") - - # Change locale temporarily for this test. 
- with tm.set_locale(locale_str, locale.LC_ALL) if locale_str else nullcontext(): - # Scalar - per = pd.Period("2018-03-11 13:00", freq="h") - assert per.strftime("%y é") == "18 é" - - # Index - per = pd.period_range("2003-01-01 01:00:00", periods=2, freq="12h") - msg = "PeriodIndex.format is deprecated" - with tm.assert_produces_warning(FutureWarning, match=msg): - formatted = per.format(date_format="%y é") - assert formatted[0] == "03 é" - assert formatted[1] == "03 é" - - @pytest.mark.parametrize( - "locale_str", - [ - pytest.param(None, id=str(locale.getlocale())), - "it_IT.utf8", - "it_IT", # Note: encoding will be 'ISO8859-1' - "zh_CN.utf8", - "zh_CN", # Note: encoding will be 'gb2312' - ], - ) - def test_period_custom_locale_directive(self, locale_str): - # GH#46319 locale-specific directive leads to non-utf8 c strftime char* result - - # Skip if locale cannot be set - if locale_str is not None and not tm.can_set_locale(locale_str, locale.LC_ALL): - pytest.skip(f"Skipping as locale '{locale_str}' cannot be set on host.") - - # Change locale temporarily for this test. - with tm.set_locale(locale_str, locale.LC_ALL) if locale_str else nullcontext(): - # Get locale-specific reference - am_local, pm_local = get_local_am_pm() - - # Scalar - per = pd.Period("2018-03-11 13:00", freq="h") - assert per.strftime("%p") == pm_local - - # Index - per = pd.period_range("2003-01-01 01:00:00", periods=2, freq="12h") - msg = "PeriodIndex.format is deprecated" - with tm.assert_produces_warning(FutureWarning, match=msg): - formatted = per.format(date_format="%y %I:%M:%S%p") - assert formatted[0] == f"03 01:00:00{am_local}" - assert formatted[1] == f"03 01:00:00{pm_local}" - - -class TestDatetimeIndexFormat: - def test_datetime(self): - msg = "DatetimeIndex.format is deprecated" - with tm.assert_produces_warning(FutureWarning, match=msg): - formatted = pd.to_datetime([datetime(2003, 1, 1, 12), NaT]).format() - assert formatted[0] == "2003-01-01 12:00:00" - assert formatted[1] == "NaT" - - def test_date(self): - msg = "DatetimeIndex.format is deprecated" - with tm.assert_produces_warning(FutureWarning, match=msg): - formatted = pd.to_datetime([datetime(2003, 1, 1), NaT]).format() - assert formatted[0] == "2003-01-01" - assert formatted[1] == "NaT" - - def test_date_tz(self): - dti = pd.to_datetime([datetime(2013, 1, 1)], utc=True) - msg = "DatetimeIndex.format is deprecated" - with tm.assert_produces_warning(FutureWarning, match=msg): - formatted = dti.format() - assert formatted[0] == "2013-01-01 00:00:00+00:00" - - dti = pd.to_datetime([datetime(2013, 1, 1), NaT], utc=True) - with tm.assert_produces_warning(FutureWarning, match=msg): - formatted = dti.format() - assert formatted[0] == "2013-01-01 00:00:00+00:00" - - def test_date_explicit_date_format(self): - dti = pd.to_datetime([datetime(2003, 2, 1), NaT]) - msg = "DatetimeIndex.format is deprecated" - with tm.assert_produces_warning(FutureWarning, match=msg): - formatted = dti.format(date_format="%m-%d-%Y", na_rep="UT") - assert formatted[0] == "02-01-2003" - assert formatted[1] == "UT" - - -class TestDatetimeIndexUnicode: - def test_dates(self): - text = str(pd.to_datetime([datetime(2013, 1, 1), datetime(2014, 1, 1)])) - assert "['2013-01-01'," in text - assert ", '2014-01-01']" in text - - def test_mixed(self): - text = str( - pd.to_datetime( - [datetime(2013, 1, 1), datetime(2014, 1, 1, 12), datetime(2014, 1, 1)] - ) - ) - assert "'2013-01-01 00:00:00'," in text - assert "'2014-01-01 00:00:00']" in text - - -class TestStringRepTimestamp: - def 
test_no_tz(self): - dt_date = datetime(2013, 1, 2) - assert str(dt_date) == str(Timestamp(dt_date)) - - dt_datetime = datetime(2013, 1, 2, 12, 1, 3) - assert str(dt_datetime) == str(Timestamp(dt_datetime)) - - dt_datetime_us = datetime(2013, 1, 2, 12, 1, 3, 45) - assert str(dt_datetime_us) == str(Timestamp(dt_datetime_us)) - - ts_nanos_only = Timestamp(200) - assert str(ts_nanos_only) == "1970-01-01 00:00:00.000000200" - - ts_nanos_micros = Timestamp(1200) - assert str(ts_nanos_micros) == "1970-01-01 00:00:00.000001200" - - def test_tz_pytz(self): - dt_date = datetime(2013, 1, 2, tzinfo=pytz.utc) - assert str(dt_date) == str(Timestamp(dt_date)) - - dt_datetime = datetime(2013, 1, 2, 12, 1, 3, tzinfo=pytz.utc) - assert str(dt_datetime) == str(Timestamp(dt_datetime)) - - dt_datetime_us = datetime(2013, 1, 2, 12, 1, 3, 45, tzinfo=pytz.utc) - assert str(dt_datetime_us) == str(Timestamp(dt_datetime_us)) - - def test_tz_dateutil(self): - utc = dateutil.tz.tzutc() - - dt_date = datetime(2013, 1, 2, tzinfo=utc) - assert str(dt_date) == str(Timestamp(dt_date)) - - dt_datetime = datetime(2013, 1, 2, 12, 1, 3, tzinfo=utc) - assert str(dt_datetime) == str(Timestamp(dt_datetime)) - - dt_datetime_us = datetime(2013, 1, 2, 12, 1, 3, 45, tzinfo=utc) - assert str(dt_datetime_us) == str(Timestamp(dt_datetime_us)) - - def test_nat_representations(self): - for f in (str, repr, methodcaller("isoformat")): - assert f(NaT) == "NaT" - - @pytest.mark.parametrize( "percentiles, expected", [ diff --git a/pandas/tests/reshape/concat/test_datetimes.py b/pandas/tests/reshape/concat/test_datetimes.py index 51398acd6ec57..71606fb72c0f6 100644 --- a/pandas/tests/reshape/concat/test_datetimes.py +++ b/pandas/tests/reshape/concat/test_datetimes.py @@ -178,6 +178,7 @@ def test_concat_NaT_series(self): result = concat([y, y], ignore_index=True) tm.assert_series_equal(result, expected) + def test_concat_NaT_series2(self): # without tz x = Series(date_range("20151124 08:00", "20151124 09:00", freq="1h")) y = Series(date_range("20151124 10:00", "20151124 11:00", freq="1h")) @@ -298,6 +299,7 @@ def test_concat_tz_series(self): result = concat([x, y], ignore_index=True) tm.assert_series_equal(result, expected) + def test_concat_tz_series2(self): # gh-11887: concat tz and object x = Series(date_range("20151124 08:00", "20151124 09:00", freq="1h", tz="UTC")) y = Series(["a", "b"]) @@ -305,6 +307,7 @@ def test_concat_tz_series(self): result = concat([x, y], ignore_index=True) tm.assert_series_equal(result, expected) + def test_concat_tz_series3(self): # see gh-12217 and gh-12306 # Concatenating two UTC times first = DataFrame([[datetime(2016, 1, 1)]]) @@ -316,6 +319,7 @@ def test_concat_tz_series(self): result = concat([first, second]) assert result[0].dtype == "datetime64[ns, UTC]" + def test_concat_tz_series4(self): # Concatenating two London times first = DataFrame([[datetime(2016, 1, 1)]]) first[0] = first[0].dt.tz_localize("Europe/London") @@ -326,6 +330,7 @@ def test_concat_tz_series(self): result = concat([first, second]) assert result[0].dtype == "datetime64[ns, Europe/London]" + def test_concat_tz_series5(self): # Concatenating 2+1 London times first = DataFrame([[datetime(2016, 1, 1)], [datetime(2016, 1, 2)]]) first[0] = first[0].dt.tz_localize("Europe/London") @@ -336,6 +341,7 @@ def test_concat_tz_series(self): result = concat([first, second]) assert result[0].dtype == "datetime64[ns, Europe/London]" + def test_concat_tz_series6(self): # Concat'ing 1+2 London times first = DataFrame([[datetime(2016, 1, 1)]]) first[0] = 
first[0].dt.tz_localize("Europe/London") @@ -512,6 +518,7 @@ def test_concat_period_other_series(self): tm.assert_series_equal(result, expected) assert result.dtype == "object" + def test_concat_period_other_series2(self): # non-period x = Series(pd.PeriodIndex(["2015-11-01", "2015-12-01"], freq="D")) y = Series(DatetimeIndex(["2015-11-01", "2015-12-01"])) @@ -520,6 +527,7 @@ def test_concat_period_other_series(self): tm.assert_series_equal(result, expected) assert result.dtype == "object" + def test_concat_period_other_series3(self): x = Series(pd.PeriodIndex(["2015-11-01", "2015-12-01"], freq="D")) y = Series(["A", "B"]) expected = Series([x[0], x[1], y[0], y[1]], dtype="object") diff --git a/pandas/tests/scalar/test_nat.py b/pandas/tests/scalar/test_nat.py index f5a94099523fb..2df090e5016e7 100644 --- a/pandas/tests/scalar/test_nat.py +++ b/pandas/tests/scalar/test_nat.py @@ -33,6 +33,17 @@ ) +class TestNaTFormatting: + def test_repr(self): + assert repr(NaT) == "NaT" + + def test_str(self): + assert str(NaT) == "NaT" + + def test_isoformat(self): + assert NaT.isoformat() == "NaT" + + @pytest.mark.parametrize( "nat,idx", [ @@ -529,6 +540,8 @@ def test_to_numpy_alias(): marks=pytest.mark.xfail( not np_version_gte1p24p3, reason="td64 doesn't return NotImplemented, see numpy#17017", + # When this xfail is fixed, test_nat_comparisons_numpy + # can be removed. ), ), Timestamp(0), diff --git a/pandas/tests/scalar/timestamp/test_rendering.py b/pandas/tests/scalar/timestamp/test_rendering.py index c351fb23fca0a..ec1df2901d7db 100644 --- a/pandas/tests/scalar/timestamp/test_rendering.py +++ b/pandas/tests/scalar/timestamp/test_rendering.py @@ -1,7 +1,9 @@ +from datetime import datetime import pprint +import dateutil.tz import pytest -import pytz # noqa: F401 # a test below uses pytz but only inside a `eval` call +import pytz # a test below uses pytz but only inside a `eval` call from pandas import Timestamp @@ -80,3 +82,41 @@ def test_to_timestamp_repr_is_code(self): ] for z in zs: assert eval(repr(z)) == z + + def test_repr_matches_pydatetime_no_tz(self): + dt_date = datetime(2013, 1, 2) + assert str(dt_date) == str(Timestamp(dt_date)) + + dt_datetime = datetime(2013, 1, 2, 12, 1, 3) + assert str(dt_datetime) == str(Timestamp(dt_datetime)) + + dt_datetime_us = datetime(2013, 1, 2, 12, 1, 3, 45) + assert str(dt_datetime_us) == str(Timestamp(dt_datetime_us)) + + ts_nanos_only = Timestamp(200) + assert str(ts_nanos_only) == "1970-01-01 00:00:00.000000200" + + ts_nanos_micros = Timestamp(1200) + assert str(ts_nanos_micros) == "1970-01-01 00:00:00.000001200" + + def test_repr_matches_pydatetime_tz_pytz(self): + dt_date = datetime(2013, 1, 2, tzinfo=pytz.utc) + assert str(dt_date) == str(Timestamp(dt_date)) + + dt_datetime = datetime(2013, 1, 2, 12, 1, 3, tzinfo=pytz.utc) + assert str(dt_datetime) == str(Timestamp(dt_datetime)) + + dt_datetime_us = datetime(2013, 1, 2, 12, 1, 3, 45, tzinfo=pytz.utc) + assert str(dt_datetime_us) == str(Timestamp(dt_datetime_us)) + + def test_repr_matches_pydatetime_tz_dateutil(self): + utc = dateutil.tz.tzutc() + + dt_date = datetime(2013, 1, 2, tzinfo=utc) + assert str(dt_date) == str(Timestamp(dt_date)) + + dt_datetime = datetime(2013, 1, 2, 12, 1, 3, tzinfo=utc) + assert str(dt_datetime) == str(Timestamp(dt_datetime)) + + dt_datetime_us = datetime(2013, 1, 2, 12, 1, 3, 45, tzinfo=utc) + assert str(dt_datetime_us) == str(Timestamp(dt_datetime_us)) diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py index 661290fb00d13..918353c9c7181 100644 --- 
a/pandas/tests/test_algos.py +++ b/pandas/tests/test_algos.py @@ -50,6 +50,20 @@ class TestFactorize: + def test_factorize_complex(self): + # GH#17927 + array = [1, 2, 2 + 1j] + msg = "factorize with argument that is not not a Series" + with tm.assert_produces_warning(FutureWarning, match=msg): + labels, uniques = algos.factorize(array) + + expected_labels = np.array([0, 1, 2], dtype=np.intp) + tm.assert_numpy_array_equal(labels, expected_labels) + + # Should return a complex dtype in the future + expected_uniques = np.array([(1 + 0j), (2 + 0j), (2 + 1j)], dtype=object) + tm.assert_numpy_array_equal(uniques, expected_uniques) + @pytest.mark.parametrize("sort", [True, False]) def test_factorize(self, index_or_series_obj, sort): obj = index_or_series_obj diff --git a/pandas/tests/tseries/offsets/test_offsets.py b/pandas/tests/tseries/offsets/test_offsets.py index f6b7c08f90833..451033947fc99 100644 --- a/pandas/tests/tseries/offsets/test_offsets.py +++ b/pandas/tests/tseries/offsets/test_offsets.py @@ -560,27 +560,27 @@ def test_offsets_hashable(self, offset_types): off = _create_offset(offset_types) assert hash(off) is not None + # TODO: belongs in arithmetic tests? @pytest.mark.filterwarnings( "ignore:Non-vectorized DateOffset being applied to Series or DatetimeIndex" ) @pytest.mark.parametrize("unit", ["s", "ms", "us"]) - def test_add_dt64_ndarray_non_nano(self, offset_types, unit, request): + def test_add_dt64_ndarray_non_nano(self, offset_types, unit): # check that the result with non-nano matches nano off = _create_offset(offset_types) dti = date_range("2016-01-01", periods=35, freq="D", unit=unit) - dta = dti._data - expected = dti._data + off - result = dta + off + result = (dti + off)._with_freq(None) exp_unit = unit - if isinstance(off, Tick) and off._creso > dta._creso: + if isinstance(off, Tick) and off._creso > dti._data._creso: # cast to higher reso like we would with Timedelta scalar exp_unit = Timedelta(off).unit - expected = expected.as_unit(exp_unit) + # TODO(GH#55564): as_unit will be unnecessary + expected = DatetimeIndex([x + off for x in dti]).as_unit(exp_unit) - tm.assert_numpy_array_equal(result._ndarray, expected._ndarray) + tm.assert_index_equal(result, expected) class TestDateOffset:
Also split some tests
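(The "split" refers to breaking multi-scenario tests such as `test_concat_tz_series` into `test_concat_tz_series2` through `test_concat_tz_series6`.) A minimal sketch of that pattern, with made-up series standing in for the tz cases in the diff:

```python
import pandas as pd
import pandas._testing as tm


# Before: two unrelated concat scenarios share one test body, so a
# failure (or an engine-specific xfail) in the first masks the second.
def test_concat_series():
    s = pd.Series([1, 2])
    tm.assert_series_equal(
        pd.concat([s, s], ignore_index=True), pd.Series([1, 2, 1, 2])
    )
    t = pd.Series(["a", "b"])
    tm.assert_series_equal(
        pd.concat([t, t], ignore_index=True), pd.Series(["a", "b", "a", "b"])
    )


# After: the second scenario is split into its own test (mirroring the
# numbered renames in the diff) so each case passes or fails on its own.
def test_concat_series2():
    t = pd.Series(["a", "b"])
    tm.assert_series_equal(
        pd.concat([t, t], ignore_index=True), pd.Series(["a", "b", "a", "b"])
    )
```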
https://api.github.com/repos/pandas-dev/pandas/pulls/55603
2023-10-20T01:51:12Z
2023-10-20T16:43:44Z
2023-10-20T16:43:44Z
2023-10-20T18:32:34Z
Backport PR #55596 on branch 2.1.x (CI: Have NumpyDev build only test nightly numpy)
diff --git a/ci/deps/actions-311-numpydev.yaml b/ci/deps/actions-311-numpydev.yaml index 7fd3a65ec91f8..9a6c05a138f9b 100644 --- a/ci/deps/actions-311-numpydev.yaml +++ b/ci/deps/actions-311-numpydev.yaml @@ -29,5 +29,4 @@ dependencies: - "--extra-index-url https://pypi.anaconda.org/scientific-python-nightly-wheels/simple" - "--pre" - "numpy" - - "scipy" - "tzdata>=2022.1"
Backport PR #55596: CI: Have NumpyDev build only test nightly numpy
https://api.github.com/repos/pandas-dev/pandas/pulls/55602
2023-10-19T22:53:42Z
2023-10-20T03:23:25Z
2023-10-20T03:23:25Z
2023-10-20T03:23:25Z
DOC: Correct groupby().mean() usage in table layout getting started article
diff --git a/doc/source/getting_started/intro_tutorials/07_reshape_table_layout.rst b/doc/source/getting_started/intro_tutorials/07_reshape_table_layout.rst index 6a0b59b26350c..e4b34e3af57bf 100644 --- a/doc/source/getting_started/intro_tutorials/07_reshape_table_layout.rst +++ b/doc/source/getting_started/intro_tutorials/07_reshape_table_layout.rst @@ -266,7 +266,7 @@ For more information about :meth:`~DataFrame.pivot_table`, see the user guide se :: - air_quality.groupby(["parameter", "location"]).mean() + air_quality.groupby(["parameter", "location"])[["value"]].mean() .. raw:: html
This would close #55599 in line with the suggestion in the issue.
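A minimal sketch of why the column selection matters, using a made-up frame in place of the tutorial's air-quality data:

```python
import pandas as pd

# Hypothetical stand-in for the tutorial's air_quality table.
air_quality = pd.DataFrame(
    {
        "parameter": ["no2", "no2", "pm25", "pm25"],
        "location": ["BETR801", "London", "BETR801", "London"],
        "value": [26.5, 29.0, 12.0, 13.5],
    }
)

# Selecting [["value"]] before aggregating restricts the mean to the
# measurement column instead of asking groupby to aggregate every
# remaining column, matching the pivot_table example it accompanies.
print(air_quality.groupby(["parameter", "location"])[["value"]].mean())
```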
https://api.github.com/repos/pandas-dev/pandas/pulls/55600
2023-10-19T17:29:15Z
2023-10-19T22:52:31Z
2023-10-19T22:52:30Z
2023-10-19T22:53:06Z
Backport PR #55534 on branch 2.1.x (BUG: Ensure "string[pyarrow]" type is preserved when calling extractall)
diff --git a/doc/source/whatsnew/v2.1.2.rst b/doc/source/whatsnew/v2.1.2.rst index 5df34f20bece3..ea6b6d523f405 100644 --- a/doc/source/whatsnew/v2.1.2.rst +++ b/doc/source/whatsnew/v2.1.2.rst @@ -29,6 +29,7 @@ Bug fixes - Fixed bug in :meth:`Series.all` and :meth:`Series.any` not treating missing values correctly for ``dtype="string[pyarrow_numpy]"`` (:issue:`55367`) - Fixed bug in :meth:`Series.floordiv` for :class:`ArrowDtype` (:issue:`55561`) - Fixed bug in :meth:`Series.rank` for ``string[pyarrow_numpy]`` dtype (:issue:`55362`) +- Fixed bug in :meth:`Series.str.extractall` for :class:`ArrowDtype` dtype being converted to object (:issue:`53846`) - Silence ``Period[B]`` warnings introduced by :issue:`53446` during normal plotting activity (:issue:`55138`) .. --------------------------------------------------------------------------- diff --git a/pandas/core/strings/accessor.py b/pandas/core/strings/accessor.py index 124ca546c4583..b299f5d6deab3 100644 --- a/pandas/core/strings/accessor.py +++ b/pandas/core/strings/accessor.py @@ -3449,10 +3449,9 @@ def _result_dtype(arr): # when the list of values is empty. from pandas.core.arrays.string_ import StringDtype - if isinstance(arr.dtype, StringDtype): + if isinstance(arr.dtype, (ArrowDtype, StringDtype)): return arr.dtype - else: - return object + return object def _get_single_group_name(regex: re.Pattern) -> Hashable: diff --git a/pandas/tests/strings/test_extract.py b/pandas/tests/strings/test_extract.py index 4773c13aad376..b8319e90e09a8 100644 --- a/pandas/tests/strings/test_extract.py +++ b/pandas/tests/strings/test_extract.py @@ -4,6 +4,8 @@ import numpy as np import pytest +from pandas.core.dtypes.dtypes import ArrowDtype + from pandas import ( DataFrame, Index, @@ -706,3 +708,12 @@ def test_extractall_same_as_extract_subject_index(any_string_dtype): has_match_index = s.str.extractall(pattern_one_noname) no_match_index = has_match_index.xs(0, level="match") tm.assert_frame_equal(extract_one_noname, no_match_index) + + +def test_extractall_preserves_dtype(): + # Ensure that when extractall is called on a series with specific dtypes set, that + # the dtype is preserved in the resulting DataFrame's column. + pa = pytest.importorskip("pyarrow") + + result = Series(["abc", "ab"], dtype=ArrowDtype(pa.string())).str.extractall("(ab)") + assert result.dtypes[0] == "string[pyarrow]"
Backport PR #55534: BUG: Ensure "string[pyarrow]" type is preserved when calling extractall
https://api.github.com/repos/pandas-dev/pandas/pulls/55597
2023-10-19T16:43:46Z
2023-10-19T18:12:47Z
2023-10-19T18:12:47Z
2023-10-19T18:12:48Z
CI: Have NumpyDev build only test nightly numpy
diff --git a/ci/deps/actions-311-numpydev.yaml b/ci/deps/actions-311-numpydev.yaml index 7fd3a65ec91f8..9a6c05a138f9b 100644 --- a/ci/deps/actions-311-numpydev.yaml +++ b/ci/deps/actions-311-numpydev.yaml @@ -29,5 +29,4 @@ dependencies: - "--extra-index-url https://pypi.anaconda.org/scientific-python-nightly-wheels/simple" - "--pre" - "numpy" - - "scipy" - "tzdata>=2022.1"
Since scipy is an optional dependency and is only used sparsely within the pandas API, I don't think it's entirely necessary to test its nightly builds
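For context, scipy is consumed through pandas' optional-import helper, so scipy-dependent features degrade gracefully when the package is absent; a quick check (this uses the private `pandas.compat._optional` helper as it exists in pandas 2.x — treat the exact name as an assumption):

```python
from pandas.compat._optional import import_optional_dependency

# Returns the scipy module when it is installed; with errors="ignore"
# it returns None instead of raising, which is why removing the scipy
# nightly wheel from this CI environment can't break unrelated tests.
scipy = import_optional_dependency("scipy", errors="ignore")
print(scipy is not None)
```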
https://api.github.com/repos/pandas-dev/pandas/pulls/55596
2023-10-19T16:26:29Z
2023-10-19T22:53:34Z
2023-10-19T22:53:34Z
2023-10-19T22:53:38Z
BUG: DateOffset addition with non-nano
diff --git a/doc/source/whatsnew/v2.2.0.rst b/doc/source/whatsnew/v2.2.0.rst index be565616036e6..91863848827a4 100644 --- a/doc/source/whatsnew/v2.2.0.rst +++ b/doc/source/whatsnew/v2.2.0.rst @@ -302,6 +302,7 @@ Datetimelike - Bug in :meth:`DatetimeIndex.union` returning object dtype for tz-aware indexes with the same timezone but different units (:issue:`55238`) - Bug in :meth:`Tick.delta` with very large ticks raising ``OverflowError`` instead of ``OutOfBoundsTimedelta`` (:issue:`55503`) - Bug in adding or subtracting a :class:`Week` offset to a ``datetime64`` :class:`Series`, :class:`Index`, or :class:`DataFrame` column with non-nanosecond resolution returning incorrect results (:issue:`55583`) +- Bug in addition or subtraction of :class:`DateOffset` objects with microsecond components to ``datetime64`` :class:`Index`, :class:`Series`, or :class:`DataFrame` columns with non-nanosecond resolution (:issue:`55595`) - Bug in addition or subtraction of very large :class:`Tick` objects with :class:`Timestamp` or :class:`Timedelta` objects raising ``OverflowError`` instead of ``OutOfBoundsTimedelta`` (:issue:`55503`) - diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx index b25afbf0541a9..5c9da24185060 100644 --- a/pandas/_libs/tslibs/offsets.pyx +++ b/pandas/_libs/tslibs/offsets.pyx @@ -1368,10 +1368,10 @@ cdef class RelativeDeltaOffset(BaseOffset): else: return other + timedelta(self.n) - @apply_array_wraps - def _apply_array(self, dtarr): - reso = get_unit_from_dtype(dtarr.dtype) - dt64other = np.asarray(dtarr) + @cache_readonly + def _pd_timedelta(self) -> Timedelta: + # components of _offset that can be cast to pd.Timedelta + kwds = self.kwds relativedelta_fast = { "years", @@ -1385,28 +1385,26 @@ cdef class RelativeDeltaOffset(BaseOffset): } # relativedelta/_offset path only valid for base DateOffset if self._use_relativedelta and set(kwds).issubset(relativedelta_fast): - - months = (kwds.get("years", 0) * 12 + kwds.get("months", 0)) * self.n - if months: - shifted = shift_months(dt64other.view("i8"), months, reso=reso) - dt64other = shifted.view(dtarr.dtype) - - weeks = kwds.get("weeks", 0) * self.n - if weeks: - delta = Timedelta(days=7 * weeks) - td = (<_Timedelta>delta)._as_creso(reso) - dt64other = dt64other + td - - timedelta_kwds = { - k: v - for k, v in kwds.items() - if k in ["days", "hours", "minutes", "seconds", "microseconds"] + td_kwds = { + key: val + for key, val in kwds.items() + if key in ["days", "hours", "minutes", "seconds", "microseconds"] } - if timedelta_kwds: - delta = Timedelta(**timedelta_kwds) - td = (<_Timedelta>delta)._as_creso(reso) - dt64other = dt64other + (self.n * td) - return dt64other + if "weeks" in kwds: + days = td_kwds.get("days", 0) + td_kwds["days"] = days + 7 * kwds["weeks"] + + if td_kwds: + delta = Timedelta(**td_kwds) + if "microseconds" in kwds: + delta = delta.as_unit("us") + else: + delta = delta.as_unit("s") + else: + delta = Timedelta(0).as_unit("s") + + return delta * self.n + elif not self._use_relativedelta and hasattr(self, "_offset"): # timedelta num_nano = getattr(self, "nanoseconds", 0) @@ -1415,8 +1413,12 @@ cdef class RelativeDeltaOffset(BaseOffset): delta = Timedelta((self._offset + rem_nano) * self.n) else: delta = Timedelta(self._offset * self.n) - td = (<_Timedelta>delta)._as_creso(reso) - return dt64other + td + if "microseconds" in kwds: + delta = delta.as_unit("us") + else: + delta = delta.as_unit("s") + return delta + else: # relativedelta with other keywords kwd = set(kwds) - 
relativedelta_fast @@ -1426,6 +1428,20 @@ cdef class RelativeDeltaOffset(BaseOffset): "applied vectorized" ) + @apply_array_wraps + def _apply_array(self, dtarr): + reso = get_unit_from_dtype(dtarr.dtype) + dt64other = np.asarray(dtarr) + + delta = self._pd_timedelta # may raise NotImplementedError + + kwds = self.kwds + months = (kwds.get("years", 0) * 12 + kwds.get("months", 0)) * self.n + if months: + shifted = shift_months(dt64other.view("i8"), months, reso=reso) + dt64other = shifted.view(dtarr.dtype) + return dt64other + delta + def is_on_offset(self, dt: datetime) -> bool: if self.normalize and not _is_normalized(dt): return False diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index b94cbe9c3fc60..b6eef812ead05 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -799,7 +799,9 @@ def _add_offset(self, offset) -> Self: values = self try: - result = offset._apply_array(values).view(values.dtype) + result = offset._apply_array(values) + if result.dtype.kind == "i": + result = result.view(values.dtype) except NotImplementedError: warnings.warn( "Non-vectorized DateOffset being applied to Series or DatetimeIndex.", @@ -807,6 +809,7 @@ def _add_offset(self, offset) -> Self: stacklevel=find_stack_level(), ) result = self.astype("O") + offset + # TODO(GH#55564): as_unit will be unnecessary result = type(self)._from_sequence(result).as_unit(self.unit) if not len(self): # GH#30336 _from_sequence won't be able to infer self.tz diff --git a/pandas/tests/arithmetic/test_datetime64.py b/pandas/tests/arithmetic/test_datetime64.py index 89df837315396..dd5de83c0cadd 100644 --- a/pandas/tests/arithmetic/test_datetime64.py +++ b/pandas/tests/arithmetic/test_datetime64.py @@ -1223,13 +1223,16 @@ class TestDatetime64DateOffsetArithmetic: # Tick DateOffsets # TODO: parametrize over timezone? 
- def test_dt64arr_series_add_tick_DateOffset(self, box_with_array): + @pytest.mark.parametrize("unit", ["s", "ms", "us", "ns"]) + def test_dt64arr_series_add_tick_DateOffset(self, box_with_array, unit): # GH#4532 # operate with pd.offsets - ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")]) + ser = Series( + [Timestamp("20130101 9:01"), Timestamp("20130101 9:02")] + ).dt.as_unit(unit) expected = Series( [Timestamp("20130101 9:01:05"), Timestamp("20130101 9:02:05")] - ) + ).dt.as_unit(unit) ser = tm.box_expected(ser, box_with_array) expected = tm.box_expected(expected, box_with_array) @@ -1310,7 +1313,8 @@ def test_dti_add_tick_tzaware(self, tz_aware_fixture, box_with_array): # ------------------------------------------------------------- # RelativeDelta DateOffsets - def test_dt64arr_add_sub_relativedelta_offsets(self, box_with_array): + @pytest.mark.parametrize("unit", ["s", "ms", "us", "ns"]) + def test_dt64arr_add_sub_relativedelta_offsets(self, box_with_array, unit): # GH#10699 vec = DatetimeIndex( [ @@ -1323,7 +1327,7 @@ def test_dt64arr_add_sub_relativedelta_offsets(self, box_with_array): Timestamp("2000-05-15"), Timestamp("2001-06-15"), ] - ) + ).as_unit(unit) vec = tm.box_expected(vec, box_with_array) vec_items = vec.iloc[0] if box_with_array is pd.DataFrame else vec @@ -1337,24 +1341,29 @@ def test_dt64arr_add_sub_relativedelta_offsets(self, box_with_array): ("seconds", 2), ("microseconds", 5), ] - for i, (unit, value) in enumerate(relative_kwargs): - off = DateOffset(**{unit: value}) + for i, (offset_unit, value) in enumerate(relative_kwargs): + off = DateOffset(**{offset_unit: value}) + + exp_unit = unit + if offset_unit == "microseconds" and unit != "ns": + exp_unit = "us" - expected = DatetimeIndex([x + off for x in vec_items]) + # TODO(GH#55564): as_unit will be unnecessary + expected = DatetimeIndex([x + off for x in vec_items]).as_unit(exp_unit) expected = tm.box_expected(expected, box_with_array) tm.assert_equal(expected, vec + off) - expected = DatetimeIndex([x - off for x in vec_items]) + expected = DatetimeIndex([x - off for x in vec_items]).as_unit(exp_unit) expected = tm.box_expected(expected, box_with_array) tm.assert_equal(expected, vec - off) off = DateOffset(**dict(relative_kwargs[: i + 1])) - expected = DatetimeIndex([x + off for x in vec_items]) + expected = DatetimeIndex([x + off for x in vec_items]).as_unit(exp_unit) expected = tm.box_expected(expected, box_with_array) tm.assert_equal(expected, vec + off) - expected = DatetimeIndex([x - off for x in vec_items]) + expected = DatetimeIndex([x - off for x in vec_items]).as_unit(exp_unit) expected = tm.box_expected(expected, box_with_array) tm.assert_equal(expected, vec - off) msg = "(bad|unsupported) operand type for unary" diff --git a/pandas/tests/tseries/offsets/test_offsets.py b/pandas/tests/tseries/offsets/test_offsets.py index 7cefd93851b0e..f6b7c08f90833 100644 --- a/pandas/tests/tseries/offsets/test_offsets.py +++ b/pandas/tests/tseries/offsets/test_offsets.py @@ -568,10 +568,8 @@ def test_add_dt64_ndarray_non_nano(self, offset_types, unit, request): # check that the result with non-nano matches nano off = _create_offset(offset_types) - dti = date_range("2016-01-01", periods=35, freq="D") - - arr = dti._data._ndarray.astype(f"M8[{unit}]") - dta = type(dti._data)._simple_new(arr, dtype=arr.dtype) + dti = date_range("2016-01-01", periods=35, freq="D", unit=unit) + dta = dti._data expected = dti._data + off result = dta + off
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
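A minimal sketch of the behavior the patch targets, based on the tests it adds (assuming pandas 2.x with non-nanosecond support):

```python
import pandas as pd

# A second-resolution index; before this fix, adding a DateOffset with a
# microsecond component to non-nanosecond datetime64 data produced
# incorrect results.
dti = pd.date_range("2016-01-01", periods=2, freq="D", unit="s")
off = pd.DateOffset(microseconds=5)

result = dti + off
# Per the new tests, the result is upcast to microsecond resolution so
# the offset can be represented exactly.
print(result.dtype)  # expected: datetime64[us]
```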
https://api.github.com/repos/pandas-dev/pandas/pulls/55595
2023-10-19T15:40:17Z
2023-10-19T22:54:24Z
2023-10-19T22:54:24Z
2023-10-19T23:04:41Z
BLD: Allow building with NumPy nightlies and use it for nightlies
diff --git a/.circleci/config.yml b/.circleci/config.yml index ba124533e953a..1c70debca0caf 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -49,7 +49,12 @@ jobs: no_output_timeout: 30m # Sometimes the tests won't generate any output, make sure the job doesn't get killed by that command: | pip3 install cibuildwheel==2.15.0 + # When this is a nightly wheel build, allow picking up NumPy 2.0 dev wheels: + if [[ "$IS_SCHEDULE_DISPATCH" == "true" || "$IS_PUSH" != 'true' ]]; then + export CIBW_ENVIRONMENT="PIP_EXTRA_INDEX_URL=https://pypi.anaconda.org/scientific-python-nightly-wheels/simple" + fi cibuildwheel --prerelease-pythons --output-dir wheelhouse + environment: CIBW_BUILD: << parameters.cibw-build >> diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index 6647bc03df17f..f8bfeaef94034 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -137,7 +137,8 @@ jobs: shell: bash -el {0} run: echo "sdist_name=$(cd ./dist && ls -d */)" >> "$GITHUB_ENV" - - name: Build wheels + - name: Build normal wheels + if: ${{ (env.IS_SCHEDULE_DISPATCH != 'true' || env.IS_PUSH == 'true') }} uses: pypa/cibuildwheel@v2.16.2 with: package-dir: ./dist/${{ matrix.buildplat[1] == 'macosx_*' && env.sdist_name || needs.build_sdist.outputs.sdist_file }} @@ -145,6 +146,18 @@ jobs: CIBW_PRERELEASE_PYTHONS: True CIBW_BUILD: ${{ matrix.python[0] }}-${{ matrix.buildplat[1] }} + - name: Build nightly wheels (with NumPy pre-release) + if: ${{ (env.IS_SCHEDULE_DISPATCH == 'true' && env.IS_PUSH != 'true') }} + uses: pypa/cibuildwheel@v2.16.2 + with: + package-dir: ./dist/${{ matrix.buildplat[1] == 'macosx_*' && env.sdist_name || needs.build_sdist.outputs.sdist_file }} + env: + # The nightly wheels should be build witht he NumPy 2.0 pre-releases + # which requires the additional URL. + CIBW_ENVIRONMENT: PIP_EXTRA_INDEX_URL=https://pypi.anaconda.org/scientific-python-nightly-wheels/simple + CIBW_PRERELEASE_PYTHONS: True + CIBW_BUILD: ${{ matrix.python[0] }}-${{ matrix.buildplat[1] }} + - name: Set up Python uses: mamba-org/setup-micromamba@v1 with: diff --git a/pyproject.toml b/pyproject.toml index a5aaa72289209..102f1b7f10d2c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -6,11 +6,12 @@ requires = [ "meson==1.2.1", "wheel", "Cython>=0.29.33,<3", # Note: sync with setup.py, environment.yml and asv.conf.json - # Note: numpy 1.25 has a backwards compatible C API by default - # we don't want to force users to compile with 1.25 though - # (Ideally, in the future, though, oldest-supported-numpy can be dropped when our min numpy is 1.25.x) - "oldest-supported-numpy>=2022.8.16; python_version<'3.12'", - "numpy>=1.26.0,<2; python_version>='3.12'", + # Any NumPy version should be fine for compiling. Users are unlikely + # to get a NumPy<1.25 so the result will be compatible with all relevant + # NumPy versions (if not it is presumably compatible with their version). + # Pin <2.0 for releases until tested against an RC. But explicitly allow + # testing the `.dev0` nightlies (which require the extra index). + "numpy>1.22.4,<=2.0.0.dev0", "versioneer[toml]" ]
I am not sure it is uncontested that this is a good version pin change. I also took the liberty of removing the current use of oldest-supported-numpy, since Python 3.9 means that you should normally always get 1.25 anyway and that should work for any NumPy version. The `<=2.0.0.dev0` is a way to explicitly allow NumPy nightly wheels, although of course you need the additional URL to use it. In a release version, it might be nicer to not have it, but it also should not really hurt since you are unlikely to have the NumPy dev wheels available unless you really want them. (Not sure how to best test that the changes work as is.) --- @lithomas1 I suppose you are the one to discuss this? I am not sure that others will agree that changing the version pin as I did is a viable approach, yet. Maybe there is some reason why removing oldest-supported-numpy seems terrible also?!
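The pin's interaction with pre-releases can be sanity-checked with `packaging` (illustration only, not part of the patch):

```python
from packaging.specifiers import SpecifierSet
from packaging.version import Version

spec = SpecifierSet(">1.22.4,<=2.0.0.dev0")

# Dev builds sort *before* the final release, and naming one in the
# specifier opts the whole set into accepting pre-releases.
print(Version("1.26.0") in spec)      # True: regular releases still match
print(Version("2.0.0.dev0") in spec)  # True: the nightly is admitted
print(Version("2.0.0") in spec)       # False: a final 2.0 stays excluded
```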
https://api.github.com/repos/pandas-dev/pandas/pulls/55594
2023-10-19T12:45:40Z
2023-10-25T14:45:44Z
2023-10-25T14:45:44Z
2023-10-25T14:46:15Z
DEPR: fix stacklevel for DataFrame(mgr) deprecation
diff --git a/doc/source/user_guide/10min.rst b/doc/source/user_guide/10min.rst index e9f1a073d77ef..c8e67710c85a9 100644 --- a/doc/source/user_guide/10min.rst +++ b/doc/source/user_guide/10min.rst @@ -763,14 +763,12 @@ Parquet Writing to a Parquet file: .. ipython:: python - :okwarning: df.to_parquet("foo.parquet") Reading from a Parquet file Store using :func:`read_parquet`: .. ipython:: python - :okwarning: pd.read_parquet("foo.parquet") diff --git a/doc/source/user_guide/io.rst b/doc/source/user_guide/io.rst index cebe58a4da62e..73ec09cdd12f1 100644 --- a/doc/source/user_guide/io.rst +++ b/doc/source/user_guide/io.rst @@ -2247,7 +2247,6 @@ For line-delimited json files, pandas can also return an iterator which reads in Line-limited json can also be read using the pyarrow reader by specifying ``engine="pyarrow"``. .. ipython:: python - :okwarning: from io import BytesIO df = pd.read_json(BytesIO(jsonl.encode()), lines=True, engine="pyarrow") @@ -5372,7 +5371,6 @@ See the documentation for `pyarrow <https://arrow.apache.org/docs/python/>`__ an Write to a parquet file. .. ipython:: python - :okwarning: df.to_parquet("example_pa.parquet", engine="pyarrow") df.to_parquet("example_fp.parquet", engine="fastparquet") @@ -5380,7 +5378,6 @@ Write to a parquet file. Read from a parquet file. .. ipython:: python - :okwarning: result = pd.read_parquet("example_fp.parquet", engine="fastparquet") result = pd.read_parquet("example_pa.parquet", engine="pyarrow") @@ -5390,7 +5387,6 @@ Read from a parquet file. By setting the ``dtype_backend`` argument you can control the default dtypes used for the resulting DataFrame. .. ipython:: python - :okwarning: result = pd.read_parquet("example_pa.parquet", engine="pyarrow", dtype_backend="pyarrow") @@ -5404,7 +5400,6 @@ By setting the ``dtype_backend`` argument you can control the default dtypes use Read only certain columns of a parquet file. .. ipython:: python - :okwarning: result = pd.read_parquet( "example_fp.parquet", @@ -5433,7 +5428,6 @@ Serializing a ``DataFrame`` to parquet may include the implicit index as one or more columns in the output file. Thus, this code: .. ipython:: python - :okwarning: df = pd.DataFrame({"a": [1, 2], "b": [3, 4]}) df.to_parquet("test.parquet", engine="pyarrow") @@ -5450,7 +5444,6 @@ If you want to omit a dataframe's indexes when writing, pass ``index=False`` to :func:`~pandas.DataFrame.to_parquet`: .. ipython:: python - :okwarning: df.to_parquet("test.parquet", index=False) @@ -5473,7 +5466,6 @@ Partitioning Parquet files Parquet supports partitioning of data based on the values of one or more columns. .. ipython:: python - :okwarning: df = pd.DataFrame({"a": [0, 0, 1, 1], "b": [0, 1, 0, 1]}) df.to_parquet(path="test", engine="pyarrow", partition_cols=["a"], compression=None) @@ -5539,14 +5531,12 @@ ORC format, :func:`~pandas.read_orc` and :func:`~pandas.DataFrame.to_orc`. This Write to an orc file. .. ipython:: python - :okwarning: df.to_orc("example_pa.orc", engine="pyarrow") Read from an orc file. .. ipython:: python - :okwarning: result = pd.read_orc("example_pa.orc") @@ -5555,7 +5545,6 @@ Read from an orc file. Read only certain columns of an orc file. .. 
ipython:: python - :okwarning: result = pd.read_orc( "example_pa.orc", diff --git a/doc/source/user_guide/pyarrow.rst b/doc/source/user_guide/pyarrow.rst index 9012c7b591f34..61b383afb7c43 100644 --- a/doc/source/user_guide/pyarrow.rst +++ b/doc/source/user_guide/pyarrow.rst @@ -104,7 +104,6 @@ To convert a :external+pyarrow:py:class:`pyarrow.Table` to a :class:`DataFrame`, :external+pyarrow:py:meth:`pyarrow.Table.to_pandas` method with ``types_mapper=pd.ArrowDtype``. .. ipython:: python - :okwarning: table = pa.table([pa.array([1, 2, 3], type=pa.int64())], names=["a"]) @@ -165,7 +164,6 @@ functions provide an ``engine`` keyword that can dispatch to PyArrow to accelera * :func:`read_feather` .. ipython:: python - :okwarning: import io data = io.StringIO("""a,b,c @@ -180,7 +178,6 @@ PyArrow-backed data by specifying the parameter ``dtype_backend="pyarrow"``. A r ``engine="pyarrow"`` to necessarily return PyArrow-backed data. .. ipython:: python - :okwarning: import io data = io.StringIO("""a,b,c,d,e,f,g,h,i diff --git a/doc/source/user_guide/scale.rst b/doc/source/user_guide/scale.rst index 7b89b25f6ea38..b262de5d71439 100644 --- a/doc/source/user_guide/scale.rst +++ b/doc/source/user_guide/scale.rst @@ -51,7 +51,6 @@ To load the columns we want, we have two options. Option 1 loads in all the data and then filters to what we need. .. ipython:: python - :okwarning: columns = ["id_0", "name_0", "x_0", "y_0"] @@ -60,7 +59,6 @@ Option 1 loads in all the data and then filters to what we need. Option 2 only loads the columns we request. .. ipython:: python - :okwarning: pd.read_parquet("timeseries_wide.parquet", columns=columns) @@ -202,7 +200,6 @@ counts up to this point. As long as each individual file fits in memory, this wi work for arbitrary-sized datasets. .. ipython:: python - :okwarning: %%time files = pathlib.Path("data/timeseries/").glob("ts*.parquet") diff --git a/doc/source/whatsnew/v2.0.0.rst b/doc/source/whatsnew/v2.0.0.rst index 07cc4aeed1272..be63da09846c8 100644 --- a/doc/source/whatsnew/v2.0.0.rst +++ b/doc/source/whatsnew/v2.0.0.rst @@ -152,7 +152,6 @@ When this keyword is set to ``"pyarrow"``, then these functions will return pyar * :meth:`Series.convert_dtypes` .. ipython:: python - :okwarning: import io data = io.StringIO("""a,b,c,d,e,f,g,h,i diff --git a/pandas/core/frame.py b/pandas/core/frame.py index f37be37f37693..49a051151d76c 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -697,7 +697,7 @@ def __init__( "is deprecated and will raise in a future version. " "Use public APIs instead.", DeprecationWarning, - stacklevel=find_stack_level(), + stacklevel=1, # bump to 2 once pyarrow 15.0 is released with fix ) if using_copy_on_write(): diff --git a/pandas/core/series.py b/pandas/core/series.py index 88d3616423155..413d29a78bc98 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -407,7 +407,7 @@ def __init__( "is deprecated and will raise in a future version. " "Use public APIs instead.", DeprecationWarning, - stacklevel=find_stack_level(), + stacklevel=2, ) if using_copy_on_write(): data = data.copy(deep=False) @@ -446,7 +446,7 @@ def __init__( "is deprecated and will raise in a future version. " "Use public APIs instead.", DeprecationWarning, - stacklevel=find_stack_level(), + stacklevel=2, ) if copy: @@ -465,7 +465,7 @@ def __init__( "is deprecated and will raise in a future version. 
" "Use public APIs instead.", DeprecationWarning, - stacklevel=find_stack_level(), + stacklevel=2, ) name = ibase.maybe_extract_name(name, data, type(self)) @@ -539,7 +539,7 @@ def __init__( "is deprecated and will raise in a future version. " "Use public APIs instead.", DeprecationWarning, - stacklevel=find_stack_level(), + stacklevel=2, ) allow_mgr = True diff --git a/pandas/tests/arrays/interval/test_interval.py b/pandas/tests/arrays/interval/test_interval.py index e772e4fba6c01..024721896cc58 100644 --- a/pandas/tests/arrays/interval/test_interval.py +++ b/pandas/tests/arrays/interval/test_interval.py @@ -309,6 +309,9 @@ def test_arrow_array_missing(): assert result.storage.equals(expected) +@pytest.mark.filterwarnings( + "ignore:Passing a BlockManager to DataFrame:DeprecationWarning" +) @pytest.mark.parametrize( "breaks", [[0.0, 1.0, 2.0, 3.0], date_range("2017", periods=4, freq="D")], @@ -325,16 +328,12 @@ def test_arrow_table_roundtrip(breaks): table = pa.table(df) assert isinstance(table.field("a").type, ArrowIntervalType) - msg = "Passing a BlockManager to DataFrame is deprecated" - with tm.assert_produces_warning(DeprecationWarning, match=msg): - result = table.to_pandas() + result = table.to_pandas() assert isinstance(result["a"].dtype, pd.IntervalDtype) tm.assert_frame_equal(result, df) table2 = pa.concat_tables([table, table]) - msg = "Passing a BlockManager to DataFrame is deprecated" - with tm.assert_produces_warning(DeprecationWarning, match=msg): - result = table2.to_pandas() + result = table2.to_pandas() expected = pd.concat([df, df], ignore_index=True) tm.assert_frame_equal(result, expected) @@ -342,12 +341,13 @@ def test_arrow_table_roundtrip(breaks): table = pa.table( [pa.chunked_array([], type=table.column(0).type)], schema=table.schema ) - msg = "Passing a BlockManager to DataFrame is deprecated" - with tm.assert_produces_warning(DeprecationWarning, match=msg): - result = table.to_pandas() + result = table.to_pandas() tm.assert_frame_equal(result, expected[0:0]) +@pytest.mark.filterwarnings( + "ignore:Passing a BlockManager to DataFrame:DeprecationWarning" +) @pytest.mark.parametrize( "breaks", [[0.0, 1.0, 2.0, 3.0], date_range("2017", periods=4, freq="D")], @@ -365,9 +365,7 @@ def test_arrow_table_roundtrip_without_metadata(breaks): table = table.replace_schema_metadata() assert table.schema.metadata is None - msg = "Passing a BlockManager to DataFrame is deprecated" - with tm.assert_produces_warning(DeprecationWarning, match=msg): - result = table.to_pandas() + result = table.to_pandas() assert isinstance(result["a"].dtype, pd.IntervalDtype) tm.assert_frame_equal(result, df) diff --git a/pandas/tests/arrays/masked/test_arrow_compat.py b/pandas/tests/arrays/masked/test_arrow_compat.py index 13efc0f60ecef..7a89656bd5aa0 100644 --- a/pandas/tests/arrays/masked/test_arrow_compat.py +++ b/pandas/tests/arrays/masked/test_arrow_compat.py @@ -4,6 +4,10 @@ import pandas as pd import pandas._testing as tm +pytestmark = pytest.mark.filterwarnings( + "ignore:Passing a BlockManager to DataFrame:DeprecationWarning" +) + pa = pytest.importorskip("pyarrow") from pandas.core.arrays.arrow._arrow_utils import pyarrow_array_to_numpy_and_mask @@ -36,9 +40,7 @@ def test_arrow_roundtrip(data): table = pa.table(df) assert table.field("a").type == str(data.dtype.numpy_dtype) - msg = "Passing a BlockManager to DataFrame is deprecated" - with tm.assert_produces_warning(DeprecationWarning, match=msg): - result = table.to_pandas() + result = table.to_pandas() assert result["a"].dtype == 
data.dtype tm.assert_frame_equal(result, df) @@ -56,9 +58,7 @@ def types_mapper(arrow_type): record_batch = pa.RecordBatch.from_arrays( [bools_array, ints_array, small_ints_array], ["bools", "ints", "small_ints"] ) - msg = "Passing a BlockManager to DataFrame is deprecated" - with tm.assert_produces_warning(DeprecationWarning, match=msg): - result = record_batch.to_pandas(types_mapper=types_mapper) + result = record_batch.to_pandas(types_mapper=types_mapper) bools = pd.Series([True, None, False], dtype="boolean") ints = pd.Series([1, None, 2], dtype="Int64") small_ints = pd.Series([-1, 0, 7], dtype="Int64") @@ -75,9 +75,7 @@ def test_arrow_load_from_zero_chunks(data): table = pa.table( [pa.chunked_array([], type=table.field("a").type)], schema=table.schema ) - msg = "Passing a BlockManager to DataFrame is deprecated" - with tm.assert_produces_warning(DeprecationWarning, match=msg): - result = table.to_pandas() + result = table.to_pandas() assert result["a"].dtype == data.dtype tm.assert_frame_equal(result, df) @@ -98,18 +96,14 @@ def test_arrow_sliced(data): df = pd.DataFrame({"a": data}) table = pa.table(df) - msg = "Passing a BlockManager to DataFrame is deprecated" - with tm.assert_produces_warning(DeprecationWarning, match=msg): - result = table.slice(2, None).to_pandas() + result = table.slice(2, None).to_pandas() expected = df.iloc[2:].reset_index(drop=True) tm.assert_frame_equal(result, expected) # no missing values df2 = df.fillna(data[0]) table = pa.table(df2) - msg = "Passing a BlockManager to DataFrame is deprecated" - with tm.assert_produces_warning(DeprecationWarning, match=msg): - result = table.slice(2, None).to_pandas() + result = table.slice(2, None).to_pandas() expected = df2.iloc[2:].reset_index(drop=True) tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/arrays/period/test_arrow_compat.py b/pandas/tests/arrays/period/test_arrow_compat.py index bb41b564f6db0..6441b188a69c5 100644 --- a/pandas/tests/arrays/period/test_arrow_compat.py +++ b/pandas/tests/arrays/period/test_arrow_compat.py @@ -11,6 +11,11 @@ period_array, ) +pytestmark = pytest.mark.filterwarnings( + "ignore:Passing a BlockManager to DataFrame:DeprecationWarning" +) + + pa = pytest.importorskip("pyarrow") @@ -81,16 +86,12 @@ def test_arrow_table_roundtrip(): table = pa.table(df) assert isinstance(table.field("a").type, ArrowPeriodType) - msg = "Passing a BlockManager to DataFrame is deprecated" - with tm.assert_produces_warning(DeprecationWarning, match=msg): - result = table.to_pandas() + result = table.to_pandas() assert isinstance(result["a"].dtype, PeriodDtype) tm.assert_frame_equal(result, df) table2 = pa.concat_tables([table, table]) - msg = "Passing a BlockManager to DataFrame is deprecated" - with tm.assert_produces_warning(DeprecationWarning, match=msg): - result = table2.to_pandas() + result = table2.to_pandas() expected = pd.concat([df, df], ignore_index=True) tm.assert_frame_equal(result, expected) @@ -109,9 +110,7 @@ def test_arrow_load_from_zero_chunks(): [pa.chunked_array([], type=table.column(0).type)], schema=table.schema ) - msg = "Passing a BlockManager to DataFrame is deprecated" - with tm.assert_produces_warning(DeprecationWarning, match=msg): - result = table.to_pandas() + result = table.to_pandas() assert isinstance(result["a"].dtype, PeriodDtype) tm.assert_frame_equal(result, df) @@ -126,8 +125,6 @@ def test_arrow_table_roundtrip_without_metadata(): table = table.replace_schema_metadata() assert table.schema.metadata is None - msg = "Passing a BlockManager to DataFrame 
is deprecated" - with tm.assert_produces_warning(DeprecationWarning, match=msg): - result = table.to_pandas() + result = table.to_pandas() assert isinstance(result["a"].dtype, PeriodDtype) tm.assert_frame_equal(result, df) diff --git a/pandas/tests/arrays/string_/test_string.py b/pandas/tests/arrays/string_/test_string.py index 052a013eba739..524a6632e5544 100644 --- a/pandas/tests/arrays/string_/test_string.py +++ b/pandas/tests/arrays/string_/test_string.py @@ -488,6 +488,7 @@ def test_arrow_array(dtype): assert arr.equals(expected) +@pytest.mark.filterwarnings("ignore:Passing a BlockManager:DeprecationWarning") def test_arrow_roundtrip(dtype, string_storage2): # roundtrip possible from arrow 1.0.0 pa = pytest.importorskip("pyarrow") @@ -497,9 +498,7 @@ def test_arrow_roundtrip(dtype, string_storage2): table = pa.table(df) assert table.field("a").type == "string" with pd.option_context("string_storage", string_storage2): - msg = "Passing a BlockManager to DataFrame is deprecated" - with tm.assert_produces_warning(DeprecationWarning, match=msg): - result = table.to_pandas() + result = table.to_pandas() assert isinstance(result["a"].dtype, pd.StringDtype) expected = df.astype(f"string[{string_storage2}]") tm.assert_frame_equal(result, expected) @@ -507,6 +506,7 @@ def test_arrow_roundtrip(dtype, string_storage2): assert result.loc[2, "a"] is na_val(result["a"].dtype) +@pytest.mark.filterwarnings("ignore:Passing a BlockManager:DeprecationWarning") def test_arrow_load_from_zero_chunks(dtype, string_storage2): # GH-41040 pa = pytest.importorskip("pyarrow") @@ -518,9 +518,7 @@ def test_arrow_load_from_zero_chunks(dtype, string_storage2): # Instantiate the same table with no chunks at all table = pa.table([pa.chunked_array([], type=pa.string())], schema=table.schema) with pd.option_context("string_storage", string_storage2): - msg = "Passing a BlockManager to DataFrame is deprecated" - with tm.assert_produces_warning(DeprecationWarning, match=msg): - result = table.to_pandas() + result = table.to_pandas() assert isinstance(result["a"].dtype, pd.StringDtype) expected = df.astype(f"string[{string_storage2}]") tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/copy_view/test_constructors.py b/pandas/tests/copy_view/test_constructors.py index b288d51160534..1f61598c40573 100644 --- a/pandas/tests/copy_view/test_constructors.py +++ b/pandas/tests/copy_view/test_constructors.py @@ -197,7 +197,7 @@ def test_dataframe_constructor_mgr_or_df(using_copy_on_write, columns, use_mgr): data = df warn = None msg = "Passing a BlockManager to DataFrame" - with tm.assert_produces_warning(warn, match=msg): + with tm.assert_produces_warning(warn, match=msg, check_stacklevel=False): new_df = DataFrame(data) assert np.shares_memory(get_array(df, "a"), get_array(new_df, "a")) diff --git a/pandas/tests/frame/test_block_internals.py b/pandas/tests/frame/test_block_internals.py index 295f457bcaeab..8644e11db9b0d 100644 --- a/pandas/tests/frame/test_block_internals.py +++ b/pandas/tests/frame/test_block_internals.py @@ -51,12 +51,16 @@ def test_setitem_invalidates_datetime_index_freq(self): def test_cast_internals(self, float_frame): msg = "Passing a BlockManager to DataFrame" - with tm.assert_produces_warning(DeprecationWarning, match=msg): + with tm.assert_produces_warning( + DeprecationWarning, match=msg, check_stacklevel=False + ): casted = DataFrame(float_frame._mgr, dtype=int) expected = DataFrame(float_frame._series, dtype=int) tm.assert_frame_equal(casted, expected) - with 
tm.assert_produces_warning(DeprecationWarning, match=msg): + with tm.assert_produces_warning( + DeprecationWarning, match=msg, check_stacklevel=False + ): casted = DataFrame(float_frame._mgr, dtype=np.int32) expected = DataFrame(float_frame._series, dtype=np.int32) tm.assert_frame_equal(casted, expected) diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py index ff4fb85fa615a..bf17b61b0e3f3 100644 --- a/pandas/tests/frame/test_constructors.py +++ b/pandas/tests/frame/test_constructors.py @@ -1746,7 +1746,9 @@ def test_constructor_manager_resize(self, float_frame): columns = list(float_frame.columns[:3]) msg = "Passing a BlockManager to DataFrame" - with tm.assert_produces_warning(DeprecationWarning, match=msg): + with tm.assert_produces_warning( + DeprecationWarning, match=msg, check_stacklevel=False + ): result = DataFrame(float_frame._mgr, index=index, columns=columns) tm.assert_index_equal(result.index, Index(index)) tm.assert_index_equal(result.columns, Index(columns)) diff --git a/pandas/tests/io/parser/common/test_common_basic.py b/pandas/tests/io/parser/common/test_common_basic.py index 350bc3f97796a..3abbd14c20e16 100644 --- a/pandas/tests/io/parser/common/test_common_basic.py +++ b/pandas/tests/io/parser/common/test_common_basic.py @@ -88,14 +88,7 @@ def test_read_csv_local(all_parsers, csv1): parser = all_parsers fname = prefix + str(os.path.abspath(csv1)) - - warn = None - if parser.engine == "pyarrow": - warn = DeprecationWarning - msg = "Passing a BlockManager to DataFrame is deprecated" - - with tm.assert_produces_warning(warn, match=msg, check_stacklevel=False): - result = parser.read_csv(fname, index_col=0, parse_dates=True) + result = parser.read_csv(fname, index_col=0, parse_dates=True) # TODO: make unit check more specific if parser.engine == "pyarrow": result.index = result.index.as_unit("ns") diff --git a/pandas/tests/io/parser/conftest.py b/pandas/tests/io/parser/conftest.py index 16ee8ab4106ef..471f525e229e5 100644 --- a/pandas/tests/io/parser/conftest.py +++ b/pandas/tests/io/parser/conftest.py @@ -29,13 +29,20 @@ def read_csv(self, *args, **kwargs): return read_csv(*args, **kwargs) def read_csv_check_warnings( - self, warn_type: type[Warning], warn_msg: str, *args, **kwargs + self, + warn_type: type[Warning], + warn_msg: str, + *args, + raise_on_extra_warnings=True, + **kwargs, ): # We need to check the stacklevel here instead of in the tests # since this is where read_csv is called and where the warning # should point to. kwargs = self.update_kwargs(kwargs) - with tm.assert_produces_warning(warn_type, match=warn_msg): + with tm.assert_produces_warning( + warn_type, match=warn_msg, raise_on_extra_warnings=raise_on_extra_warnings + ): return read_csv(*args, **kwargs) def read_table(self, *args, **kwargs): @@ -43,13 +50,20 @@ def read_table(self, *args, **kwargs): return read_table(*args, **kwargs) def read_table_check_warnings( - self, warn_type: type[Warning], warn_msg: str, *args, **kwargs + self, + warn_type: type[Warning], + warn_msg: str, + *args, + raise_on_extra_warnings=True, + **kwargs, ): # We need to check the stacklevel here instead of in the tests # since this is where read_table is called and where the warning # should point to. 
kwargs = self.update_kwargs(kwargs) - with tm.assert_produces_warning(warn_type, match=warn_msg): + with tm.assert_produces_warning( + warn_type, match=warn_msg, raise_on_extra_warnings=raise_on_extra_warnings + ): return read_table(*args, **kwargs) diff --git a/pandas/tests/io/parser/test_network.py b/pandas/tests/io/parser/test_network.py index 613284ad096d2..28e5f5ad9bb70 100644 --- a/pandas/tests/io/parser/test_network.py +++ b/pandas/tests/io/parser/test_network.py @@ -20,6 +20,10 @@ from pandas.io.feather_format import read_feather from pandas.io.parsers import read_csv +pytestmark = pytest.mark.filterwarnings( + "ignore:Passing a BlockManager to DataFrame:DeprecationWarning" +) + @pytest.mark.network @pytest.mark.single_cpu diff --git a/pandas/tests/io/parser/test_parse_dates.py b/pandas/tests/io/parser/test_parse_dates.py index 2fd389772ca4f..47e654fc606af 100644 --- a/pandas/tests/io/parser/test_parse_dates.py +++ b/pandas/tests/io/parser/test_parse_dates.py @@ -184,14 +184,12 @@ def date_parser(*date_cols): "keep_date_col": keep_date_col, "names": ["X0", "X1", "X2", "X3", "X4", "X5", "X6", "X7", "X8"], } - warn = FutureWarning - if parser.engine == "pyarrow": - warn = (FutureWarning, DeprecationWarning) result = parser.read_csv_check_warnings( - warn, + FutureWarning, "use 'date_format' instead", StringIO(data), **kwds, + raise_on_extra_warnings=False, ) expected = DataFrame( @@ -506,10 +504,11 @@ def test_multiple_date_cols_int_cast(all_parsers): "date_parser": pd.to_datetime, } result = parser.read_csv_check_warnings( - (FutureWarning, DeprecationWarning), + FutureWarning, "use 'date_format' instead", StringIO(data), **kwds, + raise_on_extra_warnings=False, ) expected = DataFrame( @@ -556,17 +555,14 @@ def test_multiple_date_col_timestamp_parse(all_parsers): data = """05/31/2012,15:30:00.029,1306.25,1,E,0,,1306.25 05/31/2012,15:30:00.029,1306.25,8,E,0,,1306.25""" - warn = FutureWarning - if parser.engine == "pyarrow": - warn = (FutureWarning, DeprecationWarning) - result = parser.read_csv_check_warnings( - warn, + FutureWarning, "use 'date_format' instead", StringIO(data), parse_dates=[[0, 1]], header=None, date_parser=Timestamp, + raise_on_extra_warnings=False, ) expected = DataFrame( [ @@ -722,12 +718,8 @@ def test_date_parser_int_bug(all_parsers): "12345,1,-1,3,invoice_InvoiceResource,search\n" ) - warn = FutureWarning - if parser.engine == "pyarrow": - warn = (FutureWarning, DeprecationWarning) - result = parser.read_csv_check_warnings( - warn, + FutureWarning, "use 'date_format' instead", StringIO(data), index_col=0, @@ -737,6 +729,7 @@ def test_date_parser_int_bug(all_parsers): date_parser=lambda x: datetime.fromtimestamp(int(x), tz=timezone.utc).replace( tzinfo=None ), + raise_on_extra_warnings=False, ) expected = DataFrame( [ @@ -1288,14 +1281,7 @@ def test_bad_date_parse(all_parsers, cache_dates, value): parser = all_parsers s = StringIO((f"{value},\n") * (start_caching_at + 1)) - warn = None - msg = "Passing a BlockManager to DataFrame" - if parser.engine == "pyarrow": - warn = DeprecationWarning - - parser.read_csv_check_warnings( - warn, - msg, + parser.read_csv( s, header=None, names=["foo", "bar"], @@ -1317,24 +1303,22 @@ def test_bad_date_parse_with_warning(all_parsers, cache_dates, value): # pandas doesn't try to guess the datetime format # TODO: parse dates directly in pyarrow, see # https://github.com/pandas-dev/pandas/issues/48017 - warn = DeprecationWarning - msg = "Passing a BlockManager to DataFrame" + warn = None elif cache_dates: # Note: warning is not 
raised if 'cache_dates', because here there is only a # single unique date and hence no risk of inconsistent parsing. warn = None - msg = None else: warn = UserWarning - msg = "Could not infer format" parser.read_csv_check_warnings( warn, - msg, + "Could not infer format", s, header=None, names=["foo", "bar"], parse_dates=["foo"], cache_dates=cache_dates, + raise_on_extra_warnings=False, ) @@ -1359,17 +1343,14 @@ def test_parse_dates_infer_datetime_format_warning(all_parsers, reader): parser = all_parsers data = "Date,test\n2012-01-01,1\n,2" - warn = FutureWarning - if parser.engine == "pyarrow": - warn = (FutureWarning, DeprecationWarning) - getattr(parser, reader)( - warn, + FutureWarning, "The argument 'infer_datetime_format' is deprecated", StringIO(data), parse_dates=["Date"], infer_datetime_format=True, sep=",", + raise_on_extra_warnings=False, ) @@ -1534,17 +1515,13 @@ def test_parse_date_time_multi_level_column_name(all_parsers): ) def test_parse_date_time(all_parsers, data, kwargs, expected): parser = all_parsers - - warn = FutureWarning - if parser.engine == "pyarrow": - warn = (FutureWarning, DeprecationWarning) - result = parser.read_csv_check_warnings( - warn, + FutureWarning, "use 'date_format' instead", StringIO(data), date_parser=pd.to_datetime, **kwargs, + raise_on_extra_warnings=False, ) # Python can sometimes be flaky about how @@ -1556,19 +1533,15 @@ def test_parse_date_time(all_parsers, data, kwargs, expected): def test_parse_date_fields(all_parsers): parser = all_parsers - - warn = FutureWarning - if parser.engine == "pyarrow": - warn = (FutureWarning, DeprecationWarning) - data = "year,month,day,a\n2001,01,10,10.\n2001,02,1,11." result = parser.read_csv_check_warnings( - warn, + FutureWarning, "use 'date_format' instead", StringIO(data), header=0, parse_dates={"ymd": [0, 1, 2]}, date_parser=lambda x: x, + raise_on_extra_warnings=False, ) expected = DataFrame( @@ -1596,21 +1569,14 @@ def test_parse_date_all_fields(all_parsers, key, value, warn): 2001,01,05,10,00,0,0.0,10. 2001,01,5,10,0,00,1.,11. """ - msg = "use 'date_format' instead" - if parser.engine == "pyarrow": - if warn is None: - msg = "Passing a BlockManager to DataFrame is deprecated" - warn = DeprecationWarning - else: - warn = (warn, DeprecationWarning) - result = parser.read_csv_check_warnings( warn, - msg, + "use 'date_format' instead", StringIO(data), header=0, parse_dates={"ymdHMS": [0, 1, 2, 3, 4, 5]}, **{key: value}, + raise_on_extra_warnings=False, ) expected = DataFrame( [ @@ -1640,21 +1606,14 @@ def test_datetime_fractional_seconds(all_parsers, key, value, warn): 2001,01,05,10,00,0.123456,0.0,10. 2001,01,5,10,0,0.500000,1.,11. 
""" - msg = "use 'date_format' instead" - if parser.engine == "pyarrow": - if warn is None: - msg = "Passing a BlockManager to DataFrame is deprecated" - warn = DeprecationWarning - else: - warn = (warn, DeprecationWarning) - result = parser.read_csv_check_warnings( warn, - msg, + "use 'date_format' instead", StringIO(data), header=0, parse_dates={"ymdHMS": [0, 1, 2, 3, 4, 5]}, **{key: value}, + raise_on_extra_warnings=False, ) expected = DataFrame( [ @@ -1673,17 +1632,14 @@ def test_generic(all_parsers): def parse_function(yy, mm): return [date(year=int(y), month=int(m), day=1) for y, m in zip(yy, mm)] - warn = FutureWarning - if parser.engine == "pyarrow": - warn = (FutureWarning, DeprecationWarning) - result = parser.read_csv_check_warnings( - warn, + FutureWarning, "use 'date_format' instead", StringIO(data), header=0, parse_dates={"ym": [0, 1]}, date_parser=parse_function, + raise_on_extra_warnings=False, ) expected = DataFrame( [[date(2001, 1, 1), 10, 10.0], [date(2001, 2, 1), 1, 11.0]], @@ -2223,8 +2179,7 @@ def test_parse_dot_separated_dates(all_parsers): dtype="object", name="a", ) - warn = DeprecationWarning - msg = "Passing a BlockManager to DataFrame" + warn = None else: expected_index = DatetimeIndex( ["2003-03-27 14:55:00", "2003-08-03 15:20:00"], @@ -2232,9 +2187,14 @@ def test_parse_dot_separated_dates(all_parsers): name="a", ) warn = UserWarning - msg = r"when dayfirst=False \(the default\) was specified" + msg = r"when dayfirst=False \(the default\) was specified" result = parser.read_csv_check_warnings( - warn, msg, StringIO(data), parse_dates=True, index_col=0 + warn, + msg, + StringIO(data), + parse_dates=True, + index_col=0, + raise_on_extra_warnings=False, ) expected = DataFrame({"b": [1, 2]}, index=expected_index) tm.assert_frame_equal(result, expected) @@ -2271,14 +2231,9 @@ def test_parse_dates_dict_format_two_columns(all_parsers, key, parse_dates): 31-,12-2019 31-,12-2020""" - warn = None - if parser.engine == "pyarrow": - warn = DeprecationWarning - msg = "Passing a BlockManager to DataFrame is deprecated" - with tm.assert_produces_warning(warn, match=msg, check_stacklevel=False): - result = parser.read_csv( - StringIO(data), date_format={key: "%d- %m-%Y"}, parse_dates=parse_dates - ) + result = parser.read_csv( + StringIO(data), date_format={key: "%d- %m-%Y"}, parse_dates=parse_dates + ) expected = DataFrame( { key: [Timestamp("2019-12-31"), Timestamp("2020-12-31")], diff --git a/pandas/tests/io/parser/test_unsupported.py b/pandas/tests/io/parser/test_unsupported.py index 468d888590cf8..695dc84db5e25 100644 --- a/pandas/tests/io/parser/test_unsupported.py +++ b/pandas/tests/io/parser/test_unsupported.py @@ -19,6 +19,10 @@ from pandas.io.parsers import read_csv import pandas.io.parsers.readers as parsers +pytestmark = pytest.mark.filterwarnings( + "ignore:Passing a BlockManager to DataFrame:DeprecationWarning" +) + @pytest.fixture(params=["python", "python-fwf"], ids=lambda val: val) def python_engine(request): @@ -157,20 +161,15 @@ def test_on_bad_lines_callable_python_or_pyarrow(self, all_parsers): sio = StringIO("a,b\n1,2") bad_lines_func = lambda x: x parser = all_parsers - warn = None - if parser.engine == "pyarrow": - warn = DeprecationWarning - warn_msg = "Passing a BlockManager" - with tm.assert_produces_warning(warn, match=warn_msg, check_stacklevel=False): - if all_parsers.engine not in ["python", "pyarrow"]: - msg = ( - "on_bad_line can only be a callable " - "function if engine='python' or 'pyarrow'" - ) - with pytest.raises(ValueError, match=msg): - 
parser.read_csv(sio, on_bad_lines=bad_lines_func) - else: + if all_parsers.engine not in ["python", "pyarrow"]: + msg = ( + "on_bad_line can only be a callable " + "function if engine='python' or 'pyarrow'" + ) + with pytest.raises(ValueError, match=msg): parser.read_csv(sio, on_bad_lines=bad_lines_func) + else: + parser.read_csv(sio, on_bad_lines=bad_lines_func) def test_close_file_handle_on_invalid_usecols(all_parsers): diff --git a/pandas/tests/io/parser/usecols/test_usecols_basic.py b/pandas/tests/io/parser/usecols/test_usecols_basic.py index 2388d9da3f5b5..7a620768040a7 100644 --- a/pandas/tests/io/parser/usecols/test_usecols_basic.py +++ b/pandas/tests/io/parser/usecols/test_usecols_basic.py @@ -16,6 +16,10 @@ ) import pandas._testing as tm +pytestmark = pytest.mark.filterwarnings( + "ignore:Passing a BlockManager to DataFrame:DeprecationWarning" +) + _msg_validate_usecols_arg = ( "'usecols' must either be list-like " "of all strings, all unicode, all " diff --git a/pandas/tests/io/test_orc.py b/pandas/tests/io/test_orc.py index 6b713bfa42b53..a4021311fc963 100644 --- a/pandas/tests/io/test_orc.py +++ b/pandas/tests/io/test_orc.py @@ -70,9 +70,7 @@ def test_orc_reader_empty(dirpath): expected[colname] = pd.Series(dtype=dtype) inputfile = os.path.join(dirpath, "TestOrcFile.emptyFile.orc") - msg = "Passing a BlockManager to DataFrame is deprecated" - with tm.assert_produces_warning(DeprecationWarning, match=msg): - got = read_orc(inputfile, columns=columns) + got = read_orc(inputfile, columns=columns) tm.assert_equal(expected, got) @@ -92,9 +90,7 @@ def test_orc_reader_basic(dirpath): expected = pd.DataFrame.from_dict(data) inputfile = os.path.join(dirpath, "TestOrcFile.test1.orc") - msg = "Passing a BlockManager to DataFrame is deprecated" - with tm.assert_produces_warning(DeprecationWarning, match=msg): - got = read_orc(inputfile, columns=data.keys()) + got = read_orc(inputfile, columns=data.keys()) tm.assert_equal(expected, got) @@ -121,9 +117,7 @@ def test_orc_reader_decimal(dirpath): expected = pd.DataFrame.from_dict(data) inputfile = os.path.join(dirpath, "TestOrcFile.decimal.orc") - msg = "Passing a BlockManager to DataFrame is deprecated" - with tm.assert_produces_warning(DeprecationWarning, match=msg): - got = read_orc(inputfile).iloc[:10] + got = read_orc(inputfile).iloc[:10] tm.assert_equal(expected, got) @@ -164,9 +158,7 @@ def test_orc_reader_date_low(dirpath): expected = pd.DataFrame.from_dict(data) inputfile = os.path.join(dirpath, "TestOrcFile.testDate1900.orc") - msg = "Passing a BlockManager to DataFrame is deprecated" - with tm.assert_produces_warning(DeprecationWarning, match=msg): - got = read_orc(inputfile).iloc[:10] + got = read_orc(inputfile).iloc[:10] tm.assert_equal(expected, got) @@ -207,9 +199,7 @@ def test_orc_reader_date_high(dirpath): expected = pd.DataFrame.from_dict(data) inputfile = os.path.join(dirpath, "TestOrcFile.testDate2038.orc") - msg = "Passing a BlockManager to DataFrame is deprecated" - with tm.assert_produces_warning(DeprecationWarning, match=msg): - got = read_orc(inputfile).iloc[:10] + got = read_orc(inputfile).iloc[:10] tm.assert_equal(expected, got) @@ -250,9 +240,7 @@ def test_orc_reader_snappy_compressed(dirpath): expected = pd.DataFrame.from_dict(data) inputfile = os.path.join(dirpath, "TestOrcFile.testSnappy.orc") - msg = "Passing a BlockManager to DataFrame is deprecated" - with tm.assert_produces_warning(DeprecationWarning, match=msg): - got = read_orc(inputfile).iloc[:10] + got = read_orc(inputfile).iloc[:10] 
tm.assert_equal(expected, got) @@ -277,9 +265,7 @@ def test_orc_roundtrip_file(dirpath): with tm.ensure_clean() as path: expected.to_orc(path) - msg = "Passing a BlockManager to DataFrame is deprecated" - with tm.assert_produces_warning(DeprecationWarning, match=msg): - got = read_orc(path) + got = read_orc(path) tm.assert_equal(expected, got) @@ -303,9 +289,7 @@ def test_orc_roundtrip_bytesio(): expected = pd.DataFrame.from_dict(data) bytes = expected.to_orc() - msg = "Passing a BlockManager to DataFrame is deprecated" - with tm.assert_produces_warning(DeprecationWarning, match=msg): - got = read_orc(BytesIO(bytes)) + got = read_orc(BytesIO(bytes)) tm.assert_equal(expected, got) @@ -343,9 +327,7 @@ def test_orc_dtype_backend_pyarrow(): ) bytes_data = df.copy().to_orc() - msg = "Passing a BlockManager to DataFrame is deprecated" - with tm.assert_produces_warning(DeprecationWarning, match=msg): - result = read_orc(BytesIO(bytes_data), dtype_backend="pyarrow") + result = read_orc(BytesIO(bytes_data), dtype_backend="pyarrow") expected = pd.DataFrame( { @@ -376,9 +358,7 @@ def test_orc_dtype_backend_numpy_nullable(): ) bytes_data = df.copy().to_orc() - msg = "Passing a BlockManager to DataFrame is deprecated" - with tm.assert_produces_warning(DeprecationWarning, match=msg): - result = read_orc(BytesIO(bytes_data), dtype_backend="numpy_nullable") + result = read_orc(BytesIO(bytes_data), dtype_backend="numpy_nullable") expected = pd.DataFrame( { @@ -407,9 +387,7 @@ def test_orc_uri_path(): with tm.ensure_clean("tmp.orc") as path: expected.to_orc(path) uri = pathlib.Path(path).as_uri() - msg = "Passing a BlockManager to DataFrame is deprecated" - with tm.assert_produces_warning(DeprecationWarning, match=msg): - result = read_orc(uri) + result = read_orc(uri) tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/test_downstream.py b/pandas/tests/test_downstream.py index 5903255118d40..41b4aa97c56f3 100644 --- a/pandas/tests/test_downstream.py +++ b/pandas/tests/test_downstream.py @@ -170,12 +170,11 @@ def test_pandas_datareader(): pytest.importorskip("pandas_datareader") +@pytest.mark.filterwarnings("ignore:Passing a BlockManager:DeprecationWarning") def test_pyarrow(df): pyarrow = pytest.importorskip("pyarrow") table = pyarrow.Table.from_pandas(df) - msg = "Passing a BlockManager to DataFrame is deprecated" - with tm.assert_produces_warning(DeprecationWarning, match=msg): - result = table.to_pandas() + result = table.to_pandas() tm.assert_frame_equal(result, df)
Follow-up on https://github.com/pandas-dev/pandas/pull/52419. The goal is that end users never see this warning (unless they pass a manager themselves), while libraries still see it in their own test suites. See https://github.com/pandas-dev/pandas/pull/52419#issuecomment-1770215326 for context.
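A minimal sketch of the test-suite pattern the diff applies, assuming pandas 2.x with pyarrow installed (the test body is illustrative, not part of the PR):

```python
import pytest

# Module-level marker, as used throughout the diff: every test in this module
# ignores the internal DeprecationWarning raised when a BlockManager is passed
# to the DataFrame constructor (e.g. by pyarrow's to_pandas()).
pytestmark = pytest.mark.filterwarnings(
    "ignore:Passing a BlockManager to DataFrame:DeprecationWarning"
)


def test_roundtrip_is_quiet():
    pd = pytest.importorskip("pandas")
    pa = pytest.importorskip("pyarrow")
    df = pd.DataFrame({"a": [1, 2]})
    # to_pandas() builds the result from a BlockManager internally; the
    # marker above keeps that deprecation from failing the test run.
    result = pa.Table.from_pandas(df).to_pandas()
    pd.testing.assert_frame_equal(result, df)
```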
https://api.github.com/repos/pandas-dev/pandas/pulls/55591
2023-10-19T07:22:14Z
2023-11-09T07:49:54Z
2023-11-09T07:49:54Z
2023-11-09T07:49:57Z
[Documentation] Added another example in `df.clip` documentation.
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 1ae4c3cdfc458..c525003cabd10 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -8785,6 +8785,16 @@ def clip( 3 -1 6 4 5 -4 + Clips using specific lower and upper thresholds per column: + + >>> df.clip([-2, -1], [4,5]) + col_0 col_1 + 0 4 -1 + 1 -2 -1 + 2 0 5 + 3 -1 5 + 4 4 -1 + Clips using specific lower and upper thresholds per column element: >>> t = pd.Series([2, -4, -1, 6, 3])
`df.clip` takes lower and upper thresholds as inputs, and both of these can be of type `float or array-like, default None`. The docs already contain an example that clips with specific lower and upper thresholds per column element, but it would also be nice to have an example where clipping happens per column with a specific lower and upper threshold.
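The new docstring example can be reproduced directly; the frame below is the one the `clip` docstring already uses, so the output matches the diff:

```python
import pandas as pd

df = pd.DataFrame({"col_0": [9, -3, 0, -1, 5], "col_1": [-2, -7, 6, 8, -5]})

# One threshold pair per column: col_0 is clipped to [-2, 4], col_1 to [-1, 5].
print(df.clip([-2, -1], [4, 5]))
```

Each element of the list-like is matched positionally to a column, so `-2`/`4` bound `col_0` and `-1`/`5` bound `col_1`.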
https://api.github.com/repos/pandas-dev/pandas/pulls/55589
2023-10-19T05:20:42Z
2023-10-20T16:46:23Z
2023-10-20T16:46:23Z
2023-10-20T16:46:30Z
REGR: Groupby methods not supporting numba raising TypeError when the…
diff --git a/doc/source/whatsnew/v2.1.2.rst b/doc/source/whatsnew/v2.1.2.rst index 7c04d1a03a6e4..9d2826395b0ef 100644 --- a/doc/source/whatsnew/v2.1.2.rst +++ b/doc/source/whatsnew/v2.1.2.rst @@ -16,6 +16,7 @@ Fixed regressions - Fixed regression in :meth:`DataFrame.join` where result has missing values and dtype is arrow backed string (:issue:`55348`) - Fixed regression in :meth:`DataFrame.resample` which was extrapolating back to ``origin`` when ``origin`` was outside its bounds (:issue:`55064`) - Fixed regression in :meth:`DataFrame.sort_index` which was not sorting correctly when the index was a sliced :class:`MultiIndex` (:issue:`55379`) +- Fixed regression in :meth:`DataFrameGroupBy.agg` and :meth:`SeriesGroupBy.agg` where if the option ``compute.use_numba`` was set to True, groupby methods not supported by the numba engine would raise a ``TypeError`` (:issue:`55520`) - Fixed performance regression with wide DataFrames, typically involving methods where all columns were accessed individually (:issue:`55256`, :issue:`55245`) .. --------------------------------------------------------------------------- diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index a2f556eba08a4..1bdba5a3e71fb 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -236,10 +236,13 @@ def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs) kwargs = {} if isinstance(func, str): - if maybe_use_numba(engine): + if maybe_use_numba(engine) and engine is not None: # Not all agg functions support numba, only propagate numba kwargs - # if user asks for numba + # if user asks for numba, and engine is not None + # (if engine is None, the called function will handle the case where + # numba is requested via the global option) kwargs["engine"] = engine + if engine_kwargs is not None: kwargs["engine_kwargs"] = engine_kwargs return getattr(self, func)(*args, **kwargs) diff --git a/pandas/tests/groupby/test_numba.py b/pandas/tests/groupby/test_numba.py index e36c248c99ad6..ee7d342472493 100644 --- a/pandas/tests/groupby/test_numba.py +++ b/pandas/tests/groupby/test_numba.py @@ -3,6 +3,7 @@ from pandas import ( DataFrame, Series, + option_context, ) import pandas._testing as tm @@ -66,3 +67,14 @@ def test_axis_1_unsupported(self, numba_supported_reductions): gb = df.groupby("a", axis=1) with pytest.raises(NotImplementedError, match="axis=1"): getattr(gb, func)(engine="numba", **kwargs) + + def test_no_engine_doesnt_raise(self): + # GH55520 + df = DataFrame({"a": [3, 2, 3, 2], "b": range(4), "c": range(1, 5)}) + gb = df.groupby("a") + # Make sure behavior of functions w/out engine argument don't raise + # when the global use_numba option is set + with option_context("compute.use_numba", True): + res = gb.agg({"b": "first"}) + expected = gb.agg({"b": "first"}) + tm.assert_frame_equal(res, expected)
… global option is set - [ ] closes #55520 - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
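A short reproduction of the regression, condensed from the added test; note that numba must be installed for the `compute.use_numba` option to be enabled at all:

```python
import pandas as pd

df = pd.DataFrame({"a": [3, 2, 3, 2], "b": range(4), "c": range(1, 5)})
gb = df.groupby("a")

# Before the fix, enabling the global numba option made string aggregations
# without numba support (such as "first") raise a TypeError.
with pd.option_context("compute.use_numba", True):  # requires numba installed
    print(gb.agg({"b": "first"}))
```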
https://api.github.com/repos/pandas-dev/pandas/pulls/55586
2023-10-19T02:27:14Z
2023-10-25T17:14:04Z
2023-10-25T17:14:04Z
2023-10-25T17:14:11Z
Backport PR #55582 on branch 2.1.x (CI: Unpin Cython)
diff --git a/.github/workflows/unit-tests.yml b/.github/workflows/unit-tests.yml index a7eead7aafbf7..8ff7d290ea9d1 100644 --- a/.github/workflows/unit-tests.yml +++ b/.github/workflows/unit-tests.yml @@ -236,7 +236,7 @@ jobs: . ~/virtualenvs/pandas-dev/bin/activate python -m pip install --no-cache-dir -U pip wheel setuptools meson[ninja]==1.2.1 meson-python==0.13.1 python -m pip install numpy --config-settings=setup-args="-Dallow-noblas=true" - python -m pip install --no-cache-dir versioneer[toml] "cython<3.0.3" python-dateutil pytz pytest>=7.3.2 pytest-xdist>=2.2.0 pytest-asyncio>=0.17 hypothesis>=6.46.1 + python -m pip install --no-cache-dir versioneer[toml] cython python-dateutil pytz pytest>=7.3.2 pytest-xdist>=2.2.0 pytest-asyncio>=0.17 hypothesis>=6.46.1 python -m pip install --no-cache-dir --no-build-isolation -e . python -m pip list --no-cache-dir export PANDAS_CI=1 @@ -274,7 +274,7 @@ jobs: /opt/python/cp311-cp311/bin/python -m venv ~/virtualenvs/pandas-dev . ~/virtualenvs/pandas-dev/bin/activate python -m pip install --no-cache-dir -U pip wheel setuptools meson-python==0.13.1 meson[ninja]==1.2.1 - python -m pip install --no-cache-dir versioneer[toml] "cython<3.0.3" numpy python-dateutil pytz pytest>=7.3.2 pytest-xdist>=2.2.0 pytest-asyncio>=0.17 hypothesis>=6.46.1 + python -m pip install --no-cache-dir versioneer[toml] cython numpy python-dateutil pytz pytest>=7.3.2 pytest-xdist>=2.2.0 pytest-asyncio>=0.17 hypothesis>=6.46.1 python -m pip install --no-cache-dir --no-build-isolation -e . python -m pip list --no-cache-dir @@ -347,7 +347,7 @@ jobs: python -m pip install --upgrade pip setuptools wheel meson[ninja]==1.2.1 meson-python==0.13.1 python -m pip install --pre --extra-index-url https://pypi.anaconda.org/scientific-python-nightly-wheels/simple numpy python -m pip install versioneer[toml] - python -m pip install python-dateutil pytz tzdata "cython<3.0.3" hypothesis>=6.46.1 pytest>=7.3.2 pytest-xdist>=2.2.0 pytest-cov pytest-asyncio>=0.17 + python -m pip install python-dateutil pytz tzdata cython hypothesis>=6.46.1 pytest>=7.3.2 pytest-xdist>=2.2.0 pytest-cov pytest-asyncio>=0.17 python -m pip install -ve . 
--no-build-isolation --no-index --no-deps python -m pip list diff --git a/ci/deps/actions-310.yaml b/ci/deps/actions-310.yaml index 1cbd6c24debb2..f8fa04a60a378 100644 --- a/ci/deps/actions-310.yaml +++ b/ci/deps/actions-310.yaml @@ -6,7 +6,7 @@ dependencies: # build dependencies - versioneer[toml] - - cython>=0.29.33, <3.0.3 + - cython>=0.29.33 - meson[ninja]=1.2.1 - meson-python=0.13.1 diff --git a/ci/deps/actions-311-downstream_compat.yaml b/ci/deps/actions-311-downstream_compat.yaml index 9ace8e10e0e28..8bf5ea51c4bf3 100644 --- a/ci/deps/actions-311-downstream_compat.yaml +++ b/ci/deps/actions-311-downstream_compat.yaml @@ -7,7 +7,7 @@ dependencies: # build dependencies - versioneer[toml] - - cython>=0.29.33, <3.0.3 + - cython>=0.29.33 - meson[ninja]=1.2.1 - meson-python=0.13.1 diff --git a/ci/deps/actions-311-numpydev.yaml b/ci/deps/actions-311-numpydev.yaml index 9795a1fb39c6f..7fd3a65ec91f8 100644 --- a/ci/deps/actions-311-numpydev.yaml +++ b/ci/deps/actions-311-numpydev.yaml @@ -8,7 +8,7 @@ dependencies: - versioneer[toml] - meson[ninja]=1.2.1 - meson-python=0.13.1 - - cython>=0.29.33, <3.0.3 + - cython>=0.29.33 # test dependencies - pytest>=7.3.2 diff --git a/ci/deps/actions-311-pyarrownightly.yaml b/ci/deps/actions-311-pyarrownightly.yaml index 1a770d74043bf..d7e5523dd2545 100644 --- a/ci/deps/actions-311-pyarrownightly.yaml +++ b/ci/deps/actions-311-pyarrownightly.yaml @@ -7,7 +7,7 @@ dependencies: # build dependencies - versioneer[toml] - meson[ninja]=1.2.1 - - cython>=0.29.33, <3.0.3 + - cython>=0.29.33 - meson-python=0.13.1 # test dependencies diff --git a/ci/deps/actions-311.yaml b/ci/deps/actions-311.yaml index 57fcc2c859a5f..7f8cca9e65fe1 100644 --- a/ci/deps/actions-311.yaml +++ b/ci/deps/actions-311.yaml @@ -6,7 +6,7 @@ dependencies: # build dependencies - versioneer[toml] - - cython>=0.29.33, <3.0.3 + - cython>=0.29.33 - meson[ninja]=1.2.1 - meson-python=0.13.1 diff --git a/ci/deps/actions-39-minimum_versions.yaml b/ci/deps/actions-39-minimum_versions.yaml index f340d17395d34..5d34940aa27e1 100644 --- a/ci/deps/actions-39-minimum_versions.yaml +++ b/ci/deps/actions-39-minimum_versions.yaml @@ -8,7 +8,7 @@ dependencies: # build dependencies - versioneer[toml] - - cython>=0.29.33, <3.0.3 + - cython>=0.29.33 - meson[ninja]=1.2.1 - meson-python=0.13.1 diff --git a/ci/deps/actions-39.yaml b/ci/deps/actions-39.yaml index 0e234a0de6969..f5ea116e51dd4 100644 --- a/ci/deps/actions-39.yaml +++ b/ci/deps/actions-39.yaml @@ -6,7 +6,7 @@ dependencies: # build dependencies - versioneer[toml] - - cython>=0.29.33, <3.0.3 + - cython>=0.29.33 - meson[ninja]=1.2.1 - meson-python=0.13.1 diff --git a/ci/deps/actions-pypy-39.yaml b/ci/deps/actions-pypy-39.yaml index a2f4d6395783a..a489690776be3 100644 --- a/ci/deps/actions-pypy-39.yaml +++ b/ci/deps/actions-pypy-39.yaml @@ -9,7 +9,7 @@ dependencies: # build dependencies - versioneer[toml] - - cython>=0.29.33, <3.0.3 + - cython>=0.29.33 - meson[ninja]=1.2.1 - meson-python=0.13.1 diff --git a/ci/deps/circle-310-arm64.yaml b/ci/deps/circle-310-arm64.yaml index 6b091df00fae7..a71523e8c3579 100644 --- a/ci/deps/circle-310-arm64.yaml +++ b/ci/deps/circle-310-arm64.yaml @@ -6,7 +6,7 @@ dependencies: # build dependencies - versioneer[toml] - - cython>=0.29.33, <3.0.3 + - cython>=0.29.33 - meson[ninja]=1.2.1 - meson-python=0.13.1
Backport PR #55582: CI: Unpin Cython
https://api.github.com/repos/pandas-dev/pandas/pulls/55585
2023-10-19T01:15:38Z
2023-10-19T12:42:56Z
2023-10-19T12:42:56Z
2023-10-19T12:42:56Z
TST: Test patching over fake instead of real method in accessor test
diff --git a/pandas/tests/test_register_accessor.py b/pandas/tests/test_register_accessor.py index 5b200711f4b36..4e569dc40005d 100644 --- a/pandas/tests/test_register_accessor.py +++ b/pandas/tests/test_register_accessor.py @@ -82,19 +82,13 @@ def test_accessor_works(): def test_overwrite_warns(): - # Need to restore mean - mean = pd.Series.mean - try: - with tm.assert_produces_warning(UserWarning) as w: - pd.api.extensions.register_series_accessor("mean")(MyAccessor) + match = r".*MyAccessor.*fake.*Series.*" + with tm.assert_produces_warning(UserWarning, match=match): + with ensure_removed(pd.Series, "fake"): + setattr(pd.Series, "fake", 123) + pd.api.extensions.register_series_accessor("fake")(MyAccessor) s = pd.Series([1, 2]) - assert s.mean.prop == "item" - msg = str(w[0].message) - assert "mean" in msg - assert "MyAccessor" in msg - assert "Series" in msg - finally: - pd.Series.mean = mean + assert s.fake.prop == "item" def test_raises_attribute_error():
Ran into a scenario where `Series.mean` was patched while running tests in parallel, so `Series.mean` was not behaving correctly.
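A sketch of the safer pattern the test now uses: register the accessor over a throwaway `fake` attribute instead of a real method. Cleanup here is done manually rather than with the test module's `ensure_removed` helper:

```python
import warnings

import pandas as pd


class MyAccessor:
    # Stand-in for the accessor class used in the test.
    def __init__(self, obj):
        self._obj = obj

    @property
    def prop(self):
        return "item"


# Patch a fake attribute, not a real method like Series.mean, so parallel
# test workers can never observe a broken built-in.
pd.Series.fake = 123
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    pd.api.extensions.register_series_accessor("fake")(MyAccessor)

# Registering over a preexisting attribute emits the overwrite UserWarning.
assert any("MyAccessor" in str(w.message) for w in caught)
assert pd.Series([1, 2]).fake.prop == "item"
del pd.Series.fake  # leave Series in its original state
```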
https://api.github.com/repos/pandas-dev/pandas/pulls/55584
2023-10-19T00:29:04Z
2023-10-19T16:34:55Z
2023-10-19T16:34:55Z
2023-10-19T16:34:58Z
BUG: Week.__add__ with non-nano
diff --git a/doc/source/whatsnew/v2.2.0.rst b/doc/source/whatsnew/v2.2.0.rst index b29d35c8ce332..be565616036e6 100644 --- a/doc/source/whatsnew/v2.2.0.rst +++ b/doc/source/whatsnew/v2.2.0.rst @@ -301,8 +301,9 @@ Datetimelike ^^^^^^^^^^^^ - Bug in :meth:`DatetimeIndex.union` returning object dtype for tz-aware indexes with the same timezone but different units (:issue:`55238`) - Bug in :meth:`Tick.delta` with very large ticks raising ``OverflowError`` instead of ``OutOfBoundsTimedelta`` (:issue:`55503`) +- Bug in adding or subtracting a :class:`Week` offset to a ``datetime64`` :class:`Series`, :class:`Index`, or :class:`DataFrame` column with non-nanosecond resolution returning incorrect results (:issue:`55583`) - Bug in addition or subtraction of very large :class:`Tick` objects with :class:`Timestamp` or :class:`Timedelta` objects raising ``OverflowError`` instead of ``OutOfBoundsTimedelta`` (:issue:`55503`) - +- Timedelta ^^^^^^^^^ diff --git a/pandas/_libs/tslibs/offsets.pyi b/pandas/_libs/tslibs/offsets.pyi index 1f0fad6abb8b4..aecc8cf681cf8 100644 --- a/pandas/_libs/tslibs/offsets.pyi +++ b/pandas/_libs/tslibs/offsets.pyi @@ -277,7 +277,10 @@ def roll_qtrday( INVALID_FREQ_ERR_MSG: Literal["Invalid frequency: {0}"] def shift_months( - dtindex: npt.NDArray[np.int64], months: int, day_opt: str | None = ... + dtindex: npt.NDArray[np.int64], + months: int, + day_opt: str | None = ..., + reso: int = ..., ) -> npt.NDArray[np.int64]: ... _offset_map: dict[str, BaseOffset] diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx index f20073766bc3a..cb2f99eef4226 100644 --- a/pandas/_libs/tslibs/offsets.pyx +++ b/pandas/_libs/tslibs/offsets.pyx @@ -3272,7 +3272,8 @@ cdef class Week(SingleConstructorOffset): def _apply_array(self, dtarr): if self.weekday is None: td = timedelta(days=7 * self.n) - td64 = np.timedelta64(td, "ns") + unit = np.datetime_data(dtarr.dtype)[0] + td64 = np.timedelta64(td, unit) return dtarr + td64 else: reso = get_unit_from_dtype(dtarr.dtype) diff --git a/pandas/tests/arithmetic/test_datetime64.py b/pandas/tests/arithmetic/test_datetime64.py index 67bcafd583086..89df837315396 100644 --- a/pandas/tests/arithmetic/test_datetime64.py +++ b/pandas/tests/arithmetic/test_datetime64.py @@ -1415,8 +1415,9 @@ def test_dt64arr_add_sub_relativedelta_offsets(self, box_with_array): ) @pytest.mark.parametrize("normalize", [True, False]) @pytest.mark.parametrize("n", [0, 5]) + @pytest.mark.parametrize("unit", ["s", "ms", "us", "ns"]) def test_dt64arr_add_sub_DateOffsets( - self, box_with_array, n, normalize, cls_and_kwargs + self, box_with_array, n, normalize, cls_and_kwargs, unit ): # GH#10699 # assert vectorized operation matches pointwise operations @@ -1449,7 +1450,7 @@ def test_dt64arr_add_sub_DateOffsets( Timestamp("2000-05-15"), Timestamp("2001-06-15"), ] - ) + ).as_unit(unit) vec = tm.box_expected(vec, box_with_array) vec_items = vec.iloc[0] if box_with_array is pd.DataFrame else vec @@ -1461,15 +1462,16 @@ def test_dt64arr_add_sub_DateOffsets( offset = offset_cls(n, normalize=normalize, **kwargs) - expected = DatetimeIndex([x + offset for x in vec_items]) + # TODO(GH#55564): as_unit will be unnecessary + expected = DatetimeIndex([x + offset for x in vec_items]).as_unit(unit) expected = tm.box_expected(expected, box_with_array) tm.assert_equal(expected, vec + offset) - expected = DatetimeIndex([x - offset for x in vec_items]) + expected = DatetimeIndex([x - offset for x in vec_items]).as_unit(unit) expected = tm.box_expected(expected, box_with_array) 
tm.assert_equal(expected, vec - offset) - expected = DatetimeIndex([offset + x for x in vec_items]) + expected = DatetimeIndex([offset + x for x in vec_items]).as_unit(unit) expected = tm.box_expected(expected, box_with_array) tm.assert_equal(expected, offset + vec) msg = "(bad|unsupported) operand type for unary" @@ -2392,7 +2394,8 @@ def test_dti_addsub_object_arraylike( @pytest.mark.parametrize("years", [-1, 0, 1]) @pytest.mark.parametrize("months", [-2, 0, 2]) -def test_shift_months(years, months): +@pytest.mark.parametrize("unit", ["s", "ms", "us", "ns"]) +def test_shift_months(years, months, unit): dti = DatetimeIndex( [ Timestamp("2000-01-05 00:15:00"), @@ -2401,11 +2404,13 @@ def test_shift_months(years, months): Timestamp("2000-02-29"), Timestamp("2000-12-31"), ] - ) - actual = DatetimeIndex(shift_months(dti.asi8, years * 12 + months)) + ).as_unit(unit) + shifted = shift_months(dti.asi8, years * 12 + months, reso=dti._data._creso) + shifted_dt64 = shifted.view(f"M8[{dti.unit}]") + actual = DatetimeIndex(shifted_dt64) raw = [x + pd.offsets.DateOffset(years=years, months=months) for x in dti] - expected = DatetimeIndex(raw) + expected = DatetimeIndex(raw).as_unit(dti.unit) tm.assert_index_equal(actual, expected)
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature. The shift_months test fix is unrelated, just discovered at the same time.
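A minimal sketch of the fixed behavior (assuming pandas with this patch applied); before it, adding a `Week` to a non-nanosecond datetime array gave incorrect results because the seven-day step was always built as a nanosecond `timedelta64`:

```python
import pandas as pd

dti = pd.to_datetime(["2000-01-05", "2000-01-31"]).as_unit("s")

# With the fix, the 7*n day step is built in the array's own unit ("s"),
# so each timestamp moves forward exactly one week and keeps its resolution.
shifted = dti + pd.offsets.Week(n=1)
print(shifted)       # 2000-01-12, 2000-02-07
print(shifted.unit)  # "s"
```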
https://api.github.com/repos/pandas-dev/pandas/pulls/55583
2023-10-18T22:49:54Z
2023-10-19T16:36:03Z
2023-10-19T16:36:03Z
2023-10-19T16:36:59Z
CI: Unpin Cython
diff --git a/.github/workflows/unit-tests.yml b/.github/workflows/unit-tests.yml index 53a1f5b95374d..b0a2427732a6a 100644 --- a/.github/workflows/unit-tests.yml +++ b/.github/workflows/unit-tests.yml @@ -236,7 +236,7 @@ jobs: . ~/virtualenvs/pandas-dev/bin/activate python -m pip install --no-cache-dir -U pip wheel setuptools meson[ninja]==1.2.1 meson-python==0.13.1 python -m pip install numpy --config-settings=setup-args="-Dallow-noblas=true" - python -m pip install --no-cache-dir versioneer[toml] "cython<3.0.3" python-dateutil pytz pytest>=7.3.2 pytest-xdist>=2.2.0 pytest-asyncio>=0.17 hypothesis>=6.46.1 + python -m pip install --no-cache-dir versioneer[toml] cython python-dateutil pytz pytest>=7.3.2 pytest-xdist>=2.2.0 pytest-asyncio>=0.17 hypothesis>=6.46.1 python -m pip install --no-cache-dir --no-build-isolation -e . python -m pip list --no-cache-dir export PANDAS_CI=1 @@ -274,7 +274,7 @@ jobs: /opt/python/cp311-cp311/bin/python -m venv ~/virtualenvs/pandas-dev . ~/virtualenvs/pandas-dev/bin/activate python -m pip install --no-cache-dir -U pip wheel setuptools meson-python==0.13.1 meson[ninja]==1.2.1 - python -m pip install --no-cache-dir versioneer[toml] "cython<3.0.3" numpy python-dateutil pytz pytest>=7.3.2 pytest-xdist>=2.2.0 pytest-asyncio>=0.17 hypothesis>=6.46.1 + python -m pip install --no-cache-dir versioneer[toml] cython numpy python-dateutil pytz pytest>=7.3.2 pytest-xdist>=2.2.0 pytest-asyncio>=0.17 hypothesis>=6.46.1 python -m pip install --no-cache-dir --no-build-isolation -e . python -m pip list --no-cache-dir @@ -347,7 +347,7 @@ jobs: python -m pip install --upgrade pip setuptools wheel meson[ninja]==1.2.1 meson-python==0.13.1 python -m pip install --pre --extra-index-url https://pypi.anaconda.org/scientific-python-nightly-wheels/simple numpy python -m pip install versioneer[toml] - python -m pip install python-dateutil pytz tzdata "cython<3.0.3" hypothesis>=6.46.1 pytest>=7.3.2 pytest-xdist>=2.2.0 pytest-cov pytest-asyncio>=0.17 + python -m pip install python-dateutil pytz tzdata cython hypothesis>=6.46.1 pytest>=7.3.2 pytest-xdist>=2.2.0 pytest-cov pytest-asyncio>=0.17 python -m pip install -ve . 
--no-build-isolation --no-index --no-deps python -m pip list diff --git a/ci/deps/actions-310.yaml b/ci/deps/actions-310.yaml index 180d425b07d82..2ca9e328adf9c 100644 --- a/ci/deps/actions-310.yaml +++ b/ci/deps/actions-310.yaml @@ -6,7 +6,7 @@ dependencies: # build dependencies - versioneer[toml] - - cython>=0.29.33, <3.0.3 + - cython>=0.29.33 - meson[ninja]=1.2.1 - meson-python=0.13.1 diff --git a/ci/deps/actions-311-downstream_compat.yaml b/ci/deps/actions-311-downstream_compat.yaml index c8a5d8cfb7640..3acac13e6e9d1 100644 --- a/ci/deps/actions-311-downstream_compat.yaml +++ b/ci/deps/actions-311-downstream_compat.yaml @@ -7,7 +7,7 @@ dependencies: # build dependencies - versioneer[toml] - - cython>=0.29.33, <3.0.3 + - cython>=0.29.33 - meson[ninja]=1.2.1 - meson-python=0.13.1 diff --git a/ci/deps/actions-311-numpydev.yaml b/ci/deps/actions-311-numpydev.yaml index 9795a1fb39c6f..7fd3a65ec91f8 100644 --- a/ci/deps/actions-311-numpydev.yaml +++ b/ci/deps/actions-311-numpydev.yaml @@ -8,7 +8,7 @@ dependencies: - versioneer[toml] - meson[ninja]=1.2.1 - meson-python=0.13.1 - - cython>=0.29.33, <3.0.3 + - cython>=0.29.33 # test dependencies - pytest>=7.3.2 diff --git a/ci/deps/actions-311-pyarrownightly.yaml b/ci/deps/actions-311-pyarrownightly.yaml index 1a770d74043bf..d7e5523dd2545 100644 --- a/ci/deps/actions-311-pyarrownightly.yaml +++ b/ci/deps/actions-311-pyarrownightly.yaml @@ -7,7 +7,7 @@ dependencies: # build dependencies - versioneer[toml] - meson[ninja]=1.2.1 - - cython>=0.29.33, <3.0.3 + - cython>=0.29.33 - meson-python=0.13.1 # test dependencies diff --git a/ci/deps/actions-311.yaml b/ci/deps/actions-311.yaml index 3d1df9e3bcd59..a28bc6d0f5395 100644 --- a/ci/deps/actions-311.yaml +++ b/ci/deps/actions-311.yaml @@ -6,7 +6,7 @@ dependencies: # build dependencies - versioneer[toml] - - cython>=0.29.33, <3.0.3 + - cython>=0.29.33 - meson[ninja]=1.2.1 - meson-python=0.13.1 diff --git a/ci/deps/actions-39-minimum_versions.yaml b/ci/deps/actions-39-minimum_versions.yaml index 691bdf25d4d96..988e0ef944714 100644 --- a/ci/deps/actions-39-minimum_versions.yaml +++ b/ci/deps/actions-39-minimum_versions.yaml @@ -8,7 +8,7 @@ dependencies: # build dependencies - versioneer[toml] - - cython>=0.29.33, <3.0.3 + - cython>=0.29.33 - meson[ninja]=1.2.1 - meson-python=0.13.1 diff --git a/ci/deps/actions-39.yaml b/ci/deps/actions-39.yaml index 199ce4dea1ac0..44068525e8ed6 100644 --- a/ci/deps/actions-39.yaml +++ b/ci/deps/actions-39.yaml @@ -6,7 +6,7 @@ dependencies: # build dependencies - versioneer[toml] - - cython>=0.29.33, <3.0.3 + - cython>=0.29.33 - meson[ninja]=1.2.1 - meson-python=0.13.1 diff --git a/ci/deps/actions-pypy-39.yaml b/ci/deps/actions-pypy-39.yaml index a2f4d6395783a..a489690776be3 100644 --- a/ci/deps/actions-pypy-39.yaml +++ b/ci/deps/actions-pypy-39.yaml @@ -9,7 +9,7 @@ dependencies: # build dependencies - versioneer[toml] - - cython>=0.29.33, <3.0.3 + - cython>=0.29.33 - meson[ninja]=1.2.1 - meson-python=0.13.1 diff --git a/ci/deps/circle-310-arm64.yaml b/ci/deps/circle-310-arm64.yaml index c9f46b98273b3..bb8c4d491089b 100644 --- a/ci/deps/circle-310-arm64.yaml +++ b/ci/deps/circle-310-arm64.yaml @@ -6,7 +6,7 @@ dependencies: # build dependencies - versioneer[toml] - - cython>=0.29.33, <3.0.3 + - cython>=0.29.33 - meson[ninja]=1.2.1 - meson-python=0.13.1
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
https://api.github.com/repos/pandas-dev/pandas/pulls/55582
2023-10-18T22:17:40Z
2023-10-19T01:15:30Z
2023-10-19T01:15:30Z
2023-10-19T01:15:31Z
BUG: conversion of a JSON field descriptor into a pandas type for the deprecated offsets frequency 'M'
diff --git a/pandas/io/json/_table_schema.py b/pandas/io/json/_table_schema.py index 3f2291ba7a0c3..4d9fba72cf173 100644 --- a/pandas/io/json/_table_schema.py +++ b/pandas/io/json/_table_schema.py @@ -15,6 +15,7 @@ from pandas._libs import lib from pandas._libs.json import ujson_loads from pandas._libs.tslibs import timezones +from pandas._libs.tslibs.dtypes import freq_to_period_freqstr from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.base import _registry as registry @@ -34,6 +35,8 @@ from pandas import DataFrame import pandas.core.common as com +from pandas.tseries.frequencies import to_offset + if TYPE_CHECKING: from pandas._typing import ( DtypeObj, @@ -207,8 +210,12 @@ def convert_json_field_to_pandas_type(field) -> str | CategoricalDtype: if field.get("tz"): return f"datetime64[ns, {field['tz']}]" elif field.get("freq"): + # GH#9586 rename frequency M to ME for offsets + offset = to_offset(field["freq"]) + freq_n, freq_name = offset.n, offset.name + freq = freq_to_period_freqstr(freq_n, freq_name) # GH#47747 using datetime over period to minimize the change surface - return f"period[{field['freq']}]" + return f"period[{freq}]" else: return "datetime64[ns]" elif typ == "any": diff --git a/pandas/tests/io/json/test_json_table_schema.py b/pandas/tests/io/json/test_json_table_schema.py index 943515acd33b5..ddab3887db810 100644 --- a/pandas/tests/io/json/test_json_table_schema.py +++ b/pandas/tests/io/json/test_json_table_schema.py @@ -845,3 +845,14 @@ def test_read_json_orient_table_old_schema_version(self): expected = DataFrame({"a": [1, 2.0, "s"]}) result = pd.read_json(StringIO(df_json), orient="table") tm.assert_frame_equal(expected, result) + + @pytest.mark.parametrize("freq", ["M", "2M"]) + def test_read_json_table_orient_period_depr_freq(self, freq, recwarn): + # GH#9586 + df = DataFrame( + {"ints": [1, 2]}, + index=pd.PeriodIndex(["2020-01", "2020-06"], freq=freq), + ) + out = df.to_json(orient="table") + result = pd.read_json(out, orient="table") + tm.assert_frame_equal(df, result)
xref #52064. When converting a JSON field descriptor into its corresponding pandas type, we don't account for the deprecated offsets frequency "M". Corrected the definition of `convert_json_field_to_pandas_type` and added a test.
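The added test boils down to this round trip (the `StringIO` wrapper is only there to avoid the separate deprecation of passing literal JSON strings):

```python
from io import StringIO

import pandas as pd

df = pd.DataFrame(
    {"ints": [1, 2]},
    index=pd.PeriodIndex(["2020-01", "2020-06"], freq="M"),
)
out = df.to_json(orient="table")

# With the fix, the "M" alias stored in the table schema is normalized via
# to_offset/freq_to_period_freqstr, so the period dtype round-trips cleanly.
result = pd.read_json(StringIO(out), orient="table")
pd.testing.assert_frame_equal(df, result)
```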
https://api.github.com/repos/pandas-dev/pandas/pulls/55581
2023-10-18T22:00:19Z
2023-10-24T08:41:43Z
2023-10-24T08:41:43Z
2023-10-24T10:03:14Z
BUG/PERF: merge_asof with multiple "by" keys
diff --git a/doc/source/whatsnew/v2.2.0.rst b/doc/source/whatsnew/v2.2.0.rst index c8c27f2f2e178..f21cbdfa0c3a4 100644 --- a/doc/source/whatsnew/v2.2.0.rst +++ b/doc/source/whatsnew/v2.2.0.rst @@ -277,6 +277,7 @@ Other Deprecations Performance improvements ~~~~~~~~~~~~~~~~~~~~~~~~ - Performance improvement in :func:`concat` with ``axis=1`` and objects with unaligned indexes (:issue:`55084`) +- Performance improvement in :func:`merge_asof` when ``by`` contains more than one key (:issue:`55580`) - Performance improvement in :func:`read_stata` for files with many variables (:issue:`55515`) - Performance improvement in :func:`to_dict` on converting DataFrame to dictionary (:issue:`50990`) - Performance improvement in :meth:`DataFrame.groupby` when aggregating pyarrow timestamp and duration dtypes (:issue:`55031`) diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index ba6579a739f54..9cdf513ad7da3 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -9,7 +9,6 @@ ) import datetime from functools import partial -import string from typing import ( TYPE_CHECKING, Literal, @@ -90,7 +89,6 @@ BaseMaskedArray, ExtensionArray, ) -from pandas.core.arrays._mixins import NDArrayBackedExtensionArray from pandas.core.arrays.string_ import StringDtype import pandas.core.common as com from pandas.core.construction import ( @@ -99,7 +97,10 @@ ) from pandas.core.frame import _merge_doc from pandas.core.indexes.api import default_index -from pandas.core.sorting import is_int64_overflow_possible +from pandas.core.sorting import ( + get_group_index, + is_int64_overflow_possible, +) if TYPE_CHECKING: from pandas import DataFrame @@ -2117,34 +2118,6 @@ def _convert_values_for_libjoin( def _get_join_indexers(self) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: """return the join indexers""" - def flip(xs: list[ArrayLike]) -> np.ndarray: - """unlike np.transpose, this returns an array of tuples""" - - def injection(obj: ArrayLike): - if not isinstance(obj.dtype, ExtensionDtype): - # ndarray - return obj - obj = extract_array(obj) - if isinstance(obj, NDArrayBackedExtensionArray): - # fastpath for e.g. dt64tz, categorical - return obj._ndarray - # FIXME: returning obj._values_for_argsort() here doesn't - # break in any existing test cases, but i (@jbrockmendel) - # am pretty sure it should! - # e.g. - # arr = pd.array([0, pd.NA, 255], dtype="UInt8") - # will have values_for_argsort (before GH#45434) - # np.array([0, 255, 255], dtype=np.uint8) - # and the non-injectivity should make a difference somehow - # shouldn't it? 
- return np.asarray(obj) - - xs = [injection(x) for x in xs] - labels = list(string.ascii_lowercase[: len(xs)]) - dtypes = [x.dtype for x in xs] - labeled_dtypes = list(zip(labels, dtypes)) - return np.array(list(zip(*xs)), labeled_dtypes) - # values to compare left_values = ( self.left.index._values if self.left_index else self.left_join_keys[-1] @@ -2197,11 +2170,23 @@ def injection(obj: ArrayLike): else: # We get here with non-ndarrays in test_merge_by_col_tz_aware # and test_merge_groupby_multiple_column_with_categorical_column - lbv = flip(left_by_values) - rbv = flip(right_by_values) - lbv = ensure_object(lbv) - rbv = ensure_object(rbv) - + mapped = [ + _factorize_keys( + left_by_values[n], + right_by_values[n], + sort=False, + how="left", + ) + for n in range(len(left_by_values)) + ] + arrs = [np.concatenate(m[:2]) for m in mapped] + shape = tuple(m[2] for m in mapped) + group_index = get_group_index( + arrs, shape=shape, sort=False, xnull=False + ) + left_len = len(left_by_values[0]) + lbv = group_index[:left_len] + rbv = group_index[left_len:] # error: Incompatible types in assignment (expression has type # "Union[ndarray[Any, dtype[Any]], ndarray[Any, dtype[object_]]]", # variable has type "List[Union[Union[ExtensionArray, diff --git a/pandas/tests/reshape/merge/test_merge_asof.py b/pandas/tests/reshape/merge/test_merge_asof.py index d4cb89dadde9b..8ada42898f947 100644 --- a/pandas/tests/reshape/merge/test_merge_asof.py +++ b/pandas/tests/reshape/merge/test_merge_asof.py @@ -353,7 +353,8 @@ def test_multiby(self): result = merge_asof(trades, quotes, on="time", by=["ticker", "exch"]) tm.assert_frame_equal(result, expected) - def test_multiby_heterogeneous_types(self): + @pytest.mark.parametrize("dtype", ["object", "string"]) + def test_multiby_heterogeneous_types(self, dtype): # GH13936 trades = pd.DataFrame( { @@ -373,6 +374,7 @@ def test_multiby_heterogeneous_types(self): }, columns=["time", "ticker", "exch", "price", "quantity"], ) + trades = trades.astype({"ticker": dtype, "exch": dtype}) quotes = pd.DataFrame( { @@ -393,6 +395,7 @@ def test_multiby_heterogeneous_types(self): }, columns=["time", "ticker", "exch", "bid", "ask"], ) + quotes = quotes.astype({"ticker": dtype, "exch": dtype}) expected = pd.DataFrame( { @@ -414,6 +417,7 @@ def test_multiby_heterogeneous_types(self): }, columns=["time", "ticker", "exch", "price", "quantity", "bid", "ask"], ) + expected = expected.astype({"ticker": dtype, "exch": dtype}) result = merge_asof(trades, quotes, on="time", by=["ticker", "exch"]) tm.assert_frame_equal(result, expected) @@ -1666,3 +1670,41 @@ def test_merge_asof_read_only_ndarray(): result = merge_asof(left, right, left_index=True, right_index=True) expected = pd.DataFrame({"left": [2], "right": [1]}, index=[2]) tm.assert_frame_equal(result, expected) + + +def test_merge_asof_multiby_with_categorical(): + # GH 43541 + left = pd.DataFrame( + { + "c1": pd.Categorical(["a", "a", "b", "b"], categories=["a", "b"]), + "c2": ["x"] * 4, + "t": [1] * 4, + "v": range(4), + } + ) + right = pd.DataFrame( + { + "c1": pd.Categorical(["b", "b"], categories=["b", "a"]), + "c2": ["x"] * 2, + "t": [1, 2], + "v": range(2), + } + ) + result = merge_asof( + left, + right, + by=["c1", "c2"], + on="t", + direction="forward", + suffixes=["_left", "_right"], + ) + expected = pd.DataFrame( + { + "c1": pd.Categorical(["a", "a", "b", "b"], categories=["a", "b"]), + "c2": ["x"] * 4, + "t": [1] * 4, + "v_left": range(4), + "v_right": [np.nan, np.nan, 0.0, 0.0], + } + ) + tm.assert_frame_equal(result, 
expected)
- [x] closes #43541 - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [x] Added an entry in the latest `doc/source/whatsnew/v2.2.0.rst` file if fixing a bug or adding a new feature. ``` > asv continuous -f 1.1 upstream/main merge-as-of-multi-by -b join_merge.MergeAsof ``` ``` . before after ratio [59d4e841] [bfe776d7] <main> <merge-as-of-multi-by> - 1.66±0.03s 602±50ms 0.36 join_merge.MergeAsof.time_multiby('nearest', None) - 1.56±0.03s 560±40ms 0.36 join_merge.MergeAsof.time_multiby('nearest', 5) - 1.48±0.03s 449±50ms 0.30 join_merge.MergeAsof.time_multiby('forward', None) - 1.60±0.07s 472±30ms 0.29 join_merge.MergeAsof.time_multiby('forward', 5) - 1.25±0.03s 314±10ms 0.25 join_merge.MergeAsof.time_multiby('backward', None) - 1.35±0.1s 295±10ms 0.22 join_merge.MergeAsof.time_multiby('backward', 5) ```
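For reference, the multi-`by` path that was rewritten is exercised by an ordinary call like the following (the data shapes are illustrative):

```python
import pandas as pd

trades = pd.DataFrame(
    {
        "time": pd.to_datetime(
            ["2016-05-25 13:30:00.023", "2016-05-25 13:30:00.048"]
        ),
        "ticker": ["MSFT", "GOOG"],
        "exch": ["ARCA", "NSDQ"],
        "price": [51.95, 720.92],
    }
)
quotes = pd.DataFrame(
    {
        "time": pd.to_datetime(
            ["2016-05-25 13:30:00.023", "2016-05-25 13:30:00.030"]
        ),
        "ticker": ["MSFT", "GOOG"],
        "exch": ["ARCA", "NSDQ"],
        "bid": [51.95, 720.50],
    }
)

# Both "by" keys must match before the backward-asof match on "time" is
# taken; this is the branch now implemented with factorized group codes.
print(pd.merge_asof(trades, quotes, on="time", by=["ticker", "exch"]))
```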
https://api.github.com/repos/pandas-dev/pandas/pulls/55580
2023-10-18T21:15:57Z
2023-10-22T19:47:32Z
2023-10-22T19:47:32Z
2023-11-16T12:57:01Z
REF: add creso keyword to parse_pydatetime
diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx index 43252ffb5bf13..55454e6c58755 100644 --- a/pandas/_libs/tslib.pyx +++ b/pandas/_libs/tslib.pyx @@ -456,6 +456,7 @@ cpdef array_to_datetime( tzinfo tz_out = None bint found_tz = False, found_naive = False cnp.flatiter it = cnp.PyArray_IterNew(values) + NPY_DATETIMEUNIT creso = NPY_FR_ns # specify error conditions assert is_raise or is_ignore or is_coerce @@ -484,7 +485,7 @@ cpdef array_to_datetime( found_tz, utc_convert, ) - iresult[i] = parse_pydatetime(val, &dts, utc_convert) + iresult[i] = parse_pydatetime(val, &dts, utc_convert, creso=creso) elif PyDate_Check(val): iresult[i] = pydate_to_dt64(val, &dts) diff --git a/pandas/_libs/tslibs/conversion.pxd b/pandas/_libs/tslibs/conversion.pxd index 1b3214605582a..3e5a79e833a25 100644 --- a/pandas/_libs/tslibs/conversion.pxd +++ b/pandas/_libs/tslibs/conversion.pxd @@ -59,4 +59,5 @@ cdef int64_t parse_pydatetime( datetime val, npy_datetimestruct *dts, bint utc_convert, + NPY_DATETIMEUNIT creso, ) except? -1 diff --git a/pandas/_libs/tslibs/conversion.pyx b/pandas/_libs/tslibs/conversion.pyx index 45c4d7809fe7a..1f4bb8af4542f 100644 --- a/pandas/_libs/tslibs/conversion.pyx +++ b/pandas/_libs/tslibs/conversion.pyx @@ -61,6 +61,7 @@ from pandas._libs.tslibs.nattype cimport ( c_nat_strings as nat_strings, ) from pandas._libs.tslibs.parsing cimport parse_datetime_string +from pandas._libs.tslibs.timestamps cimport _Timestamp from pandas._libs.tslibs.timezones cimport ( get_utcoffset, is_utc, @@ -302,8 +303,8 @@ cdef _TSObject convert_to_tsobject(object ts, tzinfo tz, str unit, pandas_datetime_to_datetimestruct(ts, NPY_FR_ns, &obj.dts) elif PyDateTime_Check(ts): if nanos == 0: - if isinstance(ts, ABCTimestamp): - reso = abbrev_to_npy_unit(ts.unit) # TODO: faster way to do this? + if isinstance(ts, _Timestamp): + reso = (<_Timestamp>ts)._creso else: # TODO: what if user explicitly passes nanos=0? reso = NPY_FR_us @@ -729,6 +730,7 @@ cdef int64_t parse_pydatetime( datetime val, npy_datetimestruct *dts, bint utc_convert, + NPY_DATETIMEUNIT creso, ) except? -1: """ Convert pydatetime to datetime64. @@ -741,6 +743,8 @@ cdef int64_t parse_pydatetime( Needed to use in pydatetime_to_dt64, which writes to it. utc_convert : bool Whether to convert/localize to UTC. + creso : NPY_DATETIMEUNIT + Resolution to store the the result. Raises ------ @@ -752,17 +756,15 @@ cdef int64_t parse_pydatetime( if val.tzinfo is not None: if utc_convert: - _ts = convert_datetime_to_tsobject(val, None) - _ts.ensure_reso(NPY_FR_ns) + _ts = convert_datetime_to_tsobject(val, None, nanos=0, reso=creso) result = _ts.value else: - _ts = convert_datetime_to_tsobject(val, None) - _ts.ensure_reso(NPY_FR_ns) + _ts = convert_datetime_to_tsobject(val, None, nanos=0, reso=creso) result = _ts.value else: - if isinstance(val, ABCTimestamp): - result = val.as_unit("ns")._value + if isinstance(val, _Timestamp): + result = (<_Timestamp>val)._as_creso(creso, round_ok=False)._value else: - result = pydatetime_to_dt64(val, dts) - check_dts_bounds(dts) + result = pydatetime_to_dt64(val, dts, reso=creso) + check_dts_bounds(dts, creso) return result
Broken off from a branch implementing #55564.
https://api.github.com/repos/pandas-dev/pandas/pulls/55579
2023-10-18T21:01:11Z
2023-10-18T23:29:04Z
2023-10-18T23:29:04Z
2023-10-18T23:56:40Z
Backport PR #55562 on branch 2.1.x (Floordiv fix for pyarrow dtypes)
diff --git a/doc/source/whatsnew/v2.1.2.rst b/doc/source/whatsnew/v2.1.2.rst index ddd1f95c56aea..5df34f20bece3 100644 --- a/doc/source/whatsnew/v2.1.2.rst +++ b/doc/source/whatsnew/v2.1.2.rst @@ -27,9 +27,9 @@ Bug fixes - Fixed bug in :meth:`DataFrame.interpolate` raising incorrect error message (:issue:`55347`) - Fixed bug in :meth:`Index.insert` raising when inserting ``None`` into :class:`Index` with ``dtype="string[pyarrow_numpy]"`` (:issue:`55365`) - Fixed bug in :meth:`Series.all` and :meth:`Series.any` not treating missing values correctly for ``dtype="string[pyarrow_numpy]"`` (:issue:`55367`) +- Fixed bug in :meth:`Series.floordiv` for :class:`ArrowDtype` (:issue:`55561`) - Fixed bug in :meth:`Series.rank` for ``string[pyarrow_numpy]`` dtype (:issue:`55362`) - Silence ``Period[B]`` warnings introduced by :issue:`53446` during normal plotting activity (:issue:`55138`) -- .. --------------------------------------------------------------------------- .. _whatsnew_212.other: diff --git a/pandas/core/arrays/arrow/array.py b/pandas/core/arrays/arrow/array.py index 636a22fcffe3d..875461d39a93e 100644 --- a/pandas/core/arrays/arrow/array.py +++ b/pandas/core/arrays/arrow/array.py @@ -116,7 +116,8 @@ def floordiv_compat( ) -> pa.ChunkedArray: # Ensure int // int -> int mirroring Python/Numpy behavior # as pc.floor(pc.divide_checked(int, int)) -> float - result = pc.floor(pc.divide(left, right)) + converted_left = cast_for_truediv(left, right) + result = pc.floor(pc.divide(converted_left, right)) if pa.types.is_integer(left.type) and pa.types.is_integer(right.type): result = result.cast(left.type) return result diff --git a/pandas/tests/extension/test_arrow.py b/pandas/tests/extension/test_arrow.py index 8702701f161ae..608cb023a3584 100644 --- a/pandas/tests/extension/test_arrow.py +++ b/pandas/tests/extension/test_arrow.py @@ -3091,3 +3091,12 @@ def test_factorize_chunked_dictionary(): exp_uniques = pd.Index(ArrowExtensionArray(pa_array.combine_chunks())) tm.assert_numpy_array_equal(res_indices, exp_indicies) tm.assert_index_equal(res_uniques, exp_uniques) + + +def test_arrow_floordiv(): + # GH 55561 + a = pd.Series([-7], dtype="int64[pyarrow]") + b = pd.Series([4], dtype="int64[pyarrow]") + expected = pd.Series([-2], dtype="int64[pyarrow]") + result = a // b + tm.assert_series_equal(result, expected)
Backport PR #55562: Floordiv fix for pyarrow dtypes
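The backported test condenses to this check; before the fix, the pyarrow path floored an already-truncated integer quotient, giving `-1` instead of `-2`:

```python
import pandas as pd

a = pd.Series([-7], dtype="int64[pyarrow]")
b = pd.Series([4], dtype="int64[pyarrow]")

# Python floor division rounds toward negative infinity: -7 // 4 == -2.
print(a // b)
```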
https://api.github.com/repos/pandas-dev/pandas/pulls/55578
2023-10-18T20:31:27Z
2023-10-18T22:56:23Z
2023-10-18T22:56:23Z
2023-10-18T22:56:23Z
TST: change pyarrow skips to xfails
diff --git a/pandas/tests/io/parser/common/test_common_basic.py b/pandas/tests/io/parser/common/test_common_basic.py index 1f6ba3af5bfe5..4b4366fa387bf 100644 --- a/pandas/tests/io/parser/common/test_common_basic.py +++ b/pandas/tests/io/parser/common/test_common_basic.py @@ -34,7 +34,6 @@ ) xfail_pyarrow = pytest.mark.usefixtures("pyarrow_xfail") -skip_pyarrow = pytest.mark.usefixtures("pyarrow_skip") def test_override_set_noconvert_columns(): @@ -515,8 +514,6 @@ def test_single_char_leading_whitespace(all_parsers, delim_whitespace): tm.assert_frame_equal(result, expected) -# Skip for now, actually only one test fails though, but its tricky to xfail -@skip_pyarrow @pytest.mark.parametrize( "sep,skip_blank_lines,exp_data", [ @@ -536,7 +533,7 @@ def test_single_char_leading_whitespace(all_parsers, delim_whitespace): ), ], ) -def test_empty_lines(all_parsers, sep, skip_blank_lines, exp_data): +def test_empty_lines(all_parsers, sep, skip_blank_lines, exp_data, request): parser = all_parsers data = """\ A,B,C @@ -550,6 +547,12 @@ def test_empty_lines(all_parsers, sep, skip_blank_lines, exp_data): if sep == r"\s+": data = data.replace(",", " ") + if parser.engine == "pyarrow": + mark = pytest.mark.xfail( + raises=ValueError, + reason="the 'pyarrow' engine does not support regex separators", + ) + request.applymarker(mark) result = parser.read_csv(StringIO(data), sep=sep, skip_blank_lines=skip_blank_lines) expected = DataFrame(exp_data, columns=["A", "B", "C"]) diff --git a/pandas/tests/io/parser/common/test_index.py b/pandas/tests/io/parser/common/test_index.py index 1d5b0fec7a7c6..7df14043f478c 100644 --- a/pandas/tests/io/parser/common/test_index.py +++ b/pandas/tests/io/parser/common/test_index.py @@ -21,10 +21,6 @@ xfail_pyarrow = pytest.mark.usefixtures("pyarrow_xfail") -# GH#43650: Some expected failures with the pyarrow engine can occasionally -# cause a deadlock instead, so we skip these instead of xfailing -skip_pyarrow = pytest.mark.usefixtures("pyarrow_skip") - @pytest.mark.parametrize( "data,kwargs,expected", @@ -278,7 +274,8 @@ def test_empty_with_index(all_parsers): tm.assert_frame_equal(result, expected) -@skip_pyarrow +# CSV parse error: Empty CSV file or block: cannot infer number of columns +@xfail_pyarrow def test_empty_with_multi_index(all_parsers): # see gh-10467 data = "x,y,z" @@ -291,7 +288,8 @@ def test_empty_with_multi_index(all_parsers): tm.assert_frame_equal(result, expected) -@skip_pyarrow +# CSV parse error: Empty CSV file or block: cannot infer number of columns +@xfail_pyarrow def test_empty_with_reversed_multi_index(all_parsers): data = "x,y,z" parser = all_parsers diff --git a/pandas/tests/io/parser/common/test_ints.py b/pandas/tests/io/parser/common/test_ints.py index 939fdbc159454..086b43be59823 100644 --- a/pandas/tests/io/parser/common/test_ints.py +++ b/pandas/tests/io/parser/common/test_ints.py @@ -17,9 +17,7 @@ "ignore:Passing a BlockManager to DataFrame:DeprecationWarning" ) -# GH#43650: Some expected failures with the pyarrow engine can occasionally -# cause a deadlock instead, so we skip these instead of xfailing -skip_pyarrow = pytest.mark.usefixtures("pyarrow_skip") +xfail_pyarrow = pytest.mark.usefixtures("pyarrow_xfail") def test_int_conversion(all_parsers): @@ -102,12 +100,16 @@ def test_parse_integers_above_fp_precision(all_parsers): tm.assert_frame_equal(result, expected) -@skip_pyarrow # Flaky @pytest.mark.parametrize("sep", [" ", r"\s+"]) def test_integer_overflow_bug(all_parsers, sep): # see gh-2601 data = "65248E10 11\n55555E55 22\n" parser 
= all_parsers + if parser.engine == "pyarrow" and sep != " ": + msg = "the 'pyarrow' engine does not support regex separators" + with pytest.raises(ValueError, match=msg): + parser.read_csv(StringIO(data), header=None, sep=sep) + return result = parser.read_csv(StringIO(data), header=None, sep=sep) expected = DataFrame([[6.5248e14, 11], [5.5555e59, 22]]) @@ -124,7 +126,8 @@ def test_int64_min_issues(all_parsers): tm.assert_frame_equal(result, expected) -@skip_pyarrow +# ValueError: The 'converters' option is not supported with the 'pyarrow' engine +@xfail_pyarrow @pytest.mark.parametrize("conv", [None, np.int64, np.uint64]) def test_int64_overflow(all_parsers, conv): data = """ID @@ -168,7 +171,7 @@ def test_int64_overflow(all_parsers, conv): parser.read_csv(StringIO(data), converters={"ID": conv}) -@skip_pyarrow +@xfail_pyarrow # CSV parse error: Empty CSV file or block @pytest.mark.parametrize( "val", [np.iinfo(np.uint64).max, np.iinfo(np.int64).max, np.iinfo(np.int64).min] ) @@ -182,7 +185,7 @@ def test_int64_uint64_range(all_parsers, val): tm.assert_frame_equal(result, expected) -@skip_pyarrow +@xfail_pyarrow # CSV parse error: Empty CSV file or block @pytest.mark.parametrize( "val", [np.iinfo(np.uint64).max + 1, np.iinfo(np.int64).min - 1] ) @@ -196,7 +199,7 @@ def test_outside_int64_uint64_range(all_parsers, val): tm.assert_frame_equal(result, expected) -@skip_pyarrow +@xfail_pyarrow # gets float64 dtype instead of object @pytest.mark.parametrize("exp_data", [[str(-1), str(2**63)], [str(2**63), str(-1)]]) def test_numeric_range_too_wide(all_parsers, exp_data): # No numerical dtype can hold both negative and uint64 diff --git a/pandas/tests/io/parser/common/test_read_errors.py b/pandas/tests/io/parser/common/test_read_errors.py index 8b612c8d40994..52ddb38192a6b 100644 --- a/pandas/tests/io/parser/common/test_read_errors.py +++ b/pandas/tests/io/parser/common/test_read_errors.py @@ -22,7 +22,6 @@ import pandas._testing as tm xfail_pyarrow = pytest.mark.usefixtures("pyarrow_xfail") -skip_pyarrow = pytest.mark.usefixtures("pyarrow_skip") def test_empty_decimal_marker(all_parsers): @@ -44,7 +43,6 @@ def test_empty_decimal_marker(all_parsers): parser.read_csv(StringIO(data), decimal="") -@skip_pyarrow def test_bad_stream_exception(all_parsers, csv_dir_path): # see gh-13652 # @@ -65,7 +63,7 @@ def test_bad_stream_exception(all_parsers, csv_dir_path): parser.read_csv(stream) -@skip_pyarrow +@xfail_pyarrow # ValueError: The 'comment' option is not supported def test_malformed(all_parsers): # see gh-6607 parser = all_parsers @@ -80,7 +78,7 @@ def test_malformed(all_parsers): parser.read_csv(StringIO(data), header=1, comment="#") -@skip_pyarrow +@xfail_pyarrow # ValueError: The 'iterator' option is not supported @pytest.mark.parametrize("nrows", [5, 3, None]) def test_malformed_chunks(all_parsers, nrows): data = """ignore @@ -100,7 +98,7 @@ def test_malformed_chunks(all_parsers, nrows): reader.read(nrows) -@skip_pyarrow +@xfail_pyarrow # does not raise def test_catch_too_many_names(all_parsers): # see gh-5156 data = """\ @@ -115,12 +113,17 @@ def test_catch_too_many_names(all_parsers): else "Number of passed names did not match " "number of header fields in the file" ) + depr_msg = "Passing a BlockManager to DataFrame is deprecated" + warn = None + if parser.engine == "pyarrow": + warn = DeprecationWarning - with pytest.raises(ValueError, match=msg): - parser.read_csv(StringIO(data), header=0, names=["a", "b", "c", "d"]) + with tm.assert_produces_warning(warn, match=depr_msg, 
check_stacklevel=False): + with pytest.raises(ValueError, match=msg): + parser.read_csv(StringIO(data), header=0, names=["a", "b", "c", "d"]) -@skip_pyarrow +@xfail_pyarrow # CSV parse error: Empty CSV file or block @pytest.mark.parametrize("nrows", [0, 1, 2, 3, 4, 5]) def test_raise_on_no_columns(all_parsers, nrows): parser = all_parsers @@ -208,7 +211,6 @@ def test_read_csv_wrong_num_columns(all_parsers): parser.read_csv(StringIO(data)) -@skip_pyarrow def test_null_byte_char(request, all_parsers): # see gh-2741 data = "\x00,foo" @@ -226,12 +228,19 @@ def test_null_byte_char(request, all_parsers): out = parser.read_csv(StringIO(data), names=names) tm.assert_frame_equal(out, expected) else: - msg = "NULL byte detected" + if parser.engine == "pyarrow": + msg = ( + "CSV parse error: Empty CSV file or block: " + "cannot infer number of columns" + ) + else: + msg = "NULL byte detected" with pytest.raises(ParserError, match=msg): parser.read_csv(StringIO(data), names=names) -@skip_pyarrow +# ValueError: the 'pyarrow' engine does not support sep=None with delim_whitespace=False +@xfail_pyarrow @pytest.mark.filterwarnings("always::ResourceWarning") def test_open_file(request, all_parsers): # GH 39024 diff --git a/pandas/tests/io/parser/conftest.py b/pandas/tests/io/parser/conftest.py index 591defdde7df9..16ee8ab4106ef 100644 --- a/pandas/tests/io/parser/conftest.py +++ b/pandas/tests/io/parser/conftest.py @@ -279,19 +279,3 @@ def pyarrow_xfail(request): if parser.engine == "pyarrow": mark = pytest.mark.xfail(reason="pyarrow doesn't support this.") request.applymarker(mark) - - -@pytest.fixture -def pyarrow_skip(request): - """ - Fixture that skips a test if the engine is pyarrow. - """ - if "all_parsers" in request.fixturenames: - parser = request.getfixturevalue("all_parsers") - elif "all_parsers_all_precisions" in request.fixturenames: - # Return value is tuple of (engine, precision) - parser = request.getfixturevalue("all_parsers_all_precisions")[0] - else: - return - if parser.engine == "pyarrow": - pytest.skip("pyarrow doesn't support this.") diff --git a/pandas/tests/io/parser/dtypes/test_categorical.py b/pandas/tests/io/parser/dtypes/test_categorical.py index d305c94b171f3..c7586bd9334ef 100644 --- a/pandas/tests/io/parser/dtypes/test_categorical.py +++ b/pandas/tests/io/parser/dtypes/test_categorical.py @@ -25,7 +25,6 @@ ) xfail_pyarrow = pytest.mark.usefixtures("pyarrow_xfail") -skip_pyarrow = pytest.mark.usefixtures("pyarrow_skip") @xfail_pyarrow @@ -55,9 +54,8 @@ def test_categorical_dtype(all_parsers, dtype): tm.assert_frame_equal(actual, expected) -@skip_pyarrow # Flaky @pytest.mark.parametrize("dtype", [{"b": "category"}, {1: "category"}]) -def test_categorical_dtype_single(all_parsers, dtype): +def test_categorical_dtype_single(all_parsers, dtype, request): # see gh-10153 parser = all_parsers data = """a,b,c @@ -67,6 +65,13 @@ def test_categorical_dtype_single(all_parsers, dtype): expected = DataFrame( {"a": [1, 1, 2], "b": Categorical(["a", "a", "b"]), "c": [3.4, 3.4, 4.5]} ) + if parser.engine == "pyarrow": + mark = pytest.mark.xfail( + strict=False, + reason="Flaky test sometimes gives object dtype instead of Categorical", + ) + request.applymarker(mark) + actual = parser.read_csv(StringIO(data), dtype=dtype) tm.assert_frame_equal(actual, expected) @@ -141,6 +146,7 @@ def test_categorical_dtype_utf16(all_parsers, csv_dir_path): tm.assert_frame_equal(actual, expected) +# ValueError: The 'chunksize' option is not supported with the 'pyarrow' engine @xfail_pyarrow def 
test_categorical_dtype_chunksize_infer_categories(all_parsers): # see gh-10153 @@ -161,6 +167,7 @@ def test_categorical_dtype_chunksize_infer_categories(all_parsers): tm.assert_frame_equal(actual, expected) +# ValueError: The 'chunksize' option is not supported with the 'pyarrow' engine @xfail_pyarrow def test_categorical_dtype_chunksize_explicit_categories(all_parsers): # see gh-10153 @@ -253,7 +260,6 @@ def test_categorical_coerces_numeric(all_parsers): tm.assert_frame_equal(result, expected) -@skip_pyarrow # Flaky def test_categorical_coerces_datetime(all_parsers): parser = all_parsers dti = pd.DatetimeIndex(["2017-01-01", "2018-01-01", "2019-01-01"], freq=None) diff --git a/pandas/tests/io/parser/test_compression.py b/pandas/tests/io/parser/test_compression.py index 2ef7102543154..191d0de50b12f 100644 --- a/pandas/tests/io/parser/test_compression.py +++ b/pandas/tests/io/parser/test_compression.py @@ -17,8 +17,6 @@ "ignore:Passing a BlockManager to DataFrame:DeprecationWarning" ) -skip_pyarrow = pytest.mark.usefixtures("pyarrow_skip") - @pytest.fixture(params=[True, False]) def buffer(request): @@ -36,7 +34,6 @@ def parser_and_data(all_parsers, csv1): return parser, data, expected -@skip_pyarrow @pytest.mark.parametrize("compression", ["zip", "infer", "zip2"]) def test_zip(parser_and_data, compression): parser, data, expected = parser_and_data @@ -54,7 +51,6 @@ def test_zip(parser_and_data, compression): tm.assert_frame_equal(result, expected) -@skip_pyarrow @pytest.mark.parametrize("compression", ["zip", "infer"]) def test_zip_error_multiple_files(parser_and_data, compression): parser, data, expected = parser_and_data @@ -70,7 +66,6 @@ def test_zip_error_multiple_files(parser_and_data, compression): parser.read_csv(path, compression=compression) -@skip_pyarrow def test_zip_error_no_files(parser_and_data): parser, _, _ = parser_and_data @@ -82,7 +77,6 @@ def test_zip_error_no_files(parser_and_data): parser.read_csv(path, compression="zip") -@skip_pyarrow def test_zip_error_invalid_zip(parser_and_data): parser, _, _ = parser_and_data @@ -92,7 +86,6 @@ def test_zip_error_invalid_zip(parser_and_data): parser.read_csv(f, compression="zip") -@skip_pyarrow @pytest.mark.parametrize("filename", [None, "test.{ext}"]) def test_compression( request, @@ -128,7 +121,6 @@ def test_compression( tm.assert_frame_equal(result, expected) -@skip_pyarrow @pytest.mark.parametrize("ext", [None, "gz", "bz2"]) def test_infer_compression(all_parsers, csv1, buffer, ext): # see gh-9770 @@ -148,7 +140,6 @@ def test_infer_compression(all_parsers, csv1, buffer, ext): tm.assert_frame_equal(result, expected) -@skip_pyarrow def test_compression_utf_encoding(all_parsers, csv_dir_path, utf_value, encoding_fmt): # see gh-18071, gh-24130 parser = all_parsers @@ -166,7 +157,6 @@ def test_compression_utf_encoding(all_parsers, csv_dir_path, utf_value, encoding tm.assert_frame_equal(result, expected) -@skip_pyarrow @pytest.mark.parametrize("invalid_compression", ["sfark", "bz3", "zipper"]) def test_invalid_compression(all_parsers, invalid_compression): parser = all_parsers @@ -178,7 +168,6 @@ def test_invalid_compression(all_parsers, invalid_compression): parser.read_csv("test_file.zip", **compress_kwargs) -@skip_pyarrow def test_compression_tar_archive(all_parsers, csv_dir_path): parser = all_parsers path = os.path.join(csv_dir_path, "tar_csv.tar.gz") @@ -200,7 +189,6 @@ def test_ignore_compression_extension(all_parsers): tm.assert_frame_equal(parser.read_csv(path_zip, compression=None), df) -@skip_pyarrow def 
test_writes_tar_gz(all_parsers): parser = all_parsers data = DataFrame( diff --git a/pandas/tests/io/parser/test_encoding.py b/pandas/tests/io/parser/test_encoding.py index 070027860b829..9e1200c142d6b 100644 --- a/pandas/tests/io/parser/test_encoding.py +++ b/pandas/tests/io/parser/test_encoding.py @@ -23,7 +23,6 @@ "ignore:Passing a BlockManager to DataFrame:DeprecationWarning" ) -skip_pyarrow = pytest.mark.usefixtures("pyarrow_skip") xfail_pyarrow = pytest.mark.usefixtures("pyarrow_xfail") @@ -38,7 +37,7 @@ def test_bytes_io_input(all_parsers): tm.assert_frame_equal(result, expected) -@skip_pyarrow +@xfail_pyarrow # CSV parse error: Empty CSV file or block def test_read_csv_unicode(all_parsers): parser = all_parsers data = BytesIO("\u0141aski, Jan;1".encode()) @@ -181,13 +180,16 @@ def test_binary_mode_file_buffers(all_parsers, file_path, encoding, datapath): tm.assert_frame_equal(expected, result) -@skip_pyarrow @pytest.mark.parametrize("pass_encoding", [True, False]) def test_encoding_temp_file(all_parsers, utf_value, encoding_fmt, pass_encoding): # see gh-24130 parser = all_parsers encoding = encoding_fmt.format(utf_value) + if parser.engine == "pyarrow" and pass_encoding is True and utf_value in [16, 32]: + # FIXME: this is bad! + pytest.skip("These cases freeze") + expected = DataFrame({"foo": ["bar"]}) with tm.ensure_clean(mode="w+", encoding=encoding, return_filelike=True) as f: @@ -198,7 +200,6 @@ def test_encoding_temp_file(all_parsers, utf_value, encoding_fmt, pass_encoding) tm.assert_frame_equal(result, expected) -@skip_pyarrow def test_encoding_named_temp_file(all_parsers): # see gh-31819 parser = all_parsers diff --git a/pandas/tests/io/parser/test_header.py b/pandas/tests/io/parser/test_header.py index d059cc0c49db4..2edb389a0c830 100644 --- a/pandas/tests/io/parser/test_header.py +++ b/pandas/tests/io/parser/test_header.py @@ -22,11 +22,10 @@ "ignore:Passing a BlockManager to DataFrame:DeprecationWarning" ) -# TODO(1.4): Change me to xfails at release time -skip_pyarrow = pytest.mark.usefixtures("pyarrow_skip") +xfail_pyarrow = pytest.mark.usefixtures("pyarrow_xfail") -@skip_pyarrow +@xfail_pyarrow # TypeError: an integer is required def test_read_with_bad_header(all_parsers): parser = all_parsers msg = r"but only \d+ lines in file" @@ -80,7 +79,7 @@ def test_bool_header_arg(all_parsers, header): parser.read_csv(StringIO(data), header=header) -@skip_pyarrow +@xfail_pyarrow def test_header_with_index_col(all_parsers): parser = all_parsers data = """foo,1,2,3 @@ -118,7 +117,7 @@ def test_header_not_first_line(all_parsers): tm.assert_frame_equal(result, expected) -@skip_pyarrow +@xfail_pyarrow # TypeError: an integer is required def test_header_multi_index(all_parsers): parser = all_parsers expected = tm.makeCustomDataframe(5, 3, r_idx_nlevels=2, c_idx_nlevels=4) @@ -184,7 +183,7 @@ def test_header_multi_index_invalid(all_parsers, kwargs, msg): _TestTuple = namedtuple("_TestTuple", ["first", "second"]) -@skip_pyarrow +@xfail_pyarrow @pytest.mark.parametrize( "kwargs", [ @@ -232,7 +231,7 @@ def test_header_multi_index_common_format1(all_parsers, kwargs): tm.assert_frame_equal(result, expected) -@skip_pyarrow +@xfail_pyarrow @pytest.mark.parametrize( "kwargs", [ @@ -279,7 +278,7 @@ def test_header_multi_index_common_format2(all_parsers, kwargs): tm.assert_frame_equal(result, expected) -@skip_pyarrow +@xfail_pyarrow @pytest.mark.parametrize( "kwargs", [ @@ -327,7 +326,7 @@ def test_header_multi_index_common_format3(all_parsers, kwargs): tm.assert_frame_equal(result, expected) 
-@skip_pyarrow +@xfail_pyarrow # TypeError: an integer is required def test_header_multi_index_common_format_malformed1(all_parsers): parser = all_parsers expected = DataFrame( @@ -348,7 +347,7 @@ def test_header_multi_index_common_format_malformed1(all_parsers): tm.assert_frame_equal(expected, result) -@skip_pyarrow +@xfail_pyarrow # TypeError: an integer is required def test_header_multi_index_common_format_malformed2(all_parsers): parser = all_parsers expected = DataFrame( @@ -370,7 +369,7 @@ def test_header_multi_index_common_format_malformed2(all_parsers): tm.assert_frame_equal(expected, result) -@skip_pyarrow +@xfail_pyarrow # TypeError: an integer is required def test_header_multi_index_common_format_malformed3(all_parsers): parser = all_parsers expected = DataFrame( @@ -391,7 +390,7 @@ def test_header_multi_index_common_format_malformed3(all_parsers): tm.assert_frame_equal(expected, result) -@skip_pyarrow +@xfail_pyarrow # TypeError: an integer is required def test_header_multi_index_blank_line(all_parsers): # GH 40442 parser = all_parsers @@ -403,20 +402,24 @@ def test_header_multi_index_blank_line(all_parsers): tm.assert_frame_equal(expected, result) -@skip_pyarrow @pytest.mark.parametrize( "data,header", [("1,2,3\n4,5,6", None), ("foo,bar,baz\n1,2,3\n4,5,6", 0)] ) -def test_header_names_backward_compat(all_parsers, data, header): +def test_header_names_backward_compat(all_parsers, data, header, request): # see gh-2539 parser = all_parsers + + if parser.engine == "pyarrow" and header is not None: + mark = pytest.mark.xfail(reason="mismatched index") + request.applymarker(mark) + expected = parser.read_csv(StringIO("1,2,3\n4,5,6"), names=["a", "b", "c"]) result = parser.read_csv(StringIO(data), names=["a", "b", "c"], header=header) tm.assert_frame_equal(result, expected) -@skip_pyarrow +@xfail_pyarrow # CSV parse error: Empty CSV file or block: cannot infer @pytest.mark.parametrize("kwargs", [{}, {"index_col": False}]) def test_read_only_header_no_rows(all_parsers, kwargs): # See gh-7773 @@ -461,7 +464,7 @@ def test_non_int_header(all_parsers, header): parser.read_csv(StringIO(data), header=header) -@skip_pyarrow +@xfail_pyarrow # TypeError: an integer is required def test_singleton_header(all_parsers): # see gh-7757 data = """a,b,c\n0,1,2\n1,2,3""" @@ -472,7 +475,7 @@ def test_singleton_header(all_parsers): tm.assert_frame_equal(result, expected) -@skip_pyarrow +@xfail_pyarrow # TypeError: an integer is required @pytest.mark.parametrize( "data,expected", [ @@ -519,7 +522,7 @@ def test_mangles_multi_index(all_parsers, data, expected): tm.assert_frame_equal(result, expected) -@skip_pyarrow +@xfail_pyarrow # TypeError: an integer is requireds @pytest.mark.parametrize("index_col", [None, [0]]) @pytest.mark.parametrize( "columns", [None, (["", "Unnamed"]), (["Unnamed", ""]), (["Unnamed", "NotUnnamed"])] @@ -558,7 +561,7 @@ def test_multi_index_unnamed(all_parsers, index_col, columns): tm.assert_frame_equal(result, expected) -@skip_pyarrow +@xfail_pyarrow # CSV parse error: Expected 2 columns, got 3 def test_names_longer_than_header_but_equal_with_data_rows(all_parsers): # GH#38453 parser = all_parsers @@ -571,7 +574,7 @@ def test_names_longer_than_header_but_equal_with_data_rows(all_parsers): tm.assert_frame_equal(result, expected) -@skip_pyarrow +@xfail_pyarrow # TypeError: an integer is required def test_read_csv_multiindex_columns(all_parsers): # GH#6051 parser = all_parsers @@ -603,7 +606,7 @@ def test_read_csv_multiindex_columns(all_parsers): tm.assert_frame_equal(df2, expected) 
-@skip_pyarrow +@xfail_pyarrow # TypeError: an integer is required def test_read_csv_multi_header_length_check(all_parsers): # GH#43102 parser = all_parsers @@ -619,7 +622,7 @@ def test_read_csv_multi_header_length_check(all_parsers): parser.read_csv(StringIO(case), header=[0, 2]) -@skip_pyarrow +@xfail_pyarrow # CSV parse error: Expected 3 columns, got 2 def test_header_none_and_implicit_index(all_parsers): # GH#22144 parser = all_parsers @@ -631,7 +634,7 @@ def test_header_none_and_implicit_index(all_parsers): tm.assert_frame_equal(result, expected) -@skip_pyarrow +@xfail_pyarrow # regex mismatch "CSV parse error: Expected 2 columns, got " def test_header_none_and_implicit_index_in_second_row(all_parsers): # GH#22144 parser = all_parsers @@ -640,7 +643,6 @@ def test_header_none_and_implicit_index_in_second_row(all_parsers): parser.read_csv(StringIO(data), names=["a", "b"], header=None) -@skip_pyarrow def test_header_none_and_on_bad_lines_skip(all_parsers): # GH#22144 parser = all_parsers @@ -652,7 +654,7 @@ def test_header_none_and_on_bad_lines_skip(all_parsers): tm.assert_frame_equal(result, expected) -@skip_pyarrow +@xfail_pyarrow # TypeError: an integer is requireds def test_header_missing_rows(all_parsers): # GH#47400 parser = all_parsers @@ -664,7 +666,8 @@ def test_header_missing_rows(all_parsers): parser.read_csv(StringIO(data), header=[0, 1, 2]) -@skip_pyarrow +# ValueError: The 'delim_whitespace' option is not supported with the 'pyarrow' engine +@xfail_pyarrow def test_header_multiple_whitespaces(all_parsers): # GH#54931 parser = all_parsers @@ -676,7 +679,8 @@ def test_header_multiple_whitespaces(all_parsers): tm.assert_frame_equal(result, expected) -@skip_pyarrow +# ValueError: The 'delim_whitespace' option is not supported with the 'pyarrow' engine +@xfail_pyarrow def test_header_delim_whitespace(all_parsers): # GH#54918 parser = all_parsers diff --git a/pandas/tests/io/parser/test_index_col.py b/pandas/tests/io/parser/test_index_col.py index 83dae12b472da..b938b129ac38d 100644 --- a/pandas/tests/io/parser/test_index_col.py +++ b/pandas/tests/io/parser/test_index_col.py @@ -19,8 +19,7 @@ "ignore:Passing a BlockManager to DataFrame:DeprecationWarning" ) -# TODO(1.4): Change me to xfails at release time -skip_pyarrow = pytest.mark.usefixtures("pyarrow_skip") +xfail_pyarrow = pytest.mark.usefixtures("pyarrow_xfail") @pytest.mark.parametrize("with_header", [True, False]) @@ -77,7 +76,7 @@ def test_index_col_is_true(all_parsers): parser.read_csv(StringIO(data), index_col=True) -@skip_pyarrow +@xfail_pyarrow # CSV parse error: Expected 3 columns, got 4 def test_infer_index_col(all_parsers): data = """A,B,C foo,1,2,3 @@ -95,7 +94,7 @@ def test_infer_index_col(all_parsers): tm.assert_frame_equal(result, expected) -@skip_pyarrow +@xfail_pyarrow # CSV parse error: Empty CSV file or block @pytest.mark.parametrize( "index_col,kwargs", [ @@ -144,7 +143,7 @@ def test_index_col_empty_data(all_parsers, index_col, kwargs): tm.assert_frame_equal(result, expected) -@skip_pyarrow +@xfail_pyarrow # CSV parse error: Empty CSV file or block def test_empty_with_index_col_false(all_parsers): # see gh-10413 data = "x,y" @@ -155,7 +154,6 @@ def test_empty_with_index_col_false(all_parsers): tm.assert_frame_equal(result, expected) -@skip_pyarrow @pytest.mark.parametrize( "index_names", [ @@ -166,9 +164,13 @@ def test_empty_with_index_col_false(all_parsers): ["NotReallyUnnamed", "Unnamed: 0"], ], ) -def test_multi_index_naming(all_parsers, index_names): +def test_multi_index_naming(all_parsers, 
index_names, request): parser = all_parsers + if parser.engine == "pyarrow" and "" in index_names: + mark = pytest.mark.xfail(reason="One case raises, others are wrong") + request.applymarker(mark) + # We don't want empty index names being replaced with "Unnamed: 0" data = ",".join(index_names + ["col\na,c,1\na,d,2\nb,c,3\nb,d,4"]) result = parser.read_csv(StringIO(data), index_col=[0, 1]) @@ -180,7 +182,7 @@ def test_multi_index_naming(all_parsers, index_names): tm.assert_frame_equal(result, expected) -@skip_pyarrow +@xfail_pyarrow # ValueError: Found non-unique column index def test_multi_index_naming_not_all_at_beginning(all_parsers): parser = all_parsers data = ",Unnamed: 2,\na,c,1\na,d,2\nb,c,3\nb,d,4" @@ -195,7 +197,7 @@ def test_multi_index_naming_not_all_at_beginning(all_parsers): tm.assert_frame_equal(result, expected) -@skip_pyarrow +@xfail_pyarrow # ValueError: Found non-unique column index def test_no_multi_index_level_names_empty(all_parsers): # GH 10984 parser = all_parsers @@ -211,7 +213,7 @@ def test_no_multi_index_level_names_empty(all_parsers): tm.assert_frame_equal(result, expected) -@skip_pyarrow +@xfail_pyarrow # TypeError: an integer is required def test_header_with_index_col(all_parsers): # GH 33476 parser = all_parsers @@ -257,7 +259,7 @@ def test_index_col_large_csv(all_parsers, monkeypatch): tm.assert_frame_equal(result, df.set_index("a")) -@skip_pyarrow +@xfail_pyarrow # TypeError: an integer is required def test_index_col_multiindex_columns_no_data(all_parsers): # GH#38292 parser = all_parsers @@ -274,7 +276,7 @@ def test_index_col_multiindex_columns_no_data(all_parsers): tm.assert_frame_equal(result, expected) -@skip_pyarrow +@xfail_pyarrow # TypeError: an integer is required def test_index_col_header_no_data(all_parsers): # GH#38292 parser = all_parsers @@ -287,7 +289,7 @@ def test_index_col_header_no_data(all_parsers): tm.assert_frame_equal(result, expected) -@skip_pyarrow +@xfail_pyarrow # TypeError: an integer is required def test_multiindex_columns_no_data(all_parsers): # GH#38292 parser = all_parsers @@ -298,7 +300,7 @@ def test_multiindex_columns_no_data(all_parsers): tm.assert_frame_equal(result, expected) -@skip_pyarrow +@xfail_pyarrow # TypeError: an integer is required def test_multiindex_columns_index_col_with_data(all_parsers): # GH#38292 parser = all_parsers @@ -315,7 +317,7 @@ def test_multiindex_columns_index_col_with_data(all_parsers): tm.assert_frame_equal(result, expected) -@skip_pyarrow +@xfail_pyarrow # CSV parse error: Empty CSV file or block def test_infer_types_boolean_sum(all_parsers): # GH#44079 parser = all_parsers @@ -353,7 +355,7 @@ def test_specify_dtype_for_index_col(all_parsers, dtype, val, request): tm.assert_frame_equal(result, expected) -@skip_pyarrow +@xfail_pyarrow # TypeError: an integer is required def test_multiindex_columns_not_leading_index_col(all_parsers): # GH#38549 parser = all_parsers diff --git a/pandas/tests/io/parser/test_mangle_dupes.py b/pandas/tests/io/parser/test_mangle_dupes.py index 4acbb82a5f23f..7d148ae6c5a27 100644 --- a/pandas/tests/io/parser/test_mangle_dupes.py +++ b/pandas/tests/io/parser/test_mangle_dupes.py @@ -10,10 +10,15 @@ from pandas import DataFrame import pandas._testing as tm -skip_pyarrow = pytest.mark.usefixtures("pyarrow_skip") +xfail_pyarrow = pytest.mark.usefixtures("pyarrow_xfail") -@skip_pyarrow +pytestmark = pytest.mark.filterwarnings( + "ignore:Passing a BlockManager to DataFrame:DeprecationWarning" +) + + +@xfail_pyarrow def test_basic(all_parsers): parser = all_parsers @@ -24,7 
+29,7 @@ def test_basic(all_parsers): tm.assert_frame_equal(result, expected) -@skip_pyarrow +@xfail_pyarrow def test_basic_names(all_parsers): # See gh-7160 parser = all_parsers @@ -45,7 +50,7 @@ def test_basic_names_raise(all_parsers): parser.read_csv(StringIO(data), names=["a", "b", "a"]) -@skip_pyarrow +@xfail_pyarrow @pytest.mark.parametrize( "data,expected", [ @@ -74,7 +79,6 @@ def test_thorough_mangle_columns(all_parsers, data, expected): tm.assert_frame_equal(result, expected) -@skip_pyarrow @pytest.mark.parametrize( "data,names,expected", [ @@ -114,7 +118,7 @@ def test_thorough_mangle_names(all_parsers, data, names, expected): parser.read_csv(StringIO(data), names=names) -@skip_pyarrow +@xfail_pyarrow def test_mangled_unnamed_placeholders(all_parsers): # xref gh-13017 orig_key = "0" @@ -137,7 +141,7 @@ def test_mangled_unnamed_placeholders(all_parsers): tm.assert_frame_equal(df, expected) -@skip_pyarrow +@xfail_pyarrow def test_mangle_dupe_cols_already_exists(all_parsers): # GH#14704 parser = all_parsers @@ -151,7 +155,7 @@ def test_mangle_dupe_cols_already_exists(all_parsers): tm.assert_frame_equal(result, expected) -@skip_pyarrow +@xfail_pyarrow def test_mangle_dupe_cols_already_exists_unnamed_col(all_parsers): # GH#14704 parser = all_parsers @@ -165,7 +169,6 @@ def test_mangle_dupe_cols_already_exists_unnamed_col(all_parsers): tm.assert_frame_equal(result, expected) -@skip_pyarrow @pytest.mark.parametrize("usecol, engine", [([0, 1, 1], "python"), ([0, 1, 1], "c")]) def test_mangle_cols_names(all_parsers, usecol, engine): # GH 11823 diff --git a/pandas/tests/io/parser/test_na_values.py b/pandas/tests/io/parser/test_na_values.py index 86c50fe103f2c..59dae1eaa7e6c 100644 --- a/pandas/tests/io/parser/test_na_values.py +++ b/pandas/tests/io/parser/test_na_values.py @@ -20,7 +20,6 @@ "ignore:Passing a BlockManager to DataFrame:DeprecationWarning" ) -skip_pyarrow = pytest.mark.usefixtures("pyarrow_skip") xfail_pyarrow = pytest.mark.usefixtures("pyarrow_xfail") @@ -59,7 +58,7 @@ def test_detect_string_na(all_parsers): tm.assert_frame_equal(result, expected) -@skip_pyarrow +@xfail_pyarrow # TypeError: expected bytes, int found @pytest.mark.parametrize( "na_values", [ @@ -145,8 +144,8 @@ def f(i, v): tm.assert_frame_equal(result, expected) -# TODO: needs skiprows list support in pyarrow -@skip_pyarrow +# ValueError: skiprows argument must be an integer when using engine='pyarrow' +@xfail_pyarrow @pytest.mark.parametrize("na_values", ["baz", ["baz"]]) def test_custom_na_values(all_parsers, na_values): parser = all_parsers @@ -183,8 +182,7 @@ def test_bool_na_values(all_parsers): tm.assert_frame_equal(result, expected) -# TODO: Needs pyarrow support for dictionary in na_values -@skip_pyarrow +@xfail_pyarrow # ValueError: The pyarrow engine doesn't support passing a dict def test_na_value_dict(all_parsers): data = """A,B,C foo,bar,NA @@ -236,8 +234,7 @@ def test_na_value_dict_multi_index(all_parsers, index_col, expected): tm.assert_frame_equal(result, expected) -# TODO: xfail components of this test, the first one passes -@skip_pyarrow +@xfail_pyarrow # ValueError: The pyarrow engine doesn't support passing a dict @pytest.mark.parametrize( "kwargs,expected", [ @@ -325,8 +322,7 @@ def test_no_na_values_no_keep_default(all_parsers): tm.assert_frame_equal(result, expected) -# TODO: Blocked on na_values dict support in pyarrow -@skip_pyarrow +@xfail_pyarrow # ValueError: The pyarrow engine doesn't support passing a dict def test_no_keep_default_na_dict_na_values(all_parsers): # see gh-19227 
data = "a,b\n,2" @@ -338,8 +334,7 @@ def test_no_keep_default_na_dict_na_values(all_parsers): tm.assert_frame_equal(result, expected) -# TODO: Blocked on na_values dict support in pyarrow -@skip_pyarrow +@xfail_pyarrow # ValueError: The pyarrow engine doesn't support passing a dict def test_no_keep_default_na_dict_na_scalar_values(all_parsers): # see gh-19227 # @@ -351,8 +346,7 @@ def test_no_keep_default_na_dict_na_scalar_values(all_parsers): tm.assert_frame_equal(df, expected) -# TODO: Blocked on na_values dict support in pyarrow -@skip_pyarrow +@xfail_pyarrow # ValueError: The pyarrow engine doesn't support passing a dict @pytest.mark.parametrize("col_zero_na_values", [113125, "113125"]) def test_no_keep_default_na_dict_na_values_diff_reprs(all_parsers, col_zero_na_values): # see gh-19227 @@ -382,8 +376,7 @@ def test_no_keep_default_na_dict_na_values_diff_reprs(all_parsers, col_zero_na_v tm.assert_frame_equal(result, expected) -# TODO: Empty null_values doesn't work properly on pyarrow -@skip_pyarrow +@xfail_pyarrow # mismatched dtypes in both cases, FutureWarning in the True case @pytest.mark.parametrize( "na_filter,row_data", [ @@ -405,8 +398,7 @@ def test_na_values_na_filter_override(all_parsers, na_filter, row_data): tm.assert_frame_equal(result, expected) -# TODO: Arrow parse error -@skip_pyarrow +@xfail_pyarrow # CSV parse error: Expected 8 columns, got 5: def test_na_trailing_columns(all_parsers): parser = all_parsers data = """Date,Currency,Symbol,Type,Units,UnitPrice,Cost,Tax @@ -434,8 +426,7 @@ def test_na_trailing_columns(all_parsers): tm.assert_frame_equal(result, expected) -# TODO: xfail the na_values dict case -@skip_pyarrow +@xfail_pyarrow # TypeError: expected bytes, int found @pytest.mark.parametrize( "na_values,row_data", [ @@ -454,7 +445,7 @@ def test_na_values_scalar(all_parsers, na_values, row_data): tm.assert_frame_equal(result, expected) -@skip_pyarrow +@xfail_pyarrow # ValueError: The pyarrow engine doesn't support passing a dict def test_na_values_dict_aliasing(all_parsers): parser = all_parsers na_values = {"a": 2, "b": 1} @@ -470,7 +461,7 @@ def test_na_values_dict_aliasing(all_parsers): tm.assert_dict_equal(na_values, na_values_copy) -@skip_pyarrow +@xfail_pyarrow # ValueError: The pyarrow engine doesn't support passing a dict def test_na_values_dict_col_index(all_parsers): # see gh-14203 data = "a\nfoo\n1" @@ -482,7 +473,7 @@ def test_na_values_dict_col_index(all_parsers): tm.assert_frame_equal(result, expected) -@skip_pyarrow +@xfail_pyarrow # TypeError: expected bytes, int found @pytest.mark.parametrize( "data,kwargs,expected", [ @@ -512,18 +503,20 @@ def test_empty_na_values_no_default_with_index(all_parsers): tm.assert_frame_equal(result, expected) -# TODO: Missing support for na_filter kewyord -@skip_pyarrow @pytest.mark.parametrize( "na_filter,index_data", [(False, ["", "5"]), (True, [np.nan, 5.0])] ) -def test_no_na_filter_on_index(all_parsers, na_filter, index_data): +def test_no_na_filter_on_index(all_parsers, na_filter, index_data, request): # see gh-5239 # # Don't parse NA-values in index unless na_filter=True parser = all_parsers data = "a,b,c\n1,,3\n4,5,6" + if parser.engine == "pyarrow" and na_filter is False: + mark = pytest.mark.xfail(reason="mismatched index result") + request.applymarker(mark) + expected = DataFrame({"a": [1, 4], "c": [3, 6]}, index=Index(index_data, name="b")) result = parser.read_csv(StringIO(data), index_col=[1], na_filter=na_filter) tm.assert_frame_equal(result, expected) @@ -542,7 +535,7 @@ def 
test_inf_na_values_with_int_index(all_parsers): tm.assert_frame_equal(out, expected) -@skip_pyarrow +@xfail_pyarrow # mismatched shape @pytest.mark.parametrize("na_filter", [True, False]) def test_na_values_with_dtype_str_and_na_filter(all_parsers, na_filter): # see gh-20377 @@ -558,7 +551,7 @@ def test_na_values_with_dtype_str_and_na_filter(all_parsers, na_filter): tm.assert_frame_equal(result, expected) -@skip_pyarrow +@xfail_pyarrow # mismatched exception message @pytest.mark.parametrize( "data, na_values", [ @@ -587,7 +580,7 @@ def test_cast_NA_to_bool_raises_error(all_parsers, data, na_values): ) -@skip_pyarrow +@xfail_pyarrow # mismatched shapes def test_str_nan_dropped(all_parsers): # see gh-21131 parser = all_parsers @@ -616,7 +609,7 @@ def test_str_nan_dropped(all_parsers): tm.assert_frame_equal(result, expected) -@skip_pyarrow +@xfail_pyarrow # ValueError: The pyarrow engine doesn't support passing a dict def test_nan_multi_index(all_parsers): # GH 42446 parser = all_parsers diff --git a/pandas/tests/io/parser/test_parse_dates.py b/pandas/tests/io/parser/test_parse_dates.py index 9b911466b5034..554151841aa22 100644 --- a/pandas/tests/io/parser/test_parse_dates.py +++ b/pandas/tests/io/parser/test_parse_dates.py @@ -41,10 +41,6 @@ xfail_pyarrow = pytest.mark.usefixtures("pyarrow_xfail") -# GH#43650: Some expected failures with the pyarrow engine can occasionally -# cause a deadlock instead, so we skip these instead of xfailing -skip_pyarrow = pytest.mark.usefixtures("pyarrow_skip") - @xfail_pyarrow def test_read_csv_with_custom_date_parser(all_parsers): @@ -1009,11 +1005,11 @@ def test_parse_tz_aware(all_parsers): expected = DataFrame( {"x": [0.5]}, index=Index([Timestamp("2012-06-13 01:39:00+00:00")], name="Date") ) - tm.assert_frame_equal(result, expected) if parser.engine == "pyarrow": expected_tz = pytz.utc else: expected_tz = timezone.utc + tm.assert_frame_equal(result, expected) assert result.index.tz is expected_tz @@ -1799,7 +1795,7 @@ def test_parse_timezone(all_parsers): tm.assert_frame_equal(result, expected) -@skip_pyarrow +@xfail_pyarrow # pandas.errors.ParserError: CSV parse error @pytest.mark.parametrize( "date_string", ["32/32/2019", "02/30/2019", "13/13/2019", "13/2019", "a3/11/2018", "10/11/2o17"], @@ -1815,7 +1811,6 @@ def test_invalid_parse_delimited_date(all_parsers, date_string): tm.assert_frame_equal(result, expected) -@skip_pyarrow @pytest.mark.parametrize( "date_string,dayfirst,expected", [ @@ -1828,17 +1823,28 @@ def test_invalid_parse_delimited_date(all_parsers, date_string): ], ) def test_parse_delimited_date_swap_no_warning( - all_parsers, date_string, dayfirst, expected + all_parsers, date_string, dayfirst, expected, request ): parser = all_parsers expected = DataFrame({0: [expected]}, dtype="datetime64[ns]") + if parser.engine == "pyarrow": + if not dayfirst: + mark = pytest.mark.xfail(reason="CSV parse error: Empty CSV file or block") + request.applymarker(mark) + msg = "The 'dayfirst' option is not supported with the 'pyarrow' engine" + with pytest.raises(ValueError, match=msg): + parser.read_csv( + StringIO(date_string), header=None, dayfirst=dayfirst, parse_dates=[0] + ) + return + result = parser.read_csv( StringIO(date_string), header=None, dayfirst=dayfirst, parse_dates=[0] ) tm.assert_frame_equal(result, expected) -@skip_pyarrow +@xfail_pyarrow @pytest.mark.parametrize( "date_string,dayfirst,expected", [ @@ -1889,7 +1895,6 @@ def _helper_hypothesis_delimited_date(call, date_string, **kwargs): return msg, result -@skip_pyarrow 
@given(DATETIME_NO_TZ) @pytest.mark.parametrize("delimiter", list(" -./")) @pytest.mark.parametrize("dayfirst", [True, False]) @@ -1924,7 +1929,7 @@ def test_hypothesis_delimited_date( assert result == expected -@skip_pyarrow +@xfail_pyarrow # KeyErrors @pytest.mark.parametrize( "names, usecols, parse_dates, missing_cols", [ @@ -1957,13 +1962,17 @@ def test_missing_parse_dates_column_raises( ) -@skip_pyarrow +@xfail_pyarrow # mismatched shape def test_date_parser_and_names(all_parsers): # GH#33699 parser = all_parsers data = StringIO("""x,y\n1,2""") + warn = UserWarning + if parser.engine == "pyarrow": + # DeprecationWarning for passing a Manager object + warn = (UserWarning, DeprecationWarning) result = parser.read_csv_check_warnings( - UserWarning, + warn, "Could not infer format", data, parse_dates=["B"], @@ -1973,7 +1982,7 @@ def test_date_parser_and_names(all_parsers): tm.assert_frame_equal(result, expected) -@skip_pyarrow +@xfail_pyarrow # TypeError: an integer is required def test_date_parser_multiindex_columns(all_parsers): parser = all_parsers data = """a,b @@ -1986,7 +1995,7 @@ def test_date_parser_multiindex_columns(all_parsers): tm.assert_frame_equal(result, expected) -@skip_pyarrow +@xfail_pyarrow # TypeError: an integer is required @pytest.mark.parametrize( "parse_spec, col_name", [ @@ -2010,7 +2019,8 @@ def test_date_parser_multiindex_columns_combine_cols(all_parsers, parse_spec, co tm.assert_frame_equal(result, expected) -@skip_pyarrow +# ValueError: The 'thousands' option is not supported with the 'pyarrow' engine +@xfail_pyarrow def test_date_parser_usecols_thousands(all_parsers): # GH#39365 data = """A,B,C @@ -2019,8 +2029,12 @@ def test_date_parser_usecols_thousands(all_parsers): """ parser = all_parsers + warn = UserWarning + if parser.engine == "pyarrow": + # DeprecationWarning for passing a Manager object + warn = (UserWarning, DeprecationWarning) result = parser.read_csv_check_warnings( - UserWarning, + warn, "Could not infer format", StringIO(data), parse_dates=[1], @@ -2031,7 +2045,7 @@ def test_date_parser_usecols_thousands(all_parsers): tm.assert_frame_equal(result, expected) -@skip_pyarrow +@xfail_pyarrow # mismatched shape def test_parse_dates_and_keep_original_column(all_parsers): # GH#13378 parser = all_parsers @@ -2130,7 +2144,7 @@ def test_dayfirst_warnings_no_leading_zero(date_string, dayfirst): tm.assert_index_equal(expected, res) -@skip_pyarrow +@xfail_pyarrow # CSV parse error: Expected 3 columns, got 4 def test_infer_first_column_as_index(all_parsers): # GH#11019 parser = all_parsers @@ -2143,7 +2157,7 @@ def test_infer_first_column_as_index(all_parsers): tm.assert_frame_equal(result, expected) -@skip_pyarrow +@xfail_pyarrow # pyarrow engine doesn't support passing a dict for na_values @pytest.mark.parametrize( ("key", "value", "warn"), [ @@ -2183,7 +2197,7 @@ def test_replace_nans_before_parsing_dates(all_parsers, key, value, warn): tm.assert_frame_equal(result, expected) -@skip_pyarrow +@xfail_pyarrow # string[python] instead of dt64[ns] def test_parse_dates_and_string_dtype(all_parsers): # GH#34066 parser = all_parsers @@ -2246,7 +2260,6 @@ def test_parse_dates_dict_format(all_parsers): tm.assert_frame_equal(result, expected) -@skip_pyarrow @pytest.mark.parametrize( "key, parse_dates", [("a_b", [[0, 1]]), ("foo", {"foo": [0, 1]})] ) @@ -2257,7 +2270,11 @@ def test_parse_dates_dict_format_two_columns(all_parsers, key, parse_dates): 31-,12-2019 31-,12-2020""" - with tm.assert_produces_warning(None): + warn = None + if parser.engine == "pyarrow": + 
warn = DeprecationWarning + msg = "Passing a BlockManager to DataFrame is deprecated" + with tm.assert_produces_warning(warn, match=msg, check_stacklevel=False): result = parser.read_csv( StringIO(data), date_format={key: "%d- %m-%Y"}, parse_dates=parse_dates ) @@ -2269,7 +2286,7 @@ def test_parse_dates_dict_format_two_columns(all_parsers, key, parse_dates): tm.assert_frame_equal(result, expected) -@skip_pyarrow +@xfail_pyarrow # object dtype index def test_parse_dates_dict_format_index(all_parsers): # GH#51240 parser = all_parsers @@ -2312,7 +2329,7 @@ def test_parse_dates_arrow_engine(all_parsers): tm.assert_frame_equal(result, expected) -@xfail_pyarrow +@xfail_pyarrow # object dtype index def test_from_csv_with_mixed_offsets(all_parsers): parser = all_parsers data = "a\n2020-01-01T00:00:00+01:00\n2020-01-01T00:00:00+00:00" diff --git a/pandas/tests/io/parser/usecols/test_parse_dates.py b/pandas/tests/io/parser/usecols/test_parse_dates.py index f223810772225..bcb1c6af80df6 100644 --- a/pandas/tests/io/parser/usecols/test_parse_dates.py +++ b/pandas/tests/io/parser/usecols/test_parse_dates.py @@ -13,10 +13,6 @@ ) import pandas._testing as tm -# TODO(1.4): Change these to xfails whenever parse_dates support(which was -# intentionally disable to keep small PR sizes) is added back -# pytestmark = pytest.mark.usefixtures("pyarrow_skip") - pytestmark = pytest.mark.filterwarnings( "ignore:Passing a BlockManager to DataFrame:DeprecationWarning" ) diff --git a/pandas/tests/io/parser/usecols/test_usecols_basic.py b/pandas/tests/io/parser/usecols/test_usecols_basic.py index 07c94e301b37a..e1ae2e8f3655c 100644 --- a/pandas/tests/io/parser/usecols/test_usecols_basic.py +++ b/pandas/tests/io/parser/usecols/test_usecols_basic.py @@ -25,8 +25,11 @@ "Usecols do not match columns, columns expected but not found: {0}" ) -# TODO: Switch to xfails -skip_pyarrow = pytest.mark.usefixtures("pyarrow_skip") +xfail_pyarrow = pytest.mark.usefixtures("pyarrow_xfail") + +pytestmark = pytest.mark.filterwarnings( + "ignore:Passing a BlockManager to DataFrame is deprecated:DeprecationWarning" +) def test_raise_on_mixed_dtype_usecols(all_parsers): @@ -42,9 +45,8 @@ def test_raise_on_mixed_dtype_usecols(all_parsers): parser.read_csv(StringIO(data), usecols=usecols) -@skip_pyarrow @pytest.mark.parametrize("usecols", [(1, 2), ("b", "c")]) -def test_usecols(all_parsers, usecols): +def test_usecols(all_parsers, usecols, request): data = """\ a,b,c 1,2,3 @@ -52,13 +54,16 @@ def test_usecols(all_parsers, usecols): 7,8,9 10,11,12""" parser = all_parsers + if parser.engine == "pyarrow" and isinstance(usecols[0], int): + mark = pytest.mark.xfail(raises=TypeError, reason="expected bytes, int found") + request.applymarker(mark) result = parser.read_csv(StringIO(data), usecols=usecols) expected = DataFrame([[2, 3], [5, 6], [8, 9], [11, 12]], columns=["b", "c"]) tm.assert_frame_equal(result, expected) -@skip_pyarrow +@xfail_pyarrow # TypeError: expected bytes, int found def test_usecols_with_names(all_parsers): data = """\ a,b,c @@ -74,24 +79,28 @@ def test_usecols_with_names(all_parsers): tm.assert_frame_equal(result, expected) -@skip_pyarrow @pytest.mark.parametrize( "names,usecols", [(["b", "c"], [1, 2]), (["a", "b", "c"], ["b", "c"])] ) -def test_usecols_relative_to_names(all_parsers, names, usecols): +def test_usecols_relative_to_names(all_parsers, names, usecols, request): data = """\ 1,2,3 4,5,6 7,8,9 10,11,12""" parser = all_parsers + if parser.engine == "pyarrow" and not isinstance(usecols[0], int): + mark = 
pytest.mark.xfail( + reason="ArrowKeyError: Column 'fb' in include_columns does not exist" + ) + request.applymarker(mark) + result = parser.read_csv(StringIO(data), names=names, header=None, usecols=usecols) expected = DataFrame([[2, 3], [5, 6], [8, 9], [11, 12]], columns=["b", "c"]) tm.assert_frame_equal(result, expected) -@skip_pyarrow def test_usecols_relative_to_names2(all_parsers): # see gh-5766 data = """\ @@ -100,6 +109,7 @@ def test_usecols_relative_to_names2(all_parsers): 7,8,9 10,11,12""" parser = all_parsers + result = parser.read_csv( StringIO(data), names=["a", "b"], header=None, usecols=[0, 1] ) @@ -108,7 +118,8 @@ def test_usecols_relative_to_names2(all_parsers): tm.assert_frame_equal(result, expected) -@skip_pyarrow +# regex mismatch: "Length mismatch: Expected axis has 1 elements" +@xfail_pyarrow def test_usecols_name_length_conflict(all_parsers): data = """\ 1,2,3 @@ -133,7 +144,7 @@ def test_usecols_single_string(all_parsers): parser.read_csv(StringIO(data), usecols="foo") -@skip_pyarrow +@xfail_pyarrow # CSV parse error in one case, AttributeError in another @pytest.mark.parametrize( "data", ["a,b,c,d\n1,2,3,4\n5,6,7,8", "a,b,c,d\n1,2,3,4,\n5,6,7,8,"] ) @@ -147,12 +158,14 @@ def test_usecols_index_col_false(all_parsers, data): tm.assert_frame_equal(result, expected) -@skip_pyarrow @pytest.mark.parametrize("index_col", ["b", 0]) @pytest.mark.parametrize("usecols", [["b", "c"], [1, 2]]) -def test_usecols_index_col_conflict(all_parsers, usecols, index_col): +def test_usecols_index_col_conflict(all_parsers, usecols, index_col, request): # see gh-4201: test that index_col as integer reflects usecols parser = all_parsers + if parser.engine == "pyarrow" and isinstance(usecols[0], int): + mark = pytest.mark.xfail(raises=TypeError, match="expected bytes, int found") + request.applymarker(mark) data = "a,b,c,d\nA,a,1,one\nB,b,2,two" expected = DataFrame({"c": [1, 2]}, index=Index(["a", "b"], name="b")) @@ -168,18 +181,13 @@ def test_usecols_index_col_conflict2(all_parsers): expected = DataFrame({"b": ["a", "b"], "c": [1, 2], "d": ("one", "two")}) expected = expected.set_index(["b", "c"]) - msg = "Passing a BlockManager to DataFrame is deprecated" - warn = None - if parser.engine == "pyarrow": - warn = DeprecationWarning - with tm.assert_produces_warning(warn, match=msg, check_stacklevel=False): - result = parser.read_csv( - StringIO(data), usecols=["b", "c", "d"], index_col=["b", "c"] - ) + result = parser.read_csv( + StringIO(data), usecols=["b", "c", "d"], index_col=["b", "c"] + ) tm.assert_frame_equal(result, expected) -@skip_pyarrow +@xfail_pyarrow # CSV parse error: Expected 3 columns, got 4 def test_usecols_implicit_index_col(all_parsers): # see gh-2654 parser = all_parsers @@ -196,12 +204,7 @@ def test_usecols_index_col_middle(all_parsers): data = """a,b,c,d 1,2,3,4 """ - msg = "Passing a BlockManager to DataFrame is deprecated" - warn = None - if parser.engine == "pyarrow": - warn = DeprecationWarning - with tm.assert_produces_warning(warn, match=msg, check_stacklevel=False): - result = parser.read_csv(StringIO(data), usecols=["b", "c", "d"], index_col="c") + result = parser.read_csv(StringIO(data), usecols=["b", "c", "d"], index_col="c") expected = DataFrame({"b": [2], "d": [4]}, index=Index([3], name="c")) tm.assert_frame_equal(result, expected) @@ -212,38 +215,43 @@ def test_usecols_index_col_end(all_parsers): data = """a,b,c,d 1,2,3,4 """ - msg = "Passing a BlockManager to DataFrame is deprecated" - warn = None - if parser.engine == "pyarrow": - warn = 
DeprecationWarning - with tm.assert_produces_warning(warn, match=msg, check_stacklevel=False): - result = parser.read_csv(StringIO(data), usecols=["b", "c", "d"], index_col="d") + result = parser.read_csv(StringIO(data), usecols=["b", "c", "d"], index_col="d") expected = DataFrame({"b": [2], "c": [3]}, index=Index([4], name="d")) tm.assert_frame_equal(result, expected) -@skip_pyarrow def test_usecols_regex_sep(all_parsers): # see gh-2733 parser = all_parsers data = "a b c\n4 apple bat 5.7\n8 orange cow 10" + + if parser.engine == "pyarrow": + msg = "the 'pyarrow' engine does not support regex separators" + with pytest.raises(ValueError, match=msg): + parser.read_csv(StringIO(data), sep=r"\s+", usecols=("a", "b")) + return + result = parser.read_csv(StringIO(data), sep=r"\s+", usecols=("a", "b")) expected = DataFrame({"a": ["apple", "orange"], "b": ["bat", "cow"]}, index=[4, 8]) tm.assert_frame_equal(result, expected) -@skip_pyarrow def test_usecols_with_whitespace(all_parsers): parser = all_parsers data = "a b c\n4 apple bat 5.7\n8 orange cow 10" + if parser.engine == "pyarrow": + msg = "The 'delim_whitespace' option is not supported with the 'pyarrow' engine" + with pytest.raises(ValueError, match=msg): + parser.read_csv(StringIO(data), delim_whitespace=True, usecols=("a", "b")) + return + result = parser.read_csv(StringIO(data), delim_whitespace=True, usecols=("a", "b")) expected = DataFrame({"a": ["apple", "orange"], "b": ["bat", "cow"]}, index=[4, 8]) tm.assert_frame_equal(result, expected) -@skip_pyarrow @pytest.mark.parametrize( "usecols,expected", [ @@ -256,17 +264,21 @@ def test_usecols_with_whitespace(all_parsers): ), ], ) -def test_usecols_with_integer_like_header(all_parsers, usecols, expected): +def test_usecols_with_integer_like_header(all_parsers, usecols, expected, request): parser = all_parsers data = """2,0,1 1000,2000,3000 4000,5000,6000""" + if parser.engine == "pyarrow" and isinstance(usecols[0], int): + mark = pytest.mark.xfail(raises=TypeError, reason="expected bytes, int found") + request.applymarker(mark) + result = parser.read_csv(StringIO(data), usecols=usecols) tm.assert_frame_equal(result, expected) -@skip_pyarrow +@xfail_pyarrow # mismatched shape def test_empty_usecols(all_parsers): data = "a,b,c\n1,2,3\n4,5,6" expected = DataFrame(columns=Index([])) @@ -283,16 +295,11 @@ def test_np_array_usecols(all_parsers): usecols = np.array(["a", "b"]) expected = DataFrame([[1, 2]], columns=usecols) - msg = "Passing a BlockManager to DataFrame is deprecated" - warn = None - if parser.engine == "pyarrow": - warn = DeprecationWarning - with tm.assert_produces_warning(warn, match=msg, check_stacklevel=False): - result = parser.read_csv(StringIO(data), usecols=usecols) + result = parser.read_csv(StringIO(data), usecols=usecols) tm.assert_frame_equal(result, expected) -@skip_pyarrow +@xfail_pyarrow # TypeError: 'function' object is not iterable @pytest.mark.parametrize( "usecols,expected", [ @@ -325,7 +332,8 @@ def test_callable_usecols(all_parsers, usecols, expected): tm.assert_frame_equal(result, expected) -@skip_pyarrow +# ArrowKeyError: Column 'fa' in include_columns does not exist in CSV file +@xfail_pyarrow @pytest.mark.parametrize("usecols", [["a", "c"], lambda x: x in ["a", "c"]]) def test_incomplete_first_row(all_parsers, usecols): # see gh-6710 @@ -338,7 +346,7 @@ def test_incomplete_first_row(all_parsers, usecols): tm.assert_frame_equal(result, expected) -@skip_pyarrow +@xfail_pyarrow # CSV parse error: Expected 3 columns, got 4 @pytest.mark.parametrize( 
"data,usecols,kwargs,expected", [ @@ -371,7 +379,6 @@ def test_uneven_length_cols(all_parsers, data, usecols, kwargs, expected): tm.assert_frame_equal(result, expected) -@skip_pyarrow @pytest.mark.parametrize( "usecols,kwargs,expected,msg", [ @@ -415,11 +422,22 @@ def test_uneven_length_cols(all_parsers, data, usecols, kwargs, expected): ), ], ) -def test_raises_on_usecols_names_mismatch(all_parsers, usecols, kwargs, expected, msg): +def test_raises_on_usecols_names_mismatch( + all_parsers, usecols, kwargs, expected, msg, request +): data = "a,b,c,d\n1,2,3,4\n5,6,7,8" kwargs.update(usecols=usecols) parser = all_parsers + if parser.engine == "pyarrow" and not ( + usecols is not None and expected is not None + ): + # everything but the first case + mark = pytest.mark.xfail( + reason="e.g. Column 'f' in include_columns does not exist in CSV file" + ) + request.applymarker(mark) + if expected is None: with pytest.raises(ValueError, match=msg): parser.read_csv(StringIO(data), **kwargs) @@ -428,7 +446,7 @@ def test_raises_on_usecols_names_mismatch(all_parsers, usecols, kwargs, expected tm.assert_frame_equal(result, expected) -@skip_pyarrow +@xfail_pyarrow # TypeError: expected bytes, int found @pytest.mark.parametrize("usecols", [["A", "C"], [0, 2]]) def test_usecols_subset_names_mismatch_orig_columns(all_parsers, usecols): data = "a,b,c,d\n1,2,3,4\n5,6,7,8" @@ -440,7 +458,7 @@ def test_usecols_subset_names_mismatch_orig_columns(all_parsers, usecols): tm.assert_frame_equal(result, expected) -@skip_pyarrow +@xfail_pyarrow # TypeError: expected bytes, int found @pytest.mark.parametrize("names", [None, ["a", "b"]]) def test_usecols_indices_out_of_bounds(all_parsers, names): # GH#25623 & GH 41130; enforced in 2.0 @@ -453,21 +471,30 @@ def test_usecols_indices_out_of_bounds(all_parsers, names): parser.read_csv(StringIO(data), usecols=[0, 2], names=names, header=0) -@skip_pyarrow def test_usecols_additional_columns(all_parsers): # GH#46997 parser = all_parsers usecols = lambda header: header.strip() in ["a", "b", "c"] + + if parser.engine == "pyarrow": + msg = "'function' object is not iterable" + with pytest.raises(TypeError, match=msg): + parser.read_csv(StringIO("a,b\nx,y,z"), index_col=False, usecols=usecols) + return result = parser.read_csv(StringIO("a,b\nx,y,z"), index_col=False, usecols=usecols) expected = DataFrame({"a": ["x"], "b": "y"}) tm.assert_frame_equal(result, expected) -@skip_pyarrow def test_usecols_additional_columns_integer_columns(all_parsers): # GH#46997 parser = all_parsers usecols = lambda header: header.strip() in ["0", "1"] + if parser.engine == "pyarrow": + msg = "'function' object is not iterable" + with pytest.raises(TypeError, match=msg): + parser.read_csv(StringIO("0,1\nx,y,z"), index_col=False, usecols=usecols) + return result = parser.read_csv(StringIO("0,1\nx,y,z"), index_col=False, usecols=usecols) expected = DataFrame({"0": ["x"], "1": "y"}) tm.assert_frame_equal(result, expected) @@ -480,16 +507,11 @@ def test_usecols_dtype(all_parsers): a,1,x b,2,y """ - msg = "Passing a BlockManager to DataFrame is deprecated" - warn = None - if parser.engine == "pyarrow": - warn = DeprecationWarning - with tm.assert_produces_warning(warn, match=msg, check_stacklevel=False): - result = parser.read_csv( - StringIO(data), - usecols=["col1", "col2"], - dtype={"col1": "string", "col2": "uint8", "col3": "string"}, - ) + result = parser.read_csv( + StringIO(data), + usecols=["col1", "col2"], + dtype={"col1": "string", "col2": "uint8", "col3": "string"}, + ) expected = DataFrame( 
{"col1": array(["a", "b"]), "col2": np.array([1, 2], dtype="uint8")} )
- [x] closes #49411
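For readers skimming the diff above: the conversion relies on two recurring patterns — a module-level `xfail_pyarrow = pytest.mark.usefixtures("pyarrow_xfail")` alias applied to whole tests, and `request.applymarker(...)` applied inside a test when only some parametrizations fail under pyarrow. A minimal self-contained sketch of the second pattern (the fixture, helper, and test names here are illustrative stand-ins, not code from the PR):

```python
import pytest

@pytest.fixture(params=["c", "python", "pyarrow"])
def engine(request):
    # stand-in for pandas' ``all_parsers`` fixture
    return request.param

def parse(engine, sep):
    # stand-in for read_csv: pyarrow rejects regex separators
    if engine == "pyarrow" and sep == r"\s+":
        raise ValueError("the 'pyarrow' engine does not support regex separators")
    return sep

@pytest.mark.parametrize("sep", [",", r"\s+"])
def test_sep(engine, sep, request):
    if engine == "pyarrow" and sep == r"\s+":
        # xfail only the failing combination instead of skipping the whole test
        request.applymarker(pytest.mark.xfail(raises=ValueError))
    assert parse(engine, sep) == sep
```

Unlike `pytest.skip`, an xfail that starts passing is reported as XPASS, so these markers surface exactly when pyarrow gains support for a given option.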
https://api.github.com/repos/pandas-dev/pandas/pulls/55576
2023-10-18T18:11:30Z
2023-10-23T17:29:19Z
2023-10-23T17:29:19Z
2023-10-23T19:48:03Z
Put create_block_manager_from_blocks in internals.api
diff --git a/pandas/core/internals/api.py b/pandas/core/internals/api.py index 10e6b76e985b3..a3fd77fc8d9ea 100644 --- a/pandas/core/internals/api.py +++ b/pandas/core/internals/api.py @@ -105,3 +105,25 @@ def maybe_infer_ndim(values, placement: BlockPlacement, ndim: int | None) -> int else: ndim = values.ndim return ndim + + +def __getattr__(name: str): + import warnings + + from pandas.util._exceptions import find_stack_level + + if name == "create_block_manager_from_blocks": + # GH#33892 + warnings.warn( + f"{name} is deprecated and will be removed in a future version. " + "Use public APIs instead.", + DeprecationWarning, + stacklevel=find_stack_level(), + ) + from pandas.core.internals.managers import create_block_manager_from_blocks + + return create_block_manager_from_blocks + + raise AttributeError( + f"module 'pandas.core.internals.api' has no attribute '{name}'" + )
Deprecated this from `internals.__init__`, telling downstream libraries to get it from `internals.api`, but forgot to actually put it there.
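The mechanism in the diff is the module-level `__getattr__` hook from PEP 562: the name stays importable, but touching it emits a `DeprecationWarning`. A stripped-down sketch of the same pattern outside pandas (module and function names are placeholders):

```python
# mypkg/api.py
import warnings

def _legacy_helper():
    return "implementation that moved elsewhere"

def __getattr__(name: str):
    # Called only for attributes not found by normal module lookup (PEP 562)
    if name == "legacy_helper":
        warnings.warn(
            f"{name} is deprecated and will be removed in a future version. "
            "Use public APIs instead.",
            DeprecationWarning,
            stacklevel=2,
        )
        return _legacy_helper
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
```

Because `__getattr__` only runs on failed lookups, the module's existing public names are untouched; only the deprecated access path pays the warning.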
https://api.github.com/repos/pandas-dev/pandas/pulls/55575
2023-10-18T14:59:12Z
2023-10-18T16:52:01Z
2023-10-18T16:52:01Z
2023-10-18T17:57:18Z
Series.pow when right operand is a missing value
diff --git a/doc/source/whatsnew/v2.2.0.rst b/doc/source/whatsnew/v2.2.0.rst index b29d35c8ce332..4f5f31da75e03 100644 --- a/doc/source/whatsnew/v2.2.0.rst +++ b/doc/source/whatsnew/v2.2.0.rst @@ -317,6 +317,7 @@ Timezones Numeric ^^^^^^^ - Bug in :func:`read_csv` with ``engine="pyarrow"`` causing rounding errors for large integers (:issue:`52505`) +- Bug in :meth:`Series.pow` not filling missing values correctly (:issue:`55512`) - Conversion diff --git a/pandas/core/series.py b/pandas/core/series.py index c6e1e460b336a..724a54c85da9b 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -6043,6 +6043,8 @@ def _flex_method(self, other, op, *, level=None, fill_value=None, axis: Axis = 0 return result else: if fill_value is not None: + if isna(other): + return op(self, fill_value) self = self.fillna(fill_value) return op(self, other) diff --git a/pandas/tests/extension/test_arrow.py b/pandas/tests/extension/test_arrow.py index 86aef2642750e..f9fcfd19e9956 100644 --- a/pandas/tests/extension/test_arrow.py +++ b/pandas/tests/extension/test_arrow.py @@ -3017,6 +3017,14 @@ def test_arrowextensiondtype_dataframe_repr(): assert result == expected +def test_pow_missing_operand(): + # GH 55512 + k = pd.Series([2, None], dtype="int64[pyarrow]") + result = k.pow(None, fill_value=3) + expected = pd.Series([8, None], dtype="int64[pyarrow]") + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("pa_type", tm.TIMEDELTA_PYARROW_DTYPES) def test_duration_fillna_numpy(pa_type): # GH 54707
- [x] closes #55512 - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
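In behavior terms, mirroring the new test in the diff: with the fix, a missing right operand is replaced by `fill_value` before the operation instead of turning every row into NA, while rows that are missing on the left stay missing:

```python
import pandas as pd

s = pd.Series([2, None], dtype="int64[pyarrow]")
print(s.pow(None, fill_value=3))
# 0       8    <- 2 ** 3: fill_value stands in for the missing exponent
# 1    <NA>    <- the left operand is missing, so the result stays missing
```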
https://api.github.com/repos/pandas-dev/pandas/pulls/55568
2023-10-18T01:08:00Z
2023-10-19T16:40:55Z
2023-10-19T16:40:55Z
2023-10-19T16:54:15Z
Backport PR #55565 on branch 2.1.x (TST: Remove np.core usage)
diff --git a/pandas/tests/frame/constructors/test_from_records.py b/pandas/tests/frame/constructors/test_from_records.py index 55d4a6c3b39fa..bb4aed2163dac 100644 --- a/pandas/tests/frame/constructors/test_from_records.py +++ b/pandas/tests/frame/constructors/test_from_records.py @@ -42,7 +42,7 @@ def test_from_records_with_datetimes(self): arrdata = [np.array([datetime(2005, 3, 1, 0, 0), None])] dtypes = [("EXPIRY", "<M8[ns]")] - recarray = np.core.records.fromarrays(arrdata, dtype=dtypes) + recarray = np.rec.fromarrays(arrdata, dtype=dtypes) result = DataFrame.from_records(recarray) tm.assert_frame_equal(result, expected) @@ -50,7 +50,7 @@ def test_from_records_with_datetimes(self): # coercion should work too arrdata = [np.array([datetime(2005, 3, 1, 0, 0), None])] dtypes = [("EXPIRY", "<M8[m]")] - recarray = np.core.records.fromarrays(arrdata, dtype=dtypes) + recarray = np.rec.fromarrays(arrdata, dtype=dtypes) result = DataFrame.from_records(recarray) # we get the closest supported unit, "s" expected["EXPIRY"] = expected["EXPIRY"].astype("M8[s]")
Backport PR #55565: TST: Remove np.core usage
https://api.github.com/repos/pandas-dev/pandas/pulls/55567
2023-10-17T22:30:57Z
2023-10-18T00:28:20Z
2023-10-18T00:28:20Z
2023-10-18T00:28:20Z
TST: Remove np.core usage
diff --git a/pandas/tests/frame/constructors/test_from_records.py b/pandas/tests/frame/constructors/test_from_records.py index 55d4a6c3b39fa..bb4aed2163dac 100644 --- a/pandas/tests/frame/constructors/test_from_records.py +++ b/pandas/tests/frame/constructors/test_from_records.py @@ -42,7 +42,7 @@ def test_from_records_with_datetimes(self): arrdata = [np.array([datetime(2005, 3, 1, 0, 0), None])] dtypes = [("EXPIRY", "<M8[ns]")] - recarray = np.core.records.fromarrays(arrdata, dtype=dtypes) + recarray = np.rec.fromarrays(arrdata, dtype=dtypes) result = DataFrame.from_records(recarray) tm.assert_frame_equal(result, expected) @@ -50,7 +50,7 @@ def test_from_records_with_datetimes(self): # coercion should work too arrdata = [np.array([datetime(2005, 3, 1, 0, 0), None])] dtypes = [("EXPIRY", "<M8[m]")] - recarray = np.core.records.fromarrays(arrdata, dtype=dtypes) + recarray = np.rec.fromarrays(arrdata, dtype=dtypes) result = DataFrame.from_records(recarray) # we get the closest supported unit, "s" expected["EXPIRY"] = expected["EXPIRY"].astype("M8[s]")
https://github.com/numpy/numpy/pull/24634
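`np.core` is a private namespace that NumPy is walling off (the linked numpy PR), and `np.rec.fromarrays` is the public spelling of the same constructor, so the swap is behavior-preserving. For example:

```python
import numpy as np

# public alias for what np.core.records.fromarrays used to provide
rec = np.rec.fromarrays(
    [np.array([1, 2]), np.array(["a", "b"])],
    dtype=[("x", "<i8"), ("y", "<U1")],
)
print(rec.x, rec.y)  # [1 2] ['a' 'b']
```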
https://api.github.com/repos/pandas-dev/pandas/pulls/55565
2023-10-17T17:29:54Z
2023-10-17T22:29:37Z
2023-10-17T22:29:37Z
2023-10-17T22:35:05Z
Floordiv fix for pyarrow dtypes
diff --git a/doc/source/whatsnew/v2.1.2.rst b/doc/source/whatsnew/v2.1.2.rst index cc51e22265d7c..28b271f9cf446 100644 --- a/doc/source/whatsnew/v2.1.2.rst +++ b/doc/source/whatsnew/v2.1.2.rst @@ -29,9 +29,9 @@ Bug fixes - Fixed bug in :meth:`DataFrame.interpolate` raising incorrect error message (:issue:`55347`) - Fixed bug in :meth:`Index.insert` raising when inserting ``None`` into :class:`Index` with ``dtype="string[pyarrow_numpy]"`` (:issue:`55365`) - Fixed bug in :meth:`Series.all` and :meth:`Series.any` not treating missing values correctly for ``dtype="string[pyarrow_numpy]"`` (:issue:`55367`) +- Fixed bug in :meth:`Series.floordiv` for :class:`ArrowDtype` (:issue:`55561`) - Fixed bug in :meth:`Series.rank` for ``string[pyarrow_numpy]`` dtype (:issue:`55362`) - Silence ``Period[B]`` warnings introduced by :issue:`53446` during normal plotting activity (:issue:`55138`) -- .. --------------------------------------------------------------------------- .. _whatsnew_212.other: diff --git a/pandas/core/arrays/arrow/array.py b/pandas/core/arrays/arrow/array.py index a00640a88d7fb..a2d8694b5b19b 100644 --- a/pandas/core/arrays/arrow/array.py +++ b/pandas/core/arrays/arrow/array.py @@ -119,7 +119,8 @@ def floordiv_compat( ) -> pa.ChunkedArray: # Ensure int // int -> int mirroring Python/Numpy behavior # as pc.floor(pc.divide_checked(int, int)) -> float - result = pc.floor(pc.divide(left, right)) + converted_left = cast_for_truediv(left, right) + result = pc.floor(pc.divide(converted_left, right)) if pa.types.is_integer(left.type) and pa.types.is_integer(right.type): result = result.cast(left.type) return result diff --git a/pandas/tests/extension/test_arrow.py b/pandas/tests/extension/test_arrow.py index 86aef2642750e..aba6090eab329 100644 --- a/pandas/tests/extension/test_arrow.py +++ b/pandas/tests/extension/test_arrow.py @@ -3046,3 +3046,12 @@ def test_factorize_chunked_dictionary(): exp_uniques = pd.Index(ArrowExtensionArray(pa_array.combine_chunks())) tm.assert_numpy_array_equal(res_indices, exp_indicies) tm.assert_index_equal(res_uniques, exp_uniques) + + +def test_arrow_floordiv(): + # GH 55561 + a = pd.Series([-7], dtype="int64[pyarrow]") + b = pd.Series([4], dtype="int64[pyarrow]") + expected = pd.Series([-2], dtype="int64[pyarrow]") + result = a // b + tm.assert_series_equal(result, expected)
- [x] closes #55561 - [x] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [x] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [x] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
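For context on the fix: ``pyarrow.compute.divide`` on integer inputs performs integer division that truncates toward zero, so flooring the already-truncated quotient left negative results off by one; casting the dividend to float first (via ``cast_for_truediv``) restores Python's floor semantics. A minimal reproduction, assuming pandas with pyarrow installed:

```python
import pandas as pd

a = pd.Series([-7], dtype="int64[pyarrow]")
b = pd.Series([4], dtype="int64[pyarrow]")

# Python floor division rounds toward negative infinity:
print(-7 // 4)  # -2
# Before this fix, integer pc.divide truncated -7/4 to -1, and
# pc.floor(-1) stayed -1; with the float cast the result is now -2.
print(a // b)   # 0    -2    dtype: int64[pyarrow]
```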
https://api.github.com/repos/pandas-dev/pandas/pulls/55562
2023-10-17T14:25:54Z
2023-10-18T20:30:22Z
2023-10-18T20:30:22Z
2023-10-18T20:40:50Z
added assert
diff --git a/pandas/tests/indexing/interval/test_interval_new.py b/pandas/tests/indexing/interval/test_interval_new.py index 1aa62326e9071..283921a23e368 100644 --- a/pandas/tests/indexing/interval/test_interval_new.py +++ b/pandas/tests/indexing/interval/test_interval_new.py @@ -140,7 +140,7 @@ def test_loc_with_overlap(self, indexer_sl): # interval expected = 0 result = indexer_sl(ser)[Interval(1, 5)] - result == expected + assert expected == result expected = ser result = indexer_sl(ser)[[Interval(1, 5), Interval(3, 7)]]
- [x] closes #55557 - [x] [Tests added and passed] - [ ] All [code checks passed] - [x] Added [type annotations] to new arguments/methods/functions. - [x] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.
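The fixed line is easy to miss in review: a bare comparison builds a boolean and immediately discards it, so the test could never fail at that point. A tiny illustration of the pitfall (hypothetical values, not taken from the test):

```python
result, expected = 1, 0

result == expected         # evaluates to False, but nothing inspects it
assert result == expected  # raises AssertionError, as a test should
```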
https://api.github.com/repos/pandas-dev/pandas/pulls/55560
2023-10-17T14:13:50Z
2023-10-21T21:19:58Z
2023-10-21T21:19:58Z
2023-10-21T21:20:10Z
BUG: Don't clean docs and open browser in code_checks
diff --git a/ci/code_checks.sh b/ci/code_checks.sh index 6caa39ae42926..e91629744463f 100755 --- a/ci/code_checks.sh +++ b/ci/code_checks.sh @@ -176,9 +176,8 @@ fi ### SINGLE-PAGE DOCS ### if [[ -z "$CHECK" || "$CHECK" == "single-docs" ]]; then - python doc/make.py --warnings-are-errors --single pandas.Series.value_counts - python doc/make.py --warnings-are-errors --single pandas.Series.str.split - python doc/make.py clean + python doc/make.py --warnings-are-errors --no-browser --single pandas.Series.value_counts + python doc/make.py --warnings-are-errors --no-browser --single pandas.Series.str.split fi exit $RET diff --git a/doc/make.py b/doc/make.py index 23b18d69acc04..dfa8ae6c1e34c 100755 --- a/doc/make.py +++ b/doc/make.py @@ -45,12 +45,14 @@ def __init__( single_doc=None, verbosity=0, warnings_are_errors=False, + no_browser=False, ) -> None: self.num_jobs = num_jobs self.include_api = include_api self.whatsnew = whatsnew self.verbosity = verbosity self.warnings_are_errors = warnings_are_errors + self.no_browser = no_browser if single_doc: single_doc = self._process_single_doc(single_doc) @@ -234,11 +236,11 @@ def html(self): os.remove(zip_fname) if ret_code == 0: - if self.single_doc_html is not None: + if self.single_doc_html is not None and not self.no_browser: self._open_browser(self.single_doc_html) else: self._add_redirects() - if self.whatsnew: + if self.whatsnew and not self.no_browser: self._open_browser(os.path.join("whatsnew", "index.html")) return ret_code @@ -349,6 +351,12 @@ def main(): action="store_true", help="fail if warnings are raised", ) + argparser.add_argument( + "--no-browser", + help="Don't open browser", + default=False, + action="store_true", + ) args = argparser.parse_args() if args.command not in cmds: @@ -374,6 +382,7 @@ def main(): args.single, args.verbosity, args.warnings_are_errors, + args.no_browser, ) return getattr(builder, args.command)()
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number) - [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature - [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit). - [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions. - [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature. Ref: https://github.com/pandas-dev/pandas/pull/46806#issuecomment-1152922326 Regarding not cleaning the docs at the end: when I'm wrapping up a PR, I typically build the documentation and run code_checks. Right now these are at odds with each other, since code_checks clears the built docs. I can remove that part if there is opposition to this change.
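A usage sketch of the new flag, assuming a pandas source checkout with the documentation dependencies installed (it mirrors the updated ``ci/code_checks.sh`` invocation):

```python
import subprocess

# Build a single API page with warnings treated as errors; --no-browser
# keeps doc/make.py from opening the rendered page, which matters in CI.
subprocess.run(
    [
        "python", "doc/make.py",
        "--warnings-are-errors", "--no-browser",
        "--single", "pandas.Series.value_counts",
    ],
    check=True,
)
```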
https://api.github.com/repos/pandas-dev/pandas/pulls/55556
2023-10-17T03:41:05Z
2023-10-17T15:46:25Z
2023-10-17T15:46:25Z
2023-11-26T13:41:41Z
DOC: Remove explicit paths from references
diff --git a/doc/source/getting_started/comparison/comparison_with_sql.rst b/doc/source/getting_started/comparison/comparison_with_sql.rst index f0eaa7362c52c..daa528c7d408a 100644 --- a/doc/source/getting_started/comparison/comparison_with_sql.rst +++ b/doc/source/getting_started/comparison/comparison_with_sql.rst @@ -164,16 +164,16 @@ The pandas equivalent would be: tips.groupby("sex").size() -Notice that in the pandas code we used :meth:`~pandas.core.groupby.DataFrameGroupBy.size` and not -:meth:`~pandas.core.groupby.DataFrameGroupBy.count`. This is because -:meth:`~pandas.core.groupby.DataFrameGroupBy.count` applies the function to each column, returning +Notice that in the pandas code we used :meth:`.DataFrameGroupBy.size` and not +:meth:`.DataFrameGroupBy.count`. This is because +:meth:`.DataFrameGroupBy.count` applies the function to each column, returning the number of ``NOT NULL`` records within each. .. ipython:: python tips.groupby("sex").count() -Alternatively, we could have applied the :meth:`~pandas.core.groupby.DataFrameGroupBy.count` method +Alternatively, we could have applied the :meth:`.DataFrameGroupBy.count` method to an individual column: .. ipython:: python @@ -181,7 +181,7 @@ to an individual column: tips.groupby("sex")["total_bill"].count() Multiple functions can also be applied at once. For instance, say we'd like to see how tip amount -differs by day of the week - :meth:`~pandas.core.groupby.DataFrameGroupBy.agg` allows you to pass a dictionary +differs by day of the week - :meth:`.DataFrameGroupBy.agg` allows you to pass a dictionary to your grouped DataFrame, indicating which functions to apply to specific columns. .. code-block:: sql diff --git a/doc/source/user_guide/10min.rst b/doc/source/user_guide/10min.rst index 2c612e31d33b6..c8e67710c85a9 100644 --- a/doc/source/user_guide/10min.rst +++ b/doc/source/user_guide/10min.rst @@ -525,7 +525,7 @@ See the :ref:`Grouping section <groupby>`. df Grouping by a column label, selecting column labels, and then applying the -:meth:`~pandas.core.groupby.DataFrameGroupBy.sum` function to the resulting +:meth:`.DataFrameGroupBy.sum` function to the resulting groups: .. ipython:: python diff --git a/doc/source/user_guide/enhancingperf.rst b/doc/source/user_guide/enhancingperf.rst index c4721f3a6b09c..8c510173819e0 100644 --- a/doc/source/user_guide/enhancingperf.rst +++ b/doc/source/user_guide/enhancingperf.rst @@ -453,7 +453,7 @@ by evaluate arithmetic and boolean expression all at once for large :class:`~pan :func:`~pandas.eval` is many orders of magnitude slower for smaller expressions or objects than plain Python. A good rule of thumb is to only use :func:`~pandas.eval` when you have a - :class:`~pandas.core.frame.DataFrame` with more than 10,000 rows. + :class:`.DataFrame` with more than 10,000 rows. Supported syntax ~~~~~~~~~~~~~~~~ diff --git a/doc/source/user_guide/groupby.rst b/doc/source/user_guide/groupby.rst index b2df1eaec60cc..240b262ca6151 100644 --- a/doc/source/user_guide/groupby.rst +++ b/doc/source/user_guide/groupby.rst @@ -458,7 +458,7 @@ Selecting a group ----------------- A single group can be selected using -:meth:`~pandas.core.groupby.DataFrameGroupBy.get_group`: +:meth:`.DataFrameGroupBy.get_group`: .. ipython:: python @@ -1531,7 +1531,7 @@ Enumerate groups To see the ordering of the groups (as opposed to the order of rows within a group given by ``cumcount``) you can use -:meth:`~pandas.core.groupby.DataFrameGroupBy.ngroup`. +:meth:`.DataFrameGroupBy.ngroup`. 
@@ -1660,7 +1660,7 @@ Regroup columns of a DataFrame according to their sum, and sum the aggregated on Multi-column factorization ~~~~~~~~~~~~~~~~~~~~~~~~~~ -By using :meth:`~pandas.core.groupby.DataFrameGroupBy.ngroup`, we can extract +By using :meth:`.DataFrameGroupBy.ngroup`, we can extract information about the groups in a way similar to :func:`factorize` (as described further in the :ref:`reshaping API <reshaping.factorize>`) but which applies naturally to multiple columns of mixed type and different diff --git a/doc/source/user_guide/io.rst b/doc/source/user_guide/io.rst index c2fe277c4f4e5..cc19e4aca061c 100644 --- a/doc/source/user_guide/io.rst +++ b/doc/source/user_guide/io.rst @@ -2701,7 +2701,7 @@ in the method ``to_string`` described above. .. note:: Not all of the possible options for ``DataFrame.to_html`` are shown here for - brevity's sake. See :func:`~pandas.core.frame.DataFrame.to_html` for the + brevity's sake. See :func:`.DataFrame.to_html` for the full set of options. .. note:: @@ -6020,7 +6020,7 @@ Stata format Writing to stata format ''''''''''''''''''''''' -The method :func:`~pandas.core.frame.DataFrame.to_stata` will write a DataFrame +The method :func:`.DataFrame.to_stata` will write a DataFrame into a .dta file. The format version of this file is always 115 (Stata 12). .. ipython:: python @@ -6060,7 +6060,7 @@ outside of this range, the variable is cast to ``int16``. .. warning:: :class:`~pandas.io.stata.StataWriter` and - :func:`~pandas.core.frame.DataFrame.to_stata` only support fixed width + :func:`.DataFrame.to_stata` only support fixed width strings containing up to 244 characters, a limitation imposed by the version 115 dta file format. Attempting to write *Stata* dta files with strings longer than 244 characters raises a ``ValueError``. diff --git a/doc/source/whatsnew/v0.17.1.rst b/doc/source/whatsnew/v0.17.1.rst index 774d17e6ff6b0..93de83875746b 100644 --- a/doc/source/whatsnew/v0.17.1.rst +++ b/doc/source/whatsnew/v0.17.1.rst @@ -43,7 +43,7 @@ We've added *experimental* support for conditional HTML formatting: the visual styling of a DataFrame based on the data. The styling is accomplished with HTML and CSS. Accesses the styler class with the :attr:`pandas.DataFrame.style`, attribute, -an instance of :class:`~pandas.core.style.Styler` with your data attached. +an instance of :class:`.Styler` with your data attached. Here's a quick example: @@ -58,7 +58,7 @@ We can render the HTML to get the following table. .. raw:: html :file: whatsnew_0171_html_table.html -:class:`~pandas.core.style.Styler` interacts nicely with the Jupyter Notebook. +:class:`.Styler` interacts nicely with the Jupyter Notebook. See the :ref:`documentation </user_guide/style.ipynb>` for more. .. 
_whatsnew_0171.enhancements: diff --git a/doc/source/whatsnew/v0.20.2.rst b/doc/source/whatsnew/v0.20.2.rst index 430a39d2d2e97..6945b360a97b0 100644 --- a/doc/source/whatsnew/v0.20.2.rst +++ b/doc/source/whatsnew/v0.20.2.rst @@ -28,8 +28,8 @@ Enhancements - Unblocked access to additional compression types supported in pytables: 'blosc:blosclz, 'blosc:lz4', 'blosc:lz4hc', 'blosc:snappy', 'blosc:zlib', 'blosc:zstd' (:issue:`14478`) - ``Series`` provides a ``to_latex`` method (:issue:`16180`) -- A new groupby method :meth:`~pandas.core.groupby.GroupBy.ngroup`, - parallel to the existing :meth:`~pandas.core.groupby.GroupBy.cumcount`, +- A new groupby method :meth:`.GroupBy.ngroup`, + parallel to the existing :meth:`.GroupBy.cumcount`, has been added to return the group order (:issue:`11642`); see :ref:`here <groupby.ngroup>`. diff --git a/doc/source/whatsnew/v0.21.0.rst b/doc/source/whatsnew/v0.21.0.rst index 62296550a472b..08a132264ddba 100644 --- a/doc/source/whatsnew/v0.21.0.rst +++ b/doc/source/whatsnew/v0.21.0.rst @@ -306,7 +306,7 @@ Other enhancements New functions or methods """""""""""""""""""""""" -- :meth:`~pandas.core.resample.Resampler.nearest` is added to support nearest-neighbor upsampling (:issue:`17496`). +- :meth:`.Resampler.nearest` is added to support nearest-neighbor upsampling (:issue:`17496`). - :class:`~pandas.Index` has added support for a ``to_frame`` method (:issue:`15230`). New keywords diff --git a/doc/source/whatsnew/v0.23.0.rst b/doc/source/whatsnew/v0.23.0.rst index 5aba5ec061e8b..cdffc6968a170 100644 --- a/doc/source/whatsnew/v0.23.0.rst +++ b/doc/source/whatsnew/v0.23.0.rst @@ -299,8 +299,8 @@ For pivoting operations, this behavior is *already* controlled by the ``dropna`` Rolling/Expanding.apply() accepts ``raw=False`` to pass a ``Series`` to the function ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -:func:`Series.rolling().apply() <pandas.core.window.Rolling.apply>`, :func:`DataFrame.rolling().apply() <pandas.core.window.Rolling.apply>`, -:func:`Series.expanding().apply() <pandas.core.window.Expanding.apply>`, and :func:`DataFrame.expanding().apply() <pandas.core.window.Expanding.apply>` have gained a ``raw=None`` parameter. +:func:`Series.rolling().apply() <.Rolling.apply>`, :func:`DataFrame.rolling().apply() <.Rolling.apply>`, +:func:`Series.expanding().apply() <.Expanding.apply>`, and :func:`DataFrame.expanding().apply() <.Expanding.apply>` have gained a ``raw=None`` parameter. This is similar to :func:`DataFame.apply`. This parameter, if ``True`` allows one to send a ``np.ndarray`` to the applied function. If ``False`` a ``Series`` will be passed. The default is ``None``, which preserves backward compatibility, so this will default to ``True``, sending an ``np.ndarray``. In a future version the default will be changed to ``False``, sending a ``Series``. (:issue:`5071`, :issue:`20584`) @@ -524,7 +524,7 @@ Other enhancements - ``Categorical.rename_categories``, ``CategoricalIndex.rename_categories`` and :attr:`Series.cat.rename_categories` can now take a callable as their argument (:issue:`18862`) - :class:`Interval` and :class:`IntervalIndex` have gained a ``length`` attribute (:issue:`18789`) -- ``Resampler`` objects now have a functioning :attr:`~pandas.core.resample.Resampler.pipe` method. +- ``Resampler`` objects now have a functioning :attr:`.Resampler.pipe` method. Previously, calls to ``pipe`` were diverted to the ``mean`` method (:issue:`17905`). 
- :func:`~pandas.api.types.is_scalar` now returns ``True`` for ``DateOffset`` objects (:issue:`18943`). - :func:`DataFrame.pivot` now accepts a list for the ``values=`` kwarg (:issue:`17160`). @@ -536,7 +536,7 @@ Other enhancements - ``IntervalIndex.astype`` now supports conversions between subtypes when passed an ``IntervalDtype`` (:issue:`19197`) - :class:`IntervalIndex` and its associated constructor methods (``from_arrays``, ``from_breaks``, ``from_tuples``) have gained a ``dtype`` parameter (:issue:`19262`) -- Added :func:`pandas.core.groupby.SeriesGroupBy.is_monotonic_increasing` and :func:`pandas.core.groupby.SeriesGroupBy.is_monotonic_decreasing` (:issue:`17015`) +- Added :func:`.SeriesGroupBy.is_monotonic_increasing` and :func:`.SeriesGroupBy.is_monotonic_decreasing` (:issue:`17015`) - For subclassed ``DataFrames``, :func:`DataFrame.apply` will now preserve the ``Series`` subclass (if defined) when passing the data to the applied function (:issue:`19822`) - :func:`DataFrame.from_dict` now accepts a ``columns`` argument that can be used to specify the column names when ``orient='index'`` is used (:issue:`18529`) - Added option ``display.html.use_mathjax`` so `MathJax <https://www.mathjax.org/>`_ can be disabled when rendering tables in ``Jupyter`` notebooks (:issue:`19856`, :issue:`19824`) @@ -547,7 +547,7 @@ Other enhancements ``SQLAlchemy`` dialects supporting multi-value inserts include: ``mysql``, ``postgresql``, ``sqlite`` and any dialect with ``supports_multivalues_insert``. (:issue:`14315`, :issue:`8953`) - :func:`read_html` now accepts a ``displayed_only`` keyword argument to controls whether or not hidden elements are parsed (``True`` by default) (:issue:`20027`) - :func:`read_html` now reads all ``<tbody>`` elements in a ``<table>``, not just the first. (:issue:`20690`) -- :meth:`~pandas.core.window.Rolling.quantile` and :meth:`~pandas.core.window.Expanding.quantile` now accept the ``interpolation`` keyword, ``linear`` by default (:issue:`20497`) +- :meth:`.Rolling.quantile` and :meth:`.Expanding.quantile` now accept the ``interpolation`` keyword, ``linear`` by default (:issue:`20497`) - zip compression is supported via ``compression=zip`` in :func:`DataFrame.to_pickle`, :func:`Series.to_pickle`, :func:`DataFrame.to_csv`, :func:`Series.to_csv`, :func:`DataFrame.to_json`, :func:`Series.to_json`. (:issue:`17778`) - :class:`~pandas.tseries.offsets.WeekOfMonth` constructor now supports ``n=0`` (:issue:`20517`). - :class:`DataFrame` and :class:`Series` now support matrix multiplication (``@``) operator (:issue:`10259`) for Python>=3.5 @@ -1052,7 +1052,7 @@ Other API changes - :func:`DatetimeIndex.strftime` and :func:`PeriodIndex.strftime` now return an ``Index`` instead of a numpy array to be consistent with similar accessors (:issue:`20127`) - Constructing a Series from a list of length 1 no longer broadcasts this list when a longer index is specified (:issue:`19714`, :issue:`20391`). - :func:`DataFrame.to_dict` with ``orient='index'`` no longer casts int columns to float for a DataFrame with only int and float columns (:issue:`18580`) -- A user-defined-function that is passed to :func:`Series.rolling().aggregate() <pandas.core.window.Rolling.aggregate>`, :func:`DataFrame.rolling().aggregate() <pandas.core.window.Rolling.aggregate>`, or its expanding cousins, will now *always* be passed a ``Series``, rather than a ``np.array``; ``.apply()`` only has the ``raw`` keyword, see :ref:`here <whatsnew_0230.enhancements.window_raw>`. 
This is consistent with the signatures of ``.aggregate()`` across pandas (:issue:`20584`) +- A user-defined-function that is passed to :func:`Series.rolling().aggregate() <.Rolling.aggregate>`, :func:`DataFrame.rolling().aggregate() <.Rolling.aggregate>`, or its expanding cousins, will now *always* be passed a ``Series``, rather than a ``np.array``; ``.apply()`` only has the ``raw`` keyword, see :ref:`here <whatsnew_0230.enhancements.window_raw>`. This is consistent with the signatures of ``.aggregate()`` across pandas (:issue:`20584`) - Rolling and Expanding types raise ``NotImplementedError`` upon iteration (:issue:`11704`). .. _whatsnew_0230.deprecations: @@ -1084,8 +1084,7 @@ Deprecations - ``Index.summary()`` is deprecated and will be removed in a future version (:issue:`18217`) - ``NDFrame.get_ftype_counts()`` is deprecated and will be removed in a future version (:issue:`18243`) - The ``convert_datetime64`` parameter in :func:`DataFrame.to_records` has been deprecated and will be removed in a future version. The NumPy bug motivating this parameter has been resolved. The default value for this parameter has also changed from ``True`` to ``None`` (:issue:`18160`). -- :func:`Series.rolling().apply() <pandas.core.window.Rolling.apply>`, :func:`DataFrame.rolling().apply() <pandas.core.window.Rolling.apply>`, - :func:`Series.expanding().apply() <pandas.core.window.Expanding.apply>`, and :func:`DataFrame.expanding().apply() <pandas.core.window.Expanding.apply>` have deprecated passing an ``np.array`` by default. One will need to pass the new ``raw`` parameter to be explicit about what is passed (:issue:`20584`) +- :func:`Series.rolling().apply() <.Rolling.apply>`, :func:`DataFrame.rolling().apply() <.Rolling.apply>`, :func:`Series.expanding().apply() <.Expanding.apply>`, and :func:`DataFrame.expanding().apply() <.Expanding.apply>` have deprecated passing an ``np.array`` by default. One will need to pass the new ``raw`` parameter to be explicit about what is passed (:issue:`20584`) - The ``data``, ``base``, ``strides``, ``flags`` and ``itemsize`` properties of the ``Series`` and ``Index`` classes have been deprecated and will be removed in a future version (:issue:`20419`). 
@@ -1159,15 +1158,15 @@ Performance improvements - Improved performance of :func:`MultiIndex.remove_unused_levels` when there are no unused levels, at the cost of a reduction in performance when there are (:issue:`19289`) - Improved performance of :func:`Index.get_loc` for non-unique indexes (:issue:`19478`) - Improved performance of pairwise ``.rolling()`` and ``.expanding()`` with ``.cov()`` and ``.corr()`` operations (:issue:`17917`) -- Improved performance of :func:`pandas.core.groupby.GroupBy.rank` (:issue:`15779`) +- Improved performance of :func:`.GroupBy.rank` (:issue:`15779`) - Improved performance of variable ``.rolling()`` on ``.min()`` and ``.max()`` (:issue:`19521`) -- Improved performance of :func:`pandas.core.groupby.GroupBy.ffill` and :func:`pandas.core.groupby.GroupBy.bfill` (:issue:`11296`) -- Improved performance of :func:`pandas.core.groupby.GroupBy.any` and :func:`pandas.core.groupby.GroupBy.all` (:issue:`15435`) -- Improved performance of :func:`pandas.core.groupby.GroupBy.pct_change` (:issue:`19165`) +- Improved performance of :func:`.GroupBy.ffill` and :func:`.GroupBy.bfill` (:issue:`11296`) +- Improved performance of :func:`.GroupBy.any` and :func:`.GroupBy.all` (:issue:`15435`) +- Improved performance of :func:`.GroupBy.pct_change` (:issue:`19165`) - Improved performance of :func:`Series.isin` in the case of categorical dtypes (:issue:`20003`) - Improved performance of ``getattr(Series, attr)`` when the Series has certain index types. This manifested in slow printing of large Series with a ``DatetimeIndex`` (:issue:`19764`) - Fixed a performance regression for :func:`GroupBy.nth` and :func:`GroupBy.last` with some object columns (:issue:`19283`) -- Improved performance of :func:`pandas.core.arrays.Categorical.from_codes` (:issue:`18501`) +- Improved performance of :func:`.Categorical.from_codes` (:issue:`18501`) .. _whatsnew_0230.docs: @@ -1412,13 +1411,13 @@ GroupBy/resample/rolling - Bug in :func:`DataFrame.groupby` where aggregation by ``first``/``last``/``min``/``max`` was causing timestamps to lose precision (:issue:`19526`) - Bug in :func:`DataFrame.transform` where particular aggregation functions were being incorrectly cast to match the dtype(s) of the grouped data (:issue:`19200`) - Bug in :func:`DataFrame.groupby` passing the ``on=`` kwarg, and subsequently using ``.apply()`` (:issue:`17813`) -- Bug in :func:`DataFrame.resample().aggregate <pandas.core.resample.Resampler.aggregate>` not raising a ``KeyError`` when aggregating a non-existent column (:issue:`16766`, :issue:`19566`) +- Bug in :func:`DataFrame.resample().aggregate <.Resampler.aggregate>` not raising a ``KeyError`` when aggregating a non-existent column (:issue:`16766`, :issue:`19566`) - Bug in :func:`DataFrameGroupBy.cumsum` and :func:`DataFrameGroupBy.cumprod` when ``skipna`` was passed (:issue:`19806`) - Bug in :func:`DataFrame.resample` that dropped timezone information (:issue:`13238`) - Bug in :func:`DataFrame.groupby` where transformations using ``np.all`` and ``np.any`` were raising a ``ValueError`` (:issue:`20653`) - Bug in :func:`DataFrame.resample` where ``ffill``, ``bfill``, ``pad``, ``backfill``, ``fillna``, ``interpolate``, and ``asfreq`` were ignoring ``loffset``. 
(:issue:`20744`) - Bug in :func:`DataFrame.groupby` when applying a function that has mixed data types and the user supplied function can fail on the grouping column (:issue:`20949`) -- Bug in :func:`DataFrameGroupBy.rolling().apply() <pandas.core.window.Rolling.apply>` where operations performed against the associated :class:`DataFrameGroupBy` object could impact the inclusion of the grouped item(s) in the result (:issue:`14013`) +- Bug in :func:`DataFrameGroupBy.rolling().apply() <.Rolling.apply>` where operations performed against the associated :class:`DataFrameGroupBy` object could impact the inclusion of the grouped item(s) in the result (:issue:`14013`) Sparse ^^^^^^ diff --git a/doc/source/whatsnew/v0.23.1.rst b/doc/source/whatsnew/v0.23.1.rst index b51368c87f991..685fe1b3836bf 100644 --- a/doc/source/whatsnew/v0.23.1.rst +++ b/doc/source/whatsnew/v0.23.1.rst @@ -100,8 +100,8 @@ Bug fixes **Groupby/resample/rolling** - Bug in :func:`DataFrame.agg` where applying multiple aggregation functions to a :class:`DataFrame` with duplicated column names would cause a stack overflow (:issue:`21063`) -- Bug in :func:`pandas.core.groupby.GroupBy.ffill` and :func:`pandas.core.groupby.GroupBy.bfill` where the fill within a grouping would not always be applied as intended due to the implementations' use of a non-stable sort (:issue:`21207`) -- Bug in :func:`pandas.core.groupby.GroupBy.rank` where results did not scale to 100% when specifying ``method='dense'`` and ``pct=True`` +- Bug in :func:`.GroupBy.ffill` and :func:`.GroupBy.bfill` where the fill within a grouping would not always be applied as intended due to the implementations' use of a non-stable sort (:issue:`21207`) +- Bug in :func:`.GroupBy.rank` where results did not scale to 100% when specifying ``method='dense'`` and ``pct=True`` - Bug in :func:`pandas.DataFrame.rolling` and :func:`pandas.Series.rolling` which incorrectly accepted a 0 window size rather than raising (:issue:`21286`) **Data-type specific** diff --git a/doc/source/whatsnew/v0.24.0.rst b/doc/source/whatsnew/v0.24.0.rst index b013d03b2d68c..cb12962256a55 100644 --- a/doc/source/whatsnew/v0.24.0.rst +++ b/doc/source/whatsnew/v0.24.0.rst @@ -335,7 +335,7 @@ convenient way to apply users' predefined styling functions, and can help reduce df.style.pipe(format_and_align).set_caption('Summary of results.') Similar methods already exist for other classes in pandas, including :meth:`DataFrame.pipe`, -:meth:`GroupBy.pipe() <pandas.core.groupby.GroupBy.pipe>`, and :meth:`Resampler.pipe() <pandas.core.resample.Resampler.pipe>`. +:meth:`GroupBy.pipe() <.GroupBy.pipe>`, and :meth:`Resampler.pipe() <.Resampler.pipe>`. .. _whatsnew_0240.enhancements.rename_axis: @@ -405,7 +405,7 @@ Other enhancements now support an ``ambiguous`` argument for handling datetimes that are rounded to ambiguous times (:issue:`18946`) and a ``nonexistent`` argument for handling datetimes that are rounded to nonexistent times. See :ref:`timeseries.timezone_nonexistent` (:issue:`22647`) - The result of :meth:`~DataFrame.resample` is now iterable similar to ``groupby()`` (:issue:`15314`). -- :meth:`Series.resample` and :meth:`DataFrame.resample` have gained the :meth:`pandas.core.resample.Resampler.quantile` (:issue:`15023`). +- :meth:`Series.resample` and :meth:`DataFrame.resample` have gained the :meth:`.Resampler.quantile` (:issue:`15023`). 
- :meth:`DataFrame.resample` and :meth:`Series.resample` with a :class:`PeriodIndex` will now respect the ``base`` argument in the same fashion as with a :class:`DatetimeIndex`. (:issue:`23882`) - :meth:`pandas.api.types.is_list_like` has gained a keyword ``allow_sets`` which is ``True`` by default; if ``False``, all instances of ``set`` will not be considered "list-like" anymore (:issue:`23061`) @@ -1549,7 +1549,7 @@ Performance improvements shows similar speed improvements as above (:issue:`21659`) - Improved performance of :meth:`CategoricalIndex.equals` when comparing to another :class:`CategoricalIndex` (:issue:`24023`) - Improved performance of :func:`Series.describe` in case of numeric dtpyes (:issue:`21274`) -- Improved performance of :func:`pandas.core.groupby.GroupBy.rank` when dealing with tied rankings (:issue:`21237`) +- Improved performance of :func:`.GroupBy.rank` when dealing with tied rankings (:issue:`21237`) - Improved performance of :func:`DataFrame.set_index` with columns consisting of :class:`Period` objects (:issue:`21582`, :issue:`21606`) - Improved performance of :meth:`Series.at` and :meth:`Index.get_value` for Extension Arrays values (e.g. :class:`Categorical`) (:issue:`24204`) - Improved performance of membership checks in :class:`Categorical` and :class:`CategoricalIndex` @@ -1857,28 +1857,28 @@ Plotting GroupBy/resample/rolling ^^^^^^^^^^^^^^^^^^^^^^^^ -- Bug in :func:`pandas.core.window.Rolling.min` and :func:`pandas.core.window.Rolling.max` with ``closed='left'``, a datetime-like index and only one entry in the series leading to segfault (:issue:`24718`) -- Bug in :func:`pandas.core.groupby.GroupBy.first` and :func:`pandas.core.groupby.GroupBy.last` with ``as_index=False`` leading to the loss of timezone information (:issue:`15884`) +- Bug in :func:`.Rolling.min` and :func:`.Rolling.max` with ``closed='left'``, a datetime-like index and only one entry in the series leading to segfault (:issue:`24718`) +- Bug in :func:`.GroupBy.first` and :func:`.GroupBy.last` with ``as_index=False`` leading to the loss of timezone information (:issue:`15884`) - Bug in :meth:`DateFrame.resample` when downsampling across a DST boundary (:issue:`8531`) - Bug in date anchoring for :meth:`DateFrame.resample` with offset :class:`Day` when n > 1 (:issue:`24127`) -- Bug where ``ValueError`` is wrongly raised when calling :func:`~pandas.core.groupby.SeriesGroupBy.count` method of a +- Bug where ``ValueError`` is wrongly raised when calling :func:`.SeriesGroupBy.count` method of a ``SeriesGroupBy`` when the grouping variable only contains NaNs and numpy version < 1.13 (:issue:`21956`). -- Multiple bugs in :func:`pandas.core.window.Rolling.min` with ``closed='left'`` and a +- Multiple bugs in :func:`.Rolling.min` with ``closed='left'`` and a datetime-like index leading to incorrect results and also segfault. (:issue:`21704`) -- Bug in :meth:`pandas.core.resample.Resampler.apply` when passing positional arguments to applied func (:issue:`14615`). +- Bug in :meth:`.Resampler.apply` when passing positional arguments to applied func (:issue:`14615`). - Bug in :meth:`Series.resample` when passing ``numpy.timedelta64`` to ``loffset`` kwarg (:issue:`7687`). -- Bug in :meth:`pandas.core.resample.Resampler.asfreq` when frequency of ``TimedeltaIndex`` is a subperiod of a new frequency (:issue:`13022`). -- Bug in :meth:`pandas.core.groupby.SeriesGroupBy.mean` when values were integral but could not fit inside of int64, overflowing instead. 
(:issue:`22487`) -- :func:`pandas.core.groupby.RollingGroupby.agg` and :func:`pandas.core.groupby.ExpandingGroupby.agg` now support multiple aggregation functions as parameters (:issue:`15072`) +- Bug in :meth:`.Resampler.asfreq` when frequency of ``TimedeltaIndex`` is a subperiod of a new frequency (:issue:`13022`). +- Bug in :meth:`.SeriesGroupBy.mean` when values were integral but could not fit inside of int64, overflowing instead. (:issue:`22487`) +- :func:`.RollingGroupby.agg` and :func:`.ExpandingGroupby.agg` now support multiple aggregation functions as parameters (:issue:`15072`) - Bug in :meth:`DataFrame.resample` and :meth:`Series.resample` when resampling by a weekly offset (``'W'``) across a DST transition (:issue:`9119`, :issue:`21459`) - Bug in :meth:`DataFrame.expanding` in which the ``axis`` argument was not being respected during aggregations (:issue:`23372`) -- Bug in :meth:`pandas.core.groupby.GroupBy.transform` which caused missing values when the input function can accept a :class:`DataFrame` but renames it (:issue:`23455`). -- Bug in :func:`pandas.core.groupby.GroupBy.nth` where column order was not always preserved (:issue:`20760`) -- Bug in :meth:`pandas.core.groupby.GroupBy.rank` with ``method='dense'`` and ``pct=True`` when a group has only one member would raise a ``ZeroDivisionError`` (:issue:`23666`). -- Calling :meth:`pandas.core.groupby.GroupBy.rank` with empty groups and ``pct=True`` was raising a ``ZeroDivisionError`` (:issue:`22519`) +- Bug in :meth:`.GroupBy.transform` which caused missing values when the input function can accept a :class:`DataFrame` but renames it (:issue:`23455`). +- Bug in :func:`.GroupBy.nth` where column order was not always preserved (:issue:`20760`) +- Bug in :meth:`.GroupBy.rank` with ``method='dense'`` and ``pct=True`` when a group has only one member would raise a ``ZeroDivisionError`` (:issue:`23666`). +- Calling :meth:`.GroupBy.rank` with empty groups and ``pct=True`` was raising a ``ZeroDivisionError`` (:issue:`22519`) - Bug in :meth:`DataFrame.resample` when resampling ``NaT`` in ``TimeDeltaIndex`` (:issue:`13223`). - Bug in :meth:`DataFrame.groupby` did not respect the ``observed`` argument when selecting a column and instead always used ``observed=False`` (:issue:`23970`) -- Bug in :func:`pandas.core.groupby.SeriesGroupBy.pct_change` or :func:`pandas.core.groupby.DataFrameGroupBy.pct_change` would previously work across groups when calculating the percent change, where it now correctly works per group (:issue:`21200`, :issue:`21235`). +- Bug in :func:`.SeriesGroupBy.pct_change` or :func:`.DataFrameGroupBy.pct_change` would previously work across groups when calculating the percent change, where it now correctly works per group (:issue:`21200`, :issue:`21235`). - Bug preventing hash table creation with very large number (2^32) of rows (:issue:`22805`) - Bug in groupby when grouping on categorical causes ``ValueError`` and incorrect grouping if ``observed=True`` and ``nan`` is present in categorical column (:issue:`24740`, :issue:`21151`). 
@@ -1892,7 +1892,7 @@ Reshaping - Bug in :meth:`DataFrame.where` with an empty DataFrame and empty ``cond`` having non-bool dtype (:issue:`21947`) - Bug in :meth:`Series.mask` and :meth:`DataFrame.mask` with ``list`` conditionals (:issue:`21891`) - Bug in :meth:`DataFrame.replace` raises RecursionError when converting OutOfBounds ``datetime64[ns, tz]`` (:issue:`20380`) -- :func:`pandas.core.groupby.GroupBy.rank` now raises a ``ValueError`` when an invalid value is passed for argument ``na_option`` (:issue:`22124`) +- :func:`.GroupBy.rank` now raises a ``ValueError`` when an invalid value is passed for argument ``na_option`` (:issue:`22124`) - Bug in :func:`get_dummies` with Unicode attributes in Python 2 (:issue:`22084`) - Bug in :meth:`DataFrame.replace` raises ``RecursionError`` when replacing empty lists (:issue:`22083`) - Bug in :meth:`Series.replace` and :meth:`DataFrame.replace` when dict is used as the ``to_replace`` value and one key in the dict is another key's value, the results were inconsistent between using integer key and using string key (:issue:`20656`) @@ -1910,7 +1910,7 @@ Reshaping - Bug in :func:`pandas.concat` when joining ``Series`` datetimetz with ``Series`` category would lose timezone (:issue:`23816`) - Bug in :meth:`DataFrame.join` when joining on partial MultiIndex would drop names (:issue:`20452`). - :meth:`DataFrame.nlargest` and :meth:`DataFrame.nsmallest` now returns the correct n values when keep != 'all' also when tied on the first columns (:issue:`22752`) -- Constructing a DataFrame with an index argument that wasn't already an instance of :class:`~pandas.core.Index` was broken (:issue:`22227`). +- Constructing a DataFrame with an index argument that wasn't already an instance of :class:`.Index` was broken (:issue:`22227`). - Bug in :class:`DataFrame` prevented list subclasses to be used to construction (:issue:`21226`) - Bug in :func:`DataFrame.unstack` and :func:`DataFrame.pivot_table` returning a misleading error message when the resulting DataFrame has more elements than int32 can handle. Now, the error message is improved, pointing towards the actual problem (:issue:`20601`) - Bug in :func:`DataFrame.unstack` where a ``ValueError`` was raised when unstacking timezone aware values (:issue:`18338`) diff --git a/doc/source/whatsnew/v0.24.2.rst b/doc/source/whatsnew/v0.24.2.rst index 36684d465373c..9a8c2ee5d00fa 100644 --- a/doc/source/whatsnew/v0.24.2.rst +++ b/doc/source/whatsnew/v0.24.2.rst @@ -54,7 +54,7 @@ Bug fixes **Reshaping** -- Bug in :meth:`~pandas.core.groupby.GroupBy.transform` where applying a function to a timezone aware column would return a timezone naive result (:issue:`24198`) +- Bug in :meth:`.GroupBy.transform` where applying a function to a timezone aware column would return a timezone naive result (:issue:`24198`) - Bug in :func:`DataFrame.join` when joining on a timezone aware :class:`DatetimeIndex` (:issue:`23931`) **Visualization** diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst index c0f169aa6251f..5cf5623f73036 100644 --- a/doc/source/whatsnew/v0.25.0.rst +++ b/doc/source/whatsnew/v0.25.0.rst @@ -89,7 +89,7 @@ GroupBy aggregation with multiple lambdas ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ You can now provide multiple lambda functions to a list-like aggregation in -:class:`pandas.core.groupby.GroupBy.agg` (:issue:`26430`). +:class:`.GroupBy.agg` (:issue:`26430`). .. 
ipython:: python @@ -225,7 +225,7 @@ Other enhancements - :class:`datetime.timezone` objects are now supported as arguments to timezone methods and constructors (:issue:`25065`) - :meth:`DataFrame.query` and :meth:`DataFrame.eval` now supports quoting column names with backticks to refer to names with spaces (:issue:`6508`) - :func:`merge_asof` now gives a more clear error message when merge keys are categoricals that are not equal (:issue:`26136`) -- :meth:`pandas.core.window.Rolling` supports exponential (or Poisson) window type (:issue:`21303`) +- :meth:`.Rolling` supports exponential (or Poisson) window type (:issue:`21303`) - Error message for missing required imports now includes the original import error's text (:issue:`23868`) - :class:`DatetimeIndex` and :class:`TimedeltaIndex` now have a ``mean`` method (:issue:`24757`) - :meth:`DataFrame.describe` now formats integer percentiles without decimal point (:issue:`26660`) @@ -311,7 +311,7 @@ would be reassigned as -1. (:issue:`19387`) ``GroupBy.apply`` on ``DataFrame`` evaluates first group only once ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -The implementation of :meth:`DataFrameGroupBy.apply() <pandas.core.groupby.DataFrameGroupBy.apply>` +The implementation of :meth:`.DataFrameGroupBy.apply` previously evaluated the supplied function consistently twice on the first group to infer if it is safe to use a fast code path. Particularly for functions with side effects, this was an undesired behavior and may have led to surprises. (:issue:`2936`, :issue:`2656`, :issue:`7739`, :issue:`10519`, :issue:`12155`, :issue:`20084`, :issue:`21417`) @@ -493,7 +493,7 @@ values are coerced to floating point, which may result in loss of precision. See ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ The methods ``ffill``, ``bfill``, ``pad`` and ``backfill`` of -:class:`DataFrameGroupBy <pandas.core.groupby.DataFrameGroupBy>` +:class:`.DataFrameGroupBy` previously included the group labels in the return value, which was inconsistent with other groupby transforms. Now only the filled values are returned. (:issue:`21521`) @@ -885,8 +885,8 @@ Other API changes - :meth:`Timestamp.strptime` will now rise a ``NotImplementedError`` (:issue:`25016`) - Comparing :class:`Timestamp` with unsupported objects now returns :py:obj:`NotImplemented` instead of raising ``TypeError``. This implies that unsupported rich comparisons are delegated to the other object, and are now consistent with Python 3 behavior for ``datetime`` objects (:issue:`24011`) - Bug in :meth:`DatetimeIndex.snap` which didn't preserving the ``name`` of the input :class:`Index` (:issue:`25575`) -- The ``arg`` argument in :meth:`pandas.core.groupby.DataFrameGroupBy.agg` has been renamed to ``func`` (:issue:`26089`) -- The ``arg`` argument in :meth:`pandas.core.window._Window.aggregate` has been renamed to ``func`` (:issue:`26372`) +- The ``arg`` argument in :meth:`.DataFrameGroupBy.agg` has been renamed to ``func`` (:issue:`26089`) +- The ``arg`` argument in :meth:`.Window.aggregate` has been renamed to ``func`` (:issue:`26372`) - Most pandas classes had a ``__bytes__`` method, which was used for getting a python2-style bytestring representation of the object. 
This method has been removed as a part of dropping Python2 (:issue:`26447`) - The ``.str``-accessor has been disabled for 1-level :class:`MultiIndex`, use :meth:`MultiIndex.to_flat_index` if necessary (:issue:`23679`) - Removed support of gtk package for clipboards (:issue:`26563`) @@ -991,7 +991,7 @@ Performance improvements - :meth:`DataFrame.to_stata()` is now faster when outputting data with any string or non-native endian columns (:issue:`25045`) - Improved performance of :meth:`Series.searchsorted`. The speedup is especially large when the dtype is int8/int16/int32 and the searched key is within the integer bounds for the dtype (:issue:`22034`) -- Improved performance of :meth:`pandas.core.groupby.GroupBy.quantile` (:issue:`20405`) +- Improved performance of :meth:`.GroupBy.quantile` (:issue:`20405`) - Improved performance of slicing and other selected operation on a :class:`RangeIndex` (:issue:`26565`, :issue:`26617`, :issue:`26722`) - :class:`RangeIndex` now performs standard lookup without instantiating an actual hashtable, hence saving memory (:issue:`16685`) - Improved performance of :meth:`read_csv` by faster tokenizing and faster parsing of small float numbers (:issue:`25784`) @@ -1117,7 +1117,7 @@ Indexing - Bug in :meth:`DataFrame.loc` and :meth:`Series.loc` where ``KeyError`` was not raised for a ``MultiIndex`` when the key was less than or equal to the number of levels in the :class:`MultiIndex` (:issue:`14885`). - Bug in which :meth:`DataFrame.append` produced an erroneous warning indicating that a ``KeyError`` will be thrown in the future when the data to be appended contains new columns (:issue:`22252`). - Bug in which :meth:`DataFrame.to_csv` caused a segfault for a reindexed data frame, when the indices were single-level :class:`MultiIndex` (:issue:`26303`). 
-- Fixed bug where assigning a :class:`arrays.PandasArray` to a :class:`pandas.core.frame.DataFrame` would raise error (:issue:`26390`) +- Fixed bug where assigning a :class:`arrays.PandasArray` to a :class:`.DataFrame` would raise error (:issue:`26390`) - Allow keyword arguments for callable local reference used in the :meth:`DataFrame.query` string (:issue:`26426`) - Fixed a ``KeyError`` when indexing a :class:`MultiIndex` level with a list containing exactly one label, which is missing (:issue:`27148`) - Bug which produced ``AttributeError`` on partial matching :class:`Timestamp` in a :class:`MultiIndex` (:issue:`26944`) @@ -1190,28 +1190,28 @@ Plotting GroupBy/resample/rolling ^^^^^^^^^^^^^^^^^^^^^^^^ -- Bug in :meth:`pandas.core.resample.Resampler.agg` with a timezone aware index where ``OverflowError`` would raise when passing a list of functions (:issue:`22660`) -- Bug in :meth:`pandas.core.groupby.DataFrameGroupBy.nunique` in which the names of column levels were lost (:issue:`23222`) -- Bug in :func:`pandas.core.groupby.GroupBy.agg` when applying an aggregation function to timezone aware data (:issue:`23683`) -- Bug in :func:`pandas.core.groupby.GroupBy.first` and :func:`pandas.core.groupby.GroupBy.last` where timezone information would be dropped (:issue:`21603`) -- Bug in :func:`pandas.core.groupby.GroupBy.size` when grouping only NA values (:issue:`23050`) +- Bug in :meth:`.Resampler.agg` with a timezone aware index where ``OverflowError`` would raise when passing a list of functions (:issue:`22660`) +- Bug in :meth:`.DataFrameGroupBy.nunique` in which the names of column levels were lost (:issue:`23222`) +- Bug in :func:`.GroupBy.agg` when applying an aggregation function to timezone aware data (:issue:`23683`) +- Bug in :func:`.GroupBy.first` and :func:`.GroupBy.last` where timezone information would be dropped (:issue:`21603`) +- Bug in :func:`.GroupBy.size` when grouping only NA values (:issue:`23050`) - Bug in :func:`Series.groupby` where ``observed`` kwarg was previously ignored (:issue:`24880`) - Bug in :func:`Series.groupby` where using ``groupby`` with a :class:`MultiIndex` Series with a list of labels equal to the length of the series caused incorrect grouping (:issue:`25704`) - Ensured that ordering of outputs in ``groupby`` aggregation functions is consistent across all versions of Python (:issue:`25692`) - Ensured that result group order is correct when grouping on an ordered ``Categorical`` and specifying ``observed=True`` (:issue:`25871`, :issue:`25167`) -- Bug in :meth:`pandas.core.window.Rolling.min` and :meth:`pandas.core.window.Rolling.max` that caused a memory leak (:issue:`25893`) -- Bug in :meth:`pandas.core.window.Rolling.count` and ``pandas.core.window.Expanding.count`` was previously ignoring the ``axis`` keyword (:issue:`13503`) -- Bug in :meth:`pandas.core.groupby.GroupBy.idxmax` and :meth:`pandas.core.groupby.GroupBy.idxmin` with datetime column would return incorrect dtype (:issue:`25444`, :issue:`15306`) -- Bug in :meth:`pandas.core.groupby.GroupBy.cumsum`, :meth:`pandas.core.groupby.GroupBy.cumprod`, :meth:`pandas.core.groupby.GroupBy.cummin` and :meth:`pandas.core.groupby.GroupBy.cummax` with categorical column having absent categories, would return incorrect result or segfault (:issue:`16771`) -- Bug in :meth:`pandas.core.groupby.GroupBy.nth` where NA values in the grouping would return incorrect results (:issue:`26011`) -- Bug in :meth:`pandas.core.groupby.SeriesGroupBy.transform` where transforming an empty group would raise a ``ValueError`` 
(:issue:`26208`) -- Bug in :meth:`pandas.core.frame.DataFrame.groupby` where passing a :class:`pandas.core.groupby.grouper.Grouper` would return incorrect groups when using the ``.groups`` accessor (:issue:`26326`) -- Bug in :meth:`pandas.core.groupby.GroupBy.agg` where incorrect results are returned for uint64 columns. (:issue:`26310`) -- Bug in :meth:`pandas.core.window.Rolling.median` and :meth:`pandas.core.window.Rolling.quantile` where MemoryError is raised with empty window (:issue:`26005`) -- Bug in :meth:`pandas.core.window.Rolling.median` and :meth:`pandas.core.window.Rolling.quantile` where incorrect results are returned with ``closed='left'`` and ``closed='neither'`` (:issue:`26005`) -- Improved :class:`pandas.core.window.Rolling`, :class:`pandas.core.window.Window` and :class:`pandas.core.window.ExponentialMovingWindow` functions to exclude nuisance columns from results instead of raising errors and raise a ``DataError`` only if all columns are nuisance (:issue:`12537`) -- Bug in :meth:`pandas.core.window.Rolling.max` and :meth:`pandas.core.window.Rolling.min` where incorrect results are returned with an empty variable window (:issue:`26005`) -- Raise a helpful exception when an unsupported weighted window function is used as an argument of :meth:`pandas.core.window.Window.aggregate` (:issue:`26597`) +- Bug in :meth:`.Rolling.min` and :meth:`.Rolling.max` that caused a memory leak (:issue:`25893`) +- Bug in :meth:`.Rolling.count` and ``.Expanding.count`` was previously ignoring the ``axis`` keyword (:issue:`13503`) +- Bug in :meth:`.GroupBy.idxmax` and :meth:`.GroupBy.idxmin` with datetime column would return incorrect dtype (:issue:`25444`, :issue:`15306`) +- Bug in :meth:`.GroupBy.cumsum`, :meth:`.GroupBy.cumprod`, :meth:`.GroupBy.cummin` and :meth:`.GroupBy.cummax` with categorical column having absent categories, would return incorrect result or segfault (:issue:`16771`) +- Bug in :meth:`.GroupBy.nth` where NA values in the grouping would return incorrect results (:issue:`26011`) +- Bug in :meth:`.SeriesGroupBy.transform` where transforming an empty group would raise a ``ValueError`` (:issue:`26208`) +- Bug in :meth:`.DataFrame.groupby` where passing a :class:`.Grouper` would return incorrect groups when using the ``.groups`` accessor (:issue:`26326`) +- Bug in :meth:`.GroupBy.agg` where incorrect results are returned for uint64 columns. 
(:issue:`26310`) +- Bug in :meth:`.Rolling.median` and :meth:`.Rolling.quantile` where MemoryError is raised with empty window (:issue:`26005`) +- Bug in :meth:`.Rolling.median` and :meth:`.Rolling.quantile` where incorrect results are returned with ``closed='left'`` and ``closed='neither'`` (:issue:`26005`) +- Improved :class:`.Rolling`, :class:`.Window` and :class:`.ExponentialMovingWindow` functions to exclude nuisance columns from results instead of raising errors and raise a ``DataError`` only if all columns are nuisance (:issue:`12537`) +- Bug in :meth:`.Rolling.max` and :meth:`.Rolling.min` where incorrect results are returned with an empty variable window (:issue:`26005`) +- Raise a helpful exception when an unsupported weighted window function is used as an argument of :meth:`.Window.aggregate` (:issue:`26597`) Reshaping ^^^^^^^^^ diff --git a/doc/source/whatsnew/v0.25.1.rst b/doc/source/whatsnew/v0.25.1.rst index cc24ba5d6557c..67017d7c9fb29 100644 --- a/doc/source/whatsnew/v0.25.1.rst +++ b/doc/source/whatsnew/v0.25.1.rst @@ -86,10 +86,10 @@ GroupBy/resample/rolling ^^^^^^^^^^^^^^^^^^^^^^^^ - Fixed regression in :meth:`pands.core.groupby.DataFrameGroupBy.quantile` raising when multiple quantiles are given (:issue:`27526`) -- Bug in :meth:`pandas.core.groupby.DataFrameGroupBy.transform` where applying a timezone conversion lambda function would drop timezone information (:issue:`27496`) -- Bug in :meth:`pandas.core.groupby.GroupBy.nth` where ``observed=False`` was being ignored for Categorical groupers (:issue:`26385`) +- Bug in :meth:`.DataFrameGroupBy.transform` where applying a timezone conversion lambda function would drop timezone information (:issue:`27496`) +- Bug in :meth:`.GroupBy.nth` where ``observed=False`` was being ignored for Categorical groupers (:issue:`26385`) - Bug in windowing over read-only arrays (:issue:`27766`) -- Fixed segfault in ``pandas.core.groupby.DataFrameGroupBy.quantile`` when an invalid quantile was passed (:issue:`27470`) +- Fixed segfault in ``.DataFrameGroupBy.quantile`` when an invalid quantile was passed (:issue:`27470`) Reshaping ^^^^^^^^^ diff --git a/doc/source/whatsnew/v0.25.2.rst b/doc/source/whatsnew/v0.25.2.rst index ab6aaebe4ed06..5bd00f17bd3c5 100644 --- a/doc/source/whatsnew/v0.25.2.rst +++ b/doc/source/whatsnew/v0.25.2.rst @@ -31,8 +31,8 @@ IO GroupBy/resample/rolling ^^^^^^^^^^^^^^^^^^^^^^^^ -- Bug incorrectly raising an ``IndexError`` when passing a list of quantiles to :meth:`pandas.core.groupby.DataFrameGroupBy.quantile` (:issue:`28113`). -- Bug in :meth:`pandas.core.groupby.GroupBy.shift`, :meth:`pandas.core.groupby.GroupBy.bfill` and :meth:`pandas.core.groupby.GroupBy.ffill` where timezone information would be dropped (:issue:`19995`, :issue:`27992`) +- Bug incorrectly raising an ``IndexError`` when passing a list of quantiles to :meth:`.DataFrameGroupBy.quantile` (:issue:`28113`). +- Bug in :meth:`.GroupBy.shift`, :meth:`.GroupBy.bfill` and :meth:`.GroupBy.ffill` where timezone information would be dropped (:issue:`19995`, :issue:`27992`) Other ^^^^^ diff --git a/doc/source/whatsnew/v1.0.0.rst b/doc/source/whatsnew/v1.0.0.rst index 5e4559303a8b7..f1302b639647b 100755 --- a/doc/source/whatsnew/v1.0.0.rst +++ b/doc/source/whatsnew/v1.0.0.rst @@ -242,7 +242,7 @@ Other enhancements - :func:`to_parquet` now appropriately handles the ``schema`` argument for user defined schemas in the pyarrow engine. 
(:issue:`30270`) - :meth:`DataFrame.to_json` now accepts an ``indent`` integer argument to enable pretty printing of JSON output (:issue:`12004`) - :meth:`read_stata` can read Stata 119 dta files. (:issue:`28250`) -- Implemented :meth:`pandas.core.window.Window.var` and :meth:`pandas.core.window.Window.std` functions (:issue:`26597`) +- Implemented :meth:`.Window.var` and :meth:`.Window.std` functions (:issue:`26597`) - Added ``encoding`` argument to :meth:`DataFrame.to_string` for non-ascii text (:issue:`28766`) - Added ``encoding`` argument to :func:`DataFrame.to_html` for non-ascii text (:issue:`28663`) - :meth:`Styler.background_gradient` now accepts ``vmin`` and ``vmax`` arguments (:issue:`12145`) @@ -987,7 +987,7 @@ or ``matplotlib.Axes.plot``. See :ref:`plotting.formatters` for more. - The 'outer' method on Numpy ufuncs, e.g. ``np.subtract.outer`` operating on :class:`Series` objects is no longer supported, and will raise ``NotImplementedError`` (:issue:`27198`) - Removed ``Series.get_dtype_counts`` and ``DataFrame.get_dtype_counts`` (:issue:`27145`) - Changed the default "fill_value" argument in :meth:`Categorical.take` from ``True`` to ``False`` (:issue:`20841`) -- Changed the default value for the ``raw`` argument in :func:`Series.rolling().apply() <pandas.core.window.Rolling.apply>`, :func:`DataFrame.rolling().apply() <pandas.core.window.Rolling.apply>`, :func:`Series.expanding().apply() <pandas.core.window.Expanding.apply>`, and :func:`DataFrame.expanding().apply() <pandas.core.window.Expanding.apply>` from ``None`` to ``False`` (:issue:`20584`) +- Changed the default value for the ``raw`` argument in :func:`Series.rolling().apply() <.Rolling.apply>`, :func:`DataFrame.rolling().apply() <.Rolling.apply>`, :func:`Series.expanding().apply() <.Expanding.apply>`, and :func:`DataFrame.expanding().apply() <.Expanding.apply>` from ``None`` to ``False`` (:issue:`20584`) - Removed deprecated behavior of :meth:`Series.argmin` and :meth:`Series.argmax`, use :meth:`Series.idxmin` and :meth:`Series.idxmax` for the old behavior (:issue:`16955`) - Passing a tz-aware ``datetime.datetime`` or :class:`Timestamp` into the :class:`Timestamp` constructor with the ``tz`` argument now raises a ``ValueError`` (:issue:`23621`) - Removed ``Series.base``, ``Index.base``, ``Categorical.base``, ``Series.flags``, ``Index.flags``, ``PeriodArray.flags``, ``Series.strides``, ``Index.strides``, ``Series.itemsize``, ``Index.itemsize``, ``Series.data``, ``Index.data`` (:issue:`20721`) @@ -1217,7 +1217,7 @@ GroupBy/resample/rolling - Bug in :meth:`core.groupby.DataFrameGroupBy.apply` only showing output from a single group when function returns an :class:`Index` (:issue:`28652`) - Bug in :meth:`DataFrame.groupby` with multiple groups where an ``IndexError`` would be raised if any group contained all NA values (:issue:`20519`) -- Bug in :meth:`pandas.core.resample.Resampler.size` and :meth:`pandas.core.resample.Resampler.count` returning wrong dtype when used with an empty :class:`Series` or :class:`DataFrame` (:issue:`28427`) +- Bug in :meth:`.Resampler.size` and :meth:`.Resampler.count` returning wrong dtype when used with an empty :class:`Series` or :class:`DataFrame` (:issue:`28427`) - Bug in :meth:`DataFrame.rolling` not allowing for rolling over datetimes when ``axis=1`` (:issue:`28192`) - Bug in :meth:`DataFrame.rolling` not allowing rolling over multi-index levels (:issue:`15584`). - Bug in :meth:`DataFrame.rolling` not allowing rolling on monotonic decreasing time indexes (:issue:`19248`). 
diff --git a/doc/source/whatsnew/v1.0.2.rst b/doc/source/whatsnew/v1.0.2.rst index 340fa97a513ad..0a5716f52c836 100644 --- a/doc/source/whatsnew/v1.0.2.rst +++ b/doc/source/whatsnew/v1.0.2.rst @@ -19,8 +19,8 @@ Fixed regressions - Fixed regression in :meth:`.DataFrameGroupBy.agg` and :meth:`.SeriesGroupBy.agg` which were failing on frames with :class:`MultiIndex` columns and a custom function (:issue:`31777`) - Fixed regression in ``groupby(..).rolling(..).apply()`` (``RollingGroupby``) where the ``raw`` parameter was ignored (:issue:`31754`) -- Fixed regression in :meth:`rolling(..).corr() <pandas.core.window.rolling.Rolling.corr>` when using a time offset (:issue:`31789`) -- Fixed regression in :meth:`groupby(..).nunique() <pandas.core.groupby.DataFrameGroupBy.nunique>` which was modifying the original values if ``NaN`` values were present (:issue:`31950`) +- Fixed regression in :meth:`rolling(..).corr() <.Rolling.corr>` when using a time offset (:issue:`31789`) +- Fixed regression in :meth:`groupby(..).nunique() <.DataFrameGroupBy.nunique>` which was modifying the original values if ``NaN`` values were present (:issue:`31950`) - Fixed regression in ``DataFrame.groupby`` raising a ``ValueError`` from an internal operation (:issue:`31802`) - Fixed regression in :meth:`.DataFrameGroupBy.agg` and :meth:`.SeriesGroupBy.agg` calling a user-provided function an extra time on an empty input (:issue:`31760`) diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index dd566eaab1e75..37d021efddf0b 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -313,9 +313,9 @@ Other enhancements - :meth:`melt` has gained an ``ignore_index`` (default ``True``) argument that, if set to ``False``, prevents the method from dropping the index (:issue:`17440`). 
- :meth:`Series.update` now accepts objects that can be coerced to a :class:`Series`, such as ``dict`` and ``list``, mirroring the behavior of :meth:`DataFrame.update` (:issue:`33215`) -- :meth:`~pandas.core.groupby.DataFrameGroupBy.transform` and :meth:`~pandas.core.groupby.DataFrameGroupBy.aggregate` have gained ``engine`` and ``engine_kwargs`` arguments that support executing functions with ``Numba`` (:issue:`32854`, :issue:`33388`) -- :meth:`~pandas.core.resample.Resampler.interpolate` now supports SciPy interpolation method :class:`scipy.interpolate.CubicSpline` as method ``cubicspline`` (:issue:`33670`) -- :class:`~pandas.core.groupby.DataFrameGroupBy` and :class:`~pandas.core.groupby.SeriesGroupBy` now implement the ``sample`` method for doing random sampling within groups (:issue:`31775`) +- :meth:`.DataFrameGroupBy.transform` and :meth:`.DataFrameGroupBy.aggregate` have gained ``engine`` and ``engine_kwargs`` arguments that support executing functions with ``Numba`` (:issue:`32854`, :issue:`33388`) +- :meth:`.Resampler.interpolate` now supports SciPy interpolation method :class:`scipy.interpolate.CubicSpline` as method ``cubicspline`` (:issue:`33670`) +- :class:`.DataFrameGroupBy` and :class:`.SeriesGroupBy` now implement the ``sample`` method for doing random sampling within groups (:issue:`31775`) - :meth:`DataFrame.to_numpy` now supports the ``na_value`` keyword to control the NA sentinel in the output array (:issue:`33820`) - Added :class:`api.extension.ExtensionArray.equals` to the extension array interface, similar to :meth:`Series.equals` (:issue:`27081`) - The minimum supported dta version has increased to 105 in :func:`read_stata` and :class:`~pandas.io.stata.StataReader` (:issue:`26667`). @@ -327,10 +327,10 @@ Other enhancements and :class:`~pandas.io.stata.StataWriterUTF8` (:issue:`26599`). - :meth:`HDFStore.put` now accepts a ``track_times`` parameter. This parameter is passed to the ``create_table`` method of ``PyTables`` (:issue:`32682`). - :meth:`Series.plot` and :meth:`DataFrame.plot` now accepts ``xlabel`` and ``ylabel`` parameters to present labels on x and y axis (:issue:`9093`). -- Made :class:`pandas.core.window.rolling.Rolling` and :class:`pandas.core.window.expanding.Expanding` iterable(:issue:`11704`) +- Made :class:`.Rolling` and :class:`.Expanding` iterable(:issue:`11704`) - Made ``option_context`` a :class:`contextlib.ContextDecorator`, which allows it to be used as a decorator over an entire function (:issue:`34253`). - :meth:`DataFrame.to_csv` and :meth:`Series.to_csv` now accept an ``errors`` argument (:issue:`22610`) -- :meth:`~pandas.core.groupby.DataFrameGroupBy.groupby.transform` now allows ``func`` to be ``pad``, ``backfill`` and ``cumcount`` (:issue:`31269`). +- :meth:`.DataFrameGroupBy.groupby.transform` now allows ``func`` to be ``pad``, ``backfill`` and ``cumcount`` (:issue:`31269`). - :func:`read_json` now accepts an ``nrows`` parameter. (:issue:`33916`). - :meth:`DataFrame.hist`, :meth:`Series.hist`, :meth:`core.groupby.DataFrameGroupBy.hist`, and :meth:`core.groupby.SeriesGroupBy.hist` have gained the ``legend`` argument. Set to True to show a legend in the histogram. (:issue:`6279`) - :func:`concat` and :meth:`~DataFrame.append` now preserve extension dtypes, for example @@ -344,7 +344,7 @@ Other enhancements - :meth:`~Series.explode` now accepts ``ignore_index`` to reset the index, similar to :meth:`pd.concat` or :meth:`DataFrame.sort_values` (:issue:`34932`). 
- :meth:`DataFrame.to_markdown` and :meth:`Series.to_markdown` now accept ``index`` argument as an alias for tabulate's ``showindex`` (:issue:`32667`) - :meth:`read_csv` now accepts string values like "0", "0.0", "1", "1.0" as convertible to the nullable Boolean dtype (:issue:`34859`) -- :class:`pandas.core.window.ExponentialMovingWindow` now supports a ``times`` argument that allows ``mean`` to be calculated with observations spaced by the timestamps in ``times`` (:issue:`34839`) +- :class:`.ExponentialMovingWindow` now supports a ``times`` argument that allows ``mean`` to be calculated with observations spaced by the timestamps in ``times`` (:issue:`34839`) - :meth:`DataFrame.agg` and :meth:`Series.agg` now accept named aggregation for renaming the output columns/indexes. (:issue:`26513`) - ``compute.use_numba`` now exists as a configuration option that utilizes the numba engine when available (:issue:`33966`, :issue:`35374`) - :meth:`Series.plot` now supports asymmetric error bars. Previously, if :meth:`Series.plot` received a "2xN" array with error values for ``yerr`` and/or ``xerr``, the left/lower values (first row) were mirrored, while the right/upper values (second row) were ignored. Now, the first row represents the left/lower error values and the second row the right/upper error values. (:issue:`9536`) @@ -629,7 +629,7 @@ Using :meth:`DataFrame.groupby` with ``as_index=False`` and the function ``idxma df.groupby("a", as_index=False).nunique() -The method :meth:`~pandas.core.groupby.DataFrameGroupBy.size` would previously ignore ``as_index=False``. Now the grouping columns are returned as columns, making the result a :class:`DataFrame` instead of a :class:`Series`. (:issue:`32599`) +The method :meth:`.DataFrameGroupBy.size` would previously ignore ``as_index=False``. Now the grouping columns are returned as columns, making the result a :class:`DataFrame` instead of a :class:`Series`. (:issue:`32599`) *Previous behavior*: @@ -650,10 +650,10 @@ The method :meth:`~pandas.core.groupby.DataFrameGroupBy.size` would previously i .. _whatsnew_110.api_breaking.groupby_results_lost_as_index_false: -:meth:`~pandas.core.groupby.DataFrameGroupby.agg` lost results with ``as_index=False`` when relabeling columns -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +:meth:`.DataFrameGroupby.agg` lost results with ``as_index=False`` when relabeling columns +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Previously :meth:`~pandas.core.groupby.DataFrameGroupby.agg` lost the result columns, when the ``as_index`` option was +Previously :meth:`.DataFrameGroupby.agg` lost the result columns, when the ``as_index`` option was set to ``False`` and the result columns were relabeled. In this case the result values were replaced with the previous index (:issue:`32240`). @@ -878,14 +878,14 @@ Performance improvements sparse values from ``scipy.sparse`` matrices using the :meth:`DataFrame.sparse.from_spmatrix` constructor (:issue:`32821`, :issue:`32825`, :issue:`32826`, :issue:`32856`, :issue:`32858`). -- Performance improvement for groupby methods :meth:`~pandas.core.groupby.groupby.Groupby.first` - and :meth:`~pandas.core.groupby.groupby.Groupby.last` (:issue:`34178`) +- Performance improvement for groupby methods :meth:`.Groupby.first` + and :meth:`.Groupby.last` (:issue:`34178`) - Performance improvement in :func:`factorize` for nullable (integer and Boolean) dtypes (:issue:`33064`). 
- Performance improvement when constructing :class:`Categorical` objects (:issue:`33921`) - Fixed performance regression in :func:`pandas.qcut` and :func:`pandas.cut` (:issue:`33921`) - Performance improvement in reductions (``sum``, ``prod``, ``min``, ``max``) for nullable (integer and Boolean) dtypes (:issue:`30982`, :issue:`33261`, :issue:`33442`). - Performance improvement in arithmetic operations between two :class:`DataFrame` objects (:issue:`32779`) -- Performance improvement in :class:`pandas.core.groupby.RollingGroupby` (:issue:`34052`) +- Performance improvement in :class:`.RollingGroupby` (:issue:`34052`) - Performance improvement in arithmetic operations (``sub``, ``add``, ``mul``, ``div``) for :class:`MultiIndex` (:issue:`34297`) - Performance improvement in ``DataFrame[bool_indexer]`` when ``bool_indexer`` is a ``list`` (:issue:`33924`) - Significant performance improvement of :meth:`io.formats.style.Styler.render` with styles added with various ways such as :meth:`io.formats.style.Styler.apply`, :meth:`io.formats.style.Styler.applymap` or :meth:`io.formats.style.Styler.bar` (:issue:`19917`) diff --git a/doc/source/whatsnew/v1.1.1.rst b/doc/source/whatsnew/v1.1.1.rst index 77ea67f76f655..176a8b85e76b4 100644 --- a/doc/source/whatsnew/v1.1.1.rst +++ b/doc/source/whatsnew/v1.1.1.rst @@ -30,7 +30,7 @@ Fixed regressions - Fixed regression where :func:`pandas.merge_asof` would raise a ``UnboundLocalError`` when ``left_index``, ``right_index`` and ``tolerance`` were set (:issue:`35558`) - Fixed regression in ``.groupby(..).rolling(..)`` where a custom ``BaseIndexer`` would be ignored (:issue:`35557`) - Fixed regression in :meth:`DataFrame.replace` and :meth:`Series.replace` where compiled regular expressions would be ignored during replacement (:issue:`35680`) -- Fixed regression in :meth:`~pandas.core.groupby.DataFrameGroupBy.aggregate` where a list of functions would produce the wrong results if at least one of the functions did not aggregate (:issue:`35490`) +- Fixed regression in :meth:`.DataFrameGroupBy.aggregate` where a list of functions would produce the wrong results if at least one of the functions did not aggregate (:issue:`35490`) - Fixed memory usage issue when instantiating large :class:`pandas.arrays.StringArray` (:issue:`35499`) .. --------------------------------------------------------------------------- diff --git a/doc/source/whatsnew/v1.2.0.rst b/doc/source/whatsnew/v1.2.0.rst index fc8b59e11e001..12ab4f27d1e62 100644 --- a/doc/source/whatsnew/v1.2.0.rst +++ b/doc/source/whatsnew/v1.2.0.rst @@ -793,13 +793,13 @@ Groupby/resample/rolling - Bug in :meth:`DataFrame.resample` that would throw a ``ValueError`` when resampling from ``"D"`` to ``"24H"`` over a transition into daylight savings time (DST) (:issue:`35219`) - Bug when combining methods :meth:`DataFrame.groupby` with :meth:`DataFrame.resample` and :meth:`DataFrame.interpolate` raising a ``TypeError`` (:issue:`35325`) - Bug in :meth:`.DataFrameGroupBy.apply` where a non-nuisance grouping column would be dropped from the output columns if another groupby method was called before ``.apply`` (:issue:`34656`) -- Bug when subsetting columns on a :class:`~pandas.core.groupby.DataFrameGroupBy` (e.g. ``df.groupby('a')[['b']])``) would reset the attributes ``axis``, ``dropna``, ``group_keys``, ``level``, ``mutated``, ``sort``, and ``squeeze`` to their default values (:issue:`9959`) +- Bug when subsetting columns on a :class:`.DataFrameGroupBy` (e.g. 
``df.groupby('a')[['b']])``) would reset the attributes ``axis``, ``dropna``, ``group_keys``, ``level``, ``mutated``, ``sort``, and ``squeeze`` to their default values (:issue:`9959`) - Bug in :meth:`.DataFrameGroupBy.tshift` failing to raise ``ValueError`` when a frequency cannot be inferred for the index of a group (:issue:`35937`) - Bug in :meth:`DataFrame.groupby` does not always maintain column index name for ``any``, ``all``, ``bfill``, ``ffill``, ``shift`` (:issue:`29764`) - Bug in :meth:`.DataFrameGroupBy.apply` raising error with ``np.nan`` group(s) when ``dropna=False`` (:issue:`35889`) - Bug in :meth:`.Rolling.sum` returned wrong values when dtypes where mixed between float and integer and ``axis=1`` (:issue:`20649`, :issue:`35596`) - Bug in :meth:`.Rolling.count` returned ``np.nan`` with :class:`~pandas.api.indexers.FixedForwardWindowIndexer` as window, ``min_periods=0`` and only missing values in the window (:issue:`35579`) -- Bug where :class:`pandas.core.window.Rolling` produces incorrect window sizes when using a ``PeriodIndex`` (:issue:`34225`) +- Bug where :class:`.Rolling` produces incorrect window sizes when using a ``PeriodIndex`` (:issue:`34225`) - Bug in :meth:`.DataFrameGroupBy.ffill` and :meth:`.DataFrameGroupBy.bfill` where a ``NaN`` group would return filled values instead of ``NaN`` when ``dropna=True`` (:issue:`34725`) - Bug in :meth:`.RollingGroupby.count` where a ``ValueError`` was raised when specifying the ``closed`` parameter (:issue:`35869`) - Bug in :meth:`.DataFrameGroupBy.rolling` returning wrong values with partial centered window (:issue:`36040`) diff --git a/doc/source/whatsnew/v1.4.2.rst b/doc/source/whatsnew/v1.4.2.rst index 64c36632bfefe..64d7e683c9d27 100644 --- a/doc/source/whatsnew/v1.4.2.rst +++ b/doc/source/whatsnew/v1.4.2.rst @@ -33,7 +33,7 @@ Bug fixes - Fix some cases for subclasses that define their ``_constructor`` properties as general callables (:issue:`46018`) - Fixed "longtable" formatting in :meth:`.Styler.to_latex` when ``column_format`` is given in extended format (:issue:`46037`) - Fixed incorrect rendering in :meth:`.Styler.format` with ``hyperlinks="html"`` when the url contains a colon or other special characters (:issue:`46389`) -- Improved error message in :class:`~pandas.core.window.Rolling` when ``window`` is a frequency and ``NaT`` is in the rolling axis (:issue:`46087`) +- Improved error message in :class:`.Rolling` when ``window`` is a frequency and ``NaT`` is in the rolling axis (:issue:`46087`) .. 
--------------------------------------------------------------------------- diff --git a/doc/source/whatsnew/v2.2.0.rst b/doc/source/whatsnew/v2.2.0.rst index ef59c86a21598..f190219e6e2d6 100644 --- a/doc/source/whatsnew/v2.2.0.rst +++ b/doc/source/whatsnew/v2.2.0.rst @@ -371,7 +371,7 @@ Plotting Groupby/resample/rolling ^^^^^^^^^^^^^^^^^^^^^^^^ -- Bug in :class:`pandas.core.window.Rolling` where duplicate datetimelike indexes are treated as consecutive rather than equal with ``closed='left'`` and ``closed='neither'`` (:issue:`20712`) +- Bug in :class:`.Rolling` where duplicate datetimelike indexes are treated as consecutive rather than equal with ``closed='left'`` and ``closed='neither'`` (:issue:`20712`) - Bug in :meth:`DataFrame.resample` not respecting ``closed`` and ``label`` arguments for :class:`~pandas.tseries.offsets.BusinessDay` (:issue:`55282`) - Bug in :meth:`DataFrame.resample` where bin edges were not correct for :class:`~pandas.tseries.offsets.BusinessDay` (:issue:`55281`) - Bug in :meth:`DataFrame.resample` where bin edges were not correct for :class:`~pandas.tseries.offsets.MonthBegin` (:issue:`55271`)
- [ ] closes #xxxx (Replace xxxx with the GitHub issue number)
- [ ] [Tests added and passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#writing-tests) if fixing a bug or adding a new feature
- [ ] All [code checks passed](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#pre-commit).
- [ ] Added [type annotations](https://pandas.pydata.org/pandas-docs/dev/development/contributing_codebase.html#type-hints) to new arguments/methods/functions.
- [ ] Added an entry in the latest `doc/source/whatsnew/vX.X.X.rst` file if fixing a bug or adding a new feature.

Doing this will make the diffs slightly smaller for #27522, and I think it should be done in any case.
https://api.github.com/repos/pandas-dev/pandas/pulls/55555
2023-10-17T01:59:49Z
2023-10-17T15:47:23Z
2023-10-17T15:47:23Z
2023-10-17T19:38:07Z
TST: split/collect/parametrize tests
diff --git a/pandas/conftest.py b/pandas/conftest.py index 829ac64884dac..ce5647320837c 100644 --- a/pandas/conftest.py +++ b/pandas/conftest.py @@ -19,7 +19,7 @@ """ from collections import abc -from datetime import date, time, timedelta, timezone +from datetime import date, datetime, time, timedelta, timezone from decimal import Decimal import operator import os @@ -757,6 +757,27 @@ def mixed_type_frame(): ) +@pytest.fixture +def rand_series_with_duplicate_datetimeindex(): + """ + Fixture for Series with a DatetimeIndex that has duplicates. + """ + dates = [ + datetime(2000, 1, 2), + datetime(2000, 1, 2), + datetime(2000, 1, 2), + datetime(2000, 1, 3), + datetime(2000, 1, 3), + datetime(2000, 1, 3), + datetime(2000, 1, 4), + datetime(2000, 1, 4), + datetime(2000, 1, 4), + datetime(2000, 1, 5), + ] + + return Series(np.random.randn(len(dates)), index=dates) + + # ---------------------------------------------------------------- # Scalars # ---------------------------------------------------------------- diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index dd836591d5965..4dce7e8553be4 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -2143,3 +2143,10 @@ def test_groupby_numerical_stability_cumsum(): ) expected = DataFrame({"a": exp_data, "b": exp_data}) tm.assert_frame_equal(result, expected, check_exact=True) + + +def test_groupby_mean_duplicate_index(rand_series_with_duplicate_datetimeindex): + dups = rand_series_with_duplicate_datetimeindex + result = dups.groupby(level=0).mean() + expected = dups.groupby(dups.index).mean() + tm.assert_series_equal(result, expected) diff --git a/pandas/tests/indexes/datetimes/test_datetime.py b/pandas/tests/indexes/datetimes/test_datetime.py index e98dd2ec79de9..846bca2ecf2f6 100644 --- a/pandas/tests/indexes/datetimes/test_datetime.py +++ b/pandas/tests/indexes/datetimes/test_datetime.py @@ -198,23 +198,6 @@ def test_ns_index(self): new_index = date_range(start=index[0], end=index[-1], freq=index.freq) self.assert_index_parameters(new_index) - @pytest.mark.parametrize( - "arr, expected", - [ - (DatetimeIndex(["2017", "2017"]), DatetimeIndex(["2017"])), - ( - DatetimeIndex(["2017", "2017"], tz="US/Eastern"), - DatetimeIndex(["2017"], tz="US/Eastern"), - ), - ], - ) - def test_unique(self, arr, expected): - result = arr.unique() - tm.assert_index_equal(result, expected) - # GH 21737 - # Ensure the underlying data is consistent - assert result[0] == expected[0] - def test_asarray_tz_naive(self): # This shouldn't produce a warning. 
idx = date_range("2000", periods=2) diff --git a/pandas/tests/indexes/datetimes/test_unique.py b/pandas/tests/indexes/datetimes/test_unique.py new file mode 100644 index 0000000000000..f85cc87ee88a8 --- /dev/null +++ b/pandas/tests/indexes/datetimes/test_unique.py @@ -0,0 +1,74 @@ +from datetime import datetime, timedelta + +import pytest + +from pandas import DatetimeIndex, NaT, Timestamp +import pandas._testing as tm + + +@pytest.mark.parametrize( + "arr, expected", + [ + (DatetimeIndex(["2017", "2017"]), DatetimeIndex(["2017"])), + ( + DatetimeIndex(["2017", "2017"], tz="US/Eastern"), + DatetimeIndex(["2017"], tz="US/Eastern"), + ), + ], +) +def test_unique(arr, expected): + result = arr.unique() + tm.assert_index_equal(result, expected) + # GH#21737 + # Ensure the underlying data is consistent + assert result[0] == expected[0] + + +def test_index_unique(rand_series_with_duplicate_datetimeindex): + dups = rand_series_with_duplicate_datetimeindex + index = dups.index + + uniques = index.unique() + expected = DatetimeIndex( + [ + datetime(2000, 1, 2), + datetime(2000, 1, 3), + datetime(2000, 1, 4), + datetime(2000, 1, 5), + ] + ) + assert uniques.dtype == "M8[ns]" # sanity + tm.assert_index_equal(uniques, expected) + assert index.nunique() == 4 + + # GH#2563 + assert isinstance(uniques, DatetimeIndex) + + dups_local = index.tz_localize("US/Eastern") + dups_local.name = "foo" + result = dups_local.unique() + expected = DatetimeIndex(expected, name="foo") + expected = expected.tz_localize("US/Eastern") + assert result.tz is not None + assert result.name == "foo" + tm.assert_index_equal(result, expected) + + # NaT, note this is excluded + arr = [1370745748 + t for t in range(20)] + [NaT.value] + idx = DatetimeIndex(arr * 3) + tm.assert_index_equal(idx.unique(), DatetimeIndex(arr)) + assert idx.nunique() == 20 + assert idx.nunique(dropna=False) == 21 + + arr = [ + Timestamp("2013-06-09 02:42:28") + timedelta(seconds=t) for t in range(20) + ] + [NaT] + idx = DatetimeIndex(arr * 3) + tm.assert_index_equal(idx.unique(), DatetimeIndex(arr)) + assert idx.nunique() == 20 + assert idx.nunique(dropna=False) == 21 + + +def test_is_unique_monotonic(rand_series_with_duplicate_datetimeindex): + index = rand_series_with_duplicate_datetimeindex.index + assert not index.is_unique diff --git a/pandas/tests/series/indexing/test_datetime.py b/pandas/tests/series/indexing/test_datetime.py index a15ef11f9c292..6c595e1f0f19f 100644 --- a/pandas/tests/series/indexing/test_datetime.py +++ b/pandas/tests/series/indexing/test_datetime.py @@ -9,18 +9,10 @@ import pytest import pytz -from pandas._libs import iNaT, index as libindex +from pandas._libs import index as libindex import pandas as pd -from pandas import ( - DataFrame, - DatetimeIndex, - NaT, - Series, - Timestamp, - date_range, - period_range, -) +from pandas import DataFrame, Series, Timestamp, date_range, period_range import pandas._testing as tm @@ -347,77 +339,10 @@ def test_datetime_indexing(): """ -@pytest.fixture -def dups(): - dates = [ - datetime(2000, 1, 2), - datetime(2000, 1, 2), - datetime(2000, 1, 2), - datetime(2000, 1, 3), - datetime(2000, 1, 3), - datetime(2000, 1, 3), - datetime(2000, 1, 4), - datetime(2000, 1, 4), - datetime(2000, 1, 4), - datetime(2000, 1, 5), - ] - - return Series(np.random.randn(len(dates)), index=dates) - - -def test_constructor(dups): - assert isinstance(dups, Series) - assert isinstance(dups.index, DatetimeIndex) - - -def test_is_unique_monotonic(dups): - assert not dups.index.is_unique - - -def 
test_index_unique(dups): - uniques = dups.index.unique() - expected = DatetimeIndex( - [ - datetime(2000, 1, 2), - datetime(2000, 1, 3), - datetime(2000, 1, 4), - datetime(2000, 1, 5), - ] - ) - assert uniques.dtype == "M8[ns]" # sanity - tm.assert_index_equal(uniques, expected) - assert dups.index.nunique() == 4 - - # #2563 - assert isinstance(uniques, DatetimeIndex) - - dups_local = dups.index.tz_localize("US/Eastern") - dups_local.name = "foo" - result = dups_local.unique() - expected = DatetimeIndex(expected, name="foo") - expected = expected.tz_localize("US/Eastern") - assert result.tz is not None - assert result.name == "foo" - tm.assert_index_equal(result, expected) - - # NaT, note this is excluded - arr = [1370745748 + t for t in range(20)] + [iNaT] - idx = DatetimeIndex(arr * 3) - tm.assert_index_equal(idx.unique(), DatetimeIndex(arr)) - assert idx.nunique() == 20 - assert idx.nunique(dropna=False) == 21 - - arr = [ - Timestamp("2013-06-09 02:42:28") + timedelta(seconds=t) for t in range(20) - ] + [NaT] - idx = DatetimeIndex(arr * 3) - tm.assert_index_equal(idx.unique(), DatetimeIndex(arr)) - assert idx.nunique() == 20 - assert idx.nunique(dropna=False) == 21 - - -def test_duplicate_dates_indexing(dups): - ts = dups +def test_indexing_with_duplicate_datetimeindex( + rand_series_with_duplicate_datetimeindex, +): + ts = rand_series_with_duplicate_datetimeindex uniques = ts.index.unique() for date in uniques: @@ -445,12 +370,6 @@ def test_duplicate_dates_indexing(dups): assert ts[datetime(2000, 1, 6)] == 0 -def test_groupby_average_dup_values(dups): - result = dups.groupby(level=0).mean() - expected = dups.groupby(dups.index).mean() - tm.assert_series_equal(result, expected) - - def test_indexing_over_size_cutoff(monkeypatch): # #1821 @@ -579,6 +498,8 @@ def test_indexing(): result = df["2001"]["A"] tm.assert_series_equal(expected, result) + +def test_getitem_str_month_with_datetimeindex(): # GH3546 (not including times on the last day) idx = date_range(start="2013-05-31 00:00", end="2013-05-31 23:00", freq="H") ts = Series(range(len(idx)), index=idx) @@ -590,6 +511,8 @@ def test_indexing(): expected = ts["2013-05"] tm.assert_series_equal(expected, ts) + +def test_getitem_str_year_with_datetimeindex(): idx = [ Timestamp("2013-05-31 00:00"), Timestamp(datetime(2013, 5, 31, 23, 59, 59, 999999)), @@ -598,17 +521,19 @@ def test_indexing(): expected = ts["2013"] tm.assert_series_equal(expected, ts) + +def test_getitem_str_second_with_datetimeindex(): # GH14826, indexing with a seconds resolution string / datetime object df = DataFrame( np.random.rand(5, 5), columns=["open", "high", "low", "close", "volume"], index=date_range("2012-01-02 18:01:00", periods=5, tz="US/Central", freq="s"), ) - expected = df.loc[[df.index[2]]] # this is a single date, so will raise with pytest.raises(KeyError, match=r"^'2012-01-02 18:01:02'$"): df["2012-01-02 18:01:02"] + msg = r"Timestamp\('2012-01-02 18:01:02-0600', tz='US/Central', freq='S'\)" with pytest.raises(KeyError, match=msg): df[df.index[2]] diff --git a/pandas/tests/series/indexing/test_indexing.py b/pandas/tests/series/indexing/test_indexing.py index c9e505ef4bbaf..23b98ba9c4eb9 100644 --- a/pandas/tests/series/indexing/test_indexing.py +++ b/pandas/tests/series/indexing/test_indexing.py @@ -546,64 +546,52 @@ def test_timedelta_assignment(): @pytest.mark.parametrize( - "nat_val,should_cast", + "nat_val", [ - (pd.NaT, True), - (np.timedelta64("NaT", "ns"), False), - (np.datetime64("NaT", "ns"), True), + pd.NaT, + np.timedelta64("NaT", "ns"), + 
np.datetime64("NaT", "ns"), ], ) @pytest.mark.parametrize("tz", [None, "UTC"]) -def test_dt64_series_assign_nat(nat_val, should_cast, tz): +def test_dt64_series_assign_nat(nat_val, tz, indexer_sli): # some nat-like values should be cast to datetime64 when inserting # into a datetime64 series. Others should coerce to object # and retain their dtypes. dti = pd.date_range("2016-01-01", periods=3, tz=tz) base = Series(dti) expected = Series([pd.NaT] + list(dti[1:]), dtype=dti.dtype) + + should_cast = nat_val is pd.NaT or base.dtype.kind == nat_val.dtype.kind if not should_cast: expected = expected.astype(object) ser = base.copy(deep=True) - ser[0] = nat_val - tm.assert_series_equal(ser, expected) - - ser = base.copy(deep=True) - ser.loc[0] = nat_val - tm.assert_series_equal(ser, expected) - - ser = base.copy(deep=True) - ser.iloc[0] = nat_val + indexer_sli(ser)[0] = nat_val tm.assert_series_equal(ser, expected) @pytest.mark.parametrize( - "nat_val,should_cast", + "nat_val", [ - (pd.NaT, True), - (np.timedelta64("NaT", "ns"), True), - (np.datetime64("NaT", "ns"), False), + pd.NaT, + np.timedelta64("NaT", "ns"), + np.datetime64("NaT", "ns"), ], ) -def test_td64_series_assign_nat(nat_val, should_cast): +def test_td64_series_assign_nat(nat_val, indexer_sli): # some nat-like values should be cast to timedelta64 when inserting # into a timedelta64 series. Others should coerce to object # and retain their dtypes. base = Series([0, 1, 2], dtype="m8[ns]") expected = Series([pd.NaT, 1, 2], dtype="m8[ns]") + + should_cast = nat_val is pd.NaT or base.dtype == nat_val.dtype if not should_cast: expected = expected.astype(object) ser = base.copy(deep=True) - ser[0] = nat_val - tm.assert_series_equal(ser, expected) - - ser = base.copy(deep=True) - ser.loc[0] = nat_val - tm.assert_series_equal(ser, expected) - - ser = base.copy(deep=True) - ser.iloc[0] = nat_val + indexer_sli(ser)[0] = nat_val tm.assert_series_equal(ser, expected) diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py index 4605e0961189b..780fd276cdceb 100644 --- a/pandas/tests/series/test_constructors.py +++ b/pandas/tests/series/test_constructors.py @@ -1672,3 +1672,9 @@ def test_from_list_dtype(self): result = Series(["2015"], dtype="datetime64[ns]") assert result._mgr.blocks[0].is_extension is False + + +def test_constructor(rand_series_with_duplicate_datetimeindex): + dups = rand_series_with_duplicate_datetimeindex + assert isinstance(dups, Series) + assert isinstance(dups.index, DatetimeIndex)
https://api.github.com/repos/pandas-dev/pandas/pulls/39663
2021-02-08T03:22:52Z
2021-02-08T13:49:06Z
2021-02-08T13:49:06Z
2021-02-08T15:04:07Z
TST/REF: collect indexing tests
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 5c1fabd67bc8d..346258fe5162b 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -115,7 +115,7 @@ ) if TYPE_CHECKING: - from pandas import IntervalIndex, MultiIndex, RangeIndex, Series + from pandas import CategoricalIndex, IntervalIndex, MultiIndex, RangeIndex, Series from pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin @@ -405,6 +405,7 @@ def _ensure_array(cls, data, dtype, copy: bool): data = data.copy() return data + @final @classmethod def _dtype_to_subclass(cls, dtype: DtypeObj): # Delay import for perf. https://github.com/pandas-dev/pandas/pull/31423 @@ -1013,8 +1014,8 @@ def _format_data(self, name=None) -> str_t: if self.inferred_type == "string": is_justify = False elif self.inferred_type == "categorical": - # error: "Index" has no attribute "categories" - if is_object_dtype(self.categories): # type: ignore[attr-defined] + self = cast("CategoricalIndex", self) + if is_object_dtype(self.categories): is_justify = False return format_object_summary( @@ -1075,6 +1076,7 @@ def _format_with_header( result = trim_front(format_array(values, None, justify="left")) return header + result + @final def to_native_types(self, slicer=None, **kwargs): """ Format specified values of `self` and return them. diff --git a/pandas/tests/arrays/sparse/test_arithmetics.py b/pandas/tests/arrays/sparse/test_arithmetics.py index 86fbcb337b30e..6cc730ce49840 100644 --- a/pandas/tests/arrays/sparse/test_arithmetics.py +++ b/pandas/tests/arrays/sparse/test_arithmetics.py @@ -357,7 +357,7 @@ def test_mixed_array_float_int(self, kind, mix, all_arithmetic_functions, reques if not np_version_under1p20: if op in [operator.floordiv, ops.rfloordiv] and mix: - mark = pytest.mark.xfail(strict=True, reason="GH#38172") + mark = pytest.mark.xfail(reason="GH#38172") request.node.add_marker(mark) rdtype = "int64" diff --git a/pandas/tests/base/test_conversion.py b/pandas/tests/base/test_conversion.py index 94330f861eacd..67e519553517f 100644 --- a/pandas/tests/base/test_conversion.py +++ b/pandas/tests/base/test_conversion.py @@ -197,13 +197,13 @@ def test_iter_box(self): pd.DatetimeIndex(["2017", "2018"]), np.ndarray, "datetime64[ns]", - marks=[pytest.mark.xfail(reason="datetime _values", strict=True)], + marks=[pytest.mark.xfail(reason="datetime _values")], ), pytest.param( pd.TimedeltaIndex([10 ** 10]), np.ndarray, "m8[ns]", - marks=[pytest.mark.xfail(reason="timedelta _values", strict=True)], + marks=[pytest.mark.xfail(reason="timedelta _values")], ), ], ) diff --git a/pandas/tests/extension/test_boolean.py b/pandas/tests/extension/test_boolean.py index 1db18fb72bdeb..3ef3beaa9c1b1 100644 --- a/pandas/tests/extension/test_boolean.py +++ b/pandas/tests/extension/test_boolean.py @@ -106,16 +106,16 @@ def check_opname(self, s, op_name, other, exc=None): # overwriting to indicate ops don't raise an error super().check_opname(s, op_name, other, exc=None) - def _check_op(self, s, op, other, op_name, exc=NotImplementedError): + def _check_op(self, obj, op, other, op_name, exc=NotImplementedError): if exc is None: if op_name in self.implements: msg = r"numpy boolean subtract" with pytest.raises(TypeError, match=msg): - op(s, other) + op(obj, other) return - result = op(s, other) - expected = s.combine(other, op) + result = op(obj, other) + expected = self._combine(obj, other, op) if op_name in ( "__floordiv__", @@ -130,27 +130,20 @@ def _check_op(self, s, op, other, op_name, exc=NotImplementedError): elif 
op_name in ("__truediv__", "__rtruediv__"): # combine with bools does not generate the correct result # (numpy behaviour for div is to regard the bools as numeric) - expected = s.astype(float).combine(other, op).astype("Float64") + expected = self._combine(obj.astype(float), other, op) + expected = expected.astype("Float64") if op_name == "__rpow__": # for rpow, combine does not propagate NaN expected[result.isna()] = np.nan - self.assert_series_equal(result, expected) + self.assert_equal(result, expected) else: with pytest.raises(exc): - op(s, other) + op(obj, other) def _check_divmod_op(self, s, op, other, exc=None): # override to not raise an error super()._check_divmod_op(s, op, other, None) - def test_arith_frame_with_scalar(self, data, all_arithmetic_operators, request): - # frame & scalar - op_name = all_arithmetic_operators - if op_name not in self.implements: - mark = pytest.mark.xfail(reason="_reduce needs implementation") - request.node.add_marker(mark) - super().test_arith_frame_with_scalar(data, all_arithmetic_operators) - class TestComparisonOps(base.BaseComparisonOpsTests): def check_opname(self, s, op_name, other, exc=None): diff --git a/pandas/tests/extension/test_sparse.py b/pandas/tests/extension/test_sparse.py index 870099a9fcb4c..766910618d925 100644 --- a/pandas/tests/extension/test_sparse.py +++ b/pandas/tests/extension/test_sparse.py @@ -450,7 +450,7 @@ def _compare_other(self, s, data, op_name, other): class TestPrinting(BaseSparseTests, base.BasePrintingTests): - @pytest.mark.xfail(reason="Different repr", strict=True) + @pytest.mark.xfail(reason="Different repr") def test_array_repr(self, data, size): super().test_array_repr(data, size) diff --git a/pandas/tests/frame/indexing/test_getitem.py b/pandas/tests/frame/indexing/test_getitem.py index 6c6b4e002644c..4282db6933371 100644 --- a/pandas/tests/frame/indexing/test_getitem.py +++ b/pandas/tests/frame/indexing/test_getitem.py @@ -6,6 +6,7 @@ CategoricalDtype, CategoricalIndex, DataFrame, + Index, MultiIndex, Series, Timestamp, @@ -174,3 +175,24 @@ def test_getitem_bool_mask_categorical_index(self): df4[df4.index < 2] with pytest.raises(TypeError, match=msg): df4[df4.index > 1] + + +class TestGetitemSlice: + def test_getitem_slice_float64(self, frame_or_series): + values = np.arange(10.0, 50.0, 2) + index = Index(values) + + start, end = values[[5, 15]] + + data = np.random.randn(20, 3) + if frame_or_series is not DataFrame: + data = data[:, 0] + + obj = frame_or_series(data, index=index) + + result = obj[start:end] + expected = obj.iloc[5:16] + tm.assert_equal(result, expected) + + result = obj.loc[start:end] + tm.assert_equal(result, expected) diff --git a/pandas/tests/frame/methods/test_replace.py b/pandas/tests/frame/methods/test_replace.py index 1b570028964df..febd113f309ae 100644 --- a/pandas/tests/frame/methods/test_replace.py +++ b/pandas/tests/frame/methods/test_replace.py @@ -1587,7 +1587,6 @@ def test_replace_value_category_type(self): @pytest.mark.xfail( reason="category dtype gets changed to object type after replace, see #35268", - strict=True, ) def test_replace_dict_category_type(self, input_category_df, expected_category_df): """ diff --git a/pandas/tests/indexing/test_indexing.py b/pandas/tests/indexing/test_indexing.py index 686f383deab37..e9ae7bb056041 100644 --- a/pandas/tests/indexing/test_indexing.py +++ b/pandas/tests/indexing/test_indexing.py @@ -60,16 +60,22 @@ def test_getitem_ndarray_3d(self, index, frame_or_series, indexer_sli): idxr = indexer_sli(obj) nd3 = np.random.randint(5, 
size=(2, 2, 2)) - msg = "|".join( - [ - r"Buffer has wrong number of dimensions \(expected 1, got 3\)", - "Cannot index with multidimensional key", - r"Wrong number of dimensions. values.ndim != ndim \[3 != 1\]", - "Index data must be 1-dimensional", - "positional indexers are out-of-bounds", - "Indexing a MultiIndex with a multidimensional key is not implemented", - ] - ) + msgs = [] + if frame_or_series is Series and indexer_sli in [tm.setitem, tm.iloc]: + msgs.append(r"Wrong number of dimensions. values.ndim != ndim \[3 != 1\]") + if frame_or_series is Series or indexer_sli is tm.iloc: + msgs.append(r"Buffer has wrong number of dimensions \(expected 1, got 3\)") + if indexer_sli is tm.loc or ( + frame_or_series is Series and indexer_sli is tm.setitem + ): + msgs.append("Cannot index with multidimensional key") + if frame_or_series is DataFrame and indexer_sli is tm.setitem: + msgs.append("Index data must be 1-dimensional") + if isinstance(index, pd.IntervalIndex) and indexer_sli is tm.iloc: + msgs.append("Index data must be 1-dimensional") + if len(index) == 0 or isinstance(index, pd.MultiIndex): + msgs.append("positional indexers are out-of-bounds") + msg = "|".join(msgs) potential_errors = (IndexError, ValueError, NotImplementedError) with pytest.raises(potential_errors, match=msg): diff --git a/pandas/tests/series/indexing/test_getitem.py b/pandas/tests/series/indexing/test_getitem.py index a7a60f37bcd00..5a5e285bf719f 100644 --- a/pandas/tests/series/indexing/test_getitem.py +++ b/pandas/tests/series/indexing/test_getitem.py @@ -222,6 +222,24 @@ def test_getitem_slice_float_raises(self, datetime_series): with pytest.raises(TypeError, match=msg.format(key=r"4\.5")): datetime_series[4.5:10.0] + def test_getitem_slice_bug(self): + ser = Series(range(10), index=list(range(10))) + result = ser[-12:] + tm.assert_series_equal(result, ser) + + result = ser[-7:] + tm.assert_series_equal(result, ser[3:]) + + result = ser[:-12] + tm.assert_series_equal(result, ser[:0]) + + def test_getitem_slice_integers(self): + ser = Series(np.random.randn(8), index=[2, 4, 6, 8, 10, 12, 14, 16]) + + result = ser[:4] + expected = Series(ser.values[:4], index=[2, 4, 6, 8]) + tm.assert_series_equal(result, expected) + class TestSeriesGetitemListLike: @pytest.mark.parametrize("box", [list, np.array, Index, pd.Series]) diff --git a/pandas/tests/series/indexing/test_numeric.py b/pandas/tests/series/indexing/test_numeric.py deleted file mode 100644 index 10b9360802c1c..0000000000000 --- a/pandas/tests/series/indexing/test_numeric.py +++ /dev/null @@ -1,55 +0,0 @@ -import numpy as np - -from pandas import DataFrame, Index, Series -import pandas._testing as tm - - -def test_slice_float64(frame_or_series): - values = np.arange(10.0, 50.0, 2) - index = Index(values) - - start, end = values[[5, 15]] - - data = np.random.randn(20, 3) - if frame_or_series is not DataFrame: - data = data[:, 0] - - obj = frame_or_series(data, index=index) - - result = obj[start:end] - expected = obj.iloc[5:16] - tm.assert_equal(result, expected) - - result = obj.loc[start:end] - tm.assert_equal(result, expected) - - -def test_getitem_setitem_slice_bug(): - s = Series(range(10), index=list(range(10))) - result = s[-12:] - tm.assert_series_equal(result, s) - - result = s[-7:] - tm.assert_series_equal(result, s[3:]) - - result = s[:-12] - tm.assert_series_equal(result, s[:0]) - - s = Series(range(10), index=list(range(10))) - s[-12:] = 0 - assert (s == 0).all() - - s[:-12] = 5 - assert (s == 0).all() - - -def 
test_getitem_setitem_slice_integers(): - s = Series(np.random.randn(8), index=[2, 4, 6, 8, 10, 12, 14, 16]) - - result = s[:4] - expected = s.reindex([2, 4, 6, 8]) - tm.assert_series_equal(result, expected) - - s[:4] = 0 - assert (s[:4] == 0).all() - assert not (s[4:] == 0).any() diff --git a/pandas/tests/series/indexing/test_setitem.py b/pandas/tests/series/indexing/test_setitem.py index 3a993f544b64a..3023baca6ff5d 100644 --- a/pandas/tests/series/indexing/test_setitem.py +++ b/pandas/tests/series/indexing/test_setitem.py @@ -113,6 +113,21 @@ def test_setitem_slice_float_raises(self, datetime_series): with pytest.raises(TypeError, match=msg.format(key=r"4\.5")): datetime_series[4.5:10.0] = 0 + def test_setitem_slice(self): + ser = Series(range(10), index=list(range(10))) + ser[-12:] = 0 + assert (ser == 0).all() + + ser[:-12] = 5 + assert (ser == 0).all() + + def test_setitem_slice_integers(self): + ser = Series(np.random.randn(8), index=[2, 4, 6, 8, 10, 12, 14, 16]) + + ser[:4] = 0 + assert (ser[:4] == 0).all() + assert not (ser[4:] == 0).any() + class TestSetitemBooleanMask: def test_setitem_boolean(self, string_series): diff --git a/pandas/tests/series/test_api.py b/pandas/tests/series/test_api.py index 4dd91b942474a..c09df52fb5df5 100644 --- a/pandas/tests/series/test_api.py +++ b/pandas/tests/series/test_api.py @@ -174,6 +174,6 @@ def test_attrs(self): @skip_if_no("jinja2") def test_inspect_getmembers(self): # GH38782 - ser = Series() + ser = Series(dtype=object) with tm.assert_produces_warning(None): inspect.getmembers(ser) diff --git a/pandas/tests/series/test_logical_ops.py b/pandas/tests/series/test_logical_ops.py index 7cfda2464f21a..23aa11bc9358a 100644 --- a/pandas/tests/series/test_logical_ops.py +++ b/pandas/tests/series/test_logical_ops.py @@ -286,7 +286,6 @@ def test_reversed_xor_with_index_returns_index(self): marks=pytest.mark.xfail( reason="GH#22092 Index __and__ returns Index intersection", raises=AssertionError, - strict=True, ), ), pytest.param( @@ -294,7 +293,6 @@ def test_reversed_xor_with_index_returns_index(self): marks=pytest.mark.xfail( reason="GH#22092 Index __or__ returns Index union", raises=AssertionError, - strict=True, ), ), ],
And assorted cleanups.
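The `indexer_sli` fixture that the rewritten NaT-assignment tests lean on is pandas' parametrized indexing helper. Below is a minimal sketch of the pattern, assuming only the `tm.setitem`/`tm.loc`/`tm.iloc` wrappers already referenced in the diff; the fixture body and the example test are illustrative, not the exact conftest code:

```python
import pytest

import pandas._testing as tm
from pandas import Series


@pytest.fixture(params=[tm.setitem, tm.loc, tm.iloc])
def indexer_sli(request):
    # Each param is a callable: tm.setitem returns the object itself,
    # tm.loc returns obj.loc, and tm.iloc returns obj.iloc.
    return request.param


def test_assign_first_element(indexer_sli):
    # One body now exercises ser[0], ser.loc[0], and ser.iloc[0],
    # replacing three near-identical copies of the same assertion.
    ser = Series([1, 2, 3])
    indexer_sli(ser)[0] = 99
    assert ser[0] == 99
```

With a default `RangeIndex` the label `0` and the position `0` coincide, so the same assignment is valid for all three indexing styles, which is what lets the diff collapse the duplicated `ser[0]`/`ser.loc[0]`/`ser.iloc[0]` blocks into a single parametrized one.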
https://api.github.com/repos/pandas-dev/pandas/pulls/39661
2021-02-08T00:02:04Z
2021-02-08T13:48:00Z
2021-02-08T13:48:00Z
2021-02-08T15:04:51Z
CLN: Styler Types for CSS variables on `ctx` object.
diff --git a/doc/source/whatsnew/v0.20.0.rst b/doc/source/whatsnew/v0.20.0.rst index 5dac3a26424a8..ad8a23882e1e8 100644 --- a/doc/source/whatsnew/v0.20.0.rst +++ b/doc/source/whatsnew/v0.20.0.rst @@ -374,7 +374,7 @@ For example, after running the following, ``styled.xlsx`` renders as below: df.iloc[0, 2] = np.nan df styled = (df.style - .applymap(lambda val: 'color: %s' % 'red' if val < 0 else 'black') + .applymap(lambda val: 'color:red;' if val < 0 else 'color:black;') .highlight_max()) styled.to_excel('styled.xlsx', engine='openpyxl') diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index bf67ff6525005..1e9a96b536974 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -65,7 +65,8 @@ Other enhancements - :meth:`DataFrame.plot.scatter` can now accept a categorical column as the argument to ``c`` (:issue:`12380`, :issue:`31357`) - :meth:`.Styler.set_tooltips` allows on hover tooltips to be added to styled HTML dataframes (:issue:`35643`, :issue:`21266`, :issue:`39317`) - :meth:`.Styler.set_tooltips_class` and :meth:`.Styler.set_table_styles` amended to optionally allow certain css-string input arguments (:issue:`39564`) -- :meth:`.Styler.apply` now more consistently accepts ndarray function returns, i.e. in all cases for ``axis`` is ``0, 1 or None``. (:issue:`39359`) +- :meth:`.Styler.apply` now more consistently accepts ndarray function returns, i.e. in all cases for ``axis`` is ``0, 1 or None`` (:issue:`39359`) +- :meth:`.Styler.apply` and :meth:`.Styler.applymap` now raise errors if wrong format CSS is passed on render (:issue:`39660`) - :meth:`Series.loc.__getitem__` and :meth:`Series.loc.__setitem__` with :class:`MultiIndex` now raising helpful error message when indexer has too many dimensions (:issue:`35349`) - :meth:`pandas.read_stata` and :class:`StataReader` support reading data from compressed files. 
- Add support for parsing ``ISO 8601``-like timestamps with negative signs to :meth:`pandas.Timedelta` (:issue:`37172`) diff --git a/pandas/io/formats/excel.py b/pandas/io/formats/excel.py index adfe1f285f2ee..1ec2f7bfdd4be 100644 --- a/pandas/io/formats/excel.py +++ b/pandas/io/formats/excel.py @@ -773,7 +773,8 @@ def _generate_body(self, coloffset: int) -> Iterable[ExcelCell]: series = self.df.iloc[:, colidx] for i, val in enumerate(series): if styles is not None: - xlstyle = self.style_converter(";".join(styles[i, colidx])) + css = ";".join([a + ":" + str(v) for (a, v) in styles[i, colidx]]) + xlstyle = self.style_converter(css) yield ExcelCell(self.rowcounter + i, colidx + coloffset, val, xlstyle) def get_formatted_cells(self) -> Iterable[ExcelCell]: diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py index 1e2148125a9d1..c3218ccb91f25 100644 --- a/pandas/io/formats/style.py +++ b/pandas/io/formats/style.py @@ -53,8 +53,10 @@ ) jinja2 = import_optional_dependency("jinja2", extra="DataFrame.style requires jinja2.") -CSSSequence = Sequence[Tuple[str, Union[str, int, float]]] -CSSProperties = Union[str, CSSSequence] + +CSSPair = Tuple[str, Union[str, int, float]] +CSSList = List[CSSPair] +CSSProperties = Union[str, CSSList] CSSStyles = List[Dict[str, CSSProperties]] try: @@ -194,7 +196,7 @@ def __init__( # assign additional default vars self.hidden_index: bool = False self.hidden_columns: Sequence[int] = [] - self.ctx: DefaultDict[Tuple[int, int], List[str]] = defaultdict(list) + self.ctx: DefaultDict[Tuple[int, int], CSSList] = defaultdict(list) self.cell_context: Dict[str, Any] = {} self._todo: List[Tuple[Callable, Tuple, Dict]] = [] self.tooltips: Optional[_Tooltips] = None @@ -414,7 +416,8 @@ def _translate(self): clabels = [[x] for x in clabels] clabels = list(zip(*clabels)) - cellstyle_map = defaultdict(list) + cellstyle_map: DefaultDict[Tuple[CSSPair, ...], List[str]] = defaultdict(list) + head = [] for r in range(n_clvls): @@ -531,25 +534,21 @@ def _translate(self): } # only add an id if the cell has a style - props = [] + props: CSSList = [] if self.cell_ids or (r, c) in ctx: row_dict["id"] = "_".join(cs[1:]) - for x in ctx[r, c]: - # have to handle empty styles like [''] - if x.count(":"): - props.append(tuple(x.split(":"))) - else: - props.append(("", "")) + props.extend(ctx[r, c]) # add custom classes from cell context cs.extend(cell_context.get("data", {}).get(r, {}).get(c, [])) row_dict["class"] = " ".join(cs) row_es.append(row_dict) - cellstyle_map[tuple(props)].append(f"row{r}_col{c}") + if props: # (), [] won't be in cellstyle_map, cellstyle respectively + cellstyle_map[tuple(props)].append(f"row{r}_col{c}") body.append(row_es) - cellstyle = [ + cellstyle: List[Dict[str, Union[CSSList, List[str]]]] = [ {"props": list(props), "selectors": selectors} for props, selectors in cellstyle_map.items() ] @@ -755,19 +754,14 @@ def render(self, **kwargs) -> str: self._compute() # TODO: namespace all the pandas keys d = self._translate() - # filter out empty styles, every cell will have a class - # but the list of props may just be [['', '']]. - # so we have the nested anys below - trimmed = [x for x in d["cellstyle"] if any(any(y) for y in x["props"])] - d["cellstyle"] = trimmed d.update(kwargs) return self.template.render(**d) def _update_ctx(self, attrs: DataFrame) -> None: """ - Update the state of the Styler. + Update the state of the Styler for data cells. - Collects a mapping of {index_label: ['<property>: <value>']}. 
+ Collects a mapping of {index_label: [('<property>', '<value>'), ..]}. Parameters ---------- @@ -776,20 +770,13 @@ def _update_ctx(self, attrs: DataFrame) -> None: Whitespace shouldn't matter and the final trailing ';' shouldn't matter. """ - coli = {k: i for i, k in enumerate(self.columns)} - rowi = {k: i for i, k in enumerate(self.index)} - for jj in range(len(attrs.columns)): - cn = attrs.columns[jj] - j = coli[cn] + for cn in attrs.columns: for rn, c in attrs[[cn]].itertuples(): if not c: continue - c = c.rstrip(";") - if not c: - continue - i = rowi[rn] - for pair in c.split(";"): - self.ctx[(i, j)].append(pair) + css_list = _maybe_convert_css_to_tuples(c) + i, j = self.index.get_loc(rn), self.columns.get_loc(cn) + self.ctx[(i, j)].extend(css_list) def _copy(self, deepcopy: bool = False) -> Styler: styler = Styler( @@ -2068,7 +2055,7 @@ def _maybe_wrap_formatter( raise TypeError(msg) -def _maybe_convert_css_to_tuples(style: CSSProperties) -> CSSSequence: +def _maybe_convert_css_to_tuples(style: CSSProperties) -> CSSList: """ Convert css-string to sequence of tuples format if needed. 'color:red; border:1px solid black;' -> [('color', 'red'), diff --git a/pandas/tests/io/formats/test_style.py b/pandas/tests/io/formats/test_style.py index b8df18d593667..4304ccc94cc7f 100644 --- a/pandas/tests/io/formats/test_style.py +++ b/pandas/tests/io/formats/test_style.py @@ -19,6 +19,19 @@ ) +def bar_grad(a=None, b=None, c=None, d=None): + """Used in multiple tests to simplify formatting of expected result""" + ret = [("width", "10em"), ("height", "80%")] + if all(x is None for x in [a, b, c, d]): + return ret + return ret + [ + ( + "background", + f"linear-gradient(90deg,{','.join(x for x in [a, b, c, d] if x)})", + ) + ] + + class TestStyler: def setup_method(self, method): np.random.seed(24) @@ -61,24 +74,15 @@ def test_repr_html_mathjax(self): def test_update_ctx(self): self.styler._update_ctx(self.attrs) - expected = {(0, 0): ["color: red"], (1, 0): ["color: blue"]} + expected = {(0, 0): [("color", "red")], (1, 0): [("color", "blue")]} assert self.styler.ctx == expected - def test_update_ctx_flatten_multi(self): - attrs = DataFrame({"A": ["color: red; foo: bar", "color: blue; foo: baz"]}) + def test_update_ctx_flatten_multi_and_trailing_semi(self): + attrs = DataFrame({"A": ["color: red; foo: bar", "color:blue ; foo: baz;"]}) self.styler._update_ctx(attrs) expected = { - (0, 0): ["color: red", " foo: bar"], - (1, 0): ["color: blue", " foo: baz"], - } - assert self.styler.ctx == expected - - def test_update_ctx_flatten_multi_traliing_semi(self): - attrs = DataFrame({"A": ["color: red; foo: bar;", "color: blue; foo: baz;"]}) - self.styler._update_ctx(attrs) - expected = { - (0, 0): ["color: red", " foo: bar"], - (1, 0): ["color: blue", " foo: baz"], + (0, 0): [("color", "red"), ("foo", "bar")], + (1, 0): [("color", "blue"), ("foo", "baz")], } assert self.styler.ctx == expected @@ -141,7 +145,7 @@ def test_multiple_render(self): s.render() # do 2 renders to ensure css styles not duplicated assert ( '<style type="text/css">\n#T__row0_col0, #T__row1_col0 {\n' - " color: red;\n}\n</style>" in s.render() + " color: red;\n}\n</style>" in s.render() ) def test_render_empty_dfs(self): @@ -167,7 +171,7 @@ def test_set_properties(self): df = DataFrame({"A": [0, 1]}) result = df.style.set_properties(color="white", size="10px")._compute().ctx # order is deterministic - v = ["color: white", "size: 10px"] + v = [("color", "white"), ("size", "10px")] expected = {(0, 0): v, (1, 0): v} assert result.keys() == 
expected.keys() for v1, v2 in zip(result.values(), expected.values()): @@ -180,7 +184,7 @@ def test_set_properties_subset(self): ._compute() .ctx ) - expected = {(0, 0): ["color: white"]} + expected = {(0, 0): [("color", "white")]} assert result == expected def test_empty_index_name_doesnt_display(self): @@ -313,19 +317,19 @@ def test_apply_axis(self): assert len(result.ctx) == 0 result._compute() expected = { - (0, 0): ["val: 1"], - (0, 1): ["val: 1"], - (1, 0): ["val: 1"], - (1, 1): ["val: 1"], + (0, 0): [("val", "1")], + (0, 1): [("val", "1")], + (1, 0): [("val", "1")], + (1, 1): [("val", "1")], } assert result.ctx == expected result = df.style.apply(f, axis=0) expected = { - (0, 0): ["val: 0"], - (0, 1): ["val: 1"], - (1, 0): ["val: 0"], - (1, 1): ["val: 1"], + (0, 0): [("val", "0")], + (0, 1): [("val", "1")], + (1, 0): [("val", "0")], + (1, 1): [("val", "1")], } result._compute() assert result.ctx == expected @@ -333,53 +337,52 @@ def test_apply_axis(self): result._compute() assert result.ctx == expected - def test_apply_subset(self): - axes = [0, 1] - slices = [ + @pytest.mark.parametrize( + "slice_", + [ pd.IndexSlice[:], pd.IndexSlice[:, ["A"]], pd.IndexSlice[[1], :], pd.IndexSlice[[1], ["A"]], pd.IndexSlice[:2, ["A", "B"]], - ] - for ax in axes: - for slice_ in slices: - result = ( - self.df.style.apply(self.h, axis=ax, subset=slice_, foo="baz") - ._compute() - .ctx - ) - expected = { - (r, c): ["color: baz"] - for r, row in enumerate(self.df.index) - for c, col in enumerate(self.df.columns) - if row in self.df.loc[slice_].index - and col in self.df.loc[slice_].columns - } - assert result == expected - - def test_applymap_subset(self): - def f(x): - return "foo: bar" + ], + ) + @pytest.mark.parametrize("axis", [0, 1]) + def test_apply_subset(self, slice_, axis): + result = ( + self.df.style.apply(self.h, axis=axis, subset=slice_, foo="baz") + ._compute() + .ctx + ) + expected = { + (r, c): [("color", "baz")] + for r, row in enumerate(self.df.index) + for c, col in enumerate(self.df.columns) + if row in self.df.loc[slice_].index and col in self.df.loc[slice_].columns + } + assert result == expected - slices = [ + @pytest.mark.parametrize( + "slice_", + [ pd.IndexSlice[:], pd.IndexSlice[:, ["A"]], pd.IndexSlice[[1], :], pd.IndexSlice[[1], ["A"]], pd.IndexSlice[:2, ["A", "B"]], - ] - - for slice_ in slices: - result = self.df.style.applymap(f, subset=slice_)._compute().ctx - expected = { - (r, c): ["foo: bar"] - for r, row in enumerate(self.df.index) - for c, col in enumerate(self.df.columns) - if row in self.df.loc[slice_].index - and col in self.df.loc[slice_].columns - } - assert result == expected + ], + ) + def test_applymap_subset(self, slice_): + result = ( + self.df.style.applymap(lambda x: "color:baz;", subset=slice_)._compute().ctx + ) + expected = { + (r, c): [("color", "baz")] + for r, row in enumerate(self.df.index) + for c, col in enumerate(self.df.columns) + if row in self.df.loc[slice_].index and col in self.df.loc[slice_].columns + } + assert result == expected @pytest.mark.parametrize( "slice_", @@ -430,14 +433,24 @@ def f(x): result = self.df.style.where(f, style1)._compute().ctx expected = { - (r, c): [style1] + (r, c): [("foo", "bar")] for r, row in enumerate(self.df.index) for c, col in enumerate(self.df.columns) if f(self.df.loc[row, col]) } assert result == expected - def test_where_subset(self): + @pytest.mark.parametrize( + "slice_", + [ + pd.IndexSlice[:], + pd.IndexSlice[:, ["A"]], + pd.IndexSlice[[1], :], + pd.IndexSlice[[1], ["A"]], + pd.IndexSlice[:2, 
["A", "B"]], + ], + ) + def test_where_subset(self, slice_): # GH 17474 def f(x): return x > 0.5 @@ -445,26 +458,14 @@ def f(x): style1 = "foo: bar" style2 = "baz: foo" - slices = [ - pd.IndexSlice[:], - pd.IndexSlice[:, ["A"]], - pd.IndexSlice[[1], :], - pd.IndexSlice[[1], ["A"]], - pd.IndexSlice[:2, ["A", "B"]], - ] - - for slice_ in slices: - result = ( - self.df.style.where(f, style1, style2, subset=slice_)._compute().ctx - ) - expected = { - (r, c): [style1 if f(self.df.loc[row, col]) else style2] - for r, row in enumerate(self.df.index) - for c, col in enumerate(self.df.columns) - if row in self.df.loc[slice_].index - and col in self.df.loc[slice_].columns - } - assert result == expected + result = self.df.style.where(f, style1, style2, subset=slice_)._compute().ctx + expected = { + (r, c): [("foo", "bar") if f(self.df.loc[row, col]) else ("baz", "foo")] + for r, row in enumerate(self.df.index) + for c, col in enumerate(self.df.columns) + if row in self.df.loc[slice_].index and col in self.df.loc[slice_].columns + } + assert result == expected def test_where_subset_compare_with_applymap(self): # GH 17474 @@ -495,11 +496,11 @@ def g(x): def test_empty(self): df = DataFrame({"A": [1, 0]}) s = df.style - s.ctx = {(0, 0): ["color: red"], (1, 0): [""]} + s.ctx = {(0, 0): [("color", "red")], (1, 0): [("", "")]} result = s._translate()["cellstyle"] expected = [ - {"props": [("color", " red")], "selectors": ["row0_col0"]}, + {"props": [("color", "red")], "selectors": ["row0_col0"]}, {"props": [("", "")], "selectors": ["row1_col0"]}, ] assert result == expected @@ -507,11 +508,11 @@ def test_empty(self): def test_duplicate(self): df = DataFrame({"A": [1, 0]}) s = df.style - s.ctx = {(0, 0): ["color: red"], (1, 0): ["color: red"]} + s.ctx = {(0, 0): [("color", "red")], (1, 0): [("color", "red")]} result = s._translate()["cellstyle"] expected = [ - {"props": [("color", " red")], "selectors": ["row0_col0", "row1_col0"]} + {"props": [("color", "red")], "selectors": ["row0_col0", "row1_col0"]} ] assert result == expected @@ -519,35 +520,17 @@ def test_bar_align_left(self): df = DataFrame({"A": [0, 1, 2]}) result = df.style.bar()._compute().ctx expected = { - (0, 0): ["width: 10em", " height: 80%"], - (1, 0): [ - "width: 10em", - " height: 80%", - "background: linear-gradient(" - "90deg,#d65f5f 50.0%, transparent 50.0%)", - ], - (2, 0): [ - "width: 10em", - " height: 80%", - "background: linear-gradient(" - "90deg,#d65f5f 100.0%, transparent 100.0%)", - ], + (0, 0): bar_grad(), + (1, 0): bar_grad("#d65f5f 50.0%", " transparent 50.0%"), + (2, 0): bar_grad("#d65f5f 100.0%", " transparent 100.0%"), } assert result == expected result = df.style.bar(color="red", width=50)._compute().ctx expected = { - (0, 0): ["width: 10em", " height: 80%"], - (1, 0): [ - "width: 10em", - " height: 80%", - "background: linear-gradient(90deg,red 25.0%, transparent 25.0%)", - ], - (2, 0): [ - "width: 10em", - " height: 80%", - "background: linear-gradient(90deg,red 50.0%, transparent 50.0%)", - ], + (0, 0): bar_grad(), + (1, 0): bar_grad("red 25.0%", " transparent 25.0%"), + (2, 0): bar_grad("red 50.0%", " transparent 50.0%"), } assert result == expected @@ -562,118 +545,54 @@ def test_bar_align_left_0points(self): df = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) result = df.style.bar()._compute().ctx expected = { - (0, 0): ["width: 10em", " height: 80%"], - (0, 1): ["width: 10em", " height: 80%"], - (0, 2): ["width: 10em", " height: 80%"], - (1, 0): [ - "width: 10em", - " height: 80%", - "background: 
linear-gradient(90deg,#d65f5f 50.0%, transparent 50.0%)", - ], - (1, 1): [ - "width: 10em", - " height: 80%", - "background: linear-gradient(90deg,#d65f5f 50.0%, transparent 50.0%)", - ], - (1, 2): [ - "width: 10em", - " height: 80%", - "background: linear-gradient(90deg,#d65f5f 50.0%, transparent 50.0%)", - ], - (2, 0): [ - "width: 10em", - " height: 80%", - "background: linear-gradient(90deg,#d65f5f 100.0%" - ", transparent 100.0%)", - ], - (2, 1): [ - "width: 10em", - " height: 80%", - "background: linear-gradient(90deg,#d65f5f 100.0%" - ", transparent 100.0%)", - ], - (2, 2): [ - "width: 10em", - " height: 80%", - "background: linear-gradient(90deg,#d65f5f 100.0%" - ", transparent 100.0%)", - ], + (0, 0): bar_grad(), + (0, 1): bar_grad(), + (0, 2): bar_grad(), + (1, 0): bar_grad("#d65f5f 50.0%", " transparent 50.0%"), + (1, 1): bar_grad("#d65f5f 50.0%", " transparent 50.0%"), + (1, 2): bar_grad("#d65f5f 50.0%", " transparent 50.0%"), + (2, 0): bar_grad("#d65f5f 100.0%", " transparent 100.0%"), + (2, 1): bar_grad("#d65f5f 100.0%", " transparent 100.0%"), + (2, 2): bar_grad("#d65f5f 100.0%", " transparent 100.0%"), } assert result == expected result = df.style.bar(axis=1)._compute().ctx expected = { - (0, 0): ["width: 10em", " height: 80%"], - (0, 1): [ - "width: 10em", - " height: 80%", - "background: linear-gradient(90deg,#d65f5f 50.0%, transparent 50.0%)", - ], - (0, 2): [ - "width: 10em", - " height: 80%", - "background: linear-gradient(90deg,#d65f5f 100.0%" - ", transparent 100.0%)", - ], - (1, 0): ["width: 10em", " height: 80%"], - (1, 1): [ - "width: 10em", - " height: 80%", - "background: linear-gradient(90deg,#d65f5f 50.0%" - ", transparent 50.0%)", - ], - (1, 2): [ - "width: 10em", - " height: 80%", - "background: linear-gradient(90deg,#d65f5f 100.0%" - ", transparent 100.0%)", - ], - (2, 0): ["width: 10em", " height: 80%"], - (2, 1): [ - "width: 10em", - " height: 80%", - "background: linear-gradient(90deg,#d65f5f 50.0%" - ", transparent 50.0%)", - ], - (2, 2): [ - "width: 10em", - " height: 80%", - "background: linear-gradient(90deg,#d65f5f 100.0%" - ", transparent 100.0%)", - ], + (0, 0): bar_grad(), + (0, 1): bar_grad("#d65f5f 50.0%", " transparent 50.0%"), + (0, 2): bar_grad("#d65f5f 100.0%", " transparent 100.0%"), + (1, 0): bar_grad(), + (1, 1): bar_grad("#d65f5f 50.0%", " transparent 50.0%"), + (1, 2): bar_grad("#d65f5f 100.0%", " transparent 100.0%"), + (2, 0): bar_grad(), + (2, 1): bar_grad("#d65f5f 50.0%", " transparent 50.0%"), + (2, 2): bar_grad("#d65f5f 100.0%", " transparent 100.0%"), } assert result == expected def test_bar_align_mid_pos_and_neg(self): df = DataFrame({"A": [-10, 0, 20, 90]}) - result = df.style.bar(align="mid", color=["#d65f5f", "#5fba7d"])._compute().ctx - expected = { - (0, 0): [ - "width: 10em", - " height: 80%", - "background: linear-gradient(90deg," - "#d65f5f 10.0%, transparent 10.0%)", - ], - (1, 0): ["width: 10em", " height: 80%"], - (2, 0): [ - "width: 10em", - " height: 80%", - "background: linear-gradient(90deg, " - "transparent 10.0%, #5fba7d 10.0%" - ", #5fba7d 30.0%, transparent 30.0%)", - ], - (3, 0): [ - "width: 10em", - " height: 80%", - "background: linear-gradient(90deg, " - "transparent 10.0%, " - "#5fba7d 10.0%, #5fba7d 100.0%, " - "transparent 100.0%)", - ], + (0, 0): bar_grad( + "#d65f5f 10.0%", + " transparent 10.0%", + ), + (1, 0): bar_grad(), + (2, 0): bar_grad( + " transparent 10.0%", + " #5fba7d 10.0%", + " #5fba7d 30.0%", + " transparent 30.0%", + ), + (3, 0): bar_grad( + " transparent 10.0%", + " #5fba7d 10.0%", + " 
#5fba7d 100.0%", + " transparent 100.0%", + ), } - assert result == expected def test_bar_align_mid_all_pos(self): @@ -682,30 +601,22 @@ def test_bar_align_mid_all_pos(self): result = df.style.bar(align="mid", color=["#d65f5f", "#5fba7d"])._compute().ctx expected = { - (0, 0): [ - "width: 10em", - " height: 80%", - "background: linear-gradient(90deg," - "#5fba7d 10.0%, transparent 10.0%)", - ], - (1, 0): [ - "width: 10em", - " height: 80%", - "background: linear-gradient(90deg," - "#5fba7d 20.0%, transparent 20.0%)", - ], - (2, 0): [ - "width: 10em", - " height: 80%", - "background: linear-gradient(90deg," - "#5fba7d 50.0%, transparent 50.0%)", - ], - (3, 0): [ - "width: 10em", - " height: 80%", - "background: linear-gradient(90deg," - "#5fba7d 100.0%, transparent 100.0%)", - ], + (0, 0): bar_grad( + "#5fba7d 10.0%", + " transparent 10.0%", + ), + (1, 0): bar_grad( + "#5fba7d 20.0%", + " transparent 20.0%", + ), + (2, 0): bar_grad( + "#5fba7d 50.0%", + " transparent 50.0%", + ), + (3, 0): bar_grad( + "#5fba7d 100.0%", + " transparent 100.0%", + ), } assert result == expected @@ -716,36 +627,28 @@ def test_bar_align_mid_all_neg(self): result = df.style.bar(align="mid", color=["#d65f5f", "#5fba7d"])._compute().ctx expected = { - (0, 0): [ - "width: 10em", - " height: 80%", - "background: linear-gradient(90deg," - "#d65f5f 100.0%, transparent 100.0%)", - ], - (1, 0): [ - "width: 10em", - " height: 80%", - "background: linear-gradient(90deg, " - "transparent 40.0%, " - "#d65f5f 40.0%, #d65f5f 100.0%, " - "transparent 100.0%)", - ], - (2, 0): [ - "width: 10em", - " height: 80%", - "background: linear-gradient(90deg, " - "transparent 70.0%, " - "#d65f5f 70.0%, #d65f5f 100.0%, " - "transparent 100.0%)", - ], - (3, 0): [ - "width: 10em", - " height: 80%", - "background: linear-gradient(90deg, " - "transparent 80.0%, " - "#d65f5f 80.0%, #d65f5f 100.0%, " - "transparent 100.0%)", - ], + (0, 0): bar_grad( + "#d65f5f 100.0%", + " transparent 100.0%", + ), + (1, 0): bar_grad( + " transparent 40.0%", + " #d65f5f 40.0%", + " #d65f5f 100.0%", + " transparent 100.0%", + ), + (2, 0): bar_grad( + " transparent 70.0%", + " #d65f5f 70.0%", + " #d65f5f 100.0%", + " transparent 100.0%", + ), + (3, 0): bar_grad( + " transparent 80.0%", + " #d65f5f 80.0%", + " #d65f5f 100.0%", + " transparent 100.0%", + ), } assert result == expected @@ -759,28 +662,25 @@ def test_bar_align_zero_pos_and_neg(self): .ctx ) expected = { - (0, 0): [ - "width: 10em", - " height: 80%", - "background: linear-gradient(90deg, " - "transparent 40.0%, #d65f5f 40.0%, " - "#d65f5f 45.0%, transparent 45.0%)", - ], - (1, 0): ["width: 10em", " height: 80%"], - (2, 0): [ - "width: 10em", - " height: 80%", - "background: linear-gradient(90deg, " - "transparent 45.0%, #5fba7d 45.0%, " - "#5fba7d 55.0%, transparent 55.0%)", - ], - (3, 0): [ - "width: 10em", - " height: 80%", - "background: linear-gradient(90deg, " - "transparent 45.0%, #5fba7d 45.0%, " - "#5fba7d 90.0%, transparent 90.0%)", - ], + (0, 0): bar_grad( + " transparent 40.0%", + " #d65f5f 40.0%", + " #d65f5f 45.0%", + " transparent 45.0%", + ), + (1, 0): bar_grad(), + (2, 0): bar_grad( + " transparent 45.0%", + " #5fba7d 45.0%", + " #5fba7d 55.0%", + " transparent 55.0%", + ), + (3, 0): bar_grad( + " transparent 45.0%", + " #5fba7d 45.0%", + " #5fba7d 90.0%", + " transparent 90.0%", + ), } assert result == expected @@ -788,25 +688,19 @@ def test_bar_align_left_axis_none(self): df = DataFrame({"A": [0, 1], "B": [2, 4]}) result = df.style.bar(axis=None)._compute().ctx expected = { - (0, 0): 
["width: 10em", " height: 80%"], - (1, 0): [ - "width: 10em", - " height: 80%", - "background: linear-gradient(90deg," - "#d65f5f 25.0%, transparent 25.0%)", - ], - (0, 1): [ - "width: 10em", - " height: 80%", - "background: linear-gradient(90deg," - "#d65f5f 50.0%, transparent 50.0%)", - ], - (1, 1): [ - "width: 10em", - " height: 80%", - "background: linear-gradient(90deg," - "#d65f5f 100.0%, transparent 100.0%)", - ], + (0, 0): bar_grad(), + (1, 0): bar_grad( + "#d65f5f 25.0%", + " transparent 25.0%", + ), + (0, 1): bar_grad( + "#d65f5f 50.0%", + " transparent 50.0%", + ), + (1, 1): bar_grad( + "#d65f5f 100.0%", + " transparent 100.0%", + ), } assert result == expected @@ -814,28 +708,25 @@ def test_bar_align_zero_axis_none(self): df = DataFrame({"A": [0, 1], "B": [-2, 4]}) result = df.style.bar(align="zero", axis=None)._compute().ctx expected = { - (0, 0): ["width: 10em", " height: 80%"], - (1, 0): [ - "width: 10em", - " height: 80%", - "background: linear-gradient(90deg, " - "transparent 50.0%, #d65f5f 50.0%, " - "#d65f5f 62.5%, transparent 62.5%)", - ], - (0, 1): [ - "width: 10em", - " height: 80%", - "background: linear-gradient(90deg, " - "transparent 25.0%, #d65f5f 25.0%, " - "#d65f5f 50.0%, transparent 50.0%)", - ], - (1, 1): [ - "width: 10em", - " height: 80%", - "background: linear-gradient(90deg, " - "transparent 50.0%, #d65f5f 50.0%, " - "#d65f5f 100.0%, transparent 100.0%)", - ], + (0, 0): bar_grad(), + (1, 0): bar_grad( + " transparent 50.0%", + " #d65f5f 50.0%", + " #d65f5f 62.5%", + " transparent 62.5%", + ), + (0, 1): bar_grad( + " transparent 25.0%", + " #d65f5f 25.0%", + " #d65f5f 50.0%", + " transparent 50.0%", + ), + (1, 1): bar_grad( + " transparent 50.0%", + " #d65f5f 50.0%", + " #d65f5f 100.0%", + " transparent 100.0%", + ), } assert result == expected @@ -843,27 +734,23 @@ def test_bar_align_mid_axis_none(self): df = DataFrame({"A": [0, 1], "B": [-2, 4]}) result = df.style.bar(align="mid", axis=None)._compute().ctx expected = { - (0, 0): ["width: 10em", " height: 80%"], - (1, 0): [ - "width: 10em", - " height: 80%", - "background: linear-gradient(90deg, " - "transparent 33.3%, #d65f5f 33.3%, " - "#d65f5f 50.0%, transparent 50.0%)", - ], - (0, 1): [ - "width: 10em", - " height: 80%", - "background: linear-gradient(90deg," - "#d65f5f 33.3%, transparent 33.3%)", - ], - (1, 1): [ - "width: 10em", - " height: 80%", - "background: linear-gradient(90deg, " - "transparent 33.3%, #d65f5f 33.3%, " - "#d65f5f 100.0%, transparent 100.0%)", - ], + (0, 0): bar_grad(), + (1, 0): bar_grad( + " transparent 33.3%", + " #d65f5f 33.3%", + " #d65f5f 50.0%", + " transparent 50.0%", + ), + (0, 1): bar_grad( + "#d65f5f 33.3%", + " transparent 33.3%", + ), + (1, 1): bar_grad( + " transparent 33.3%", + " #d65f5f 33.3%", + " #d65f5f 100.0%", + " transparent 100.0%", + ), } assert result == expected @@ -871,28 +758,25 @@ def test_bar_align_mid_vmin(self): df = DataFrame({"A": [0, 1], "B": [-2, 4]}) result = df.style.bar(align="mid", axis=None, vmin=-6)._compute().ctx expected = { - (0, 0): ["width: 10em", " height: 80%"], - (1, 0): [ - "width: 10em", - " height: 80%", - "background: linear-gradient(90deg, " - "transparent 60.0%, #d65f5f 60.0%, " - "#d65f5f 70.0%, transparent 70.0%)", - ], - (0, 1): [ - "width: 10em", - " height: 80%", - "background: linear-gradient(90deg, " - "transparent 40.0%, #d65f5f 40.0%, " - "#d65f5f 60.0%, transparent 60.0%)", - ], - (1, 1): [ - "width: 10em", - " height: 80%", - "background: linear-gradient(90deg, " - "transparent 60.0%, #d65f5f 60.0%, " - "#d65f5f 
100.0%, transparent 100.0%)", - ], + (0, 0): bar_grad(), + (1, 0): bar_grad( + " transparent 60.0%", + " #d65f5f 60.0%", + " #d65f5f 70.0%", + " transparent 70.0%", + ), + (0, 1): bar_grad( + " transparent 40.0%", + " #d65f5f 40.0%", + " #d65f5f 60.0%", + " transparent 60.0%", + ), + (1, 1): bar_grad( + " transparent 60.0%", + " #d65f5f 60.0%", + " #d65f5f 100.0%", + " transparent 100.0%", + ), } assert result == expected @@ -900,27 +784,23 @@ def test_bar_align_mid_vmax(self): df = DataFrame({"A": [0, 1], "B": [-2, 4]}) result = df.style.bar(align="mid", axis=None, vmax=8)._compute().ctx expected = { - (0, 0): ["width: 10em", " height: 80%"], - (1, 0): [ - "width: 10em", - " height: 80%", - "background: linear-gradient(90deg, " - "transparent 20.0%, #d65f5f 20.0%, " - "#d65f5f 30.0%, transparent 30.0%)", - ], - (0, 1): [ - "width: 10em", - " height: 80%", - "background: linear-gradient(90deg," - "#d65f5f 20.0%, transparent 20.0%)", - ], - (1, 1): [ - "width: 10em", - " height: 80%", - "background: linear-gradient(90deg, " - "transparent 20.0%, #d65f5f 20.0%, " - "#d65f5f 60.0%, transparent 60.0%)", - ], + (0, 0): bar_grad(), + (1, 0): bar_grad( + " transparent 20.0%", + " #d65f5f 20.0%", + " #d65f5f 30.0%", + " transparent 30.0%", + ), + (0, 1): bar_grad( + "#d65f5f 20.0%", + " transparent 20.0%", + ), + (1, 1): bar_grad( + " transparent 20.0%", + " #d65f5f 20.0%", + " #d65f5f 60.0%", + " transparent 60.0%", + ), } assert result == expected @@ -928,28 +808,25 @@ def test_bar_align_mid_vmin_vmax_wide(self): df = DataFrame({"A": [0, 1], "B": [-2, 4]}) result = df.style.bar(align="mid", axis=None, vmin=-3, vmax=7)._compute().ctx expected = { - (0, 0): ["width: 10em", " height: 80%"], - (1, 0): [ - "width: 10em", - " height: 80%", - "background: linear-gradient(90deg, " - "transparent 30.0%, #d65f5f 30.0%, " - "#d65f5f 40.0%, transparent 40.0%)", - ], - (0, 1): [ - "width: 10em", - " height: 80%", - "background: linear-gradient(90deg, " - "transparent 10.0%, #d65f5f 10.0%, " - "#d65f5f 30.0%, transparent 30.0%)", - ], - (1, 1): [ - "width: 10em", - " height: 80%", - "background: linear-gradient(90deg, " - "transparent 30.0%, #d65f5f 30.0%, " - "#d65f5f 70.0%, transparent 70.0%)", - ], + (0, 0): bar_grad(), + (1, 0): bar_grad( + " transparent 30.0%", + " #d65f5f 30.0%", + " #d65f5f 40.0%", + " transparent 40.0%", + ), + (0, 1): bar_grad( + " transparent 10.0%", + " #d65f5f 10.0%", + " #d65f5f 30.0%", + " transparent 30.0%", + ), + (1, 1): bar_grad( + " transparent 30.0%", + " #d65f5f 30.0%", + " #d65f5f 70.0%", + " transparent 70.0%", + ), } assert result == expected @@ -957,27 +834,20 @@ def test_bar_align_mid_vmin_vmax_clipping(self): df = DataFrame({"A": [0, 1], "B": [-2, 4]}) result = df.style.bar(align="mid", axis=None, vmin=-1, vmax=3)._compute().ctx expected = { - (0, 0): ["width: 10em", " height: 80%"], - (1, 0): [ - "width: 10em", - " height: 80%", - "background: linear-gradient(90deg, " - "transparent 25.0%, #d65f5f 25.0%, " - "#d65f5f 50.0%, transparent 50.0%)", - ], - (0, 1): [ - "width: 10em", - " height: 80%", - "background: linear-gradient(90deg," - "#d65f5f 25.0%, transparent 25.0%)", - ], - (1, 1): [ - "width: 10em", - " height: 80%", - "background: linear-gradient(90deg, " - "transparent 25.0%, #d65f5f 25.0%, " - "#d65f5f 100.0%, transparent 100.0%)", - ], + (0, 0): bar_grad(), + (1, 0): bar_grad( + " transparent 25.0%", + " #d65f5f 25.0%", + " #d65f5f 50.0%", + " transparent 50.0%", + ), + (0, 1): bar_grad("#d65f5f 25.0%", " transparent 25.0%"), + (1, 1): bar_grad( + " 
transparent 25.0%", + " #d65f5f 25.0%", + " #d65f5f 100.0%", + " transparent 100.0%", + ), } assert result == expected @@ -985,26 +855,19 @@ def test_bar_align_mid_nans(self): df = DataFrame({"A": [1, None], "B": [-1, 3]}) result = df.style.bar(align="mid", axis=None)._compute().ctx expected = { - (0, 0): [ - "width: 10em", - " height: 80%", - "background: linear-gradient(90deg, " - "transparent 25.0%, #d65f5f 25.0%, " - "#d65f5f 50.0%, transparent 50.0%)", - ], - (0, 1): [ - "width: 10em", - " height: 80%", - "background: linear-gradient(90deg," - "#d65f5f 25.0%, transparent 25.0%)", - ], - (1, 1): [ - "width: 10em", - " height: 80%", - "background: linear-gradient(90deg, " - "transparent 25.0%, #d65f5f 25.0%, " - "#d65f5f 100.0%, transparent 100.0%)", - ], + (0, 0): bar_grad( + " transparent 25.0%", + " #d65f5f 25.0%", + " #d65f5f 50.0%", + " transparent 50.0%", + ), + (0, 1): bar_grad("#d65f5f 25.0%", " transparent 25.0%"), + (1, 1): bar_grad( + " transparent 25.0%", + " #d65f5f 25.0%", + " #d65f5f 100.0%", + " transparent 100.0%", + ), } assert result == expected @@ -1012,27 +875,24 @@ def test_bar_align_zero_nans(self): df = DataFrame({"A": [1, None], "B": [-1, 2]}) result = df.style.bar(align="zero", axis=None)._compute().ctx expected = { - (0, 0): [ - "width: 10em", - " height: 80%", - "background: linear-gradient(90deg, " - "transparent 50.0%, #d65f5f 50.0%, " - "#d65f5f 75.0%, transparent 75.0%)", - ], - (0, 1): [ - "width: 10em", - " height: 80%", - "background: linear-gradient(90deg, " - "transparent 25.0%, #d65f5f 25.0%, " - "#d65f5f 50.0%, transparent 50.0%)", - ], - (1, 1): [ - "width: 10em", - " height: 80%", - "background: linear-gradient(90deg, " - "transparent 50.0%, #d65f5f 50.0%, " - "#d65f5f 100.0%, transparent 100.0%)", - ], + (0, 0): bar_grad( + " transparent 50.0%", + " #d65f5f 50.0%", + " #d65f5f 75.0%", + " transparent 75.0%", + ), + (0, 1): bar_grad( + " transparent 25.0%", + " #d65f5f 25.0%", + " #d65f5f 50.0%", + " transparent 50.0%", + ), + (1, 1): bar_grad( + " transparent 50.0%", + " #d65f5f 50.0%", + " #d65f5f 100.0%", + " transparent 100.0%", + ), } assert result == expected @@ -1115,7 +975,7 @@ def test_format_with_bad_na_rep(self): def test_highlight_null(self, null_color="red"): df = DataFrame({"A": [0, np.nan]}) result = df.style.highlight_null()._compute().ctx - expected = {(1, 0): ["background-color: red"]} + expected = {(1, 0): [("background-color", "red")]} assert result == expected def test_highlight_null_subset(self): @@ -1128,8 +988,8 @@ def test_highlight_null_subset(self): .ctx ) expected = { - (1, 0): ["background-color: red"], - (1, 1): ["background-color: green"], + (1, 0): [("background-color", "red")], + (1, 1): [("background-color", "green")], } assert result == expected @@ -1228,7 +1088,7 @@ def f(x): ) result = DataFrame([[1, 2], [3, 4]]).style.apply(f, axis=None)._compute().ctx - assert result[(1, 1)] == ["color: red"] + assert result[(1, 1)] == [("color", "red")] def test_trim(self): result = self.df.style.render() # trim=True @@ -1239,6 +1099,7 @@ def test_trim(self): def test_highlight_max(self): df = DataFrame([[1, 2], [3, 4]], columns=["A", "B"]) + css_seq = [("background-color", "yellow")] # max(df) = min(-df) for max_ in [True, False]: if max_: @@ -1247,35 +1108,35 @@ def test_highlight_max(self): df = -df attr = "highlight_min" result = getattr(df.style, attr)()._compute().ctx - assert result[(1, 1)] == ["background-color: yellow"] + assert result[(1, 1)] == css_seq result = getattr(df.style, 
attr)(color="green")._compute().ctx - assert result[(1, 1)] == ["background-color: green"] + assert result[(1, 1)] == [("background-color", "green")] result = getattr(df.style, attr)(subset="A")._compute().ctx - assert result[(1, 0)] == ["background-color: yellow"] + assert result[(1, 0)] == css_seq result = getattr(df.style, attr)(axis=0)._compute().ctx expected = { - (1, 0): ["background-color: yellow"], - (1, 1): ["background-color: yellow"], + (1, 0): css_seq, + (1, 1): css_seq, } assert result == expected result = getattr(df.style, attr)(axis=1)._compute().ctx expected = { - (0, 1): ["background-color: yellow"], - (1, 1): ["background-color: yellow"], + (0, 1): css_seq, + (1, 1): css_seq, } assert result == expected # separate since we can't negate the strs df["C"] = ["a", "b"] result = df.style.highlight_max()._compute().ctx - expected = {(1, 1): ["background-color: yellow"]} + expected = {(1, 1): css_seq} result = df.style.highlight_min()._compute().ctx - expected = {(0, 0): ["background-color: yellow"]} + expected = {(0, 0): css_seq} def test_export(self): f = lambda x: "color: red" if x > 0 else "color: blue" @@ -1983,7 +1844,7 @@ def test_background_gradient(self): for c_map in [None, "YlOrRd"]: result = df.style.background_gradient(cmap=c_map)._compute().ctx - assert all("#" in x[0] for x in result.values()) + assert all("#" in x[0][1] for x in result.values()) assert result[(0, 0)] == result[(0, 1)] assert result[(1, 0)] == result[(1, 1)] @@ -1991,7 +1852,7 @@ def test_background_gradient(self): df.style.background_gradient(subset=pd.IndexSlice[1, "A"])._compute().ctx ) - assert result[(1, 0)] == ["background-color: #fff7fb", "color: #000000"] + assert result[(1, 0)] == [("background-color", "#fff7fb"), ("color", "#000000")] @pytest.mark.parametrize( "c_map,expected", @@ -1999,15 +1860,15 @@ def test_background_gradient(self): ( None, { - (0, 0): ["background-color: #440154", "color: #f1f1f1"], - (1, 0): ["background-color: #fde725", "color: #000000"], + (0, 0): [("background-color", "#440154"), ("color", "#f1f1f1")], + (1, 0): [("background-color", "#fde725"), ("color", "#000000")], }, ), ( "YlOrRd", { - (0, 0): ["background-color: #ffffcc", "color: #000000"], - (1, 0): ["background-color: #800026", "color: #f1f1f1"], + (0, 0): [("background-color", "#ffffcc"), ("color", "#000000")], + (1, 0): [("background-color", "#800026"), ("color", "#f1f1f1")], }, ), ], @@ -2030,9 +1891,9 @@ def test_text_color_threshold_raises(self, text_color_threshold): def test_background_gradient_axis(self): df = DataFrame([[1, 2], [2, 4]], columns=["A", "B"]) - low = ["background-color: #f7fbff", "color: #000000"] - high = ["background-color: #08306b", "color: #f1f1f1"] - mid = ["background-color: #abd0e6", "color: #000000"] + low = [("background-color", "#f7fbff"), ("color", "#000000")] + high = [("background-color", "#08306b"), ("color", "#f1f1f1")] + mid = [("background-color", "#abd0e6"), ("color", "#000000")] result = df.style.background_gradient(cmap="Blues", axis=0)._compute().ctx assert result[(0, 0)] == low assert result[(0, 1)] == low
- [x] tests changed / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them

`Styler` currently has three formats for keeping track of CSS:

A) `CSSList` type: `[('a1', 'v1'), ('a2', 'v2')]`
B) `List[str]` type: `['a1:v1', 'a2:v2']`
C) `str` type: `'a1:v1; a2:v2;'`

`C` is only ever used as a user input format, and now, as of #39564, `C` can be used in *all* cases for user input (although `A` remains available for backwards compatibility in `.set_table_styles()`).

`A` remains the most algorithmically accessible, and is what `C` is already converted to in some cases for internal variables. `B` is only ever used by one internal variable: the `ctx` object is updated with `B` type converted from `C`. For the final render, this `B` type is converted to `A` type for looping, and in the process whitespace is poorly handled.

This PR eliminates the `B` type and converts `C` directly to `A` (a sketch of that conversion follows below). Tests using the `ctx` object needed updating for the new type.
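For illustration, a minimal sketch of the kind of `C` → `A` conversion described above; the helper name is hypothetical, and the real logic lives in pandas' styler module:

```python
from typing import List, Tuple

CSSList = List[Tuple[str, str]]


def css_str_to_list(style: str) -> CSSList:
    # Convert type C ('a1:v1; a2:v2;') directly to type A
    # ([('a1', 'v1'), ('a2', 'v2')]), stripping the whitespace that the
    # intermediate type B used to carry along.
    return [
        (prop.strip(), val.strip())
        for prop, _, val in (decl.partition(":") for decl in style.split(";"))
        if prop.strip()
    ]


assert css_str_to_list("background-color: red; color: #000000;") == [
    ("background-color", "red"),
    ("color", "#000000"),
]
```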
https://api.github.com/repos/pandas-dev/pandas/pulls/39660
2021-02-07T23:12:16Z
2021-02-19T01:10:21Z
2021-02-19T01:10:21Z
2021-07-07T10:01:22Z
Backport PR #39605: REGR: appending to existing excel file created corrupt files
diff --git a/doc/source/whatsnew/v1.2.2.rst b/doc/source/whatsnew/v1.2.2.rst index b2b7326b9cb04..078ae7bd95a9b 100644 --- a/doc/source/whatsnew/v1.2.2.rst +++ b/doc/source/whatsnew/v1.2.2.rst @@ -21,6 +21,7 @@ Fixed regressions - Fixed regression in :meth:`~DataFrame.to_pickle` failing to create bz2/xz compressed pickle files with ``protocol=5`` (:issue:`39002`) - Fixed regression in :func:`pandas.testing.assert_series_equal` and :func:`pandas.testing.assert_frame_equal` always raising ``AssertionError`` when comparing extension dtypes (:issue:`39410`) - Fixed regression in :meth:`~DataFrame.to_csv` opening ``codecs.StreamWriter`` in binary mode instead of in text mode and ignoring user-provided ``mode`` (:issue:`39247`) +- Fixed regression in :meth:`~DataFrame.to_excel` creating corrupt files when appending (``mode="a"``) to an existing file (:issue:`39576`) - Fixed regression in :meth:`DataFrame.transform` failing in case of an empty DataFrame or Series (:issue:`39636`) - Fixed regression in :meth:`core.window.rolling.Rolling.count` where the ``min_periods`` argument would be set to ``0`` after the operation (:issue:`39554`) - diff --git a/pandas/io/excel/_openpyxl.py b/pandas/io/excel/_openpyxl.py index 4f02aff2eb992..e17500892d9db 100644 --- a/pandas/io/excel/_openpyxl.py +++ b/pandas/io/excel/_openpyxl.py @@ -1,4 +1,5 @@ from distutils.version import LooseVersion +import mmap from typing import TYPE_CHECKING, Dict, List, Optional import numpy as np @@ -38,6 +39,7 @@ def __init__( from openpyxl import load_workbook self.book = load_workbook(self.handles.handle) + self.handles.handle.seek(0) else: # Create workbook object with default optimized_write=True. self.book = Workbook() @@ -50,6 +52,9 @@ def save(self): Save workbook to disk. """ self.book.save(self.handles.handle) + if "r+" in self.mode and not isinstance(self.handles.handle, mmap.mmap): + # truncate file to the written content + self.handles.handle.truncate() @classmethod def _convert_to_style_kwargs(cls, style_dict: dict) -> Dict[str, "Serialisable"]: diff --git a/pandas/tests/io/excel/test_openpyxl.py b/pandas/tests/io/excel/test_openpyxl.py index 640501baffc62..b365f4edab83c 100644 --- a/pandas/tests/io/excel/test_openpyxl.py +++ b/pandas/tests/io/excel/test_openpyxl.py @@ -1,4 +1,5 @@ from distutils.version import LooseVersion +from pathlib import Path import numpy as np import pytest @@ -149,3 +150,22 @@ def test_read_with_bad_dimension(datapath, ext, header, expected_data, filename) result = pd.read_excel(path, header=header) expected = DataFrame(expected_data) tm.assert_frame_equal(result, expected) + + +def test_append_mode_file(ext): + # GH 39576 + df = DataFrame() + + with tm.ensure_clean(ext) as f: + df.to_excel(f, engine="openpyxl") + + with ExcelWriter(f, mode="a", engine="openpyxl") as writer: + df.to_excel(writer) + + # make sure that zip files are not concatenated by making sure that + # "docProps/app.xml" only occurs twice in the file + data = Path(f).read_bytes() + first = data.find(b"docProps/app.xml") + second = data.find(b"docProps/app.xml", first + 1) + third = data.find(b"docProps/app.xml", second + 1) + assert second != -1 and third == -1
Backport PR #39605
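For context, a minimal sketch of the regression being backported, adapted from `test_append_mode_file` in the diff (the file path is illustrative):

```python
import pandas as pd

df = pd.DataFrame()
df.to_excel("book.xlsx", engine="openpyxl")

# Before the fix, saving in append mode neither rewound nor truncated the
# handle, so two zip archives ended up concatenated into one corrupt file.
with pd.ExcelWriter("book.xlsx", mode="a", engine="openpyxl") as writer:
    df.to_excel(writer)
```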
https://api.github.com/repos/pandas-dev/pandas/pulls/39659
2021-02-07T21:56:29Z
2021-02-08T06:44:22Z
2021-02-08T06:44:22Z
2022-05-16T11:30:45Z
DOC: pin sphinx theme to avoid mobile dropdown bug
diff --git a/environment.yml b/environment.yml index 1224336dadda4..6376da76bd580 100644 --- a/environment.yml +++ b/environment.yml @@ -113,5 +113,5 @@ dependencies: - tabulate>=0.8.3 # DataFrame.to_markdown - natsort # DataFrame.sort_values - pip: - - git+https://github.com/pandas-dev/pydata-sphinx-theme.git@master + - git+https://github.com/pandas-dev/pydata-sphinx-theme.git@2488b7defbd3d753dd5fcfc890fc4a7e79d25103 - git+https://github.com/numpy/numpydoc diff --git a/requirements-dev.txt b/requirements-dev.txt index 0e693773dbeea..ec9da940ca4ed 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -76,5 +76,5 @@ cftime pyreadstat tabulate>=0.8.3 natsort -git+https://github.com/pandas-dev/pydata-sphinx-theme.git@master +git+https://github.com/pandas-dev/pydata-sphinx-theme.git@2488b7defbd3d753dd5fcfc890fc4a7e79d25103 git+https://github.com/numpy/numpydoc
The latest master version of the pydata-sphinx-theme has a bug in the menu dropdown in mobile mode (https://github.com/pydata/pydata-sphinx-theme/issues/304). Since this will probably not be fixed by tomorrow, when 1.2.2 is released, I am pinning the theme here to a few commits earlier to avoid this specific bug (while still using a recent development commit so the docs keep the same look as the current ones, since we use a few features that are not yet in the latest stable release).
https://api.github.com/repos/pandas-dev/pandas/pulls/39658
2021-02-07T21:07:15Z
2021-02-08T13:44:03Z
2021-02-08T13:44:02Z
2021-02-08T13:46:59Z
Backport PR #39646 on branch 1.2.x (DOC: typo in 1.2.2 whatsnew)
diff --git a/doc/source/whatsnew/v1.2.2.rst b/doc/source/whatsnew/v1.2.2.rst index 974f84d3b244a..f38657b1b4613 100644 --- a/doc/source/whatsnew/v1.2.2.rst +++ b/doc/source/whatsnew/v1.2.2.rst @@ -17,7 +17,7 @@ Fixed regressions - Fixed regression in :func:`read_excel` that caused it to raise ``AttributeError`` when checking version of older xlrd versions (:issue:`38955`) - Fixed regression in :class:`DataFrame` constructor reordering element when construction from datetime ndarray with dtype not ``"datetime64[ns]"`` (:issue:`39422`) -- Fixed regression in :class:`DataFrame.astype` and :class:`Series.astype` not casting to bytes dtype (:issue:`39474`) +- Fixed regression in :meth:`DataFrame.astype` and :meth:`Series.astype` not casting to bytes dtype (:issue:`39474`) - Fixed regression in :meth:`~DataFrame.to_pickle` failing to create bz2/xz compressed pickle files with ``protocol=5`` (:issue:`39002`) - Fixed regression in :func:`pandas.testing.assert_series_equal` and :func:`pandas.testing.assert_frame_equal` always raising ``AssertionError`` when comparing extension dtypes (:issue:`39410`) - Fixed regression in :meth:`~DataFrame.to_csv` opening ``codecs.StreamWriter`` in binary mode instead of in text mode and ignoring user-provided ``mode`` (:issue:`39247`)
Backport PR #39646: DOC: typo in 1.2.2 whatsnew
https://api.github.com/repos/pandas-dev/pandas/pulls/39657
2021-02-07T20:17:29Z
2021-02-07T20:18:30Z
2021-02-07T20:18:30Z
2021-02-07T20:18:30Z
BUG: Series[dt64tz].__setitem__(mask, different_tz) raises
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 9e6c89345d76a..23bb9947686f4 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -4528,7 +4528,7 @@ def putmask(self, mask, value): if not mask.any(): return self.copy() - if value is None: + if value is None and (self._is_numeric_dtype or self.dtype == object): value = self._na_value try: converted = self._validate_fill_value(value) diff --git a/pandas/core/indexes/extension.py b/pandas/core/indexes/extension.py index 89fdcc3f74cfc..ea3678a7e15d9 100644 --- a/pandas/core/indexes/extension.py +++ b/pandas/core/indexes/extension.py @@ -353,11 +353,6 @@ def insert(self: _T, loc: int, item) -> _T: new_arr = arr._from_backing_data(new_vals) return type(self)._simple_new(new_arr, name=self.name) - @doc(Index.where) - def where(self: _T, cond: np.ndarray, other=None) -> _T: - res_values = self._data.where(cond, other) - return type(self)._simple_new(res_values, name=self.name) - def putmask(self, mask, value): res_values = self._data.copy() try: diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 297502707a613..30e94b99b53c9 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -1014,14 +1014,6 @@ def putmask(self, mask, new) -> List[Block]: new = self.fill_value if self._can_hold_element(new): - if self.dtype.kind in ["m", "M"]: - arr = self.array_values() - arr = cast("NDArrayBackedExtensionArray", arr) - if transpose: - arr = arr.T - arr.putmask(mask, new) - return [self] - if transpose: new_values = new_values.T @@ -2025,6 +2017,18 @@ def to_native_types(self, na_rep="NaT", **kwargs): result = arr._format_native_types(na_rep=na_rep, **kwargs) return self.make_block(result) + def putmask(self, mask, new) -> List[Block]: + mask = _extract_bool_array(mask) + + if not self._can_hold_element(new): + return self.astype(object).putmask(mask, new) + + # TODO(EA2D): reshape unnecessary with 2D EAs + arr = self.array_values().reshape(self.shape) + arr = cast("NDArrayBackedExtensionArray", arr) + arr.T.putmask(mask, new) + return [self] + def where(self, other, cond, errors="raise", axis: int = 0) -> List[Block]: # TODO(EA2D): reshape unnecessary with 2D EAs arr = self.array_values().reshape(self.shape) @@ -2099,6 +2103,7 @@ class DatetimeTZBlock(ExtensionBlock, DatetimeBlock): diff = DatetimeBlock.diff fill_value = np.datetime64("NaT", "ns") where = DatetimeBlock.where + putmask = DatetimeLikeBlockMixin.putmask array_values = ExtensionBlock.array_values diff --git a/pandas/tests/indexes/categorical/test_indexing.py b/pandas/tests/indexes/categorical/test_indexing.py index 13e622a61b4bd..7ec67d83e4b3b 100644 --- a/pandas/tests/indexes/categorical/test_indexing.py +++ b/pandas/tests/indexes/categorical/test_indexing.py @@ -305,10 +305,11 @@ def test_where_non_categories(self): ci = CategoricalIndex(["a", "b", "c", "d"]) mask = np.array([True, False, True, False]) - msg = "Cannot setitem on a Categorical with a new category" - with pytest.raises(ValueError, match=msg): - ci.where(mask, 2) + result = ci.where(mask, 2) + expected = Index(["a", 2, "c", 2], dtype=object) + tm.assert_index_equal(result, expected) + msg = "Cannot setitem on a Categorical with a new category" with pytest.raises(ValueError, match=msg): # Test the Categorical method directly ci._data.where(mask, 2) diff --git a/pandas/tests/indexes/datetimelike.py b/pandas/tests/indexes/datetimelike.py index 41f8e3408d191..4c8ab27d2c824 100644 --- 
a/pandas/tests/indexes/datetimelike.py +++ b/pandas/tests/indexes/datetimelike.py @@ -117,9 +117,9 @@ def test_where_cast_str(self): result = index.where(mask, [str(index[0])]) tm.assert_index_equal(result, expected) - msg = "value should be a '.*', 'NaT', or array of those" - with pytest.raises(TypeError, match=msg): - index.where(mask, "foo") + expected = index.astype(object).where(mask, "foo") + result = index.where(mask, "foo") + tm.assert_index_equal(result, expected) - with pytest.raises(TypeError, match=msg): - index.where(mask, ["foo"]) + result = index.where(mask, ["foo"]) + tm.assert_index_equal(result, expected) diff --git a/pandas/tests/indexes/datetimes/test_indexing.py b/pandas/tests/indexes/datetimes/test_indexing.py index 385390e9d7b98..3e935d0dfdd5f 100644 --- a/pandas/tests/indexes/datetimes/test_indexing.py +++ b/pandas/tests/indexes/datetimes/test_indexing.py @@ -175,40 +175,56 @@ def test_where_other(self): def test_where_invalid_dtypes(self): dti = date_range("20130101", periods=3, tz="US/Eastern") - i2 = Index([pd.NaT, pd.NaT] + dti[2:].tolist()) + tail = dti[2:].tolist() + i2 = Index([pd.NaT, pd.NaT] + tail) - msg = "value should be a 'Timestamp', 'NaT', or array of those. Got" - msg2 = "Cannot compare tz-naive and tz-aware datetime-like objects" - with pytest.raises(TypeError, match=msg2): - # passing tz-naive ndarray to tzaware DTI - dti.where(notna(i2), i2.values) + mask = notna(i2) - with pytest.raises(TypeError, match=msg2): - # passing tz-aware DTI to tznaive DTI - dti.tz_localize(None).where(notna(i2), i2) + # passing tz-naive ndarray to tzaware DTI + result = dti.where(mask, i2.values) + expected = Index([pd.NaT.asm8, pd.NaT.asm8] + tail, dtype=object) + tm.assert_index_equal(result, expected) - with pytest.raises(TypeError, match=msg): - dti.where(notna(i2), i2.tz_localize(None).to_period("D")) + # passing tz-aware DTI to tznaive DTI + naive = dti.tz_localize(None) + result = naive.where(mask, i2) + expected = Index([i2[0], i2[1]] + naive[2:].tolist(), dtype=object) + tm.assert_index_equal(result, expected) - with pytest.raises(TypeError, match=msg): - dti.where(notna(i2), i2.asi8.view("timedelta64[ns]")) + pi = i2.tz_localize(None).to_period("D") + result = dti.where(mask, pi) + expected = Index([pi[0], pi[1]] + tail, dtype=object) + tm.assert_index_equal(result, expected) - with pytest.raises(TypeError, match=msg): - dti.where(notna(i2), i2.asi8) + tda = i2.asi8.view("timedelta64[ns]") + result = dti.where(mask, tda) + expected = Index([tda[0], tda[1]] + tail, dtype=object) + assert isinstance(expected[0], np.timedelta64) + tm.assert_index_equal(result, expected) - with pytest.raises(TypeError, match=msg): - # non-matching scalar - dti.where(notna(i2), pd.Timedelta(days=4)) + result = dti.where(mask, i2.asi8) + expected = Index([pd.NaT.value, pd.NaT.value] + tail, dtype=object) + assert isinstance(expected[0], int) + tm.assert_index_equal(result, expected) + + # non-matching scalar + td = pd.Timedelta(days=4) + result = dti.where(mask, td) + expected = Index([td, td] + tail, dtype=object) + assert expected[0] is td + tm.assert_index_equal(result, expected) def test_where_mismatched_nat(self, tz_aware_fixture): tz = tz_aware_fixture dti = date_range("2013-01-01", periods=3, tz=tz) cond = np.array([True, False, True]) - msg = "value should be a 'Timestamp', 'NaT', or array of those. 
Got" - with pytest.raises(TypeError, match=msg): - # wrong-dtyped NaT - dti.where(cond, np.timedelta64("NaT", "ns")) + tdnat = np.timedelta64("NaT", "ns") + expected = Index([dti[0], tdnat, dti[2]], dtype=object) + assert expected[1] is tdnat + + result = dti.where(cond, tdnat) + tm.assert_index_equal(result, expected) def test_where_tz(self): i = date_range("20130101", periods=3, tz="US/Eastern") diff --git a/pandas/tests/indexes/period/test_indexing.py b/pandas/tests/indexes/period/test_indexing.py index 9378a0ffc5f84..00babd2d56aeb 100644 --- a/pandas/tests/indexes/period/test_indexing.py +++ b/pandas/tests/indexes/period/test_indexing.py @@ -603,30 +603,42 @@ def test_where_other(self): def test_where_invalid_dtypes(self): pi = period_range("20130101", periods=5, freq="D") - i2 = PeriodIndex([NaT, NaT] + pi[2:].tolist(), freq="D") + tail = pi[2:].tolist() + i2 = PeriodIndex([NaT, NaT] + tail, freq="D") + mask = notna(i2) - msg = "value should be a 'Period', 'NaT', or array of those" - with pytest.raises(TypeError, match=msg): - pi.where(notna(i2), i2.asi8) + result = pi.where(mask, i2.asi8) + expected = pd.Index([NaT.value, NaT.value] + tail, dtype=object) + assert isinstance(expected[0], int) + tm.assert_index_equal(result, expected) - with pytest.raises(TypeError, match=msg): - pi.where(notna(i2), i2.asi8.view("timedelta64[ns]")) + tdi = i2.asi8.view("timedelta64[ns]") + expected = pd.Index([tdi[0], tdi[1]] + tail, dtype=object) + assert isinstance(expected[0], np.timedelta64) + result = pi.where(mask, tdi) + tm.assert_index_equal(result, expected) - with pytest.raises(TypeError, match=msg): - pi.where(notna(i2), i2.to_timestamp("S")) + dti = i2.to_timestamp("S") + expected = pd.Index([dti[0], dti[1]] + tail, dtype=object) + assert expected[0] is NaT + result = pi.where(mask, dti) + tm.assert_index_equal(result, expected) - with pytest.raises(TypeError, match=msg): - # non-matching scalar - pi.where(notna(i2), Timedelta(days=4)) + td = Timedelta(days=4) + expected = pd.Index([td, td] + tail, dtype=object) + assert expected[0] == td + result = pi.where(mask, td) + tm.assert_index_equal(result, expected) def test_where_mismatched_nat(self): pi = period_range("20130101", periods=5, freq="D") cond = np.array([True, False, True, True, False]) - msg = "value should be a 'Period', 'NaT', or array of those" - with pytest.raises(TypeError, match=msg): - # wrong-dtyped NaT - pi.where(cond, np.timedelta64("NaT", "ns")) + tdnat = np.timedelta64("NaT", "ns") + expected = pd.Index([pi[0], tdnat, pi[2], pi[3], tdnat], dtype=object) + assert expected[1] is tdnat + result = pi.where(cond, tdnat) + tm.assert_index_equal(result, expected) class TestTake: diff --git a/pandas/tests/indexes/timedeltas/test_indexing.py b/pandas/tests/indexes/timedeltas/test_indexing.py index d79865c1446db..34316e1470573 100644 --- a/pandas/tests/indexes/timedeltas/test_indexing.py +++ b/pandas/tests/indexes/timedeltas/test_indexing.py @@ -148,30 +148,39 @@ def test_where_doesnt_retain_freq(self): def test_where_invalid_dtypes(self): tdi = timedelta_range("1 day", periods=3, freq="D", name="idx") - i2 = Index([pd.NaT, pd.NaT] + tdi[2:].tolist()) + tail = tdi[2:].tolist() + i2 = Index([pd.NaT, pd.NaT] + tail) + mask = notna(i2) - msg = "value should be a 'Timedelta', 'NaT', or array of those" - with pytest.raises(TypeError, match=msg): - tdi.where(notna(i2), i2.asi8) + expected = Index([pd.NaT.value, pd.NaT.value] + tail, dtype=object, name="idx") + assert isinstance(expected[0], int) + result = tdi.where(mask, i2.asi8) + 
tm.assert_index_equal(result, expected) - with pytest.raises(TypeError, match=msg): - tdi.where(notna(i2), i2 + pd.Timestamp.now()) + ts = i2 + pd.Timestamp.now() + expected = Index([ts[0], ts[1]] + tail, dtype=object, name="idx") + result = tdi.where(mask, ts) + tm.assert_index_equal(result, expected) - with pytest.raises(TypeError, match=msg): - tdi.where(notna(i2), (i2 + pd.Timestamp.now()).to_period("D")) + per = (i2 + pd.Timestamp.now()).to_period("D") + expected = Index([per[0], per[1]] + tail, dtype=object, name="idx") + result = tdi.where(mask, per) + tm.assert_index_equal(result, expected) - with pytest.raises(TypeError, match=msg): - # non-matching scalar - tdi.where(notna(i2), pd.Timestamp.now()) + ts = pd.Timestamp.now() + expected = Index([ts, ts] + tail, dtype=object, name="idx") + result = tdi.where(mask, ts) + tm.assert_index_equal(result, expected) def test_where_mismatched_nat(self): tdi = timedelta_range("1 day", periods=3, freq="D", name="idx") cond = np.array([True, False, False]) - msg = "value should be a 'Timedelta', 'NaT', or array of those" - with pytest.raises(TypeError, match=msg): - # wrong-dtyped NaT - tdi.where(cond, np.datetime64("NaT", "ns")) + dtnat = np.datetime64("NaT", "ns") + expected = Index([tdi[0], dtnat, dtnat], dtype=object, name="idx") + assert expected[2] is dtnat + result = tdi.where(cond, dtnat) + tm.assert_index_equal(result, expected) class TestTake: diff --git a/pandas/tests/indexing/test_coercion.py b/pandas/tests/indexing/test_coercion.py index 69b9e63d7e215..f6f04e935ac52 100644 --- a/pandas/tests/indexing/test_coercion.py +++ b/pandas/tests/indexing/test_coercion.py @@ -802,10 +802,13 @@ def test_where_index_timedelta64(self, value): result = tdi.where(cond, value) tm.assert_index_equal(result, expected) - msg = "value should be a 'Timedelta', 'NaT', or array of thos" - with pytest.raises(TypeError, match=msg): - # wrong-dtyped NaT - tdi.where(cond, np.datetime64("NaT", "ns")) + # wrong-dtyped NaT + dtnat = np.datetime64("NaT", "ns") + expected = pd.Index([tdi[0], dtnat, dtnat, tdi[3]], dtype=object) + assert expected[1] is dtnat + + result = tdi.where(cond, dtnat) + tm.assert_index_equal(result, expected) def test_where_index_period(self): dti = pd.date_range("2016-01-01", periods=3, freq="QS") @@ -825,14 +828,16 @@ def test_where_index_period(self): expected = pd.PeriodIndex([other[0], pi[1], other[2]]) tm.assert_index_equal(result, expected) - # Passing a mismatched scalar - msg = "value should be a 'Period', 'NaT', or array of those" - with pytest.raises(TypeError, match=msg): - pi.where(cond, pd.Timedelta(days=4)) + # Passing a mismatched scalar -> casts to object + td = pd.Timedelta(days=4) + expected = pd.Index([td, pi[1], td], dtype=object) + result = pi.where(cond, td) + tm.assert_index_equal(result, expected) - msg = r"Input has different freq=D from PeriodArray\(freq=Q-DEC\)" - with pytest.raises(ValueError, match=msg): - pi.where(cond, pd.Period("2020-04-21", "D")) + per = pd.Period("2020-04-21", "D") + expected = pd.Index([per, pi[1], per], dtype=object) + result = pi.where(cond, per) + tm.assert_index_equal(result, expected) class TestFillnaSeriesCoercion(CoercionBase): diff --git a/pandas/tests/series/indexing/test_setitem.py b/pandas/tests/series/indexing/test_setitem.py index 3023baca6ff5d..da78dec4915c9 100644 --- a/pandas/tests/series/indexing/test_setitem.py +++ b/pandas/tests/series/indexing/test_setitem.py @@ -50,19 +50,6 @@ def test_setitem_with_string_index(self): assert ser.Date == date.today() assert 
ser["Date"] == date.today() - def test_setitem_with_different_tz_casts_to_object(self): - # GH#24024 - ser = Series(date_range("2000", periods=2, tz="US/Central")) - ser[0] = Timestamp("2000", tz="US/Eastern") - expected = Series( - [ - Timestamp("2000-01-01 00:00:00-05:00", tz="US/Eastern"), - Timestamp("2000-01-02 00:00:00-06:00", tz="US/Central"), - ], - dtype=object, - ) - tm.assert_series_equal(ser, expected) - def test_setitem_tuple_with_datetimetz_values(self): # GH#20441 arr = date_range("2017", periods=4, tz="US/Eastern") @@ -646,3 +633,29 @@ def val(self, request): @pytest.fixture def is_inplace(self): return True + + +class TestSetitemMismatchedTZCastsToObject(SetitemCastingEquivalents): + # GH#24024 + @pytest.fixture + def obj(self): + return Series(date_range("2000", periods=2, tz="US/Central")) + + @pytest.fixture + def val(self): + return Timestamp("2000", tz="US/Eastern") + + @pytest.fixture + def key(self): + return 0 + + @pytest.fixture + def expected(self): + expected = Series( + [ + Timestamp("2000-01-01 00:00:00-05:00", tz="US/Eastern"), + Timestamp("2000-01-02 00:00:00-06:00", tz="US/Central"), + ], + dtype=object, + ) + return expected
- [ ] closes #xxxx
- [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [ ] whatsnew entry

Sits on top of #39650; after this we'll be ready to clean up / de-duplicate some of the putmask/where methods. A minimal illustration of the fixed behavior is sketched below.
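A minimal illustration of the fixed behavior, based on the `TestSetitemMismatchedTZCastsToObject` fixtures added in the diff:

```python
import pandas as pd

ser = pd.Series(pd.date_range("2000", periods=2, tz="US/Central"))

# GH#24024: assigning a Timestamp with a mismatched tz should cast the
# Series to object dtype instead of raising.
ser[0] = pd.Timestamp("2000", tz="US/Eastern")
assert ser.dtype == object
```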
https://api.github.com/repos/pandas-dev/pandas/pulls/39656
2021-02-07T20:05:08Z
2021-02-10T23:13:01Z
2021-02-10T23:13:01Z
2021-02-10T23:16:06Z
REGR: fix case all-NaN/numeric object column in groupby
diff --git a/doc/source/whatsnew/v1.2.2.rst b/doc/source/whatsnew/v1.2.2.rst index b6e81e44e6f08..c9cbf33ff2ad9 100644 --- a/doc/source/whatsnew/v1.2.2.rst +++ b/doc/source/whatsnew/v1.2.2.rst @@ -23,7 +23,8 @@ Fixed regressions - Fixed regression in :meth:`~DataFrame.to_csv` opening ``codecs.StreamWriter`` in binary mode instead of in text mode and ignoring user-provided ``mode`` (:issue:`39247`) - Fixed regression in :meth:`~DataFrame.to_excel` creating corrupt files when appending (``mode="a"``) to an existing file (:issue:`39576`) - Fixed regression in :meth:`DataFrame.transform` failing in case of an empty DataFrame or Series (:issue:`39636`) -- Fixed regression in :meth:`core.window.rolling.Rolling.count` where the ``min_periods`` argument would be set to ``0`` after the operation (:issue:`39554`) +- Fixed regression in :meth:`~DataFrame.groupby` or :meth:`~DataFrame.resample` when aggregating an all-NaN or numeric object dtype column (:issue:`39329`) +- Fixed regression in :meth:`.Rolling.count` where the ``min_periods`` argument would be set to ``0`` after the operation (:issue:`39554`) - Fixed regression in :func:`read_excel` that incorrectly raised when the argument ``io`` was a non-path and non-buffer and the ``engine`` argument was specified (:issue:`39528`) - diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index 7b6eb4c8fe2f9..04e9eb039c249 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -1102,11 +1102,16 @@ def py_fallback(bvalues: ArrayLike) -> ArrayLike: assert isinstance(result, (Series, DataFrame)) # for mypy mgr = result._mgr assert isinstance(mgr, BlockManager) - assert len(mgr.blocks) == 1 # unwrap DataFrame to get array - result = mgr.blocks[0].values - return result + if len(mgr.blocks) != 1: + # We've split an object block! Everything we've assumed + # about a single block input returning a single block output + # is a lie. 
See eg GH-39329 + return mgr.as_array() + else: + result = mgr.blocks[0].values + return result def blk_func(bvalues: ArrayLike) -> ArrayLike: diff --git a/pandas/tests/groupby/aggregate/test_aggregate.py b/pandas/tests/groupby/aggregate/test_aggregate.py index de4ef0996ad49..48527de6b2047 100644 --- a/pandas/tests/groupby/aggregate/test_aggregate.py +++ b/pandas/tests/groupby/aggregate/test_aggregate.py @@ -1187,3 +1187,27 @@ def test_aggregate_datetime_objects(): result = df.groupby("A").B.max() expected = df.set_index("A")["B"] tm.assert_series_equal(result, expected) + + +def test_aggregate_numeric_object_dtype(): + # https://github.com/pandas-dev/pandas/issues/39329 + # simplified case: multiple object columns where one is all-NaN + # -> gets split as the all-NaN is inferred as float + df = DataFrame( + {"key": ["A", "A", "B", "B"], "col1": list("abcd"), "col2": [np.nan] * 4}, + ).astype(object) + result = df.groupby("key").min() + expected = DataFrame( + {"key": ["A", "B"], "col1": ["a", "c"], "col2": [np.nan, np.nan]} + ).set_index("key") + tm.assert_frame_equal(result, expected) + + # same but with numbers + df = DataFrame( + {"key": ["A", "A", "B", "B"], "col1": list("abcd"), "col2": range(4)}, + ).astype(object) + result = df.groupby("key").min() + expected = DataFrame( + {"key": ["A", "B"], "col1": ["a", "c"], "col2": [0, 2]} + ).set_index("key") + tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/resample/test_resampler_grouper.py b/pandas/tests/resample/test_resampler_grouper.py index 1a10255a81a8c..39d4533ca08dc 100644 --- a/pandas/tests/resample/test_resampler_grouper.py +++ b/pandas/tests/resample/test_resampler_grouper.py @@ -392,3 +392,34 @@ def test_resample_groupby_agg(): result = resampled.agg({"num": "sum"}) tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("consolidate", [True, False]) +def test_resample_groupby_agg_object_dtype_all_nan(consolidate): + # https://github.com/pandas-dev/pandas/issues/39329 + + dates = pd.date_range("2020-01-01", periods=15, freq="D") + df1 = DataFrame({"key": "A", "date": dates, "col1": range(15), "col_object": "val"}) + df2 = DataFrame({"key": "B", "date": dates, "col1": range(15)}) + df = pd.concat([df1, df2], ignore_index=True) + if consolidate: + df = df._consolidate() + + result = df.groupby(["key"]).resample("W", on="date").min() + idx = pd.MultiIndex.from_arrays( + [ + ["A"] * 3 + ["B"] * 3, + pd.to_datetime(["2020-01-05", "2020-01-12", "2020-01-19"] * 2), + ], + names=["key", "date"], + ) + expected = DataFrame( + { + "key": ["A"] * 3 + ["B"] * 3, + "date": pd.to_datetime(["2020-01-01", "2020-01-06", "2020-01-13"] * 2), + "col1": [0, 5, 12] * 2, + "col_object": ["val"] * 3 + [np.nan] * 3, + }, + index=idx, + ) + tm.assert_frame_equal(result, expected)
Fixes #39329

This basically reverts #35841, although the code itself has changed quite a bit since then, so it's simpler here than the original PR (not dealing with blocks anymore).

The original code comment that was removed in #35841, *"We've split an object block!"*, was correct, as this can actually happen in cases like the examples in the tests (a minimal reproduction is sketched below).
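For reference, a minimal reproduction adapted from the test added in this PR:

```python
import numpy as np
import pandas as pd

# Multiple object columns where one is all-NaN: the all-NaN column is
# inferred as float, so the object block gets split inside the groupby
# fallback, invalidating the old single-block assumption (GH-39329).
df = pd.DataFrame(
    {"key": ["A", "A", "B", "B"], "col1": list("abcd"), "col2": [np.nan] * 4}
).astype(object)

result = df.groupby("key").min()  # raised an AssertionError before the fix
```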
https://api.github.com/repos/pandas-dev/pandas/pulls/39655
2021-02-07T19:56:10Z
2021-02-08T13:42:32Z
2021-02-08T13:42:31Z
2021-02-08T14:47:13Z
Backport PR #39639: REGR: fix transform of empty DataFrame/Series
diff --git a/doc/source/whatsnew/v1.2.2.rst b/doc/source/whatsnew/v1.2.2.rst index 974f84d3b244a..63e793c013497 100644 --- a/doc/source/whatsnew/v1.2.2.rst +++ b/doc/source/whatsnew/v1.2.2.rst @@ -21,6 +21,7 @@ Fixed regressions - Fixed regression in :meth:`~DataFrame.to_pickle` failing to create bz2/xz compressed pickle files with ``protocol=5`` (:issue:`39002`) - Fixed regression in :func:`pandas.testing.assert_series_equal` and :func:`pandas.testing.assert_frame_equal` always raising ``AssertionError`` when comparing extension dtypes (:issue:`39410`) - Fixed regression in :meth:`~DataFrame.to_csv` opening ``codecs.StreamWriter`` in binary mode instead of in text mode and ignoring user-provided ``mode`` (:issue:`39247`) +- Fixed regression in :meth:`DataFrame.transform` failing in case of an empty DataFrame or Series (:issue:`39636`) - Fixed regression in :meth:`core.window.rolling.Rolling.count` where the ``min_periods`` argument would be set to ``0`` after the operation (:issue:`39554`) - diff --git a/pandas/core/aggregation.py b/pandas/core/aggregation.py index c64f0bd71cf84..74f21bae39ba9 100644 --- a/pandas/core/aggregation.py +++ b/pandas/core/aggregation.py @@ -456,7 +456,7 @@ def transform( # Functions that transform may return empty Series/DataFrame # when the dtype is not appropriate - if isinstance(result, (ABCSeries, ABCDataFrame)) and result.empty: + if isinstance(result, (ABCSeries, ABCDataFrame)) and result.empty and not obj.empty: raise ValueError("Transform function failed") if not isinstance(result, (ABCSeries, ABCDataFrame)) or not result.index.equals( obj.index diff --git a/pandas/tests/frame/apply/test_frame_transform.py b/pandas/tests/frame/apply/test_frame_transform.py index db5b2f3d86dfe..d3a3b1482affd 100644 --- a/pandas/tests/frame/apply/test_frame_transform.py +++ b/pandas/tests/frame/apply/test_frame_transform.py @@ -258,3 +258,13 @@ def test_transform_missing_columns(axis): match = re.escape("Column(s) ['C'] do not exist") with pytest.raises(SpecificationError, match=match): df.transform({"C": "cumsum"}) + + +def test_transform_empty_dataframe(): + # https://github.com/pandas-dev/pandas/issues/39636 + df = DataFrame([], columns=["col1", "col2"]) + result = df.transform(lambda x: x + 10) + tm.assert_frame_equal(result, df) + + result = df["col1"].transform(lambda x: x + 10) + tm.assert_series_equal(result, df["col1"])
Backport PR #39639
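A minimal sketch of the regression being backported, adapted from `test_transform_empty_dataframe` in the diff:

```python
import pandas as pd

# GH 39636: transforming an empty DataFrame used to raise
# ValueError("Transform function failed") because the result was empty.
df = pd.DataFrame([], columns=["col1", "col2"])
result = df.transform(lambda x: x + 10)
```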
https://api.github.com/repos/pandas-dev/pandas/pulls/39654
2021-02-07T18:29:53Z
2021-02-07T22:53:58Z
2021-02-07T22:53:58Z
2021-02-07T22:54:02Z
Backport PR #39586: REG: read_excel with engine specified raises on non-path/non-buffer
diff --git a/doc/source/whatsnew/v1.2.2.rst b/doc/source/whatsnew/v1.2.2.rst index 974f84d3b244a..9cd3f8b7ff6cf 100644 --- a/doc/source/whatsnew/v1.2.2.rst +++ b/doc/source/whatsnew/v1.2.2.rst @@ -22,6 +22,7 @@ Fixed regressions - Fixed regression in :func:`pandas.testing.assert_series_equal` and :func:`pandas.testing.assert_frame_equal` always raising ``AssertionError`` when comparing extension dtypes (:issue:`39410`) - Fixed regression in :meth:`~DataFrame.to_csv` opening ``codecs.StreamWriter`` in binary mode instead of in text mode and ignoring user-provided ``mode`` (:issue:`39247`) - Fixed regression in :meth:`core.window.rolling.Rolling.count` where the ``min_periods`` argument would be set to ``0`` after the operation (:issue:`39554`) +- Fixed regression in :func:`read_excel` that incorrectly raised when the argument ``io`` was a non-path and non-buffer and the ``engine`` argument was specified (:issue:`39528`) - .. --------------------------------------------------------------------------- diff --git a/pandas/io/excel/_base.py b/pandas/io/excel/_base.py index 7d64ab77c962d..850570fc743b7 100644 --- a/pandas/io/excel/_base.py +++ b/pandas/io/excel/_base.py @@ -1062,14 +1062,16 @@ def __init__( xlrd_version = LooseVersion(get_version(xlrd)) - if xlrd_version is not None and isinstance(path_or_buffer, xlrd.Book): - ext = "xls" - else: - ext = inspect_excel_format( - content=path_or_buffer, storage_options=storage_options - ) - + ext = None if engine is None: + # Only determine ext if it is needed + if xlrd_version is not None and isinstance(path_or_buffer, xlrd.Book): + ext = "xls" + else: + ext = inspect_excel_format( + content=path_or_buffer, storage_options=storage_options + ) + if ext == "ods": engine = "odf" elif ext == "xls": @@ -1086,13 +1088,22 @@ def __init__( else: engine = "xlrd" - if engine == "xlrd" and ext != "xls" and xlrd_version is not None: - if xlrd_version >= "2": + if engine == "xlrd" and xlrd_version is not None: + if ext is None: + # Need ext to determine ext in order to raise/warn + if isinstance(path_or_buffer, xlrd.Book): + ext = "xls" + else: + ext = inspect_excel_format( + content=path_or_buffer, storage_options=storage_options + ) + + if ext != "xls" and xlrd_version >= "2": raise ValueError( f"Your version of xlrd is {xlrd_version}. In xlrd >= 2.0, " f"only the xls format is supported. Install openpyxl instead." 
) - else: + elif ext != "xls": caller = inspect.stack()[1] if ( caller.filename.endswith( diff --git a/pandas/io/excel/_openpyxl.py b/pandas/io/excel/_openpyxl.py index 4f02aff2eb992..0f519c13f98e7 100644 --- a/pandas/io/excel/_openpyxl.py +++ b/pandas/io/excel/_openpyxl.py @@ -531,7 +531,11 @@ def get_sheet_data(self, sheet, convert_float: bool) -> List[List[Scalar]]: version = LooseVersion(get_version(openpyxl)) - if version >= "3.0.0": + # There is no good way of determining if a sheet is read-only + # https://foss.heptapod.net/openpyxl/openpyxl/-/issues/1605 + is_readonly = hasattr(sheet, "reset_dimensions") + + if version >= "3.0.0" and is_readonly: sheet.reset_dimensions() data: List[List[Scalar]] = [] @@ -539,7 +543,7 @@ def get_sheet_data(self, sheet, convert_float: bool) -> List[List[Scalar]]: converted_row = [self._convert_cell(cell, convert_float) for cell in row] data.append(converted_row) - if version >= "3.0.0" and len(data) > 0: + if version >= "3.0.0" and is_readonly and len(data) > 0: # With dimension reset, openpyxl no longer pads rows max_width = max(len(data_row) for data_row in data) if min(len(data_row) for data_row in data) < max_width: diff --git a/pandas/tests/io/excel/test_openpyxl.py b/pandas/tests/io/excel/test_openpyxl.py index 640501baffc62..da12829b579fe 100644 --- a/pandas/tests/io/excel/test_openpyxl.py +++ b/pandas/tests/io/excel/test_openpyxl.py @@ -122,6 +122,17 @@ def test_to_excel_with_openpyxl_engine(ext): styled.to_excel(filename, engine="openpyxl") +@pytest.mark.parametrize("read_only", [True, False]) +def test_read_workbook(datapath, ext, read_only): + # GH 39528 + filename = datapath("io", "data", "excel", "test1" + ext) + wb = openpyxl.load_workbook(filename, read_only=read_only) + result = pd.read_excel(wb, engine="openpyxl") + wb.close() + expected = pd.read_excel(filename) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( "header, expected_data", [ @@ -139,13 +150,22 @@ def test_to_excel_with_openpyxl_engine(ext): @pytest.mark.parametrize( "filename", ["dimension_missing", "dimension_small", "dimension_large"] ) -@pytest.mark.xfail( - LooseVersion(get_version(openpyxl)) < "3.0.0", - reason="openpyxl read-only sheet is incorrect when dimension data is wrong", -) -def test_read_with_bad_dimension(datapath, ext, header, expected_data, filename): +# When read_only is None, use read_excel instead of a workbook +@pytest.mark.parametrize("read_only", [True, False, None]) +def test_read_with_bad_dimension( + datapath, ext, header, expected_data, filename, read_only, request +): # GH 38956, 39001 - no/incorrect dimension information + version = LooseVersion(get_version(openpyxl)) + if (read_only or read_only is None) and version < "3.0.0": + msg = "openpyxl read-only sheet is incorrect when dimension data is wrong" + request.node.add_marker(pytest.mark.xfail(reason=msg)) path = datapath("io", "data", "excel", f"{filename}{ext}") - result = pd.read_excel(path, header=header) + if read_only is None: + result = pd.read_excel(path, header=header) + else: + wb = openpyxl.load_workbook(path, read_only=read_only) + result = pd.read_excel(wb, engine="openpyxl", header=header) + wb.close() expected = DataFrame(expected_data) tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/io/excel/test_readers.py b/pandas/tests/io/excel/test_readers.py index 9b3d359dc01a5..bd3bfa207c4b0 100644 --- a/pandas/tests/io/excel/test_readers.py +++ b/pandas/tests/io/excel/test_readers.py @@ -2,6 +2,7 @@ from functools import partial import os 
from urllib.error import URLError +from zipfile import BadZipFile import numpy as np import pytest @@ -642,7 +643,13 @@ def test_missing_file_raises(self, read_ext): def test_corrupt_bytes_raises(self, read_ext, engine): bad_stream = b"foo" - with pytest.raises(ValueError, match="File is not a recognized excel file"): + if engine is None or engine == "xlrd": + error = ValueError + msg = "File is not a recognized excel file" + else: + error = BadZipFile + msg = "File is not a zip file" + with pytest.raises(error, match=msg): pd.read_excel(bad_stream) @tm.network
Backport PR #39586
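For context, a sketch of the regression being backported, adapted from `test_read_workbook` in the diff (the file path is illustrative):

```python
import openpyxl
import pandas as pd

# GH 39528: passing a non-path, non-buffer ``io`` (here an openpyxl
# Workbook) together with an explicit engine raised during the
# unconditional format inspection.
wb = openpyxl.load_workbook("test1.xlsx", read_only=True)
result = pd.read_excel(wb, engine="openpyxl")
wb.close()
```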
https://api.github.com/repos/pandas-dev/pandas/pulls/39652
2021-02-07T17:58:43Z
2021-02-08T06:43:13Z
2021-02-08T06:43:13Z
2021-02-08T06:43:22Z
REF: implement Index._view, Index._rename
diff --git a/asv_bench/benchmarks/categoricals.py b/asv_bench/benchmarks/categoricals.py index f4a6ed5f26c89..4e32b6e496929 100644 --- a/asv_bench/benchmarks/categoricals.py +++ b/asv_bench/benchmarks/categoricals.py @@ -323,7 +323,7 @@ def time_get_loc(self): self.index.get_loc(self.category) def time_shallow_copy(self): - self.index._shallow_copy() + self.index._view() def time_align(self): pd.DataFrame({"a": self.series, "b": self.series[:500]}) diff --git a/asv_bench/benchmarks/period.py b/asv_bench/benchmarks/period.py index e15d4c66e4fc0..74193ee62cfae 100644 --- a/asv_bench/benchmarks/period.py +++ b/asv_bench/benchmarks/period.py @@ -86,7 +86,7 @@ def time_get_loc(self): self.index.get_loc(self.period) def time_shallow_copy(self): - self.index._shallow_copy() + self.index._view() def time_series_loc(self): self.series.loc[self.period] diff --git a/asv_bench/benchmarks/timedelta.py b/asv_bench/benchmarks/timedelta.py index cfe05c3e257b1..207010b8cc943 100644 --- a/asv_bench/benchmarks/timedelta.py +++ b/asv_bench/benchmarks/timedelta.py @@ -74,7 +74,7 @@ def time_get_loc(self): self.index.get_loc(self.timedelta) def time_shallow_copy(self): - self.index._shallow_copy() + self.index._view() def time_series_loc(self): self.series.loc[self.timedelta] diff --git a/pandas/core/frame.py b/pandas/core/frame.py index f3f899f9fd90a..45cb3d318dcf9 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -8917,7 +8917,7 @@ def _count_level(self, level: Level, axis: Axis = 0, numeric_only=False): level = count_axis._get_level_number(level) level_name = count_axis._names[level] - level_index = count_axis.levels[level]._shallow_copy(name=level_name) + level_index = count_axis.levels[level]._rename(name=level_name) level_codes = ensure_int64(count_axis.codes[level]) counts = lib.count_level_2d(mask, level_codes, len(level_index), axis=axis) diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 5c1fabd67bc8d..8c2174bb2b64e 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -581,7 +581,7 @@ def _get_attributes_dict(self): """ return {k: getattr(self, k, None) for k in self._attributes} - def _shallow_copy(self, values=None, name: Hashable = no_default): + def _shallow_copy(self, values, name: Hashable = no_default): """ Create a new Index with the same class as the caller, don't copy the data, use the same object attributes with passed in attributes taking @@ -596,13 +596,26 @@ def _shallow_copy(self, values=None, name: Hashable = no_default): """ name = self.name if name is no_default else name - if values is not None: - return self._simple_new(values, name=name) + return self._simple_new(values, name=name) + + def _view(self: _IndexT) -> _IndexT: + """ + fastpath to make a shallow copy, i.e. new object with same data. + """ + result = self._simple_new(self._values, name=self.name) - result = self._simple_new(self._values, name=name) result._cache = self._cache return result + @final + def _rename(self: _IndexT, name: Hashable) -> _IndexT: + """ + fastpath for rename if new name is already validated. 
+ """ + result = self._view() + result._name = name + return result + @final def is_(self, other) -> bool: """ @@ -731,7 +744,7 @@ def view(self, cls=None): if cls is not None and not hasattr(cls, "_typ"): result = self._data.view(cls) else: - result = self._shallow_copy() + result = self._view() if isinstance(result, Index): result._id = self._id return result @@ -937,9 +950,10 @@ def copy( """ name = self._validate_names(name=name, names=names, deep=deep)[0] if deep: - new_index = self._shallow_copy(self._data.copy(), name=name) + new_data = self._data.copy() + new_index = type(self)._simple_new(new_data, name=name) else: - new_index = self._shallow_copy(name=name) + new_index = self._rename(name=name) if dtype: warnings.warn( @@ -1234,7 +1248,7 @@ def to_series(self, index=None, name=None): from pandas import Series if index is None: - index = self._shallow_copy() + index = self._view() if name is None: name = self.name @@ -1492,7 +1506,7 @@ def set_names(self, names, level=None, inplace: bool = False): if inplace: idx = self else: - idx = self._shallow_copy() + idx = self._view() idx._set_names(names, level=level) if not inplace: @@ -2437,7 +2451,7 @@ def fillna(self, value=None, downcast=None): # no need to care metadata other than name # because it can't have freq if return Index(result, name=self.name) - return self._shallow_copy() + return self._view() def dropna(self, how="any"): """ @@ -2459,7 +2473,7 @@ def dropna(self, how="any"): if self.hasnans: res_values = self._values[~self._isnan] return type(self)._simple_new(res_values, name=self.name) - return self._shallow_copy() + return self._view() # -------------------------------------------------------------------- # Uniqueness Methods @@ -2488,7 +2502,7 @@ def unique(self, level=None): self._validate_index_level(level) if self.is_unique: - return self._shallow_copy() + return self._view() result = super().unique() return self._shallow_copy(result) @@ -2541,7 +2555,7 @@ def drop_duplicates(self, keep="first"): Index(['cow', 'beetle', 'hippo'], dtype='object') """ if self.is_unique: - return self._shallow_copy() + return self._view() return super().drop_duplicates(keep=keep) @@ -3807,7 +3821,7 @@ def join(self, other, how="left", level=None, return_indexers=False, sort=False) ) if len(other) == 0 and how in ("left", "outer"): - join_index = self._shallow_copy() + join_index = self._view() if return_indexers: rindexer = np.repeat(-1, len(join_index)) return join_index, None, rindexer @@ -3815,7 +3829,7 @@ def join(self, other, how="left", level=None, return_indexers=False, sort=False) return join_index if len(self) == 0 and how in ("right", "outer"): - join_index = other._shallow_copy() + join_index = other._view() if return_indexers: lindexer = np.repeat(-1, len(join_index)) return join_index, lindexer, None diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index eaee4789c29e0..32da2c1cd1f3f 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -214,13 +214,10 @@ def _simple_new(cls, values: Categorical, name: Optional[Hashable] = None): # -------------------------------------------------------------------- - # error: Argument 1 of "_shallow_copy" is incompatible with supertype - # "ExtensionIndex"; supertype defines the argument type as - # "Optional[ExtensionArray]" [override] @doc(Index._shallow_copy) - def _shallow_copy( # type:ignore[override] + def _shallow_copy( self, - values: Optional[Categorical] = None, + values: Categorical, name: Hashable = no_default, ): 
name = self.name if name is no_default else name diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index 55317938d528d..2f0bc12292173 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -797,7 +797,8 @@ def _fast_union(self: _T, other: _T, sort=None) -> _T: dates = concat_compat((left._values, right_chunk)) # With sort being False, we can't infer that result.freq == self.freq # TODO: no tests rely on the _with_freq("infer"); needed? - result = self._shallow_copy(dates)._with_freq("infer") + result = type(self)._simple_new(dates, name=self.name) + result = result._with_freq("infer") return result else: left, right = other, self diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index e9c77fb12d3ff..e306698fe0202 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -469,7 +469,7 @@ def to_series(self, keep_tz=lib.no_default, index=None, name=None): from pandas import Series if index is None: - index = self._shallow_copy() + index = self._view() if name is None: name = self.name diff --git a/pandas/core/indexes/extension.py b/pandas/core/indexes/extension.py index d17ac52e7cdd8..c8bac1f04e50e 100644 --- a/pandas/core/indexes/extension.py +++ b/pandas/core/indexes/extension.py @@ -1,11 +1,10 @@ """ Shared methods for Index subclasses backed by ExtensionArray. """ -from typing import Hashable, List, Optional, TypeVar +from typing import List, TypeVar import numpy as np -from pandas._libs import lib from pandas.compat.numpy import function as nv from pandas.errors import AbstractMethodError from pandas.util._decorators import cache_readonly, doc @@ -213,19 +212,6 @@ class ExtensionIndex(Index): __le__ = _make_wrapped_comparison_op("__le__") __ge__ = _make_wrapped_comparison_op("__ge__") - @doc(Index._shallow_copy) - def _shallow_copy( - self, values: Optional[ExtensionArray] = None, name: Hashable = lib.no_default - ): - name = self.name if name is lib.no_default else name - - if values is not None: - return self._simple_new(values, name=name) - - result = self._simple_new(self._data, name=name) - result._cache = self._cache - return result - @property def _has_complex_internals(self) -> bool: # used to avoid libreduction code paths, which raise or require conversion diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index 46dbcdaab42b0..934780eb9acef 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -735,9 +735,7 @@ def levels(self): # Use cache_readonly to ensure that self.get_locs doesn't repeatedly # create new IndexEngine # https://github.com/pandas-dev/pandas/issues/31648 - result = [ - x._shallow_copy(name=name) for x, name in zip(self._levels, self._names) - ] + result = [x._rename(name=name) for x, name in zip(self._levels, self._names)] for level in result: # disallow midx.levels[0].name = "foo" level._no_setting_name = True @@ -764,13 +762,13 @@ def _set_levels( if level is None: new_levels = FrozenList( - ensure_index(lev, copy=copy)._shallow_copy() for lev in levels + ensure_index(lev, copy=copy)._view() for lev in levels ) else: level_numbers = [self._get_level_number(lev) for lev in level] new_levels_list = list(self._levels) for lev_num, lev in zip(level_numbers, levels): - new_levels_list[lev_num] = ensure_index(lev, copy=copy)._shallow_copy() + new_levels_list[lev_num] = ensure_index(lev, copy=copy)._view() new_levels = FrozenList(new_levels_list) if verify_integrity: @@ -886,7 +884,7 
@@ def set_levels(self, levels, level=None, inplace=None, verify_integrity=True): if inplace: idx = self else: - idx = self._shallow_copy() + idx = self._view() idx._reset_identity() idx._set_levels( levels, level=level, validate=True, verify_integrity=verify_integrity @@ -1046,7 +1044,7 @@ def set_codes(self, codes, level=None, inplace=None, verify_integrity=True): if inplace: idx = self else: - idx = self._shallow_copy() + idx = self._view() idx._reset_identity() idx._set_codes(codes, level=level, verify_integrity=verify_integrity) if not inplace: @@ -1082,17 +1080,17 @@ def _constructor(self): return type(self).from_tuples @doc(Index._shallow_copy) - def _shallow_copy(self, values=None, name=lib.no_default): + def _shallow_copy(self, values, name=lib.no_default): names = name if name is not lib.no_default else self.names - if values is not None: - return type(self).from_tuples(values, sortorder=None, names=names) + return type(self).from_tuples(values, sortorder=None, names=names) + def _view(self: MultiIndex) -> MultiIndex: result = type(self)( levels=self.levels, codes=self.codes, sortorder=None, - names=names, + names=self.names, verify_integrity=False, ) result._cache = self._cache.copy() @@ -3652,7 +3650,7 @@ def astype(self, dtype, copy=True): "is not supported" ) elif copy is True: - return self._shallow_copy() + return self._view() return self def _validate_fill_value(self, item): diff --git a/pandas/core/indexes/numeric.py b/pandas/core/indexes/numeric.py index d6427aed6edf3..3ef0fbcb99f73 100644 --- a/pandas/core/indexes/numeric.py +++ b/pandas/core/indexes/numeric.py @@ -111,8 +111,8 @@ def _maybe_cast_slice_bound(self, label, side: str, kind): # ---------------------------------------------------------------- @doc(Index._shallow_copy) - def _shallow_copy(self, values=None, name: Hashable = lib.no_default): - if values is not None and not self._can_hold_na and values.dtype.kind == "f": + def _shallow_copy(self, values, name: Hashable = lib.no_default): + if not self._can_hold_na and values.dtype.kind == "f": name = self.name if name is lib.no_default else name # Ensure we are not returning an Int64Index with float data: return Float64Index._simple_new(values, name=name) diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py index 0f73b62faf6e7..525053cc620f6 100644 --- a/pandas/core/indexes/range.py +++ b/pandas/core/indexes/range.py @@ -422,22 +422,22 @@ def __iter__(self): yield from self._range @doc(Int64Index._shallow_copy) - def _shallow_copy(self, values=None, name: Hashable = no_default): + def _shallow_copy(self, values, name: Hashable = no_default): name = self.name if name is no_default else name - if values is not None: - if values.dtype.kind == "f": - return Float64Index(values, name=name) - return Int64Index._simple_new(values, name=name) + if values.dtype.kind == "f": + return Float64Index(values, name=name) + return Int64Index._simple_new(values, name=name) - result = self._simple_new(self._range, name=name) + def _view(self: RangeIndex) -> RangeIndex: + result = type(self)._simple_new(self._range, name=self.name) result._cache = self._cache return result @doc(Int64Index.copy) def copy(self, name=None, deep=False, dtype: Optional[Dtype] = None, names=None): name = self._validate_names(name=name, names=names, deep=deep)[0] - new_index = self._shallow_copy(name=name) + new_index = self._rename(name=name) if dtype: warnings.warn( diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py index e80fde87ffdee..79eb8de083958 
100644 --- a/pandas/core/indexes/timedeltas.py +++ b/pandas/core/indexes/timedeltas.py @@ -138,7 +138,7 @@ def __new__( if copy: return data.copy() else: - return data._shallow_copy() + return data._view() # - Cases checked above all return/raise before reaching here - # diff --git a/pandas/core/reshape/reshape.py b/pandas/core/reshape/reshape.py index abdc6ac9dfcbe..c335768748b26 100644 --- a/pandas/core/reshape/reshape.py +++ b/pandas/core/reshape/reshape.py @@ -261,7 +261,7 @@ def get_new_values(self, values, fill_value=None): def get_new_columns(self, value_columns): if value_columns is None: if self.lift == 0: - return self.removed_level._shallow_copy(name=self.removed_name) + return self.removed_level._rename(name=self.removed_name) lev = self.removed_level.insert(0, item=self.removed_level._na_value) return lev.rename(self.removed_name) @@ -639,7 +639,7 @@ def _convert_level_number(level_num, columns): new_names = this.columns.names[:-1] new_columns = MultiIndex.from_tuples(unique_groups, names=new_names) else: - new_columns = this.columns.levels[0]._shallow_copy(name=this.columns.names[0]) + new_columns = this.columns.levels[0]._rename(name=this.columns.names[0]) unique_groups = new_columns # time to ravel the values diff --git a/pandas/tests/indexes/common.py b/pandas/tests/indexes/common.py index c70401ac14e7d..0354e362dc4ac 100644 --- a/pandas/tests/indexes/common.py +++ b/pandas/tests/indexes/common.py @@ -733,7 +733,7 @@ def test_shallow_copy_shares_cache(self): # GH32669, GH36840 idx = self.create_index() idx.get_loc(idx[0]) # populates the _cache. - shallow_copy = idx._shallow_copy() + shallow_copy = idx._view() assert shallow_copy._cache is idx._cache diff --git a/pandas/tests/indexes/multi/test_copy.py b/pandas/tests/indexes/multi/test_copy.py index 7ec3df9fee0e5..5dd74819444cf 100644 --- a/pandas/tests/indexes/multi/test_copy.py +++ b/pandas/tests/indexes/multi/test_copy.py @@ -30,7 +30,7 @@ def test_copy(idx): def test_shallow_copy(idx): - i_copy = idx._shallow_copy() + i_copy = idx._view() assert_multiindex_copied(i_copy, idx) diff --git a/pandas/tests/indexes/multi/test_names.py b/pandas/tests/indexes/multi/test_names.py index de7d2b4410d42..79d17dd0b6760 100644 --- a/pandas/tests/indexes/multi/test_names.py +++ b/pandas/tests/indexes/multi/test_names.py @@ -31,7 +31,7 @@ def test_changing_names(idx): view = idx.view() copy = idx.copy() - shallow_copy = idx._shallow_copy() + shallow_copy = idx._view() # changing names should not change level names on object new_names = [name + "a" for name in idx.names] diff --git a/pandas/tests/indexes/period/test_period.py b/pandas/tests/indexes/period/test_period.py index fd4b34a1b32a9..af5ce1945a671 100644 --- a/pandas/tests/indexes/period/test_period.py +++ b/pandas/tests/indexes/period/test_period.py @@ -82,7 +82,7 @@ def test_make_time_series(self): def test_shallow_copy_empty(self): # GH13067 idx = PeriodIndex([], freq="M") - result = idx._shallow_copy() + result = idx._view() expected = idx tm.assert_index_equal(result, expected)
Take these two special cases out of `_shallow_copy`. We can get rid of some more `_shallow_copy` usages following #38140.
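A minimal sketch of the resulting split, using a toy class rather than the real pandas `Index` internals (all names below are illustrative only): `_shallow_copy` now always receives new values, while `_view` and `_rename` cover the cases that previously hid behind `values=None`.

```python
# Toy sketch of the split (not the actual pandas Index implementation).
class MyIndex:
    def __init__(self, values, name=None):
        self._values = values
        self._name = name
        self._cache = {}

    @classmethod
    def _simple_new(cls, values, name=None):
        return cls(values, name=name)

    def _shallow_copy(self, values, name=None):
        # `values` is now required: always wrap new data in a new object
        return self._simple_new(values, name=name if name is not None else self._name)

    def _view(self):
        # same data and name; sharing the attribute cache is safe
        result = self._simple_new(self._values, name=self._name)
        result._cache = self._cache
        return result

    def _rename(self, name):
        # a view whose already-validated name is swapped out
        result = self._view()
        result._name = name
        return result
```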
https://api.github.com/repos/pandas-dev/pandas/pulls/39651
2021-02-07T17:55:19Z
2021-02-08T19:36:43Z
2021-02-08T19:36:43Z
2021-02-08T19:50:59Z
BUG: equals/assert_numpy_array_equal with non-singleton NAs
diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index bed3484793bcc..a3a9a4c5eda08 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -454,7 +454,8 @@ Other - Bug in :func:`pandas.testing.assert_series_equal`, :func:`pandas.testing.assert_frame_equal`, :func:`pandas.testing.assert_index_equal` and :func:`pandas.testing.assert_extension_array_equal` incorrectly raising when an attribute has an unrecognized NA type (:issue:`39461`) - Bug in :class:`Styler` where ``subset`` arg in methods raised an error for some valid multiindex slices (:issue:`33562`) - :class:`Styler` rendered HTML output minor alterations to support w3 good code standard (:issue:`39626`) -- +- Bug in :meth:`DataFrame.equals`, :meth:`Series.equals`, :meth:`Index.equals` with object-dtype containing ``np.datetime64("NaT")`` or ``np.timedelta64("NaT")`` (:issue:`39650`) + .. --------------------------------------------------------------------------- diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx index 0f796b2b65dff..6793ea0ab3bc2 100644 --- a/pandas/_libs/lib.pyx +++ b/pandas/_libs/lib.pyx @@ -73,6 +73,7 @@ from pandas._libs.tslib import array_to_datetime from pandas._libs.missing cimport ( C_NA, checknull, + is_matching_na, is_null_datetime64, is_null_timedelta64, isnaobj, @@ -584,8 +585,10 @@ def array_equivalent_object(left: object[:], right: object[:]) -> bool: return False elif (x is C_NA) ^ (y is C_NA): return False - elif not (PyObject_RichCompareBool(x, y, Py_EQ) or - (x is None or is_nan(x)) and (y is None or is_nan(y))): + elif not ( + PyObject_RichCompareBool(x, y, Py_EQ) + or is_matching_na(x, y, nan_matches_none=True) + ): return False except ValueError: # Avoid raising ValueError when comparing Numpy arrays to other types diff --git a/pandas/_libs/missing.pxd b/pandas/_libs/missing.pxd index e02b84381b62c..ce8e8007e7630 100644 --- a/pandas/_libs/missing.pxd +++ b/pandas/_libs/missing.pxd @@ -1,6 +1,8 @@ from numpy cimport ndarray, uint8_t +cpdef bint is_matching_na(object left, object right, bint nan_matches_none=*) + cpdef bint checknull(object val) cpdef bint checknull_old(object val) cpdef ndarray[uint8_t] isnaobj(ndarray arr) diff --git a/pandas/_libs/missing.pyx b/pandas/_libs/missing.pyx index abf38265ddc6d..d91d0261a1b33 100644 --- a/pandas/_libs/missing.pyx +++ b/pandas/_libs/missing.pyx @@ -29,6 +29,58 @@ cdef: bint is_32bit = not IS64 +cpdef bint is_matching_na(object left, object right, bint nan_matches_none=False): + """ + Check if two scalars are both NA of matching types. + + Parameters + ---------- + left : Any + right : Any + nan_matches_none : bool, default False + For backwards compatibility, consider NaN as matching None. 
+ + Returns + ------- + bool + """ + if left is None: + if nan_matches_none and util.is_nan(right): + return True + return right is None + elif left is C_NA: + return right is C_NA + elif left is NaT: + return right is NaT + elif util.is_float_object(left): + if nan_matches_none and right is None: + return True + return ( + util.is_nan(left) + and util.is_float_object(right) + and util.is_nan(right) + ) + elif util.is_complex_object(left): + return ( + util.is_nan(left) + and util.is_complex_object(right) + and util.is_nan(right) + ) + elif util.is_datetime64_object(left): + return ( + get_datetime64_value(left) == NPY_NAT + and util.is_datetime64_object(right) + and get_datetime64_value(right) == NPY_NAT + ) + elif util.is_timedelta64_object(left): + return ( + get_timedelta64_value(left) == NPY_NAT + and util.is_timedelta64_object(right) + and get_timedelta64_value(right) == NPY_NAT + ) + return False + + cpdef bint checknull(object val): """ Return boolean describing of the input is NA-like, defined here as any diff --git a/pandas/tests/dtypes/test_missing.py b/pandas/tests/dtypes/test_missing.py index 0d92ef02e07c8..f6566d205e65c 100644 --- a/pandas/tests/dtypes/test_missing.py +++ b/pandas/tests/dtypes/test_missing.py @@ -10,7 +10,7 @@ from pandas._libs import missing as libmissing from pandas._libs.tslibs import iNaT, is_null_datetimelike -from pandas.core.dtypes.common import is_scalar +from pandas.core.dtypes.common import is_float, is_scalar from pandas.core.dtypes.dtypes import DatetimeTZDtype, IntervalDtype, PeriodDtype from pandas.core.dtypes.missing import ( array_equivalent, @@ -653,3 +653,25 @@ def test_is_null_datetimelike(self): for value in never_na_vals: assert not is_null_datetimelike(value) + + def test_is_matching_na(self, nulls_fixture, nulls_fixture2): + left = nulls_fixture + right = nulls_fixture2 + + assert libmissing.is_matching_na(left, left) + + if left is right: + assert libmissing.is_matching_na(left, right) + elif is_float(left) and is_float(right): + # np.nan vs float("NaN") we consider as matching + assert libmissing.is_matching_na(left, right) + else: + assert not libmissing.is_matching_na(left, right) + + def test_is_matching_na_nan_matches_none(self): + + assert not libmissing.is_matching_na(None, np.nan) + assert not libmissing.is_matching_na(np.nan, None) + + assert libmissing.is_matching_na(None, np.nan, nan_matches_none=True) + assert libmissing.is_matching_na(np.nan, None, nan_matches_none=True) diff --git a/pandas/tests/series/methods/test_equals.py b/pandas/tests/series/methods/test_equals.py index cf55482fefe22..85d74196e59b4 100644 --- a/pandas/tests/series/methods/test_equals.py +++ b/pandas/tests/series/methods/test_equals.py @@ -1,9 +1,14 @@ from contextlib import nullcontext +import copy import numpy as np import pytest -from pandas import MultiIndex, Series +from pandas._libs.missing import is_matching_na + +from pandas.core.dtypes.common import is_float + +from pandas import Index, MultiIndex, Series import pandas._testing as tm @@ -65,3 +70,54 @@ def test_equals_false_negative(): assert s1.equals(s4) assert s1.equals(s5) assert s5.equals(s6) + + +def test_equals_matching_nas(): + # matching but not identical NAs + left = Series([np.datetime64("NaT")], dtype=object) + right = Series([np.datetime64("NaT")], dtype=object) + assert left.equals(right) + assert Index(left).equals(Index(right)) + assert left.array.equals(right.array) + + left = Series([np.timedelta64("NaT")], dtype=object) + right = Series([np.timedelta64("NaT")], 
dtype=object) + assert left.equals(right) + assert Index(left).equals(Index(right)) + assert left.array.equals(right.array) + + left = Series([np.float64("NaN")], dtype=object) + right = Series([np.float64("NaN")], dtype=object) + assert left.equals(right) + assert Index(left).equals(Index(right)) + assert left.array.equals(right.array) + + +def test_equals_mismatched_nas(nulls_fixture, nulls_fixture2): + # GH#39650 + left = nulls_fixture + right = nulls_fixture2 + if hasattr(right, "copy"): + right = right.copy() + else: + right = copy.copy(right) + + ser = Series([left], dtype=object) + ser2 = Series([right], dtype=object) + + if is_matching_na(left, right): + assert ser.equals(ser2) + elif (left is None and is_float(right)) or (right is None and is_float(left)): + assert ser.equals(ser2) + else: + assert not ser.equals(ser2) + + +def test_equals_none_vs_nan(): + # GH#39650 + ser = Series([1, None], dtype=object) + ser2 = Series([1, np.nan], dtype=object) + + assert ser.equals(ser2) + assert Index(ser).equals(Index(ser2)) + assert ser.array.equals(ser2.array) diff --git a/pandas/tests/util/test_assert_numpy_array_equal.py b/pandas/tests/util/test_assert_numpy_array_equal.py index d29ddedd2fdd6..660402ee857e3 100644 --- a/pandas/tests/util/test_assert_numpy_array_equal.py +++ b/pandas/tests/util/test_assert_numpy_array_equal.py @@ -1,3 +1,5 @@ +import copy + import numpy as np import pytest @@ -198,6 +200,14 @@ def test_numpy_array_equal_identical_na(nulls_fixture): tm.assert_numpy_array_equal(a, a) + # matching but not the identical object + if hasattr(nulls_fixture, "copy"): + other = nulls_fixture.copy() + else: + other = copy.copy(nulls_fixture) + b = np.array([other], dtype=object) + tm.assert_numpy_array_equal(a, b) + def test_numpy_array_equal_different_na(): a = np.array([np.nan], dtype=object)
- [ ] closes #xxxx - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
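The behavioral change, restated as a small demo mirroring the tests added above (NumPy's NaT sentinels are not singletons, so both identity checks and `==` fail on them):

```python
import numpy as np
import pandas as pd

left = pd.Series([np.datetime64("NaT")], dtype=object)
right = pd.Series([np.datetime64("NaT")], dtype=object)
print(left.equals(right))   # True with this fix: matching NA types compare equal

mixed = pd.Series([np.timedelta64("NaT")], dtype=object)
print(left.equals(mixed))   # False: datetime64-NaT does not match timedelta64-NaT
```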
https://api.github.com/repos/pandas-dev/pandas/pulls/39650
2021-02-07T17:48:47Z
2021-02-10T20:35:05Z
2021-02-10T20:35:05Z
2021-02-10T20:36:30Z
DOC: fix an incomplete sentence
diff --git a/doc/source/user_guide/dsintro.rst b/doc/source/user_guide/dsintro.rst index f2bb99dd2ebc0..7d65e0c6faff7 100644 --- a/doc/source/user_guide/dsintro.rst +++ b/doc/source/user_guide/dsintro.rst @@ -126,7 +126,7 @@ However, operations such as slicing will also slice the index. .. note:: We will address array-based indexing like ``s[[4, 3, 1]]`` - in :ref:`section <indexing>`. + in :ref:`section on indexing <indexing>`. Like a NumPy array, a pandas Series has a :attr:`~Series.dtype`.
"We will address array-based indexing like s[[4, 3, 1]] in section." -> "We will address array-based indexing like s[[4, 3, 1]] in section on indexing." - [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/39648
2021-02-07T17:06:30Z
2021-02-08T21:47:22Z
2021-02-08T21:47:22Z
2021-02-08T22:52:16Z
DOC: typo in 1.2.2 whatsnew
diff --git a/doc/source/whatsnew/v1.2.2.rst b/doc/source/whatsnew/v1.2.2.rst index 52dbf3a4ae3ea..4f80263fda92d 100644 --- a/doc/source/whatsnew/v1.2.2.rst +++ b/doc/source/whatsnew/v1.2.2.rst @@ -17,7 +17,7 @@ Fixed regressions - Fixed regression in :func:`read_excel` that caused it to raise ``AttributeError`` when checking version of older xlrd versions (:issue:`38955`) - Fixed regression in :class:`DataFrame` constructor reordering element when construction from datetime ndarray with dtype not ``"datetime64[ns]"`` (:issue:`39422`) -- Fixed regression in :class:`DataFrame.astype` and :class:`Series.astype` not casting to bytes dtype (:issue:`39474`) +- Fixed regression in :meth:`DataFrame.astype` and :meth:`Series.astype` not casting to bytes dtype (:issue:`39474`) - Fixed regression in :meth:`~DataFrame.to_pickle` failing to create bz2/xz compressed pickle files with ``protocol=5`` (:issue:`39002`) - Fixed regression in :func:`pandas.testing.assert_series_equal` and :func:`pandas.testing.assert_frame_equal` always raising ``AssertionError`` when comparing extension dtypes (:issue:`39410`) - Fixed regression in :meth:`~DataFrame.to_csv` opening ``codecs.StreamWriter`` in binary mode instead of in text mode and ignoring user-provided ``mode`` (:issue:`39247`)
- [ ] closes #xxxx - [ ] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/39646
2021-02-07T17:00:17Z
2021-02-07T20:16:53Z
2021-02-07T20:16:53Z
2021-02-07T20:16:59Z
Backport PR #39374 on branch 1.2.x (DOC: Clarify behavior for Series with dict-like data and index)
diff --git a/pandas/core/series.py b/pandas/core/series.py index b4e8696ad9e13..4b0d5f0b407be 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -169,8 +169,8 @@ class Series(base.IndexOpsMixin, generic.NDFrame): Values must be hashable and have the same length as `data`. Non-unique index values are allowed. Will default to RangeIndex (0, 1, 2, ..., n) if not provided. If data is dict-like - and index is None, then the values in the index are used to - reindex the Series after it is created using the keys in the data. + and index is None, then the keys in the data are used as the index. If the + index is not None, the resulting Series is reindexed with the index values. dtype : str, numpy.dtype, or ExtensionDtype, optional Data type for the output Series. If not specified, this will be inferred from `data`. @@ -179,6 +179,33 @@ class Series(base.IndexOpsMixin, generic.NDFrame): The name to give to the Series. copy : bool, default False Copy input data. + + Examples + -------- + Constructing Series from a dictionary with an Index specified + + >>> d = {'a': 1, 'b': 2, 'c': 3} + >>> ser = pd.Series(data=d, index=['a', 'b', 'c']) + >>> ser + a 1 + b 2 + c 3 + dtype: int64 + + The keys of the dictionary match with the Index values, hence the Index + values have no effect. + + >>> d = {'a': 1, 'b': 2, 'c': 3} + >>> ser = pd.Series(data=d, index=['x', 'y', 'z']) + >>> ser + x NaN + y NaN + z NaN + dtype: float64 + + Note that the Index is first built with the keys from the dictionary. + After this, the Series is reindexed with the given Index values, hence we + get all NaN as a result. """ _typ = "series"
Backport PR #39374: DOC: Clarify behavior for Series with dict-like data and index
https://api.github.com/repos/pandas-dev/pandas/pulls/39645
2021-02-07T16:26:47Z
2021-02-07T18:04:27Z
2021-02-07T18:04:27Z
2021-02-07T18:04:27Z
fix #39556 (infer_freq not working with freq="H" and DST)
diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index 09e1853429d9f..d8c4f633d2d0d 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -287,6 +287,7 @@ Datetimelike - Bug in :meth:`Timestamp.round`, :meth:`Timestamp.floor`, :meth:`Timestamp.ceil` for values near the implementation bounds of :class:`Timestamp` (:issue:`39244`) - Bug in :meth:`Timedelta.round`, :meth:`Timedelta.floor`, :meth:`Timedelta.ceil` for values near the implementation bounds of :class:`Timedelta` (:issue:`38964`) - Bug in :func:`date_range` incorrectly creating :class:`DatetimeIndex` containing ``NaT`` instead of raising ``OutOfBoundsDatetime`` in corner cases (:issue:`24124`) +- Bug in :func:`infer_freq` incorrectly fails to infer 'H' frequency of :class:`DatetimeIndex` if the latter has a timezone and crosses DST boundaries (:issue:`39556`) Timedelta ^^^^^^^^^ diff --git a/pandas/tests/tseries/frequencies/test_inference.py b/pandas/tests/tseries/frequencies/test_inference.py index 95edd038dab9b..892be119093e5 100644 --- a/pandas/tests/tseries/frequencies/test_inference.py +++ b/pandas/tests/tseries/frequencies/test_inference.py @@ -267,7 +267,7 @@ def test_infer_freq_tz(tz_naive_fixture, expected, dates): ], ) @pytest.mark.parametrize( - "freq", ["3H", "10T", "3601S", "3600001L", "3600000001U", "3600000000001N"] + "freq", ["H", "3H", "10T", "3601S", "3600001L", "3600000001U", "3600000000001N"] ) def test_infer_freq_tz_transition(tz_naive_fixture, date_pair, freq): # see gh-8772 diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py index 0d5598fcaf890..2257ce47d8936 100644 --- a/pandas/tseries/frequencies.py +++ b/pandas/tseries/frequencies.py @@ -240,16 +240,17 @@ def get_freq(self) -> Optional[str]: return None delta = self.deltas[0] - if _is_multiple(delta, _ONE_DAY): + if delta and _is_multiple(delta, _ONE_DAY): return self._infer_daily_rule() # Business hourly, maybe. 17: one day / 65: one weekend if self.hour_deltas in ([1, 17], [1, 65], [1, 17, 65]): return "BH" + # Possibly intraday frequency. Here we use the # original .asi8 values as the modified values # will not work around DST transitions. See #8772 - elif not self.is_unique_asi8: + if not self.is_unique_asi8: return None delta = self.deltas_asi8[0]
- check that the deltas are unique before checking if they are day multiples - add a test with freq="H" that reproduces the bug - [x] closes #39556 - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [x] whatsnew entry => not sure where to enter this ...
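A quick reproduction of the reported case (assuming an environment with the usual tz database). The hourly range below crosses the 2014-11-02 fall-back transition, where the wall-clock delta between two consecutive stamps is zero:

```python
import pandas as pd

idx = pd.date_range("2014-11-01 23:00", periods=6, freq="H", tz="US/Eastern")
# Before the fix, the zero wall-clock delta passed the day-multiple check
# and the daily-rule path returned None; now infer_freq recovers "H".
print(pd.infer_freq(idx))  # "H"
```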
https://api.github.com/repos/pandas-dev/pandas/pulls/39644
2021-02-07T15:18:22Z
2021-02-23T02:25:47Z
2021-02-23T02:25:47Z
2021-02-23T02:25:56Z
DOC: Fixed comment for pandas.unique (#39557)
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index 2b0d3f5aa8862..cdbef673643e8 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -324,7 +324,8 @@ def unique(values): Hash table-based unique. Uniques are returned in order of appearance. This does NOT sort. - Significantly faster than numpy.unique. Includes NA values. + Significantly faster than numpy.unique for long enough sequences. + Includes NA values. Parameters ----------
xref https://github.com/pandas-dev/pandas/pull/39557#issuecomment-774667542
https://api.github.com/repos/pandas-dev/pandas/pulls/39643
2021-02-07T14:38:26Z
2021-02-07T16:35:23Z
2021-02-07T16:35:23Z
2021-02-07T16:36:05Z
CLN: reorganise vars in `Styler` init
diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py index 3d9eb4e96f78a..b3eabf3552a75 100644 --- a/pandas/io/formats/style.py +++ b/pandas/io/formats/style.py @@ -157,53 +157,40 @@ def __init__( na_rep: Optional[str] = None, uuid_len: int = 5, ): - self.ctx: DefaultDict[Tuple[int, int], List[str]] = defaultdict(list) - self._todo: List[Tuple[Callable, Tuple, Dict]] = [] - + # validate ordered args if not isinstance(data, (pd.Series, pd.DataFrame)): raise TypeError("``data`` must be a Series or DataFrame") if data.ndim == 1: data = data.to_frame() if not data.index.is_unique or not data.columns.is_unique: raise ValueError("style is not supported for non-unique indices.") - - self.data = data - self.index = data.index - self.columns = data.columns - + assert isinstance(data, DataFrame) + self.data: DataFrame = data + self.index: pd.Index = data.index + self.columns: pd.Index = data.columns + if precision is None: + precision = get_option("display.precision") + self.precision = precision + self.table_styles = table_styles if not isinstance(uuid_len, int) or not uuid_len >= 0: raise TypeError("``uuid_len`` must be an integer in range [0, 32].") self.uuid_len = min(32, uuid_len) self.uuid = (uuid or uuid4().hex[: self.uuid_len]) + "_" - self.table_styles = table_styles self.caption = caption - if precision is None: - precision = get_option("display.precision") - self.precision = precision self.table_attributes = table_attributes - self.hidden_index = False - self.hidden_columns: Sequence[int] = [] self.cell_ids = cell_ids self.na_rep = na_rep - self.tooltips: Optional[_Tooltips] = None - + # assign additional default vars + self.hidden_index: bool = False + self.hidden_columns: Sequence[int] = [] + self.ctx: DefaultDict[Tuple[int, int], List[str]] = defaultdict(list) self.cell_context: Dict[str, Any] = {} - - # display_funcs maps (row, col) -> formatting function - - def default_display_func(x): - if self.na_rep is not None and pd.isna(x): - return self.na_rep - elif is_float(x): - display_format = f"{x:.{self.precision}f}" - return display_format - else: - return x - - self._display_funcs: DefaultDict[ + self._todo: List[Tuple[Callable, Tuple, Dict]] = [] + self.tooltips: Optional[_Tooltips] = None + self._display_funcs: DefaultDict[ # maps (row, col) -> formatting function Tuple[int, int], Callable[[Any], str] - ] = defaultdict(lambda: default_display_func) + ] = defaultdict(lambda: self._default_display_func) def _repr_html_(self) -> str: """ @@ -224,6 +211,15 @@ def _init_tooltips(self): if self.tooltips is None: self.tooltips = _Tooltips() + def _default_display_func(self, x): + if self.na_rep is not None and pd.isna(x): + return self.na_rep + elif is_float(x): + display_format = f"{x:.{self.precision}f}" + return display_format + else: + return x + def set_tooltips(self, ttips: DataFrame) -> Styler: """ Add string based tooltips that will appear in the `Styler` HTML result. These
Cleans up the Styler class by grouping and ordering the `__init__` variables and adding typing where necessary.
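The main structural change is moving the per-instance `default_display_func` closure out of `__init__` and onto the class. A toy version of the pattern (illustrative names, not the real Styler):

```python
from collections import defaultdict

class MiniStyler:
    def __init__(self, precision: int = 2):
        self.precision = precision
        # the lambda defers attribute lookup, so the defaultdict still
        # resolves the bound method (and the current precision) lazily
        self._display_funcs = defaultdict(lambda: self._default_display_func)

    def _default_display_func(self, x):
        return f"{x:.{self.precision}f}" if isinstance(x, float) else x

s = MiniStyler()
print(s._display_funcs[(0, 0)](2.6789))  # '2.68'
```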
https://api.github.com/repos/pandas-dev/pandas/pulls/39642
2021-02-07T13:41:14Z
2021-02-08T13:50:14Z
2021-02-08T13:50:14Z
2021-02-10T08:36:18Z
Revert "Backport PR #39526 on branch 1.2.x (CI: pin numpy for CI / Checks github action)"
diff --git a/environment.yml b/environment.yml index 71d7e47894f9d..6f3f81d8a4d77 100644 --- a/environment.yml +++ b/environment.yml @@ -3,7 +3,7 @@ channels: - conda-forge dependencies: # required - - numpy>=1.16.5, <1.20 # gh-39513 + - numpy>=1.16.5 - python=3 - python-dateutil>=2.7.3 - pytz diff --git a/requirements-dev.txt b/requirements-dev.txt index 33a315884612d..f0d65104ead8e 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -1,7 +1,7 @@ # This file is auto-generated from environment.yml, do not modify. # See that file for comments about the need/usage of each dependency. -numpy>=1.16.5, <1.20 +numpy>=1.16.5 python-dateutil>=2.7.3 pytz asv
Reverts pandas-dev/pandas#39529. Will be merging this just prior to tagging 1.2.2 (see https://github.com/pandas-dev/pandas/issues/39513#issuecomment-771006563) and then re-applying after release so that CI on 1.2.x is green.
https://api.github.com/repos/pandas-dev/pandas/pulls/39641
2021-02-07T13:39:01Z
2021-02-08T18:07:41Z
2021-02-08T18:07:41Z
2021-02-08T18:07:45Z
REGR: fix transform of empty DataFrame/Series
diff --git a/doc/source/whatsnew/v1.2.2.rst b/doc/source/whatsnew/v1.2.2.rst index 974f84d3b244a..63e793c013497 100644 --- a/doc/source/whatsnew/v1.2.2.rst +++ b/doc/source/whatsnew/v1.2.2.rst @@ -21,6 +21,7 @@ Fixed regressions - Fixed regression in :meth:`~DataFrame.to_pickle` failing to create bz2/xz compressed pickle files with ``protocol=5`` (:issue:`39002`) - Fixed regression in :func:`pandas.testing.assert_series_equal` and :func:`pandas.testing.assert_frame_equal` always raising ``AssertionError`` when comparing extension dtypes (:issue:`39410`) - Fixed regression in :meth:`~DataFrame.to_csv` opening ``codecs.StreamWriter`` in binary mode instead of in text mode and ignoring user-provided ``mode`` (:issue:`39247`) +- Fixed regression in :meth:`DataFrame.transform` failing in case of an empty DataFrame or Series (:issue:`39636`) - Fixed regression in :meth:`core.window.rolling.Rolling.count` where the ``min_periods`` argument would be set to ``0`` after the operation (:issue:`39554`) - diff --git a/pandas/core/aggregation.py b/pandas/core/aggregation.py index e3f159346cd51..4dbce8f75898f 100644 --- a/pandas/core/aggregation.py +++ b/pandas/core/aggregation.py @@ -457,7 +457,7 @@ def transform( # Functions that transform may return empty Series/DataFrame # when the dtype is not appropriate - if isinstance(result, (ABCSeries, ABCDataFrame)) and result.empty: + if isinstance(result, (ABCSeries, ABCDataFrame)) and result.empty and not obj.empty: raise ValueError("Transform function failed") if not isinstance(result, (ABCSeries, ABCDataFrame)) or not result.index.equals( obj.index diff --git a/pandas/tests/apply/test_frame_transform.py b/pandas/tests/apply/test_frame_transform.py index bff0306a50ee6..c4959ee2c8962 100644 --- a/pandas/tests/apply/test_frame_transform.py +++ b/pandas/tests/apply/test_frame_transform.py @@ -274,3 +274,13 @@ def test_transform_mixed_column_name_dtypes(): msg = r"Column\(s\) \[1, 'b'\] do not exist" with pytest.raises(SpecificationError, match=msg): df.transform({"a": int, 1: str, "b": int}) + + +def test_transform_empty_dataframe(): + # https://github.com/pandas-dev/pandas/issues/39636 + df = DataFrame([], columns=["col1", "col2"]) + result = df.transform(lambda x: x + 10) + tm.assert_frame_equal(result, df) + + result = df["col1"].transform(lambda x: x + 10) + tm.assert_series_equal(result, df["col1"])
Closes #39636
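Minimal reproduction of the regression, matching the test added above:

```python
import pandas as pd

df = pd.DataFrame([], columns=["col1", "col2"])
# Pre-fix this raised ValueError("Transform function failed") because the
# (correct) empty result tripped the empty-result sanity check.
print(df.transform(lambda x: x + 10))          # empty DataFrame, same columns
print(df["col1"].transform(lambda x: x + 10))  # empty Series
```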
https://api.github.com/repos/pandas-dev/pandas/pulls/39639
2021-02-07T12:55:47Z
2021-02-07T16:22:38Z
2021-02-07T16:22:38Z
2021-02-07T19:08:27Z
API: transform behaves differently with 'ffill' on DataFrameGroupBy and SeriesGroupBy
diff --git a/pandas/tests/groupby/transform/test_transform.py b/pandas/tests/groupby/transform/test_transform.py index 3f04f0f1163e7..04eb2f42d745b 100644 --- a/pandas/tests/groupby/transform/test_transform.py +++ b/pandas/tests/groupby/transform/test_transform.py @@ -769,6 +769,18 @@ def test_transform_numeric_ret(cols, exp, comp_func, agg_func, request): comp_func(result, exp) +def test_transform_ffill(): + # GH 24211 + data = [["a", 0.0], ["a", float("nan")], ["b", 1.0], ["b", float("nan")]] + df = DataFrame(data, columns=["key", "values"]) + result = df.groupby("key").transform("ffill") + expected = DataFrame({"values": [0.0, 0.0, 1.0, 1.0]}) + tm.assert_frame_equal(result, expected) + result = df.groupby("key")["values"].transform("ffill") + expected = Series([0.0, 0.0, 1.0, 1.0], name="values") + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("mix_groupings", [True, False]) @pytest.mark.parametrize("as_series", [True, False]) @pytest.mark.parametrize("val1,val2", [("foo", "bar"), (1, 2), (1.0, 2.0)])
- [ ] closes #24211 - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
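The behavior under discussion, using the same data as the new test; both paths are expected to forward-fill within each group:

```python
import pandas as pd

df = pd.DataFrame(
    [["a", 0.0], ["a", float("nan")], ["b", 1.0], ["b", float("nan")]],
    columns=["key", "values"],
)
print(df.groupby("key").transform("ffill"))            # values: 0.0, 0.0, 1.0, 1.0
print(df.groupby("key")["values"].transform("ffill"))  # same values, as a Series
```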
https://api.github.com/repos/pandas-dev/pandas/pulls/39638
2021-02-07T12:46:04Z
2021-02-15T22:34:48Z
2021-02-15T22:34:47Z
2021-02-15T22:34:51Z
Revert "Fixed comment for pandas.unique"
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index 51eeabc14c4c9..58384405a5cab 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -321,8 +321,7 @@ def unique(values): Hash table-based unique. Uniques are returned in order of appearance. This does NOT sort. - Significantly faster than numpy.unique for long enough sequences. - Includes NA values. + Significantly faster than numpy.unique. Includes NA values. Parameters ----------
Reverts pandas-dev/pandas#39557
https://api.github.com/repos/pandas-dev/pandas/pulls/39637
2021-02-07T12:41:00Z
2021-02-07T16:25:04Z
2021-02-07T16:25:04Z
2021-02-07T16:28:05Z
Backport PR #39551: CI: numpy deprecation warnings
diff --git a/asv_bench/benchmarks/series_methods.py b/asv_bench/benchmarks/series_methods.py index 2db46abca119c..9f3689779d056 100644 --- a/asv_bench/benchmarks/series_methods.py +++ b/asv_bench/benchmarks/series_methods.py @@ -87,8 +87,8 @@ def setup(self): self.vals_short = np.arange(2).astype(object) self.vals_long = np.arange(10 ** 5).astype(object) # because of nans floats are special: - self.s_long_floats = Series(np.arange(10 ** 5, dtype=np.float)).astype(object) - self.vals_long_floats = np.arange(10 ** 5, dtype=np.float).astype(object) + self.s_long_floats = Series(np.arange(10 ** 5, dtype=np.float_)).astype(object) + self.vals_long_floats = np.arange(10 ** 5, dtype=np.float_).astype(object) def time_isin_nans(self): # if nan-objects are different objects,
Backport PR #39551
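Context for the one-character change: `np.float` was a deprecated alias for the builtin `float` (warning since NumPy 1.20, removed in 1.24), while `np.float_` is the concrete `np.float64` dtype the benchmark actually wants. A sketch using the stable spelling:

```python
import numpy as np

# np.float_ == np.float64; spelled concretely here for forward compatibility
vals = np.arange(10, dtype=np.float64).astype(object)
print(vals.dtype, vals[:3])  # object [0.0 1.0 2.0]
```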
https://api.github.com/repos/pandas-dev/pandas/pulls/39635
2021-02-07T12:33:16Z
2021-02-07T13:32:00Z
2021-02-07T13:32:00Z
2021-02-07T13:32:04Z
Backport PR #39604: REGR: Rolling.count setting min_periods after call
diff --git a/doc/source/whatsnew/v1.2.2.rst b/doc/source/whatsnew/v1.2.2.rst index 0ee1abaa2a0eb..e9500ab531785 100644 --- a/doc/source/whatsnew/v1.2.2.rst +++ b/doc/source/whatsnew/v1.2.2.rst @@ -21,6 +21,7 @@ Fixed regressions - Fixed regression in :meth:`~DataFrame.to_pickle` failing to create bz2/xz compressed pickle files with ``protocol=5`` (:issue:`39002`) - Fixed regression in :func:`pandas.testing.assert_series_equal` and :func:`pandas.testing.assert_frame_equal` always raising ``AssertionError`` when comparing extension dtypes (:issue:`39410`) - Fixed regression in :meth:`~DataFrame.to_csv` opening ``codecs.StreamWriter`` in binary mode instead of in text mode and ignoring user-provided ``mode`` (:issue:`39247`) +- Fixed regression in :meth:`core.window.rolling.Rolling.count` where the ``min_periods`` argument would be set to ``0`` after the operation (:issue:`39554`) - .. --------------------------------------------------------------------------- diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py index e6185f8ae0679..3a3df1a15dbcc 100644 --- a/pandas/core/window/rolling.py +++ b/pandas/core/window/rolling.py @@ -2016,7 +2016,11 @@ def count(self): FutureWarning, ) self.min_periods = 0 - return super().count() + result = super().count() + self.min_periods = None + else: + result = super().count() + return result @Substitution(name="rolling") @Appender(_shared_docs["apply"]) diff --git a/pandas/tests/window/test_api.py b/pandas/tests/window/test_api.py index 52c629f96b713..0c1d565a9bd5f 100644 --- a/pandas/tests/window/test_api.py +++ b/pandas/tests/window/test_api.py @@ -319,3 +319,17 @@ def test_multiple_agg_funcs(func, window_size, expected_vals): result = window.agg({"low": ["mean", "max"], "high": ["mean", "min"]}) tm.assert_frame_equal(result, expected) + + +@pytest.mark.filterwarnings("ignore:min_periods:FutureWarning") +def test_dont_modify_attributes_after_methods( + arithmetic_win_operators, closed, center, min_periods +): + # GH 39554 + roll_obj = Series(range(1)).rolling( + 1, center=center, closed=closed, min_periods=min_periods + ) + expected = {attr: getattr(roll_obj, attr) for attr in roll_obj._attributes} + getattr(roll_obj, arithmetic_win_operators)() + result = {attr: getattr(roll_obj, attr) for attr in roll_obj._attributes} + assert result == expected diff --git a/pandas/tests/window/test_base_indexer.py b/pandas/tests/window/test_base_indexer.py index 1723330ec40e1..381edb89747b6 100644 --- a/pandas/tests/window/test_base_indexer.py +++ b/pandas/tests/window/test_base_indexer.py @@ -170,7 +170,10 @@ def test_rolling_forward_window(constructor, func, np_func, expected, np_kwargs) # Check that the function output matches applying an alternative function # if min_periods isn't specified - rolling3 = constructor(values).rolling(window=indexer) + # GH 39604: After count-min_periods deprecation, apply(lambda x: len(x)) + # is equivalent to count after setting min_periods=0 + min_periods = 0 if func == "count" else None + rolling3 = constructor(values).rolling(window=indexer, min_periods=min_periods) result3 = getattr(rolling3, func)() expected3 = constructor(rolling3.apply(lambda x: np_func(x, **np_kwargs))) tm.assert_equal(result3, expected3)
Backport PR #39604
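The regression in one snippet (on 1.2.x, where `Rolling.count` still emits the `min_periods` deprecation warning):

```python
import pandas as pd

roll = pd.Series(range(5)).rolling(3)
roll.count()             # pre-fix this mutated the object: min_periods became 0
print(roll.min_periods)  # with the fix: None, as originally constructed
```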
https://api.github.com/repos/pandas-dev/pandas/pulls/39634
2021-02-07T12:22:37Z
2021-02-07T13:14:34Z
2021-02-07T13:14:34Z
2021-02-07T13:14:37Z
Backport PR #39486: BUG: read_excel with openpyxl and missing dimension
diff --git a/doc/source/whatsnew/v1.2.2.rst b/doc/source/whatsnew/v1.2.2.rst index 0ee1abaa2a0eb..cc5653fe2f360 100644 --- a/doc/source/whatsnew/v1.2.2.rst +++ b/doc/source/whatsnew/v1.2.2.rst @@ -31,6 +31,7 @@ Bug fixes ~~~~~~~~~ - :func:`pandas.read_excel` error message when a specified ``sheetname`` does not exist is now uniform across engines (:issue:`39250`) +- Fixed bug in :func:`pandas.read_excel` producing incorrect results when the engine ``openpyxl`` is used and the excel file is missing or has incorrect dimension information; the fix requires ``openpyxl`` >= 3.0.0, prior versions may still fail (:issue:`38956`, :issue:`39001`) - .. --------------------------------------------------------------------------- diff --git a/pandas/compat/_optional.py b/pandas/compat/_optional.py index 4ed9df2c97fdb..2bde42357b96c 100644 --- a/pandas/compat/_optional.py +++ b/pandas/compat/_optional.py @@ -15,7 +15,7 @@ "matplotlib": "2.2.3", "numexpr": "2.6.8", "odfpy": "1.3.0", - "openpyxl": "2.5.7", + "openpyxl": "2.6.0", "pandas_gbq": "0.12.0", "pyarrow": "0.15.0", "pytest": "5.0.1", diff --git a/pandas/io/excel/_openpyxl.py b/pandas/io/excel/_openpyxl.py index 583baf3b239d8..4f02aff2eb992 100644 --- a/pandas/io/excel/_openpyxl.py +++ b/pandas/io/excel/_openpyxl.py @@ -1,9 +1,10 @@ +from distutils.version import LooseVersion from typing import TYPE_CHECKING, Dict, List, Optional import numpy as np from pandas._typing import FilePathOrBuffer, Scalar, StorageOptions -from pandas.compat._optional import import_optional_dependency +from pandas.compat._optional import get_version, import_optional_dependency from pandas.io.excel._base import BaseExcelReader, ExcelWriter from pandas.io.excel._util import validate_freeze_panes @@ -503,14 +504,14 @@ def _convert_cell(self, cell, convert_float: bool) -> Scalar: from openpyxl.cell.cell import TYPE_BOOL, TYPE_ERROR, TYPE_NUMERIC - if cell.is_date: + if cell.value is None: + return "" # compat with xlrd + elif cell.is_date: return cell.value elif cell.data_type == TYPE_ERROR: return np.nan elif cell.data_type == TYPE_BOOL: return bool(cell.value) - elif cell.value is None: - return "" # compat with xlrd elif cell.data_type == TYPE_NUMERIC: # GH5394 if convert_float: @@ -523,8 +524,29 @@ def _convert_cell(self, cell, convert_float: bool) -> Scalar: return cell.value def get_sheet_data(self, sheet, convert_float: bool) -> List[List[Scalar]]: + # GH 39001 + # Reading of excel file depends on dimension data being correct but + # writers sometimes omit or get it wrong + import openpyxl + + version = LooseVersion(get_version(openpyxl)) + + if version >= "3.0.0": + sheet.reset_dimensions() + data: List[List[Scalar]] = [] - for row in sheet.rows: - data.append([self._convert_cell(cell, convert_float) for cell in row]) + for row_number, row in enumerate(sheet.rows): + converted_row = [self._convert_cell(cell, convert_float) for cell in row] + data.append(converted_row) + + if version >= "3.0.0" and len(data) > 0: + # With dimension reset, openpyxl no longer pads rows + max_width = max(len(data_row) for data_row in data) + if min(len(data_row) for data_row in data) < max_width: + empty_cell: List[Scalar] = [""] + data = [ + data_row + (max_width - len(data_row)) * empty_cell + for data_row in data + ] return data diff --git a/pandas/tests/io/data/excel/dimension_large.xlsx b/pandas/tests/io/data/excel/dimension_large.xlsx new file mode 100644 index 0000000000000..d57abdf2fbbae Binary files /dev/null and b/pandas/tests/io/data/excel/dimension_large.xlsx differ diff --git 
a/pandas/tests/io/data/excel/dimension_missing.xlsx b/pandas/tests/io/data/excel/dimension_missing.xlsx new file mode 100644 index 0000000000000..9274896689a72 Binary files /dev/null and b/pandas/tests/io/data/excel/dimension_missing.xlsx differ diff --git a/pandas/tests/io/data/excel/dimension_small.xlsx b/pandas/tests/io/data/excel/dimension_small.xlsx new file mode 100644 index 0000000000000..78ce4723ebef4 Binary files /dev/null and b/pandas/tests/io/data/excel/dimension_small.xlsx differ diff --git a/pandas/tests/io/excel/test_openpyxl.py b/pandas/tests/io/excel/test_openpyxl.py index 3155e22d3ff5d..640501baffc62 100644 --- a/pandas/tests/io/excel/test_openpyxl.py +++ b/pandas/tests/io/excel/test_openpyxl.py @@ -1,6 +1,10 @@ +from distutils.version import LooseVersion + import numpy as np import pytest +from pandas.compat._optional import get_version + import pandas as pd from pandas import DataFrame import pandas._testing as tm @@ -116,3 +120,32 @@ def test_to_excel_with_openpyxl_engine(ext): ).highlight_max() styled.to_excel(filename, engine="openpyxl") + + +@pytest.mark.parametrize( + "header, expected_data", + [ + ( + 0, + { + "Title": [np.nan, "A", 1, 2, 3], + "Unnamed: 1": [np.nan, "B", 4, 5, 6], + "Unnamed: 2": [np.nan, "C", 7, 8, 9], + }, + ), + (2, {"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]}), + ], +) +@pytest.mark.parametrize( + "filename", ["dimension_missing", "dimension_small", "dimension_large"] +) +@pytest.mark.xfail( + LooseVersion(get_version(openpyxl)) < "3.0.0", + reason="openpyxl read-only sheet is incorrect when dimension data is wrong", +) +def test_read_with_bad_dimension(datapath, ext, header, expected_data, filename): + # GH 38956, 39001 - no/incorrect dimension information + path = datapath("io", "data", "excel", f"{filename}{ext}") + result = pd.read_excel(path, header=header) + expected = DataFrame(expected_data) + tm.assert_frame_equal(result, expected)
Backport PR #39486
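The core of the backported reader change, reduced to plain Python: after `sheet.reset_dimensions()` openpyxl (>= 3.0.0) can return ragged rows, so shorter rows get padded with `""` up to the widest row:

```python
data = [["A", "B", "C"], [1, 2], [4, 5, 6]]
max_width = max(len(row) for row in data)
if min(len(row) for row in data) < max_width:
    data = [row + [""] * (max_width - len(row)) for row in data]
print(data)  # [['A', 'B', 'C'], [1, 2, ''], [4, 5, 6]]
```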
https://api.github.com/repos/pandas-dev/pandas/pulls/39633
2021-02-07T12:04:34Z
2021-02-07T13:08:20Z
2021-02-07T13:08:20Z
2021-02-07T13:08:23Z
CLN: redundant function in `Styler`
diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py index 6eac9ba87c73d..3d9eb4e96f78a 100644 --- a/pandas/io/formats/style.py +++ b/pandas/io/formats/style.py @@ -391,9 +391,6 @@ def _translate(self): BLANK_CLASS = "blank" BLANK_VALUE = "" - def format_attr(pair): - return f"{pair['key']}={pair['value']}" - # for sparsifying a MultiIndex idx_lengths = _get_level_lengths(self.index) col_lengths = _get_level_lengths(self.columns, hidden_columns) @@ -462,9 +459,7 @@ def format_attr(pair): } colspan = col_lengths.get((r, c), 0) if colspan > 1: - es["attributes"] = [ - format_attr({"key": "colspan", "value": f'"{colspan}"'}) - ] + es["attributes"] = [f'colspan="{colspan}"'] row_es.append(es) head.append(row_es) @@ -508,9 +503,7 @@ def format_attr(pair): } rowspan = idx_lengths.get((c, r), 0) if rowspan > 1: - es["attributes"] = [ - format_attr({"key": "rowspan", "value": f'"{rowspan}"'}) - ] + es["attributes"] = [f'rowspan="{rowspan}"'] row_es.append(es) for c, col in enumerate(self.data.columns):
Simplifies code by removing an unnecessary function definition.
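For reference, the deleted helper was a one-line wrapper around string interpolation; the inline f-string is equivalent:

```python
colspan = 2
# old: format_attr({"key": "colspan", "value": f'"{colspan}"'})
# new: inline
assert f'colspan="{colspan}"' == 'colspan="2"'
```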
https://api.github.com/repos/pandas-dev/pandas/pulls/39632
2021-02-07T11:05:48Z
2021-02-07T17:12:20Z
2021-02-07T17:12:20Z
2021-02-16T18:24:09Z
CLN: Styler HTML output adopt structured code standard
diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index 17d8c79994dbe..895c21e79886f 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -448,7 +448,7 @@ Other - :meth:`Index.where` behavior now mirrors :meth:`Index.putmask` behavior, i.e. ``index.where(mask, other)`` matches ``index.putmask(~mask, other)`` (:issue:`39412`) - Bug in :func:`pandas.testing.assert_series_equal`, :func:`pandas.testing.assert_frame_equal`, :func:`pandas.testing.assert_index_equal` and :func:`pandas.testing.assert_extension_array_equal` incorrectly raising when an attribute has an unrecognized NA type (:issue:`39461`) - Bug in :class:`Styler` where ``subset`` arg in methods raised an error for some valid multiindex slices (:issue:`33562`) -- +- :class:`Styler` rendered HTML output minor alterations to support w3 good code standard (:issue:`39626`) - .. --------------------------------------------------------------------------- diff --git a/pandas/io/formats/templates/html.tpl b/pandas/io/formats/templates/html.tpl index 97bfda9af089d..b315c57a65cdf 100644 --- a/pandas/io/formats/templates/html.tpl +++ b/pandas/io/formats/templates/html.tpl @@ -1,70 +1,70 @@ {# Update the template_structure.html document too #} {%- block before_style -%}{%- endblock before_style -%} {% block style %} -<style type="text/css" > +<style type="text/css"> {% block table_styles %} {% for s in table_styles %} - #T_{{uuid}} {{s.selector}} { - {% for p,val in s.props %} - {{p}}: {{val}}; - {% endfor -%} - } -{%- endfor -%} +#T_{{uuid}} {{s.selector}} { +{% for p,val in s.props %} + {{p}}: {{val}}; +{% endfor %} +} +{% endfor %} {% endblock table_styles %} {% block before_cellstyle %}{% endblock before_cellstyle %} {% block cellstyle %} -{%- for s in cellstyle %} - {%- for selector in s.selectors -%}{%- if not loop.first -%},{%- endif -%}#T_{{uuid}}{{selector}}{%- endfor -%} { - {% for p,val in s.props %} - {{p}}: {{val}}; - {% endfor %} - } -{%- endfor -%} -{%- endblock cellstyle %} +{% for s in cellstyle %} +{% for selector in s.selectors %}{% if not loop.first %}, {% endif %}#T_{{uuid}}{{selector}}{% endfor %} { +{% for p,val in s.props %} + {{p}}: {{val}}; +{% endfor %} +} +{% endfor %} +{% endblock cellstyle %} </style> -{%- endblock style %} -{%- block before_table %}{% endblock before_table %} -{%- block table %} -<table id="T_{{uuid}}" {% if table_attributes %}{{ table_attributes }}{% endif %}> -{%- block caption %} -{%- if caption -%} - <caption>{{caption}}</caption> -{%- endif -%} -{%- endblock caption %} -{%- block thead %} -<thead> - {%- block before_head_rows %}{% endblock %} - {%- for r in head %} - {%- block head_tr scoped %} +{% endblock style %} +{% block before_table %}{% endblock before_table %} +{% block table %} +<table id="T_{{uuid}}"{% if table_attributes %} {{table_attributes}}{% endif %}> +{% block caption %} +{% if caption %} + <caption>{{caption}}</caption> +{% endif %} +{% endblock caption %} +{% block thead %} + <thead> +{% block before_head_rows %}{% endblock %} +{% for r in head %} +{% block head_tr scoped %} <tr> - {%- for c in r %} - {%- if c.is_visible != False %} - <{{ c.type }} class="{{c.class}}" {{ c.attributes|join(" ") }}>{{c.value}}</{{ c.type }}> - {%- endif %} - {%- endfor %} +{% for c in r %} +{% if c.is_visible != False %} + <{{c.type}} class="{{c.class}}" {{c.attributes|join(" ")}}>{{c.value}}</{{c.type}}> +{% endif %} +{% endfor %} </tr> - {%- endblock head_tr %} - {%- endfor %} - {%- block after_head_rows %}{% endblock %} -</thead> 
-{%- endblock thead %} -{%- block tbody %} -<tbody> - {% block before_rows %}{% endblock before_rows %} - {% for r in body %} - {% block tr scoped %} +{% endblock head_tr %} +{% endfor %} +{% block after_head_rows %}{% endblock %} + </thead> +{% endblock thead %} +{% block tbody %} + <tbody> +{% block before_rows %}{% endblock before_rows %} +{% for r in body %} +{% block tr scoped %} <tr> - {% for c in r %} - {% if c.is_visible != False %} - <{{ c.type }} {% if c.id is defined -%} id="T_{{ uuid }}{{ c.id }}" {%- endif %} class="{{ c.class }}" {{ c.attributes|join(" ") }}>{{ c.display_value }}</{{ c.type }}> - {% endif %} - {%- endfor %} +{% for c in r %} +{% if c.is_visible != False %} + <{{c.type}} {% if c.id is defined -%} id="T_{{uuid}}{{c.id}}" {%- endif %} class="{{c.class}}" {{c.attributes|join(" ")}}>{{c.display_value}}</{{c.type}}> +{% endif %} +{% endfor %} </tr> - {% endblock tr %} - {%- endfor %} - {%- block after_rows %}{%- endblock after_rows %} -</tbody> -{%- endblock tbody %} +{% endblock tr %} +{% endfor %} +{% block after_rows %}{% endblock after_rows %} + </tbody> +{% endblock tbody %} </table> -{%- endblock table %} -{%- block after_table %}{% endblock after_table %} +{% endblock table %} +{% block after_table %}{% endblock after_table %} diff --git a/pandas/tests/io/formats/test_style.py b/pandas/tests/io/formats/test_style.py index d2c5b5b9d0b2c..0bd0c5bd87761 100644 --- a/pandas/tests/io/formats/test_style.py +++ b/pandas/tests/io/formats/test_style.py @@ -140,8 +140,8 @@ def test_multiple_render(self): s = Styler(self.df, uuid_len=0).applymap(lambda x: "color: red;", subset=["A"]) s.render() # do 2 renders to ensure css styles not duplicated assert ( - '<style type="text/css" >\n#T__row0_col0,#T__row1_col0{\n ' - "color: red;\n }</style>" in s.render() + '<style type="text/css">\n#T__row0_col0, #T__row1_col0 {\n' + " color: red;\n}\n</style>" in s.render() ) def test_render_empty_dfs(self): @@ -1794,11 +1794,11 @@ def test_column_and_row_styling(self): df = DataFrame(data=[[0, 1], [1, 2]], columns=["A", "B"]) s = Styler(df, uuid_len=0) s = s.set_table_styles({"A": [{"selector": "", "props": [("color", "blue")]}]}) - assert "#T__ .col0 {\n color: blue;\n }" in s.render() + assert "#T__ .col0 {\n color: blue;\n}" in s.render() s = s.set_table_styles( {0: [{"selector": "", "props": [("color", "blue")]}]}, axis=1 ) - assert "#T__ .row0 {\n color: blue;\n }" in s.render() + assert "#T__ .row0 {\n color: blue;\n}" in s.render() def test_colspan_w3(self): # GH 36223 @@ -1855,12 +1855,12 @@ def test_tooltip_render(self, ttips): s = Styler(df, uuid_len=0).set_tooltips(ttips).render() # test tooltip table level class - assert "#T__ .pd-t {\n visibility: hidden;\n" in s + assert "#T__ .pd-t {\n visibility: hidden;\n" in s # test 'Min' tooltip added assert ( - "#T__ #T__row0_col0:hover .pd-t {\n visibility: visible;\n } " - + ' #T__ #T__row0_col0 .pd-t::after {\n content: "Min";\n }' + "#T__ #T__row0_col0:hover .pd-t {\n visibility: visible;\n}\n" + + '#T__ #T__row0_col0 .pd-t::after {\n content: "Min";\n}' in s ) assert ( @@ -1871,8 +1871,8 @@ def test_tooltip_render(self, ttips): # test 'Max' tooltip added assert ( - "#T__ #T__row0_col1:hover .pd-t {\n visibility: visible;\n } " - + ' #T__ #T__row0_col1 .pd-t::after {\n content: "Max";\n }' + "#T__ #T__row0_col1:hover .pd-t {\n visibility: visible;\n}\n" + + '#T__ #T__row0_col1 .pd-t::after {\n content: "Max";\n}' in s ) assert ( @@ -1892,16 +1892,16 @@ def test_tooltip_reindex(self): index=[0, 2], ) s = Styler(df, 
uuid_len=0).set_tooltips(DataFrame(ttips)).render() - assert '#T__ #T__row0_col0 .pd-t::after {\n content: "Mi";\n }' in s - assert '#T__ #T__row0_col2 .pd-t::after {\n content: "Ma";\n }' in s - assert '#T__ #T__row2_col0 .pd-t::after {\n content: "Mu";\n }' in s - assert '#T__ #T__row2_col2 .pd-t::after {\n content: "Mo";\n }' in s + assert '#T__ #T__row0_col0 .pd-t::after {\n content: "Mi";\n}' in s + assert '#T__ #T__row0_col2 .pd-t::after {\n content: "Ma";\n}' in s + assert '#T__ #T__row2_col0 .pd-t::after {\n content: "Mu";\n}' in s + assert '#T__ #T__row2_col2 .pd-t::after {\n content: "Mo";\n}' in s def test_tooltip_ignored(self): # GH 21266 df = DataFrame(data=[[0, 1], [2, 3]]) s = Styler(df).set_tooltips_class("pd-t").render() # no set_tooltips() - assert '<style type="text/css" >\n</style>' in s + assert '<style type="text/css">\n</style>' in s assert '<span class="pd-t"></span>' not in s def test_tooltip_class(self): @@ -1913,11 +1913,8 @@ def test_tooltip_class(self): .set_tooltips_class(name="other-class", properties=[("color", "green")]) .render() ) - assert "#T__ .other-class {\n color: green;\n" in s - assert ( - '#T__ #T__row0_col0 .other-class::after {\n content: "tooltip";\n' - in s - ) + assert "#T__ .other-class {\n color: green;\n" in s + assert '#T__ #T__row0_col0 .other-class::after {\n content: "tooltip";\n' in s # GH 39563 s = ( @@ -1926,10 +1923,50 @@ def test_tooltip_class(self): .set_tooltips_class(name="other-class", properties="color:green;color:red;") .render() ) - assert ( - "#T__ .other-class {\n color: green;\n color: red;\n " - in s + assert "#T__ .other-class {\n color: green;\n color: red;\n}" in s + + def test_w3_html_format(self): + s = ( + Styler( + DataFrame([[2.61], [2.69]], index=["a", "b"], columns=["A"]), + uuid_len=0, + ) + .set_table_styles([{"selector": "th", "props": "att2:v2;"}]) + .applymap(lambda x: "att1:v1;") + .set_table_attributes('class="my-cls1" style="attr3:v3;"') + .set_td_classes(DataFrame(["my-cls2"], index=["a"], columns=["A"])) + .format("{:.1f}") + .set_caption("A comprehensive test") ) + expected = """<style type="text/css"> +#T__ th { + att2: v2; +} +#T__row0_col0, #T__row1_col0 { + att1: v1; +} +</style> +<table id="T__" class="my-cls1" style="attr3:v3;"> + <caption>A comprehensive test</caption> + <thead> + <tr> + <th class="blank level0" ></th> + <th class="col_heading level0 col0" >A</th> + </tr> + </thead> + <tbody> + <tr> + <th id="T__level0_row0" class="row_heading level0 row0" >a</th> + <td id="T__row0_col0" class="data row0 col0 my-cls2" >2.6</td> + </tr> + <tr> + <th id="T__level0_row1" class="row_heading level0 row1" >b</th> + <td id="T__row1_col0" class="data row1 col0" >2.7</td> + </tr> + </tbody> +</table> +""" + assert expected == s.render() @td.skip_if_no_mpl
- [x] closes #39626
- [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [x] whatsnew entry
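For reference, a usage sketch of the chained `Styler` API exercised by the new `test_w3_html_format` test (lifted from the test body; the `att*`/`v*` strings are just dummy CSS values):

```python
import pandas as pd
from pandas.io.formats.style import Styler

df = pd.DataFrame([[2.61], [2.69]], index=["a", "b"], columns=["A"])
html = (
    Styler(df, uuid_len=0)
    .set_table_styles([{"selector": "th", "props": "att2:v2;"}])
    .applymap(lambda x: "att1:v1;")
    .set_table_attributes('class="my-cls1" style="attr3:v3;"')
    .set_td_classes(pd.DataFrame(["my-cls2"], index=["a"], columns=["A"]))
    .format("{:.1f}")
    .set_caption("A comprehensive test")
    .render()  # produces the W3-valid HTML shown in the expected string above
)
```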
https://api.github.com/repos/pandas-dev/pandas/pulls/39627
2021-02-06T21:26:34Z
2021-02-07T17:01:12Z
2021-02-07T17:01:12Z
2021-02-07T18:38:09Z
CLN: Use kwargs instead of kwds in apply functions
diff --git a/pandas/core/apply.py b/pandas/core/apply.py index 533190e692891..828b460f84ec6 100644 --- a/pandas/core/apply.py +++ b/pandas/core/apply.py @@ -63,7 +63,7 @@ def frame_apply( raw: bool = False, result_type: Optional[str] = None, args=None, - kwds=None, + kwargs=None, ) -> FrameApply: """ construct and return a row or column based frame apply object """ axis = obj._get_axis_number(axis) @@ -79,7 +79,7 @@ def frame_apply( raw=raw, result_type=result_type, args=args, - kwds=kwds, + kwargs=kwargs, ) @@ -88,14 +88,14 @@ def series_apply( func: AggFuncType, convert_dtype: bool = True, args=None, - kwds=None, + kwargs=None, ) -> SeriesApply: return SeriesApply( obj, func, convert_dtype, args, - kwds, + kwargs, ) @@ -109,12 +109,12 @@ def __init__( raw: bool, result_type: Optional[str], args, - kwds, + kwargs, ): self.obj = obj self.raw = raw self.args = args or () - self.kwds = kwds or {} + self.kwargs = kwargs or {} if result_type not in [None, "reduce", "broadcast", "expand"]: raise ValueError( @@ -126,13 +126,13 @@ def __init__( # curry if needed if ( - (kwds or args) + (kwargs or args) and not isinstance(func, (np.ufunc, str)) and not is_list_like(func) ): def f(x): - return func(x, *args, **kwds) + return func(x, *args, **kwargs) else: f = func @@ -163,7 +163,7 @@ def agg(self) -> Tuple[Optional[FrameOrSeriesUnion], Optional[bool]]: obj = self.obj arg = self.f args = self.args - kwargs = self.kwds + kwargs = self.kwargs _axis = kwargs.pop("_axis", None) if _axis is None: @@ -413,10 +413,10 @@ def maybe_apply_str(self) -> Optional[FrameOrSeriesUnion]: if callable(func): sig = inspect.getfullargspec(func) if "axis" in sig.args: - self.kwds["axis"] = self.axis + self.kwargs["axis"] = self.axis elif self.axis != 0: raise ValueError(f"Operation {f} does not support axis=1") - return self.obj._try_aggregate_string_function(f, *self.args, **self.kwds) + return self.obj._try_aggregate_string_function(f, *self.args, **self.kwargs) def maybe_apply_multiple(self) -> Optional[FrameOrSeriesUnion]: """ @@ -430,7 +430,7 @@ def maybe_apply_multiple(self) -> Optional[FrameOrSeriesUnion]: # Note: dict-likes are list-like if not is_list_like(self.f): return None - return self.obj.aggregate(self.f, self.axis, *self.args, **self.kwds) + return self.obj.aggregate(self.f, self.axis, *self.args, **self.kwargs) class FrameApply(Apply): @@ -806,7 +806,7 @@ def __init__( func: AggFuncType, convert_dtype: bool, args, - kwds, + kwargs, ): self.convert_dtype = convert_dtype @@ -816,7 +816,7 @@ def __init__( raw=False, result_type=None, args=args, - kwds=kwds, + kwargs=kwargs, ) def apply(self) -> FrameOrSeriesUnion: @@ -877,17 +877,17 @@ def __init__( obj: Union[SeriesGroupBy, DataFrameGroupBy], func: AggFuncType, args, - kwds, + kwargs, ): - kwds = kwds.copy() - self.axis = obj.obj._get_axis_number(kwds.get("axis", 0)) + kwargs = kwargs.copy() + self.axis = obj.obj._get_axis_number(kwargs.get("axis", 0)) super().__init__( obj, func, raw=False, result_type=None, args=args, - kwds=kwds, + kwargs=kwargs, ) def apply(self): @@ -903,7 +903,7 @@ def __init__( obj: Union[Resampler, BaseWindow], func: AggFuncType, args, - kwds, + kwargs, ): super().__init__( obj, @@ -911,7 +911,7 @@ def __init__( raw=False, result_type=None, args=args, - kwds=kwds, + kwargs=kwargs, ) def apply(self): diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 6357b8feb348b..ee27e38c6f556 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -7718,7 +7718,7 @@ def _aggregate(self, arg, axis: Axis = 0, *args, **kwargs): 
func=arg, axis=0, args=args, - kwds=kwargs, + kwargs=kwargs, ) result, how = op.agg() @@ -7750,7 +7750,7 @@ def apply( raw: bool = False, result_type=None, args=(), - **kwds, + **kwargs, ): """ Apply a function along an axis of the DataFrame. @@ -7798,7 +7798,7 @@ def apply( args : tuple Positional arguments to pass to `func` in addition to the array/series. - **kwds + **kwargs Additional keyword arguments to pass as keywords arguments to `func`. @@ -7892,7 +7892,7 @@ def apply( raw=raw, result_type=result_type, args=args, - kwds=kwds, + kwargs=kwargs, ) return op.apply() diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index 12698efa86b28..7b6eb4c8fe2f9 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -983,7 +983,7 @@ def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs) # try to treat as if we are passing a list try: result, _ = GroupByApply( - self, [func], args=(), kwds={"_axis": self.axis} + self, [func], args=(), kwargs={"_axis": self.axis} ).agg() # select everything except for the last level, which is the one diff --git a/pandas/core/resample.py b/pandas/core/resample.py index 965de2e04bf40..34b7838d2280c 100644 --- a/pandas/core/resample.py +++ b/pandas/core/resample.py @@ -301,7 +301,7 @@ def pipe( def aggregate(self, func, *args, **kwargs): self._set_binner() - result, how = ResamplerWindowApply(self, func, args=args, kwds=kwargs).agg() + result, how = ResamplerWindowApply(self, func, args=args, kwargs=kwargs).agg() if result is None: how = func grouper = None diff --git a/pandas/core/series.py b/pandas/core/series.py index 8bd325beede65..89ccddf4c9aa2 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -3940,7 +3940,7 @@ def aggregate(self, func=None, axis=0, *args, **kwargs): if func is None: func = dict(kwargs.items()) - op = series_apply(self, func, args=args, kwds=kwargs) + op = series_apply(self, func, args=args, kwargs=kwargs) result, how = op.agg() if result is None: @@ -3981,7 +3981,7 @@ def apply( func: AggFuncType, convert_dtype: bool = True, args: Tuple[Any, ...] = (), - **kwds, + **kwargs, ) -> FrameOrSeriesUnion: """ Invoke function on values of Series. @@ -3998,7 +3998,7 @@ def apply( False, leave as dtype=object. args : tuple Positional arguments passed to func after the series value. - **kwds + **kwargs Additional keyword arguments passed to func. Returns @@ -4079,7 +4079,7 @@ def apply( Helsinki 2.484907 dtype: float64 """ - op = series_apply(self, func, convert_dtype, args, kwds) + op = series_apply(self, func, convert_dtype, args, kwargs) return op.apply() def _reduce( diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py index 9a68e470201c7..bec6cfb375716 100644 --- a/pandas/core/window/rolling.py +++ b/pandas/core/window/rolling.py @@ -510,7 +510,7 @@ def calc(x): return self._apply_tablewise(homogeneous_func, name) def aggregate(self, func, *args, **kwargs): - result, how = ResamplerWindowApply(self, func, args=args, kwds=kwargs).agg() + result, how = ResamplerWindowApply(self, func, args=args, kwargs=kwargs).agg() if result is None: return self.apply(func, raw=False, args=args, kwargs=kwargs) return result @@ -994,7 +994,7 @@ def calc(x): axis="", ) def aggregate(self, func, *args, **kwargs): - result, how = ResamplerWindowApply(self, func, args=args, kwds=kwargs).agg() + result, how = ResamplerWindowApply(self, func, args=args, kwargs=kwargs).agg() if result is None: # these must apply directly
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them

I've applied the change here to the public API function `apply` as well, but I don't believe this is an API change, since you cannot pass to `**kwds` (or `**kwargs`) by name.
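A quick sketch (not part of the diff) of why the catch-all's name is invisible to callers: keyword arguments are collected into `**kwargs` regardless of what the parameter is called, and the name itself cannot be targeted.

```python
import pandas as pd

def scale_by(x, factor=1):
    return x * factor

ser = pd.Series([1, 2, 3])

# Keywords fall through to func via the catch-all, whatever it is named:
ser.apply(scale_by, factor=10)  # works identically before and after the rename

# "Passing to **kwds by name" is not possible: this just forwards a keyword
# literally named 'kwds' to scale_by, which rejects it.
# ser.apply(scale_by, kwds={"factor": 10})  # TypeError: unexpected keyword 'kwds'
```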
https://api.github.com/repos/pandas-dev/pandas/pulls/39625
2021-02-06T17:05:01Z
2021-02-07T16:41:20Z
2021-02-07T16:41:20Z
2021-02-07T16:43:05Z
CI/TST: Disallow usage of pytest.xfail
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 0fc6e61049a44..fc08a1b164caf 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -127,6 +127,12 @@ repos: types: [python] files: ^pandas/tests/ exclude: ^pandas/tests/extension/ + - id: unwanted-patterns-pytest-xfail + name: Check for use of pytest.xfail + entry: pytest\.xfail + language: pygrep + types: [python] + files: ^pandas/tests/ - id: inconsistent-namespace-usage name: 'Check for inconsistent use of pandas namespace in tests' entry: python scripts/check_for_inconsistent_pandas_namespace.py diff --git a/doc/source/development/code_style.rst b/doc/source/development/code_style.rst index 9477a9ac79dd6..19d83eb75e5bd 100644 --- a/doc/source/development/code_style.rst +++ b/doc/source/development/code_style.rst @@ -161,6 +161,46 @@ For example: # wrong from common import test_base +Testing +======= + +Failing tests +-------------- + +See https://docs.pytest.org/en/latest/skipping.html for background. + +Do not use ``pytest.xfail`` +--------------------------- + +Do not use this method. It has the same behavior as ``pytest.skip``, namely +it immediately stops the test and does not check if the test will fail. If +this is the behavior you desire, use ``pytest.skip`` instead. + +Using ``pytest.mark.xfail`` +--------------------------- + +Use this method if a test is known to fail but the manner in which it fails +is not meant to be captured. It is common to use this method for a test that +exhibits buggy behavior or a non-implemented feature. If +the failing test has flaky behavior, use the argument ``strict=False``. This +will make it so pytest does not fail if the test happens to pass. + +Prefer the decorator ``@pytest.mark.xfail`` and the argument ``pytest.param`` +over usage within a test so that the test is appropriately marked during the +collection phase of pytest. For xfailing a test that involves multiple +parameters, a fixture, or a combination of these, it is only possible to +xfail during the testing phase. To do so, use the ``request`` fixture: + +.. code-block:: python + + import pytest + + def test_xfail(request): + request.node.add_marker(pytest.mark.xfail(reason="Indicate why here")) + +xfail is not to be used for tests involving failure due to invalid user arguments. +For these tests, we need to verify the correct exception type and error message +is being raised, using ``pytest.raises`` instead. Miscellaneous =============
- [x] closes #38902
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
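As a small illustration of the rule the new docs describe (hypothetical tests, not from the diff): the imperative call stops execution immediately, while the mark lets the test run and checks that it really fails.

```python
import pytest

def test_imperative_xfail():
    # pytest.xfail() raises immediately, like a skip: nothing below runs,
    # so a passing assertion would never be detected. This is what the new
    # pre-commit hook forbids.
    pytest.xfail("GH#38902: behaves like a skip")
    assert 1 == 2  # never executed

@pytest.mark.xfail(reason="GH#38902: known failure", strict=True)
def test_marked_xfail():
    # The marked test actually runs; with strict=True an unexpected pass
    # fails the suite instead of being silently ignored.
    assert 1 == 2
```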
https://api.github.com/repos/pandas-dev/pandas/pulls/39624
2021-02-06T14:55:01Z
2021-02-08T21:47:50Z
2021-02-08T21:47:50Z
2021-02-08T22:05:35Z
BUG: Series.loc raising KeyError for Iterator indexer in case of setitem
diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index 1c7942dfedafa..1b011be839ff5 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -691,6 +691,8 @@ Indexing - Bug in :meth:`DataFrame.__setitem__` raising ``ValueError`` with empty :class:`DataFrame` and specified columns for string indexer and non empty :class:`DataFrame` to set (:issue:`38831`) - Bug in :meth:`DataFrame.loc.__setitem__` raising ValueError when expanding unique column for :class:`DataFrame` with duplicate columns (:issue:`38521`) - Bug in :meth:`DataFrame.iloc.__setitem__` and :meth:`DataFrame.loc.__setitem__` with mixed dtypes when setting with a dictionary value (:issue:`38335`) +- Bug in :meth:`Series.loc.__setitem__` and :meth:`DataFrame.loc.__setitem__` raising ``KeyError`` for boolean Iterator indexer (:issue:`39614`) +- Bug in :meth:`Series.iloc` and :meth:`DataFrame.iloc` raising ``KeyError`` for Iterator indexer (:issue:`39614`) - Bug in :meth:`DataFrame.__setitem__` not raising ``ValueError`` when right hand side is a :class:`DataFrame` with wrong number of columns (:issue:`38604`) - Bug in :meth:`Series.__setitem__` raising ``ValueError`` when setting a :class:`Series` with a scalar indexer (:issue:`38303`) - Bug in :meth:`DataFrame.loc` dropping levels of :class:`MultiIndex` when :class:`DataFrame` used as input has only one row (:issue:`10521`) diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index 82971e460a8a2..f9d52afeff355 100644 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -703,6 +703,7 @@ def _ensure_listlike_indexer(self, key, axis=None, value=None): def __setitem__(self, key, value): if isinstance(key, tuple): + key = tuple(list(x) if is_iterator(x) else x for x in key) key = tuple(com.apply_if_callable(x, self.obj) for x in key) else: key = com.apply_if_callable(key, self.obj) @@ -911,6 +912,7 @@ def _convert_to_indexer(self, key, axis: int, is_setter: bool = False): def __getitem__(self, key): if type(key) is tuple: + key = tuple(list(x) if is_iterator(x) else x for x in key) key = tuple(com.apply_if_callable(x, self.obj) for x in key) if self._is_scalar_access(key): with suppress(KeyError, IndexError, AttributeError): @@ -1242,6 +1244,9 @@ def _convert_to_indexer(self, key, axis: int, is_setter: bool = False): elif is_list_like_indexer(key): + if is_iterator(key): + key = list(key) + if com.is_bool_indexer(key): key = check_bool_indexer(labels, key) (inds,) = key.nonzero() @@ -1530,6 +1535,9 @@ def _getitem_axis(self, key, axis: int): if isinstance(key, slice): return self._get_slice_axis(key, axis=axis) + if is_iterator(key): + key = list(key) + if isinstance(key, list): key = np.asarray(key) @@ -1571,6 +1579,8 @@ def _convert_to_indexer(self, key, axis: int, is_setter: bool = False): def _get_setitem_indexer(self, key): # GH#32257 Fall through to let numpy do validation + if is_iterator(key): + return list(key) return key # ------------------------------------------------------------------- diff --git a/pandas/tests/frame/indexing/test_getitem.py b/pandas/tests/frame/indexing/test_getitem.py index 290ba67c7d05b..073e7b0357124 100644 --- a/pandas/tests/frame/indexing/test_getitem.py +++ b/pandas/tests/frame/indexing/test_getitem.py @@ -144,6 +144,21 @@ def test_getitem_listlike(self, idx_type, levels, float_frame): with pytest.raises(KeyError, match="not in index"): frame[idx] + def test_getitem_iloc_generator(self): + # GH#39614 + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) + indexer = (x for x in 
[1, 2]) + result = df.iloc[indexer] + expected = DataFrame({"a": [2, 3], "b": [5, 6]}, index=[1, 2]) + tm.assert_frame_equal(result, expected) + + def test_getitem_iloc_two_dimensional_generator(self): + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) + indexer = (x for x in [1, 2]) + result = df.iloc[indexer, 1] + expected = Series([5, 6], name="b", index=[1, 2]) + tm.assert_series_equal(result, expected) + class TestGetitemCallable: def test_getitem_callable(self, float_frame): diff --git a/pandas/tests/frame/indexing/test_setitem.py b/pandas/tests/frame/indexing/test_setitem.py index 1d41426b93db6..3a1b2345ee7f0 100644 --- a/pandas/tests/frame/indexing/test_setitem.py +++ b/pandas/tests/frame/indexing/test_setitem.py @@ -610,6 +610,21 @@ def test_setitem_list_of_tuples(self, float_frame): expected = Series(tuples, index=float_frame.index, name="tuples") tm.assert_series_equal(result, expected) + def test_setitem_iloc_generator(self): + # GH#39614 + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) + indexer = (x for x in [1, 2]) + df.iloc[indexer] = 1 + expected = DataFrame({"a": [1, 1, 1], "b": [4, 1, 1]}) + tm.assert_frame_equal(df, expected) + + def test_setitem_iloc_two_dimensional_generator(self): + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) + indexer = (x for x in [1, 2]) + df.iloc[indexer, 1] = 1 + expected = DataFrame({"a": [1, 2, 3], "b": [4, 1, 1]}) + tm.assert_frame_equal(df, expected) + class TestSetitemTZAwareValues: @pytest.fixture diff --git a/pandas/tests/series/indexing/test_setitem.py b/pandas/tests/series/indexing/test_setitem.py index 88e6f0aa0b887..0372bf740640c 100644 --- a/pandas/tests/series/indexing/test_setitem.py +++ b/pandas/tests/series/indexing/test_setitem.py @@ -556,6 +556,9 @@ def test_int_key(self, obj, key, expected, val, indexer_sli, is_inplace): indkey = np.array(ilkey) self.check_indexer(obj, indkey, expected, val, indexer_sli, is_inplace) + genkey = (x for x in [key]) + self.check_indexer(obj, genkey, expected, val, indexer_sli, is_inplace) + def test_slice_key(self, obj, key, expected, val, indexer_sli, is_inplace): if not isinstance(key, slice): return @@ -570,6 +573,9 @@ def test_slice_key(self, obj, key, expected, val, indexer_sli, is_inplace): indkey = np.array(ilkey) self.check_indexer(obj, indkey, expected, val, indexer_sli, is_inplace) + genkey = (x for x in indkey) + self.check_indexer(obj, genkey, expected, val, indexer_sli, is_inplace) + def test_mask_key(self, obj, key, expected, val, indexer_sli): # setitem with boolean mask mask = np.zeros(obj.shape, dtype=bool)
- [x] closes #39614
- [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [x] whatsnew entry
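A repro sketch distilled from the new tests; before this fix these raised `KeyError`, presumably because the generator indexer was consumed during validation and looked empty afterwards.

```python
import pandas as pd

df = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})

df.iloc[(x for x in [1, 2])]                    # getitem with a generator indexer
df.iloc[(x for x in [1, 2]), 1] = 1             # setitem with a generator indexer
df.loc[(x for x in [True, False, True])] = 10   # boolean iterator via .loc
```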
https://api.github.com/repos/pandas-dev/pandas/pulls/39623
2021-02-06T10:55:16Z
2021-04-16T00:46:28Z
2021-04-16T00:46:28Z
2021-04-16T08:13:47Z
REF: remove Float64Index get_loc, __contains__
diff --git a/pandas/_libs/index_class_helper.pxi.in b/pandas/_libs/index_class_helper.pxi.in index e1ea1fbf9bd46..e5026ce2fa292 100644 --- a/pandas/_libs/index_class_helper.pxi.in +++ b/pandas/_libs/index_class_helper.pxi.in @@ -34,10 +34,14 @@ cdef class {{name}}Engine(IndexEngine): cdef _make_hash_table(self, Py_ssize_t n): return _hash.{{name}}HashTable(n) - {{if name not in {'Float64', 'Float32'} }} cdef _check_type(self, object val): + {{if name not in {'Float64', 'Float32'} }} if not util.is_integer_object(val): raise KeyError(val) + {{else}} + if util.is_bool_object(val): + # avoid casting to True -> 1.0 + raise KeyError(val) {{endif}} cdef void _call_map_locations(self, values): diff --git a/pandas/core/indexes/numeric.py b/pandas/core/indexes/numeric.py index 777fc1c7c4ad2..d6427aed6edf3 100644 --- a/pandas/core/indexes/numeric.py +++ b/pandas/core/indexes/numeric.py @@ -1,4 +1,4 @@ -from typing import Any, Hashable, Optional +from typing import Hashable, Optional import warnings import numpy as np @@ -9,7 +9,6 @@ from pandas.core.dtypes.cast import astype_nansafe from pandas.core.dtypes.common import ( - is_bool, is_dtype_equal, is_extension_array_dtype, is_float, @@ -336,13 +335,6 @@ def _convert_slice_indexer(self, key: slice, kind: str): # translate to locations return self.slice_indexer(key.start, key.stop, key.step, kind=kind) - @doc(Index.get_loc) - def get_loc(self, key, method=None, tolerance=None): - if is_bool(key): - # Catch this to avoid accidentally casting to 1.0 - raise KeyError(key) - return super().get_loc(key, method=method, tolerance=tolerance) - # ---------------------------------------------------------------- def _format_native_types( @@ -359,10 +351,3 @@ def _format_native_types( fixed_width=False, ) return formatter.get_result_as_array() - - def __contains__(self, other: Any) -> bool: - hash(other) - if super().__contains__(other): - return True - - return is_float(other) and np.isnan(other) and self.hasnans
- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [ ] whatsnew entry
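A behavior sketch of what the refactor is meant to preserve, assuming (as the diff implies) that the base class and the engine's new `_check_type` cover the removed special cases:

```python
import numpy as np
import pandas as pd

idx = pd.Float64Index([0.0, 1.0, np.nan])

idx.get_loc(1.0)   # 1
np.nan in idx      # True: the Float64 hash table handles NaN itself
idx.get_loc(True)  # KeyError: bools are not silently cast to 1.0
```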
https://api.github.com/repos/pandas-dev/pandas/pulls/39620
2021-02-06T04:39:09Z
2021-02-07T16:42:50Z
2021-02-07T16:42:49Z
2021-02-07T16:58:15Z
BUG: Series[int].__setitem__(mask, td64_or_dt64) incorrect casting
diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index 17d8c79994dbe..79e9cec71d03d 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -338,7 +338,7 @@ Indexing - Bug in :meth:`Series.__setitem__` raising ``ValueError`` when setting a :class:`Series` with a scalar indexer (:issue:`38303`) - Bug in :meth:`DataFrame.loc` dropping levels of :class:`MultiIndex` when :class:`DataFrame` used as input has only one row (:issue:`10521`) - Bug in :meth:`DataFrame.__getitem__` and :meth:`Series.__getitem__` always raising ``KeyError`` when slicing with existing strings an :class:`Index` with milliseconds (:issue:`33589`) -- Bug in setting ``timedelta64`` values into numeric :class:`Series` failing to cast to object dtype (:issue:`39086`) +- Bug in setting ``timedelta64`` or ``datetime64`` values into numeric :class:`Series` failing to cast to object dtype (:issue:`39086`, :issue:`39619`) - Bug in setting :class:`Interval` values into a :class:`Series` or :class:`DataFrame` with mismatched :class:`IntervalDtype` incorrectly casting the new values to the existing dtype (:issue:`39120`) - Bug in setting ``datetime64`` values into a :class:`Series` with integer-dtype incorrect casting the datetime64 values to integers (:issue:`39266`) - Bug in :meth:`Index.get_loc` not raising ``KeyError`` when method is specified for ``NaN`` value when ``NaN`` is not in :class:`Index` (:issue:`39382`) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 96b35f1aaab9c..1d8295ffb0eb1 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -8891,32 +8891,11 @@ def _where( if isinstance(other, (np.ndarray, ExtensionArray)): if other.shape != self.shape: - - if self.ndim == 1: - - icond = cond._values - - # GH 2745 / GH 4192 - # treat like a scalar - if len(other) == 1: - other = other[0] - - # GH 3235 - # match True cond to other - elif len(cond[icond]) == len(other): - - # try to not change dtype at first - new_other = self._values - new_other = new_other.copy() - new_other[icond] = other - other = new_other - - else: - raise ValueError( - "Length of replacements must equal series length" - ) - - else: + if self.ndim != 1: + # In the ndim == 1 case we may have + # other length 1, which we treat as scalar (GH#2745, GH#4192) + # or len(other) == icond.sum(), which we treat like + # __setitem__ (GH#3235) raise ValueError( "other must be the same shape as self when an ndarray" ) diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index f2fd5ca9c62c7..5c1fabd67bc8d 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -4550,11 +4550,16 @@ def putmask(self, mask, value): return self.astype(dtype).putmask(mask, value) values = self._values.copy() - if isinstance(converted, np.timedelta64) and self.dtype == object: + dtype, _ = infer_dtype_from(converted, pandas_dtype=True) + if dtype.kind in ["m", "M"]: # https://github.com/numpy/numpy/issues/12550 # timedelta64 will incorrectly cast to int - converted = [converted] * mask.sum() - values[mask] = converted + if not is_list_like(converted): + converted = [converted] * mask.sum() + values[mask] = converted + else: + converted = list(converted) + np.putmask(values, mask, converted) else: np.putmask(values, mask, converted) diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 9314666acdaad..75814cb2bae3d 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -1031,7 +1031,8 @@ def putmask(self,
mask, new) -> List[Block]: elif not mask.any(): return [self] - elif isinstance(new, np.timedelta64): + dtype, _ = infer_dtype_from(new) + if dtype.kind in ["m", "M"]: # using putmask with object dtype will incorrect cast to object # Having excluded self._can_hold_element, we know we cannot operate # in-place, so we are safe using `where` @@ -1317,10 +1318,15 @@ def where(self, other, cond, errors="raise", axis: int = 0) -> List[Block]: blocks = block.where(orig_other, cond, errors=errors, axis=axis) return self._maybe_downcast(blocks, "infer") - elif isinstance(other, np.timedelta64): - # expressions.where will cast np.timedelta64 to int - result = self.values.copy() - result[~cond] = [other] * (~cond).sum() + dtype, _ = infer_dtype_from(other, pandas_dtype=True) + if dtype.kind in ["m", "M"] and dtype.kind != values.dtype.kind: + # expressions.where would cast np.timedelta64 to int + if not is_list_like(other): + other = [other] * (~cond).sum() + else: + other = list(other) + result = values.copy() + np.putmask(result, ~cond, other) else: # convert datetime to datetime64, timedelta to timedelta64 diff --git a/pandas/tests/series/indexing/test_setitem.py b/pandas/tests/series/indexing/test_setitem.py index 767b61e31698b..3a993f544b64a 100644 --- a/pandas/tests/series/indexing/test_setitem.py +++ b/pandas/tests/series/indexing/test_setitem.py @@ -74,18 +74,6 @@ def test_setitem_tuple_with_datetimetz_values(self): tm.assert_series_equal(result, expected) -class TestSetitemPeriodDtype: - @pytest.mark.parametrize("na_val", [None, np.nan]) - def test_setitem_na_period_dtype_casts_to_nat(self, na_val): - ser = Series(period_range("2000-01-01", periods=10, freq="D")) - - ser[3] = na_val - assert ser[3] is NaT - - ser[3:5] = na_val - assert ser[4] is NaT - - class TestSetitemScalarIndexer: def test_setitem_negative_out_of_bounds(self): ser = Series(tm.rands_array(5, 10), index=tm.rands_array(10, 10)) @@ -259,29 +247,6 @@ def test_setitem_callable_other(self): class TestSetitemCasting: - @pytest.mark.parametrize("dtype", ["M8[ns]", "m8[ns]"]) - def test_setitem_dt64_into_int_series(self, dtype): - # dont cast dt64 to int when doing this setitem - orig = Series([1, 2, 3]) - - val = np.datetime64("2021-01-18 13:25:00", "ns") - if dtype == "m8[ns]": - val = val - val - - ser = orig.copy() - ser[:-1] = val - expected = Series([val, val, 3], dtype=object) - tm.assert_series_equal(ser, expected) - assert isinstance(ser[0], type(val)) - - ser = orig.copy() - ser[:-1] = [val, val] - tm.assert_series_equal(ser, expected) - - ser = orig.copy() - ser[:-1] = np.array([val, val]) - tm.assert_series_equal(ser, expected) - @pytest.mark.parametrize("unique", [True, False]) @pytest.mark.parametrize("val", [3, 3.0, "3"], ids=type) def test_setitem_non_bool_into_bool(self, val, indexer_sli, unique): @@ -599,3 +564,70 @@ def is_inplace(self): Indicate we do _not_ expect the setting to be done inplace. 
""" return False + + +class TestSetitemDT64IntoInt(SetitemCastingEquivalents): + # GH#39619 dont cast dt64 to int when doing this setitem + + @pytest.fixture(params=["M8[ns]", "m8[ns]"]) + def dtype(self, request): + return request.param + + @pytest.fixture + def scalar(self, dtype): + val = np.datetime64("2021-01-18 13:25:00", "ns") + if dtype == "m8[ns]": + val = val - val + return val + + @pytest.fixture + def expected(self, scalar): + expected = Series([scalar, scalar, 3], dtype=object) + assert isinstance(expected[0], type(scalar)) + return expected + + @pytest.fixture + def obj(self): + return Series([1, 2, 3]) + + @pytest.fixture + def key(self): + return slice(None, -1) + + @pytest.fixture(params=[None, list, np.array]) + def val(self, scalar, request): + box = request.param + if box is None: + return scalar + return box([scalar, scalar]) + + @pytest.fixture + def is_inplace(self): + return False + + +class TestSetitemNAPeriodDtype(SetitemCastingEquivalents): + # Setting compatible NA values into Series with PeriodDtype + + @pytest.fixture + def expected(self, key): + exp = Series(period_range("2000-01-01", periods=10, freq="D")) + exp._values.view("i8")[key] = NaT.value + assert exp[key] is NaT or all(x is NaT for x in exp[key]) + return exp + + @pytest.fixture + def obj(self): + return Series(period_range("2000-01-01", periods=10, freq="D")) + + @pytest.fixture(params=[3, slice(3, 5)]) + def key(self, request): + return request.param + + @pytest.fixture(params=[None, np.nan]) + def val(self, request): + return request.param + + @pytest.fixture + def is_inplace(self): + return True
- [ ] closes #xxxx
- [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [x] whatsnew entry
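A repro sketch based on the new `TestSetitemDT64IntoInt` cases; the masked setitem should upcast the Series to object instead of casting the datetime-like values to integers:

```python
import numpy as np
import pandas as pd

val = np.datetime64("2021-01-18 13:25:00", "ns")

ser = pd.Series([1, 2, 3])
ser[:-1] = val                   # scalar case
# ser is now object dtype holding datetime64 values, not their int64 bits

ser = pd.Series([1, 2, 3])
ser[:-1] = np.array([val, val])  # ndarray case, same expected upcast
```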
https://api.github.com/repos/pandas-dev/pandas/pulls/39619
2021-02-05T23:12:01Z
2021-02-07T16:58:58Z
2021-02-07T16:58:58Z
2021-02-07T17:01:42Z
REF: simplify BlockManager.quantile
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 11efa44e9d15f..477dbd4c1570e 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -9401,6 +9401,14 @@ def quantile( """ validate_percentile(q) + if not is_list_like(q): + # BlockManager.quantile expects listlike, so we wrap and unwrap here + res = self.quantile( + [q], axis=axis, numeric_only=numeric_only, interpolation=interpolation + ) + return res.iloc[0] + + q = Index(q, dtype=np.float64) data = self._get_numeric_data() if numeric_only else self axis = self._get_axis_number(axis) is_transposed = axis == 1 @@ -9419,10 +9427,7 @@ def quantile( qs=q, axis=1, interpolation=interpolation, transposed=is_transposed ) - if result.ndim == 2: - result = self._constructor(result) - else: - result = self._constructor_sliced(result, name=q) + result = self._constructor(result) if is_transposed: result = result.T diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 9314666acdaad..ffa52f3be4a57 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -82,7 +82,7 @@ from pandas.core.nanops import nanpercentile if TYPE_CHECKING: - from pandas import Index + from pandas import Float64Index, Index from pandas.core.arrays._mixins import NDArrayBackedExtensionArray @@ -1383,15 +1383,20 @@ def _unstack(self, unstacker, fill_value, new_placement): blocks = [make_block(new_values, placement=new_placement)] return blocks, mask - def quantile(self, qs, interpolation="linear", axis: int = 0) -> Block: + def quantile( + self, qs: Float64Index, interpolation="linear", axis: int = 0 + ) -> Block: """ compute the quantiles of the Parameters ---------- - qs: a scalar or list of the quantiles to be computed - interpolation: type of interpolation, default 'linear' - axis: axis to compute, default 0 + qs : Float64Index + List of the quantiles to be computed. + interpolation : str, default 'linear' + Type of interpolation. + axis : int, default 0 + Axis to compute. 
Returns ------- @@ -1399,14 +1404,12 @@ def quantile(self, qs, interpolation="linear", axis: int = 0) -> Block: """ # We should always have ndim == 2 because Series dispatches to DataFrame assert self.ndim == 2 + assert axis == 1 # only ever called this way + assert is_list_like(qs) # caller is responsible for this values = self.get_values() is_empty = values.shape[axis] == 0 - orig_scalar = not is_list_like(qs) - if orig_scalar: - # make list-like, unpack later - qs = [qs] if is_empty: # create the array of na_values @@ -1430,14 +1433,7 @@ def quantile(self, qs, interpolation="linear", axis: int = 0) -> Block: result = np.array(result, copy=False) result = result.T - if orig_scalar and not lib.is_scalar(result): - # result could be scalar in case with is_empty and self.ndim == 1 - assert result.shape[-1] == 1, result.shape - result = result[..., 0] - result = lib.item_from_zerodim(result) - - ndim = np.ndim(result) - return make_block(result, placement=np.arange(len(result)), ndim=ndim) + return make_block(result, placement=self.mgr_locs, ndim=2) def _replace_coerce( self, @@ -2184,7 +2180,10 @@ def fillna( value, limit=limit, inplace=inplace, downcast=downcast ) - def quantile(self, qs, interpolation="linear", axis: int = 0) -> Block: + def quantile( + self, qs: Float64Index, interpolation="linear", axis: int = 0 + ) -> Block: + assert axis == 1 # only ever called this way naive = self.values.view("M8[ns]") # TODO(EA2D): kludge for 2D block with 1D values diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index c352fa39627d9..0aa97b4d6c0ed 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -31,7 +31,6 @@ is_extension_array_dtype, is_list_like, ) -from pandas.core.dtypes.concat import concat_compat from pandas.core.dtypes.dtypes import ExtensionDtype from pandas.core.dtypes.generic import ABCDataFrame, ABCPandasArray, ABCSeries from pandas.core.dtypes.missing import array_equals, isna @@ -40,7 +39,7 @@ from pandas.core.arrays.sparse import SparseDtype from pandas.core.construction import extract_array from pandas.core.indexers import maybe_convert_indices -from pandas.core.indexes.api import Index, ensure_index +from pandas.core.indexes.api import Float64Index, Index, ensure_index from pandas.core.internals.base import DataManager from pandas.core.internals.blocks import ( Block, @@ -441,11 +440,11 @@ def apply( def quantile( self, + *, + qs: Float64Index, axis: int = 0, transposed: bool = False, interpolation="linear", - qs=None, - numeric_only=None, ) -> BlockManager: """ Iterate over blocks applying quantile reduction. 
@@ -460,8 +459,7 @@ def quantile( transposed: bool, default False we are holding transposed data interpolation : type of interpolation, default 'linear' - qs : a scalar or list of the quantiles to be computed - numeric_only : ignored + qs : list of the quantiles to be computed Returns ------- @@ -470,73 +468,25 @@ def quantile( # Series dispatches to DataFrame for quantile, which allows us to # simplify some of the code here and in the blocks assert self.ndim >= 2 + assert is_list_like(qs) # caller is responsible for this + assert axis == 1 # only ever called this way - def get_axe(block, qs, axes): - # Because Series dispatches to DataFrame, we will always have - # block.ndim == 2 - from pandas import Float64Index - - if is_list_like(qs): - ax = Float64Index(qs) - else: - ax = axes[0] - return ax - - axes, blocks = [], [] - for b in self.blocks: - block = b.quantile(axis=axis, qs=qs, interpolation=interpolation) - - axe = get_axe(b, qs, axes=self.axes) - - axes.append(axe) - blocks.append(block) - - # note that some DatetimeTZ, Categorical are always ndim==1 - ndim = {b.ndim for b in blocks} - assert 0 not in ndim, ndim - - if 2 in ndim: - - new_axes = list(self.axes) - - # multiple blocks that are reduced - if len(blocks) > 1: - new_axes[1] = axes[0] - - # reset the placement to the original - for b, sb in zip(blocks, self.blocks): - b.mgr_locs = sb.mgr_locs - - else: - new_axes[axis] = Index(np.concatenate([ax._values for ax in axes])) - - if transposed: - new_axes = new_axes[::-1] - blocks = [ - b.make_block(b.values.T, placement=np.arange(b.shape[1])) - for b in blocks - ] - - return type(self)(blocks, new_axes) - - # single block, i.e. ndim == {1} - values = concat_compat([b.values for b in blocks]) - - # compute the orderings of our original data - if len(self.blocks) > 1: + new_axes = list(self.axes) + new_axes[1] = Float64Index(qs) - indexer = np.empty(len(self.axes[0]), dtype=np.intp) - i = 0 - for b in self.blocks: - for j in b.mgr_locs: - indexer[j] = i - i = i + 1 + blocks = [ + blk.quantile(axis=axis, qs=qs, interpolation=interpolation) + for blk in self.blocks + ] - values = values.take(indexer) + if transposed: + new_axes = new_axes[::-1] + blocks = [ + b.make_block(b.values.T, placement=np.arange(b.shape[1])) + for b in blocks + ] - return SingleBlockManager( - make_block(values, ndim=1, placement=np.arange(len(values))), axes[0] - ) + return type(self)(blocks, new_axes) def isna(self, func) -> BlockManager: return self.apply("apply", func=func)
Handle the scalar-qs case early on in `DataFrame.quantile`; this hugely simplifies `BlockManager.quantile` and, to a lesser extent, `Block.quantile`.
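Sketch of the dispatch this describes, as seen from the user side: a scalar `q` is wrapped list-like, routed through the manager, and the single row unwrapped again.

```python
import pandas as pd

df = pd.DataFrame({"a": [1.0, 2.0, 3.0], "b": [4.0, 5.0, 6.0]})

df.quantile(0.5)    # Series; internally computed as df.quantile([0.5]).iloc[0]
df.quantile([0.5])  # one-row DataFrame indexed by Float64Index([0.5])
```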
https://api.github.com/repos/pandas-dev/pandas/pulls/39618
2021-02-05T22:52:09Z
2021-02-08T13:50:34Z
2021-02-08T13:50:34Z
2021-02-08T15:05:15Z
BUG: fix Categorical.astype for dtype=np.int32 argument
diff --git a/doc/source/whatsnew/v1.2.2.rst b/doc/source/whatsnew/v1.2.2.rst index b6e81e44e6f08..28ddfa197eae3 100644 --- a/doc/source/whatsnew/v1.2.2.rst +++ b/doc/source/whatsnew/v1.2.2.rst @@ -21,6 +21,7 @@ Fixed regressions - Fixed regression in :meth:`~DataFrame.to_pickle` failing to create bz2/xz compressed pickle files with ``protocol=5`` (:issue:`39002`) - Fixed regression in :func:`pandas.testing.assert_series_equal` and :func:`pandas.testing.assert_frame_equal` always raising ``AssertionError`` when comparing extension dtypes (:issue:`39410`) - Fixed regression in :meth:`~DataFrame.to_csv` opening ``codecs.StreamWriter`` in binary mode instead of in text mode and ignoring user-provided ``mode`` (:issue:`39247`) +- Fixed regression in :meth:`Categorical.astype` casting to incorrect dtype when ``np.int32`` is passed to dtype argument (:issue:`39402`) - Fixed regression in :meth:`~DataFrame.to_excel` creating corrupt files when appending (``mode="a"``) to an existing file (:issue:`39576`) - Fixed regression in :meth:`DataFrame.transform` failing in case of an empty DataFrame or Series (:issue:`39636`) - Fixed regression in :meth:`core.window.rolling.Rolling.count` where the ``min_periods`` argument would be set to ``0`` after the operation (:issue:`39554`) diff --git a/pandas/conftest.py b/pandas/conftest.py index 829ac64884dac..6ec6d66203268 100644 --- a/pandas/conftest.py +++ b/pandas/conftest.py @@ -1242,6 +1242,32 @@ def any_nullable_int_dtype(request): return request.param +@pytest.fixture(params=tm.ALL_INT_DTYPES + tm.ALL_EA_INT_DTYPES) +def any_int_or_nullable_int_dtype(request): + """ + Parameterized fixture for any integer dtype, nullable or not. + + * int + * 'int8' + * 'uint8' + * 'int16' + * 'uint16' + * 'int32' + * 'uint32' + * 'int64' + * 'uint64' + * 'UInt8' + * 'Int8' + * 'UInt16' + * 'Int16' + * 'UInt32' + * 'Int32' + * 'UInt64' + * 'Int64' + """ + return request.param + + @pytest.fixture(params=tm.ALL_EA_INT_DTYPES + tm.FLOAT_EA_DTYPES) def any_nullable_numeric_dtype(request): """ diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index af78b84923a9c..13da3df93af14 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -466,7 +466,8 @@ def astype(self, dtype: Dtype, copy: bool = True) -> ArrayLike: else: # GH8628 (PERF): astype category codes instead of astyping array try: - astyped_cats = self.categories.astype(dtype=dtype, copy=copy) + new_cats = np.asarray(self.categories) + new_cats = new_cats.astype(dtype=dtype, copy=copy) except ( TypeError, # downstream error msg for CategoricalIndex is misleading ValueError, @@ -474,8 +475,7 @@ def astype(self, dtype: Dtype, copy: bool = True) -> ArrayLike: msg = f"Cannot cast {self.categories.dtype} dtype to {dtype}" raise ValueError(msg) - astyped_cats = extract_array(astyped_cats, extract_numpy=True) - result = take_1d(astyped_cats, libalgos.ensure_platform_int(self._codes)) + result = take_1d(new_cats, libalgos.ensure_platform_int(self._codes)) return result diff --git a/pandas/tests/arrays/categorical/test_dtypes.py b/pandas/tests/arrays/categorical/test_dtypes.py index a2192b2810596..6be9a424a5544 100644 --- a/pandas/tests/arrays/categorical/test_dtypes.py +++ b/pandas/tests/arrays/categorical/test_dtypes.py @@ -138,7 +138,7 @@ def test_astype(self, ordered): tm.assert_numpy_array_equal(result, expected) result = cat.astype(int) - expected = np.array(cat, dtype="int64") + expected = np.array(cat, dtype="int") tm.assert_numpy_array_equal(result, expected)
result = cat.astype(float) diff --git a/pandas/tests/series/test_dtypes.py b/pandas/tests/series/test_dtypes.py index d59f0c05c7462..d455e434f38be 100644 --- a/pandas/tests/series/test_dtypes.py +++ b/pandas/tests/series/test_dtypes.py @@ -68,7 +68,7 @@ def test_astype_categorical_to_other(self): exp = Series(["a", "b", "b", "a", "a", "c", "c", "c"]) tm.assert_series_equal(cat.astype("str"), exp) s2 = Series(Categorical(["1", "2", "3", "4"])) - exp2 = Series([1, 2, 3, 4]).astype("int64") + exp2 = Series([1, 2, 3, 4]).astype("int") tm.assert_series_equal(s2.astype("int"), exp2) # object don't sort correctly, so just compare that we have the same @@ -94,6 +94,17 @@ def cmp(a, b): result = ser.astype("object").astype(CategoricalDtype()) tm.assert_series_equal(result, roundtrip_expected) + def test_categorical_astype_to_int(self, any_int_or_nullable_int_dtype): + # GH 39402 + + df = DataFrame(data={"col1": pd.array([2.0, 1.0, 3.0])}) + df.col1 = df.col1.astype("category") + df.col1 = df.col1.astype(any_int_or_nullable_int_dtype) + expected = DataFrame( + {"col1": pd.array([2, 1, 3], dtype=any_int_or_nullable_int_dtype)} + ) + tm.assert_frame_equal(df, expected) + def test_series_to_categorical(self): # see gh-16524: test conversion of Series to Categorical series = Series(["a", "b", "c"])
- [x] closes #39402
- [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [ ] whatsnew entry

Fixing a bug I introduced in #37355
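A repro sketch based on the new test; before the fix the categories were astyped as an `Index`, which (for numeric Index types of this era) coerces to 64-bit, so the requested `int32` came back wrong:

```python
import numpy as np
import pandas as pd

ser = pd.Series([2.0, 1.0, 3.0]).astype("category")
result = ser.astype(np.int32)
assert result.dtype == np.int32  # previously widened to int64
```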
https://api.github.com/repos/pandas-dev/pandas/pulls/39615
2021-02-05T16:49:37Z
2021-02-08T13:43:34Z
2021-02-08T13:43:34Z
2021-02-08T14:21:27Z
[ArrayManager] REF: Implement concat with reindexing
diff --git a/pandas/core/dtypes/concat.py b/pandas/core/dtypes/concat.py index b2d30f3540e77..cfadb3e9f45c5 100644 --- a/pandas/core/dtypes/concat.py +++ b/pandas/core/dtypes/concat.py @@ -30,11 +30,13 @@ ) -def _cast_to_common_type(arr: ArrayLike, dtype: DtypeObj) -> ArrayLike: +def cast_to_common_type(arr: ArrayLike, dtype: DtypeObj) -> ArrayLike: """ Helper function for `arr.astype(common_dtype)` but handling all special cases. """ + if is_dtype_equal(arr.dtype, dtype): + return arr if ( is_categorical_dtype(arr.dtype) and isinstance(dtype, np.dtype) @@ -121,7 +123,7 @@ def is_nonempty(x) -> bool: # for axis=0 if not single_dtype: target_dtype = find_common_type([x.dtype for x in to_concat]) - to_concat = [_cast_to_common_type(arr, target_dtype) for arr in to_concat] + to_concat = [cast_to_common_type(arr, target_dtype) for arr in to_concat] if isinstance(to_concat[0], ExtensionArray): cls = type(to_concat[0]) diff --git a/pandas/core/internals/array_manager.py b/pandas/core/internals/array_manager.py index 5581305a9baea..aedb9488b7454 100644 --- a/pandas/core/internals/array_manager.py +++ b/pandas/core/internals/array_manager.py @@ -18,12 +18,14 @@ ) from pandas._typing import ( ArrayLike, + DtypeObj, Hashable, ) from pandas.util._validators import validate_bool_kwarg from pandas.core.dtypes.cast import ( astype_array_safe, + ensure_dtype_can_hold_na, infer_dtype_from_scalar, soft_convert_objects, ) @@ -49,6 +51,7 @@ from pandas.core.dtypes.missing import ( array_equals, isna, + na_value_for_dtype, ) import pandas.core.algorithms as algos @@ -952,10 +955,18 @@ def reindex_indexer( # ignored keywords consolidate: bool = True, only_slice: bool = False, + # ArrayManager specific keywords + use_na_proxy: bool = False, ) -> T: axis = self._normalize_axis(axis) return self._reindex_indexer( - new_axis, indexer, axis, fill_value, allow_dups, copy + new_axis, + indexer, + axis, + fill_value, + allow_dups, + copy, + use_na_proxy, ) def _reindex_indexer( @@ -966,6 +977,7 @@ def _reindex_indexer( fill_value=None, allow_dups: bool = False, copy: bool = True, + use_na_proxy: bool = False, ) -> T: """ Parameters @@ -1000,7 +1012,9 @@ def _reindex_indexer( new_arrays = [] for i in indexer: if i == -1: - arr = self._make_na_array(fill_value=fill_value) + arr = self._make_na_array( + fill_value=fill_value, use_na_proxy=use_na_proxy + ) else: arr = self.arrays[i] new_arrays.append(arr) @@ -1051,7 +1065,11 @@ def take(self: T, indexer, axis: int = 1, verify: bool = True) -> T: new_axis=new_labels, indexer=indexer, axis=axis, allow_dups=True ) - def _make_na_array(self, fill_value=None): + def _make_na_array(self, fill_value=None, use_na_proxy=False): + if use_na_proxy: + assert fill_value is None + return NullArrayProxy(self.shape_proper[0]) + if fill_value is None: fill_value = np.nan @@ -1271,3 +1289,50 @@ def set_values(self, values: ArrayLike): valid for the current SingleArrayManager (length, dtype, etc). """ self.arrays[0] = values + + +class NullArrayProxy: + """ + Proxy object for an all-NA array. + + Only stores the length of the array, and not the dtype. The dtype + will only be known when actually concatenating (after determining the + common dtype, for which this proxy is ignored). + Using this object avoids that the internals/concat.py needs to determine + the proper dtype and array type. 
+ """ + + ndim = 1 + + def __init__(self, n: int): + self.n = n + + @property + def shape(self): + return (self.n,) + + def to_array(self, dtype: DtypeObj) -> ArrayLike: + """ + Helper function to create the actual all-NA array from the NullArrayProxy + object. + + Parameters + ---------- + arr : NullArrayProxy + dtype : the dtype for the resulting array + + Returns + ------- + np.ndarray or ExtensionArray + """ + if isinstance(dtype, ExtensionDtype): + empty = dtype.construct_array_type()._from_sequence([], dtype=dtype) + indexer = -np.ones(self.n, dtype=np.intp) + return empty.take(indexer, allow_fill=True) + else: + # when introducing missing values, int becomes float, bool becomes object + dtype = ensure_dtype_can_hold_na(dtype) + fill_value = na_value_for_dtype(dtype) + arr = np.empty(self.n, dtype=dtype) + arr.fill(fill_value) + return ensure_wrapped_if_datetimelike(arr) diff --git a/pandas/core/internals/concat.py b/pandas/core/internals/concat.py index b1f90834f09c3..687c8768fb251 100644 --- a/pandas/core/internals/concat.py +++ b/pandas/core/internals/concat.py @@ -28,7 +28,10 @@ is_extension_array_dtype, is_sparse, ) -from pandas.core.dtypes.concat import concat_compat +from pandas.core.dtypes.concat import ( + cast_to_common_type, + concat_compat, +) from pandas.core.dtypes.dtypes import ExtensionDtype from pandas.core.dtypes.missing import ( is_valid_na_for_dtype, @@ -42,7 +45,10 @@ ExtensionArray, ) from pandas.core.construction import ensure_wrapped_if_datetimelike -from pandas.core.internals.array_manager import ArrayManager +from pandas.core.internals.array_manager import ( + ArrayManager, + NullArrayProxy, +) from pandas.core.internals.blocks import ( ensure_block_shape, new_block, @@ -74,14 +80,16 @@ def _concatenate_array_managers( mgrs = [] for mgr, indexers in mgrs_indexers: for ax, indexer in indexers.items(): - mgr = mgr.reindex_indexer(axes[ax], indexer, axis=ax, allow_dups=True) + mgr = mgr.reindex_indexer( + axes[ax], indexer, axis=ax, allow_dups=True, use_na_proxy=True + ) mgrs.append(mgr) if concat_axis == 1: # concatting along the rows -> concat the reindexed arrays # TODO(ArrayManager) doesn't yet preserve the correct dtype arrays = [ - concat_compat([mgrs[i].arrays[j] for i in range(len(mgrs))]) + concat_arrays([mgrs[i].arrays[j] for i in range(len(mgrs))]) for j in range(len(mgrs[0].arrays)) ] return ArrayManager(arrays, [axes[1], axes[0]], verify_integrity=False) @@ -92,6 +100,68 @@ def _concatenate_array_managers( return ArrayManager(arrays, [axes[1], axes[0]], verify_integrity=False) +def concat_arrays(to_concat: list) -> ArrayLike: + """ + Alternative for concat_compat but specialized for use in the ArrayManager. + + Differences: only deals with 1D arrays (no axis keyword), assumes + ensure_wrapped_if_datetimelike and does not skip empty arrays to determine + the dtype. + In addition ensures that all NullArrayProxies get replaced with actual + arrays. 
+ + Parameters + ---------- + to_concat : list of arrays + + Returns + ------- + np.ndarray or ExtensionArray + """ + # ignore the all-NA proxies to determine the resulting dtype + to_concat_no_proxy = [x for x in to_concat if not isinstance(x, NullArrayProxy)] + + single_dtype = len({x.dtype for x in to_concat_no_proxy}) == 1 + + if not single_dtype: + target_dtype = find_common_type([arr.dtype for arr in to_concat_no_proxy]) + else: + target_dtype = to_concat_no_proxy[0].dtype + + if target_dtype.kind in ["m", "M"]: + # for datetimelike use DatetimeArray/TimedeltaArray concatenation + # don't use arr.astype(target_dtype, copy=False), because that doesn't + # work for DatetimeArray/TimedeltaArray (returns ndarray) + to_concat = [ + arr.to_array(target_dtype) if isinstance(arr, NullArrayProxy) else arr + for arr in to_concat + ] + return type(to_concat_no_proxy[0])._concat_same_type(to_concat, axis=0) + + to_concat = [ + arr.to_array(target_dtype) + if isinstance(arr, NullArrayProxy) + else cast_to_common_type(arr, target_dtype) + for arr in to_concat + ] + + if isinstance(to_concat[0], ExtensionArray): + cls = type(to_concat[0]) + return cls._concat_same_type(to_concat) + + result = np.concatenate(to_concat) + + # TODO decide on exact behaviour (we shouldn't do this only for empty result) + # see https://github.com/pandas-dev/pandas/issues/39817 + if len(result) == 0: + # all empties -> check for bool to not coerce to float + kinds = {obj.dtype.kind for obj in to_concat_no_proxy} + if len(kinds) != 1: + if "b" in kinds: + result = result.astype(object) + return result + + def concatenate_managers( mgrs_indexers, axes: list[Index], concat_axis: int, copy: bool ) -> Manager: diff --git a/pandas/tests/extension/base/reshaping.py b/pandas/tests/extension/base/reshaping.py index 5a2d928eea744..de3af31ece7b0 100644 --- a/pandas/tests/extension/base/reshaping.py +++ b/pandas/tests/extension/base/reshaping.py @@ -3,8 +3,6 @@ import numpy as np import pytest -import pandas.util._test_decorators as td - import pandas as pd from pandas.api.extensions import ExtensionArray from pandas.core.internals import ExtensionBlock @@ -111,7 +109,6 @@ def test_concat_extension_arrays_copy_false(self, data, na_value): result = pd.concat([df1, df2], axis=1, copy=False) self.assert_frame_equal(result, expected) - @td.skip_array_manager_not_yet_implemented # TODO(ArrayManager) concat reindex def test_concat_with_reindex(self, data): # GH-33027 a = pd.DataFrame({"a": data[:5]}) diff --git a/pandas/tests/frame/methods/test_append.py b/pandas/tests/frame/methods/test_append.py index ba58d88fb4863..f9535e9c7ef17 100644 --- a/pandas/tests/frame/methods/test_append.py +++ b/pandas/tests/frame/methods/test_append.py @@ -1,8 +1,6 @@ import numpy as np import pytest -import pandas.util._test_decorators as td - import pandas as pd from pandas import ( DataFrame, @@ -13,9 +11,6 @@ ) import pandas._testing as tm -# TODO td.skip_array_manager_not_yet_implemented -# appending with reindexing not yet working - class TestDataFrameAppend: def test_append_multiindex(self, multiindex_dataframe_random_data, frame_or_series): @@ -43,7 +38,6 @@ def test_append_empty_list(self): tm.assert_frame_equal(result, expected) assert result is not df # .append() should return a new object - @td.skip_array_manager_not_yet_implemented def test_append_series_dict(self): df = DataFrame(np.random.randn(5, 4), columns=["foo", "bar", "baz", "qux"]) @@ -84,7 +78,6 @@ def test_append_series_dict(self): expected = df.append(df[-1:], ignore_index=True) 
tm.assert_frame_equal(result, expected) - @td.skip_array_manager_not_yet_implemented def test_append_list_of_series_dicts(self): df = DataFrame(np.random.randn(5, 4), columns=["foo", "bar", "baz", "qux"]) @@ -103,7 +96,6 @@ def test_append_list_of_series_dicts(self): expected = df.append(DataFrame(dicts), ignore_index=True, sort=True) tm.assert_frame_equal(result, expected) - @td.skip_array_manager_not_yet_implemented def test_append_missing_cols(self): # GH22252 # exercise the conditional branch in append method where the data @@ -148,8 +140,7 @@ def test_append_empty_dataframe(self): expected = df1.copy() tm.assert_frame_equal(result, expected) - @td.skip_array_manager_not_yet_implemented - def test_append_dtypes(self): + def test_append_dtypes(self, using_array_manager): # GH 5754 # row appends of different dtypes (so need to do by-item) @@ -173,6 +164,10 @@ def test_append_dtypes(self): expected = DataFrame( {"bar": Series([Timestamp("20130101"), np.nan], dtype="M8[ns]")} ) + if using_array_manager: + # TODO(ArrayManager) decide on exact casting rules in concat + # With ArrayManager, all-NaN float is not ignored + expected = expected.astype(object) tm.assert_frame_equal(result, expected) df1 = DataFrame({"bar": Timestamp("20130101")}, index=range(1)) @@ -181,6 +176,9 @@ def test_append_dtypes(self): expected = DataFrame( {"bar": Series([Timestamp("20130101"), np.nan], dtype="M8[ns]")} ) + if using_array_manager: + # With ArrayManager, all-NaN float is not ignored + expected = expected.astype(object) tm.assert_frame_equal(result, expected) df1 = DataFrame({"bar": np.nan}, index=range(1)) @@ -189,6 +187,9 @@ def test_append_dtypes(self): expected = DataFrame( {"bar": Series([np.nan, Timestamp("20130101")], dtype="M8[ns]")} ) + if using_array_manager: + # With ArrayManager, all-NaN float is not ignored + expected = expected.astype(object) tm.assert_frame_equal(result, expected) df1 = DataFrame({"bar": Timestamp("20130101")}, index=range(1)) @@ -208,7 +209,6 @@ def test_append_timestamps_aware_or_naive(self, tz_naive_fixture, timestamp): expected = Series(Timestamp(timestamp, tz=tz), name=0) tm.assert_series_equal(result, expected) - @td.skip_array_manager_not_yet_implemented @pytest.mark.parametrize( "data, dtype", [ diff --git a/pandas/tests/reshape/concat/__init__.py b/pandas/tests/reshape/concat/__init__.py index 777923be02398..e69de29bb2d1d 100644 --- a/pandas/tests/reshape/concat/__init__.py +++ b/pandas/tests/reshape/concat/__init__.py @@ -1,4 +0,0 @@ -import pandas.util._test_decorators as td - -# TODO(ArrayManager) concat axis=0 -pytestmark = td.skip_array_manager_not_yet_implemented diff --git a/pandas/tests/reshape/concat/test_append.py b/pandas/tests/reshape/concat/test_append.py index 7b9f8d1c2879e..1c533ec9fb1e3 100644 --- a/pandas/tests/reshape/concat/test_append.py +++ b/pandas/tests/reshape/concat/test_append.py @@ -6,6 +6,8 @@ import numpy as np import pytest +import pandas.util._test_decorators as td + import pandas as pd from pandas import ( DataFrame, @@ -338,6 +340,10 @@ def test_append_missing_column_proper_upcast(self, sort): assert appended["A"].dtype == "f8" assert appended["B"].dtype == "O" + # TODO(ArrayManager) DataFrame.append reindexes a Series itself (giving + # float dtype) -> delay reindexing until concat_array_managers which properly + # takes care of all-null dtype inference + @td.skip_array_manager_not_yet_implemented def test_append_empty_frame_to_series_with_dateutil_tz(self): # GH 23682 date = Timestamp("2018-10-24 07:30:00", tz=dateutil.tz.tzutc()) 
diff --git a/pandas/tests/reshape/concat/test_concat.py b/pandas/tests/reshape/concat/test_concat.py index 55ae754761a07..46029b8a695ea 100644 --- a/pandas/tests/reshape/concat/test_concat.py +++ b/pandas/tests/reshape/concat/test_concat.py @@ -8,6 +8,8 @@ import numpy as np import pytest +import pandas.util._test_decorators as td + import pandas as pd from pandas import ( DataFrame, @@ -41,6 +43,8 @@ def test_append_concat(self): assert isinstance(result.index, PeriodIndex) assert result.index[0] == s1.index[0] + # TODO(ArrayManager) using block internals to verify, needs rewrite + @td.skip_array_manager_invalid_test def test_concat_copy(self): df = DataFrame(np.random.randn(4, 3)) df2 = DataFrame(np.random.randint(0, 10, size=4).reshape(4, 1)) diff --git a/pandas/tests/reshape/merge/test_merge.py b/pandas/tests/reshape/merge/test_merge.py index 4fa2865a9e320..9699a0dec4891 100644 --- a/pandas/tests/reshape/merge/test_merge.py +++ b/pandas/tests/reshape/merge/test_merge.py @@ -676,7 +676,7 @@ def _constructor(self): assert isinstance(result, NotADataFrame) - def test_join_append_timedeltas(self): + def test_join_append_timedeltas(self, using_array_manager): # timedelta64 issues with join/merge # GH 5695 @@ -690,6 +690,9 @@ def test_join_append_timedeltas(self): "t": [timedelta(0, 22500), timedelta(0, 22500)], } ) + if using_array_manager: + # TODO(ArrayManager) decide on exact casting rules in concat + expected = expected.astype(object) tm.assert_frame_equal(result, expected) td = np.timedelta64(300000000) diff --git a/pandas/tests/reshape/test_crosstab.py b/pandas/tests/reshape/test_crosstab.py index 5cc65feee869b..44299d51a878f 100644 --- a/pandas/tests/reshape/test_crosstab.py +++ b/pandas/tests/reshape/test_crosstab.py @@ -1,8 +1,6 @@ import numpy as np import pytest -import pandas.util._test_decorators as td - from pandas.core.dtypes.common import is_categorical_dtype from pandas import ( @@ -440,8 +438,7 @@ def test_crosstab_normalize_arrays(self): ) tm.assert_frame_equal(test_case, norm_sum) - @td.skip_array_manager_not_yet_implemented # TODO(ArrayManager) concat axis=0 - def test_crosstab_with_empties(self): + def test_crosstab_with_empties(self, using_array_manager): # Check handling of empties df = DataFrame( { @@ -466,6 +463,9 @@ def test_crosstab_with_empties(self): index=Index([1, 2], name="a", dtype="int64"), columns=Index([3, 4], name="b"), ) + if using_array_manager: + # INFO(ArrayManager) column without NaNs can preserve int dtype + nans[3] = nans[3].astype("int64") calculated = crosstab(df.a, df.b, values=df.c, aggfunc="count", normalize=False) tm.assert_frame_equal(nans, calculated) diff --git a/pandas/tests/reshape/test_pivot.py b/pandas/tests/reshape/test_pivot.py index 8d8a83c233444..20aa0c9e2ee9a 100644 --- a/pandas/tests/reshape/test_pivot.py +++ b/pandas/tests/reshape/test_pivot.py @@ -8,8 +8,6 @@ import numpy as np import pytest -import pandas.util._test_decorators as td - import pandas as pd from pandas import ( Categorical, @@ -1199,8 +1197,7 @@ def test_pivot_table_with_margins_set_margin_name(self, margin_name): margins_name=margin_name, ) - @td.skip_array_manager_not_yet_implemented # TODO(ArrayManager) concat axis=0 - def test_pivot_timegrouper(self): + def test_pivot_timegrouper(self, using_array_manager): df = DataFrame( { "Branch": "A A A A A A A B".split(), @@ -1254,6 +1251,9 @@ def test_pivot_timegrouper(self): ) expected.index.name = "Date" expected.columns.name = "Buyer" + if using_array_manager: + # INFO(ArrayManager) column without NaNs can 
preserve int dtype + expected["Carl"] = expected["Carl"].astype("int64") result = pivot_table( df, diff --git a/pandas/tests/reshape/test_pivot_multilevel.py b/pandas/tests/reshape/test_pivot_multilevel.py index ab41a94d1ff25..7801262554a5e 100644 --- a/pandas/tests/reshape/test_pivot_multilevel.py +++ b/pandas/tests/reshape/test_pivot_multilevel.py @@ -1,8 +1,6 @@ import numpy as np import pytest -import pandas.util._test_decorators as td - import pandas as pd from pandas import ( Index, @@ -198,8 +196,7 @@ def test_pivot_list_like_columns( tm.assert_frame_equal(result, expected) -@td.skip_array_manager_not_yet_implemented # TODO(ArrayManager) concat axis=0 -def test_pivot_multiindexed_rows_and_cols(): +def test_pivot_multiindexed_rows_and_cols(using_array_manager): # GH 36360 df = pd.DataFrame( @@ -221,11 +218,14 @@ def test_pivot_multiindexed_rows_and_cols(): ) expected = pd.DataFrame( - data=[[5.0, np.nan], [10.0, 7.0]], + data=[[5, np.nan], [10, 7.0]], columns=MultiIndex.from_tuples( [(0, 1, 0), (0, 1, 1)], names=["col_L0", "col_L1", "idx_L1"] ), index=Int64Index([0, 1], dtype="int64", name="idx_L0"), ) + if not using_array_manager: + # BlockManager does not preserve the dtypes + expected = expected.astype("float64") tm.assert_frame_equal(res, expected)
xref https://github.com/pandas-dev/pandas/issues/39146 (the concat work item)

This PR implements `concat` for DataFrames using the ArrayManager, and all tests in `pandas/tests/reshape/` already pass. Summary of the changes:

* In `internals/concat.py`, I added a `concatenate_block_managers` equivalent for ArrayManagers. This function simply reindexes the managers and then concats the arrays (axis=0) or combines them in a single manager (axis=1).
* The main problem with that simple approach is that, when reindexing the ArrayManager with the given indexer, we do not yet know the resulting dtype for a new all-NA column that needs to be added (i.e. the common dtype we would find when actually concatting). And we can't make this, e.g., an all-NaN object array, as that would influence the `find_common_type` logic.
* My current approach is to add a dummy "proxy" object that signals such an all-NA array (the `NullArrayProxy` class), tracking only its length. When actually concatting, we replace it with an actual all-NA array once the resulting dtype has been determined. The proxy object lives only inside the concatting code (it is created only when we are about to call `concat_compat`, and inside that function it always gets replaced), so the user never sees it.
* An alternative would be to first determine the resulting dtype in `internals/concat.py`, pass it to the `reindex` functionality, and do the actual concatenation after reindexing. But IMO that would be more complex: the concat / find_common_type logic gets involved twice (before and after reindexing), and collecting the dtypes for which we want the common type is also quite tricky before actually reindexing the manager.

Basically, this replaces the ~600 lines of `internals/concat.py` with 25 lines of `concatenate_array_managers` plus the `NullArrayProxy` and `_array_from_proxy` code in `dtypes/concat.py` (but I am quite likely still missing some corner cases!)

Note: I did not explicitly test `copy=True/False` behaviour for concat along the columns with this new code.

cc @jreback @jbrockmendel
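To make the proxy idea concrete, here is a minimal, illustrative sketch. Only the `NullArrayProxy` and `_array_from_proxy` names come from the description above; the signatures and dtype handling shown here are assumptions, not the PR's exact code:

```python
import numpy as np


class NullArrayProxy:
    """Length-only placeholder for an all-NA column whose dtype is unknown
    until the common dtype across the concatenated managers is determined."""

    ndim = 1

    def __init__(self, n: int) -> None:
        self.n = n

    @property
    def shape(self) -> tuple:
        return (self.n,)


def array_from_proxy(proxy: NullArrayProxy, dtype: np.dtype) -> np.ndarray:
    """Materialize the actual all-NA array once the result dtype is known.

    The real ``_array_from_proxy`` also handles datetime-like and extension
    dtypes; this sketch only covers float and object.
    """
    if dtype.kind == "f":
        return np.full(proxy.shape, np.nan, dtype=dtype)
    return np.full(proxy.shape, None, dtype=object)


# Example: the proxy is resolved to float NaNs once concat settles on float64
arr = array_from_proxy(NullArrayProxy(3), np.dtype("float64"))
print(arr)  # [nan nan nan]
```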
https://api.github.com/repos/pandas-dev/pandas/pulls/39612
2021-02-05T12:15:00Z
2021-04-12T10:56:11Z
2021-04-12T10:56:11Z
2021-04-12T10:56:14Z
Minor fix in read_csv docs
diff --git a/pandas/io/parsers/readers.py b/pandas/io/parsers/readers.py index d547bfbfb3895..dc45336bb4c0f 100644 --- a/pandas/io/parsers/readers.py +++ b/pandas/io/parsers/readers.py @@ -283,7 +283,7 @@ error_bad_lines : bool, default True Lines with too many fields (e.g. a csv line with too many commas) will by default cause an exception to be raised, and no DataFrame will be returned. - If False, then these "bad lines" will dropped from the DataFrame that is + If False, then these "bad lines" will be dropped from the DataFrame that is returned. warn_bad_lines : bool, default True If error_bad_lines is False, and warn_bad_lines is True, a warning for each
Fix typo.
https://api.github.com/repos/pandas-dev/pandas/pulls/39608
2021-02-05T08:04:27Z
2021-02-05T08:53:02Z
2021-02-05T08:53:02Z
2021-02-05T08:53:13Z
BUG: quantile for ExtensionArray
diff --git a/pandas/core/array_algos/quantile.py b/pandas/core/array_algos/quantile.py new file mode 100644 index 0000000000000..8d4dd7be28839 --- /dev/null +++ b/pandas/core/array_algos/quantile.py @@ -0,0 +1,77 @@ +import numpy as np + +from pandas._libs import lib + +from pandas.core.dtypes.common import is_list_like + +from pandas.core.nanops import nanpercentile + + +def quantile_with_mask( + values: np.ndarray, + mask: np.ndarray, + fill_value, + qs, + interpolation: str, + axis: int, +) -> np.ndarray: + """ + Compute the quantiles of the given values for each quantile in `qs`. + + Parameters + ---------- + values : np.ndarray + For ExtensionArray, this is _values_for_factorize()[0] + mask : np.ndarray[bool] + mask = isna(values) + For ExtensionArray, this is computed before calling _value_for_factorize + fill_value : Scalar + The value to interpret fill NA entries with + For ExtensionArray, this is _values_for_factorize()[1] + qs : a scalar or list of the quantiles to be computed + interpolation : str + Type of interpolation + axis : int + Axis along which to compute quantiles. + + Returns + ------- + np.ndarray + + Notes + ----- + Assumes values is already 2D. For ExtensionArray this means np.atleast_2d + has been called on _values_for_factorize()[0] + """ + is_empty = values.shape[axis] == 0 + orig_scalar = not is_list_like(qs) + if orig_scalar: + # make list-like, unpack later + qs = [qs] + + if is_empty: + # create the array of na_values + # 2d len(values) * len(qs) + flat = np.array([fill_value] * len(qs)) + result = np.repeat(flat, len(values)).reshape(len(values), len(qs)) + else: + # asarray needed for Sparse, see GH#24600 + result = nanpercentile( + values, + np.array(qs) * 100, + axis=axis, + na_value=fill_value, + mask=mask, + ndim=values.ndim, + interpolation=interpolation, + ) + + result = np.array(result, copy=False) + result = result.T + + if orig_scalar: + assert result.shape[-1] == 1, result.shape + result = result[..., 0] + result = lib.item_from_zerodim(result) + + return result diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index 48df98930244a..162a69370bc61 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -422,7 +422,8 @@ def copy(self: DatetimeLikeArrayT) -> DatetimeLikeArrayT: return new_obj def _values_for_factorize(self): - return self._ndarray, iNaT + # int64 instead of int ensures we have a "view" method + return self._ndarray, np.int64(iNaT) @classmethod def _from_factorized( diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 30e94b99b53c9..b38f3f3d7a87e 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -56,6 +56,7 @@ putmask_smart, putmask_without_repeat, ) +from pandas.core.array_algos.quantile import quantile_with_mask from pandas.core.array_algos.replace import ( compare_or_regex_search, replace_regex, @@ -79,7 +80,6 @@ is_scalar_indexer, ) import pandas.core.missing as missing -from pandas.core.nanops import nanpercentile if TYPE_CHECKING: from pandas import Float64Index, Index @@ -1405,31 +1405,11 @@ def quantile( assert axis == 1 # only ever called this way assert is_list_like(qs) # caller is responsible for this - values = self.get_values() - - is_empty = values.shape[axis] == 0 - - if is_empty: - # create the array of na_values - # 2d len(values) * len(qs) - result = np.repeat( - np.array([self.fill_value] * len(qs)), len(values) - ).reshape(len(values), len(qs)) - else: - # asarray needed for 
Sparse, see GH#24600 - mask = np.asarray(isna(values)) - result = nanpercentile( - values, - np.array(qs) * 100, - axis=axis, - na_value=self.fill_value, - mask=mask, - ndim=values.ndim, - interpolation=interpolation, - ) + fill_value = self.fill_value + values = self.values + mask = np.asarray(isna(values)) - result = np.array(result, copy=False) - result = result.T + result = quantile_with_mask(values, mask, fill_value, qs, interpolation, axis) return make_block(result, placement=self.mgr_locs, ndim=2) @@ -1860,6 +1840,24 @@ def _unstack(self, unstacker, fill_value, new_placement): ] return blocks, mask + def quantile(self, qs, interpolation="linear", axis: int = 0) -> Block: + # asarray needed for Sparse, see GH#24600 + mask = np.asarray(isna(self.values)) + mask = np.atleast_2d(mask) + + values, fill_value = self.values._values_for_factorize() + + values = np.atleast_2d(values) + + result = quantile_with_mask(values, mask, fill_value, qs, interpolation, axis) + + if not is_sparse(self.dtype): + # shape[0] should be 1 as long as EAs are 1D + assert result.shape == (1, len(qs)), result.shape + result = type(self.values)._from_factorized(result[0], self.values) + + return make_block(result, placement=self.mgr_locs, ndim=2) + class HybridMixin: """ @@ -2191,22 +2189,6 @@ def fillna( value, limit=limit, inplace=inplace, downcast=downcast ) - def quantile( - self, qs: Float64Index, interpolation="linear", axis: int = 0 - ) -> Block: - assert axis == 1 # only ever called this way - naive = self.values.view("M8[ns]") - - # TODO(EA2D): kludge for 2D block with 1D values - naive = naive.reshape(self.shape) - - blk = self.make_block(naive) - res_blk = blk.quantile(qs, interpolation=interpolation, axis=axis) - - # TODO(EA2D): ravel is kludge for 2D block with 1D values, assumes column-like - aware = self._holder(res_blk.values.ravel(), dtype=self.dtype) - return self.make_block_same_class(aware, ndim=res_blk.ndim) - def _check_ndim(self, values, ndim): """ ndim inference and validation. diff --git a/pandas/tests/frame/methods/test_quantile.py b/pandas/tests/frame/methods/test_quantile.py index 3f7f2e51add96..6d6016df52238 100644 --- a/pandas/tests/frame/methods/test_quantile.py +++ b/pandas/tests/frame/methods/test_quantile.py @@ -533,3 +533,113 @@ def test_quantile_item_cache(self): ser.values[0] = 99 assert df.iloc[0, 0] == df["A"][0] + + +class TestQuantileExtensionDtype: + # TODO: tests for axis=1? + # TODO: empty case? 
might as well do dt64 and td64 here too + + @pytest.fixture( + params=[ + pytest.param( + pd.IntervalIndex.from_breaks(range(10)), + marks=pytest.mark.xfail(reason="raises when trying to add Intervals"), + ), + pd.period_range("2016-01-01", periods=9, freq="D"), + pd.date_range("2016-01-01", periods=9, tz="US/Pacific"), + pytest.param( + pd.array(np.arange(9), dtype="Int64"), + marks=pytest.mark.xfail(reason="doesnt implement from_factorized"), + ), + pytest.param( + pd.array(np.arange(9), dtype="Float64"), + marks=pytest.mark.xfail(reason="doesnt implement from_factorized"), + ), + ], + ids=lambda x: str(x.dtype), + ) + def index(self, request): + idx = request.param + idx.name = "A" + return idx + + def compute_quantile(self, obj, qs): + if isinstance(obj, Series): + result = obj.quantile(qs) + else: + result = obj.quantile(qs, numeric_only=False) + return result + + def test_quantile_ea(self, index, frame_or_series): + obj = frame_or_series(index).copy() + + # result should be invariant to shuffling + indexer = np.arange(len(index), dtype=np.intp) + np.random.shuffle(indexer) + obj = obj.iloc[indexer] + + qs = [0.5, 0, 1] + result = self.compute_quantile(obj, qs) + + # expected here assumes len(index) == 9 + expected = Series([index[4], index[0], index[-1]], index=qs, name="A") + expected = frame_or_series(expected) + + tm.assert_equal(result, expected) + + def test_quantile_ea_with_na(self, index, frame_or_series): + obj = frame_or_series(index).copy() + + obj.iloc[0] = index._na_value + obj.iloc[-1] = index._na_value + + # result should be invariant to shuffling + indexer = np.arange(len(index), dtype=np.intp) + np.random.shuffle(indexer) + obj = obj.iloc[indexer] + + qs = [0.5, 0, 1] + result = self.compute_quantile(obj, qs) + + # expected here assumes len(index) == 9 + expected = Series([index[4], index[1], index[-2]], index=qs, name="A") + expected = frame_or_series(expected) + tm.assert_equal(result, expected) + + def test_quantile_ea_all_na(self, index, frame_or_series): + + obj = frame_or_series(index).copy() + + obj.iloc[:] = index._na_value + + # result should be invariant to shuffling + indexer = np.arange(len(index), dtype=np.intp) + np.random.shuffle(indexer) + obj = obj.iloc[indexer] + + qs = [0.5, 0, 1] + result = self.compute_quantile(obj, qs) + + expected = index.take([-1, -1, -1], allow_fill=True, fill_value=index._na_value) + expected = Series(expected, index=qs) + expected = frame_or_series(expected) + tm.assert_equal(result, expected) + + def test_quantile_ea_scalar(self, index, frame_or_series): + # scalar qs + obj = frame_or_series(index).copy() + + # result should be invariant to shuffling + indexer = np.arange(len(index), dtype=np.intp) + np.random.shuffle(indexer) + obj = obj.iloc[indexer] + + qs = 0.5 + result = self.compute_quantile(obj, qs) + + expected = Series({"A": index[4]}, name=0.5) + if frame_or_series is Series: + expected = expected["A"] + assert result == expected + else: + tm.assert_series_equal(result, expected)
- [ ] closes #xxxx
- [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [ ] whatsnew entry

I _think_ this should work for arbitrary EAs, but until now we only had tests for Sparse and dt64tz; this PR adds Period (and some previously untested cases for dt64tz). Part of the idea is to get the code out of the Block method for ArrayManager compat.
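A small usage sketch of the behavior the new tests exercise (assuming this branch; the expected values follow `TestQuantileExtensionDtype` above):

```python
import pandas as pd

# Extension-dtype quantile now round-trips through
# _values_for_factorize / _from_factorized instead of falling over.
ser = pd.Series(pd.period_range("2016-01-01", periods=9, freq="D"), name="A")

print(ser.quantile(0.5))          # Period('2016-01-05', 'D')
print(ser.quantile([0, 0.5, 1]))  # first, median and last period
```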
https://api.github.com/repos/pandas-dev/pandas/pulls/39606
2021-02-05T04:50:25Z
2021-02-12T01:24:39Z
2021-02-12T01:24:39Z
2021-02-12T07:12:55Z
REGR: appending to existing excel file created corrupt files
diff --git a/doc/source/whatsnew/v1.2.2.rst b/doc/source/whatsnew/v1.2.2.rst index 63e793c013497..8e69bf5e1991a 100644 --- a/doc/source/whatsnew/v1.2.2.rst +++ b/doc/source/whatsnew/v1.2.2.rst @@ -21,6 +21,7 @@ Fixed regressions - Fixed regression in :meth:`~DataFrame.to_pickle` failing to create bz2/xz compressed pickle files with ``protocol=5`` (:issue:`39002`) - Fixed regression in :func:`pandas.testing.assert_series_equal` and :func:`pandas.testing.assert_frame_equal` always raising ``AssertionError`` when comparing extension dtypes (:issue:`39410`) - Fixed regression in :meth:`~DataFrame.to_csv` opening ``codecs.StreamWriter`` in binary mode instead of in text mode and ignoring user-provided ``mode`` (:issue:`39247`) +- Fixed regression in :meth:`~DataFrame.to_excel` creating corrupt files when appending (``mode="a"``) to an existing file (:issue:`39576`) - Fixed regression in :meth:`DataFrame.transform` failing in case of an empty DataFrame or Series (:issue:`39636`) - Fixed regression in :meth:`core.window.rolling.Rolling.count` where the ``min_periods`` argument would be set to ``0`` after the operation (:issue:`39554`) - diff --git a/pandas/io/excel/_openpyxl.py b/pandas/io/excel/_openpyxl.py index 64c64b5009b0c..b53db6c726c4d 100644 --- a/pandas/io/excel/_openpyxl.py +++ b/pandas/io/excel/_openpyxl.py @@ -1,6 +1,7 @@ from __future__ import annotations from distutils.version import LooseVersion +import mmap from typing import TYPE_CHECKING, Dict, List, Optional import numpy as np @@ -40,6 +41,7 @@ def __init__( from openpyxl import load_workbook self.book = load_workbook(self.handles.handle) + self.handles.handle.seek(0) else: # Create workbook object with default optimized_write=True. self.book = Workbook() @@ -52,6 +54,9 @@ def save(self): Save workbook to disk. """ self.book.save(self.handles.handle) + if "r+" in self.mode and not isinstance(self.handles.handle, mmap.mmap): + # truncate file to the written content + self.handles.handle.truncate() @classmethod def _convert_to_style_kwargs(cls, style_dict: dict) -> Dict[str, Serialisable]: diff --git a/pandas/tests/io/excel/test_openpyxl.py b/pandas/tests/io/excel/test_openpyxl.py index 640501baffc62..b365f4edab83c 100644 --- a/pandas/tests/io/excel/test_openpyxl.py +++ b/pandas/tests/io/excel/test_openpyxl.py @@ -1,4 +1,5 @@ from distutils.version import LooseVersion +from pathlib import Path import numpy as np import pytest @@ -149,3 +150,22 @@ def test_read_with_bad_dimension(datapath, ext, header, expected_data, filename) result = pd.read_excel(path, header=header) expected = DataFrame(expected_data) tm.assert_frame_equal(result, expected) + + +def test_append_mode_file(ext): + # GH 39576 + df = DataFrame() + + with tm.ensure_clean(ext) as f: + df.to_excel(f, engine="openpyxl") + + with ExcelWriter(f, mode="a", engine="openpyxl") as writer: + df.to_excel(writer) + + # make sure that zip files are not concatenated by making sure that + # "docProps/app.xml" only occurs twice in the file + data = Path(f).read_bytes() + first = data.find(b"docProps/app.xml") + second = data.find(b"docProps/app.xml", first + 1) + third = data.find(b"docProps/app.xml", second + 1) + assert second != -1 and third == -1
- [x] closes #39576
- [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [x] whatsnew entry

Tentatively ready: I added a working test, but I don't have MS Excel to confirm that it can open the resulting file without complaint. Waiting for someone to test it: https://github.com/pandas-dev/pandas/issues/39576#issuecomment-774102850
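For reviewers, a hypothetical standalone demo (not pandas code) of why the `truncate()` call in `save()` matters: writing fewer bytes than the file already holds in `r+` mode leaves the old tail in place, which for a zip container means two concatenated archives, hence the corrupt file:

```python
with open("demo.bin", "wb") as f:
    f.write(b"A" * 100)   # pre-existing content, e.g. the old workbook

with open("demo.bin", "rb+") as f:
    f.write(b"B" * 10)    # rewrite with shorter content
    f.truncate()          # without this, 90 stale bytes remain

with open("demo.bin", "rb") as f:
    # 10 with the truncate; 100 (10 new + 90 stale) without it
    print(len(f.read()))
```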
https://api.github.com/repos/pandas-dev/pandas/pulls/39605
2021-02-05T03:32:07Z
2021-02-07T17:28:01Z
2021-02-07T17:28:01Z
2021-02-07T21:57:11Z
REGR: Rolling.count setting min_periods after call
diff --git a/doc/source/whatsnew/v1.2.2.rst b/doc/source/whatsnew/v1.2.2.rst index cc5653fe2f360..974f84d3b244a 100644 --- a/doc/source/whatsnew/v1.2.2.rst +++ b/doc/source/whatsnew/v1.2.2.rst @@ -21,6 +21,7 @@ Fixed regressions - Fixed regression in :meth:`~DataFrame.to_pickle` failing to create bz2/xz compressed pickle files with ``protocol=5`` (:issue:`39002`) - Fixed regression in :func:`pandas.testing.assert_series_equal` and :func:`pandas.testing.assert_frame_equal` always raising ``AssertionError`` when comparing extension dtypes (:issue:`39410`) - Fixed regression in :meth:`~DataFrame.to_csv` opening ``codecs.StreamWriter`` in binary mode instead of in text mode and ignoring user-provided ``mode`` (:issue:`39247`) +- Fixed regression in :meth:`core.window.rolling.Rolling.count` where the ``min_periods`` argument would be set to ``0`` after the operation (:issue:`39554`) - .. --------------------------------------------------------------------------- diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py index b35d62e484280..9a68e470201c7 100644 --- a/pandas/core/window/rolling.py +++ b/pandas/core/window/rolling.py @@ -1507,7 +1507,11 @@ def count(self): FutureWarning, ) self.min_periods = 0 - return super().count() + result = super().count() + self.min_periods = None + else: + result = super().count() + return result @doc( template_header, diff --git a/pandas/tests/window/test_api.py b/pandas/tests/window/test_api.py index 7d3c29dc60be0..300f3f5729614 100644 --- a/pandas/tests/window/test_api.py +++ b/pandas/tests/window/test_api.py @@ -325,3 +325,17 @@ def test_is_datetimelike_deprecated(): s = Series(range(1)).rolling(1) with tm.assert_produces_warning(FutureWarning): assert not s.is_datetimelike + + +@pytest.mark.filterwarnings("ignore:min_periods:FutureWarning") +def test_dont_modify_attributes_after_methods( + arithmetic_win_operators, closed, center, min_periods +): + # GH 39554 + roll_obj = Series(range(1)).rolling( + 1, center=center, closed=closed, min_periods=min_periods + ) + expected = {attr: getattr(roll_obj, attr) for attr in roll_obj._attributes} + getattr(roll_obj, arithmetic_win_operators)() + result = {attr: getattr(roll_obj, attr) for attr in roll_obj._attributes} + assert result == expected diff --git a/pandas/tests/window/test_base_indexer.py b/pandas/tests/window/test_base_indexer.py index 94bc755f300a2..fd4dfa7b7ed2b 100644 --- a/pandas/tests/window/test_base_indexer.py +++ b/pandas/tests/window/test_base_indexer.py @@ -159,7 +159,10 @@ def test_rolling_forward_window(constructor, func, np_func, expected, np_kwargs) # Check that the function output matches applying an alternative function # if min_periods isn't specified - rolling3 = constructor(values).rolling(window=indexer) + # GH 39604: After count-min_periods deprecation, apply(lambda x: len(x)) + # is equivalent to count after setting min_periods=0 + min_periods = 0 if func == "count" else None + rolling3 = constructor(values).rolling(window=indexer, min_periods=min_periods) result3 = getattr(rolling3, func)() expected3 = constructor(rolling3.apply(lambda x: np_func(x, **np_kwargs))) tm.assert_equal(result3, expected3)
- [x] closes #39554
- [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [x] whatsnew entry
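A short repro sketch of the regression (based on GH 39554 and the patch above; the exact outputs are an assumption): calling `.count()` mutated the `Rolling` object, so a later aggregation on the same object silently ran with `min_periods=0`:

```python
import pandas as pd

roll = pd.Series([1.0, None, 3.0]).rolling(2)
roll.count()       # before the fix this left roll.min_periods == 0
print(roll.sum())  # fixed: all NaN, since no window has 2 observations;
                   # the regression returned [1.0, 1.0, 3.0] instead
```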
https://api.github.com/repos/pandas-dev/pandas/pulls/39604
2021-02-05T03:28:52Z
2021-02-05T20:24:03Z
2021-02-05T20:24:03Z
2021-02-07T12:23:43Z
BUG: Timedelta.round near implementation bounds
diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index 4d0384abbf0c6..ccc34678a9f2c 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -279,6 +279,7 @@ Datetimelike - Bug in :class:`Categorical` incorrectly typecasting ``datetime`` object to ``Timestamp`` (:issue:`38878`) - Bug in comparisons between :class:`Timestamp` object and ``datetime64`` objects just outside the implementation bounds for nanosecond ``datetime64`` (:issue:`39221`) - Bug in :meth:`Timestamp.round`, :meth:`Timestamp.floor`, :meth:`Timestamp.ceil` for values near the implementation bounds of :class:`Timestamp` (:issue:`39244`) +- Bug in :meth:`Timedelta.round`, :meth:`Timedelta.floor`, :meth:`Timedelta.ceil` for values near the implementation bounds of :class:`Timedelta` (:issue:`38964`) - Bug in :func:`date_range` incorrectly creating :class:`DatetimeIndex` containing ``NaT`` instead of raising ``OutOfBoundsDatetime`` in corner cases (:issue:`24124`) Timedelta diff --git a/pandas/_libs/tslibs/fields.pyx b/pandas/_libs/tslibs/fields.pyx index 57404b99c7628..2f25df9144f32 100644 --- a/pandas/_libs/tslibs/fields.pyx +++ b/pandas/_libs/tslibs/fields.pyx @@ -636,3 +636,154 @@ def get_locale_names(name_type: str, locale: object = None): """ with set_locale(locale, LC_TIME): return getattr(LocaleTime(), name_type) + + +# --------------------------------------------------------------------- +# Rounding + + +class RoundTo: + """ + enumeration defining the available rounding modes + + Attributes + ---------- + MINUS_INFTY + round towards -∞, or floor [2]_ + PLUS_INFTY + round towards +∞, or ceil [3]_ + NEAREST_HALF_EVEN + round to nearest, tie-break half to even [6]_ + NEAREST_HALF_MINUS_INFTY + round to nearest, tie-break half to -∞ [5]_ + NEAREST_HALF_PLUS_INFTY + round to nearest, tie-break half to +∞ [4]_ + + + References + ---------- + .. [1] "Rounding - Wikipedia" + https://en.wikipedia.org/wiki/Rounding + .. [2] "Rounding down" + https://en.wikipedia.org/wiki/Rounding#Rounding_down + .. [3] "Rounding up" + https://en.wikipedia.org/wiki/Rounding#Rounding_up + .. [4] "Round half up" + https://en.wikipedia.org/wiki/Rounding#Round_half_up + .. [5] "Round half down" + https://en.wikipedia.org/wiki/Rounding#Round_half_down + .. 
[6] "Round half to even" + https://en.wikipedia.org/wiki/Rounding#Round_half_to_even + """ + @property + def MINUS_INFTY(self) -> int: + return 0 + + @property + def PLUS_INFTY(self) -> int: + return 1 + + @property + def NEAREST_HALF_EVEN(self) -> int: + return 2 + + @property + def NEAREST_HALF_PLUS_INFTY(self) -> int: + return 3 + + @property + def NEAREST_HALF_MINUS_INFTY(self) -> int: + return 4 + + +cdef inline ndarray[int64_t] _floor_int64(int64_t[:] values, int64_t unit): + cdef: + Py_ssize_t i, n = len(values) + ndarray[int64_t] result = np.empty(n, dtype="i8") + int64_t res, value + + with cython.overflowcheck(True): + for i in range(n): + value = values[i] + if value == NPY_NAT: + res = NPY_NAT + else: + res = value - value % unit + result[i] = res + + return result + + +cdef inline ndarray[int64_t] _ceil_int64(int64_t[:] values, int64_t unit): + cdef: + Py_ssize_t i, n = len(values) + ndarray[int64_t] result = np.empty(n, dtype="i8") + int64_t res, value + + with cython.overflowcheck(True): + for i in range(n): + value = values[i] + + if value == NPY_NAT: + res = NPY_NAT + else: + remainder = value % unit + if remainder == 0: + res = value + else: + res = value + (unit - remainder) + + result[i] = res + + return result + + +cdef inline ndarray[int64_t] _rounddown_int64(values, int64_t unit): + return _ceil_int64(values - unit // 2, unit) + + +cdef inline ndarray[int64_t] _roundup_int64(values, int64_t unit): + return _floor_int64(values + unit // 2, unit) + + +def round_nsint64(values: np.ndarray, mode: RoundTo, nanos) -> np.ndarray: + """ + Applies rounding mode at given frequency + + Parameters + ---------- + values : np.ndarray[int64_t]` + mode : instance of `RoundTo` enumeration + nanos : np.int64 + Freq to round to, expressed in nanoseconds + + Returns + ------- + np.ndarray[int64_t] + """ + cdef: + int64_t unit = nanos + + if mode == RoundTo.MINUS_INFTY: + return _floor_int64(values, unit) + elif mode == RoundTo.PLUS_INFTY: + return _ceil_int64(values, unit) + elif mode == RoundTo.NEAREST_HALF_MINUS_INFTY: + return _rounddown_int64(values, unit) + elif mode == RoundTo.NEAREST_HALF_PLUS_INFTY: + return _roundup_int64(values, unit) + elif mode == RoundTo.NEAREST_HALF_EVEN: + # for odd unit there is no need of a tie break + if unit % 2: + return _rounddown_int64(values, unit) + quotient, remainder = np.divmod(values, unit) + mask = np.logical_or( + remainder > (unit // 2), + np.logical_and(remainder == (unit // 2), quotient % 2) + ) + quotient[mask] += 1 + return quotient * unit + + # if/elif above should catch all rounding modes defined in enum 'RoundTo': + # if flow of control arrives here, it is a bug + raise ValueError("round_nsint64 called with an unrecognized rounding mode") diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx index 25991cfbdb7a7..748a4c27e64ad 100644 --- a/pandas/_libs/tslibs/timedeltas.pyx +++ b/pandas/_libs/tslibs/timedeltas.pyx @@ -47,6 +47,7 @@ from pandas._libs.tslibs.util cimport ( is_integer_object, is_timedelta64_object, ) +from pandas._libs.tslibs.fields import RoundTo, round_nsint64 # ---------------------------------------------------------------------- # Constants @@ -1297,14 +1298,18 @@ class Timedelta(_Timedelta): object_state = self.value, return (Timedelta, object_state) - def _round(self, freq, rounder): + @cython.cdivision(True) + def _round(self, freq, mode): cdef: - int64_t result, unit + int64_t result, unit, remainder + ndarray[int64_t] arr from pandas._libs.tslibs.offsets import to_offset unit = 
to_offset(freq).nanos - result = unit * rounder(self.value / float(unit)) - return Timedelta(result, unit='ns') + + arr = np.array([self.value], dtype="i8") + result = round_nsint64(arr, mode, unit)[0] + return Timedelta(result, unit="ns") def round(self, freq): """ @@ -1323,7 +1328,7 @@ class Timedelta(_Timedelta): ------ ValueError if the freq cannot be converted """ - return self._round(freq, np.round) + return self._round(freq, RoundTo.NEAREST_HALF_EVEN) def floor(self, freq): """ @@ -1334,7 +1339,7 @@ class Timedelta(_Timedelta): freq : str Frequency string indicating the flooring resolution. """ - return self._round(freq, np.floor) + return self._round(freq, RoundTo.MINUS_INFTY) def ceil(self, freq): """ @@ -1345,7 +1350,7 @@ class Timedelta(_Timedelta): freq : str Frequency string indicating the ceiling resolution. """ - return self._round(freq, np.ceil) + return self._round(freq, RoundTo.PLUS_INFTY) # ---------------------------------------------------------------- # Arithmetic Methods diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx index 1df589073a6ba..5f6b614ac3d81 100644 --- a/pandas/_libs/tslibs/timestamps.pyx +++ b/pandas/_libs/tslibs/timestamps.pyx @@ -56,7 +56,12 @@ from pandas._libs.tslibs.util cimport ( is_timedelta64_object, ) -from pandas._libs.tslibs.fields import get_date_name_field, get_start_end_field +from pandas._libs.tslibs.fields import ( + RoundTo, + get_date_name_field, + get_start_end_field, + round_nsint64, +) from pandas._libs.tslibs.nattype cimport NPY_NAT, c_NaT as NaT from pandas._libs.tslibs.np_datetime cimport ( @@ -110,152 +115,6 @@ cdef inline object create_timestamp_from_ts(int64_t value, return ts_base -class RoundTo: - """ - enumeration defining the available rounding modes - - Attributes - ---------- - MINUS_INFTY - round towards -∞, or floor [2]_ - PLUS_INFTY - round towards +∞, or ceil [3]_ - NEAREST_HALF_EVEN - round to nearest, tie-break half to even [6]_ - NEAREST_HALF_MINUS_INFTY - round to nearest, tie-break half to -∞ [5]_ - NEAREST_HALF_PLUS_INFTY - round to nearest, tie-break half to +∞ [4]_ - - - References - ---------- - .. [1] "Rounding - Wikipedia" - https://en.wikipedia.org/wiki/Rounding - .. [2] "Rounding down" - https://en.wikipedia.org/wiki/Rounding#Rounding_down - .. [3] "Rounding up" - https://en.wikipedia.org/wiki/Rounding#Rounding_up - .. [4] "Round half up" - https://en.wikipedia.org/wiki/Rounding#Round_half_up - .. [5] "Round half down" - https://en.wikipedia.org/wiki/Rounding#Round_half_down - .. 
[6] "Round half to even" - https://en.wikipedia.org/wiki/Rounding#Round_half_to_even - """ - @property - def MINUS_INFTY(self) -> int: - return 0 - - @property - def PLUS_INFTY(self) -> int: - return 1 - - @property - def NEAREST_HALF_EVEN(self) -> int: - return 2 - - @property - def NEAREST_HALF_PLUS_INFTY(self) -> int: - return 3 - - @property - def NEAREST_HALF_MINUS_INFTY(self) -> int: - return 4 - - -cdef inline ndarray[int64_t] _floor_int64(int64_t[:] values, int64_t unit): - cdef: - Py_ssize_t i, n = len(values) - ndarray[int64_t] result = np.empty(n, dtype="i8") - int64_t res, value - - with cython.overflowcheck(True): - for i in range(n): - value = values[i] - if value == NPY_NAT: - res = NPY_NAT - else: - res = value - value % unit - result[i] = res - - return result - - -cdef inline ndarray[int64_t] _ceil_int64(int64_t[:] values, int64_t unit): - cdef: - Py_ssize_t i, n = len(values) - ndarray[int64_t] result = np.empty(n, dtype="i8") - int64_t res, value - - with cython.overflowcheck(True): - for i in range(n): - value = values[i] - - if value == NPY_NAT: - res = NPY_NAT - else: - remainder = value % unit - if remainder == 0: - res = value - else: - res = value + (unit - remainder) - - result[i] = res - - return result - - -cdef inline ndarray[int64_t] _rounddown_int64(values, int64_t unit): - return _ceil_int64(values - unit//2, unit) - - -cdef inline ndarray[int64_t] _roundup_int64(values, int64_t unit): - return _floor_int64(values + unit//2, unit) - - -def round_nsint64(values: np.ndarray, mode: RoundTo, freq) -> np.ndarray: - """ - Applies rounding mode at given frequency - - Parameters - ---------- - values : np.ndarray[int64_t]` - mode : instance of `RoundTo` enumeration - freq : str, obj - - Returns - ------- - np.ndarray[int64_t] - """ - - unit = to_offset(freq).nanos - - if mode == RoundTo.MINUS_INFTY: - return _floor_int64(values, unit) - elif mode == RoundTo.PLUS_INFTY: - return _ceil_int64(values, unit) - elif mode == RoundTo.NEAREST_HALF_MINUS_INFTY: - return _rounddown_int64(values, unit) - elif mode == RoundTo.NEAREST_HALF_PLUS_INFTY: - return _roundup_int64(values, unit) - elif mode == RoundTo.NEAREST_HALF_EVEN: - # for odd unit there is no need of a tie break - if unit % 2: - return _rounddown_int64(values, unit) - quotient, remainder = np.divmod(values, unit) - mask = np.logical_or( - remainder > (unit // 2), - np.logical_and(remainder == (unit // 2), quotient % 2) - ) - quotient[mask] += 1 - return quotient * unit - - # if/elif above should catch all rounding modes defined in enum 'RoundTo': - # if flow of control arrives here, it is a bug - raise ValueError("round_nsint64 called with an unrecognized rounding mode") - - # ---------------------------------------------------------------------- def integer_op_not_supported(obj): @@ -1181,6 +1040,9 @@ class Timestamp(_Timestamp): return create_timestamp_from_ts(ts.value, ts.dts, ts.tzinfo, freq, ts.fold) def _round(self, freq, mode, ambiguous='raise', nonexistent='raise'): + cdef: + int64_t nanos = to_offset(freq).nanos + if self.tz is not None: value = self.tz_localize(None).value else: @@ -1189,7 +1051,7 @@ class Timestamp(_Timestamp): value = np.array([value], dtype=np.int64) # Will only ever contain 1 element for timestamp - r = round_nsint64(value, mode, freq)[0] + r = round_nsint64(value, mode, nanos)[0] result = Timestamp(r, unit='ns') if self.tz is not None: result = result.tz_localize( diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index 1032559766ada..5ee7a5715d6af 
100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -32,11 +32,8 @@ iNaT, to_offset, ) -from pandas._libs.tslibs.timestamps import ( - RoundTo, - integer_op_not_supported, - round_nsint64, -) +from pandas._libs.tslibs.fields import RoundTo, round_nsint64 +from pandas._libs.tslibs.timestamps import integer_op_not_supported from pandas._typing import DatetimeLikeScalar, Dtype, DtypeObj, NpDtype from pandas.compat.numpy import function as nv from pandas.errors import AbstractMethodError, NullFrequencyError, PerformanceWarning @@ -1606,7 +1603,8 @@ def _round(self, freq, mode, ambiguous, nonexistent): ) values = self.view("i8") - result = round_nsint64(values, mode, freq) + nanos = to_offset(freq).nanos + result = round_nsint64(values, mode, nanos) result = self._maybe_mask_results(result, fill_value=iNaT) return self._simple_new(result, dtype=self.dtype) diff --git a/pandas/tests/scalar/timedelta/test_timedelta.py b/pandas/tests/scalar/timedelta/test_timedelta.py index 89b45b7266daa..906ed038c4840 100644 --- a/pandas/tests/scalar/timedelta/test_timedelta.py +++ b/pandas/tests/scalar/timedelta/test_timedelta.py @@ -357,6 +357,68 @@ def test_round_invalid(self): with pytest.raises(ValueError, match=msg): t1.round(freq) + def test_round_implementation_bounds(self): + # See also: analogous test for Timestamp + # GH#38964 + result = Timedelta.min.ceil("s") + expected = Timedelta.min + Timedelta(seconds=1) - Timedelta(145224193) + assert result == expected + + result = Timedelta.max.floor("s") + expected = Timedelta.max - Timedelta(854775807) + assert result == expected + + with pytest.raises(OverflowError, match="value too large"): + Timedelta.min.floor("s") + + # the second message here shows up in windows builds + msg = "|".join( + ["Python int too large to convert to C long", "int too big to convert"] + ) + with pytest.raises(OverflowError, match=msg): + Timedelta.max.ceil("s") + + @pytest.mark.parametrize("n", range(100)) + @pytest.mark.parametrize( + "method", [Timedelta.round, Timedelta.floor, Timedelta.ceil] + ) + def test_round_sanity(self, method, n, request): + iinfo = np.iinfo(np.int64) + val = np.random.randint(iinfo.min + 1, iinfo.max, dtype=np.int64) + td = Timedelta(val) + + assert method(td, "ns") == td + + res = method(td, "us") + nanos = 1000 + assert np.abs((res - td).value) < nanos + assert res.value % nanos == 0 + + res = method(td, "ms") + nanos = 1_000_000 + assert np.abs((res - td).value) < nanos + assert res.value % nanos == 0 + + res = method(td, "s") + nanos = 1_000_000_000 + assert np.abs((res - td).value) < nanos + assert res.value % nanos == 0 + + res = method(td, "min") + nanos = 60 * 1_000_000_000 + assert np.abs((res - td).value) < nanos + assert res.value % nanos == 0 + + res = method(td, "h") + nanos = 60 * 60 * 1_000_000_000 + assert np.abs((res - td).value) < nanos + assert res.value % nanos == 0 + + res = method(td, "D") + nanos = 24 * 60 * 60 * 1_000_000_000 + assert np.abs((res - td).value) < nanos + assert res.value % nanos == 0 + def test_contains(self): # Checking for any NaT-like objects # GH 13603 diff --git a/pandas/tests/series/methods/test_replace.py b/pandas/tests/series/methods/test_replace.py index b619c9c9632e3..3f3a3af658969 100644 --- a/pandas/tests/series/methods/test_replace.py +++ b/pandas/tests/series/methods/test_replace.py @@ -95,6 +95,8 @@ def test_replace_gh5319(self): expected = ser.ffill() result = ser.replace(np.nan) tm.assert_series_equal(result, expected) + + def 
test_replace_datetime64(self): # GH 5797 ser = pd.Series(pd.date_range("20130101", periods=5)) expected = ser.copy() @@ -104,6 +106,7 @@ def test_replace_gh5319(self): result = ser.replace(pd.Timestamp("20130103"), pd.Timestamp("20120101")) tm.assert_series_equal(result, expected) + def test_replace_nat_with_tz(self): # GH 11792: Test with replacing NaT in a list with tz data ts = pd.Timestamp("2015/01/01", tz="UTC") s = pd.Series([pd.NaT, pd.Timestamp("2015/01/01", tz="UTC")])
- [x] closes #38964
- [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [x] whatsnew entry
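A usage sketch of the fixed behavior near the implementation bounds (the values follow `test_round_implementation_bounds` above):

```python
import pandas as pd

# floor/ceil near the bounds no longer silently overflow:
print(pd.Timedelta.max.floor("s"))  # now exact

try:
    pd.Timedelta.max.ceil("s")      # cannot be represented in int64 ns
except OverflowError as err:
    print("OverflowError:", err)
```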
https://api.github.com/repos/pandas-dev/pandas/pulls/39601
2021-02-05T00:20:32Z
2021-02-05T03:11:54Z
2021-02-05T03:11:54Z
2021-02-05T04:39:24Z
TST: misplaced tests
diff --git a/pandas/tests/frame/indexing/test_indexing.py b/pandas/tests/frame/indexing/test_indexing.py index 3b1a8ebcb13d0..6808ffe65e561 100644 --- a/pandas/tests/frame/indexing/test_indexing.py +++ b/pandas/tests/frame/indexing/test_indexing.py @@ -1732,34 +1732,6 @@ def test_setitem(self, uint64_frame): ) -@pytest.mark.parametrize( - "src_idx", - [ - Index([]), - pd.CategoricalIndex([]), - ], -) -@pytest.mark.parametrize( - "cat_idx", - [ - # No duplicates - Index([]), - pd.CategoricalIndex([]), - Index(["A", "B"]), - pd.CategoricalIndex(["A", "B"]), - # Duplicates: GH#38906 - Index(["A", "A"]), - pd.CategoricalIndex(["A", "A"]), - ], -) -def test_reindex_empty(src_idx, cat_idx): - df = DataFrame(columns=src_idx, index=["K"], dtype="f8") - - result = df.reindex(columns=cat_idx) - expected = DataFrame(index=["K"], columns=cat_idx, dtype="f8") - tm.assert_frame_equal(result, expected) - - def test_object_casting_indexing_wraps_datetimelike(): # GH#31649, check the indexing methods all the way down the stack df = DataFrame( diff --git a/pandas/tests/frame/methods/test_reindex.py b/pandas/tests/frame/methods/test_reindex.py index c49375758345c..9116b1ff5ad65 100644 --- a/pandas/tests/frame/methods/test_reindex.py +++ b/pandas/tests/frame/methods/test_reindex.py @@ -151,7 +151,7 @@ def test_reindex_methods_nearest_special(self): def test_reindex_nearest_tz(self, tz_aware_fixture): # GH26683 tz = tz_aware_fixture - idx = pd.date_range("2019-01-01", periods=5, tz=tz) + idx = date_range("2019-01-01", periods=5, tz=tz) df = DataFrame({"x": list(range(5))}, index=idx) expected = df.head(3) @@ -759,7 +759,7 @@ def test_reindex_multi(self): def test_reindex_multi_categorical_time(self): # https://github.com/pandas-dev/pandas/issues/21390 - midx = pd.MultiIndex.from_product( + midx = MultiIndex.from_product( [ Categorical(["a", "b", "c"]), Categorical(date_range("2012-01-01", periods=3, freq="H")), @@ -906,3 +906,30 @@ def test_reindex_empty_frame(self, kwargs): result = df.reindex(idx, **kwargs) expected = DataFrame({"a": [pd.NA] * 3}, index=idx) tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "src_idx", + [ + Index([]), + CategoricalIndex([]), + ], + ) + @pytest.mark.parametrize( + "cat_idx", + [ + # No duplicates + Index([]), + CategoricalIndex([]), + Index(["A", "B"]), + CategoricalIndex(["A", "B"]), + # Duplicates: GH#38906 + Index(["A", "A"]), + CategoricalIndex(["A", "A"]), + ], + ) + def test_reindex_empty(self, src_idx, cat_idx): + df = DataFrame(columns=src_idx, index=["K"], dtype="f8") + + result = df.reindex(columns=cat_idx) + expected = DataFrame(index=["K"], columns=cat_idx, dtype="f8") + tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/series/indexing/test_indexing.py b/pandas/tests/series/indexing/test_indexing.py index dbc751dd614a1..c9e505ef4bbaf 100644 --- a/pandas/tests/series/indexing/test_indexing.py +++ b/pandas/tests/series/indexing/test_indexing.py @@ -607,29 +607,6 @@ def test_td64_series_assign_nat(nat_val, should_cast): tm.assert_series_equal(ser, expected) -@pytest.mark.parametrize( - "td", - [ - Timedelta("9 days"), - Timedelta("9 days").to_timedelta64(), - Timedelta("9 days").to_pytimedelta(), - ], -) -def test_append_timedelta_does_not_cast(td): - # GH#22717 inserting a Timedelta should _not_ cast to int64 - expected = Series(["x", td], index=[0, "td"], dtype=object) - - ser = Series(["x"]) - ser["td"] = td - tm.assert_series_equal(ser, expected) - assert isinstance(ser["td"], Timedelta) - - ser = Series(["x"]) - 
ser.loc["td"] = Timedelta("9 days") - tm.assert_series_equal(ser, expected) - assert isinstance(ser["td"], Timedelta) - - def test_underlying_data_conversion(): # GH 4080 df = DataFrame({c: [1, 2, 3] for c in ["a", "b", "c"]}) @@ -759,15 +736,11 @@ def test_getitem_unrecognized_scalar(): timedelta_range("0", periods=20, freq="H"), ], ) -def test_slice_with_zero_step_raises(index): - ts = Series(np.arange(20), index) +def test_slice_with_zero_step_raises(index, frame_or_series, indexer_sli): + ts = frame_or_series(np.arange(20), index=index) with pytest.raises(ValueError, match="slice step cannot be zero"): - ts[::0] - with pytest.raises(ValueError, match="slice step cannot be zero"): - ts.loc[::0] - with pytest.raises(ValueError, match="slice step cannot be zero"): - ts.iloc[::0] + indexer_sli(ts)[::0] @pytest.mark.parametrize( @@ -784,7 +757,6 @@ def assert_slices_equivalent(l_slc, i_slc): tm.assert_series_equal(ts[l_slc], expected) tm.assert_series_equal(ts.loc[l_slc], expected) - tm.assert_series_equal(ts.loc[l_slc], expected) keystr1 = str(index[9]) keystr2 = str(index[13]) diff --git a/pandas/tests/series/indexing/test_numeric.py b/pandas/tests/series/indexing/test_numeric.py index 4caf6d03d8d80..10b9360802c1c 100644 --- a/pandas/tests/series/indexing/test_numeric.py +++ b/pandas/tests/series/indexing/test_numeric.py @@ -4,29 +4,24 @@ import pandas._testing as tm -def test_slice_float64(): +def test_slice_float64(frame_or_series): values = np.arange(10.0, 50.0, 2) index = Index(values) start, end = values[[5, 15]] - s = Series(np.random.randn(20), index=index) + data = np.random.randn(20, 3) + if frame_or_series is not DataFrame: + data = data[:, 0] - result = s[start:end] - expected = s.iloc[5:16] - tm.assert_series_equal(result, expected) - - result = s.loc[start:end] - tm.assert_series_equal(result, expected) - - df = DataFrame(np.random.randn(20, 3), index=index) + obj = frame_or_series(data, index=index) - result = df[start:end] - expected = df.iloc[5:16] - tm.assert_frame_equal(result, expected) + result = obj[start:end] + expected = obj.iloc[5:16] + tm.assert_equal(result, expected) - result = df.loc[start:end] - tm.assert_frame_equal(result, expected) + result = obj.loc[start:end] + tm.assert_equal(result, expected) def test_getitem_setitem_slice_bug(): diff --git a/pandas/tests/series/indexing/test_setitem.py b/pandas/tests/series/indexing/test_setitem.py index c19dbaa5c6045..767b61e31698b 100644 --- a/pandas/tests/series/indexing/test_setitem.py +++ b/pandas/tests/series/indexing/test_setitem.py @@ -9,6 +9,7 @@ MultiIndex, NaT, Series, + Timedelta, Timestamp, date_range, period_range, @@ -508,6 +509,28 @@ def test_setitem_empty_series_timestamp_preserves_dtype(self): result = series["timestamp"] assert result == expected + @pytest.mark.parametrize( + "td", + [ + Timedelta("9 days"), + Timedelta("9 days").to_timedelta64(), + Timedelta("9 days").to_pytimedelta(), + ], + ) + def test_append_timedelta_does_not_cast(self, td): + # GH#22717 inserting a Timedelta should _not_ cast to int64 + expected = Series(["x", td], index=[0, "td"], dtype=object) + + ser = Series(["x"]) + ser["td"] = td + tm.assert_series_equal(ser, expected) + assert isinstance(ser["td"], Timedelta) + + ser = Series(["x"]) + ser.loc["td"] = Timedelta("9 days") + tm.assert_series_equal(ser, expected) + assert isinstance(ser["td"], Timedelta) + def test_setitem_scalar_into_readonly_backing_data(): # GH#14359: test that you cannot mutate a read only buffer
https://api.github.com/repos/pandas-dev/pandas/pulls/39598
2021-02-04T20:17:47Z
2021-02-04T22:40:37Z
2021-02-04T22:40:37Z
2021-02-05T00:02:51Z
[PERF] taking upper 32bit of PyObject_Hash into account
diff --git a/asv_bench/benchmarks/hash_functions.py b/asv_bench/benchmarks/hash_functions.py index 5227ad0f53a04..3743882b936e2 100644 --- a/asv_bench/benchmarks/hash_functions.py +++ b/asv_bench/benchmarks/hash_functions.py @@ -25,6 +25,15 @@ def time_isin_outside(self, dtype, exponent): self.s.isin(self.values_outside) +class UniqueForLargePyObjectInts: + def setup(self): + lst = [x << 32 for x in range(5000)] + self.arr = np.array(lst, dtype=np.object_) + + def time_unique(self): + pd.unique(self.arr) + + class IsinWithRandomFloat: params = [ [np.float64, np.object], diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index 17d8c79994dbe..d5c0551abbdd7 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -253,6 +253,7 @@ Performance improvements - Performance improvement in :meth:`DataFrame.corr` for method=kendall (:issue:`28329`) - Performance improvement in :meth:`core.window.rolling.Rolling.corr` and :meth:`core.window.rolling.Rolling.cov` (:issue:`39388`) - Performance improvement in :meth:`core.window.rolling.RollingGroupby.corr`, :meth:`core.window.expanding.ExpandingGroupby.corr`, :meth:`core.window.expanding.ExpandingGroupby.corr` and :meth:`core.window.expanding.ExpandingGroupby.cov` (:issue:`39591`) +- Performance improvement in :func:`unique` for object data type (:issue:`37615`) .. --------------------------------------------------------------------------- diff --git a/pandas/_libs/src/klib/khash_python.h b/pandas/_libs/src/klib/khash_python.h index 0073aaf0195c7..aee018262e3a6 100644 --- a/pandas/_libs/src/klib/khash_python.h +++ b/pandas/_libs/src/klib/khash_python.h @@ -178,11 +178,31 @@ int PANDAS_INLINE pyobject_cmp(PyObject* a, PyObject* b) { return result; } -// For PyObject_Hash holds: -// hash(0.0) == 0 == hash(-0.0) -// hash(X) == 0 if X is a NaN-value -// so it is OK to use it directly -#define kh_python_hash_func(key) (PyObject_Hash(key)) + +khint32_t PANDAS_INLINE kh_python_hash_func(PyObject* key){ + // For PyObject_Hash holds: + // hash(0.0) == 0 == hash(-0.0) + // hash(X) == 0 if X is a NaN-value + // so it is OK to use it directly for doubles + Py_hash_t hash = PyObject_Hash(key); + if (hash == -1) { + PyErr_Clear(); + return 0; + } + #if SIZEOF_PY_HASH_T == 4 + // it is already 32bit value + return hash; + #else + // for 64bit builds, + // we need information of the upper 32bits as well + // see GH 37615 + khuint64_t as_uint = (khuint64_t) hash; + // uints avoid undefined behavior of signed ints + return (as_uint>>32)^as_uint; + #endif +} + + #define kh_python_hash_equal(a, b) (pyobject_cmp(a, b))
- [x] closes #37615
- [x] tests added / passed
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them
- [x] whatsnew entry

Until now, the upper 32 bits of `PyObject_Hash` were not taken into account at all. Because the hash function of many built-in objects is very simple (e.g. for integers it is essentially the identity), a perfectly "normal" series (for example, integers that differ only in their upper 32 bits) would have every truncated hash equal to 0, which leads to O(n^2) running times. Thus, for 64-bit builds, we need to fold the upper 32 bits into the resulting 32-bit hash.
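A pure-Python sketch of the 64-to-32-bit fold added in `kh_python_hash_func` (the C version in the diff is authoritative; this only illustrates the collision and the fix):

```python
def fold_hash(h: int) -> int:
    as_uint = h & 0xFFFF_FFFF_FFFF_FFFF  # reinterpret as unsigned 64-bit
    return ((as_uint >> 32) ^ as_uint) & 0xFFFF_FFFF


# Keys differing only in their upper 32 bits all truncate to 0 ...
print([hash(x << 32) & 0xFFFF_FFFF for x in range(3)])  # [0, 0, 0]
# ... but XOR-folding the upper half back in keeps them distinct:
print([fold_hash(hash(x << 32)) for x in range(3)])     # [0, 1, 2]
```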
https://api.github.com/repos/pandas-dev/pandas/pulls/39592
2021-02-04T07:05:23Z
2021-02-07T16:42:08Z
2021-02-07T16:42:08Z
2021-02-09T07:17:39Z
PERF: Rolling/Expanding.cov/corr
diff --git a/asv_bench/benchmarks/rolling.py b/asv_bench/benchmarks/rolling.py index 5f8cdb2a0bdac..5738775fe2b27 100644 --- a/asv_bench/benchmarks/rolling.py +++ b/asv_bench/benchmarks/rolling.py @@ -140,8 +140,11 @@ class Pairwise: def setup(self, window, method, pairwise): N = 10 ** 4 + n_groups = 20 + groups = [i for _ in range(N // n_groups) for i in range(n_groups)] arr = np.random.random(N) self.df = pd.DataFrame(arr) + self.df_group = pd.DataFrame({"A": groups, "B": arr}).groupby("A") def time_pairwise(self, window, method, pairwise): if window is None: @@ -150,6 +153,13 @@ def time_pairwise(self, window, method, pairwise): r = self.df.rolling(window=window) getattr(r, method)(self.df, pairwise=pairwise) + def time_groupby(self, window, method, pairwise): + if window is None: + r = self.df_group.expanding() + else: + r = self.df_group.rolling(window=window) + getattr(r, method)(self.df, pairwise=pairwise) + class Quantile: params = ( diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index 4d0384abbf0c6..251fbf538ca92 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -249,7 +249,8 @@ Performance improvements - Performance improvement in :meth:`Series.mean` for nullable data types (:issue:`34814`) - Performance improvement in :meth:`Series.isin` for nullable data types (:issue:`38340`) - Performance improvement in :meth:`DataFrame.corr` for method=kendall (:issue:`28329`) -- Performance improvement in :meth:`core.window.Rolling.corr` and :meth:`core.window.Rolling.cov` (:issue:`39388`) +- Performance improvement in :meth:`core.window.rolling.Rolling.corr` and :meth:`core.window.rolling.Rolling.cov` (:issue:`39388`) +- Performance improvement in :meth:`core.window.rolling.RollingGroupby.corr`, :meth:`core.window.expanding.ExpandingGroupby.corr`, :meth:`core.window.expanding.ExpandingGroupby.corr` and :meth:`core.window.expanding.ExpandingGroupby.cov` (:issue:`39591`) .. 
--------------------------------------------------------------------------- @@ -406,6 +407,8 @@ Groupby/resample/rolling - Bug in :meth:`.Resampler.aggregate` and :meth:`DataFrame.transform` raising ``TypeError`` instead of ``SpecificationError`` when missing keys had mixed dtypes (:issue:`39025`) - Bug in :meth:`.DataFrameGroupBy.idxmin` and :meth:`.DataFrameGroupBy.idxmax` with ``ExtensionDtype`` columns (:issue:`38733`) - Bug in :meth:`Series.resample` would raise when the index was a :class:`PeriodIndex` consisting of ``NaT`` (:issue:`39227`) +- Bug in :meth:`core.window.rolling.RollingGroupby.corr` and :meth:`core.window.expanding.ExpandingGroupby.corr` where the groupby column would return 0 instead of ``np.nan`` when providing ``other`` that was longer than each group (:issue:`39591`) +- Bug in :meth:`core.window.expanding.ExpandingGroupby.corr` and :meth:`core.window.expanding.ExpandingGroupby.cov` where 1 would be returned instead of ``np.nan`` when providing ``other`` that was longer than each group (:issue:`39591`) Reshaping ^^^^^^^^^ diff --git a/pandas/core/window/ewm.py b/pandas/core/window/ewm.py index 2948216e200de..e02555fb1e990 100644 --- a/pandas/core/window/ewm.py +++ b/pandas/core/window/ewm.py @@ -3,14 +3,14 @@ import datetime from functools import partial from textwrap import dedent -from typing import TYPE_CHECKING, Optional, Union +from typing import Optional, Union import warnings import numpy as np from pandas._libs.tslibs import Timedelta import pandas._libs.window.aggregations as window_aggregations -from pandas._typing import FrameOrSeries, TimedeltaConvertibleTypes +from pandas._typing import FrameOrSeries, FrameOrSeriesUnion, TimedeltaConvertibleTypes from pandas.compat.numpy import function as nv from pandas.util._decorators import doc @@ -19,7 +19,7 @@ import pandas.core.common as common from pandas.core.util.numba_ import maybe_use_numba -from pandas.core.window.common import flex_binary_moment, zsqrt +from pandas.core.window.common import zsqrt from pandas.core.window.doc import ( _shared_docs, args_compat, @@ -35,10 +35,7 @@ GroupbyIndexer, ) from pandas.core.window.numba_ import generate_numba_groupby_ewma_func -from pandas.core.window.rolling import BaseWindow, BaseWindowGroupby, dispatch - -if TYPE_CHECKING: - from pandas import Series +from pandas.core.window.rolling import BaseWindow, BaseWindowGroupby def get_center_of_mass( @@ -74,13 +71,20 @@ def get_center_of_mass( return float(comass) -def wrap_result(obj: Series, result: np.ndarray) -> Series: +def dispatch(name: str, *args, **kwargs): """ - Wrap a single 1D result. + Dispatch to groupby apply. 
""" - obj = obj._selected_obj - return obj._constructor(result, obj.index, name=obj.name) + def outer(self, *args, **kwargs): + def f(x): + x = self._shallow_copy(x, groupby=self._groupby) + return getattr(x, name)(*args, **kwargs) + + return self._groupby.apply(f) + + outer.__name__ = name + return outer class ExponentialMovingWindow(BaseWindow): @@ -443,36 +447,30 @@ def var_func(values, begin, end, min_periods): ) def cov( self, - other: Optional[Union[np.ndarray, FrameOrSeries]] = None, + other: Optional[FrameOrSeriesUnion] = None, pairwise: Optional[bool] = None, bias: bool = False, **kwargs, ): - if other is None: - other = self._selected_obj - # only default unset - pairwise = True if pairwise is None else pairwise - other = self._shallow_copy(other) - - def _get_cov(X, Y): - X = self._shallow_copy(X) - Y = self._shallow_copy(Y) - cov = window_aggregations.ewmcov( - X._prep_values(), + from pandas import Series + + def cov_func(x, y): + x_array = self._prep_values(x) + y_array = self._prep_values(y) + result = window_aggregations.ewmcov( + x_array, np.array([0], dtype=np.int64), np.array([0], dtype=np.int64), self.min_periods, - Y._prep_values(), + y_array, self.com, self.adjust, self.ignore_na, bias, ) - return wrap_result(X, cov) + return Series(result, index=x.index, name=x.name) - return flex_binary_moment( - self._selected_obj, other._selected_obj, _get_cov, pairwise=bool(pairwise) - ) + return self._apply_pairwise(self._selected_obj, other, pairwise, cov_func) @doc( template_header, @@ -502,45 +500,37 @@ def _get_cov(X, Y): ) def corr( self, - other: Optional[Union[np.ndarray, FrameOrSeries]] = None, + other: Optional[FrameOrSeriesUnion] = None, pairwise: Optional[bool] = None, **kwargs, ): - if other is None: - other = self._selected_obj - # only default unset - pairwise = True if pairwise is None else pairwise - other = self._shallow_copy(other) + from pandas import Series - def _get_corr(X, Y): - X = self._shallow_copy(X) - Y = self._shallow_copy(Y) + def cov_func(x, y): + x_array = self._prep_values(x) + y_array = self._prep_values(y) - def _cov(x, y): + def _cov(X, Y): return window_aggregations.ewmcov( - x, + X, np.array([0], dtype=np.int64), np.array([0], dtype=np.int64), self.min_periods, - y, + Y, self.com, self.adjust, self.ignore_na, 1, ) - x_values = X._prep_values() - y_values = Y._prep_values() with np.errstate(all="ignore"): - cov = _cov(x_values, y_values) - x_var = _cov(x_values, x_values) - y_var = _cov(y_values, y_values) - corr = cov / zsqrt(x_var * y_var) - return wrap_result(X, corr) - - return flex_binary_moment( - self._selected_obj, other._selected_obj, _get_corr, pairwise=bool(pairwise) - ) + cov = _cov(x_array, y_array) + x_var = _cov(x_array, x_array) + y_var = _cov(y_array, y_array) + result = cov / zsqrt(x_var * y_var) + return Series(result, index=x.index, name=x.name) + + return self._apply_pairwise(self._selected_obj, other, pairwise, cov_func) class ExponentialMovingWindowGroupby(BaseWindowGroupby, ExponentialMovingWindow): diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py index f3f697bf7309d..b35d62e484280 100644 --- a/pandas/core/window/rolling.py +++ b/pandas/core/window/rolling.py @@ -48,11 +48,14 @@ ) from pandas.core.dtypes.missing import notna +from pandas.core.algorithms import factorize from pandas.core.apply import ResamplerWindowApply from pandas.core.base import DataError, SelectionMixin +import pandas.core.common as common from pandas.core.construction import extract_array from pandas.core.groupby.base 
import GotItemMixin, ShallowMixin from pandas.core.indexes.api import Index, MultiIndex +from pandas.core.reshape.concat import concat from pandas.core.util.numba_ import NUMBA_FUNC_CACHE, maybe_use_numba from pandas.core.window.common import flex_binary_moment, zsqrt from pandas.core.window.doc import ( @@ -406,6 +409,9 @@ def hfunc(bvalues: ArrayLike) -> ArrayLike: def _apply_tablewise( self, homogeneous_func: Callable[..., ArrayLike], name: Optional[str] = None ) -> FrameOrSeriesUnion: + """ + Apply the given function to the DataFrame across the entire object + """ if self._selected_obj.ndim == 1: raise ValueError("method='table' not applicable for Series objects.") obj = self._create_data(self._selected_obj) @@ -423,6 +429,23 @@ def _apply_tablewise( self._insert_on_column(out, obj) return out + def _apply_pairwise( + self, + target: FrameOrSeriesUnion, + other: Optional[FrameOrSeriesUnion], + pairwise: Optional[bool], + func: Callable[[FrameOrSeriesUnion, FrameOrSeriesUnion], FrameOrSeriesUnion], + ) -> FrameOrSeriesUnion: + """ + Apply the given pairwise function given 2 pandas objects (DataFrame/Series) + """ + if other is None: + other = target + # only default unset + pairwise = True if pairwise is None else pairwise + + return flex_binary_moment(target, other, func, pairwise=bool(pairwise)) + def _apply( self, func: Callable[..., Any], @@ -495,22 +518,6 @@ def aggregate(self, func, *args, **kwargs): agg = aggregate -def dispatch(name: str, *args, **kwargs): - """ - Dispatch to groupby apply. - """ - - def outer(self, *args, **kwargs): - def f(x): - x = self._shallow_copy(x, groupby=self._groupby) - return getattr(x, name)(*args, **kwargs) - - return self._groupby.apply(f) - - outer.__name__ = name - return outer - - class BaseWindowGroupby(GotItemMixin, BaseWindow): """ Provide the groupby windowing facilities. @@ -526,9 +533,6 @@ def __init__(self, obj, *args, **kwargs): self._groupby.grouper.mutated = True super().__init__(obj, *args, **kwargs) - corr = dispatch("corr", other=None, pairwise=None) - cov = dispatch("cov", other=None, pairwise=None) - def _apply( self, func: Callable[..., Any], @@ -591,6 +595,85 @@ def _apply( result.index = result_index return result + def _apply_pairwise( + self, + target: FrameOrSeriesUnion, + other: Optional[FrameOrSeriesUnion], + pairwise: Optional[bool], + func: Callable[[FrameOrSeriesUnion, FrameOrSeriesUnion], FrameOrSeriesUnion], + ) -> FrameOrSeriesUnion: + """ + Apply the given pairwise function given 2 pandas objects (DataFrame/Series) + """ + # Manually drop the grouping column first + target = target.drop(columns=self._groupby.grouper.names, errors="ignore") + result = super()._apply_pairwise(target, other, pairwise, func) + # 1) Determine the levels + codes of the groupby levels + if other is not None: + # When we have other, we must reindex (expand) the result + # from flex_binary_moment to a "transform"-like result + # per groupby combination + old_result_len = len(result) + result = concat( + [ + result.take(gb_indices).reindex(result.index) + for gb_indices in self._groupby.indices.values() + ] + ) + + gb_pairs = ( + common.maybe_make_list(pair) for pair in self._groupby.indices.keys() + ) + groupby_codes = [] + groupby_levels = [] + # e.g. 
[[1, 2], [4, 5]] as [[1, 4], [2, 5]] + for gb_level_pair in map(list, zip(*gb_pairs)): + labels = np.repeat(np.array(gb_level_pair), old_result_len) + codes, levels = factorize(labels) + groupby_codes.append(codes) + groupby_levels.append(levels) + + else: + # When we evaluate the pairwise=True result, repeat the groupby + # labels by the number of columns in the original object + groupby_codes = self._groupby.grouper.codes + groupby_levels = self._groupby.grouper.levels + + group_indices = self._groupby.grouper.indices.values() + if group_indices: + indexer = np.concatenate(list(group_indices)) + else: + indexer = np.array([], dtype=np.intp) + + if target.ndim == 1: + repeat_by = 1 + else: + repeat_by = len(target.columns) + groupby_codes = [ + np.repeat(c.take(indexer), repeat_by) for c in groupby_codes + ] + # 2) Determine the levels + codes of the result from super()._apply_pairwise + if isinstance(result.index, MultiIndex): + result_codes = list(result.index.codes) + result_levels = list(result.index.levels) + result_names = list(result.index.names) + else: + idx_codes, idx_levels = factorize(result.index) + result_codes = [idx_codes] + result_levels = [idx_levels] + result_names = [result.index.name] + + # 3) Create the resulting index by combining 1) + 2) + result_codes = groupby_codes + result_codes + result_levels = groupby_levels + result_levels + result_names = self._groupby.grouper.names + result_names + + result_index = MultiIndex( + result_levels, result_codes, names=result_names, verify_integrity=False + ) + result.index = result_index + return result + def _create_data(self, obj: FrameOrSeries) -> FrameOrSeries: """ Split data into blocks & return conformed data. @@ -1205,11 +1288,6 @@ def quantile(self, quantile: float, interpolation: str = "linear", **kwargs): return self._apply(window_func, name="quantile", **kwargs) def cov(self, other=None, pairwise=None, ddof=1, **kwargs): - if other is None: - other = self._selected_obj - # only default unset - pairwise = True if pairwise is None else pairwise - from pandas import Series def cov_func(x, y): @@ -1239,15 +1317,9 @@ def cov_func(x, y): result = (mean_x_y - mean_x * mean_y) * (count_x_y / (count_x_y - ddof)) return Series(result, index=x.index, name=x.name) - return flex_binary_moment( - self._selected_obj, other, cov_func, pairwise=bool(pairwise) - ) + return self._apply_pairwise(self._selected_obj, other, pairwise, cov_func) def corr(self, other=None, pairwise=None, ddof=1, **kwargs): - if other is None: - other = self._selected_obj - # only default unset - pairwise = True if pairwise is None else pairwise from pandas import Series @@ -1288,9 +1360,7 @@ def corr_func(x, y): result = numerator / denominator return Series(result, index=x.index, name=x.name) - return flex_binary_moment( - self._selected_obj, other, corr_func, pairwise=bool(pairwise) - ) + return self._apply_pairwise(self._selected_obj, other, pairwise, corr_func) class Rolling(RollingAndExpandingMixin): diff --git a/pandas/tests/window/test_groupby.py b/pandas/tests/window/test_groupby.py index b89fb35ac3a70..d3c2b5467e5bb 100644 --- a/pandas/tests/window/test_groupby.py +++ b/pandas/tests/window/test_groupby.py @@ -117,6 +117,9 @@ def func(x): return getattr(x.rolling(4), f)(self.frame) expected = g.apply(func) + # GH 39591: The grouped column should be all np.nan + # (groupby.apply inserts 0s for cov) + expected["A"] = np.nan tm.assert_frame_equal(result, expected) result = getattr(r.B, f)(pairwise=True) @@ -688,6 +691,13 @@ def func(x): return 
getattr(x.expanding(), f)(self.frame) expected = g.apply(func) + # GH 39591: groupby.apply returns 1 instead of nan for windows + # with all nan values + null_idx = list(range(20, 61)) + list(range(72, 113)) + expected.iloc[null_idx, 1] = np.nan + # GH 39591: The grouped column should be all np.nan + # (groupby.apply inserts 0s for cov) + expected["A"] = np.nan tm.assert_frame_equal(result, expected) result = getattr(r.B, f)(pairwise=True)
- [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [x] whatsnew entry Also I think I stumbled on some bugs along the way; added whatsnew and modified the existing tests where necessary. ``` $ asv_bench % asv continuous -f 1.1 upstream/master HEAD -b rolling.Pairwise.time_groupby before after ratio [f79468b8] [6a1e8592] <master> <ref/groupby_rolling_pairwise> - 299±50ms 178±20ms 0.59 rolling.Pairwise.time_groupby(1000, 'corr', True) - 272±7ms 157±20ms 0.58 rolling.Pairwise.time_groupby(1000, 'cov', True) - 262±1ms 146±4ms 0.56 rolling.Pairwise.time_groupby(None, 'cov', True) - 303±10ms 158±6ms 0.52 rolling.Pairwise.time_groupby(10, 'cov', True) - 279±20ms 139±0.7ms 0.50 rolling.Pairwise.time_groupby(None, 'corr', True) - 315±10ms 149±10ms 0.47 rolling.Pairwise.time_groupby(10, 'corr', True) - 137±2ms 34.0±0.8ms 0.25 rolling.Pairwise.time_groupby(10, 'cov', False) - 127±5ms 28.2±1ms 0.22 rolling.Pairwise.time_groupby(None, 'cov', False) - 134±5ms 28.1±0.4ms 0.21 rolling.Pairwise.time_groupby(1000, 'cov', False) - 135±2ms 27.7±1ms 0.21 rolling.Pairwise.time_groupby(None, 'corr', False) - 178±10ms 33.7±2ms 0.19 rolling.Pairwise.time_groupby(10, 'corr', False) - 159±20ms 29.9±2ms 0.19 rolling.Pairwise.time_groupby(1000, 'corr', False) SOME BENCHMARKS HAVE CHANGED SIGNIFICANTLY. PERFORMANCE INCREASED. ```
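For orientation, a minimal sketch of the groupby-rolling pairwise path these benchmarks exercise; the frame, group labels, and window size here are illustrative, not taken from the benchmark suite:

```python
import pandas as pd

df = pd.DataFrame(
    {
        "g": ["a", "a", "a", "b", "b", "b"],
        "x": [1.0, 2.0, 3.0, 4.0, 5.0, 6.0],
        "y": [2.0, 1.0, 4.0, 3.0, 6.0, 5.0],
    }
)

# pairwise covariance within each group over a rolling window of 2;
# the result carries a MultiIndex built from the group labels plus the
# original index and column levels (the index construction rewritten
# in _apply_pairwise above)
cov = df.groupby("g").rolling(window=2).cov(pairwise=True)
print(cov)
```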
https://api.github.com/repos/pandas-dev/pandas/pulls/39591
2021-02-04T06:40:51Z
2021-02-05T14:26:14Z
2021-02-05T14:26:13Z
2021-02-05T17:49:23Z
TST: Remove duplicate invalid sheet tests from io.excel
diff --git a/pandas/tests/io/excel/test_odf.py b/pandas/tests/io/excel/test_odf.py index c99d9ae62bf54..ddc3c42710a61 100644 --- a/pandas/tests/io/excel/test_odf.py +++ b/pandas/tests/io/excel/test_odf.py @@ -36,11 +36,3 @@ def test_read_writer_table(): result = pd.read_excel("writertable.odt", sheet_name="Table1", index_col=0) tm.assert_frame_equal(result, expected) - - -def test_nonexistent_sheetname_raises(read_ext): - # GH-27676 - # Specifying a non-existent sheet_name parameter should throw an error - # with the sheet name. - with pytest.raises(ValueError, match="Worksheet named 'xyz' not found"): - pd.read_excel("blank.ods", sheet_name="xyz") diff --git a/pandas/tests/io/excel/test_xlrd.py b/pandas/tests/io/excel/test_xlrd.py index 29055837a0721..c0d8acf8ab562 100644 --- a/pandas/tests/io/excel/test_xlrd.py +++ b/pandas/tests/io/excel/test_xlrd.py @@ -40,15 +40,6 @@ def test_read_xlrd_book(read_ext, frame): tm.assert_frame_equal(df, result) -# TODO: test for openpyxl as well -def test_excel_table_sheet_by_index(datapath, read_ext): - path = datapath("io", "data", "excel", f"test1{read_ext}") - msg = "Worksheet named 'invalid_sheet_name' not found" - with ExcelFile(path, engine="xlrd") as excel: - with pytest.raises(ValueError, match=msg): - pd.read_excel(excel, sheet_name="invalid_sheet_name") - - def test_excel_file_warning_with_xlsx_file(datapath): # GH 29375 path = datapath("io", "data", "excel", "test1.xlsx")
- [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them Follow-up to #39482: the removed tests are duplicates of `test_readers.test_bad_sheetname_raises`
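The retained canonical test covers the same behavior once for all engines; a sketch of the shared pattern, with the file name and match string taken from the removed odf test (the data file is assumed to be on the working path):

```python
import pandas as pd
import pytest

def test_bad_sheetname_raises():
    # GH-27676: a nonexistent sheet_name should raise with the sheet name
    with pytest.raises(ValueError, match="Worksheet named 'xyz' not found"):
        pd.read_excel("blank.ods", sheet_name="xyz")
```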
https://api.github.com/repos/pandas-dev/pandas/pulls/39589
2021-02-04T01:52:04Z
2021-02-04T15:50:12Z
2021-02-04T15:50:12Z
2021-02-04T15:51:47Z
REG: read_excel with engine specified raises on non-path/non-buffer
diff --git a/doc/source/whatsnew/v1.2.2.rst b/doc/source/whatsnew/v1.2.2.rst index 974f84d3b244a..9cd3f8b7ff6cf 100644 --- a/doc/source/whatsnew/v1.2.2.rst +++ b/doc/source/whatsnew/v1.2.2.rst @@ -22,6 +22,7 @@ Fixed regressions - Fixed regression in :func:`pandas.testing.assert_series_equal` and :func:`pandas.testing.assert_frame_equal` always raising ``AssertionError`` when comparing extension dtypes (:issue:`39410`) - Fixed regression in :meth:`~DataFrame.to_csv` opening ``codecs.StreamWriter`` in binary mode instead of in text mode and ignoring user-provided ``mode`` (:issue:`39247`) - Fixed regression in :meth:`core.window.rolling.Rolling.count` where the ``min_periods`` argument would be set to ``0`` after the operation (:issue:`39554`) +- Fixed regression in :func:`read_excel` that incorrectly raised when the argument ``io`` was a non-path and non-buffer and the ``engine`` argument was specified (:issue:`39528`) - .. --------------------------------------------------------------------------- diff --git a/pandas/io/excel/_base.py b/pandas/io/excel/_base.py index 213be7c05b370..84b5cae09acce 100644 --- a/pandas/io/excel/_base.py +++ b/pandas/io/excel/_base.py @@ -1069,26 +1069,37 @@ def __init__( xlrd_version = LooseVersion(get_version(xlrd)) - if xlrd_version is not None and isinstance(path_or_buffer, xlrd.Book): - ext = "xls" - else: - ext = inspect_excel_format( - content_or_path=path_or_buffer, storage_options=storage_options - ) - + ext = None if engine is None: + # Only determine ext if it is needed + if xlrd_version is not None and isinstance(path_or_buffer, xlrd.Book): + ext = "xls" + else: + ext = inspect_excel_format( + content_or_path=path_or_buffer, storage_options=storage_options + ) + # ext will always be valid, otherwise inspect_excel_format would raise engine = config.get_option(f"io.excel.{ext}.reader", silent=True) if engine == "auto": engine = get_default_engine(ext, mode="reader") - if engine == "xlrd" and ext != "xls" and xlrd_version is not None: - if xlrd_version >= "2": + if engine == "xlrd" and xlrd_version is not None: + if ext is None: + # Need ext to determine ext in order to raise/warn + if isinstance(path_or_buffer, xlrd.Book): + ext = "xls" + else: + ext = inspect_excel_format( + path_or_buffer, storage_options=storage_options + ) + + if ext != "xls" and xlrd_version >= "2": raise ValueError( f"Your version of xlrd is {xlrd_version}. In xlrd >= 2.0, " f"only the xls format is supported. Install openpyxl instead." 
) - else: + elif ext != "xls": caller = inspect.stack()[1] if ( caller.filename.endswith( diff --git a/pandas/io/excel/_openpyxl.py b/pandas/io/excel/_openpyxl.py index 64c64b5009b0c..274b18b2605cc 100644 --- a/pandas/io/excel/_openpyxl.py +++ b/pandas/io/excel/_openpyxl.py @@ -533,7 +533,11 @@ def get_sheet_data(self, sheet, convert_float: bool) -> List[List[Scalar]]: version = LooseVersion(get_version(openpyxl)) - if version >= "3.0.0": + # There is no good way of determining if a sheet is read-only + # https://foss.heptapod.net/openpyxl/openpyxl/-/issues/1605 + is_readonly = hasattr(sheet, "reset_dimensions") + + if version >= "3.0.0" and is_readonly: sheet.reset_dimensions() data: List[List[Scalar]] = [] @@ -541,7 +545,7 @@ def get_sheet_data(self, sheet, convert_float: bool) -> List[List[Scalar]]: converted_row = [self._convert_cell(cell, convert_float) for cell in row] data.append(converted_row) - if version >= "3.0.0" and len(data) > 0: + if version >= "3.0.0" and is_readonly and len(data) > 0: # With dimension reset, openpyxl no longer pads rows max_width = max(len(data_row) for data_row in data) if min(len(data_row) for data_row in data) < max_width: diff --git a/pandas/tests/io/excel/test_openpyxl.py b/pandas/tests/io/excel/test_openpyxl.py index 640501baffc62..da12829b579fe 100644 --- a/pandas/tests/io/excel/test_openpyxl.py +++ b/pandas/tests/io/excel/test_openpyxl.py @@ -122,6 +122,17 @@ def test_to_excel_with_openpyxl_engine(ext): styled.to_excel(filename, engine="openpyxl") +@pytest.mark.parametrize("read_only", [True, False]) +def test_read_workbook(datapath, ext, read_only): + # GH 39528 + filename = datapath("io", "data", "excel", "test1" + ext) + wb = openpyxl.load_workbook(filename, read_only=read_only) + result = pd.read_excel(wb, engine="openpyxl") + wb.close() + expected = pd.read_excel(filename) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( "header, expected_data", [ @@ -139,13 +150,22 @@ def test_to_excel_with_openpyxl_engine(ext): @pytest.mark.parametrize( "filename", ["dimension_missing", "dimension_small", "dimension_large"] ) -@pytest.mark.xfail( - LooseVersion(get_version(openpyxl)) < "3.0.0", - reason="openpyxl read-only sheet is incorrect when dimension data is wrong", -) -def test_read_with_bad_dimension(datapath, ext, header, expected_data, filename): +# When read_only is None, use read_excel instead of a workbook +@pytest.mark.parametrize("read_only", [True, False, None]) +def test_read_with_bad_dimension( + datapath, ext, header, expected_data, filename, read_only, request +): # GH 38956, 39001 - no/incorrect dimension information + version = LooseVersion(get_version(openpyxl)) + if (read_only or read_only is None) and version < "3.0.0": + msg = "openpyxl read-only sheet is incorrect when dimension data is wrong" + request.node.add_marker(pytest.mark.xfail(reason=msg)) path = datapath("io", "data", "excel", f"{filename}{ext}") - result = pd.read_excel(path, header=header) + if read_only is None: + result = pd.read_excel(path, header=header) + else: + wb = openpyxl.load_workbook(path, read_only=read_only) + result = pd.read_excel(wb, engine="openpyxl", header=header) + wb.close() expected = DataFrame(expected_data) tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/io/excel/test_readers.py b/pandas/tests/io/excel/test_readers.py index b2e87de5580e6..a594718bd62d9 100644 --- a/pandas/tests/io/excel/test_readers.py +++ b/pandas/tests/io/excel/test_readers.py @@ -2,6 +2,7 @@ from functools import partial import os 
from urllib.error import URLError +from zipfile import BadZipFile import numpy as np import pytest @@ -685,7 +686,13 @@ def test_missing_file_raises(self, read_ext): def test_corrupt_bytes_raises(self, read_ext, engine): bad_stream = b"foo" - with pytest.raises(ValueError, match="File is not a recognized excel file"): + if engine is None or engine == "xlrd": + error = ValueError + msg = "File is not a recognized excel file" + else: + error = BadZipFile + msg = "File is not a zip file" + with pytest.raises(error, match=msg): pd.read_excel(bad_stream) @tm.network
- [x] closes #39528 - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [x] whatsnew entry
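The regression can be exercised by passing an already-open workbook object, as the new `test_read_workbook` does; a minimal sketch (the file name is illustrative):

```python
import openpyxl
import pandas as pd

# an openpyxl Workbook is neither a path nor a buffer; with an explicit
# engine, 1.2.1 still tried to sniff the file format and raised
wb = openpyxl.load_workbook("test1.xlsx", read_only=True)
result = pd.read_excel(wb, engine="openpyxl")
wb.close()
```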
https://api.github.com/repos/pandas-dev/pandas/pulls/39586
2021-02-03T23:40:06Z
2021-02-07T16:24:23Z
2021-02-07T16:24:23Z
2021-02-12T03:01:12Z
BUG: at/iat __setitem__ failing to cast
diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index 6abe70e56b9b9..e42bba1bf07bb 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -341,6 +341,8 @@ Indexing - Bug in incorrectly raising in :meth:`Index.insert`, when setting a new column that cannot be held in the existing ``frame.columns``, or in :meth:`Series.reset_index` or :meth:`DataFrame.reset_index` instead of casting to a compatible dtype (:issue:`39068`) - Bug in :meth:`RangeIndex.append` where a single object of length 1 was concatenated incorrectly (:issue:`39401`) - Bug in setting ``numpy.timedelta64`` values into an object-dtype :class:`Series` using a boolean indexer (:issue:`39488`) +- Bug in setting numeric values into a boolean-dtype :class:`Series` using ``at`` or ``iat`` failing to cast to object-dtype (:issue:`39582`) +- Missing ^^^^^^^ diff --git a/pandas/core/series.py b/pandas/core/series.py index f75292f32dbca..8bd325beede65 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -1043,17 +1043,17 @@ def _set_value(self, label, value, takeable: bool = False): Scalar value. takeable : interpret the index as indexers, default False """ - try: - if takeable: - self._values[label] = value - else: + if not takeable: + try: loc = self.index.get_loc(label) - validate_numeric_casting(self.dtype, value) - self._values[loc] = value - except KeyError: + except KeyError: + # set using a non-recursive method + self.loc[label] = value + return + else: + loc = label - # set using a non-recursive method - self.loc[label] = value + self._set_values(loc, value) # ---------------------------------------------------------------------- # Unsorted diff --git a/pandas/tests/series/indexing/test_setitem.py b/pandas/tests/series/indexing/test_setitem.py index 657f89512d79e..338e1c65e582b 100644 --- a/pandas/tests/series/indexing/test_setitem.py +++ b/pandas/tests/series/indexing/test_setitem.py @@ -326,6 +326,11 @@ def test_int_key(self, obj, key, expected, val, indexer_sli): self.check_indexer(obj, key, expected, val, indexer_sli) + if indexer_sli is tm.loc: + self.check_indexer(obj, key, expected, val, tm.at) + elif indexer_sli is tm.iloc: + self.check_indexer(obj, key, expected, val, tm.iat) + rng = range(key, key + 1) self.check_indexer(obj, rng, expected, val, indexer_sli)
- [ ] closes #xxxx - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [x] whatsnew entry
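A minimal reproduction of the behavior described in the whatsnew entry (the values are illustrative):

```python
import pandas as pd

ser = pd.Series([True, False])
# setting a numeric value through .at (or .iat) previously bypassed the
# casting path; with this change the Series is upcast to object dtype
ser.at[0] = 2.5
assert ser.dtype == object
```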
https://api.github.com/repos/pandas-dev/pandas/pulls/39582
2021-02-03T18:11:38Z
2021-02-04T01:59:27Z
2021-02-04T01:59:27Z
2021-02-04T02:33:52Z
TST/REF: de-duplicate SetitemCastingEquivalents
diff --git a/pandas/tests/series/indexing/test_setitem.py b/pandas/tests/series/indexing/test_setitem.py index 338e1c65e582b..c19dbaa5c6045 100644 --- a/pandas/tests/series/indexing/test_setitem.py +++ b/pandas/tests/series/indexing/test_setitem.py @@ -307,57 +307,71 @@ class SetitemCastingEquivalents: - the setitem does not expand the obj """ - @pytest.fixture(params=[np.nan, np.float64("NaN")]) - def val(self, request): + @pytest.fixture + def is_inplace(self): """ - One python float NaN, one np.float64. Only np.float64 has a `dtype` - attribute. + Indicate that we are not (yet) checking whether or not setting is inplace. """ - return request.param + return None - def check_indexer(self, obj, key, expected, val, indexer): + def check_indexer(self, obj, key, expected, val, indexer, is_inplace): + orig = obj obj = obj.copy() + arr = obj._values + indexer(obj)[key] = val tm.assert_series_equal(obj, expected) - def test_int_key(self, obj, key, expected, val, indexer_sli): + self._check_inplace(is_inplace, orig, arr, obj) + + def _check_inplace(self, is_inplace, orig, arr, obj): + if is_inplace is None: + # We are not (yet) checking whether setting is inplace or not + pass + elif is_inplace: + assert obj._values is arr + else: + # otherwise original array should be unchanged + tm.assert_equal(arr, orig._values) + + def test_int_key(self, obj, key, expected, val, indexer_sli, is_inplace): if not isinstance(key, int): return - self.check_indexer(obj, key, expected, val, indexer_sli) + self.check_indexer(obj, key, expected, val, indexer_sli, is_inplace) if indexer_sli is tm.loc: - self.check_indexer(obj, key, expected, val, tm.at) + self.check_indexer(obj, key, expected, val, tm.at, is_inplace) elif indexer_sli is tm.iloc: - self.check_indexer(obj, key, expected, val, tm.iat) + self.check_indexer(obj, key, expected, val, tm.iat, is_inplace) rng = range(key, key + 1) - self.check_indexer(obj, rng, expected, val, indexer_sli) + self.check_indexer(obj, rng, expected, val, indexer_sli, is_inplace) if indexer_sli is not tm.loc: # Note: no .loc because that handles slice edges differently slc = slice(key, key + 1) - self.check_indexer(obj, slc, expected, val, indexer_sli) + self.check_indexer(obj, slc, expected, val, indexer_sli, is_inplace) ilkey = [key] - self.check_indexer(obj, ilkey, expected, val, indexer_sli) + self.check_indexer(obj, ilkey, expected, val, indexer_sli, is_inplace) indkey = np.array(ilkey) - self.check_indexer(obj, indkey, expected, val, indexer_sli) + self.check_indexer(obj, indkey, expected, val, indexer_sli, is_inplace) - def test_slice_key(self, obj, key, expected, val, indexer_sli): + def test_slice_key(self, obj, key, expected, val, indexer_sli, is_inplace): if not isinstance(key, slice): return if indexer_sli is not tm.loc: # Note: no .loc because that handles slice edges differently - self.check_indexer(obj, key, expected, val, indexer_sli) + self.check_indexer(obj, key, expected, val, indexer_sli, is_inplace) ilkey = list(range(len(obj)))[key] - self.check_indexer(obj, ilkey, expected, val, indexer_sli) + self.check_indexer(obj, ilkey, expected, val, indexer_sli, is_inplace) indkey = np.array(ilkey) - self.check_indexer(obj, indkey, expected, val, indexer_sli) + self.check_indexer(obj, indkey, expected, val, indexer_sli, is_inplace) def test_mask_key(self, obj, key, expected, val, indexer_sli): # setitem with boolean mask @@ -368,14 +382,19 @@ def test_mask_key(self, obj, key, expected, val, indexer_sli): indexer_sli(obj)[mask] = val tm.assert_series_equal(obj, expected) 
- def test_series_where(self, obj, key, expected, val): + def test_series_where(self, obj, key, expected, val, is_inplace): mask = np.zeros(obj.shape, dtype=bool) mask[key] = True + orig = obj obj = obj.copy() + arr = obj._values + res = obj.where(~mask, val) tm.assert_series_equal(res, expected) + self._check_inplace(is_inplace, orig, arr, obj) + def test_index_where(self, obj, key, expected, val, request): if Index(obj).dtype != obj.dtype: pytest.skip("test not applicable for this dtype") @@ -519,7 +538,7 @@ def test_setitem_slice_into_readonly_backing_data(): assert not array.any() -class TestSetitemCastingEquivalentsTimedelta64IntoNumeric: +class TestSetitemTimedelta64IntoNumeric(SetitemCastingEquivalents): # timedelta64 should not be treated as integers when setting into # numeric Series @@ -527,7 +546,7 @@ class TestSetitemCastingEquivalentsTimedelta64IntoNumeric: def val(self): td = np.timedelta64(4, "ns") return td - return np.full((1,), td) + # TODO: could also try np.full((1,), td) @pytest.fixture(params=[complex, int, float]) def dtype(self, request): @@ -551,91 +570,9 @@ def expected(self, dtype): def key(self): return 0 - def check_indexer(self, obj, key, expected, val, indexer): - orig = obj - obj = obj.copy() - arr = obj._values - - indexer(obj)[key] = val - tm.assert_series_equal(obj, expected) - - tm.assert_equal(arr, orig._values) # original array is unchanged - - def test_int_key(self, obj, key, expected, val, indexer_sli): - if not isinstance(key, int): - return - - self.check_indexer(obj, key, expected, val, indexer_sli) - - rng = range(key, key + 1) - self.check_indexer(obj, rng, expected, val, indexer_sli) - - if indexer_sli is not tm.loc: - # Note: no .loc because that handles slice edges differently - slc = slice(key, key + 1) - self.check_indexer(obj, slc, expected, val, indexer_sli) - - ilkey = [key] - self.check_indexer(obj, ilkey, expected, val, indexer_sli) - - indkey = np.array(ilkey) - self.check_indexer(obj, indkey, expected, val, indexer_sli) - - def test_slice_key(self, obj, key, expected, val, indexer_sli): - if not isinstance(key, slice): - return - - if indexer_sli is not tm.loc: - # Note: no .loc because that handles slice edges differently - self.check_indexer(obj, key, expected, val, indexer_sli) - - ilkey = list(range(len(obj)))[key] - self.check_indexer(obj, ilkey, expected, val, indexer_sli) - - indkey = np.array(ilkey) - self.check_indexer(obj, indkey, expected, val, indexer_sli) - - def test_mask_key(self, obj, key, expected, val, indexer_sli): - # setitem with boolean mask - mask = np.zeros(obj.shape, dtype=bool) - mask[key] = True - - self.check_indexer(obj, mask, expected, val, indexer_sli) - - def test_series_where(self, obj, key, expected, val): - mask = np.zeros(obj.shape, dtype=bool) - mask[key] = True - - orig = obj - obj = obj.copy() - arr = obj._values - res = obj.where(~mask, val) - tm.assert_series_equal(res, expected) - - tm.assert_equal(arr, orig._values) # original array is unchanged - - def test_index_where(self, obj, key, expected, val, request): - if Index(obj).dtype != obj.dtype: - pytest.skip("test not applicable for this dtype") - - mask = np.zeros(obj.shape, dtype=bool) - mask[key] = True - - if obj.dtype == bool and not mask.all(): - # When mask is all True, casting behavior does not apply - msg = "Index/Series casting behavior inconsistent GH#38692" - mark = pytest.mark.xfail(reason=msg) - request.node.add_marker(mark) - - res = Index(obj).where(~mask, val) - tm.assert_index_equal(res, Index(expected)) - - def 
test_index_putmask(self, obj, key, expected, val): - if Index(obj).dtype != obj.dtype: - pytest.skip("test not applicable for this dtype") - - mask = np.zeros(obj.shape, dtype=bool) - mask[key] = True - - res = Index(obj).putmask(mask, val) - tm.assert_index_equal(res, Index(expected)) + @pytest.fixture + def is_inplace(self): + """ + Indicate we do _not_ expect the setting to be done inplace. + """ + return False
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
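The de-duplication leans on pytest fixture overriding: the base class defines a default `is_inplace` fixture and subclasses override it instead of re-implementing every test. A standalone sketch of that pattern (class and fixture names here are illustrative):

```python
import pytest

class SetitemBase:
    @pytest.fixture
    def is_inplace(self):
        # default: not (yet) checking whether setting happens in place
        return None

    def test_setting(self, is_inplace):
        # shared test body consults the fixture instead of being copied
        assert is_inplace in (None, True, False)

class TestNotInplace(SetitemBase):
    @pytest.fixture
    def is_inplace(self):
        # per-class override; every inherited test picks this value up
        return False
```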
https://api.github.com/repos/pandas-dev/pandas/pulls/39575
2021-02-03T04:51:24Z
2021-02-04T15:12:47Z
2021-02-04T15:12:47Z
2021-02-04T15:13:36Z
BUG: DataFrame.append with timedelta64
diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index 99ae60859b68c..71874aea77e58 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -430,6 +430,7 @@ Reshaping - Bug in :meth:`DataFrame.apply` would give incorrect results when used with a string argument and ``axis=1`` when the axis argument was not supported and now raises a ``ValueError`` instead (:issue:`39211`) - Bug in :meth:`DataFrame.sort_values` not reshaping index correctly after sorting on columns, when ``ignore_index=True`` (:issue:`39464`) - Bug in :meth:`DataFrame.append` returning incorrect dtypes with combinations of ``ExtensionDtype`` dtypes (:issue:`39454`) +- Bug in :meth:`DataFrame.append` returning incorrect dtypes with combinations of ``datetime64`` and ``timedelta64`` dtypes (:issue:`39574`) Sparse ^^^^^^ diff --git a/pandas/core/dtypes/concat.py b/pandas/core/dtypes/concat.py index 3bded4dd2834b..b766392e35601 100644 --- a/pandas/core/dtypes/concat.py +++ b/pandas/core/dtypes/concat.py @@ -61,7 +61,7 @@ def _cast_to_common_type(arr: ArrayLike, dtype: DtypeObj) -> ArrayLike: return arr.astype(dtype, copy=False) -def concat_compat(to_concat, axis: int = 0): +def concat_compat(to_concat, axis: int = 0, ea_compat_axis: bool = False): """ provide concatenation of an array of arrays each of which is a single 'normalized' dtypes (in that for example, if it's object, then it is a @@ -72,6 +72,9 @@ def concat_compat(to_concat, axis: int = 0): ---------- to_concat : array of arrays axis : axis to provide concatenation + ea_compat_axis : bool, default False + For ExtensionArray compat, behave as if axis == 1 when determining + whether to drop empty arrays. Returns ------- @@ -91,7 +94,8 @@ def is_nonempty(x) -> bool: # marginal given that it would still require shape & dtype calculation and # np.concatenate which has them both implemented is compiled. 
non_empties = [x for x in to_concat if is_nonempty(x)] - if non_empties and axis == 0: + if non_empties and axis == 0 and not ea_compat_axis: + # ea_compat_axis see GH#39574 to_concat = non_empties kinds = {obj.dtype.kind for obj in to_concat} diff --git a/pandas/core/internals/concat.py b/pandas/core/internals/concat.py index f7705dd00c3be..a2c930f6d9b22 100644 --- a/pandas/core/internals/concat.py +++ b/pandas/core/internals/concat.py @@ -1,9 +1,8 @@ from __future__ import annotations -from collections import defaultdict import copy import itertools -from typing import TYPE_CHECKING, Dict, List, Sequence, cast +from typing import TYPE_CHECKING, Dict, List, Sequence import numpy as np @@ -14,16 +13,13 @@ from pandas.core.dtypes.cast import ensure_dtype_can_hold_na, find_common_type from pandas.core.dtypes.common import ( is_categorical_dtype, - is_datetime64_dtype, is_datetime64tz_dtype, + is_dtype_equal, is_extension_array_dtype, - is_float_dtype, - is_numeric_dtype, is_sparse, - is_timedelta64_dtype, ) from pandas.core.dtypes.concat import concat_compat -from pandas.core.dtypes.missing import isna_all +from pandas.core.dtypes.missing import is_valid_na_for_dtype, isna_all import pandas.core.algorithms as algos from pandas.core.arrays import DatetimeArray, ExtensionArray @@ -33,7 +29,6 @@ if TYPE_CHECKING: from pandas import Index - from pandas.core.arrays.sparse.dtype import SparseDtype def concatenate_block_managers( @@ -232,6 +227,29 @@ def dtype(self): return blk.dtype return ensure_dtype_can_hold_na(blk.dtype) + def is_valid_na_for(self, dtype: DtypeObj) -> bool: + """ + Check that we are all-NA of a type/dtype that is compatible with this dtype. + Augments `self.is_na` with an additional check of the type of NA values. + """ + if not self.is_na: + return False + if self.block is None: + return True + + if self.dtype == object: + values = self.block.values + return all(is_valid_na_for_dtype(x, dtype) for x in values.ravel(order="K")) + + if self.dtype.kind == dtype.kind == "M" and not is_dtype_equal( + self.dtype, dtype + ): + # fill_values match but we should not cast self.block.values to dtype + return False + + na_value = self.block.fill_value + return is_valid_na_for_dtype(na_value, dtype) + @cache_readonly def is_na(self) -> bool: if self.block is None: @@ -262,7 +280,7 @@ def get_reindexed_values(self, empty_dtype: DtypeObj, upcasted_na) -> ArrayLike: else: fill_value = upcasted_na - if self.is_na: + if self.is_valid_na_for(empty_dtype): blk_dtype = getattr(self.block, "dtype", None) if blk_dtype == np.dtype(object): @@ -276,10 +294,9 @@ def get_reindexed_values(self, empty_dtype: DtypeObj, upcasted_na) -> ArrayLike: if is_datetime64tz_dtype(blk_dtype) or is_datetime64tz_dtype( empty_dtype ): - if self.block is None: - # TODO(EA2D): special case unneeded with 2D EAs - i8values = np.full(self.shape[1], fill_value.value) - return DatetimeArray(i8values, dtype=empty_dtype) + # TODO(EA2D): special case unneeded with 2D EAs + i8values = np.full(self.shape[1], fill_value.value) + return DatetimeArray(i8values, dtype=empty_dtype) elif is_categorical_dtype(blk_dtype): pass elif is_extension_array_dtype(blk_dtype): @@ -295,6 +312,8 @@ def get_reindexed_values(self, empty_dtype: DtypeObj, upcasted_na) -> ArrayLike: empty_arr, allow_fill=True, fill_value=fill_value ) else: + # NB: we should never get here with empty_dtype integer or bool; + # if we did, the missing_arr.fill would cast to gibberish missing_arr = np.empty(self.shape, dtype=empty_dtype) missing_arr.fill(fill_value) return 
missing_arr @@ -362,14 +381,12 @@ def _concatenate_join_units( # concatting with at least one EA means we are concatting a single column # the non-EA values are 2D arrays with shape (1, n) to_concat = [t if isinstance(t, ExtensionArray) else t[0, :] for t in to_concat] - concat_values = concat_compat(to_concat, axis=0) - if not isinstance(concat_values, ExtensionArray) or ( - isinstance(concat_values, DatetimeArray) and concat_values.tz is None - ): + concat_values = concat_compat(to_concat, axis=0, ea_compat_axis=True) + if not is_extension_array_dtype(concat_values.dtype): # if the result of concat is not an EA but an ndarray, reshape to # 2D to put it a non-EA Block - # special case DatetimeArray, which *is* an EA, but is put in a - # consolidated 2D block + # special case DatetimeArray/TimedeltaArray, which *is* an EA, but + # is put in a consolidated 2D block concat_values = np.atleast_2d(concat_values) else: concat_values = concat_compat(to_concat, axis=concat_axis) @@ -419,108 +436,17 @@ def _get_empty_dtype(join_units: Sequence[JoinUnit]) -> DtypeObj: return empty_dtype has_none_blocks = any(unit.block is None for unit in join_units) - dtypes = [None if unit.block is None else unit.dtype for unit in join_units] - filtered_dtypes = [ + dtypes = [ unit.dtype for unit in join_units if unit.block is not None and not unit.is_na ] - if not len(filtered_dtypes): - filtered_dtypes = [unit.dtype for unit in join_units if unit.block is not None] - dtype_alt = find_common_type(filtered_dtypes) - - upcast_classes = _get_upcast_classes(join_units, dtypes) - - if is_extension_array_dtype(dtype_alt): - return dtype_alt - elif dtype_alt == object: - return dtype_alt - - # TODO: de-duplicate with maybe_promote? - # create the result - if "extension" in upcast_classes: - return np.dtype("object") - elif "bool" in upcast_classes: - if has_none_blocks: - return np.dtype(np.object_) - else: - return np.dtype(np.bool_) - elif "datetimetz" in upcast_classes: - # GH-25014. We use NaT instead of iNaT, since this eventually - # ends up in DatetimeArray.take, which does not allow iNaT. - dtype = upcast_classes["datetimetz"] - return dtype[0] - elif "datetime" in upcast_classes: - return np.dtype("M8[ns]") - elif "timedelta" in upcast_classes: - return np.dtype("m8[ns]") - else: - try: - common_dtype = np.find_common_type(upcast_classes, []) - except TypeError: - # At least one is an ExtensionArray - return np.dtype(np.object_) - else: - if is_float_dtype(common_dtype): - return common_dtype - elif is_numeric_dtype(common_dtype): - if has_none_blocks: - return np.dtype(np.float64) - else: - return common_dtype - - msg = "invalid dtype determination in get_concat_dtype" - raise AssertionError(msg) - - -def _get_upcast_classes( - join_units: Sequence[JoinUnit], - dtypes: Sequence[DtypeObj], -) -> Dict[str, List[DtypeObj]]: - """Create mapping between upcast class names and lists of dtypes.""" - upcast_classes: Dict[str, List[DtypeObj]] = defaultdict(list) - null_upcast_classes: Dict[str, List[DtypeObj]] = defaultdict(list) - for dtype, unit in zip(dtypes, join_units): - if dtype is None: - continue - - upcast_cls = _select_upcast_cls_from_dtype(dtype) - # Null blocks should not influence upcast class selection, unless there - # are only null blocks, when same upcasting rules must be applied to - # null upcast classes. 
- if unit.is_na: - null_upcast_classes[upcast_cls].append(dtype) - else: - upcast_classes[upcast_cls].append(dtype) - - if not upcast_classes: - upcast_classes = null_upcast_classes - - return upcast_classes - - -def _select_upcast_cls_from_dtype(dtype: DtypeObj) -> str: - """Select upcast class name based on dtype.""" - if is_categorical_dtype(dtype): - return "extension" - elif is_datetime64tz_dtype(dtype): - return "datetimetz" - elif is_extension_array_dtype(dtype): - return "extension" - elif issubclass(dtype.type, np.bool_): - return "bool" - elif issubclass(dtype.type, np.object_): - return "object" - elif is_datetime64_dtype(dtype): - return "datetime" - elif is_timedelta64_dtype(dtype): - return "timedelta" - elif is_sparse(dtype): - dtype = cast("SparseDtype", dtype) - return dtype.subtype.name - elif is_float_dtype(dtype) or is_numeric_dtype(dtype): - return dtype.name - else: - return "float" + if not len(dtypes): + dtypes = [unit.dtype for unit in join_units if unit.block is not None] + + dtype = find_common_type(dtypes) + if has_none_blocks: + dtype = ensure_dtype_can_hold_na(dtype) + return dtype def _is_uniform_join_units(join_units: List[JoinUnit]) -> bool: diff --git a/pandas/tests/reshape/concat/test_append.py b/pandas/tests/reshape/concat/test_append.py index dd6dbd79113e5..81d5526f5bd15 100644 --- a/pandas/tests/reshape/concat/test_append.py +++ b/pandas/tests/reshape/concat/test_append.py @@ -334,9 +334,9 @@ def test_append_missing_column_proper_upcast(self, sort): def test_append_empty_frame_to_series_with_dateutil_tz(self): # GH 23682 date = Timestamp("2018-10-24 07:30:00", tz=dateutil.tz.tzutc()) - s = Series({"date": date, "a": 1.0, "b": 2.0}) + ser = Series({"date": date, "a": 1.0, "b": 2.0}) df = DataFrame(columns=["c", "d"]) - result_a = df.append(s, ignore_index=True) + result_a = df.append(ser, ignore_index=True) expected = DataFrame( [[np.nan, np.nan, 1.0, 2.0, date]], columns=["c", "d", "a", "b", "date"] ) @@ -350,13 +350,12 @@ def test_append_empty_frame_to_series_with_dateutil_tz(self): ) expected["c"] = expected["c"].astype(object) expected["d"] = expected["d"].astype(object) - - result_b = result_a.append(s, ignore_index=True) + result_b = result_a.append(ser, ignore_index=True) tm.assert_frame_equal(result_b, expected) # column order is different expected = expected[["c", "d", "date", "a", "b"]] - result = df.append([s, s], ignore_index=True) + result = df.append([ser, ser], ignore_index=True) tm.assert_frame_equal(result, expected) def test_append_empty_tz_frame_with_datetime64ns(self): @@ -378,12 +377,27 @@ def test_append_empty_tz_frame_with_datetime64ns(self): @pytest.mark.parametrize( "dtype_str", ["datetime64[ns, UTC]", "datetime64[ns]", "Int64", "int64"] ) - def test_append_empty_frame_with_timedelta64ns_nat(self, dtype_str): + @pytest.mark.parametrize("val", [1, "NaT"]) + def test_append_empty_frame_with_timedelta64ns_nat(self, dtype_str, val): # https://github.com/pandas-dev/pandas/issues/35460 df = DataFrame(columns=["a"]).astype(dtype_str) - other = DataFrame({"a": [np.timedelta64("NaT", "ns")]}) + other = DataFrame({"a": [np.timedelta64(val, "ns")]}) result = df.append(other, ignore_index=True) expected = other.astype(object) tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "dtype_str", ["datetime64[ns, UTC]", "datetime64[ns]", "Int64", "int64"] + ) + @pytest.mark.parametrize("val", [1, "NaT"]) + def test_append_frame_with_timedelta64ns_nat(self, dtype_str, val): + # https://github.com/pandas-dev/pandas/issues/35460 + 
df = DataFrame({"a": pd.array([1], dtype=dtype_str)}) + + other = DataFrame({"a": [np.timedelta64(val, "ns")]}) + result = df.append(other, ignore_index=True) + + expected = DataFrame({"a": [df.iloc[0, 0], other.iloc[0, 0]]}, dtype=object) + tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/reshape/merge/test_multi.py b/pandas/tests/reshape/merge/test_multi.py index 673c97740594f..f47f4e1577277 100644 --- a/pandas/tests/reshape/merge/test_multi.py +++ b/pandas/tests/reshape/merge/test_multi.py @@ -528,10 +528,8 @@ def test_merge_datetime_multi_index_empty_df(self, merge_type): tm.assert_frame_equal(results_merge, expected) tm.assert_frame_equal(results_join, expected) - def test_join_multi_levels(self): - - # GH 3662 - # merge multi-levels + @pytest.fixture + def household(self): household = DataFrame( { "household_id": [1, 2, 3], @@ -540,6 +538,10 @@ def test_join_multi_levels(self): }, columns=["household_id", "male", "wealth"], ).set_index("household_id") + return household + + @pytest.fixture + def portfolio(self): portfolio = DataFrame( { "household_id": [1, 2, 2, 3, 3, 3, 4], @@ -565,7 +567,10 @@ def test_join_multi_levels(self): }, columns=["household_id", "asset_id", "name", "share"], ).set_index(["household_id", "asset_id"]) - result = household.join(portfolio, how="inner") + return portfolio + + @pytest.fixture + def expected(self): expected = ( DataFrame( { @@ -601,8 +606,21 @@ def test_join_multi_levels(self): .set_index(["household_id", "asset_id"]) .reindex(columns=["male", "wealth", "name", "share"]) ) + return expected + + def test_join_multi_levels(self, portfolio, household, expected): + portfolio = portfolio.copy() + household = household.copy() + + # GH 3662 + # merge multi-levels + result = household.join(portfolio, how="inner") tm.assert_frame_equal(result, expected) + def test_join_multi_levels_merge_equivalence(self, portfolio, household, expected): + portfolio = portfolio.copy() + household = household.copy() + # equivalency result = merge( household.reset_index(), @@ -612,6 +630,10 @@ def test_join_multi_levels(self): ).set_index(["household_id", "asset_id"]) tm.assert_frame_equal(result, expected) + def test_join_multi_levels_outer(self, portfolio, household, expected): + portfolio = portfolio.copy() + household = household.copy() + result = household.join(portfolio, how="outer") expected = concat( [ @@ -630,6 +652,10 @@ def test_join_multi_levels(self): ).reindex(columns=expected.columns) tm.assert_frame_equal(result, expected) + def test_join_multi_levels_invalid(self, portfolio, household): + portfolio = portfolio.copy() + household = household.copy() + # invalid cases household.index.name = "foo"
- [ ] closes #xxxx - [x] tests added / passed - [x] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry AFAICT several of the existing tests are just wrong, xref #39122 cc @jorisvandenbossche Fixes (but I haven't yet added a test for) https://github.com/pandas-dev/pandas/issues/39037#issuecomment-756748197
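A minimal reproduction matching the new `test_append_frame_with_timedelta64ns_nat` (`DataFrame.append` was still available at the time of this PR):

```python
import numpy as np
import pandas as pd

df = pd.DataFrame({"a": pd.array([1], dtype="datetime64[ns]")})
other = pd.DataFrame({"a": [np.timedelta64(1, "ns")]})

# datetime64 and timedelta64 have no common dtype, so the concatenated
# column should fall back to object instead of silently reinterpreting
# the timedelta as a datetime
result = df.append(other, ignore_index=True)
assert result["a"].dtype == object
```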
https://api.github.com/repos/pandas-dev/pandas/pulls/39574
2021-02-03T04:16:46Z
2021-02-12T20:13:53Z
2021-02-12T20:13:53Z
2021-02-12T20:28:45Z
CLN: dtypes.concat
diff --git a/pandas/core/dtypes/concat.py b/pandas/core/dtypes/concat.py index 624e71a5cf760..5b46bee96d4b3 100644 --- a/pandas/core/dtypes/concat.py +++ b/pandas/core/dtypes/concat.py @@ -1,7 +1,7 @@ """ Utility functions related to concat. """ -from typing import Set, cast +from typing import cast import numpy as np @@ -14,49 +14,13 @@ is_extension_array_dtype, is_sparse, ) -from pandas.core.dtypes.generic import ABCCategoricalIndex, ABCRangeIndex, ABCSeries +from pandas.core.dtypes.generic import ABCCategoricalIndex, ABCSeries from pandas.core.arrays import ExtensionArray from pandas.core.arrays.sparse import SparseArray from pandas.core.construction import array, ensure_wrapped_if_datetimelike -def _get_dtype_kinds(arrays) -> Set[str]: - """ - Parameters - ---------- - arrays : list of arrays - - Returns - ------- - set[str] - A set of kinds that exist in this list of arrays. - """ - typs: Set[str] = set() - for arr in arrays: - # Note: we use dtype.kind checks because they are much more performant - # than is_foo_dtype - - dtype = arr.dtype - if not isinstance(dtype, np.dtype): - # ExtensionDtype so we get - # e.g. "categorical", "datetime64[ns, US/Central]", "Sparse[itn64, 0]" - typ = str(dtype) - elif isinstance(arr, ABCRangeIndex): - typ = "range" - elif dtype.kind == "M": - typ = "datetime" - elif dtype.kind == "m": - typ = "timedelta" - elif dtype.kind in ["O", "b"]: - typ = str(dtype) # i.e. "object", "bool" - else: - typ = dtype.kind - - typs.add(typ) - return typs - - def _cast_to_common_type(arr: ArrayLike, dtype: DtypeObj) -> ArrayLike: """ Helper function for `arr.astype(common_dtype)` but handling all special @@ -130,8 +94,7 @@ def is_nonempty(x) -> bool: if non_empties and axis == 0: to_concat = non_empties - typs = _get_dtype_kinds(to_concat) - _contains_datetime = any(typ.startswith("datetime") for typ in typs) + kinds = {obj.dtype.kind for obj in to_concat} all_empty = not len(non_empties) single_dtype = len({x.dtype for x in to_concat}) == 1 @@ -150,17 +113,16 @@ def is_nonempty(x) -> bool: else: return np.concatenate(to_concat) - elif _contains_datetime or "timedelta" in typs: + elif any(kind in ["m", "M"] for kind in kinds): return _concat_datetime(to_concat, axis=axis) elif all_empty: # we have all empties, but may need to coerce the result dtype to # object if we have non-numeric type operands (numpy would otherwise # cast this to float) - typs = _get_dtype_kinds(to_concat) - if len(typs) != 1: + if len(kinds) != 1: - if not len(typs - {"i", "u", "f"}) or not len(typs - {"bool", "i", "u"}): + if not len(kinds - {"i", "u", "f"}) or not len(kinds - {"b", "i", "u"}): # let numpy coerce pass else: diff --git a/pandas/tests/dtypes/test_concat.py b/pandas/tests/dtypes/test_concat.py index 53d53e35c6eb5..a749955d35494 100644 --- a/pandas/tests/dtypes/test_concat.py +++ b/pandas/tests/dtypes/test_concat.py @@ -3,83 +3,10 @@ import pandas.core.dtypes.concat as _concat import pandas as pd -from pandas import DatetimeIndex, Period, PeriodIndex, Series, TimedeltaIndex +from pandas import Series import pandas._testing as tm -@pytest.mark.parametrize( - "to_concat, expected", - [ - # int/float/str - ([["a"], [1, 2]], ["i", "object"]), - ([[3, 4], [1, 2]], ["i"]), - ([[3, 4], [1, 2.1]], ["i", "f"]), - # datetimelike - ([DatetimeIndex(["2011-01-01"]), DatetimeIndex(["2011-01-02"])], ["datetime"]), - ([TimedeltaIndex(["1 days"]), TimedeltaIndex(["2 days"])], ["timedelta"]), - # datetimelike object - ( - [ - DatetimeIndex(["2011-01-01"]), - DatetimeIndex(["2011-01-02"], 
tz="US/Eastern"), - ], - ["datetime", "datetime64[ns, US/Eastern]"], - ), - ( - [ - DatetimeIndex(["2011-01-01"], tz="Asia/Tokyo"), - DatetimeIndex(["2011-01-02"], tz="US/Eastern"), - ], - ["datetime64[ns, Asia/Tokyo]", "datetime64[ns, US/Eastern]"], - ), - ([TimedeltaIndex(["1 days"]), TimedeltaIndex(["2 hours"])], ["timedelta"]), - ( - [ - DatetimeIndex(["2011-01-01"], tz="Asia/Tokyo"), - TimedeltaIndex(["1 days"]), - ], - ["datetime64[ns, Asia/Tokyo]", "timedelta"], - ), - ], -) -def test_get_dtype_kinds(index_or_series, to_concat, expected): - to_concat_klass = [index_or_series(c) for c in to_concat] - result = _concat._get_dtype_kinds(to_concat_klass) - assert result == set(expected) - - -@pytest.mark.parametrize( - "to_concat, expected", - [ - ( - [PeriodIndex(["2011-01"], freq="M"), PeriodIndex(["2011-01"], freq="M")], - ["period[M]"], - ), - ( - [ - Series([Period("2011-01", freq="M")]), - Series([Period("2011-02", freq="M")]), - ], - ["period[M]"], - ), - ( - [PeriodIndex(["2011-01"], freq="M"), PeriodIndex(["2011-01"], freq="D")], - ["period[M]", "period[D]"], - ), - ( - [ - Series([Period("2011-01", freq="M")]), - Series([Period("2011-02", freq="D")]), - ], - ["period[M]", "period[D]"], - ), - ], -) -def test_get_dtype_kinds_period(to_concat, expected): - result = _concat._get_dtype_kinds(to_concat) - assert result == set(expected) - - def test_concat_mismatched_categoricals_with_empty(): # concat_compat behavior on series._values should match pd.concat on series ser1 = Series(["a", "b", "c"], dtype="category")
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
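For reference, the single-character dtype kinds that the rewritten `concat_compat` keys on instead of the old string tags; a quick illustration independent of pandas internals:

```python
import numpy as np

to_concat = [
    np.array([1, 2]),                          # kind "i"
    np.array([1.5]),                           # kind "f"
    np.array(["2021-01-01"], dtype="M8[ns]"),  # kind "M" (datetime)
    np.array([1], dtype="m8[ns]"),             # kind "m" (timedelta)
]
kinds = {arr.dtype.kind for arr in to_concat}
assert kinds == {"i", "f", "M", "m"}
# the datetimelike branch in concat_compat triggers on "m" or "M"
assert any(kind in ["m", "M"] for kind in kinds)
```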
https://api.github.com/repos/pandas-dev/pandas/pulls/39572
2021-02-03T02:54:31Z
2021-02-03T13:37:23Z
2021-02-03T13:37:23Z
2021-02-03T15:06:34Z
CLN: re-use na_value_for_dtype
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index e55ddbcc783d0..2b0d3f5aa8862 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -1660,18 +1660,6 @@ def take(arr, indices, axis: int = 0, allow_fill: bool = False, fill_value=None) return result -# TODO: can we de-duplicate with something in dtypes.missing? -def _get_default_fill_value(dtype, fill_value): - if fill_value is lib.no_default: - if is_extension_array_dtype(dtype): - fill_value = dtype.na_value - elif dtype.kind in ["m", "M"]: - fill_value = dtype.type("NaT") - else: - fill_value = np.nan - return fill_value - - def take_nd( arr, indexer, @@ -1713,7 +1701,8 @@ def take_nd( """ mask_info = None - fill_value = _get_default_fill_value(arr.dtype, fill_value) + if fill_value is lib.no_default: + fill_value = na_value_for_dtype(arr.dtype, compat=False) if isinstance(arr, ABCExtensionArray): # Check for EA to catch DatetimeArray, TimedeltaArray diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py index 8d3363df0d132..4af1084033ce2 100644 --- a/pandas/core/nanops.py +++ b/pandas/core/nanops.py @@ -417,8 +417,6 @@ def _na_for_min_count( if is_numeric_dtype(values): values = values.astype("float64") fill_value = na_value_for_dtype(values.dtype) - if fill_value is NaT: - fill_value = values.dtype.type("NaT", "ns") if values.ndim == 1: return fill_value
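`na_value_for_dtype` is an internal helper, so the import path below is version-dependent; a rough sketch of the behavior the deleted `_get_default_fill_value` duplicated:

```python
import numpy as np
from pandas.core.dtypes.missing import na_value_for_dtype

# with compat=False, datetimelike dtypes get NaT and numeric/bool/object
# dtypes get NaN, matching what the hand-rolled helper returned
assert repr(na_value_for_dtype(np.dtype("M8[ns]"), compat=False)) == "NaT"
assert np.isnan(na_value_for_dtype(np.dtype("float64"), compat=False))
```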
https://api.github.com/repos/pandas-dev/pandas/pulls/39571
2021-02-03T00:15:59Z
2021-02-03T01:31:46Z
2021-02-03T01:31:46Z
2021-02-03T02:08:35Z
Backport PR #39569 on branch 1.2.x (CI: ipython tab completion tests)
diff --git a/pandas/tests/arrays/categorical/test_warnings.py b/pandas/tests/arrays/categorical/test_warnings.py index 9e164a250cdb1..6ba3347796e08 100644 --- a/pandas/tests/arrays/categorical/test_warnings.py +++ b/pandas/tests/arrays/categorical/test_warnings.py @@ -15,15 +15,8 @@ async def test_tab_complete_warning(self, ip): code = "import pandas as pd; c = Categorical([])" await ip.run_code(code) - # GH 31324 newer jedi version raises Deprecation warning - import jedi - - if jedi.__version__ < "0.16.0": - warning = tm.assert_produces_warning(None) - else: - warning = tm.assert_produces_warning( - DeprecationWarning, check_stacklevel=False - ) - with warning: + # GH 31324 newer jedi version raises Deprecation warning; + # appears resolved 2021-02-02 + with tm.assert_produces_warning(None): with provisionalcompleter("ignore"): list(ip.Completer.completions("c.", 1)) diff --git a/pandas/tests/frame/test_api.py b/pandas/tests/frame/test_api.py index a7e2fa760b7e4..29a2d9c17202e 100644 --- a/pandas/tests/frame/test_api.py +++ b/pandas/tests/frame/test_api.py @@ -274,17 +274,9 @@ async def test_tab_complete_warning(self, ip, frame_or_series): await ip.run_code(code) - # TODO: remove it when Ipython updates - # GH 33567, jedi version raises Deprecation warning in Ipython - import jedi - - if jedi.__version__ < "0.17.0": - warning = tm.assert_produces_warning(None) - else: - warning = tm.assert_produces_warning( - DeprecationWarning, check_stacklevel=False - ) - with warning: + # GH 31324 newer jedi version raises Deprecation warning; + # appears resolved 2021-02-02 + with tm.assert_produces_warning(None): with provisionalcompleter("ignore"): list(ip.Completer.completions("obj.", 1)) diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py index 372a1d290bca0..8095943a34f4d 100644 --- a/pandas/tests/indexes/test_base.py +++ b/pandas/tests/indexes/test_base.py @@ -1942,16 +1942,9 @@ async def test_tab_complete_warning(self, ip): code = "import pandas as pd; idx = Index([1, 2])" await ip.run_code(code) - # GH 31324 newer jedi version raises Deprecation warning - import jedi - - if jedi.__version__ < "0.16.0": - warning = tm.assert_produces_warning(None) - else: - warning = tm.assert_produces_warning( - DeprecationWarning, check_stacklevel=False - ) - with warning: + # GH 31324 newer jedi version raises Deprecation warning; + # appears resolved 2021-02-02 + with tm.assert_produces_warning(None): with provisionalcompleter("ignore"): list(ip.Completer.completions("idx.", 4)) diff --git a/pandas/tests/resample/test_resampler_grouper.py b/pandas/tests/resample/test_resampler_grouper.py index da5bb0eb59f70..41f2f34da635d 100644 --- a/pandas/tests/resample/test_resampler_grouper.py +++ b/pandas/tests/resample/test_resampler_grouper.py @@ -31,15 +31,9 @@ async def test_tab_complete_ipython6_warning(ip): ) await ip.run_code(code) - # TODO: remove it when Ipython updates - # GH 33567, jedi version raises Deprecation warning in Ipython - import jedi - - if jedi.__version__ < "0.17.0": - warning = tm.assert_produces_warning(None) - else: - warning = tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False) - with warning: + # GH 31324 newer jedi version raises Deprecation warning; + # appears resolved 2021-02-02 + with tm.assert_produces_warning(None): with provisionalcompleter("ignore"): list(ip.Completer.completions("rs.", 1))
Backport PR #39569: CI: ipython tab completion tests
https://api.github.com/repos/pandas-dev/pandas/pulls/39570
2021-02-02T23:34:59Z
2021-02-03T00:48:44Z
2021-02-03T00:48:44Z
2021-02-03T00:48:44Z
CI: ipython tab completion tests
diff --git a/pandas/tests/arrays/categorical/test_warnings.py b/pandas/tests/arrays/categorical/test_warnings.py index 9e164a250cdb1..6ba3347796e08 100644 --- a/pandas/tests/arrays/categorical/test_warnings.py +++ b/pandas/tests/arrays/categorical/test_warnings.py @@ -15,15 +15,8 @@ async def test_tab_complete_warning(self, ip): code = "import pandas as pd; c = Categorical([])" await ip.run_code(code) - # GH 31324 newer jedi version raises Deprecation warning - import jedi - - if jedi.__version__ < "0.16.0": - warning = tm.assert_produces_warning(None) - else: - warning = tm.assert_produces_warning( - DeprecationWarning, check_stacklevel=False - ) - with warning: + # GH 31324 newer jedi version raises Deprecation warning; + # appears resolved 2021-02-02 + with tm.assert_produces_warning(None): with provisionalcompleter("ignore"): list(ip.Completer.completions("c.", 1)) diff --git a/pandas/tests/frame/test_api.py b/pandas/tests/frame/test_api.py index a7e2fa760b7e4..29a2d9c17202e 100644 --- a/pandas/tests/frame/test_api.py +++ b/pandas/tests/frame/test_api.py @@ -274,17 +274,9 @@ async def test_tab_complete_warning(self, ip, frame_or_series): await ip.run_code(code) - # TODO: remove it when Ipython updates - # GH 33567, jedi version raises Deprecation warning in Ipython - import jedi - - if jedi.__version__ < "0.17.0": - warning = tm.assert_produces_warning(None) - else: - warning = tm.assert_produces_warning( - DeprecationWarning, check_stacklevel=False - ) - with warning: + # GH 31324 newer jedi version raises Deprecation warning; + # appears resolved 2021-02-02 + with tm.assert_produces_warning(None): with provisionalcompleter("ignore"): list(ip.Completer.completions("obj.", 1)) diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py index 42769e5daf059..5fd1a15416e23 100644 --- a/pandas/tests/indexes/test_base.py +++ b/pandas/tests/indexes/test_base.py @@ -1595,16 +1595,9 @@ async def test_tab_complete_warning(self, ip): code = "import pandas as pd; idx = Index([1, 2])" await ip.run_code(code) - # GH 31324 newer jedi version raises Deprecation warning - import jedi - - if jedi.__version__ < "0.16.0": - warning = tm.assert_produces_warning(None) - else: - warning = tm.assert_produces_warning( - DeprecationWarning, check_stacklevel=False - ) - with warning: + # GH 31324 newer jedi version raises Deprecation warning; + # appears resolved 2021-02-02 + with tm.assert_produces_warning(None): with provisionalcompleter("ignore"): list(ip.Completer.completions("idx.", 4)) diff --git a/pandas/tests/resample/test_resampler_grouper.py b/pandas/tests/resample/test_resampler_grouper.py index 86955ac4e4d22..1a10255a81a8c 100644 --- a/pandas/tests/resample/test_resampler_grouper.py +++ b/pandas/tests/resample/test_resampler_grouper.py @@ -31,15 +31,9 @@ async def test_tab_complete_ipython6_warning(ip): ) await ip.run_code(code) - # TODO: remove it when Ipython updates - # GH 33567, jedi version raises Deprecation warning in Ipython - import jedi - - if jedi.__version__ < "0.17.0": - warning = tm.assert_produces_warning(None) - else: - warning = tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False) - with warning: + # GH 31324 newer jedi version raises Deprecation warning; + # appears resolved 2021-02-02 + with tm.assert_produces_warning(None): with provisionalcompleter("ignore"): list(ip.Completer.completions("rs.", 1))
- [ ] closes #xxxx - [ ] tests added / passed - [ ] Ensure all linting tests pass, see [here](https://pandas.pydata.org/pandas-docs/dev/development/contributing.html#code-standards) for how to run them - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/39569
2021-02-02T22:34:42Z
2021-02-02T23:33:56Z
2021-02-02T23:33:56Z
2021-02-02T23:41:21Z
Backport PR #39484: BUG: Regression in astype not casting to bytes
diff --git a/doc/source/whatsnew/v1.2.2.rst b/doc/source/whatsnew/v1.2.2.rst index 240acf787f9c9..0ee1abaa2a0eb 100644 --- a/doc/source/whatsnew/v1.2.2.rst +++ b/doc/source/whatsnew/v1.2.2.rst @@ -17,6 +17,7 @@ Fixed regressions - Fixed regression in :func:`read_excel` that caused it to raise ``AttributeError`` when checking version of older xlrd versions (:issue:`38955`) - Fixed regression in :class:`DataFrame` constructor reordering element when construction from datetime ndarray with dtype not ``"datetime64[ns]"`` (:issue:`39422`) +- Fixed regression in :class:`DataFrame.astype` and :class:`Series.astype` not casting to bytes dtype (:issue:`39474`) - Fixed regression in :meth:`~DataFrame.to_pickle` failing to create bz2/xz compressed pickle files with ``protocol=5`` (:issue:`39002`) - Fixed regression in :func:`pandas.testing.assert_series_equal` and :func:`pandas.testing.assert_frame_equal` always raising ``AssertionError`` when comparing extension dtypes (:issue:`39410`) - Fixed regression in :meth:`~DataFrame.to_csv` opening ``codecs.StreamWriter`` in binary mode instead of in text mode and ignoring user-provided ``mode`` (:issue:`39247`) diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index c77991ced3907..1338d22f74347 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -1137,7 +1137,7 @@ def soft_convert_objects( # bound of nanosecond-resolution 64-bit integers. try: values = lib.maybe_convert_objects(values, convert_datetime=True) - except OutOfBoundsDatetime: + except (OutOfBoundsDatetime, ValueError): pass if timedelta and is_object_dtype(values.dtype): diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 32aade97c8736..99218cebc37e1 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -2503,7 +2503,7 @@ class ObjectBlock(Block): _can_hold_na = True def _maybe_coerce_values(self, values): - if issubclass(values.dtype.type, (str, bytes)): + if issubclass(values.dtype.type, str): values = np.array(values, dtype=object) return values diff --git a/pandas/tests/frame/methods/test_astype.py b/pandas/tests/frame/methods/test_astype.py index d79969eac0323..5264d4b432d34 100644 --- a/pandas/tests/frame/methods/test_astype.py +++ b/pandas/tests/frame/methods/test_astype.py @@ -611,3 +611,8 @@ def test_astype_tz_object_conversion(self, tz): # do real test: object dtype to a specified tz, different from construction tz. result = result.astype({"tz": "datetime64[ns, Europe/London]"}) tm.assert_frame_equal(result, expected) + + def test_astype_bytes(self): + # GH#39474 + result = DataFrame(["foo", "bar", "baz"]).astype(bytes) + assert result.dtypes[0] == np.dtype("S3") diff --git a/pandas/tests/series/methods/test_astype.py b/pandas/tests/series/methods/test_astype.py index 3cd9d52f8e754..a3624162a08d8 100644 --- a/pandas/tests/series/methods/test_astype.py +++ b/pandas/tests/series/methods/test_astype.py @@ -337,6 +337,11 @@ def test_astype_unicode(self): reload(sys) sys.setdefaultencoding(former_encoding) + def test_astype_bytes(self): + # GH#39474 + result = Series(["foo", "bar", "baz"]).astype(bytes) + assert result.dtypes == np.dtype("S3") + class TestAstypeCategorical: def test_astype_categorical_invalid_conversions(self):
Manual backport
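A minimal reproduction per the tests added in the backported change:

```python
import numpy as np
import pandas as pd

# astype(bytes) had regressed to leaving object dtype; the fix restores
# casting to a fixed-width bytes dtype
result = pd.Series(["foo", "bar", "baz"]).astype(bytes)
assert result.dtype == np.dtype("S3")
```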
https://api.github.com/repos/pandas-dev/pandas/pulls/39568
2021-02-02T22:21:24Z
2021-02-02T23:33:11Z
2021-02-02T23:33:11Z
2021-12-15T10:39:31Z