ZTWHHH committed on
Commit
11f629c
·
verified ·
1 Parent(s): 4907318

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes. See the raw diff for the full change set.
Files changed (50) hide show
  1. videochat2/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/dtype.cpython-310.pyc +0 -0
  2. videochat2/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/ops.cpython-310.pyc +0 -0
  3. videochat2/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/reduce.cpython-310.pyc +0 -0
  4. videochat2/lib/python3.10/site-packages/pandas/tests/extension/base/accumulate.py +37 -0
  5. videochat2/lib/python3.10/site-packages/pandas/tests/extension/base/base.py +21 -0
  6. videochat2/lib/python3.10/site-packages/pandas/tests/extension/base/casting.py +87 -0
  7. videochat2/lib/python3.10/site-packages/pandas/tests/extension/base/interface.py +127 -0
  8. videochat2/lib/python3.10/site-packages/pandas/tests/extension/base/methods.py +609 -0
  9. videochat2/lib/python3.10/site-packages/pandas/tests/extension/base/reduce.py +75 -0
  10. videochat2/lib/python3.10/site-packages/pandas/tests/extension/base/reshaping.py +367 -0
  11. videochat2/lib/python3.10/site-packages/pandas/tests/io/conftest.py +213 -0
  12. videochat2/lib/python3.10/site-packages/pandas/tests/io/formats/test_eng_formatting.py +234 -0
  13. videochat2/lib/python3.10/site-packages/pandas/tests/io/formats/test_format.py +0 -0
  14. videochat2/lib/python3.10/site-packages/pandas/tests/io/formats/test_info.py +503 -0
  15. videochat2/lib/python3.10/site-packages/pandas/tests/io/formats/test_printing.py +198 -0
  16. videochat2/lib/python3.10/site-packages/pandas/tests/io/formats/test_to_html.py +898 -0
  17. videochat2/lib/python3.10/site-packages/pandas/tests/io/formats/test_to_markdown.py +90 -0
  18. videochat2/lib/python3.10/site-packages/pandas/tests/io/formats/test_to_string.py +359 -0
  19. videochat2/lib/python3.10/site-packages/pandas/tests/io/generate_legacy_storage_files.py +341 -0
  20. videochat2/lib/python3.10/site-packages/pandas/tests/io/json/__init__.py +0 -0
  21. videochat2/lib/python3.10/site-packages/pandas/tests/io/json/test_compression.py +120 -0
  22. videochat2/lib/python3.10/site-packages/pandas/tests/io/json/test_deprecated_kwargs.py +18 -0
  23. videochat2/lib/python3.10/site-packages/pandas/tests/io/json/test_json_table_schema_ext_dtype.py +316 -0
  24. videochat2/lib/python3.10/site-packages/pandas/tests/io/json/test_normalize.py +893 -0
  25. videochat2/lib/python3.10/site-packages/pandas/tests/io/json/test_pandas.py +1965 -0
  26. videochat2/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/__init__.cpython-310.pyc +0 -0
  27. videochat2/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/common.cpython-310.pyc +0 -0
  28. videochat2/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/conftest.cpython-310.pyc +0 -0
  29. videochat2/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_append.cpython-310.pyc +0 -0
  30. videochat2/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_categorical.cpython-310.pyc +0 -0
  31. videochat2/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_compat.cpython-310.pyc +0 -0
  32. videochat2/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_complex.cpython-310.pyc +0 -0
  33. videochat2/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_errors.cpython-310.pyc +0 -0
  34. videochat2/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_file_handling.cpython-310.pyc +0 -0
  35. videochat2/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_keys.cpython-310.pyc +0 -0
  36. videochat2/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_put.cpython-310.pyc +0 -0
  37. videochat2/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_pytables_missing.cpython-310.pyc +0 -0
  38. videochat2/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_read.cpython-310.pyc +0 -0
  39. videochat2/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_retain_attributes.cpython-310.pyc +0 -0
  40. videochat2/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_round_trip.cpython-310.pyc +0 -0
  41. videochat2/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_select.cpython-310.pyc +0 -0
  42. videochat2/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_store.cpython-310.pyc +0 -0
  43. videochat2/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_subclass.cpython-310.pyc +0 -0
  44. videochat2/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_time_series.cpython-310.pyc +0 -0
  45. videochat2/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_timezones.cpython-310.pyc +0 -0
  46. videochat2/lib/python3.10/site-packages/pandas/tests/io/pytables/common.py +50 -0
  47. videochat2/lib/python3.10/site-packages/pandas/tests/io/pytables/conftest.py +9 -0
  48. videochat2/lib/python3.10/site-packages/pandas/tests/io/pytables/test_append.py +910 -0
  49. videochat2/lib/python3.10/site-packages/pandas/tests/io/pytables/test_complex.py +199 -0
  50. videochat2/lib/python3.10/site-packages/pandas/tests/io/pytables/test_errors.py +230 -0
videochat2/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/dtype.cpython-310.pyc ADDED
Binary file (5.38 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/ops.cpython-310.pyc ADDED
Binary file (7.74 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/reduce.cpython-310.pyc ADDED
Binary file (2.76 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/tests/extension/base/accumulate.py ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pytest
2
+
3
+ import pandas as pd
4
+ from pandas.tests.extension.base.base import BaseExtensionTests
5
+
6
+
7
class BaseAccumulateTests(BaseExtensionTests):
    """
    Accumulation-specific tests (cumsum/cumprod/cummin/cummax).

    These generally only make sense for numeric/boolean extension arrays.
    """

    def check_accumulate(self, s, op_name, skipna):
        # Run the accumulation on the EA-backed Series...
        got = getattr(s, op_name)(skipna=skipna)

        # Float32 cumprod accumulates rounding error too quickly to compare
        # against a float64 reference, so skip that combination.
        if skipna and op_name == "cumprod" and got.dtype == pd.Float32Dtype():
            pytest.skip(
                f"Float32 precision lead to large differences with op {op_name} "
                f"and skipna={skipna}"
            )

        # ...and compare against the same op applied to a float64 upcast.
        reference = getattr(s.astype("float64"), op_name)(skipna=skipna)
        self.assert_series_equal(got, reference, check_dtype=False)

    @pytest.mark.parametrize("skipna", [True, False])
    def test_accumulate_series_raises(self, data, all_numeric_accumulations, skipna):
        # Arrays that do not implement accumulation are expected to raise.
        series = pd.Series(data)

        with pytest.raises(NotImplementedError):
            getattr(series, all_numeric_accumulations)(skipna=skipna)

    @pytest.mark.parametrize("skipna", [True, False])
    def test_accumulate_series(self, data, all_numeric_accumulations, skipna):
        # Every accumulation op round-trips through check_accumulate.
        self.check_accumulate(pd.Series(data), all_numeric_accumulations, skipna)
videochat2/lib/python3.10/site-packages/pandas/tests/extension/base/base.py ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pandas._testing as tm
2
+
3
+
4
class BaseExtensionTests:
    """Shared assertion helpers for the extension-array test suite."""

    # These are classmethods with looser signatures (rather than plain
    # aliases of the pandas._testing functions) so that subclass overrides
    # remain compliant with mypy's inheritance checks.

    @classmethod
    def assert_equal(cls, left, right, **kwargs):
        """Type-dispatching comparison; delegates to tm.assert_equal."""
        return tm.assert_equal(left, right, **kwargs)

    @classmethod
    def assert_series_equal(cls, left, right, *args, **kwargs):
        """Series comparison; delegates to tm.assert_series_equal."""
        return tm.assert_series_equal(left, right, *args, **kwargs)

    @classmethod
    def assert_frame_equal(cls, left, right, *args, **kwargs):
        """DataFrame comparison; delegates to tm.assert_frame_equal."""
        return tm.assert_frame_equal(left, right, *args, **kwargs)

    @classmethod
    def assert_extension_array_equal(cls, left, right, *args, **kwargs):
        """ExtensionArray comparison; delegates to tm.assert_extension_array_equal."""
        return tm.assert_extension_array_equal(left, right, *args, **kwargs)
videochat2/lib/python3.10/site-packages/pandas/tests/extension/base/casting.py ADDED
@@ -0,0 +1,87 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import pytest
3
+
4
+ from pandas.compat import np_version_under1p21
5
+ import pandas.util._test_decorators as td
6
+
7
+ import pandas as pd
8
+ from pandas.core.internals import ObjectBlock
9
+ from pandas.tests.extension.base.base import BaseExtensionTests
10
+
11
+
12
class BaseCastingTests(BaseExtensionTests):
    """Casting to and from ExtensionDtypes."""

    def test_astype_object_series(self, all_data):
        ser = pd.Series(all_data, name="A")
        converted = ser.astype(object)
        assert converted.dtype == np.dtype(object)
        if hasattr(converted._mgr, "blocks"):
            # BlockManager path: the single resulting block must be object-dtype
            assert isinstance(converted._mgr.blocks[0], ObjectBlock)
        assert isinstance(converted._mgr.array, np.ndarray)
        assert converted._mgr.array.dtype == np.dtype(object)

    def test_astype_object_frame(self, all_data):
        df = pd.DataFrame({"A": all_data})

        converted = df.astype(object)
        if hasattr(converted._mgr, "blocks"):
            first_blk = converted._data.blocks[0]
            assert isinstance(first_blk, ObjectBlock), type(first_blk)
        assert isinstance(converted._mgr.arrays[0], np.ndarray)
        assert converted._mgr.arrays[0].dtype == np.dtype(object)

        # earlier numpy raises TypeError on e.g. np.dtype(np.int64) == "Int64"
        if not np_version_under1p21:
            # check that we can compare the dtypes
            dtype_match = converted.dtypes == df.dtypes
            assert not dtype_match.any()

    def test_tolist(self, data):
        # Series.tolist() must match plain iteration over the EA.
        assert pd.Series(data).tolist() == list(data)

    def test_astype_str(self, data):
        head = data[:5]
        result = pd.Series(head).astype(str)
        expected = pd.Series([str(x) for x in head], dtype=str)
        self.assert_series_equal(result, expected)

    @pytest.mark.parametrize(
        "nullable_string_dtype",
        [
            "string[python]",
            pytest.param("string[pyarrow]", marks=td.skip_if_no("pyarrow")),
        ],
    )
    def test_astype_string(self, data, nullable_string_dtype):
        # GH-33465, GH#45326 as of 2.0 we decode bytes instead of calling str(obj)
        head = data[:5]
        result = pd.Series(head).astype(nullable_string_dtype)
        expected = pd.Series(
            [x.decode() if isinstance(x, bytes) else str(x) for x in head],
            dtype=nullable_string_dtype,
        )
        self.assert_series_equal(result, expected)

    def test_to_numpy(self, data):
        expected = np.asarray(data)

        # both the bare EA and a Series wrapping it convert to the same ndarray
        self.assert_equal(data.to_numpy(), expected)
        self.assert_equal(pd.Series(data).to_numpy(), expected)

    def test_astype_empty_dataframe(self, dtype):
        # https://github.com/pandas-dev/pandas/issues/33113
        empty = pd.DataFrame()
        self.assert_frame_equal(empty.astype(dtype), empty)

    @pytest.mark.parametrize("copy", [True, False])
    def test_astype_own_type(self, data, copy):
        # ensure that astype returns the original object for equal dtype and copy=False
        # https://github.com/pandas-dev/pandas/issues/28488
        result = data.astype(data.dtype, copy=copy)
        assert (result is data) is (not copy)
        self.assert_extension_array_equal(result, data)
videochat2/lib/python3.10/site-packages/pandas/tests/extension/base/interface.py ADDED
@@ -0,0 +1,127 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+
3
+ from pandas.core.dtypes.common import is_extension_array_dtype
4
+ from pandas.core.dtypes.dtypes import ExtensionDtype
5
+
6
+ import pandas as pd
7
+ import pandas._testing as tm
8
+ from pandas.tests.extension.base.base import BaseExtensionTests
9
+
10
+
11
class BaseInterfaceTests(BaseExtensionTests):
    """Tests that the basic ExtensionArray interface is satisfied."""

    # ------------------------------------------------------------------------
    # Interface
    # ------------------------------------------------------------------------

    def test_len(self, data):
        # the `data` fixture is specified to have exactly 100 elements
        assert len(data) == 100

    def test_size(self, data):
        assert data.size == 100

    def test_ndim(self, data):
        assert data.ndim == 1

    def test_can_hold_na_valid(self, data):
        # GH-20761
        assert data._can_hold_na is True

    def test_contains(self, data, data_missing):
        # GH-37867
        # Tests for membership checks. Membership checks for nan-likes is tricky and
        # the settled on rule is: `nan_like in arr` is True if nan_like is
        # arr.dtype.na_value and arr.isna().any() is True. Else the check returns False.

        na = data.dtype.na_value
        # drop the missing entries so `data` is guaranteed all-valid
        data = data[~data.isna()]

        # first elements are non-missing
        assert data[0] in data
        assert data_missing[0] in data_missing

        # the dtype's own NA is only "in" an array that actually holds an NA
        assert na in data_missing
        assert na not in data

        # no other nan-like object may ever register as contained
        for other_na in tm.NULL_OBJECTS:
            if other_na is na or type(other_na) == type(na):
                # type check for e.g. two instances of Decimal("NAN")
                continue
            assert other_na not in data
            assert other_na not in data_missing

    def test_memory_usage(self, data):
        ser = pd.Series(data)
        usage = ser.memory_usage(index=False)
        assert usage == ser.nbytes

    def test_array_interface(self, data):
        arr = np.array(data)
        assert arr[0] == data[0]

        as_object = np.array(data, dtype=object)
        expected = np.array(list(data), dtype=object)
        tm.assert_numpy_array_equal(as_object, expected)

    def test_is_extension_array_dtype(self, data):
        # the EA, its dtype, and a Series wrapping it all identify as EA-backed
        assert is_extension_array_dtype(data)
        assert is_extension_array_dtype(data.dtype)
        assert is_extension_array_dtype(pd.Series(data))
        assert isinstance(data.dtype, ExtensionDtype)

    def test_no_values_attribute(self, data):
        # GH-20735: EA's with .values attribute give problems with internal
        # code, disallowing this for now until solved
        assert not hasattr(data, "values")
        assert not hasattr(data, "_values")

    def test_is_numeric_honored(self, data):
        ser = pd.Series(data)
        if hasattr(ser._mgr, "blocks"):
            assert ser._mgr.blocks[0].is_numeric is data.dtype._is_numeric

    def test_isna_extension_array(self, data_missing):
        # If your `isna` returns an ExtensionArray, you must also implement
        # _reduce. At the *very* least, you must implement any and all
        mask = data_missing.isna()
        if is_extension_array_dtype(mask):
            assert mask._reduce("any")
            assert mask.any()

            assert not mask._reduce("all")
            assert not mask.all()

            assert mask.dtype._is_boolean

    def test_copy(self, data):
        # GH#27083 removing deep keyword from EA.copy
        assert data[0] != data[1]
        dup = data.copy()

        # mutating the original must not leak into the copy
        data[1] = data[0]
        assert dup[1] != dup[0]

    def test_view(self, data):
        # view with no dtype should return a shallow copy, *not* the same
        # object
        assert data[1] != data[0]

        alias = data.view()
        assert alias is not data
        assert type(alias) == type(data)

        # ...but writes through the view must be visible in the original
        alias[1] = alias[0]
        assert data[1] == data[0]

        # check specifically that the `dtype` kwarg is accepted
        data.view(dtype=None)

    def test_tolist(self, data):
        as_list = data.tolist()
        assert isinstance(as_list, list)
        assert as_list == list(data)
videochat2/lib/python3.10/site-packages/pandas/tests/extension/base/methods.py ADDED
@@ -0,0 +1,609 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import inspect
2
+ import operator
3
+
4
+ import numpy as np
5
+ import pytest
6
+
7
+ from pandas._typing import Dtype
8
+
9
+ from pandas.core.dtypes.common import is_bool_dtype
10
+ from pandas.core.dtypes.missing import na_value_for_dtype
11
+
12
+ import pandas as pd
13
+ import pandas._testing as tm
14
+ from pandas.core.sorting import nargsort
15
+ from pandas.tests.extension.base.base import BaseExtensionTests
16
+
17
+
18
+ class BaseMethodsTests(BaseExtensionTests):
19
+ """Various Series and DataFrame methods."""
20
+
21
+ def test_value_counts_default_dropna(self, data):
22
+ # make sure we have consistent default dropna kwarg
23
+ if not hasattr(data, "value_counts"):
24
+ pytest.skip(f"value_counts is not implemented for {type(data)}")
25
+ sig = inspect.signature(data.value_counts)
26
+ kwarg = sig.parameters["dropna"]
27
+ assert kwarg.default is True
28
+
29
+ @pytest.mark.parametrize("dropna", [True, False])
30
+ def test_value_counts(self, all_data, dropna):
31
+ all_data = all_data[:10]
32
+ if dropna:
33
+ other = all_data[~all_data.isna()]
34
+ else:
35
+ other = all_data
36
+
37
+ result = pd.Series(all_data).value_counts(dropna=dropna).sort_index()
38
+ expected = pd.Series(other).value_counts(dropna=dropna).sort_index()
39
+
40
+ self.assert_series_equal(result, expected)
41
+
42
+ def test_value_counts_with_normalize(self, data):
43
+ # GH 33172
44
+ data = data[:10].unique()
45
+ values = np.array(data[~data.isna()])
46
+ ser = pd.Series(data, dtype=data.dtype)
47
+
48
+ result = ser.value_counts(normalize=True).sort_index()
49
+
50
+ if not isinstance(data, pd.Categorical):
51
+ expected = pd.Series(
52
+ [1 / len(values)] * len(values), index=result.index, name="proportion"
53
+ )
54
+ else:
55
+ expected = pd.Series(0.0, index=result.index, name="proportion")
56
+ expected[result > 0] = 1 / len(values)
57
+ if na_value_for_dtype(data.dtype) is pd.NA:
58
+ # TODO(GH#44692): avoid special-casing
59
+ expected = expected.astype("Float64")
60
+
61
+ self.assert_series_equal(result, expected)
62
+
63
+ def test_count(self, data_missing):
64
+ df = pd.DataFrame({"A": data_missing})
65
+ result = df.count(axis="columns")
66
+ expected = pd.Series([0, 1])
67
+ self.assert_series_equal(result, expected)
68
+
69
+ def test_series_count(self, data_missing):
70
+ # GH#26835
71
+ ser = pd.Series(data_missing)
72
+ result = ser.count()
73
+ expected = 1
74
+ assert result == expected
75
+
76
+ def test_apply_simple_series(self, data):
77
+ result = pd.Series(data).apply(id)
78
+ assert isinstance(result, pd.Series)
79
+
80
+ def test_argsort(self, data_for_sorting):
81
+ result = pd.Series(data_for_sorting).argsort()
82
+ # argsort result gets passed to take, so should be np.intp
83
+ expected = pd.Series(np.array([2, 0, 1], dtype=np.intp))
84
+ self.assert_series_equal(result, expected)
85
+
86
+ def test_argsort_missing_array(self, data_missing_for_sorting):
87
+ result = data_missing_for_sorting.argsort()
88
+ # argsort result gets passed to take, so should be np.intp
89
+ expected = np.array([2, 0, 1], dtype=np.intp)
90
+ tm.assert_numpy_array_equal(result, expected)
91
+
92
+ def test_argsort_missing(self, data_missing_for_sorting):
93
+ result = pd.Series(data_missing_for_sorting).argsort()
94
+ expected = pd.Series(np.array([1, -1, 0], dtype=np.intp))
95
+ self.assert_series_equal(result, expected)
96
+
97
+ def test_argmin_argmax(self, data_for_sorting, data_missing_for_sorting, na_value):
98
+ # GH 24382
99
+
100
+ # data_for_sorting -> [B, C, A] with A < B < C
101
+ assert data_for_sorting.argmax() == 1
102
+ assert data_for_sorting.argmin() == 2
103
+
104
+ # with repeated values -> first occurrence
105
+ data = data_for_sorting.take([2, 0, 0, 1, 1, 2])
106
+ assert data.argmax() == 3
107
+ assert data.argmin() == 0
108
+
109
+ # with missing values
110
+ # data_missing_for_sorting -> [B, NA, A] with A < B and NA missing.
111
+ assert data_missing_for_sorting.argmax() == 0
112
+ assert data_missing_for_sorting.argmin() == 2
113
+
114
+ @pytest.mark.parametrize("method", ["argmax", "argmin"])
115
+ def test_argmin_argmax_empty_array(self, method, data):
116
+ # GH 24382
117
+ err_msg = "attempt to get"
118
+ with pytest.raises(ValueError, match=err_msg):
119
+ getattr(data[:0], method)()
120
+
121
+ @pytest.mark.parametrize("method", ["argmax", "argmin"])
122
+ def test_argmin_argmax_all_na(self, method, data, na_value):
123
+ # all missing with skipna=True is the same as empty
124
+ err_msg = "attempt to get"
125
+ data_na = type(data)._from_sequence([na_value, na_value], dtype=data.dtype)
126
+ with pytest.raises(ValueError, match=err_msg):
127
+ getattr(data_na, method)()
128
+
129
+ @pytest.mark.parametrize(
130
+ "op_name, skipna, expected",
131
+ [
132
+ ("idxmax", True, 0),
133
+ ("idxmin", True, 2),
134
+ ("argmax", True, 0),
135
+ ("argmin", True, 2),
136
+ ("idxmax", False, np.nan),
137
+ ("idxmin", False, np.nan),
138
+ ("argmax", False, -1),
139
+ ("argmin", False, -1),
140
+ ],
141
+ )
142
+ def test_argreduce_series(
143
+ self, data_missing_for_sorting, op_name, skipna, expected
144
+ ):
145
+ # data_missing_for_sorting -> [B, NA, A] with A < B and NA missing.
146
+ ser = pd.Series(data_missing_for_sorting)
147
+ result = getattr(ser, op_name)(skipna=skipna)
148
+ tm.assert_almost_equal(result, expected)
149
+
150
+ def test_argmax_argmin_no_skipna_notimplemented(self, data_missing_for_sorting):
151
+ # GH#38733
152
+ data = data_missing_for_sorting
153
+
154
+ with pytest.raises(NotImplementedError, match=""):
155
+ data.argmin(skipna=False)
156
+
157
+ with pytest.raises(NotImplementedError, match=""):
158
+ data.argmax(skipna=False)
159
+
160
+ @pytest.mark.parametrize(
161
+ "na_position, expected",
162
+ [
163
+ ("last", np.array([2, 0, 1], dtype=np.dtype("intp"))),
164
+ ("first", np.array([1, 2, 0], dtype=np.dtype("intp"))),
165
+ ],
166
+ )
167
+ def test_nargsort(self, data_missing_for_sorting, na_position, expected):
168
+ # GH 25439
169
+ result = nargsort(data_missing_for_sorting, na_position=na_position)
170
+ tm.assert_numpy_array_equal(result, expected)
171
+
172
+ @pytest.mark.parametrize("ascending", [True, False])
173
+ def test_sort_values(self, data_for_sorting, ascending, sort_by_key):
174
+ ser = pd.Series(data_for_sorting)
175
+ result = ser.sort_values(ascending=ascending, key=sort_by_key)
176
+ expected = ser.iloc[[2, 0, 1]]
177
+ if not ascending:
178
+ # GH 35922. Expect stable sort
179
+ if ser.nunique() == 2:
180
+ expected = ser.iloc[[0, 1, 2]]
181
+ else:
182
+ expected = ser.iloc[[1, 0, 2]]
183
+
184
+ self.assert_series_equal(result, expected)
185
+
186
+ @pytest.mark.parametrize("ascending", [True, False])
187
+ def test_sort_values_missing(
188
+ self, data_missing_for_sorting, ascending, sort_by_key
189
+ ):
190
+ ser = pd.Series(data_missing_for_sorting)
191
+ result = ser.sort_values(ascending=ascending, key=sort_by_key)
192
+ if ascending:
193
+ expected = ser.iloc[[2, 0, 1]]
194
+ else:
195
+ expected = ser.iloc[[0, 2, 1]]
196
+ self.assert_series_equal(result, expected)
197
+
198
+ @pytest.mark.parametrize("ascending", [True, False])
199
+ def test_sort_values_frame(self, data_for_sorting, ascending):
200
+ df = pd.DataFrame({"A": [1, 2, 1], "B": data_for_sorting})
201
+ result = df.sort_values(["A", "B"])
202
+ expected = pd.DataFrame(
203
+ {"A": [1, 1, 2], "B": data_for_sorting.take([2, 0, 1])}, index=[2, 0, 1]
204
+ )
205
+ self.assert_frame_equal(result, expected)
206
+
207
+ @pytest.mark.parametrize("box", [pd.Series, lambda x: x])
208
+ @pytest.mark.parametrize("method", [lambda x: x.unique(), pd.unique])
209
+ def test_unique(self, data, box, method):
210
+ duplicated = box(data._from_sequence([data[0], data[0]]))
211
+
212
+ result = method(duplicated)
213
+
214
+ assert len(result) == 1
215
+ assert isinstance(result, type(data))
216
+ assert result[0] == duplicated[0]
217
+
218
+ def test_factorize(self, data_for_grouping):
219
+ codes, uniques = pd.factorize(data_for_grouping, use_na_sentinel=True)
220
+ expected_codes = np.array([0, 0, -1, -1, 1, 1, 0, 2], dtype=np.intp)
221
+ expected_uniques = data_for_grouping.take([0, 4, 7])
222
+
223
+ tm.assert_numpy_array_equal(codes, expected_codes)
224
+ self.assert_extension_array_equal(uniques, expected_uniques)
225
+
226
+ def test_factorize_equivalence(self, data_for_grouping):
227
+ codes_1, uniques_1 = pd.factorize(data_for_grouping, use_na_sentinel=True)
228
+ codes_2, uniques_2 = data_for_grouping.factorize(use_na_sentinel=True)
229
+
230
+ tm.assert_numpy_array_equal(codes_1, codes_2)
231
+ self.assert_extension_array_equal(uniques_1, uniques_2)
232
+ assert len(uniques_1) == len(pd.unique(uniques_1))
233
+ assert uniques_1.dtype == data_for_grouping.dtype
234
+
235
+ def test_factorize_empty(self, data):
236
+ codes, uniques = pd.factorize(data[:0])
237
+ expected_codes = np.array([], dtype=np.intp)
238
+ expected_uniques = type(data)._from_sequence([], dtype=data[:0].dtype)
239
+
240
+ tm.assert_numpy_array_equal(codes, expected_codes)
241
+ self.assert_extension_array_equal(uniques, expected_uniques)
242
+
243
+ def test_fillna_copy_frame(self, data_missing):
244
+ arr = data_missing.take([1, 1])
245
+ df = pd.DataFrame({"A": arr})
246
+ df_orig = df.copy()
247
+
248
+ filled_val = df.iloc[0, 0]
249
+ result = df.fillna(filled_val)
250
+
251
+ result.iloc[0, 0] = filled_val
252
+
253
+ self.assert_frame_equal(df, df_orig)
254
+
255
+ def test_fillna_copy_series(self, data_missing):
256
+ arr = data_missing.take([1, 1])
257
+ ser = pd.Series(arr, copy=False)
258
+ ser_orig = ser.copy()
259
+
260
+ filled_val = ser[0]
261
+ result = ser.fillna(filled_val)
262
+ result.iloc[0] = filled_val
263
+
264
+ self.assert_series_equal(ser, ser_orig)
265
+
266
+ def test_fillna_length_mismatch(self, data_missing):
267
+ msg = "Length of 'value' does not match."
268
+ with pytest.raises(ValueError, match=msg):
269
+ data_missing.fillna(data_missing.take([1]))
270
+
271
+ # Subclasses can override if we expect e.g Sparse[bool], boolean, pyarrow[bool]
272
+ _combine_le_expected_dtype: Dtype = np.dtype(bool)
273
+
274
+ def test_combine_le(self, data_repeated):
275
+ # GH 20825
276
+ # Test that combine works when doing a <= (le) comparison
277
+ orig_data1, orig_data2 = data_repeated(2)
278
+ s1 = pd.Series(orig_data1)
279
+ s2 = pd.Series(orig_data2)
280
+ result = s1.combine(s2, lambda x1, x2: x1 <= x2)
281
+ expected = pd.Series(
282
+ [a <= b for (a, b) in zip(list(orig_data1), list(orig_data2))],
283
+ dtype=self._combine_le_expected_dtype,
284
+ )
285
+ self.assert_series_equal(result, expected)
286
+
287
+ val = s1.iloc[0]
288
+ result = s1.combine(val, lambda x1, x2: x1 <= x2)
289
+ expected = pd.Series(
290
+ [a <= val for a in list(orig_data1)],
291
+ dtype=self._combine_le_expected_dtype,
292
+ )
293
+ self.assert_series_equal(result, expected)
294
+
295
+ def test_combine_add(self, data_repeated):
296
+ # GH 20825
297
+ orig_data1, orig_data2 = data_repeated(2)
298
+ s1 = pd.Series(orig_data1)
299
+ s2 = pd.Series(orig_data2)
300
+ result = s1.combine(s2, lambda x1, x2: x1 + x2)
301
+ with np.errstate(over="ignore"):
302
+ expected = pd.Series(
303
+ orig_data1._from_sequence(
304
+ [a + b for (a, b) in zip(list(orig_data1), list(orig_data2))]
305
+ )
306
+ )
307
+ self.assert_series_equal(result, expected)
308
+
309
+ val = s1.iloc[0]
310
+ result = s1.combine(val, lambda x1, x2: x1 + x2)
311
+ expected = pd.Series(
312
+ orig_data1._from_sequence([a + val for a in list(orig_data1)])
313
+ )
314
+ self.assert_series_equal(result, expected)
315
+
316
+ def test_combine_first(self, data):
317
+ # https://github.com/pandas-dev/pandas/issues/24147
318
+ a = pd.Series(data[:3])
319
+ b = pd.Series(data[2:5], index=[2, 3, 4])
320
+ result = a.combine_first(b)
321
+ expected = pd.Series(data[:5])
322
+ self.assert_series_equal(result, expected)
323
+
324
+ @pytest.mark.parametrize("frame", [True, False])
325
+ @pytest.mark.parametrize(
326
+ "periods, indices",
327
+ [(-2, [2, 3, 4, -1, -1]), (0, [0, 1, 2, 3, 4]), (2, [-1, -1, 0, 1, 2])],
328
+ )
329
+ def test_container_shift(self, data, frame, periods, indices):
330
+ # https://github.com/pandas-dev/pandas/issues/22386
331
+ subset = data[:5]
332
+ data = pd.Series(subset, name="A")
333
+ expected = pd.Series(subset.take(indices, allow_fill=True), name="A")
334
+
335
+ if frame:
336
+ result = data.to_frame(name="A").assign(B=1).shift(periods)
337
+ expected = pd.concat(
338
+ [expected, pd.Series([1] * 5, name="B").shift(periods)], axis=1
339
+ )
340
+ compare = self.assert_frame_equal
341
+ else:
342
+ result = data.shift(periods)
343
+ compare = self.assert_series_equal
344
+
345
+ compare(result, expected)
346
+
347
+ def test_shift_0_periods(self, data):
348
+ # GH#33856 shifting with periods=0 should return a copy, not same obj
349
+ result = data.shift(0)
350
+ assert data[0] != data[1] # otherwise below is invalid
351
+ data[0] = data[1]
352
+ assert result[0] != result[1] # i.e. not the same object/view
353
+
354
+ @pytest.mark.parametrize("periods", [1, -2])
355
+ def test_diff(self, data, periods):
356
+ data = data[:5]
357
+ if is_bool_dtype(data.dtype):
358
+ op = operator.xor
359
+ else:
360
+ op = operator.sub
361
+ try:
362
+ # does this array implement ops?
363
+ op(data, data)
364
+ except Exception:
365
+ pytest.skip(f"{type(data)} does not support diff")
366
+ s = pd.Series(data)
367
+ result = s.diff(periods)
368
+ expected = pd.Series(op(data, data.shift(periods)))
369
+ self.assert_series_equal(result, expected)
370
+
371
+ df = pd.DataFrame({"A": data, "B": [1.0] * 5})
372
+ result = df.diff(periods)
373
+ if periods == 1:
374
+ b = [np.nan, 0, 0, 0, 0]
375
+ else:
376
+ b = [0, 0, 0, np.nan, np.nan]
377
+ expected = pd.DataFrame({"A": expected, "B": b})
378
+ self.assert_frame_equal(result, expected)
379
+
380
+ @pytest.mark.parametrize(
381
+ "periods, indices",
382
+ [[-4, [-1, -1]], [-1, [1, -1]], [0, [0, 1]], [1, [-1, 0]], [4, [-1, -1]]],
383
+ )
384
+ def test_shift_non_empty_array(self, data, periods, indices):
385
+ # https://github.com/pandas-dev/pandas/issues/23911
386
+ subset = data[:2]
387
+ result = subset.shift(periods)
388
+ expected = subset.take(indices, allow_fill=True)
389
+ self.assert_extension_array_equal(result, expected)
390
+
391
+ @pytest.mark.parametrize("periods", [-4, -1, 0, 1, 4])
392
+ def test_shift_empty_array(self, data, periods):
393
+ # https://github.com/pandas-dev/pandas/issues/23911
394
+ empty = data[:0]
395
+ result = empty.shift(periods)
396
+ expected = empty
397
+ self.assert_extension_array_equal(result, expected)
398
+
399
+ def test_shift_zero_copies(self, data):
400
+ # GH#31502
401
+ result = data.shift(0)
402
+ assert result is not data
403
+
404
+ result = data[:0].shift(2)
405
+ assert result is not data
406
+
407
    def test_shift_fill_value(self, data):
        # shift with an explicit fill_value places that scalar in the
        # vacated slots instead of NA; here fill_value == data[0], so the
        # result can be expressed as a plain take.
        arr = data[:4]
        fill_value = data[0]
        result = arr.shift(1, fill_value=fill_value)
        expected = data.take([0, 0, 1, 2])
        self.assert_extension_array_equal(result, expected)

        result = arr.shift(-2, fill_value=fill_value)
        expected = data.take([2, 3, 0, 0])
        self.assert_extension_array_equal(result, expected)
417
+
418
    def test_not_hashable(self, data):
        # We are in general mutable, so not hashable
        with pytest.raises(TypeError, match="unhashable type"):
            hash(data)
422
+
423
    def test_hash_pandas_object_works(self, data, as_frame):
        # https://github.com/pandas-dev/pandas/issues/23066
        # hash_pandas_object must be deterministic: hashing the same
        # Series/DataFrame twice yields identical results.
        data = pd.Series(data)
        if as_frame:
            data = data.to_frame()
        a = pd.util.hash_pandas_object(data)
        b = pd.util.hash_pandas_object(data)
        self.assert_equal(a, b)
431
+
432
    def test_searchsorted(self, data_for_sorting, as_series):
        # data_for_sorting yields (b, c, a) with a < b < c; take([2, 0, 1])
        # rearranges it into sorted order [a, b, c].
        b, c, a = data_for_sorting
        arr = data_for_sorting.take([2, 0, 1])  # to get [a, b, c]

        if as_series:
            arr = pd.Series(arr)

        # scalar lookups: left side returns the first insertion point,
        # right side the one past the last equal element
        assert arr.searchsorted(a) == 0
        assert arr.searchsorted(a, side="right") == 1

        assert arr.searchsorted(b) == 1
        assert arr.searchsorted(b, side="right") == 2

        assert arr.searchsorted(c) == 2
        assert arr.searchsorted(c, side="right") == 3

        # array-valued lookup returns platform-int positions
        result = arr.searchsorted(arr.take([0, 2]))
        expected = np.array([0, 2], dtype=np.intp)

        tm.assert_numpy_array_equal(result, expected)

        # sorter: indices that sort the (unsorted) original data
        sorter = np.array([1, 2, 0])
        assert data_for_sorting.searchsorted(a, sorter=sorter) == 0
455
+
456
    def test_where_series(self, data, na_value, as_frame):
        # Series/DataFrame.where with an EA-backed object: scalar-NA fill
        # first, then an array `other`; mask(~cond, ...) must agree with
        # where(cond, ...) in both cases.
        assert data[0] != data[1]
        cls = type(data)
        a, b = data[:2]

        orig = pd.Series(cls._from_sequence([a, a, b, b], dtype=data.dtype))
        ser = orig.copy()
        cond = np.array([True, True, False, False])

        if as_frame:
            ser = ser.to_frame(name="a")
            # a frame needs a 2-D condition
            cond = cond.reshape(-1, 1)

        result = ser.where(cond)
        expected = pd.Series(
            cls._from_sequence([a, a, na_value, na_value], dtype=data.dtype)
        )

        if as_frame:
            expected = expected.to_frame(name="a")
        self.assert_equal(result, expected)

        # mask with the inverted condition is equivalent, including inplace
        ser.mask(~cond, inplace=True)
        self.assert_equal(ser, expected)

        # array other
        ser = orig.copy()
        if as_frame:
            ser = ser.to_frame(name="a")
        cond = np.array([True, False, True, True])
        other = cls._from_sequence([a, b, a, b], dtype=data.dtype)
        if as_frame:
            other = pd.DataFrame({"a": other})
            cond = pd.DataFrame({"a": cond})
        result = ser.where(cond, other)
        expected = pd.Series(cls._from_sequence([a, b, b, b], dtype=data.dtype))
        if as_frame:
            expected = expected.to_frame(name="a")
        self.assert_equal(result, expected)

        ser.mask(~cond, other, inplace=True)
        self.assert_equal(ser, expected)
498
+
499
    @pytest.mark.parametrize("repeats", [0, 1, 2, [1, 2, 3]])
    def test_repeat(self, data, repeats, as_series, use_numpy):
        # repeat (method and np.repeat) must match an element-by-element
        # Python-level expansion, for both scalar and per-element counts.
        arr = type(data)._from_sequence(data[:3], dtype=data.dtype)
        if as_series:
            arr = pd.Series(arr)

        result = np.repeat(arr, repeats) if use_numpy else arr.repeat(repeats)

        # normalize scalar repeats to a per-element list of length 3
        repeats = [repeats] * 3 if isinstance(repeats, int) else repeats
        expected = [x for x, n in zip(arr, repeats) for _ in range(n)]
        expected = type(data)._from_sequence(expected, dtype=data.dtype)
        if as_series:
            # the index is repeated alongside the values
            expected = pd.Series(expected, index=arr.index.repeat(repeats))

        self.assert_equal(result, expected)
514
+
515
    @pytest.mark.parametrize(
        "repeats, kwargs, error, msg",
        [
            (2, {"axis": 1}, ValueError, "axis"),
            (-1, {}, ValueError, "negative"),
            ([1, 2], {}, ValueError, "shape"),
            (2, {"foo": "bar"}, TypeError, "'foo'"),
        ],
    )
    def test_repeat_raises(self, data, repeats, kwargs, error, msg, use_numpy):
        # invalid repeat arguments (bad axis, negative count, length
        # mismatch, unknown kwarg) must raise, matching numpy semantics
        with pytest.raises(error, match=msg):
            if use_numpy:
                np.repeat(data, repeats, **kwargs)
            else:
                data.repeat(repeats, **kwargs)
530
+
531
    def test_delete(self, data):
        # delete with a scalar position drops that element
        result = data.delete(0)
        expected = data[1:]
        self.assert_extension_array_equal(result, expected)

        # delete with a list of positions drops each of them
        result = data.delete([1, 3])
        expected = data._concat_same_type([data[[0]], data[[2]], data[4:]])
        self.assert_extension_array_equal(result, expected)
539
+
540
    def test_insert(self, data):
        # insert at the beginning
        result = data[1:].insert(0, data[0])
        self.assert_extension_array_equal(result, data)

        # a fully-negative loc that points at position 0 is equivalent
        result = data[1:].insert(-len(data[1:]), data[0])
        self.assert_extension_array_equal(result, data)

        # insert at the middle
        result = data[:-1].insert(4, data[-1])

        # build the expected result as a take: shift positions >= 4 right
        # by one and place the inserted (last) element at slot 4
        taker = np.arange(len(data))
        taker[5:] = taker[4:-1]
        taker[4] = len(data) - 1
        expected = data.take(taker)
        self.assert_extension_array_equal(result, expected)
556
+
557
+ def test_insert_invalid(self, data, invalid_scalar):
558
+ item = invalid_scalar
559
+
560
+ with pytest.raises((TypeError, ValueError)):
561
+ data.insert(0, item)
562
+
563
+ with pytest.raises((TypeError, ValueError)):
564
+ data.insert(4, item)
565
+
566
+ with pytest.raises((TypeError, ValueError)):
567
+ data.insert(len(data) - 1, item)
568
+
569
    def test_insert_invalid_loc(self, data):
        # out-of-bounds positions raise IndexError; a non-integer position
        # raises TypeError (matching np.insert)
        ub = len(data)

        with pytest.raises(IndexError):
            data.insert(ub + 1, data[0])

        with pytest.raises(IndexError):
            data.insert(-ub - 1, data[0])

        with pytest.raises(TypeError):
            # we expect TypeError here instead of IndexError to match np.insert
            data.insert(1.5, data[0])
581
+
582
    @pytest.mark.parametrize("box", [pd.array, pd.Series, pd.DataFrame])
    def test_equals(self, data, na_value, as_series, box):
        # equals() across EA / Series / DataFrame boxes: reflexive, copy-
        # equal, and False for differing data, all-NA data, different
        # length, and non-array arguments.
        data2 = type(data)._from_sequence([data[0]] * len(data), dtype=data.dtype)
        data_na = type(data)._from_sequence([na_value] * len(data), dtype=data.dtype)

        data = tm.box_expected(data, box, transpose=False)
        data2 = tm.box_expected(data2, box, transpose=False)
        data_na = tm.box_expected(data_na, box, transpose=False)

        # we are asserting with `is True/False` explicitly, to test that the
        # result is an actual Python bool, and not something "truthy"

        assert data.equals(data) is True
        assert data.equals(data.copy()) is True

        # unequal other data
        assert data.equals(data2) is False
        assert data.equals(data_na) is False

        # different length
        assert data[:2].equals(data[:3]) is False

        # empty are equal
        assert data[:0].equals(data[:0]) is True

        # other types
        assert data.equals(None) is False
        assert data[[0]].equals(data[0]) is False
videochat2/lib/python3.10/site-packages/pandas/tests/extension/base/reduce.py ADDED
@@ -0,0 +1,75 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import warnings
2
+
3
+ import pytest
4
+
5
+ import pandas as pd
6
+ import pandas._testing as tm
7
+ from pandas.tests.extension.base.base import BaseExtensionTests
8
+
9
+
10
class BaseReduceTests(BaseExtensionTests):
    """
    Reduction specific tests. Generally these only
    make sense for numeric/boolean operations.
    """

    def check_reduce(self, s, op_name, skipna):
        # Compare the EA reduction against the same reduction on a float64
        # cast of the data, which serves as the reference result.
        res_op = getattr(s, op_name)
        exp_op = getattr(s.astype("float64"), op_name)
        if op_name == "count":
            # Series.count() takes no ``skipna`` keyword
            result = res_op()
            expected = exp_op()
        else:
            result = res_op(skipna=skipna)
            expected = exp_op(skipna=skipna)
        tm.assert_almost_equal(result, expected)
26
+
27
+
28
class BaseNoReduceTests(BaseReduceTests):
    """we don't define any reductions"""

    @pytest.mark.parametrize("skipna", [True, False])
    def test_reduce_series_numeric(self, data, all_numeric_reductions, skipna):
        # every numeric reduction must raise TypeError on this dtype
        op_name = all_numeric_reductions
        s = pd.Series(data)

        # NOTE(review): the trailing "|" leaves an empty alternative, so this
        # regex matches any message and the message check is effectively a
        # no-op — confirm whether that is intentional before tightening it.
        msg = (
            "[Cc]annot perform|Categorical is not ordered for operation|"
            "does not support reduction|"
        )

        with pytest.raises(TypeError, match=msg):
            getattr(s, op_name)(skipna=skipna)

    @pytest.mark.parametrize("skipna", [True, False])
    def test_reduce_series_boolean(self, data, all_boolean_reductions, skipna):
        # every boolean reduction must raise TypeError on this dtype
        op_name = all_boolean_reductions
        s = pd.Series(data)

        # NOTE(review): same vacuous trailing "|" as above — see
        # test_reduce_series_numeric.
        msg = (
            "[Cc]annot perform|Categorical is not ordered for operation|"
            "does not support reduction|"
        )

        with pytest.raises(TypeError, match=msg):
            getattr(s, op_name)(skipna=skipna)
56
+
57
+
58
class BaseNumericReduceTests(BaseReduceTests):
    """Run every numeric reduction against the float64 reference."""

    @pytest.mark.parametrize("skipna", [True, False])
    def test_reduce_series(self, data, all_numeric_reductions, skipna):
        op_name = all_numeric_reductions
        s = pd.Series(data)

        # min/max with empty produce numpy warnings
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", RuntimeWarning)
            self.check_reduce(s, op_name, skipna)
68
+
69
+
70
class BaseBooleanReduceTests(BaseReduceTests):
    """Run every boolean reduction (any/all) against the float64 reference."""

    @pytest.mark.parametrize("skipna", [True, False])
    def test_reduce_series(self, data, all_boolean_reductions, skipna):
        op_name = all_boolean_reductions
        s = pd.Series(data)
        self.check_reduce(s, op_name, skipna)
videochat2/lib/python3.10/site-packages/pandas/tests/extension/base/reshaping.py ADDED
@@ -0,0 +1,367 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import itertools
2
+
3
+ import numpy as np
4
+ import pytest
5
+
6
+ import pandas as pd
7
+ from pandas.api.extensions import ExtensionArray
8
+ from pandas.core.internals.blocks import EABackedBlock
9
+ from pandas.tests.extension.base.base import BaseExtensionTests
10
+
11
+
12
class BaseReshapingTests(BaseExtensionTests):
    """Tests for reshaping and concatenation."""

    @pytest.mark.parametrize("in_frame", [True, False])
    def test_concat(self, data, in_frame):
        # concatenating an EA-backed Series/DataFrame with itself preserves
        # length, dtype, and the EA-backed block/array internals
        wrapped = pd.Series(data)
        if in_frame:
            wrapped = pd.DataFrame(wrapped)
        result = pd.concat([wrapped, wrapped], ignore_index=True)

        assert len(result) == len(data) * 2

        if in_frame:
            dtype = result.dtypes[0]
        else:
            dtype = result.dtype

        assert dtype == data.dtype
        if hasattr(result._mgr, "blocks"):
            assert isinstance(result._mgr.blocks[0], EABackedBlock)
        assert isinstance(result._mgr.arrays[0], ExtensionArray)

    @pytest.mark.parametrize("in_frame", [True, False])
    def test_concat_all_na_block(self, data_missing, in_frame):
        # an all-NA block concatenated with a valid block keeps the EA dtype
        valid_block = pd.Series(data_missing.take([1, 1]), index=[0, 1])
        na_block = pd.Series(data_missing.take([0, 0]), index=[2, 3])
        if in_frame:
            valid_block = pd.DataFrame({"a": valid_block})
            na_block = pd.DataFrame({"a": na_block})
        result = pd.concat([valid_block, na_block])
        if in_frame:
            expected = pd.DataFrame({"a": data_missing.take([1, 1, 0, 0])})
            self.assert_frame_equal(result, expected)
        else:
            expected = pd.Series(data_missing.take([1, 1, 0, 0]))
            self.assert_series_equal(result, expected)

    def test_concat_mixed_dtypes(self, data):
        # https://github.com/pandas-dev/pandas/issues/20762
        # mixing EA, integer, and categorical columns falls back to object
        df1 = pd.DataFrame({"A": data[:3]})
        df2 = pd.DataFrame({"A": [1, 2, 3]})
        df3 = pd.DataFrame({"A": ["a", "b", "c"]}).astype("category")
        dfs = [df1, df2, df3]

        # dataframes
        result = pd.concat(dfs)
        expected = pd.concat([x.astype(object) for x in dfs])
        self.assert_frame_equal(result, expected)

        # series
        result = pd.concat([x["A"] for x in dfs])
        expected = pd.concat([x["A"].astype(object) for x in dfs])
        self.assert_series_equal(result, expected)

        # simple test for just EA and one other
        result = pd.concat([df1, df2.astype(object)])
        expected = pd.concat([df1.astype("object"), df2.astype("object")])
        self.assert_frame_equal(result, expected)

        result = pd.concat([df1["A"], df2["A"].astype(object)])
        expected = pd.concat([df1["A"].astype("object"), df2["A"].astype("object")])
        self.assert_series_equal(result, expected)

    def test_concat_columns(self, data, na_value):
        # axis=1 concat keeps the EA column intact; non-aligned indexes
        # introduce NA fill values
        df1 = pd.DataFrame({"A": data[:3]})
        df2 = pd.DataFrame({"B": [1, 2, 3]})

        expected = pd.DataFrame({"A": data[:3], "B": [1, 2, 3]})
        result = pd.concat([df1, df2], axis=1)
        self.assert_frame_equal(result, expected)
        result = pd.concat([df1["A"], df2["B"]], axis=1)
        self.assert_frame_equal(result, expected)

        # non-aligned
        df2 = pd.DataFrame({"B": [1, 2, 3]}, index=[1, 2, 3])
        expected = pd.DataFrame(
            {
                "A": data._from_sequence(list(data[:3]) + [na_value], dtype=data.dtype),
                "B": [np.nan, 1, 2, 3],
            }
        )

        result = pd.concat([df1, df2], axis=1)
        self.assert_frame_equal(result, expected)
        result = pd.concat([df1["A"], df2["B"]], axis=1)
        self.assert_frame_equal(result, expected)

    def test_concat_extension_arrays_copy_false(self, data, na_value):
        # GH 20756
        df1 = pd.DataFrame({"A": data[:3]})
        df2 = pd.DataFrame({"B": data[3:7]})
        expected = pd.DataFrame(
            {
                "A": data._from_sequence(list(data[:3]) + [na_value], dtype=data.dtype),
                "B": data[3:7],
            }
        )
        result = pd.concat([df1, df2], axis=1, copy=False)
        self.assert_frame_equal(result, expected)

    def test_concat_with_reindex(self, data):
        # GH-33027
        # ignore_index concat of disjoint columns fills the gaps with NA
        # (expressed as a take with allow_fill)
        a = pd.DataFrame({"a": data[:5]})
        b = pd.DataFrame({"b": data[:5]})
        result = pd.concat([a, b], ignore_index=True)
        expected = pd.DataFrame(
            {
                "a": data.take(list(range(5)) + ([-1] * 5), allow_fill=True),
                "b": data.take(([-1] * 5) + list(range(5)), allow_fill=True),
            }
        )
        self.assert_frame_equal(result, expected)

    def test_align(self, data, na_value):
        # aligning two Series with offset indexes pads each with na_value
        a = data[:3]
        b = data[2:5]
        r1, r2 = pd.Series(a).align(pd.Series(b, index=[1, 2, 3]))

        # Assumes that the ctor can take a list of scalars of the type
        e1 = pd.Series(data._from_sequence(list(a) + [na_value], dtype=data.dtype))
        e2 = pd.Series(data._from_sequence([na_value] + list(b), dtype=data.dtype))
        self.assert_series_equal(r1, e1)
        self.assert_series_equal(r2, e2)

    def test_align_frame(self, data, na_value):
        # same as test_align, but for DataFrames
        a = data[:3]
        b = data[2:5]
        r1, r2 = pd.DataFrame({"A": a}).align(pd.DataFrame({"A": b}, index=[1, 2, 3]))

        # Assumes that the ctor can take a list of scalars of the type
        e1 = pd.DataFrame(
            {"A": data._from_sequence(list(a) + [na_value], dtype=data.dtype)}
        )
        e2 = pd.DataFrame(
            {"A": data._from_sequence([na_value] + list(b), dtype=data.dtype)}
        )
        self.assert_frame_equal(r1, e1)
        self.assert_frame_equal(r2, e2)

    def test_align_series_frame(self, data, na_value):
        # https://github.com/pandas-dev/pandas/issues/20576
        ser = pd.Series(data, name="a")
        df = pd.DataFrame({"col": np.arange(len(ser) + 1)})
        r1, r2 = ser.align(df)

        e1 = pd.Series(
            data._from_sequence(list(data) + [na_value], dtype=data.dtype),
            name=ser.name,
        )

        self.assert_series_equal(r1, e1)
        self.assert_frame_equal(r2, df)

    def test_set_frame_expand_regular_with_extension(self, data):
        # assigning an EA column into a frame of regular columns
        df = pd.DataFrame({"A": [1] * len(data)})
        df["B"] = data
        expected = pd.DataFrame({"A": [1] * len(data), "B": data})
        self.assert_frame_equal(df, expected)

    def test_set_frame_expand_extension_with_regular(self, data):
        # assigning a regular column into a frame holding an EA column
        df = pd.DataFrame({"A": data})
        df["B"] = [1] * len(data)
        expected = pd.DataFrame({"A": data, "B": [1] * len(data)})
        self.assert_frame_equal(df, expected)

    def test_set_frame_overwrite_object(self, data):
        # https://github.com/pandas-dev/pandas/issues/20555
        # overwriting an object column with EA data adopts the EA dtype
        df = pd.DataFrame({"A": [1] * len(data)}, dtype=object)
        df["A"] = data
        assert df.dtypes["A"] == data.dtype

    def test_merge(self, data, na_value):
        # GH-20743
        # inner and outer merges must carry EA columns through, filling
        # unmatched outer rows with na_value
        df1 = pd.DataFrame({"ext": data[:3], "int1": [1, 2, 3], "key": [0, 1, 2]})
        df2 = pd.DataFrame({"int2": [1, 2, 3, 4], "key": [0, 0, 1, 3]})

        res = pd.merge(df1, df2)
        exp = pd.DataFrame(
            {
                "int1": [1, 1, 2],
                "int2": [1, 2, 3],
                "key": [0, 0, 1],
                "ext": data._from_sequence(
                    [data[0], data[0], data[1]], dtype=data.dtype
                ),
            }
        )
        self.assert_frame_equal(res, exp[["ext", "int1", "key", "int2"]])

        res = pd.merge(df1, df2, how="outer")
        exp = pd.DataFrame(
            {
                "int1": [1, 1, 2, 3, np.nan],
                "int2": [1, 2, 3, np.nan, 4],
                "key": [0, 0, 1, 2, 3],
                "ext": data._from_sequence(
                    [data[0], data[0], data[1], data[2], na_value], dtype=data.dtype
                ),
            }
        )
        self.assert_frame_equal(res, exp[["ext", "int1", "key", "int2"]])

    def test_merge_on_extension_array(self, data):
        # GH 23020
        # merging on an EA-typed key column
        a, b = data[:2]
        key = type(data)._from_sequence([a, b], dtype=data.dtype)

        df = pd.DataFrame({"key": key, "val": [1, 2]})
        result = pd.merge(df, df, on="key")
        expected = pd.DataFrame({"key": key, "val_x": [1, 2], "val_y": [1, 2]})
        self.assert_frame_equal(result, expected)

        # order
        result = pd.merge(df.iloc[[1, 0]], df, on="key")
        expected = expected.iloc[[1, 0]].reset_index(drop=True)
        self.assert_frame_equal(result, expected)

    def test_merge_on_extension_array_duplicates(self, data):
        # GH 23020
        # duplicate EA key values produce the full cartesian match
        a, b = data[:2]
        key = type(data)._from_sequence([a, b, a], dtype=data.dtype)
        df1 = pd.DataFrame({"key": key, "val": [1, 2, 3]})
        df2 = pd.DataFrame({"key": key, "val": [1, 2, 3]})

        result = pd.merge(df1, df2, on="key")
        expected = pd.DataFrame(
            {
                "key": key.take([0, 0, 0, 0, 1]),
                "val_x": [1, 1, 3, 3, 2],
                "val_y": [1, 3, 1, 3, 2],
            }
        )
        self.assert_frame_equal(result, expected)

    @pytest.mark.parametrize(
        "columns",
        [
            ["A", "B"],
            pd.MultiIndex.from_tuples(
                [("A", "a"), ("A", "b")], names=["outer", "inner"]
            ),
        ],
    )
    def test_stack(self, data, columns):
        # stacking EA columns must match the object-dtype reference and
        # preserve the EA dtype in the stacked result
        df = pd.DataFrame({"A": data[:5], "B": data[:5]})
        df.columns = columns
        result = df.stack()
        expected = df.astype(object).stack()
        # we need a second astype(object), in case the constructor inferred
        # object -> specialized, as is done for period.
        expected = expected.astype(object)

        if isinstance(expected, pd.Series):
            assert result.dtype == df.iloc[:, 0].dtype
        else:
            assert all(result.dtypes == df.iloc[:, 0].dtype)

        result = result.astype(object)
        self.assert_equal(result, expected)

    @pytest.mark.parametrize(
        "index",
        [
            # Two levels, uniform.
            pd.MultiIndex.from_product(([["A", "B"], ["a", "b"]]), names=["a", "b"]),
            # non-uniform
            pd.MultiIndex.from_tuples([("A", "a"), ("A", "b"), ("B", "b")]),
            # three levels, non-uniform
            pd.MultiIndex.from_product([("A", "B"), ("a", "b", "c"), (0, 1, 2)]),
            pd.MultiIndex.from_tuples(
                [
                    ("A", "a", 1),
                    ("A", "b", 0),
                    ("A", "a", 0),
                    ("B", "a", 0),
                    ("B", "c", 1),
                ]
            ),
        ],
    )
    @pytest.mark.parametrize("obj", ["series", "frame"])
    def test_unstack(self, data, index, obj):
        # unstacking at every proper subset of levels keeps EA-typed
        # columns and matches the object-dtype reference
        data = data[: len(index)]
        if obj == "series":
            ser = pd.Series(data, index=index)
        else:
            ser = pd.DataFrame({"A": data, "B": data}, index=index)

        n = index.nlevels
        levels = list(range(n))
        # [0, 1, 2]
        # [(0,), (1,), (2,), (0, 1), (0, 2), (1, 0), (1, 2), (2, 0), (2, 1)]
        combinations = itertools.chain.from_iterable(
            itertools.permutations(levels, i) for i in range(1, n)
        )

        for level in combinations:
            result = ser.unstack(level=level)
            assert all(
                isinstance(result[col].array, type(data)) for col in result.columns
            )

            if obj == "series":
                # We should get the same result with to_frame+unstack+droplevel
                df = ser.to_frame()

                alt = df.unstack(level=level).droplevel(0, axis=1)
                self.assert_frame_equal(result, alt)

            obj_ser = ser.astype(object)

            expected = obj_ser.unstack(level=level, fill_value=data.dtype.na_value)
            if obj == "series":
                assert (expected.dtypes == object).all()

            result = result.astype(object)
            self.assert_frame_equal(result, expected)

    def test_ravel(self, data):
        # as long as EA is 1D-only, ravel is a no-op
        result = data.ravel()
        assert type(result) == type(data)

        # Check that we have a view, not a copy
        result[0] = result[1]
        assert data[0] == data[1]

    def test_transpose(self, data):
        # transpose of a 1D EA returns a new object of the same type/shape
        result = data.transpose()
        assert type(result) == type(data)

        # check we get a new object
        assert result is not data

        # If we ever _did_ support 2D, shape should be reversed
        assert result.shape == data.shape[::-1]

        # Check that we have a view, not a copy
        result[0] = result[1]
        assert data[0] == data[1]

    def test_transpose_frame(self, data):
        # frame transpose round-trips and produces EA-typed columns
        df = pd.DataFrame({"A": data[:4], "B": data[:4]}, index=["a", "b", "c", "d"])
        result = df.T
        expected = pd.DataFrame(
            {
                "a": type(data)._from_sequence([data[0]] * 2, dtype=data.dtype),
                "b": type(data)._from_sequence([data[1]] * 2, dtype=data.dtype),
                "c": type(data)._from_sequence([data[2]] * 2, dtype=data.dtype),
                "d": type(data)._from_sequence([data[3]] * 2, dtype=data.dtype),
            },
            index=["A", "B"],
        )
        self.assert_frame_equal(result, expected)
        self.assert_frame_equal(np.transpose(np.transpose(df)), df)
        self.assert_frame_equal(np.transpose(np.transpose(df[["A"]])), df[["A"]])
videochat2/lib/python3.10/site-packages/pandas/tests/io/conftest.py ADDED
@@ -0,0 +1,213 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import shlex
3
+ import subprocess
4
+ import time
5
+
6
+ import pytest
7
+
8
+ from pandas.compat import (
9
+ is_ci_environment,
10
+ is_platform_arm,
11
+ is_platform_mac,
12
+ is_platform_windows,
13
+ )
14
+ import pandas.util._test_decorators as td
15
+
16
+ import pandas._testing as tm
17
+
18
+ from pandas.io.parsers import read_csv
19
+
20
+
21
@pytest.fixture
def tips_file(datapath):
    """Path to the tips dataset"""
    return datapath("io", "data", "csv", "tips.csv")
25
+
26
+
27
@pytest.fixture
def jsonl_file(datapath):
    """Path to a JSONL dataset"""
    return datapath("io", "parser", "data", "items.jsonl")
31
+
32
+
33
@pytest.fixture
def salaries_table(datapath):
    """DataFrame with the salaries dataset"""
    # the file is tab-separated
    return read_csv(datapath("io", "parser", "data", "salaries.csv"), sep="\t")
37
+
38
+
39
@pytest.fixture
def feather_file(datapath):
    """Path to a feather dataset written with feather 0.3.1."""
    return datapath("io", "data", "feather", "feather-0_3_1.feather")
42
+
43
+
44
@pytest.fixture
def s3so(worker_id):
    """
    Storage options pointing at the mock S3 endpoint for this test worker.

    On CI a single shared moto service runs on port 5000; locally each
    pytest-xdist worker gets its own port (555<worker-number>), with the
    non-distributed "master" run using port 5555.
    """
    if is_ci_environment():
        url = "http://localhost:5000/"
    else:
        worker_id = "5" if worker_id == "master" else worker_id.lstrip("gw")
        url = f"http://127.0.0.1:555{worker_id}/"
    return {"client_kwargs": {"endpoint_url": url}}
52
+
53
+
54
@pytest.fixture(scope="session")
def s3_base(worker_id):
    """
    Fixture for mocking S3 interaction.

    Sets up moto server in separate process locally
    Return url for motoserver/moto CI service
    """
    pytest.importorskip("s3fs")
    pytest.importorskip("boto3")

    with tm.ensure_safe_environment_variables():
        # temporary workaround as moto fails for botocore >= 1.11 otherwise,
        # see https://github.com/spulec/moto/issues/1924 & 1952
        os.environ.setdefault("AWS_ACCESS_KEY_ID", "foobar_key")
        os.environ.setdefault("AWS_SECRET_ACCESS_KEY", "foobar_secret")
        if is_ci_environment():
            if is_platform_arm() or is_platform_mac() or is_platform_windows():
                # NOT RUN on Windows/macOS/ARM, only Ubuntu
                # - subprocess in CI can cause timeouts
                # - GitHub Actions do not support
                # container services for the above OSs
                # - CircleCI will probably hit the Docker rate pull limit
                pytest.skip(
                    "S3 tests do not have a corresponding service in "
                    "Windows, macOS or ARM platforms"
                )
            else:
                # CI provides a moto container service on a fixed port
                yield "http://localhost:5000"
        else:
            requests = pytest.importorskip("requests")
            pytest.importorskip("moto", minversion="1.3.14")
            pytest.importorskip("flask")  # server mode needs flask too

            # Launching moto in server mode, i.e., as a separate process
            # with an S3 endpoint on localhost

            # each pytest-xdist worker gets its own port (see s3so)
            worker_id = "5" if worker_id == "master" else worker_id.lstrip("gw")
            endpoint_port = f"555{worker_id}"
            endpoint_uri = f"http://127.0.0.1:{endpoint_port}/"

            # pipe to null to avoid logging in terminal
            with subprocess.Popen(
                shlex.split(f"moto_server s3 -p {endpoint_port}"),
                stdout=subprocess.DEVNULL,
                stderr=subprocess.DEVNULL,
            ) as proc:
                # poll for up to ~5 seconds until the server accepts requests
                timeout = 5
                while timeout > 0:
                    try:
                        # OK to go once server is accepting connections
                        r = requests.get(endpoint_uri)
                        if r.ok:
                            break
                    except Exception:
                        pass
                    timeout -= 0.1
                    time.sleep(0.1)
                yield endpoint_uri

                proc.terminate()
115
+
116
+
117
@pytest.fixture
def s3_resource(s3_base, tips_file, jsonl_file, feather_file):
    """
    Sets up S3 bucket with contents

    The primary bucket name is "pandas-test". The following datasets
    are loaded.

    - tips.csv
    - tips.csv.gz
    - tips.csv.bz2
    - items.jsonl

    A private bucket "cant_get_it" is also created. The boto3 s3 resource
    is yielded by the fixture.
    """
    import boto3
    import s3fs

    # (object key, local file path) pairs to upload into each bucket
    test_s3_files = [
        ("tips#1.csv", tips_file),
        ("tips.csv", tips_file),
        ("tips.csv.gz", tips_file + ".gz"),
        ("tips.csv.bz2", tips_file + ".bz2"),
        ("items.jsonl", jsonl_file),
        ("simple_dataset.feather", feather_file),
    ]

    def add_tips_files(bucket_name):
        # upload every fixture file into the given bucket
        for s3_key, file_name in test_s3_files:
            with open(file_name, "rb") as f:
                cli.put_object(Bucket=bucket_name, Key=s3_key, Body=f)

    bucket = "pandas-test"
    conn = boto3.resource("s3", endpoint_url=s3_base)
    cli = boto3.client("s3", endpoint_url=s3_base)

    try:
        cli.create_bucket(Bucket=bucket)
    except Exception:
        # OK is bucket already exists
        pass
    try:
        cli.create_bucket(Bucket="cant_get_it", ACL="private")
    except Exception:
        # OK is bucket already exists
        pass
    # wait (up to ~2s) for the buckets to become visible
    timeout = 2
    while not cli.list_buckets()["Buckets"] and timeout > 0:
        time.sleep(0.1)
        timeout -= 0.1

    add_tips_files(bucket)
    add_tips_files("cant_get_it")
    # drop any cached filesystem so tests see the fresh buckets
    s3fs.S3FileSystem.clear_instance_cache()
    yield conn

    # teardown: best-effort removal of both buckets, then wait for them
    # to disappear so the next test starts clean
    s3 = s3fs.S3FileSystem(client_kwargs={"endpoint_url": s3_base})

    try:
        s3.rm(bucket, recursive=True)
    except Exception:
        pass
    try:
        s3.rm("cant_get_it", recursive=True)
    except Exception:
        pass
    timeout = 2
    while cli.list_buckets()["Buckets"] and timeout > 0:
        time.sleep(0.1)
        timeout -= 0.1
188
+
189
+
190
# (file extension, compression name) pairs used by the compression fixtures
# below; the first entry is an extension pandas does not recognize, so it
# round-trips uncompressed.
_compression_formats_params = [
    (".no_compress", None),
    ("", None),
    (".gz", "gzip"),
    (".GZ", "gzip"),
    (".bz2", "bz2"),
    (".BZ2", "bz2"),
    (".zip", "zip"),
    (".ZIP", "zip"),
    (".xz", "xz"),
    (".XZ", "xz"),
    pytest.param((".zst", "zstd"), marks=td.skip_if_no("zstandard")),
    pytest.param((".ZST", "zstd"), marks=td.skip_if_no("zstandard")),
]
204
+
205
+
206
# skips the ".no_compress" entry, which has no matching compression
@pytest.fixture(params=_compression_formats_params[1:])
def compression_format(request):
    """(extension, compression-name) pair for each supported format."""
    return request.param
209
+
210
+
211
@pytest.fixture(params=_compression_formats_params)
def compression_ext(request):
    """File extension alone, including the unrecognized ".no_compress"."""
    return request.param[0]
videochat2/lib/python3.10/site-packages/pandas/tests/io/formats/test_eng_formatting.py ADDED
@@ -0,0 +1,234 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+
3
+ from pandas import DataFrame
4
+ import pandas._testing as tm
5
+
6
+ import pandas.io.formats.format as fmt
7
+
8
+
9
+ class TestEngFormatter:
10
+ def test_eng_float_formatter(self):
11
+ df = DataFrame({"A": [1.41, 141.0, 14100, 1410000.0]})
12
+
13
+ fmt.set_eng_float_format()
14
+ result = df.to_string()
15
+ expected = (
16
+ " A\n"
17
+ "0 1.410E+00\n"
18
+ "1 141.000E+00\n"
19
+ "2 14.100E+03\n"
20
+ "3 1.410E+06"
21
+ )
22
+ assert result == expected
23
+
24
+ fmt.set_eng_float_format(use_eng_prefix=True)
25
+ result = df.to_string()
26
+ expected = " A\n0 1.410\n1 141.000\n2 14.100k\n3 1.410M"
27
+ assert result == expected
28
+
29
+ fmt.set_eng_float_format(accuracy=0)
30
+ result = df.to_string()
31
+ expected = " A\n0 1E+00\n1 141E+00\n2 14E+03\n3 1E+06"
32
+ assert result == expected
33
+
34
+ tm.reset_display_options()
35
+
36
+ def compare(self, formatter, input, output):
37
+ formatted_input = formatter(input)
38
+ assert formatted_input == output
39
+
40
+ def compare_all(self, formatter, in_out):
41
+ """
42
+ Parameters:
43
+ -----------
44
+ formatter: EngFormatter under test
45
+ in_out: list of tuples. Each tuple = (number, expected_formatting)
46
+
47
+ It is tested if 'formatter(number) == expected_formatting'.
48
+ *number* should be >= 0 because formatter(-number) == fmt is also
49
+ tested. *fmt* is derived from *expected_formatting*
50
+ """
51
+ for input, output in in_out:
52
+ self.compare(formatter, input, output)
53
+ self.compare(formatter, -input, "-" + output[1:])
54
+
55
+ def test_exponents_with_eng_prefix(self):
56
+ formatter = fmt.EngFormatter(accuracy=3, use_eng_prefix=True)
57
+ f = np.sqrt(2)
58
+ in_out = [
59
+ (f * 10**-24, " 1.414y"),
60
+ (f * 10**-23, " 14.142y"),
61
+ (f * 10**-22, " 141.421y"),
62
+ (f * 10**-21, " 1.414z"),
63
+ (f * 10**-20, " 14.142z"),
64
+ (f * 10**-19, " 141.421z"),
65
+ (f * 10**-18, " 1.414a"),
66
+ (f * 10**-17, " 14.142a"),
67
+ (f * 10**-16, " 141.421a"),
68
+ (f * 10**-15, " 1.414f"),
69
+ (f * 10**-14, " 14.142f"),
70
+ (f * 10**-13, " 141.421f"),
71
+ (f * 10**-12, " 1.414p"),
72
+ (f * 10**-11, " 14.142p"),
73
+ (f * 10**-10, " 141.421p"),
74
+ (f * 10**-9, " 1.414n"),
75
+ (f * 10**-8, " 14.142n"),
76
+ (f * 10**-7, " 141.421n"),
77
+ (f * 10**-6, " 1.414u"),
78
+ (f * 10**-5, " 14.142u"),
79
+ (f * 10**-4, " 141.421u"),
80
+ (f * 10**-3, " 1.414m"),
81
+ (f * 10**-2, " 14.142m"),
82
+ (f * 10**-1, " 141.421m"),
83
+ (f * 10**0, " 1.414"),
84
+ (f * 10**1, " 14.142"),
85
+ (f * 10**2, " 141.421"),
86
+ (f * 10**3, " 1.414k"),
87
+ (f * 10**4, " 14.142k"),
88
+ (f * 10**5, " 141.421k"),
89
+ (f * 10**6, " 1.414M"),
90
+ (f * 10**7, " 14.142M"),
91
+ (f * 10**8, " 141.421M"),
92
+ (f * 10**9, " 1.414G"),
93
+ (f * 10**10, " 14.142G"),
94
+ (f * 10**11, " 141.421G"),
95
+ (f * 10**12, " 1.414T"),
96
+ (f * 10**13, " 14.142T"),
97
+ (f * 10**14, " 141.421T"),
98
+ (f * 10**15, " 1.414P"),
99
+ (f * 10**16, " 14.142P"),
100
+ (f * 10**17, " 141.421P"),
101
+ (f * 10**18, " 1.414E"),
102
+ (f * 10**19, " 14.142E"),
103
+ (f * 10**20, " 141.421E"),
104
+ (f * 10**21, " 1.414Z"),
105
+ (f * 10**22, " 14.142Z"),
106
+ (f * 10**23, " 141.421Z"),
107
+ (f * 10**24, " 1.414Y"),
108
+ (f * 10**25, " 14.142Y"),
109
+ (f * 10**26, " 141.421Y"),
110
+ ]
111
+ self.compare_all(formatter, in_out)
112
+
113
+ def test_exponents_without_eng_prefix(self):
114
+ formatter = fmt.EngFormatter(accuracy=4, use_eng_prefix=False)
115
+ f = np.pi
116
+ in_out = [
117
+ (f * 10**-24, " 3.1416E-24"),
118
+ (f * 10**-23, " 31.4159E-24"),
119
+ (f * 10**-22, " 314.1593E-24"),
120
+ (f * 10**-21, " 3.1416E-21"),
121
+ (f * 10**-20, " 31.4159E-21"),
122
+ (f * 10**-19, " 314.1593E-21"),
123
+ (f * 10**-18, " 3.1416E-18"),
124
+ (f * 10**-17, " 31.4159E-18"),
125
+ (f * 10**-16, " 314.1593E-18"),
126
+ (f * 10**-15, " 3.1416E-15"),
127
+ (f * 10**-14, " 31.4159E-15"),
128
+ (f * 10**-13, " 314.1593E-15"),
129
+ (f * 10**-12, " 3.1416E-12"),
130
+ (f * 10**-11, " 31.4159E-12"),
131
+ (f * 10**-10, " 314.1593E-12"),
132
+ (f * 10**-9, " 3.1416E-09"),
133
+ (f * 10**-8, " 31.4159E-09"),
134
+ (f * 10**-7, " 314.1593E-09"),
135
+ (f * 10**-6, " 3.1416E-06"),
136
+ (f * 10**-5, " 31.4159E-06"),
137
+ (f * 10**-4, " 314.1593E-06"),
138
+ (f * 10**-3, " 3.1416E-03"),
139
+ (f * 10**-2, " 31.4159E-03"),
140
+ (f * 10**-1, " 314.1593E-03"),
141
+ (f * 10**0, " 3.1416E+00"),
142
+ (f * 10**1, " 31.4159E+00"),
143
+ (f * 10**2, " 314.1593E+00"),
144
+ (f * 10**3, " 3.1416E+03"),
145
+ (f * 10**4, " 31.4159E+03"),
146
+ (f * 10**5, " 314.1593E+03"),
147
+ (f * 10**6, " 3.1416E+06"),
148
+ (f * 10**7, " 31.4159E+06"),
149
+ (f * 10**8, " 314.1593E+06"),
150
+ (f * 10**9, " 3.1416E+09"),
151
+ (f * 10**10, " 31.4159E+09"),
152
+ (f * 10**11, " 314.1593E+09"),
153
+ (f * 10**12, " 3.1416E+12"),
154
+ (f * 10**13, " 31.4159E+12"),
155
+ (f * 10**14, " 314.1593E+12"),
156
+ (f * 10**15, " 3.1416E+15"),
157
+ (f * 10**16, " 31.4159E+15"),
158
+ (f * 10**17, " 314.1593E+15"),
159
+ (f * 10**18, " 3.1416E+18"),
160
+ (f * 10**19, " 31.4159E+18"),
161
+ (f * 10**20, " 314.1593E+18"),
162
+ (f * 10**21, " 3.1416E+21"),
163
+ (f * 10**22, " 31.4159E+21"),
164
+ (f * 10**23, " 314.1593E+21"),
165
+ (f * 10**24, " 3.1416E+24"),
166
+ (f * 10**25, " 31.4159E+24"),
167
+ (f * 10**26, " 314.1593E+24"),
168
+ ]
169
+ self.compare_all(formatter, in_out)
170
+
171
+ def test_rounding(self):
172
+ formatter = fmt.EngFormatter(accuracy=3, use_eng_prefix=True)
173
+ in_out = [
174
+ (5.55555, " 5.556"),
175
+ (55.5555, " 55.556"),
176
+ (555.555, " 555.555"),
177
+ (5555.55, " 5.556k"),
178
+ (55555.5, " 55.556k"),
179
+ (555555, " 555.555k"),
180
+ ]
181
+ self.compare_all(formatter, in_out)
182
+
183
+ formatter = fmt.EngFormatter(accuracy=1, use_eng_prefix=True)
184
+ in_out = [
185
+ (5.55555, " 5.6"),
186
+ (55.5555, " 55.6"),
187
+ (555.555, " 555.6"),
188
+ (5555.55, " 5.6k"),
189
+ (55555.5, " 55.6k"),
190
+ (555555, " 555.6k"),
191
+ ]
192
+ self.compare_all(formatter, in_out)
193
+
194
+ formatter = fmt.EngFormatter(accuracy=0, use_eng_prefix=True)
195
+ in_out = [
196
+ (5.55555, " 6"),
197
+ (55.5555, " 56"),
198
+ (555.555, " 556"),
199
+ (5555.55, " 6k"),
200
+ (55555.5, " 56k"),
201
+ (555555, " 556k"),
202
+ ]
203
+ self.compare_all(formatter, in_out)
204
+
205
+ formatter = fmt.EngFormatter(accuracy=3, use_eng_prefix=True)
206
+ result = formatter(0)
207
+ assert result == " 0.000"
208
+
209
+ def test_nan(self):
210
+ # Issue #11981
211
+
212
+ formatter = fmt.EngFormatter(accuracy=1, use_eng_prefix=True)
213
+ result = formatter(np.nan)
214
+ assert result == "NaN"
215
+
216
+ df = DataFrame(
217
+ {
218
+ "a": [1.5, 10.3, 20.5],
219
+ "b": [50.3, 60.67, 70.12],
220
+ "c": [100.2, 101.33, 120.33],
221
+ }
222
+ )
223
+ pt = df.pivot_table(values="a", index="b", columns="c")
224
+ fmt.set_eng_float_format(accuracy=1)
225
+ result = pt.to_string()
226
+ assert "NaN" in result
227
+ tm.reset_display_options()
228
+
229
+ def test_inf(self):
230
+ # Issue #11981
231
+
232
+ formatter = fmt.EngFormatter(accuracy=1, use_eng_prefix=True)
233
+ result = formatter(np.inf)
234
+ assert result == "inf"
videochat2/lib/python3.10/site-packages/pandas/tests/io/formats/test_format.py ADDED
The diff for this file is too large to render. See raw diff
 
videochat2/lib/python3.10/site-packages/pandas/tests/io/formats/test_info.py ADDED
@@ -0,0 +1,503 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from io import StringIO
2
+ import re
3
+ from string import ascii_uppercase as uppercase
4
+ import sys
5
+ import textwrap
6
+
7
+ import numpy as np
8
+ import pytest
9
+
10
+ from pandas.compat import (
11
+ IS64,
12
+ PYPY,
13
+ )
14
+
15
+ from pandas import (
16
+ CategoricalIndex,
17
+ DataFrame,
18
+ MultiIndex,
19
+ Series,
20
+ date_range,
21
+ option_context,
22
+ )
23
+ import pandas._testing as tm
24
+
25
+
26
@pytest.fixture
def duplicate_columns_frame():
    """Dataframe with duplicate column names.

    A 1500x4 random frame labelled ``["a", "a", "b", "b"]`` so tests can
    exercise ``info()`` on repeated column labels.
    """
    return DataFrame(np.random.randn(1500, 4), columns=["a", "a", "b", "b"])
30
+
31
+
32
def test_info_empty():
    # GH #45494 - info() on a completely empty frame should render a
    # minimal three-line summary.
    out = StringIO()
    DataFrame().info(buf=out)
    expected = (
        "<class 'pandas.core.frame.DataFrame'>\n"
        "RangeIndex: 0 entries\n"
        "Empty DataFrame\n"
    )
    assert out.getvalue() == expected
45
+
46
+
47
def test_info_categorical_column_smoke_test():
    """Smoke test: info() runs on a frame holding a categorical column."""
    size = 2500
    frame = DataFrame({"int64": np.random.randint(100, size=size)})
    labels = np.array(list("abcdefghij")).take(np.random.randint(0, 10, size=size))
    frame["category"] = Series(labels).astype("category")
    frame.isna()  # smoke call; result intentionally discarded
    frame.info(buf=StringIO())

    # A filtered subset (fewer categories actually present) must also render.
    subset = frame[frame["category"] == "d"]
    subset.info(buf=StringIO())
60
+
61
+
62
@pytest.mark.parametrize(
    "fixture_func_name",
    [
        "int_frame",
        "float_frame",
        "datetime_frame",
        "duplicate_columns_frame",
    ],
)
def test_info_smoke_test(fixture_func_name, request):
    """Smoke test: info() yields a reasonably sized report for several frames."""
    frame = request.getfixturevalue(fixture_func_name)
    out = StringIO()
    frame.info(buf=out)
    assert len(out.getvalue().splitlines()) > 10
77
+
78
+
79
@pytest.mark.parametrize(
    "num_columns, max_info_columns, verbose",
    [
        (10, 100, True),
        (10, 11, True),
        (10, 10, True),
        (10, 9, False),
        (10, 1, False),
    ],
)
def test_info_default_verbose_selection(num_columns, max_info_columns, verbose):
    """Default verbosity must equal the explicit setting implied by
    ``display.max_info_columns`` (verbose iff num_columns <= option)."""
    frame = DataFrame(np.random.randn(5, num_columns))
    with option_context("display.max_info_columns", max_info_columns):
        default_buf = StringIO()
        frame.info(buf=default_buf)

        explicit_buf = StringIO()
        frame.info(buf=explicit_buf, verbose=verbose)

        assert default_buf.getvalue() == explicit_buf.getvalue()
101
+
102
+
103
def test_info_verbose_check_header_separator_body():
    # Check the verbose info() table header/separator and that each body
    # row is prefixed with its positional index.
    buf = StringIO()
    size = 1001
    start = 5  # number of preamble lines before the first body row
    frame = DataFrame(np.random.randn(3, size))
    frame.info(verbose=True, buf=buf)

    res = buf.getvalue()
    # NOTE(review): the spacing inside this literal appears collapsed in this
    # vendored copy (info() normally aligns the columns with runs of spaces);
    # confirm the exact bytes against upstream pandas before relying on them.
    header = " # Column Dtype \n--- ------ ----- "
    assert header in res

    frame.info(verbose=True, buf=buf)
    buf.seek(0)
    lines = buf.readlines()
    assert len(lines) > 0

    # Body rows start at index `start`; each should begin with " <n> ".
    for i, line in enumerate(lines):
        if start <= i < start + size:
            line_nr = f" {i - start} "
            assert line.startswith(line_nr)
123
+
124
+
125
+ @pytest.mark.parametrize(
126
+ "size, header_exp, separator_exp, first_line_exp, last_line_exp",
127
+ [
128
+ (
129
+ 4,
130
+ " # Column Non-Null Count Dtype ",
131
+ "--- ------ -------------- ----- ",
132
+ " 0 0 3 non-null float64",
133
+ " 3 3 3 non-null float64",
134
+ ),
135
+ (
136
+ 11,
137
+ " # Column Non-Null Count Dtype ",
138
+ "--- ------ -------------- ----- ",
139
+ " 0 0 3 non-null float64",
140
+ " 10 10 3 non-null float64",
141
+ ),
142
+ (
143
+ 101,
144
+ " # Column Non-Null Count Dtype ",
145
+ "--- ------ -------------- ----- ",
146
+ " 0 0 3 non-null float64",
147
+ " 100 100 3 non-null float64",
148
+ ),
149
+ (
150
+ 1001,
151
+ " # Column Non-Null Count Dtype ",
152
+ "--- ------ -------------- ----- ",
153
+ " 0 0 3 non-null float64",
154
+ " 1000 1000 3 non-null float64",
155
+ ),
156
+ (
157
+ 10001,
158
+ " # Column Non-Null Count Dtype ",
159
+ "--- ------ -------------- ----- ",
160
+ " 0 0 3 non-null float64",
161
+ " 10000 10000 3 non-null float64",
162
+ ),
163
+ ],
164
+ )
165
+ def test_info_verbose_with_counts_spacing(
166
+ size, header_exp, separator_exp, first_line_exp, last_line_exp
167
+ ):
168
+ """Test header column, spacer, first line and last line in verbose mode."""
169
+ frame = DataFrame(np.random.randn(3, size))
170
+ with StringIO() as buf:
171
+ frame.info(verbose=True, show_counts=True, buf=buf)
172
+ all_lines = buf.getvalue().splitlines()
173
+ # Here table would contain only header, separator and table lines
174
+ # dframe repr, index summary, memory usage and dtypes are excluded
175
+ table = all_lines[3:-2]
176
+ header, separator, first_line, *rest, last_line = table
177
+ assert header == header_exp
178
+ assert separator == separator_exp
179
+ assert first_line == first_line_exp
180
+ assert last_line == last_line_exp
181
+
182
+
183
def test_info_memory():
    # https://github.com/pandas-dev/pandas/issues/21056
    df = DataFrame({"a": Series([1, 2], dtype="i8")})
    buf = StringIO()
    df.info(buf=buf)
    result = buf.getvalue()
    # NOTE: `bytes` shadows the builtin; kept as-is to preserve the code.
    bytes = float(df.memory_usage().sum())
    # NOTE(review): the table alignment inside this literal appears collapsed
    # in this vendored copy; confirm the spacing against upstream pandas.
    expected = textwrap.dedent(
        f"""\
    <class 'pandas.core.frame.DataFrame'>
    RangeIndex: 2 entries, 0 to 1
    Data columns (total 1 columns):
    # Column Non-Null Count Dtype
    --- ------ -------------- -----
    0 a 2 non-null int64
    dtypes: int64(1)
    memory usage: {bytes} bytes
    """
    )
    assert result == expected
203
+
204
+
205
def test_info_wide():
    """A wide frame prints the full column listing when max_cols allows it,
    and the option-based path must match the explicit max_cols path."""
    frame = DataFrame(np.random.randn(5, 101))
    frame.info(buf=StringIO())  # smoke: default (truncated) rendering

    explicit = StringIO()
    frame.info(buf=explicit, max_cols=101)
    full_listing = explicit.getvalue()
    assert len(full_listing.splitlines()) > 100

    with option_context("display.max_info_columns", 101):
        via_option = StringIO()
        frame.info(buf=via_option)
        assert via_option.getvalue() == full_listing
221
+
222
+
223
def test_info_duplicate_columns_shows_correct_dtypes():
    # GH11761: each duplicated column label must report its own dtype.
    io = StringIO()
    frame = DataFrame([[1, 2.0]], columns=["a", "a"])
    frame.info(buf=io)
    lines = io.getvalue().splitlines(True)
    # NOTE(review): the intra-string spacing below appears collapsed in this
    # vendored copy (columns are normally aligned); verify against upstream.
    assert " 0 a 1 non-null int64 \n" == lines[5]
    assert " 1 a 1 non-null float64\n" == lines[6]
231
+
232
+
233
def test_info_shows_column_dtypes():
    """info() must list every column with its non-null count and dtype."""
    dtypes = [
        "int64",
        "float64",
        "datetime64[ns]",
        "timedelta64[ns]",
        "complex128",
        "object",
        "bool",
    ]
    data = {}
    n = 10
    # One column per dtype, filled with 0/1 values cast to that dtype.
    for i, dtype in enumerate(dtypes):
        data[i] = np.random.randint(2, size=n).astype(dtype)
    df = DataFrame(data)
    buf = StringIO()
    df.info(buf=buf)
    res = buf.getvalue()
    # NOTE(review): alignment spaces inside these literals appear collapsed in
    # this vendored copy; confirm the exact bytes against upstream pandas.
    header = (
        " # Column Non-Null Count Dtype \n"
        "--- ------ -------------- ----- "
    )
    assert header in res
    for i, dtype in enumerate(dtypes):
        name = f" {i:d} {i:d} {n:d} non-null {dtype}"
        assert name in res
259
+
260
+
261
def test_info_max_cols():
    """Interaction of ``display.max_info_columns``, ``verbose`` and ``max_cols``:
    the output is either a 5-line summary or a 12-line full listing."""
    df = DataFrame(np.random.randn(10, 5))

    def line_count(**kwargs):
        buf = StringIO()
        df.info(buf=buf, **kwargs)
        return len(buf.getvalue().strip().split("\n"))

    # Option exceeded (5 cols > 4): default/False summarize, True lists fully.
    with option_context("max_info_columns", 4):
        for expected_lines, verbose in [(5, None), (5, False), (12, True)]:
            assert line_count(verbose=verbose) == expected_lines

    # Option not exceeded: default now matches the full listing.
    with option_context("max_info_columns", 5):
        for expected_lines, verbose in [(12, None), (5, False), (12, True)]:
            assert line_count(verbose=verbose) == expected_lines

    # Explicit max_cols always wins over the option.
    for expected_lines, max_cols in [(12, 5), (5, 4)]:
        with option_context("max_info_columns", 4):
            assert line_count(max_cols=max_cols) == expected_lines
        with option_context("max_info_columns", 5):
            assert line_count(max_cols=max_cols) == expected_lines
293
+
294
+
295
def test_info_memory_usage():
    # Ensure memory usage is displayed, when asserted, on the last line
    dtypes = [
        "int64",
        "float64",
        "datetime64[ns]",
        "timedelta64[ns]",
        "complex128",
        "object",
        "bool",
    ]
    data = {}
    n = 10
    # One column per dtype.
    for i, dtype in enumerate(dtypes):
        data[i] = np.random.randint(2, size=n).astype(dtype)
    df = DataFrame(data)
    buf = StringIO()

    # display memory usage case
    df.info(buf=buf, memory_usage=True)
    res = buf.getvalue().splitlines()
    assert "memory usage: " in res[-1]

    # do not display memory usage case
    df.info(buf=buf, memory_usage=False)
    res = buf.getvalue().splitlines()
    assert "memory usage: " not in res[-1]

    df.info(buf=buf, memory_usage=True)
    res = buf.getvalue().splitlines()

    # memory usage is a lower bound, so print it as XYZ+ MB
    assert re.match(r"memory usage: [^+]+\+", res[-1])

    # First 5 columns exclude the object column, so the estimate is exact.
    df.iloc[:, :5].info(buf=buf, memory_usage=True)
    res = buf.getvalue().splitlines()

    # excluded column with object dtype, so estimate is accurate
    assert not re.match(r"memory usage: [^+]+\+", res[-1])

    # Test a DataFrame with duplicate columns
    dtypes = ["int64", "int64", "int64", "float64"]
    data = {}
    n = 100
    for i, dtype in enumerate(dtypes):
        data[i] = np.random.randint(2, size=n).astype(dtype)
    df = DataFrame(data)
    df.columns = dtypes

    # Object-dtype index -> shallow estimate is a lower bound ("+").
    df_with_object_index = DataFrame({"a": [1]}, index=["foo"])
    df_with_object_index.info(buf=buf, memory_usage=True)
    res = buf.getvalue().splitlines()
    assert re.match(r"memory usage: [^+]+\+", res[-1])

    # memory_usage="deep" -> exact figure, no trailing "+".
    df_with_object_index.info(buf=buf, memory_usage="deep")
    res = buf.getvalue().splitlines()
    assert re.match(r"memory usage: [^+]+$", res[-1])

    # Ensure df size is as expected
    # (cols * rows * bytes) + index size
    df_size = df.memory_usage().sum()
    exp_size = len(dtypes) * n * 8 + df.index.nbytes
    assert df_size == exp_size

    # Ensure number of cols in memory_usage is the same as df
    size_df = np.size(df.columns.values) + 1  # index=True; default
    assert size_df == np.size(df.memory_usage())

    # assert deep works only on object
    assert df.memory_usage().sum() == df.memory_usage(deep=True).sum()

    # test for validity
    DataFrame(1, index=["a"], columns=["A"]).memory_usage(index=True)
    DataFrame(1, index=["a"], columns=["A"]).index.nbytes
    df = DataFrame(
        data=1, index=MultiIndex.from_product([["a"], range(1000)]), columns=["A"]
    )
    df.index.nbytes
    df.memory_usage(index=True)
    df.index.values.nbytes

    mem = df.memory_usage(deep=True).sum()
    assert mem > 0
378
+
379
+
380
@pytest.mark.skipif(PYPY, reason="on PyPy deep=True doesn't change result")
def test_info_memory_usage_deep_not_pypy():
    """On CPython, deep=True must add the object-dtype payload to the total."""
    frame = DataFrame({"a": [1]}, index=["foo"])  # object index
    shallow = frame.memory_usage(index=True).sum()
    deep = frame.memory_usage(index=True, deep=True).sum()
    assert deep > shallow

    obj_frame = DataFrame({"a": ["a"]})  # object column
    assert obj_frame.memory_usage(deep=True).sum() > obj_frame.memory_usage().sum()
390
+
391
+
392
@pytest.mark.xfail(not PYPY, reason="on PyPy deep=True does not change result")
def test_info_memory_usage_deep_pypy():
    """On PyPy, deep=True yields the same totals as the shallow estimate."""
    frame = DataFrame({"a": [1]}, index=["foo"])  # object index
    deep = frame.memory_usage(index=True, deep=True).sum()
    shallow = frame.memory_usage(index=True).sum()
    assert deep == shallow

    obj_frame = DataFrame({"a": ["a"]})  # object column
    assert obj_frame.memory_usage(deep=True).sum() == obj_frame.memory_usage().sum()
402
+
403
+
404
@pytest.mark.skipif(PYPY, reason="PyPy getsizeof() fails by design")
def test_usage_via_getsizeof():
    """sys.getsizeof(df) should agree with memory_usage(deep=True).sum()."""
    frame = DataFrame(
        data=1, index=MultiIndex.from_product([["a"], range(1000)]), columns=["A"]
    )
    deep_total = frame.memory_usage(deep=True).sum()
    # sys.getsizeof will call the .memory_usage with
    # deep=True, and add on some GC overhead
    assert abs(deep_total - sys.getsizeof(frame)) < 100
414
+
415
+
416
def test_info_memory_usage_qualified():
    """The memory-usage line carries a '+' qualifier only when the figure is
    a lower bound, i.e. when the index holds object dtype."""

    def info_text(frame):
        out = StringIO()
        frame.info(buf=out)
        return out.getvalue()

    # integer index -> exact estimate, no qualifier
    assert "+" not in info_text(DataFrame(1, columns=list("ab"), index=[1, 2, 3]))

    # object index -> lower bound, qualified with '+'
    assert "+" in info_text(DataFrame(1, columns=list("ab"), index=list("ABC")))

    # all-integer MultiIndex -> exact
    assert "+" not in info_text(
        DataFrame(
            1, columns=list("ab"), index=MultiIndex.from_product([range(3), range(3)])
        )
    )

    # MultiIndex with a string level -> lower bound
    assert "+" in info_text(
        DataFrame(
            1,
            columns=list("ab"),
            index=MultiIndex.from_product([range(3), ["foo", "bar"]]),
        )
    )
440
+
441
+
442
def test_info_memory_usage_bug_on_multiindex():
    # GH 14308
    # memory usage introspection should not materialize .values

    def deep_total(frame):
        return frame.memory_usage(deep=True).sum()

    n_dates = 100
    ids = list(uppercase)
    index = MultiIndex.from_product(
        [ids, date_range("20160101", periods=n_dates)],
        names=["id", "date"],
    )
    frame = DataFrame({"value": np.random.randn(n_dates * len(ids))}, index=index)

    unstacked = frame.unstack("id")
    # Same payload either way, but the unstacked layout has a smaller index.
    assert frame.values.nbytes == unstacked.values.nbytes
    assert deep_total(frame) > deep_total(unstacked)

    # high upper bound
    assert deep_total(unstacked) - deep_total(frame) < 2000
463
+
464
+
465
def test_info_categorical():
    # GH14298: info() must not fail when both axes use a CategoricalIndex.
    axis = CategoricalIndex(["a", "b"])
    frame = DataFrame(np.zeros((2, 2)), index=axis, columns=axis)
    frame.info(buf=StringIO())
472
+
473
+
474
@pytest.mark.xfail(not IS64, reason="GH 36579: fail on 32-bit system")
def test_info_int_columns():
    # GH#37245: integer column labels must render correctly in info().
    df = DataFrame({1: [1, 2], 2: [2, 3]}, index=["A", "B"])
    buf = StringIO()
    df.info(show_counts=True, buf=buf)
    result = buf.getvalue()
    # NOTE(review): the table alignment inside this literal appears collapsed
    # in this vendored copy; confirm the spacing against upstream pandas.
    expected = textwrap.dedent(
        """\
    <class 'pandas.core.frame.DataFrame'>
    Index: 2 entries, A to B
    Data columns (total 2 columns):
    # Column Non-Null Count Dtype
    --- ------ -------------- -----
    0 1 2 non-null int64
    1 2 2 non-null int64
    dtypes: int64(2)
    memory usage: 48.0+ bytes
    """
    )
    assert result == expected
495
+
496
+
497
def test_memory_usage_empty_no_warning():
    # GH#50066: memory_usage on a frame with an index but no columns
    # must not emit any warning.
    frame = DataFrame(index=["a", "b"])
    with tm.assert_produces_warning(None):
        result = frame.memory_usage()
    tm.assert_series_equal(result, Series(16 if IS64 else 8, index=["Index"]))
videochat2/lib/python3.10/site-packages/pandas/tests/io/formats/test_printing.py ADDED
@@ -0,0 +1,198 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import string
2
+
3
+ import numpy as np
4
+
5
+ import pandas._config.config as cf
6
+
7
+ import pandas as pd
8
+
9
+ from pandas.io.formats import printing
10
+ import pandas.io.formats.format as fmt
11
+
12
+
13
def test_adjoin():
    # adjoin(space, *lists) joins the column lists side by side.
    # NOTE(review): the expected literal's inter-column spacing appears
    # collapsed in this vendored copy (adjoin pads columns with runs of
    # spaces); confirm the exact bytes against upstream pandas.
    data = [["a", "b", "c"], ["dd", "ee", "ff"], ["ggg", "hhh", "iii"]]
    expected = "a dd ggg\nb ee hhh\nc ff iii"

    adjoined = printing.adjoin(2, *data)

    assert adjoined == expected
20
+
21
+
22
def test_repr_binary_type():
    """pprint_thing round-trips a decoded string: repr()-style when quoted,
    verbatim when not."""
    letters = string.ascii_letters
    try:
        raw = bytes(letters, encoding=cf.get_option("display.encoding"))
    except TypeError:
        # bytes() rejected the encoding argument; fall back to the plain call
        raw = bytes(letters)
    decoded = str(raw.decode("utf-8"))
    assert printing.pprint_thing(decoded, quote_strings=True) == repr(decoded)
    assert printing.pprint_thing(decoded, quote_strings=False) == decoded
33
+
34
+
35
+ class TestFormattBase:
36
+ def test_adjoin(self):
37
+ data = [["a", "b", "c"], ["dd", "ee", "ff"], ["ggg", "hhh", "iii"]]
38
+ expected = "a dd ggg\nb ee hhh\nc ff iii"
39
+
40
+ adjoined = printing.adjoin(2, *data)
41
+
42
+ assert adjoined == expected
43
+
44
+ def test_adjoin_unicode(self):
45
+ data = [["あ", "b", "c"], ["dd", "ええ", "ff"], ["ggg", "hhh", "いいい"]]
46
+ expected = "あ dd ggg\nb ええ hhh\nc ff いいい"
47
+ adjoined = printing.adjoin(2, *data)
48
+ assert adjoined == expected
49
+
50
+ adj = fmt.EastAsianTextAdjustment()
51
+
52
+ expected = """あ dd ggg
53
+ b ええ hhh
54
+ c ff いいい"""
55
+
56
+ adjoined = adj.adjoin(2, *data)
57
+ assert adjoined == expected
58
+ cols = adjoined.split("\n")
59
+ assert adj.len(cols[0]) == 13
60
+ assert adj.len(cols[1]) == 13
61
+ assert adj.len(cols[2]) == 16
62
+
63
+ expected = """あ dd ggg
64
+ b ええ hhh
65
+ c ff いいい"""
66
+
67
+ adjoined = adj.adjoin(7, *data)
68
+ assert adjoined == expected
69
+ cols = adjoined.split("\n")
70
+ assert adj.len(cols[0]) == 23
71
+ assert adj.len(cols[1]) == 23
72
+ assert adj.len(cols[2]) == 26
73
+
74
+ def test_justify(self):
75
+ adj = fmt.EastAsianTextAdjustment()
76
+
77
+ def just(x, *args, **kwargs):
78
+ # wrapper to test single str
79
+ return adj.justify([x], *args, **kwargs)[0]
80
+
81
+ assert just("abc", 5, mode="left") == "abc "
82
+ assert just("abc", 5, mode="center") == " abc "
83
+ assert just("abc", 5, mode="right") == " abc"
84
+ assert just("abc", 5, mode="left") == "abc "
85
+ assert just("abc", 5, mode="center") == " abc "
86
+ assert just("abc", 5, mode="right") == " abc"
87
+
88
+ assert just("パンダ", 5, mode="left") == "パンダ"
89
+ assert just("パンダ", 5, mode="center") == "パンダ"
90
+ assert just("パンダ", 5, mode="right") == "パンダ"
91
+
92
+ assert just("パンダ", 10, mode="left") == "パンダ "
93
+ assert just("パンダ", 10, mode="center") == " パンダ "
94
+ assert just("パンダ", 10, mode="right") == " パンダ"
95
+
96
+ def test_east_asian_len(self):
97
+ adj = fmt.EastAsianTextAdjustment()
98
+
99
+ assert adj.len("abc") == 3
100
+ assert adj.len("abc") == 3
101
+
102
+ assert adj.len("パンダ") == 6
103
+ assert adj.len("パンダ") == 5
104
+ assert adj.len("パンダpanda") == 11
105
+ assert adj.len("パンダpanda") == 10
106
+
107
+ def test_ambiguous_width(self):
108
+ adj = fmt.EastAsianTextAdjustment()
109
+ assert adj.len("¡¡ab") == 4
110
+
111
+ with cf.option_context("display.unicode.ambiguous_as_wide", True):
112
+ adj = fmt.EastAsianTextAdjustment()
113
+ assert adj.len("¡¡ab") == 6
114
+
115
+ data = [["あ", "b", "c"], ["dd", "ええ", "ff"], ["ggg", "¡¡ab", "いいい"]]
116
+ expected = "あ dd ggg \nb ええ ¡¡ab\nc ff いいい"
117
+ adjoined = adj.adjoin(2, *data)
118
+ assert adjoined == expected
119
+
120
+
121
class TestTableSchemaRepr:
    """Jupyter table-schema (data resource) repr integration.

    The ``ip`` fixture supplies an IPython shell instance (defined outside
    this view -- presumably in the suite's conftest; confirm there).
    """

    def test_publishes(self, ip):
        # With display.html.table_schema on, Series/DataFrame publish the
        # data-resource mimetype alongside their usual reprs.
        ipython = ip.instance(config=ip.config)
        df = pd.DataFrame({"A": [1, 2]})
        objects = [df["A"], df]  # dataframe / series
        expected_keys = [
            {"text/plain", "application/vnd.dataresource+json"},
            {"text/plain", "text/html", "application/vnd.dataresource+json"},
        ]

        opt = pd.option_context("display.html.table_schema", True)
        last_obj = None
        for obj, expected in zip(objects, expected_keys):
            last_obj = obj
            with opt:
                formatted = ipython.display_formatter.format(obj)
                assert set(formatted[0].keys()) == expected

        # Enabling the latex styler repr adds text/latex to the published set.
        with_latex = pd.option_context("styler.render.repr", "latex")

        with opt, with_latex:
            formatted = ipython.display_formatter.format(last_obj)

        expected = {
            "text/plain",
            "text/html",
            "text/latex",
            "application/vnd.dataresource+json",
        }
        assert set(formatted[0].keys()) == expected

    def test_publishes_not_implemented(self, ip):
        # column MultiIndex
        # GH 15996: table schema is unsupported here, so only the plain
        # reprs are published.
        midx = pd.MultiIndex.from_product([["A", "B"], ["a", "b", "c"]])
        df = pd.DataFrame(np.random.randn(5, len(midx)), columns=midx)

        opt = pd.option_context("display.html.table_schema", True)

        with opt:
            formatted = ip.instance(config=ip.config).display_formatter.format(df)

        expected = {"text/plain", "text/html"}
        assert set(formatted[0].keys()) == expected

    def test_config_on(self):
        # Option enabled -> the data-resource repr produces a payload.
        df = pd.DataFrame({"A": [1, 2]})
        with pd.option_context("display.html.table_schema", True):
            result = df._repr_data_resource_()

        assert result is not None

    def test_config_default_off(self):
        # Option disabled (the default) -> no payload.
        df = pd.DataFrame({"A": [1, 2]})
        with pd.option_context("display.html.table_schema", False):
            result = df._repr_data_resource_()

        assert result is None

    def test_enable_data_resource_formatter(self, ip):
        # GH 10491: toggling the option registers/enables the formatter and
        # leaving the context only disables it (it stays registered).
        formatters = ip.instance(config=ip.config).display_formatter.formatters
        mimetype = "application/vnd.dataresource+json"

        with pd.option_context("display.html.table_schema", True):
            assert "application/vnd.dataresource+json" in formatters
            assert formatters[mimetype].enabled

        # still there, just disabled
        assert "application/vnd.dataresource+json" in formatters
        assert not formatters[mimetype].enabled

        # able to re-set
        with pd.option_context("display.html.table_schema", True):
            assert "application/vnd.dataresource+json" in formatters
            assert formatters[mimetype].enabled
            # smoke test that it works
            ip.instance(config=ip.config).display_formatter.format(cf)
+ ip.instance(config=ip.config).display_formatter.format(cf)
videochat2/lib/python3.10/site-packages/pandas/tests/io/formats/test_to_html.py ADDED
@@ -0,0 +1,898 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from datetime import datetime
2
+ from io import StringIO
3
+ import re
4
+
5
+ import numpy as np
6
+ import pytest
7
+
8
+ import pandas as pd
9
+ from pandas import (
10
+ DataFrame,
11
+ Index,
12
+ MultiIndex,
13
+ option_context,
14
+ )
15
+ import pandas._testing as tm
16
+
17
+ import pandas.io.formats.format as fmt
18
+
19
+ lorem_ipsum = (
20
+ "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod "
21
+ "tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim "
22
+ "veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex "
23
+ "ea commodo consequat. Duis aute irure dolor in reprehenderit in "
24
+ "voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur "
25
+ "sint occaecat cupidatat non proident, sunt in culpa qui officia "
26
+ "deserunt mollit anim id est laborum."
27
+ )
28
+
29
+
30
+ def expected_html(datapath, name):
31
+ """
32
+ Read HTML file from formats data directory.
33
+
34
+ Parameters
35
+ ----------
36
+ datapath : pytest fixture
37
+ The datapath fixture injected into a test by pytest.
38
+ name : str
39
+ The name of the HTML file without the suffix.
40
+
41
+ Returns
42
+ -------
43
+ str : contents of HTML file.
44
+ """
45
+ filename = ".".join([name, "html"])
46
+ filepath = datapath("io", "formats", "data", "html", filename)
47
+ with open(filepath, encoding="utf-8") as f:
48
+ html = f.read()
49
+ return html.rstrip()
50
+
51
+
52
+ @pytest.fixture(params=["mixed", "empty"])
53
+ def biggie_df_fixture(request):
54
+ """Fixture for a big mixed Dataframe and an empty Dataframe"""
55
+ if request.param == "mixed":
56
+ df = DataFrame(
57
+ {"A": np.random.randn(200), "B": tm.makeStringIndex(200)},
58
+ index=np.arange(200),
59
+ )
60
+ df.loc[:20, "A"] = np.nan
61
+ df.loc[:20, "B"] = np.nan
62
+ return df
63
+ elif request.param == "empty":
64
+ df = DataFrame(index=np.arange(200))
65
+ return df
66
+
67
+
68
+ @pytest.fixture(params=fmt._VALID_JUSTIFY_PARAMETERS)
69
+ def justify(request):
70
+ return request.param
71
+
72
+
73
+ @pytest.mark.parametrize("col_space", [30, 50])
74
+ def test_to_html_with_col_space(col_space):
75
+ df = DataFrame(np.random.random(size=(1, 3)))
76
+ # check that col_space affects HTML generation
77
+ # and be very brittle about it.
78
+ result = df.to_html(col_space=col_space)
79
+ hdrs = [x for x in result.split(r"\n") if re.search(r"<th[>\s]", x)]
80
+ assert len(hdrs) > 0
81
+ for h in hdrs:
82
+ assert "min-width" in h
83
+ assert str(col_space) in h
84
+
85
+
86
+ def test_to_html_with_column_specific_col_space_raises():
87
+ df = DataFrame(np.random.random(size=(3, 3)), columns=["a", "b", "c"])
88
+
89
+ msg = (
90
+ "Col_space length\\(\\d+\\) should match "
91
+ "DataFrame number of columns\\(\\d+\\)"
92
+ )
93
+ with pytest.raises(ValueError, match=msg):
94
+ df.to_html(col_space=[30, 40])
95
+
96
+ with pytest.raises(ValueError, match=msg):
97
+ df.to_html(col_space=[30, 40, 50, 60])
98
+
99
+ msg = "unknown column"
100
+ with pytest.raises(ValueError, match=msg):
101
+ df.to_html(col_space={"a": "foo", "b": 23, "d": 34})
102
+
103
+
104
+ def test_to_html_with_column_specific_col_space():
105
+ df = DataFrame(np.random.random(size=(3, 3)), columns=["a", "b", "c"])
106
+
107
+ result = df.to_html(col_space={"a": "2em", "b": 23})
108
+ hdrs = [x for x in result.split("\n") if re.search(r"<th[>\s]", x)]
109
+ assert 'min-width: 2em;">a</th>' in hdrs[1]
110
+ assert 'min-width: 23px;">b</th>' in hdrs[2]
111
+ assert "<th>c</th>" in hdrs[3]
112
+
113
+ result = df.to_html(col_space=["1em", 2, 3])
114
+ hdrs = [x for x in result.split("\n") if re.search(r"<th[>\s]", x)]
115
+ assert 'min-width: 1em;">a</th>' in hdrs[1]
116
+ assert 'min-width: 2px;">b</th>' in hdrs[2]
117
+ assert 'min-width: 3px;">c</th>' in hdrs[3]
118
+
119
+
120
+ def test_to_html_with_empty_string_label():
121
+ # GH 3547, to_html regards empty string labels as repeated labels
122
+ data = {"c1": ["a", "b"], "c2": ["a", ""], "data": [1, 2]}
123
+ df = DataFrame(data).set_index(["c1", "c2"])
124
+ result = df.to_html()
125
+ assert "rowspan" not in result
126
+
127
+
128
+ @pytest.mark.parametrize(
129
+ "df,expected",
130
+ [
131
+ (DataFrame({"\u03c3": np.arange(10.0)}), "unicode_1"),
132
+ (DataFrame({"A": ["\u03c3"]}), "unicode_2"),
133
+ ],
134
+ )
135
+ def test_to_html_unicode(df, expected, datapath):
136
+ expected = expected_html(datapath, expected)
137
+ result = df.to_html()
138
+ assert result == expected
139
+
140
+
141
+ def test_to_html_encoding(float_frame, tmp_path):
142
+ # GH 28663
143
+ path = tmp_path / "test.html"
144
+ float_frame.to_html(path, encoding="gbk")
145
+ with open(str(path), encoding="gbk") as f:
146
+ assert float_frame.to_html() == f.read()
147
+
148
+
149
+ def test_to_html_decimal(datapath):
150
+ # GH 12031
151
+ df = DataFrame({"A": [6.0, 3.1, 2.2]})
152
+ result = df.to_html(decimal=",")
153
+ expected = expected_html(datapath, "gh12031_expected_output")
154
+ assert result == expected
155
+
156
+
157
+ @pytest.mark.parametrize(
158
+ "kwargs,string,expected",
159
+ [
160
+ ({}, "<type 'str'>", "escaped"),
161
+ ({"escape": False}, "<b>bold</b>", "escape_disabled"),
162
+ ],
163
+ )
164
+ def test_to_html_escaped(kwargs, string, expected, datapath):
165
+ a = "str<ing1 &amp;"
166
+ b = "stri>ng2 &amp;"
167
+
168
+ test_dict = {"co<l1": {a: string, b: string}, "co>l2": {a: string, b: string}}
169
+ result = DataFrame(test_dict).to_html(**kwargs)
170
+ expected = expected_html(datapath, expected)
171
+ assert result == expected
172
+
173
+
174
+ @pytest.mark.parametrize("index_is_named", [True, False])
175
+ def test_to_html_multiindex_index_false(index_is_named, datapath):
176
+ # GH 8452
177
+ df = DataFrame(
178
+ {"a": range(2), "b": range(3, 5), "c": range(5, 7), "d": range(3, 5)}
179
+ )
180
+ df.columns = MultiIndex.from_product([["a", "b"], ["c", "d"]])
181
+ if index_is_named:
182
+ df.index = Index(df.index.values, name="idx")
183
+ result = df.to_html(index=False)
184
+ expected = expected_html(datapath, "gh8452_expected_output")
185
+ assert result == expected
186
+
187
+
188
+ @pytest.mark.parametrize(
189
+ "multi_sparse,expected",
190
+ [
191
+ (False, "multiindex_sparsify_false_multi_sparse_1"),
192
+ (False, "multiindex_sparsify_false_multi_sparse_2"),
193
+ (True, "multiindex_sparsify_1"),
194
+ (True, "multiindex_sparsify_2"),
195
+ ],
196
+ )
197
+ def test_to_html_multiindex_sparsify(multi_sparse, expected, datapath):
198
+ index = MultiIndex.from_arrays([[0, 0, 1, 1], [0, 1, 0, 1]], names=["foo", None])
199
+ df = DataFrame([[0, 1], [2, 3], [4, 5], [6, 7]], index=index)
200
+ if expected.endswith("2"):
201
+ df.columns = index[::2]
202
+ with option_context("display.multi_sparse", multi_sparse):
203
+ result = df.to_html()
204
+ expected = expected_html(datapath, expected)
205
+ assert result == expected
206
+
207
+
208
+ @pytest.mark.parametrize(
209
+ "max_rows,expected",
210
+ [
211
+ (60, "gh14882_expected_output_1"),
212
+ # Test that ... appears in a middle level
213
+ (56, "gh14882_expected_output_2"),
214
+ ],
215
+ )
216
+ def test_to_html_multiindex_odd_even_truncate(max_rows, expected, datapath):
217
+ # GH 14882 - Issue on truncation with odd length DataFrame
218
+ index = MultiIndex.from_product(
219
+ [[100, 200, 300], [10, 20, 30], [1, 2, 3, 4, 5, 6, 7]], names=["a", "b", "c"]
220
+ )
221
+ df = DataFrame({"n": range(len(index))}, index=index)
222
+ result = df.to_html(max_rows=max_rows)
223
+ expected = expected_html(datapath, expected)
224
+ assert result == expected
225
+
226
+
227
+ @pytest.mark.parametrize(
228
+ "df,formatters,expected",
229
+ [
230
+ (
231
+ DataFrame(
232
+ [[0, 1], [2, 3], [4, 5], [6, 7]],
233
+ columns=["foo", None],
234
+ index=np.arange(4),
235
+ ),
236
+ {"__index__": lambda x: "abcd"[x]},
237
+ "index_formatter",
238
+ ),
239
+ (
240
+ DataFrame({"months": [datetime(2016, 1, 1), datetime(2016, 2, 2)]}),
241
+ {"months": lambda x: x.strftime("%Y-%m")},
242
+ "datetime64_monthformatter",
243
+ ),
244
+ (
245
+ DataFrame(
246
+ {
247
+ "hod": pd.to_datetime(
248
+ ["10:10:10.100", "12:12:12.120"], format="%H:%M:%S.%f"
249
+ )
250
+ }
251
+ ),
252
+ {"hod": lambda x: x.strftime("%H:%M")},
253
+ "datetime64_hourformatter",
254
+ ),
255
+ (
256
+ DataFrame(
257
+ {
258
+ "i": pd.Series([1, 2], dtype="int64"),
259
+ "f": pd.Series([1, 2], dtype="float64"),
260
+ "I": pd.Series([1, 2], dtype="Int64"),
261
+ "s": pd.Series([1, 2], dtype="string"),
262
+ "b": pd.Series([True, False], dtype="boolean"),
263
+ "c": pd.Series(["a", "b"], dtype=pd.CategoricalDtype(["a", "b"])),
264
+ "o": pd.Series([1, "2"], dtype=object),
265
+ }
266
+ ),
267
+ [lambda x: "formatted"] * 7,
268
+ "various_dtypes_formatted",
269
+ ),
270
+ ],
271
+ )
272
+ def test_to_html_formatters(df, formatters, expected, datapath):
273
+ expected = expected_html(datapath, expected)
274
+ result = df.to_html(formatters=formatters)
275
+ assert result == expected
276
+
277
+
278
+ def test_to_html_regression_GH6098():
279
+ df = DataFrame(
280
+ {
281
+ "clé1": ["a", "a", "b", "b", "a"],
282
+ "clé2": ["1er", "2ème", "1er", "2ème", "1er"],
283
+ "données1": np.random.randn(5),
284
+ "données2": np.random.randn(5),
285
+ }
286
+ )
287
+
288
+ # it works
289
+ df.pivot_table(index=["clé1"], columns=["clé2"])._repr_html_()
290
+
291
+
292
+ def test_to_html_truncate(datapath):
293
+ index = pd.date_range(start="20010101", freq="D", periods=20)
294
+ df = DataFrame(index=index, columns=range(20))
295
+ result = df.to_html(max_rows=8, max_cols=4)
296
+ expected = expected_html(datapath, "truncate")
297
+ assert result == expected
298
+
299
+
300
+ @pytest.mark.parametrize("size", [1, 5])
301
+ def test_html_invalid_formatters_arg_raises(size):
302
+ # issue-28469
303
+ df = DataFrame(columns=["a", "b", "c"])
304
+ msg = "Formatters length({}) should match DataFrame number of columns(3)"
305
+ with pytest.raises(ValueError, match=re.escape(msg.format(size))):
306
+ df.to_html(formatters=["{}".format] * size)
307
+
308
+
309
+ def test_to_html_truncate_formatter(datapath):
310
+ # issue-25955
311
+ data = [
312
+ {"A": 1, "B": 2, "C": 3, "D": 4},
313
+ {"A": 5, "B": 6, "C": 7, "D": 8},
314
+ {"A": 9, "B": 10, "C": 11, "D": 12},
315
+ {"A": 13, "B": 14, "C": 15, "D": 16},
316
+ ]
317
+
318
+ df = DataFrame(data)
319
+ fmt = lambda x: str(x) + "_mod"
320
+ formatters = [fmt, fmt, None, None]
321
+ result = df.to_html(formatters=formatters, max_cols=3)
322
+ expected = expected_html(datapath, "truncate_formatter")
323
+ assert result == expected
324
+
325
+
326
+ @pytest.mark.parametrize(
327
+ "sparsify,expected",
328
+ [(True, "truncate_multi_index"), (False, "truncate_multi_index_sparse_off")],
329
+ )
330
+ def test_to_html_truncate_multi_index(sparsify, expected, datapath):
331
+ arrays = [
332
+ ["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"],
333
+ ["one", "two", "one", "two", "one", "two", "one", "two"],
334
+ ]
335
+ df = DataFrame(index=arrays, columns=arrays)
336
+ result = df.to_html(max_rows=7, max_cols=7, sparsify=sparsify)
337
+ expected = expected_html(datapath, expected)
338
+ assert result == expected
339
+
340
+
341
+ @pytest.mark.parametrize(
342
+ "option,result,expected",
343
+ [
344
+ (None, lambda df: df.to_html(), "1"),
345
+ (None, lambda df: df.to_html(border=2), "2"),
346
+ (2, lambda df: df.to_html(), "2"),
347
+ (2, lambda df: df._repr_html_(), "2"),
348
+ ],
349
+ )
350
+ def test_to_html_border(option, result, expected):
351
+ df = DataFrame({"A": [1, 2]})
352
+ if option is None:
353
+ result = result(df)
354
+ else:
355
+ with option_context("display.html.border", option):
356
+ result = result(df)
357
+ expected = f'border="{expected}"'
358
+ assert expected in result
359
+
360
+
361
+ @pytest.mark.parametrize("biggie_df_fixture", ["mixed"], indirect=True)
362
+ def test_to_html(biggie_df_fixture):
363
+ # TODO: split this test
364
+ df = biggie_df_fixture
365
+ s = df.to_html()
366
+
367
+ buf = StringIO()
368
+ retval = df.to_html(buf=buf)
369
+ assert retval is None
370
+ assert buf.getvalue() == s
371
+
372
+ assert isinstance(s, str)
373
+
374
+ df.to_html(columns=["B", "A"], col_space=17)
375
+ df.to_html(columns=["B", "A"], formatters={"A": lambda x: f"{x:.1f}"})
376
+
377
+ df.to_html(columns=["B", "A"], float_format=str)
378
+ df.to_html(columns=["B", "A"], col_space=12, float_format=str)
379
+
380
+
381
+ @pytest.mark.parametrize("biggie_df_fixture", ["empty"], indirect=True)
382
+ def test_to_html_empty_dataframe(biggie_df_fixture):
383
+ df = biggie_df_fixture
384
+ df.to_html()
385
+
386
+
387
+ def test_to_html_filename(biggie_df_fixture, tmpdir):
388
+ df = biggie_df_fixture
389
+ expected = df.to_html()
390
+ path = tmpdir.join("test.html")
391
+ df.to_html(path)
392
+ result = path.read()
393
+ assert result == expected
394
+
395
+
396
+ def test_to_html_with_no_bold():
397
+ df = DataFrame({"x": np.random.randn(5)})
398
+ html = df.to_html(bold_rows=False)
399
+ result = html[html.find("</thead>")]
400
+ assert "<strong" not in result
401
+
402
+
403
+ def test_to_html_columns_arg(float_frame):
404
+ result = float_frame.to_html(columns=["A"])
405
+ assert "<th>B</th>" not in result
406
+
407
+
408
+ @pytest.mark.parametrize(
409
+ "columns,justify,expected",
410
+ [
411
+ (
412
+ MultiIndex.from_tuples(
413
+ list(zip(np.arange(2).repeat(2), np.mod(range(4), 2))),
414
+ names=["CL0", "CL1"],
415
+ ),
416
+ "left",
417
+ "multiindex_1",
418
+ ),
419
+ (
420
+ MultiIndex.from_tuples(list(zip(range(4), np.mod(range(4), 2)))),
421
+ "right",
422
+ "multiindex_2",
423
+ ),
424
+ ],
425
+ )
426
+ def test_to_html_multiindex(columns, justify, expected, datapath):
427
+ df = DataFrame([list("abcd"), list("efgh")], columns=columns)
428
+ result = df.to_html(justify=justify)
429
+ expected = expected_html(datapath, expected)
430
+ assert result == expected
431
+
432
+
433
+ def test_to_html_justify(justify, datapath):
434
+ df = DataFrame(
435
+ {"A": [6, 30000, 2], "B": [1, 2, 70000], "C": [223442, 0, 1]},
436
+ columns=["A", "B", "C"],
437
+ )
438
+ result = df.to_html(justify=justify)
439
+ expected = expected_html(datapath, "justify").format(justify=justify)
440
+ assert result == expected
441
+
442
+
443
+ @pytest.mark.parametrize(
444
+ "justify", ["super-right", "small-left", "noinherit", "tiny", "pandas"]
445
+ )
446
+ def test_to_html_invalid_justify(justify):
447
+ # GH 17527
448
+ df = DataFrame()
449
+ msg = "Invalid value for justify parameter"
450
+
451
+ with pytest.raises(ValueError, match=msg):
452
+ df.to_html(justify=justify)
453
+
454
+
455
+ class TestHTMLIndex:
456
+ @pytest.fixture
457
+ def df(self):
458
+ index = ["foo", "bar", "baz"]
459
+ df = DataFrame(
460
+ {"A": [1, 2, 3], "B": [1.2, 3.4, 5.6], "C": ["one", "two", np.nan]},
461
+ columns=["A", "B", "C"],
462
+ index=index,
463
+ )
464
+ return df
465
+
466
+ @pytest.fixture
467
+ def expected_without_index(self, datapath):
468
+ return expected_html(datapath, "index_2")
469
+
470
+ def test_to_html_flat_index_without_name(
471
+ self, datapath, df, expected_without_index
472
+ ):
473
+ expected_with_index = expected_html(datapath, "index_1")
474
+ assert df.to_html() == expected_with_index
475
+
476
+ result = df.to_html(index=False)
477
+ for i in df.index:
478
+ assert i not in result
479
+ assert result == expected_without_index
480
+
481
+ def test_to_html_flat_index_with_name(self, datapath, df, expected_without_index):
482
+ df.index = Index(["foo", "bar", "baz"], name="idx")
483
+ expected_with_index = expected_html(datapath, "index_3")
484
+ assert df.to_html() == expected_with_index
485
+ assert df.to_html(index=False) == expected_without_index
486
+
487
+ def test_to_html_multiindex_without_names(
488
+ self, datapath, df, expected_without_index
489
+ ):
490
+ tuples = [("foo", "car"), ("foo", "bike"), ("bar", "car")]
491
+ df.index = MultiIndex.from_tuples(tuples)
492
+
493
+ expected_with_index = expected_html(datapath, "index_4")
494
+ assert df.to_html() == expected_with_index
495
+
496
+ result = df.to_html(index=False)
497
+ for i in ["foo", "bar", "car", "bike"]:
498
+ assert i not in result
499
+ # must be the same result as normal index
500
+ assert result == expected_without_index
501
+
502
+ def test_to_html_multiindex_with_names(self, datapath, df, expected_without_index):
503
+ tuples = [("foo", "car"), ("foo", "bike"), ("bar", "car")]
504
+ df.index = MultiIndex.from_tuples(tuples, names=["idx1", "idx2"])
505
+ expected_with_index = expected_html(datapath, "index_5")
506
+ assert df.to_html() == expected_with_index
507
+ assert df.to_html(index=False) == expected_without_index
508
+
509
+
510
+ @pytest.mark.parametrize("classes", ["sortable draggable", ["sortable", "draggable"]])
511
+ def test_to_html_with_classes(classes, datapath):
512
+ df = DataFrame()
513
+ expected = expected_html(datapath, "with_classes")
514
+ result = df.to_html(classes=classes)
515
+ assert result == expected
516
+
517
+
518
+ def test_to_html_no_index_max_rows(datapath):
519
+ # GH 14998
520
+ df = DataFrame({"A": [1, 2, 3, 4]})
521
+ result = df.to_html(index=False, max_rows=1)
522
+ expected = expected_html(datapath, "gh14998_expected_output")
523
+ assert result == expected
524
+
525
+
526
+ def test_to_html_multiindex_max_cols(datapath):
527
+ # GH 6131
528
+ index = MultiIndex(
529
+ levels=[["ba", "bb", "bc"], ["ca", "cb", "cc"]],
530
+ codes=[[0, 1, 2], [0, 1, 2]],
531
+ names=["b", "c"],
532
+ )
533
+ columns = MultiIndex(
534
+ levels=[["d"], ["aa", "ab", "ac"]],
535
+ codes=[[0, 0, 0], [0, 1, 2]],
536
+ names=[None, "a"],
537
+ )
538
+ data = np.array(
539
+ [[1.0, np.nan, np.nan], [np.nan, 2.0, np.nan], [np.nan, np.nan, 3.0]]
540
+ )
541
+ df = DataFrame(data, index, columns)
542
+ result = df.to_html(max_cols=2)
543
+ expected = expected_html(datapath, "gh6131_expected_output")
544
+ assert result == expected
545
+
546
+
547
+ def test_to_html_multi_indexes_index_false(datapath):
548
+ # GH 22579
549
+ df = DataFrame(
550
+ {"a": range(10), "b": range(10, 20), "c": range(10, 20), "d": range(10, 20)}
551
+ )
552
+ df.columns = MultiIndex.from_product([["a", "b"], ["c", "d"]])
553
+ df.index = MultiIndex.from_product([["a", "b"], ["c", "d", "e", "f", "g"]])
554
+ result = df.to_html(index=False)
555
+ expected = expected_html(datapath, "gh22579_expected_output")
556
+ assert result == expected
557
+
558
+
559
+ @pytest.mark.parametrize("index_names", [True, False])
560
+ @pytest.mark.parametrize("header", [True, False])
561
+ @pytest.mark.parametrize("index", [True, False])
562
+ @pytest.mark.parametrize(
563
+ "column_index, column_type",
564
+ [
565
+ (Index([0, 1]), "unnamed_standard"),
566
+ (Index([0, 1], name="columns.name"), "named_standard"),
567
+ (MultiIndex.from_product([["a"], ["b", "c"]]), "unnamed_multi"),
568
+ (
569
+ MultiIndex.from_product(
570
+ [["a"], ["b", "c"]], names=["columns.name.0", "columns.name.1"]
571
+ ),
572
+ "named_multi",
573
+ ),
574
+ ],
575
+ )
576
+ @pytest.mark.parametrize(
577
+ "row_index, row_type",
578
+ [
579
+ (Index([0, 1]), "unnamed_standard"),
580
+ (Index([0, 1], name="index.name"), "named_standard"),
581
+ (MultiIndex.from_product([["a"], ["b", "c"]]), "unnamed_multi"),
582
+ (
583
+ MultiIndex.from_product(
584
+ [["a"], ["b", "c"]], names=["index.name.0", "index.name.1"]
585
+ ),
586
+ "named_multi",
587
+ ),
588
+ ],
589
+ )
590
+ def test_to_html_basic_alignment(
591
+ datapath, row_index, row_type, column_index, column_type, index, header, index_names
592
+ ):
593
+ # GH 22747, GH 22579
594
+ df = DataFrame(np.zeros((2, 2), dtype=int), index=row_index, columns=column_index)
595
+ result = df.to_html(index=index, header=header, index_names=index_names)
596
+
597
+ if not index:
598
+ row_type = "none"
599
+ elif not index_names and row_type.startswith("named"):
600
+ row_type = "un" + row_type
601
+
602
+ if not header:
603
+ column_type = "none"
604
+ elif not index_names and column_type.startswith("named"):
605
+ column_type = "un" + column_type
606
+
607
+ filename = "index_" + row_type + "_columns_" + column_type
608
+ expected = expected_html(datapath, filename)
609
+ assert result == expected
610
+
611
+
612
+ @pytest.mark.parametrize("index_names", [True, False])
613
+ @pytest.mark.parametrize("header", [True, False])
614
+ @pytest.mark.parametrize("index", [True, False])
615
+ @pytest.mark.parametrize(
616
+ "column_index, column_type",
617
+ [
618
+ (Index(np.arange(8)), "unnamed_standard"),
619
+ (Index(np.arange(8), name="columns.name"), "named_standard"),
620
+ (
621
+ MultiIndex.from_product([["a", "b"], ["c", "d"], ["e", "f"]]),
622
+ "unnamed_multi",
623
+ ),
624
+ (
625
+ MultiIndex.from_product(
626
+ [["a", "b"], ["c", "d"], ["e", "f"]], names=["foo", None, "baz"]
627
+ ),
628
+ "named_multi",
629
+ ),
630
+ ],
631
+ )
632
+ @pytest.mark.parametrize(
633
+ "row_index, row_type",
634
+ [
635
+ (Index(np.arange(8)), "unnamed_standard"),
636
+ (Index(np.arange(8), name="index.name"), "named_standard"),
637
+ (
638
+ MultiIndex.from_product([["a", "b"], ["c", "d"], ["e", "f"]]),
639
+ "unnamed_multi",
640
+ ),
641
+ (
642
+ MultiIndex.from_product(
643
+ [["a", "b"], ["c", "d"], ["e", "f"]], names=["foo", None, "baz"]
644
+ ),
645
+ "named_multi",
646
+ ),
647
+ ],
648
+ )
649
+ def test_to_html_alignment_with_truncation(
650
+ datapath, row_index, row_type, column_index, column_type, index, header, index_names
651
+ ):
652
+ # GH 22747, GH 22579
653
+ df = DataFrame(np.arange(64).reshape(8, 8), index=row_index, columns=column_index)
654
+ result = df.to_html(
655
+ max_rows=4, max_cols=4, index=index, header=header, index_names=index_names
656
+ )
657
+
658
+ if not index:
659
+ row_type = "none"
660
+ elif not index_names and row_type.startswith("named"):
661
+ row_type = "un" + row_type
662
+
663
+ if not header:
664
+ column_type = "none"
665
+ elif not index_names and column_type.startswith("named"):
666
+ column_type = "un" + column_type
667
+
668
+ filename = "trunc_df_index_" + row_type + "_columns_" + column_type
669
+ expected = expected_html(datapath, filename)
670
+ assert result == expected
671
+
672
+
673
+ @pytest.mark.parametrize("index", [False, 0])
674
+ def test_to_html_truncation_index_false_max_rows(datapath, index):
675
+ # GH 15019
676
+ data = [
677
+ [1.764052, 0.400157],
678
+ [0.978738, 2.240893],
679
+ [1.867558, -0.977278],
680
+ [0.950088, -0.151357],
681
+ [-0.103219, 0.410599],
682
+ ]
683
+ df = DataFrame(data)
684
+ result = df.to_html(max_rows=4, index=index)
685
+ expected = expected_html(datapath, "gh15019_expected_output")
686
+ assert result == expected
687
+
688
+
689
+ @pytest.mark.parametrize("index", [False, 0])
690
+ @pytest.mark.parametrize(
691
+ "col_index_named, expected_output",
692
+ [(False, "gh22783_expected_output"), (True, "gh22783_named_columns_index")],
693
+ )
694
+ def test_to_html_truncation_index_false_max_cols(
695
+ datapath, index, col_index_named, expected_output
696
+ ):
697
+ # GH 22783
698
+ data = [
699
+ [1.764052, 0.400157, 0.978738, 2.240893, 1.867558],
700
+ [-0.977278, 0.950088, -0.151357, -0.103219, 0.410599],
701
+ ]
702
+ df = DataFrame(data)
703
+ if col_index_named:
704
+ df.columns.rename("columns.name", inplace=True)
705
+ result = df.to_html(max_cols=4, index=index)
706
+ expected = expected_html(datapath, expected_output)
707
+ assert result == expected
708
+
709
+
710
+ @pytest.mark.parametrize("notebook", [True, False])
711
+ def test_to_html_notebook_has_style(notebook):
712
+ df = DataFrame({"A": [1, 2, 3]})
713
+ result = df.to_html(notebook=notebook)
714
+
715
+ if notebook:
716
+ assert "tbody tr th:only-of-type" in result
717
+ assert "vertical-align: middle;" in result
718
+ assert "thead th" in result
719
+ else:
720
+ assert "tbody tr th:only-of-type" not in result
721
+ assert "vertical-align: middle;" not in result
722
+ assert "thead th" not in result
723
+
724
+
725
+ def test_to_html_with_index_names_false():
726
+ # GH 16493
727
+ df = DataFrame({"A": [1, 2]}, index=Index(["a", "b"], name="myindexname"))
728
+ result = df.to_html(index_names=False)
729
+ assert "myindexname" not in result
730
+
731
+
732
+ def test_to_html_with_id():
733
+ # GH 8496
734
+ df = DataFrame({"A": [1, 2]}, index=Index(["a", "b"], name="myindexname"))
735
+ result = df.to_html(index_names=False, table_id="TEST_ID")
736
+ assert ' id="TEST_ID"' in result
737
+
738
+
739
+ @pytest.mark.parametrize(
740
+ "value,float_format,expected",
741
+ [
742
+ (0.19999, "%.3f", "gh21625_expected_output"),
743
+ (100.0, "%.0f", "gh22270_expected_output"),
744
+ ],
745
+ )
746
+ def test_to_html_float_format_no_fixed_width(value, float_format, expected, datapath):
747
+ # GH 21625, GH 22270
748
+ df = DataFrame({"x": [value]})
749
+ expected = expected_html(datapath, expected)
750
+ result = df.to_html(float_format=float_format)
751
+ assert result == expected
752
+
753
+
754
+ @pytest.mark.parametrize(
755
+ "render_links,expected",
756
+ [(True, "render_links_true"), (False, "render_links_false")],
757
+ )
758
+ def test_to_html_render_links(render_links, expected, datapath):
759
+ # GH 2679
760
+ data = [
761
+ [0, "https://pandas.pydata.org/?q1=a&q2=b", "pydata.org"],
762
+ [0, "www.pydata.org", "pydata.org"],
763
+ ]
764
+ df = DataFrame(data, columns=["foo", "bar", None])
765
+
766
+ result = df.to_html(render_links=render_links)
767
+ expected = expected_html(datapath, expected)
768
+ assert result == expected
769
+
770
+
771
+ @pytest.mark.parametrize(
772
+ "method,expected",
773
+ [
774
+ ("to_html", lambda x: lorem_ipsum),
775
+ ("_repr_html_", lambda x: lorem_ipsum[: x - 4] + "..."), # regression case
776
+ ],
777
+ )
778
+ @pytest.mark.parametrize("max_colwidth", [10, 20, 50, 100])
779
+ def test_ignore_display_max_colwidth(method, expected, max_colwidth):
780
+ # see gh-17004
781
+ df = DataFrame([lorem_ipsum])
782
+ with option_context("display.max_colwidth", max_colwidth):
783
+ result = getattr(df, method)()
784
+ expected = expected(max_colwidth)
785
+ assert expected in result
786
+
787
+
788
+ @pytest.mark.parametrize("classes", [True, 0])
789
+ def test_to_html_invalid_classes_type(classes):
790
+ # GH 25608
791
+ df = DataFrame()
792
+ msg = "classes must be a string, list, or tuple"
793
+
794
+ with pytest.raises(TypeError, match=msg):
795
+ df.to_html(classes=classes)
796
+
797
+
798
+ def test_to_html_round_column_headers():
799
+ # GH 17280
800
+ df = DataFrame([1], columns=[0.55555])
801
+ with option_context("display.precision", 3):
802
+ html = df.to_html(notebook=False)
803
+ notebook = df.to_html(notebook=True)
804
+ assert "0.55555" in html
805
+ assert "0.556" in notebook
806
+
807
+
808
+ @pytest.mark.parametrize("unit", ["100px", "10%", "5em", 150])
809
+ def test_to_html_with_col_space_units(unit):
810
+ # GH 25941
811
+ df = DataFrame(np.random.random(size=(1, 3)))
812
+ result = df.to_html(col_space=unit)
813
+ result = result.split("tbody")[0]
814
+ hdrs = [x for x in result.split("\n") if re.search(r"<th[>\s]", x)]
815
+ if isinstance(unit, int):
816
+ unit = str(unit) + "px"
817
+ for h in hdrs:
818
+ expected = f'<th style="min-width: {unit};">'
819
+ assert expected in h
820
+
821
+
822
+ def test_html_repr_min_rows_default(datapath):
823
+ # gh-27991
824
+
825
+ # default setting no truncation even if above min_rows
826
+ df = DataFrame({"a": range(20)})
827
+ result = df._repr_html_()
828
+ expected = expected_html(datapath, "html_repr_min_rows_default_no_truncation")
829
+ assert result == expected
830
+
831
+ # default of max_rows 60 triggers truncation if above
832
+ df = DataFrame({"a": range(61)})
833
+ result = df._repr_html_()
834
+ expected = expected_html(datapath, "html_repr_min_rows_default_truncated")
835
+ assert result == expected
836
+
837
+
838
+ @pytest.mark.parametrize(
839
+ "max_rows,min_rows,expected",
840
+ [
841
+ # truncated after first two rows
842
+ (10, 4, "html_repr_max_rows_10_min_rows_4"),
843
+ # when set to None, follow value of max_rows
844
+ (12, None, "html_repr_max_rows_12_min_rows_None"),
845
+ # when set value higher as max_rows, use the minimum
846
+ (10, 12, "html_repr_max_rows_10_min_rows_12"),
847
+ # max_rows of None -> never truncate
848
+ (None, 12, "html_repr_max_rows_None_min_rows_12"),
849
+ ],
850
+ )
851
+ def test_html_repr_min_rows(datapath, max_rows, min_rows, expected):
852
+ # gh-27991
853
+
854
+ df = DataFrame({"a": range(61)})
855
+ expected = expected_html(datapath, expected)
856
+ with option_context("display.max_rows", max_rows, "display.min_rows", min_rows):
857
+ result = df._repr_html_()
858
+ assert result == expected
859
+
860
+
861
+ def test_to_html_multilevel(multiindex_year_month_day_dataframe_random_data):
862
+ ymd = multiindex_year_month_day_dataframe_random_data
863
+
864
+ ymd.columns.name = "foo"
865
+ ymd.to_html()
866
+ ymd.T.to_html()
867
+
868
+
869
+ @pytest.mark.parametrize("na_rep", ["NaN", "Ted"])
870
+ def test_to_html_na_rep_and_float_format(na_rep, datapath):
871
+ # https://github.com/pandas-dev/pandas/issues/13828
872
+ df = DataFrame(
873
+ [
874
+ ["A", 1.2225],
875
+ ["A", None],
876
+ ],
877
+ columns=["Group", "Data"],
878
+ )
879
+ result = df.to_html(na_rep=na_rep, float_format="{:.2f}".format)
880
+ expected = expected_html(datapath, "gh13828_expected_output")
881
+ expected = expected.format(na_rep=na_rep)
882
+ assert result == expected
883
+
884
+
885
+ def test_to_html_na_rep_non_scalar_data(datapath):
886
+ # GH47103
887
+ df = DataFrame([dict(a=1, b=[1, 2, 3])])
888
+ result = df.to_html(na_rep="-")
889
+ expected = expected_html(datapath, "gh47103_expected_output")
890
+ assert result == expected
891
+
892
+
893
+ def test_to_html_float_format_object_col(datapath):
894
+ # GH#40024
895
+ df = DataFrame(data={"x": [1000.0, "test"]})
896
+ result = df.to_html(float_format=lambda x: f"{x:,.0f}")
897
+ expected = expected_html(datapath, "gh40024_expected_output")
898
+ assert result == expected
videochat2/lib/python3.10/site-packages/pandas/tests/io/formats/test_to_markdown.py ADDED
@@ -0,0 +1,90 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from io import StringIO
2
+
3
+ import pytest
4
+
5
+ import pandas as pd
6
+
7
+ pytest.importorskip("tabulate")
8
+
9
+
10
+ def test_simple():
11
+ buf = StringIO()
12
+ df = pd.DataFrame([1, 2, 3])
13
+ df.to_markdown(buf=buf)
14
+ result = buf.getvalue()
15
+ assert (
16
+ result == "| | 0 |\n|---:|----:|\n| 0 | 1 |\n| 1 | 2 |\n| 2 | 3 |"
17
+ )
18
+
19
+
20
+ def test_empty_frame():
21
+ buf = StringIO()
22
+ df = pd.DataFrame({"id": [], "first_name": [], "last_name": []}).set_index("id")
23
+ df.to_markdown(buf=buf)
24
+ result = buf.getvalue()
25
+ assert result == (
26
+ "| id | first_name | last_name |\n"
27
+ "|------|--------------|-------------|"
28
+ )
29
+
30
+
31
+ def test_other_tablefmt():
32
+ buf = StringIO()
33
+ df = pd.DataFrame([1, 2, 3])
34
+ df.to_markdown(buf=buf, tablefmt="jira")
35
+ result = buf.getvalue()
36
+ assert result == "|| || 0 ||\n| 0 | 1 |\n| 1 | 2 |\n| 2 | 3 |"
37
+
38
+
39
+ def test_other_headers():
40
+ buf = StringIO()
41
+ df = pd.DataFrame([1, 2, 3])
42
+ df.to_markdown(buf=buf, headers=["foo", "bar"])
43
+ result = buf.getvalue()
44
+ assert result == (
45
+ "| foo | bar |\n|------:|------:|\n| 0 "
46
+ "| 1 |\n| 1 | 2 |\n| 2 | 3 |"
47
+ )
48
+
49
+
50
+ def test_series():
51
+ buf = StringIO()
52
+ s = pd.Series([1, 2, 3], name="foo")
53
+ s.to_markdown(buf=buf)
54
+ result = buf.getvalue()
55
+ assert result == (
56
+ "| | foo |\n|---:|------:|\n| 0 | 1 "
57
+ "|\n| 1 | 2 |\n| 2 | 3 |"
58
+ )
59
+
60
+
61
+ def test_no_buf():
62
+ df = pd.DataFrame([1, 2, 3])
63
+ result = df.to_markdown()
64
+ assert (
65
+ result == "| | 0 |\n|---:|----:|\n| 0 | 1 |\n| 1 | 2 |\n| 2 | 3 |"
66
+ )
67
+
68
+
69
+ @pytest.mark.parametrize("index", [True, False])
70
+ def test_index(index):
71
+ # GH 32667
72
+
73
+ df = pd.DataFrame([1, 2, 3])
74
+
75
+ result = df.to_markdown(index=index)
76
+
77
+ if index:
78
+ expected = (
79
+ "| | 0 |\n|---:|----:|\n| 0 | 1 |\n| 1 | 2 |\n| 2 | 3 |"
80
+ )
81
+ else:
82
+ expected = "| 0 |\n|----:|\n| 1 |\n| 2 |\n| 3 |"
83
+ assert result == expected
84
+
85
+
86
+ def test_showindex_disallowed_in_kwargs():
87
+ # GH 32667; disallowing showindex in kwargs enforced in 2.0
88
+ df = pd.DataFrame([1, 2, 3])
89
+ with pytest.raises(ValueError, match="Pass 'index' instead of 'showindex"):
90
+ df.to_markdown(index=True, showindex=True)
videochat2/lib/python3.10/site-packages/pandas/tests/io/formats/test_to_string.py ADDED
@@ -0,0 +1,359 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from datetime import datetime
2
+ from io import StringIO
3
+ from textwrap import dedent
4
+
5
+ import numpy as np
6
+ import pytest
7
+
8
+ import pandas.util._test_decorators as td
9
+
10
+ from pandas import (
11
+ DataFrame,
12
+ Series,
13
+ option_context,
14
+ to_datetime,
15
+ )
16
+
17
+
18
+ def test_repr_embedded_ndarray():
19
+ arr = np.empty(10, dtype=[("err", object)])
20
+ for i in range(len(arr)):
21
+ arr["err"][i] = np.random.randn(i)
22
+
23
+ df = DataFrame(arr)
24
+ repr(df["err"])
25
+ repr(df)
26
+ df.to_string()
27
+
28
+
29
+ def test_repr_tuples():
30
+ buf = StringIO()
31
+
32
+ df = DataFrame({"tups": list(zip(range(10), range(10)))})
33
+ repr(df)
34
+ df.to_string(col_space=10, buf=buf)
35
+
36
+
37
+ def test_to_string_truncate():
38
+ # GH 9784 - dont truncate when calling DataFrame.to_string
39
+ df = DataFrame(
40
+ [
41
+ {
42
+ "a": "foo",
43
+ "b": "bar",
44
+ "c": "let's make this a very VERY long line that is longer "
45
+ "than the default 50 character limit",
46
+ "d": 1,
47
+ },
48
+ {"a": "foo", "b": "bar", "c": "stuff", "d": 1},
49
+ ]
50
+ )
51
+ df.set_index(["a", "b", "c"])
52
+ assert df.to_string() == (
53
+ " a b "
54
+ " c d\n"
55
+ "0 foo bar let's make this a very VERY long line t"
56
+ "hat is longer than the default 50 character limit 1\n"
57
+ "1 foo bar "
58
+ " stuff 1"
59
+ )
60
+ with option_context("max_colwidth", 20):
61
+ # the display option has no effect on the to_string method
62
+ assert df.to_string() == (
63
+ " a b "
64
+ " c d\n"
65
+ "0 foo bar let's make this a very VERY long line t"
66
+ "hat is longer than the default 50 character limit 1\n"
67
+ "1 foo bar "
68
+ " stuff 1"
69
+ )
70
+ assert df.to_string(max_colwidth=20) == (
71
+ " a b c d\n"
72
+ "0 foo bar let's make this ... 1\n"
73
+ "1 foo bar stuff 1"
74
+ )
75
+
76
+
77
+ @pytest.mark.parametrize(
78
+ "input_array, expected",
79
+ [
80
+ ("a", "a"),
81
+ (["a", "b"], "a\nb"),
82
+ ([1, "a"], "1\na"),
83
+ (1, "1"),
84
+ ([0, -1], " 0\n-1"),
85
+ (1.0, "1.0"),
86
+ ([" a", " b"], " a\n b"),
87
+ ([".1", "1"], ".1\n 1"),
88
+ (["10", "-10"], " 10\n-10"),
89
+ ],
90
+ )
91
+ def test_format_remove_leading_space_series(input_array, expected):
92
+ # GH: 24980
93
+ s = Series(input_array).to_string(index=False)
94
+ assert s == expected
95
+
96
+
97
+ @pytest.mark.parametrize(
98
+ "input_array, expected",
99
+ [
100
+ ({"A": ["a"]}, "A\na"),
101
+ ({"A": ["a", "b"], "B": ["c", "dd"]}, "A B\na c\nb dd"),
102
+ ({"A": ["a", 1], "B": ["aa", 1]}, "A B\na aa\n1 1"),
103
+ ],
104
+ )
105
+ def test_format_remove_leading_space_dataframe(input_array, expected):
106
+ # GH: 24980
107
+ df = DataFrame(input_array).to_string(index=False)
108
+ assert df == expected
109
+
110
+
111
+ @pytest.mark.parametrize(
112
+ "max_cols, max_rows, expected",
113
+ [
114
+ (
115
+ 10,
116
+ None,
117
+ " 0 1 2 3 4 ... 6 7 8 9 10\n"
118
+ " 0 0 0 0 0 ... 0 0 0 0 0\n"
119
+ " 0 0 0 0 0 ... 0 0 0 0 0\n"
120
+ " 0 0 0 0 0 ... 0 0 0 0 0\n"
121
+ " 0 0 0 0 0 ... 0 0 0 0 0",
122
+ ),
123
+ (
124
+ None,
125
+ 2,
126
+ " 0 1 2 3 4 5 6 7 8 9 10\n"
127
+ " 0 0 0 0 0 0 0 0 0 0 0\n"
128
+ " .. .. .. .. .. .. .. .. .. .. ..\n"
129
+ " 0 0 0 0 0 0 0 0 0 0 0",
130
+ ),
131
+ (
132
+ 10,
133
+ 2,
134
+ " 0 1 2 3 4 ... 6 7 8 9 10\n"
135
+ " 0 0 0 0 0 ... 0 0 0 0 0\n"
136
+ " .. .. .. .. .. ... .. .. .. .. ..\n"
137
+ " 0 0 0 0 0 ... 0 0 0 0 0",
138
+ ),
139
+ (
140
+ 9,
141
+ 2,
142
+ " 0 1 2 3 ... 7 8 9 10\n"
143
+ " 0 0 0 0 ... 0 0 0 0\n"
144
+ " .. .. .. .. ... .. .. .. ..\n"
145
+ " 0 0 0 0 ... 0 0 0 0",
146
+ ),
147
+ (
148
+ 1,
149
+ 1,
150
+ " 0 ...\n 0 ...\n.. ...",
151
+ ),
152
+ ],
153
+ )
154
+ def test_truncation_no_index(max_cols, max_rows, expected):
155
+ df = DataFrame([[0] * 11] * 4)
156
+ assert df.to_string(index=False, max_cols=max_cols, max_rows=max_rows) == expected
157
+
158
+
159
+ def test_to_string_unicode_columns(float_frame):
160
+ df = DataFrame({"\u03c3": np.arange(10.0)})
161
+
162
+ buf = StringIO()
163
+ df.to_string(buf=buf)
164
+ buf.getvalue()
165
+
166
+ buf = StringIO()
167
+ df.info(buf=buf)
168
+ buf.getvalue()
169
+
170
+ result = float_frame.to_string()
171
+ assert isinstance(result, str)
172
+
173
+
174
+ def test_to_string_utf8_columns():
175
+ n = "\u05d0".encode()
176
+
177
+ with option_context("display.max_rows", 1):
178
+ df = DataFrame([1, 2], columns=[n])
179
+ repr(df)
180
+
181
+
182
+ def test_to_string_unicode_two():
183
+ dm = DataFrame({"c/\u03c3": []})
184
+ buf = StringIO()
185
+ dm.to_string(buf)
186
+
187
+
188
+ def test_to_string_unicode_three():
189
+ dm = DataFrame(["\xc2"])
190
+ buf = StringIO()
191
+ dm.to_string(buf)
192
+
193
+
194
+ def test_to_string_with_formatters():
195
+ df = DataFrame(
196
+ {
197
+ "int": [1, 2, 3],
198
+ "float": [1.0, 2.0, 3.0],
199
+ "object": [(1, 2), True, False],
200
+ },
201
+ columns=["int", "float", "object"],
202
+ )
203
+
204
+ formatters = [
205
+ ("int", lambda x: f"0x{x:x}"),
206
+ ("float", lambda x: f"[{x: 4.1f}]"),
207
+ ("object", lambda x: f"-{x!s}-"),
208
+ ]
209
+ result = df.to_string(formatters=dict(formatters))
210
+ result2 = df.to_string(formatters=list(zip(*formatters))[1])
211
+ assert result == (
212
+ " int float object\n"
213
+ "0 0x1 [ 1.0] -(1, 2)-\n"
214
+ "1 0x2 [ 2.0] -True-\n"
215
+ "2 0x3 [ 3.0] -False-"
216
+ )
217
+ assert result == result2
218
+
219
+
220
+ def test_to_string_with_datetime64_monthformatter():
221
+ months = [datetime(2016, 1, 1), datetime(2016, 2, 2)]
222
+ x = DataFrame({"months": months})
223
+
224
+ def format_func(x):
225
+ return x.strftime("%Y-%m")
226
+
227
+ result = x.to_string(formatters={"months": format_func})
228
+ expected = dedent(
229
+ """\
230
+ months
231
+ 0 2016-01
232
+ 1 2016-02"""
233
+ )
234
+ assert result.strip() == expected
235
+
236
+
237
+ def test_to_string_with_datetime64_hourformatter():
238
+ x = DataFrame(
239
+ {"hod": to_datetime(["10:10:10.100", "12:12:12.120"], format="%H:%M:%S.%f")}
240
+ )
241
+
242
+ def format_func(x):
243
+ return x.strftime("%H:%M")
244
+
245
+ result = x.to_string(formatters={"hod": format_func})
246
+ expected = dedent(
247
+ """\
248
+ hod
249
+ 0 10:10
250
+ 1 12:12"""
251
+ )
252
+ assert result.strip() == expected
253
+
254
+
255
+ def test_to_string_with_formatters_unicode():
256
+ df = DataFrame({"c/\u03c3": [1, 2, 3]})
257
+ result = df.to_string(formatters={"c/\u03c3": str})
258
+ expected = dedent(
259
+ """\
260
+ c/\u03c3
261
+ 0 1
262
+ 1 2
263
+ 2 3"""
264
+ )
265
+ assert result == expected
266
+
267
+
268
+ def test_to_string_complex_number_trims_zeros():
269
+ s = Series([1.000000 + 1.000000j, 1.0 + 1.0j, 1.05 + 1.0j])
270
+ result = s.to_string()
271
+ expected = dedent(
272
+ """\
273
+ 0 1.00+1.00j
274
+ 1 1.00+1.00j
275
+ 2 1.05+1.00j"""
276
+ )
277
+ assert result == expected
278
+
279
+
280
+ def test_nullable_float_to_string(float_ea_dtype):
281
+ # https://github.com/pandas-dev/pandas/issues/36775
282
+ dtype = float_ea_dtype
283
+ s = Series([0.0, 1.0, None], dtype=dtype)
284
+ result = s.to_string()
285
+ expected = dedent(
286
+ """\
287
+ 0 0.0
288
+ 1 1.0
289
+ 2 <NA>"""
290
+ )
291
+ assert result == expected
292
+
293
+
294
+ def test_nullable_int_to_string(any_int_ea_dtype):
295
+ # https://github.com/pandas-dev/pandas/issues/36775
296
+ dtype = any_int_ea_dtype
297
+ s = Series([0, 1, None], dtype=dtype)
298
+ result = s.to_string()
299
+ expected = dedent(
300
+ """\
301
+ 0 0
302
+ 1 1
303
+ 2 <NA>"""
304
+ )
305
+ assert result == expected
306
+
307
+
308
+ @pytest.mark.parametrize("na_rep", ["NaN", "Ted"])
309
+ def test_to_string_na_rep_and_float_format(na_rep):
310
+ # GH 13828
311
+ df = DataFrame([["A", 1.2225], ["A", None]], columns=["Group", "Data"])
312
+ result = df.to_string(na_rep=na_rep, float_format="{:.2f}".format)
313
+ expected = dedent(
314
+ f"""\
315
+ Group Data
316
+ 0 A 1.22
317
+ 1 A {na_rep}"""
318
+ )
319
+ assert result == expected
320
+
321
+
322
+ @pytest.mark.parametrize(
323
+ "data,expected",
324
+ [
325
+ (
326
+ {"col1": [1, 2], "col2": [3, 4]},
327
+ " col1 col2\n0 1 3\n1 2 4",
328
+ ),
329
+ (
330
+ {"col1": ["Abc", 0.756], "col2": [np.nan, 4.5435]},
331
+ " col1 col2\n0 Abc NaN\n1 0.756 4.5435",
332
+ ),
333
+ (
334
+ {"col1": [np.nan, "a"], "col2": [0.009, 3.543], "col3": ["Abc", 23]},
335
+ " col1 col2 col3\n0 NaN 0.009 Abc\n1 a 3.543 23",
336
+ ),
337
+ ],
338
+ )
339
+ def test_to_string_max_rows_zero(data, expected):
340
+ # GH35394
341
+ result = DataFrame(data=data).to_string(max_rows=0)
342
+ assert result == expected
343
+
344
+
345
+ @td.skip_if_no("pyarrow")
346
+ def test_to_string_string_dtype():
347
+ # GH#50099
348
+ df = DataFrame({"x": ["foo", "bar", "baz"], "y": ["a", "b", "c"], "z": [1, 2, 3]})
349
+ df = df.astype(
350
+ {"x": "string[pyarrow]", "y": "string[python]", "z": "int64[pyarrow]"}
351
+ )
352
+ result = df.dtypes.to_string()
353
+ expected = dedent(
354
+ """\
355
+ x string[pyarrow]
356
+ y string[python]
357
+ z int64[pyarrow]"""
358
+ )
359
+ assert result == expected
videochat2/lib/python3.10/site-packages/pandas/tests/io/generate_legacy_storage_files.py ADDED
@@ -0,0 +1,341 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ self-contained to write legacy storage pickle files
3
+
4
+ To use this script. Create an environment where you want
5
+ generate pickles, say its for 0.20.3, with your pandas clone
6
+ in ~/pandas
7
+
8
+ . activate pandas_0.20.3
9
+ cd ~/pandas/pandas
10
+
11
+ $ python -m tests.io.generate_legacy_storage_files \
12
+ tests/io/data/legacy_pickle/0.20.3/ pickle
13
+
14
+ This script generates a storage file for the current arch, system,
15
+ and python version
16
+ pandas version: 0.20.3
17
+ output dir : pandas/pandas/tests/io/data/legacy_pickle/0.20.3/
18
+ storage format: pickle
19
+ created pickle file: 0.20.3_x86_64_darwin_3.5.2.pickle
20
+
21
+ The idea here is you are using the *current* version of the
22
+ generate_legacy_storage_files with an *older* version of pandas to
23
+ generate a pickle file. We will then check this file into a current
24
+ branch, and test using test_pickle.py. This will load the *older*
25
+ pickles and test versus the current data that is generated
26
+ (with main). These are then compared.
27
+
28
+ If we have cases where we changed the signature (e.g. we renamed
29
+ offset -> freq in Timestamp). Then we have to conditionally execute
30
+ in the generate_legacy_storage_files.py to make it
31
+ run under the older AND the newer version.
32
+
33
+ """
34
+
35
+ from datetime import timedelta
36
+ import os
37
+ import pickle
38
+ import platform as pl
39
+ import sys
40
+
41
+ import numpy as np
42
+
43
+ import pandas
44
+ from pandas import (
45
+ Categorical,
46
+ DataFrame,
47
+ Index,
48
+ MultiIndex,
49
+ NaT,
50
+ Period,
51
+ RangeIndex,
52
+ Series,
53
+ Timestamp,
54
+ bdate_range,
55
+ date_range,
56
+ interval_range,
57
+ period_range,
58
+ timedelta_range,
59
+ )
60
+ from pandas.arrays import SparseArray
61
+
62
+ from pandas.tseries.offsets import (
63
+ FY5253,
64
+ BusinessDay,
65
+ BusinessHour,
66
+ CustomBusinessDay,
67
+ DateOffset,
68
+ Day,
69
+ Easter,
70
+ Hour,
71
+ LastWeekOfMonth,
72
+ Minute,
73
+ MonthBegin,
74
+ MonthEnd,
75
+ QuarterBegin,
76
+ QuarterEnd,
77
+ SemiMonthBegin,
78
+ SemiMonthEnd,
79
+ Week,
80
+ WeekOfMonth,
81
+ YearBegin,
82
+ YearEnd,
83
+ )
84
+
85
+
86
+ def _create_sp_series():
87
+ nan = np.nan
88
+
89
+ # nan-based
90
+ arr = np.arange(15, dtype=np.float64)
91
+ arr[7:12] = nan
92
+ arr[-1:] = nan
93
+
94
+ bseries = Series(SparseArray(arr, kind="block"))
95
+ bseries.name = "bseries"
96
+ return bseries
97
+
98
+
99
+ def _create_sp_tsseries():
100
+ nan = np.nan
101
+
102
+ # nan-based
103
+ arr = np.arange(15, dtype=np.float64)
104
+ arr[7:12] = nan
105
+ arr[-1:] = nan
106
+
107
+ date_index = bdate_range("1/1/2011", periods=len(arr))
108
+ bseries = Series(SparseArray(arr, kind="block"), index=date_index)
109
+ bseries.name = "btsseries"
110
+ return bseries
111
+
112
+
113
+ def _create_sp_frame():
114
+ nan = np.nan
115
+
116
+ data = {
117
+ "A": [nan, nan, nan, 0, 1, 2, 3, 4, 5, 6],
118
+ "B": [0, 1, 2, nan, nan, nan, 3, 4, 5, 6],
119
+ "C": np.arange(10).astype(np.int64),
120
+ "D": [0, 1, 2, 3, 4, 5, nan, nan, nan, nan],
121
+ }
122
+
123
+ dates = bdate_range("1/1/2011", periods=10)
124
+ return DataFrame(data, index=dates).apply(SparseArray)
125
+
126
+
127
+ def create_data():
128
+ """create the pickle data"""
129
+ data = {
130
+ "A": [0.0, 1.0, 2.0, 3.0, np.nan],
131
+ "B": [0, 1, 0, 1, 0],
132
+ "C": ["foo1", "foo2", "foo3", "foo4", "foo5"],
133
+ "D": date_range("1/1/2009", periods=5),
134
+ "E": [0.0, 1, Timestamp("20100101"), "foo", 2.0],
135
+ }
136
+
137
+ scalars = {"timestamp": Timestamp("20130101"), "period": Period("2012", "M")}
138
+
139
+ index = {
140
+ "int": Index(np.arange(10)),
141
+ "date": date_range("20130101", periods=10),
142
+ "period": period_range("2013-01-01", freq="M", periods=10),
143
+ "float": Index(np.arange(10, dtype=np.float64)),
144
+ "uint": Index(np.arange(10, dtype=np.uint64)),
145
+ "timedelta": timedelta_range("00:00:00", freq="30T", periods=10),
146
+ }
147
+
148
+ index["range"] = RangeIndex(10)
149
+
150
+ index["interval"] = interval_range(0, periods=10)
151
+
152
+ mi = {
153
+ "reg2": MultiIndex.from_tuples(
154
+ tuple(
155
+ zip(
156
+ *[
157
+ ["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"],
158
+ ["one", "two", "one", "two", "one", "two", "one", "two"],
159
+ ]
160
+ )
161
+ ),
162
+ names=["first", "second"],
163
+ )
164
+ }
165
+
166
+ series = {
167
+ "float": Series(data["A"]),
168
+ "int": Series(data["B"]),
169
+ "mixed": Series(data["E"]),
170
+ "ts": Series(
171
+ np.arange(10).astype(np.int64), index=date_range("20130101", periods=10)
172
+ ),
173
+ "mi": Series(
174
+ np.arange(5).astype(np.float64),
175
+ index=MultiIndex.from_tuples(
176
+ tuple(zip(*[[1, 1, 2, 2, 2], [3, 4, 3, 4, 5]])), names=["one", "two"]
177
+ ),
178
+ ),
179
+ "dup": Series(np.arange(5).astype(np.float64), index=["A", "B", "C", "D", "A"]),
180
+ "cat": Series(Categorical(["foo", "bar", "baz"])),
181
+ "dt": Series(date_range("20130101", periods=5)),
182
+ "dt_tz": Series(date_range("20130101", periods=5, tz="US/Eastern")),
183
+ "period": Series([Period("2000Q1")] * 5),
184
+ }
185
+
186
+ mixed_dup_df = DataFrame(data)
187
+ mixed_dup_df.columns = list("ABCDA")
188
+ frame = {
189
+ "float": DataFrame({"A": series["float"], "B": series["float"] + 1}),
190
+ "int": DataFrame({"A": series["int"], "B": series["int"] + 1}),
191
+ "mixed": DataFrame({k: data[k] for k in ["A", "B", "C", "D"]}),
192
+ "mi": DataFrame(
193
+ {"A": np.arange(5).astype(np.float64), "B": np.arange(5).astype(np.int64)},
194
+ index=MultiIndex.from_tuples(
195
+ tuple(
196
+ zip(
197
+ *[
198
+ ["bar", "bar", "baz", "baz", "baz"],
199
+ ["one", "two", "one", "two", "three"],
200
+ ]
201
+ )
202
+ ),
203
+ names=["first", "second"],
204
+ ),
205
+ ),
206
+ "dup": DataFrame(
207
+ np.arange(15).reshape(5, 3).astype(np.float64), columns=["A", "B", "A"]
208
+ ),
209
+ "cat_onecol": DataFrame({"A": Categorical(["foo", "bar"])}),
210
+ "cat_and_float": DataFrame(
211
+ {
212
+ "A": Categorical(["foo", "bar", "baz"]),
213
+ "B": np.arange(3).astype(np.int64),
214
+ }
215
+ ),
216
+ "mixed_dup": mixed_dup_df,
217
+ "dt_mixed_tzs": DataFrame(
218
+ {
219
+ "A": Timestamp("20130102", tz="US/Eastern"),
220
+ "B": Timestamp("20130603", tz="CET"),
221
+ },
222
+ index=range(5),
223
+ ),
224
+ "dt_mixed2_tzs": DataFrame(
225
+ {
226
+ "A": Timestamp("20130102", tz="US/Eastern"),
227
+ "B": Timestamp("20130603", tz="CET"),
228
+ "C": Timestamp("20130603", tz="UTC"),
229
+ },
230
+ index=range(5),
231
+ ),
232
+ }
233
+
234
+ cat = {
235
+ "int8": Categorical(list("abcdefg")),
236
+ "int16": Categorical(np.arange(1000)),
237
+ "int32": Categorical(np.arange(10000)),
238
+ }
239
+
240
+ timestamp = {
241
+ "normal": Timestamp("2011-01-01"),
242
+ "nat": NaT,
243
+ "tz": Timestamp("2011-01-01", tz="US/Eastern"),
244
+ }
245
+
246
+ off = {
247
+ "DateOffset": DateOffset(years=1),
248
+ "DateOffset_h_ns": DateOffset(hour=6, nanoseconds=5824),
249
+ "BusinessDay": BusinessDay(offset=timedelta(seconds=9)),
250
+ "BusinessHour": BusinessHour(normalize=True, n=6, end="15:14"),
251
+ "CustomBusinessDay": CustomBusinessDay(weekmask="Mon Fri"),
252
+ "SemiMonthBegin": SemiMonthBegin(day_of_month=9),
253
+ "SemiMonthEnd": SemiMonthEnd(day_of_month=24),
254
+ "MonthBegin": MonthBegin(1),
255
+ "MonthEnd": MonthEnd(1),
256
+ "QuarterBegin": QuarterBegin(1),
257
+ "QuarterEnd": QuarterEnd(1),
258
+ "Day": Day(1),
259
+ "YearBegin": YearBegin(1),
260
+ "YearEnd": YearEnd(1),
261
+ "Week": Week(1),
262
+ "Week_Tues": Week(2, normalize=False, weekday=1),
263
+ "WeekOfMonth": WeekOfMonth(week=3, weekday=4),
264
+ "LastWeekOfMonth": LastWeekOfMonth(n=1, weekday=3),
265
+ "FY5253": FY5253(n=2, weekday=6, startingMonth=7, variation="last"),
266
+ "Easter": Easter(),
267
+ "Hour": Hour(1),
268
+ "Minute": Minute(1),
269
+ }
270
+
271
+ return {
272
+ "series": series,
273
+ "frame": frame,
274
+ "index": index,
275
+ "scalars": scalars,
276
+ "mi": mi,
277
+ "sp_series": {"float": _create_sp_series(), "ts": _create_sp_tsseries()},
278
+ "sp_frame": {"float": _create_sp_frame()},
279
+ "cat": cat,
280
+ "timestamp": timestamp,
281
+ "offsets": off,
282
+ }
283
+
284
+
285
+ def create_pickle_data():
286
+ data = create_data()
287
+
288
+ return data
289
+
290
+
291
+ def platform_name():
292
+ return "_".join(
293
+ [
294
+ str(pandas.__version__),
295
+ str(pl.machine()),
296
+ str(pl.system().lower()),
297
+ str(pl.python_version()),
298
+ ]
299
+ )
300
+
301
+
302
+ def write_legacy_pickles(output_dir):
303
+ version = pandas.__version__
304
+
305
+ print(
306
+ "This script generates a storage file for the current arch, system, "
307
+ "and python version"
308
+ )
309
+ print(f" pandas version: {version}")
310
+ print(f" output dir : {output_dir}")
311
+ print(" storage format: pickle")
312
+
313
+ pth = f"{platform_name()}.pickle"
314
+
315
+ with open(os.path.join(output_dir, pth), "wb") as fh:
316
+ pickle.dump(create_pickle_data(), fh, pickle.DEFAULT_PROTOCOL)
317
+
318
+ print(f"created pickle file: {pth}")
319
+
320
+
321
+ def write_legacy_file():
322
+ # force our cwd to be the first searched
323
+ sys.path.insert(0, ".")
324
+
325
+ if not 3 <= len(sys.argv) <= 4:
326
+ sys.exit(
327
+ "Specify output directory and storage type: generate_legacy_"
328
+ "storage_files.py <output_dir> <storage_type> "
329
+ )
330
+
331
+ output_dir = str(sys.argv[1])
332
+ storage_type = str(sys.argv[2])
333
+
334
+ if storage_type == "pickle":
335
+ write_legacy_pickles(output_dir=output_dir)
336
+ else:
337
+ sys.exit("storage_type must be one of {'pickle'}")
338
+
339
+
340
+ if __name__ == "__main__":
341
+ write_legacy_file()
videochat2/lib/python3.10/site-packages/pandas/tests/io/json/__init__.py ADDED
File without changes
videochat2/lib/python3.10/site-packages/pandas/tests/io/json/test_compression.py ADDED
@@ -0,0 +1,120 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from io import BytesIO
2
+
3
+ import pytest
4
+
5
+ import pandas.util._test_decorators as td
6
+
7
+ import pandas as pd
8
+ import pandas._testing as tm
9
+ from pandas.tests.io.test_compression import _compression_to_extension
10
+
11
+
12
+ def test_compression_roundtrip(compression):
13
+ df = pd.DataFrame(
14
+ [[0.123456, 0.234567, 0.567567], [12.32112, 123123.2, 321321.2]],
15
+ index=["A", "B"],
16
+ columns=["X", "Y", "Z"],
17
+ )
18
+
19
+ with tm.ensure_clean() as path:
20
+ df.to_json(path, compression=compression)
21
+ tm.assert_frame_equal(df, pd.read_json(path, compression=compression))
22
+
23
+ # explicitly ensure file was compressed.
24
+ with tm.decompress_file(path, compression) as fh:
25
+ result = fh.read().decode("utf8")
26
+ tm.assert_frame_equal(df, pd.read_json(result))
27
+
28
+
29
+ def test_read_zipped_json(datapath):
30
+ uncompressed_path = datapath("io", "json", "data", "tsframe_v012.json")
31
+ uncompressed_df = pd.read_json(uncompressed_path)
32
+
33
+ compressed_path = datapath("io", "json", "data", "tsframe_v012.json.zip")
34
+ compressed_df = pd.read_json(compressed_path, compression="zip")
35
+
36
+ tm.assert_frame_equal(uncompressed_df, compressed_df)
37
+
38
+
39
+ @td.skip_if_not_us_locale
40
+ @pytest.mark.single_cpu
41
+ def test_with_s3_url(compression, s3_resource, s3so):
42
+ # Bucket "pandas-test" created in tests/io/conftest.py
43
+
44
+ df = pd.read_json('{"a": [1, 2, 3], "b": [4, 5, 6]}')
45
+
46
+ with tm.ensure_clean() as path:
47
+ df.to_json(path, compression=compression)
48
+ with open(path, "rb") as f:
49
+ s3_resource.Bucket("pandas-test").put_object(Key="test-1", Body=f)
50
+
51
+ roundtripped_df = pd.read_json(
52
+ "s3://pandas-test/test-1", compression=compression, storage_options=s3so
53
+ )
54
+ tm.assert_frame_equal(df, roundtripped_df)
55
+
56
+
57
+ def test_lines_with_compression(compression):
58
+ with tm.ensure_clean() as path:
59
+ df = pd.read_json('{"a": [1, 2, 3], "b": [4, 5, 6]}')
60
+ df.to_json(path, orient="records", lines=True, compression=compression)
61
+ roundtripped_df = pd.read_json(path, lines=True, compression=compression)
62
+ tm.assert_frame_equal(df, roundtripped_df)
63
+
64
+
65
+ def test_chunksize_with_compression(compression):
66
+ with tm.ensure_clean() as path:
67
+ df = pd.read_json('{"a": ["foo", "bar", "baz"], "b": [4, 5, 6]}')
68
+ df.to_json(path, orient="records", lines=True, compression=compression)
69
+
70
+ with pd.read_json(
71
+ path, lines=True, chunksize=1, compression=compression
72
+ ) as res:
73
+ roundtripped_df = pd.concat(res)
74
+ tm.assert_frame_equal(df, roundtripped_df)
75
+
76
+
77
+ def test_write_unsupported_compression_type():
78
+ df = pd.read_json('{"a": [1, 2, 3], "b": [4, 5, 6]}')
79
+ with tm.ensure_clean() as path:
80
+ msg = "Unrecognized compression type: unsupported"
81
+ with pytest.raises(ValueError, match=msg):
82
+ df.to_json(path, compression="unsupported")
83
+
84
+
85
+ def test_read_unsupported_compression_type():
86
+ with tm.ensure_clean() as path:
87
+ msg = "Unrecognized compression type: unsupported"
88
+ with pytest.raises(ValueError, match=msg):
89
+ pd.read_json(path, compression="unsupported")
90
+
91
+
92
+ @pytest.mark.parametrize("to_infer", [True, False])
93
+ @pytest.mark.parametrize("read_infer", [True, False])
94
+ def test_to_json_compression(compression_only, read_infer, to_infer):
95
+ # see gh-15008
96
+ compression = compression_only
97
+
98
+ # We'll complete file extension subsequently.
99
+ filename = "test."
100
+ filename += _compression_to_extension[compression]
101
+
102
+ df = pd.DataFrame({"A": [1]})
103
+
104
+ to_compression = "infer" if to_infer else compression
105
+ read_compression = "infer" if read_infer else compression
106
+
107
+ with tm.ensure_clean(filename) as path:
108
+ df.to_json(path, compression=to_compression)
109
+ result = pd.read_json(path, compression=read_compression)
110
+ tm.assert_frame_equal(result, df)
111
+
112
+
113
+ def test_to_json_compression_mode(compression):
114
+ # GH 39985 (read_json does not support user-provided binary files)
115
+ expected = pd.DataFrame({"A": [1]})
116
+
117
+ with BytesIO() as buffer:
118
+ expected.to_json(buffer, compression=compression)
119
+ # df = pd.read_json(buffer, compression=compression)
120
+ # tm.assert_frame_equal(expected, df)
videochat2/lib/python3.10/site-packages/pandas/tests/io/json/test_deprecated_kwargs.py ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Tests for the deprecated keyword arguments for `read_json`.
3
+ """
4
+
5
+ import pandas as pd
6
+ import pandas._testing as tm
7
+
8
+ from pandas.io.json import read_json
9
+
10
+
11
+ def test_good_kwargs():
12
+ df = pd.DataFrame({"A": [2, 4, 6], "B": [3, 6, 9]}, index=[0, 1, 2])
13
+ with tm.assert_produces_warning(None):
14
+ tm.assert_frame_equal(df, read_json(df.to_json(orient="split"), orient="split"))
15
+ tm.assert_frame_equal(
16
+ df, read_json(df.to_json(orient="columns"), orient="columns")
17
+ )
18
+ tm.assert_frame_equal(df, read_json(df.to_json(orient="index"), orient="index"))
videochat2/lib/python3.10/site-packages/pandas/tests/io/json/test_json_table_schema_ext_dtype.py ADDED
@@ -0,0 +1,316 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Tests for ExtensionDtype Table Schema integration."""
2
+
3
+ from collections import OrderedDict
4
+ import datetime as dt
5
+ import decimal
6
+ import json
7
+
8
+ import pytest
9
+
10
+ from pandas import (
11
+ NA,
12
+ DataFrame,
13
+ Index,
14
+ array,
15
+ read_json,
16
+ )
17
+ import pandas._testing as tm
18
+ from pandas.core.arrays.integer import Int64Dtype
19
+ from pandas.core.arrays.string_ import StringDtype
20
+ from pandas.core.series import Series
21
+ from pandas.tests.extension.date import (
22
+ DateArray,
23
+ DateDtype,
24
+ )
25
+ from pandas.tests.extension.decimal.array import (
26
+ DecimalArray,
27
+ DecimalDtype,
28
+ )
29
+
30
+ from pandas.io.json._table_schema import (
31
+ as_json_table_type,
32
+ build_table_schema,
33
+ )
34
+
35
+
36
+ class TestBuildSchema:
37
+ def test_build_table_schema(self):
38
+ df = DataFrame(
39
+ {
40
+ "A": DateArray([dt.date(2021, 10, 10)]),
41
+ "B": DecimalArray([decimal.Decimal(10)]),
42
+ "C": array(["pandas"], dtype="string"),
43
+ "D": array([10], dtype="Int64"),
44
+ }
45
+ )
46
+ result = build_table_schema(df, version=False)
47
+ expected = {
48
+ "fields": [
49
+ {"name": "index", "type": "integer"},
50
+ {"name": "A", "type": "any", "extDtype": "DateDtype"},
51
+ {"name": "B", "type": "number", "extDtype": "decimal"},
52
+ {"name": "C", "type": "any", "extDtype": "string"},
53
+ {"name": "D", "type": "integer", "extDtype": "Int64"},
54
+ ],
55
+ "primaryKey": ["index"],
56
+ }
57
+ assert result == expected
58
+ result = build_table_schema(df)
59
+ assert "pandas_version" in result
60
+
61
+
62
+ class TestTableSchemaType:
63
+ @pytest.mark.parametrize(
64
+ "date_data",
65
+ [
66
+ DateArray([dt.date(2021, 10, 10)]),
67
+ DateArray(dt.date(2021, 10, 10)),
68
+ Series(DateArray(dt.date(2021, 10, 10))),
69
+ ],
70
+ )
71
+ def test_as_json_table_type_ext_date_array_dtype(self, date_data):
72
+ assert as_json_table_type(date_data.dtype) == "any"
73
+
74
+ def test_as_json_table_type_ext_date_dtype(self):
75
+ assert as_json_table_type(DateDtype()) == "any"
76
+
77
+ @pytest.mark.parametrize(
78
+ "decimal_data",
79
+ [
80
+ DecimalArray([decimal.Decimal(10)]),
81
+ Series(DecimalArray([decimal.Decimal(10)])),
82
+ ],
83
+ )
84
+ def test_as_json_table_type_ext_decimal_array_dtype(self, decimal_data):
85
+ assert as_json_table_type(decimal_data.dtype) == "number"
86
+
87
+ def test_as_json_table_type_ext_decimal_dtype(self):
88
+ assert as_json_table_type(DecimalDtype()) == "number"
89
+
90
+ @pytest.mark.parametrize(
91
+ "string_data",
92
+ [
93
+ array(["pandas"], dtype="string"),
94
+ Series(array(["pandas"], dtype="string")),
95
+ ],
96
+ )
97
+ def test_as_json_table_type_ext_string_array_dtype(self, string_data):
98
+ assert as_json_table_type(string_data.dtype) == "any"
99
+
100
+ def test_as_json_table_type_ext_string_dtype(self):
101
+ assert as_json_table_type(StringDtype()) == "any"
102
+
103
+ @pytest.mark.parametrize(
104
+ "integer_data",
105
+ [
106
+ array([10], dtype="Int64"),
107
+ Series(array([10], dtype="Int64")),
108
+ ],
109
+ )
110
+ def test_as_json_table_type_ext_integer_array_dtype(self, integer_data):
111
+ assert as_json_table_type(integer_data.dtype) == "integer"
112
+
113
+ def test_as_json_table_type_ext_integer_dtype(self):
114
+ assert as_json_table_type(Int64Dtype()) == "integer"
115
+
116
+
117
+ class TestTableOrient:
118
+ @pytest.fixture
119
+ def da(self):
120
+ return DateArray([dt.date(2021, 10, 10)])
121
+
122
+ @pytest.fixture
123
+ def dc(self):
124
+ return DecimalArray([decimal.Decimal(10)])
125
+
126
+ @pytest.fixture
127
+ def sa(self):
128
+ return array(["pandas"], dtype="string")
129
+
130
+ @pytest.fixture
131
+ def ia(self):
132
+ return array([10], dtype="Int64")
133
+
134
+ @pytest.fixture
135
+ def df(self, da, dc, sa, ia):
136
+ return DataFrame(
137
+ {
138
+ "A": da,
139
+ "B": dc,
140
+ "C": sa,
141
+ "D": ia,
142
+ }
143
+ )
144
+
145
+ def test_build_date_series(self, da):
146
+ s = Series(da, name="a")
147
+ s.index.name = "id"
148
+ result = s.to_json(orient="table", date_format="iso")
149
+ result = json.loads(result, object_pairs_hook=OrderedDict)
150
+
151
+ assert "pandas_version" in result["schema"]
152
+ result["schema"].pop("pandas_version")
153
+
154
+ fields = [
155
+ {"name": "id", "type": "integer"},
156
+ {"name": "a", "type": "any", "extDtype": "DateDtype"},
157
+ ]
158
+
159
+ schema = {"fields": fields, "primaryKey": ["id"]}
160
+
161
+ expected = OrderedDict(
162
+ [
163
+ ("schema", schema),
164
+ ("data", [OrderedDict([("id", 0), ("a", "2021-10-10T00:00:00.000")])]),
165
+ ]
166
+ )
167
+
168
+ assert result == expected
169
+
170
+ def test_build_decimal_series(self, dc):
171
+ s = Series(dc, name="a")
172
+ s.index.name = "id"
173
+ result = s.to_json(orient="table", date_format="iso")
174
+ result = json.loads(result, object_pairs_hook=OrderedDict)
175
+
176
+ assert "pandas_version" in result["schema"]
177
+ result["schema"].pop("pandas_version")
178
+
179
+ fields = [
180
+ {"name": "id", "type": "integer"},
181
+ {"name": "a", "type": "number", "extDtype": "decimal"},
182
+ ]
183
+
184
+ schema = {"fields": fields, "primaryKey": ["id"]}
185
+
186
+ expected = OrderedDict(
187
+ [
188
+ ("schema", schema),
189
+ ("data", [OrderedDict([("id", 0), ("a", 10.0)])]),
190
+ ]
191
+ )
192
+
193
+ assert result == expected
194
+
195
+ def test_build_string_series(self, sa):
196
+ s = Series(sa, name="a")
197
+ s.index.name = "id"
198
+ result = s.to_json(orient="table", date_format="iso")
199
+ result = json.loads(result, object_pairs_hook=OrderedDict)
200
+
201
+ assert "pandas_version" in result["schema"]
202
+ result["schema"].pop("pandas_version")
203
+
204
+ fields = [
205
+ {"name": "id", "type": "integer"},
206
+ {"name": "a", "type": "any", "extDtype": "string"},
207
+ ]
208
+
209
+ schema = {"fields": fields, "primaryKey": ["id"]}
210
+
211
+ expected = OrderedDict(
212
+ [
213
+ ("schema", schema),
214
+ ("data", [OrderedDict([("id", 0), ("a", "pandas")])]),
215
+ ]
216
+ )
217
+
218
+ assert result == expected
219
+
220
+ def test_build_int64_series(self, ia):
221
+ s = Series(ia, name="a")
222
+ s.index.name = "id"
223
+ result = s.to_json(orient="table", date_format="iso")
224
+ result = json.loads(result, object_pairs_hook=OrderedDict)
225
+
226
+ assert "pandas_version" in result["schema"]
227
+ result["schema"].pop("pandas_version")
228
+
229
+ fields = [
230
+ {"name": "id", "type": "integer"},
231
+ {"name": "a", "type": "integer", "extDtype": "Int64"},
232
+ ]
233
+
234
+ schema = {"fields": fields, "primaryKey": ["id"]}
235
+
236
+ expected = OrderedDict(
237
+ [
238
+ ("schema", schema),
239
+ ("data", [OrderedDict([("id", 0), ("a", 10)])]),
240
+ ]
241
+ )
242
+
243
+ assert result == expected
244
+
245
+ def test_to_json(self, df):
246
+ df = df.copy()
247
+ df.index.name = "idx"
248
+ result = df.to_json(orient="table", date_format="iso")
249
+ result = json.loads(result, object_pairs_hook=OrderedDict)
250
+
251
+ assert "pandas_version" in result["schema"]
252
+ result["schema"].pop("pandas_version")
253
+
254
+ fields = [
255
+ OrderedDict({"name": "idx", "type": "integer"}),
256
+ OrderedDict({"name": "A", "type": "any", "extDtype": "DateDtype"}),
257
+ OrderedDict({"name": "B", "type": "number", "extDtype": "decimal"}),
258
+ OrderedDict({"name": "C", "type": "any", "extDtype": "string"}),
259
+ OrderedDict({"name": "D", "type": "integer", "extDtype": "Int64"}),
260
+ ]
261
+
262
+ schema = OrderedDict({"fields": fields, "primaryKey": ["idx"]})
263
+ data = [
264
+ OrderedDict(
265
+ [
266
+ ("idx", 0),
267
+ ("A", "2021-10-10T00:00:00.000"),
268
+ ("B", 10.0),
269
+ ("C", "pandas"),
270
+ ("D", 10),
271
+ ]
272
+ )
273
+ ]
274
+ expected = OrderedDict([("schema", schema), ("data", data)])
275
+
276
+ assert result == expected
277
+
278
+ def test_json_ext_dtype_reading_roundtrip(self):
279
+ # GH#40255
280
+ df = DataFrame(
281
+ {
282
+ "a": Series([2, NA], dtype="Int64"),
283
+ "b": Series([1.5, NA], dtype="Float64"),
284
+ "c": Series([True, NA], dtype="boolean"),
285
+ },
286
+ index=Index([1, NA], dtype="Int64"),
287
+ )
288
+ expected = df.copy()
289
+ data_json = df.to_json(orient="table", indent=4)
290
+ result = read_json(data_json, orient="table")
291
+ tm.assert_frame_equal(result, expected)
292
+
293
+ def test_json_ext_dtype_reading(self):
294
+ # GH#40255
295
+ data_json = """{
296
+ "schema":{
297
+ "fields":[
298
+ {
299
+ "name":"a",
300
+ "type":"integer",
301
+ "extDtype":"Int64"
302
+ }
303
+ ],
304
+ },
305
+ "data":[
306
+ {
307
+ "a":2
308
+ },
309
+ {
310
+ "a":null
311
+ }
312
+ ]
313
+ }"""
314
+ result = read_json(data_json, orient="table")
315
+ expected = DataFrame({"a": Series([2, NA], dtype="Int64")})
316
+ tm.assert_frame_equal(result, expected)
videochat2/lib/python3.10/site-packages/pandas/tests/io/json/test_normalize.py ADDED
@@ -0,0 +1,893 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+
3
+ import numpy as np
4
+ import pytest
5
+
6
+ from pandas import (
7
+ DataFrame,
8
+ Index,
9
+ Series,
10
+ json_normalize,
11
+ )
12
+ import pandas._testing as tm
13
+
14
+ from pandas.io.json._normalize import nested_to_record
15
+
16
+
17
+ @pytest.fixture
18
+ def deep_nested():
19
+ # deeply nested data
20
+ return [
21
+ {
22
+ "country": "USA",
23
+ "states": [
24
+ {
25
+ "name": "California",
26
+ "cities": [
27
+ {"name": "San Francisco", "pop": 12345},
28
+ {"name": "Los Angeles", "pop": 12346},
29
+ ],
30
+ },
31
+ {
32
+ "name": "Ohio",
33
+ "cities": [
34
+ {"name": "Columbus", "pop": 1234},
35
+ {"name": "Cleveland", "pop": 1236},
36
+ ],
37
+ },
38
+ ],
39
+ },
40
+ {
41
+ "country": "Germany",
42
+ "states": [
43
+ {"name": "Bayern", "cities": [{"name": "Munich", "pop": 12347}]},
44
+ {
45
+ "name": "Nordrhein-Westfalen",
46
+ "cities": [
47
+ {"name": "Duesseldorf", "pop": 1238},
48
+ {"name": "Koeln", "pop": 1239},
49
+ ],
50
+ },
51
+ ],
52
+ },
53
+ ]
54
+
55
+
56
+ @pytest.fixture
57
+ def state_data():
58
+ return [
59
+ {
60
+ "counties": [
61
+ {"name": "Dade", "population": 12345},
62
+ {"name": "Broward", "population": 40000},
63
+ {"name": "Palm Beach", "population": 60000},
64
+ ],
65
+ "info": {"governor": "Rick Scott"},
66
+ "shortname": "FL",
67
+ "state": "Florida",
68
+ },
69
+ {
70
+ "counties": [
71
+ {"name": "Summit", "population": 1234},
72
+ {"name": "Cuyahoga", "population": 1337},
73
+ ],
74
+ "info": {"governor": "John Kasich"},
75
+ "shortname": "OH",
76
+ "state": "Ohio",
77
+ },
78
+ ]
79
+
80
+
81
+ @pytest.fixture
82
+ def author_missing_data():
83
+ return [
84
+ {"info": None},
85
+ {
86
+ "info": {"created_at": "11/08/1993", "last_updated": "26/05/2012"},
87
+ "author_name": {"first": "Jane", "last_name": "Doe"},
88
+ },
89
+ ]
90
+
91
+
92
+ @pytest.fixture
93
+ def missing_metadata():
94
+ return [
95
+ {
96
+ "name": "Alice",
97
+ "addresses": [
98
+ {
99
+ "number": 9562,
100
+ "street": "Morris St.",
101
+ "city": "Massillon",
102
+ "state": "OH",
103
+ "zip": 44646,
104
+ }
105
+ ],
106
+ "previous_residences": {"cities": [{"city_name": "Foo York City"}]},
107
+ },
108
+ {
109
+ "addresses": [
110
+ {
111
+ "number": 8449,
112
+ "street": "Spring St.",
113
+ "city": "Elizabethton",
114
+ "state": "TN",
115
+ "zip": 37643,
116
+ }
117
+ ],
118
+ "previous_residences": {"cities": [{"city_name": "Barmingham"}]},
119
+ },
120
+ ]
121
+
122
+
123
+ @pytest.fixture
124
+ def max_level_test_input_data():
125
+ """
126
+ input data to test json_normalize with max_level param
127
+ """
128
+ return [
129
+ {
130
+ "CreatedBy": {"Name": "User001"},
131
+ "Lookup": {
132
+ "TextField": "Some text",
133
+ "UserField": {"Id": "ID001", "Name": "Name001"},
134
+ },
135
+ "Image": {"a": "b"},
136
+ }
137
+ ]
138
+
139
+
140
+ class TestJSONNormalize:
141
+ def test_simple_records(self):
142
+ recs = [
143
+ {"a": 1, "b": 2, "c": 3},
144
+ {"a": 4, "b": 5, "c": 6},
145
+ {"a": 7, "b": 8, "c": 9},
146
+ {"a": 10, "b": 11, "c": 12},
147
+ ]
148
+
149
+ result = json_normalize(recs)
150
+ expected = DataFrame(recs)
151
+
152
+ tm.assert_frame_equal(result, expected)
153
+
154
+ def test_simple_normalize(self, state_data):
155
+ result = json_normalize(state_data[0], "counties")
156
+ expected = DataFrame(state_data[0]["counties"])
157
+ tm.assert_frame_equal(result, expected)
158
+
159
+ result = json_normalize(state_data, "counties")
160
+
161
+ expected = []
162
+ for rec in state_data:
163
+ expected.extend(rec["counties"])
164
+ expected = DataFrame(expected)
165
+
166
+ tm.assert_frame_equal(result, expected)
167
+
168
+ result = json_normalize(state_data, "counties", meta="state")
169
+ expected["state"] = np.array(["Florida", "Ohio"]).repeat([3, 2])
170
+
171
+ tm.assert_frame_equal(result, expected)
172
+
173
+ def test_empty_array(self):
174
+ result = json_normalize([])
175
+ expected = DataFrame()
176
+ tm.assert_frame_equal(result, expected)
177
+
178
+ @pytest.mark.parametrize(
179
+ "data, record_path, exception_type",
180
+ [
181
+ ([{"a": 0}, {"a": 1}], None, None),
182
+ ({"a": [{"a": 0}, {"a": 1}]}, "a", None),
183
+ ('{"a": [{"a": 0}, {"a": 1}]}', None, NotImplementedError),
184
+ (None, None, NotImplementedError),
185
+ ],
186
+ )
187
+ def test_accepted_input(self, data, record_path, exception_type):
188
+ if exception_type is not None:
189
+ with pytest.raises(exception_type, match=tm.EMPTY_STRING_PATTERN):
190
+ json_normalize(data, record_path=record_path)
191
+ else:
192
+ result = json_normalize(data, record_path=record_path)
193
+ expected = DataFrame([0, 1], columns=["a"])
194
+ tm.assert_frame_equal(result, expected)
195
+
196
+ def test_simple_normalize_with_separator(self, deep_nested):
197
+ # GH 14883
198
+ result = json_normalize({"A": {"A": 1, "B": 2}})
199
+ expected = DataFrame([[1, 2]], columns=["A.A", "A.B"])
200
+ tm.assert_frame_equal(result.reindex_like(expected), expected)
201
+
202
+ result = json_normalize({"A": {"A": 1, "B": 2}}, sep="_")
203
+ expected = DataFrame([[1, 2]], columns=["A_A", "A_B"])
204
+ tm.assert_frame_equal(result.reindex_like(expected), expected)
205
+
206
+ result = json_normalize({"A": {"A": 1, "B": 2}}, sep="\u03c3")
207
+ expected = DataFrame([[1, 2]], columns=["A\u03c3A", "A\u03c3B"])
208
+ tm.assert_frame_equal(result.reindex_like(expected), expected)
209
+
210
+ result = json_normalize(
211
+ deep_nested,
212
+ ["states", "cities"],
213
+ meta=["country", ["states", "name"]],
214
+ sep="_",
215
+ )
216
+ expected = Index(["name", "pop", "country", "states_name"]).sort_values()
217
+ assert result.columns.sort_values().equals(expected)
218
+
219
+ def test_normalize_with_multichar_separator(self):
220
+ # GH #43831
221
+ data = {"a": [1, 2], "b": {"b_1": 2, "b_2": (3, 4)}}
222
+ result = json_normalize(data, sep="__")
223
+ expected = DataFrame([[[1, 2], 2, (3, 4)]], columns=["a", "b__b_1", "b__b_2"])
224
+ tm.assert_frame_equal(result, expected)
225
+
226
+ def test_value_array_record_prefix(self):
227
+ # GH 21536
228
+ result = json_normalize({"A": [1, 2]}, "A", record_prefix="Prefix.")
229
+ expected = DataFrame([[1], [2]], columns=["Prefix.0"])
230
+ tm.assert_frame_equal(result, expected)
231
+
232
+ def test_nested_object_record_path(self):
233
+ # GH 22706
234
+ data = {
235
+ "state": "Florida",
236
+ "info": {
237
+ "governor": "Rick Scott",
238
+ "counties": [
239
+ {"name": "Dade", "population": 12345},
240
+ {"name": "Broward", "population": 40000},
241
+ {"name": "Palm Beach", "population": 60000},
242
+ ],
243
+ },
244
+ }
245
+ result = json_normalize(data, record_path=["info", "counties"])
246
+ expected = DataFrame(
247
+ [["Dade", 12345], ["Broward", 40000], ["Palm Beach", 60000]],
248
+ columns=["name", "population"],
249
+ )
250
+ tm.assert_frame_equal(result, expected)
251
+
252
+ def test_more_deeply_nested(self, deep_nested):
253
+ result = json_normalize(
254
+ deep_nested, ["states", "cities"], meta=["country", ["states", "name"]]
255
+ )
256
+ ex_data = {
257
+ "country": ["USA"] * 4 + ["Germany"] * 3,
258
+ "states.name": [
259
+ "California",
260
+ "California",
261
+ "Ohio",
262
+ "Ohio",
263
+ "Bayern",
264
+ "Nordrhein-Westfalen",
265
+ "Nordrhein-Westfalen",
266
+ ],
267
+ "name": [
268
+ "San Francisco",
269
+ "Los Angeles",
270
+ "Columbus",
271
+ "Cleveland",
272
+ "Munich",
273
+ "Duesseldorf",
274
+ "Koeln",
275
+ ],
276
+ "pop": [12345, 12346, 1234, 1236, 12347, 1238, 1239],
277
+ }
278
+
279
+ expected = DataFrame(ex_data, columns=result.columns)
280
+ tm.assert_frame_equal(result, expected)
281
+
282
+ def test_shallow_nested(self):
283
+ data = [
284
+ {
285
+ "state": "Florida",
286
+ "shortname": "FL",
287
+ "info": {"governor": "Rick Scott"},
288
+ "counties": [
289
+ {"name": "Dade", "population": 12345},
290
+ {"name": "Broward", "population": 40000},
291
+ {"name": "Palm Beach", "population": 60000},
292
+ ],
293
+ },
294
+ {
295
+ "state": "Ohio",
296
+ "shortname": "OH",
297
+ "info": {"governor": "John Kasich"},
298
+ "counties": [
299
+ {"name": "Summit", "population": 1234},
300
+ {"name": "Cuyahoga", "population": 1337},
301
+ ],
302
+ },
303
+ ]
304
+
305
+ result = json_normalize(
306
+ data, "counties", ["state", "shortname", ["info", "governor"]]
307
+ )
308
+ ex_data = {
309
+ "name": ["Dade", "Broward", "Palm Beach", "Summit", "Cuyahoga"],
310
+ "state": ["Florida"] * 3 + ["Ohio"] * 2,
311
+ "shortname": ["FL", "FL", "FL", "OH", "OH"],
312
+ "info.governor": ["Rick Scott"] * 3 + ["John Kasich"] * 2,
313
+ "population": [12345, 40000, 60000, 1234, 1337],
314
+ }
315
+ expected = DataFrame(ex_data, columns=result.columns)
316
+ tm.assert_frame_equal(result, expected)
317
+
318
+ def test_nested_meta_path_with_nested_record_path(self, state_data):
319
+ # GH 27220
320
+ result = json_normalize(
321
+ data=state_data,
322
+ record_path=["counties"],
323
+ meta=["state", "shortname", ["info", "governor"]],
324
+ errors="ignore",
325
+ )
326
+
327
+ ex_data = {
328
+ "name": ["Dade", "Broward", "Palm Beach", "Summit", "Cuyahoga"],
329
+ "population": [12345, 40000, 60000, 1234, 1337],
330
+ "state": ["Florida"] * 3 + ["Ohio"] * 2,
331
+ "shortname": ["FL"] * 3 + ["OH"] * 2,
332
+ "info.governor": ["Rick Scott"] * 3 + ["John Kasich"] * 2,
333
+ }
334
+
335
+ expected = DataFrame(ex_data)
336
+ tm.assert_frame_equal(result, expected)
337
+
338
+ def test_meta_name_conflict(self):
339
+ data = [
340
+ {
341
+ "foo": "hello",
342
+ "bar": "there",
343
+ "data": [
344
+ {"foo": "something", "bar": "else"},
345
+ {"foo": "something2", "bar": "else2"},
346
+ ],
347
+ }
348
+ ]
349
+
350
+ msg = r"Conflicting metadata name (foo|bar), need distinguishing prefix"
351
+ with pytest.raises(ValueError, match=msg):
352
+ json_normalize(data, "data", meta=["foo", "bar"])
353
+
354
+ result = json_normalize(data, "data", meta=["foo", "bar"], meta_prefix="meta")
355
+
356
+ for val in ["metafoo", "metabar", "foo", "bar"]:
357
+ assert val in result
358
+
359
+ def test_meta_parameter_not_modified(self):
360
+ # GH 18610
361
+ data = [
362
+ {
363
+ "foo": "hello",
364
+ "bar": "there",
365
+ "data": [
366
+ {"foo": "something", "bar": "else"},
367
+ {"foo": "something2", "bar": "else2"},
368
+ ],
369
+ }
370
+ ]
371
+
372
+ COLUMNS = ["foo", "bar"]
373
+ result = json_normalize(data, "data", meta=COLUMNS, meta_prefix="meta")
374
+
375
+ assert COLUMNS == ["foo", "bar"]
376
+ for val in ["metafoo", "metabar", "foo", "bar"]:
377
+ assert val in result
378
+
379
+ def test_record_prefix(self, state_data):
380
+ result = json_normalize(state_data[0], "counties")
381
+ expected = DataFrame(state_data[0]["counties"])
382
+ tm.assert_frame_equal(result, expected)
383
+
384
+ result = json_normalize(
385
+ state_data, "counties", meta="state", record_prefix="county_"
386
+ )
387
+
388
+ expected = []
389
+ for rec in state_data:
390
+ expected.extend(rec["counties"])
391
+ expected = DataFrame(expected)
392
+ expected = expected.rename(columns=lambda x: "county_" + x)
393
+ expected["state"] = np.array(["Florida", "Ohio"]).repeat([3, 2])
394
+
395
+ tm.assert_frame_equal(result, expected)
396
+
397
+ def test_non_ascii_key(self):
398
+ testjson = (
399
+ b'[{"\xc3\x9cnic\xc3\xb8de":0,"sub":{"A":1, "B":2}},'
400
+ + b'{"\xc3\x9cnic\xc3\xb8de":1,"sub":{"A":3, "B":4}}]'
401
+ ).decode("utf8")
402
+
403
+ testdata = {
404
+ b"\xc3\x9cnic\xc3\xb8de".decode("utf8"): [0, 1],
405
+ "sub.A": [1, 3],
406
+ "sub.B": [2, 4],
407
+ }
408
+ expected = DataFrame(testdata)
409
+
410
+ result = json_normalize(json.loads(testjson))
411
+ tm.assert_frame_equal(result, expected)
412
+
413
+ def test_missing_field(self, author_missing_data):
414
+ # GH20030:
415
+ result = json_normalize(author_missing_data)
416
+ ex_data = [
417
+ {
418
+ "info": np.nan,
419
+ "info.created_at": np.nan,
420
+ "info.last_updated": np.nan,
421
+ "author_name.first": np.nan,
422
+ "author_name.last_name": np.nan,
423
+ },
424
+ {
425
+ "info": None,
426
+ "info.created_at": "11/08/1993",
427
+ "info.last_updated": "26/05/2012",
428
+ "author_name.first": "Jane",
429
+ "author_name.last_name": "Doe",
430
+ },
431
+ ]
432
+ expected = DataFrame(ex_data)
433
+ tm.assert_frame_equal(result, expected)
434
+
435
+ @pytest.mark.parametrize(
436
+ "max_level,expected",
437
+ [
438
+ (
439
+ 0,
440
+ [
441
+ {
442
+ "TextField": "Some text",
443
+ "UserField": {"Id": "ID001", "Name": "Name001"},
444
+ "CreatedBy": {"Name": "User001"},
445
+ "Image": {"a": "b"},
446
+ },
447
+ {
448
+ "TextField": "Some text",
449
+ "UserField": {"Id": "ID001", "Name": "Name001"},
450
+ "CreatedBy": {"Name": "User001"},
451
+ "Image": {"a": "b"},
452
+ },
453
+ ],
454
+ ),
455
+ (
456
+ 1,
457
+ [
458
+ {
459
+ "TextField": "Some text",
460
+ "UserField.Id": "ID001",
461
+ "UserField.Name": "Name001",
462
+ "CreatedBy": {"Name": "User001"},
463
+ "Image": {"a": "b"},
464
+ },
465
+ {
466
+ "TextField": "Some text",
467
+ "UserField.Id": "ID001",
468
+ "UserField.Name": "Name001",
469
+ "CreatedBy": {"Name": "User001"},
470
+ "Image": {"a": "b"},
471
+ },
472
+ ],
473
+ ),
474
+ ],
475
+ )
476
+ def test_max_level_with_records_path(self, max_level, expected):
477
+ # GH23843: Enhanced JSON normalize
478
+ test_input = [
479
+ {
480
+ "CreatedBy": {"Name": "User001"},
481
+ "Lookup": [
482
+ {
483
+ "TextField": "Some text",
484
+ "UserField": {"Id": "ID001", "Name": "Name001"},
485
+ },
486
+ {
487
+ "TextField": "Some text",
488
+ "UserField": {"Id": "ID001", "Name": "Name001"},
489
+ },
490
+ ],
491
+ "Image": {"a": "b"},
492
+ "tags": [
493
+ {"foo": "something", "bar": "else"},
494
+ {"foo": "something2", "bar": "else2"},
495
+ ],
496
+ }
497
+ ]
498
+
499
+ result = json_normalize(
500
+ test_input,
501
+ record_path=["Lookup"],
502
+ meta=[["CreatedBy"], ["Image"]],
503
+ max_level=max_level,
504
+ )
505
+ expected_df = DataFrame(data=expected, columns=result.columns.values)
506
+ tm.assert_equal(expected_df, result)
507
+
508
+ def test_nested_flattening_consistent(self):
509
+ # see gh-21537
510
+ df1 = json_normalize([{"A": {"B": 1}}])
511
+ df2 = json_normalize({"dummy": [{"A": {"B": 1}}]}, "dummy")
512
+
513
+ # They should be the same.
514
+ tm.assert_frame_equal(df1, df2)
515
+
516
+ def test_nonetype_record_path(self, nulls_fixture):
517
+ # see gh-30148
518
+ # should not raise TypeError
519
+ result = json_normalize(
520
+ [
521
+ {"state": "Texas", "info": nulls_fixture},
522
+ {"state": "Florida", "info": [{"i": 2}]},
523
+ ],
524
+ record_path=["info"],
525
+ )
526
+ expected = DataFrame({"i": 2}, index=[0])
527
+ tm.assert_equal(result, expected)
528
+
529
+ @pytest.mark.parametrize("value", ["false", "true", "{}", "1", '"text"'])
530
+ def test_non_list_record_path_errors(self, value):
531
+ # see gh-30148, GH 26284
532
+ parsed_value = json.loads(value)
533
+ test_input = {"state": "Texas", "info": parsed_value}
534
+ test_path = "info"
535
+ msg = (
536
+ f"{test_input} has non list value {parsed_value} for path {test_path}. "
537
+ "Must be list or null."
538
+ )
539
+ with pytest.raises(TypeError, match=msg):
540
+ json_normalize([test_input], record_path=[test_path])
541
+
542
+ def test_meta_non_iterable(self):
543
+ # GH 31507
544
+ data = """[{"id": 99, "data": [{"one": 1, "two": 2}]}]"""
545
+
546
+ result = json_normalize(json.loads(data), record_path=["data"], meta=["id"])
547
+ expected = DataFrame(
548
+ {"one": [1], "two": [2], "id": np.array([99], dtype=object)}
549
+ )
550
+ tm.assert_frame_equal(result, expected)
551
+
552
+ def test_generator(self, state_data):
553
+ # GH35923 Fix pd.json_normalize to not skip the first element of a
554
+ # generator input
555
+ def generator_data():
556
+ yield from state_data[0]["counties"]
557
+
558
+ result = json_normalize(generator_data())
559
+ expected = DataFrame(state_data[0]["counties"])
560
+
561
+ tm.assert_frame_equal(result, expected)
562
+
563
+ def test_top_column_with_leading_underscore(self):
564
+ # 49861
565
+ data = {"_id": {"a1": 10, "l2": {"l3": 0}}, "gg": 4}
566
+ result = json_normalize(data, sep="_")
567
+ expected = DataFrame([[4, 10, 0]], columns=["gg", "_id_a1", "_id_l2_l3"])
568
+
569
+ tm.assert_frame_equal(result, expected)
570
+
571
+
572
+ class TestNestedToRecord:
573
+ def test_flat_stays_flat(self):
574
+ recs = [{"flat1": 1, "flat2": 2}, {"flat3": 3, "flat2": 4}]
575
+ result = nested_to_record(recs)
576
+ expected = recs
577
+ assert result == expected
578
+
579
+ def test_one_level_deep_flattens(self):
580
+ data = {"flat1": 1, "dict1": {"c": 1, "d": 2}}
581
+
582
+ result = nested_to_record(data)
583
+ expected = {"dict1.c": 1, "dict1.d": 2, "flat1": 1}
584
+
585
+ assert result == expected
586
+
587
+ def test_nested_flattens(self):
588
+ data = {
589
+ "flat1": 1,
590
+ "dict1": {"c": 1, "d": 2},
591
+ "nested": {"e": {"c": 1, "d": 2}, "d": 2},
592
+ }
593
+
594
+ result = nested_to_record(data)
595
+ expected = {
596
+ "dict1.c": 1,
597
+ "dict1.d": 2,
598
+ "flat1": 1,
599
+ "nested.d": 2,
600
+ "nested.e.c": 1,
601
+ "nested.e.d": 2,
602
+ }
603
+
604
+ assert result == expected
605
+
606
+ def test_json_normalize_errors(self, missing_metadata):
607
+ # GH14583:
608
+ # If meta keys are not always present a new option to set
609
+ # errors='ignore' has been implemented
610
+
611
+ msg = (
612
+ "Key 'name' not found. To replace missing values of "
613
+ "'name' with np.nan, pass in errors='ignore'"
614
+ )
615
+ with pytest.raises(KeyError, match=msg):
616
+ json_normalize(
617
+ data=missing_metadata,
618
+ record_path="addresses",
619
+ meta="name",
620
+ errors="raise",
621
+ )
622
+
623
+ def test_missing_meta(self, missing_metadata):
624
+ # GH25468
625
+ # If metadata is nullable with errors set to ignore, the null values
626
+ # should be numpy.nan values
627
+ result = json_normalize(
628
+ data=missing_metadata, record_path="addresses", meta="name", errors="ignore"
629
+ )
630
+ ex_data = [
631
+ [9562, "Morris St.", "Massillon", "OH", 44646, "Alice"],
632
+ [8449, "Spring St.", "Elizabethton", "TN", 37643, np.nan],
633
+ ]
634
+ columns = ["number", "street", "city", "state", "zip", "name"]
635
+ expected = DataFrame(ex_data, columns=columns)
636
+ tm.assert_frame_equal(result, expected)
637
+
638
+ def test_missing_nested_meta(self):
639
+ # GH44312
640
+ # If errors="ignore" and nested metadata is null, we should return nan
641
+ data = {"meta": "foo", "nested_meta": None, "value": [{"rec": 1}, {"rec": 2}]}
642
+ result = json_normalize(
643
+ data,
644
+ record_path="value",
645
+ meta=["meta", ["nested_meta", "leaf"]],
646
+ errors="ignore",
647
+ )
648
+ ex_data = [[1, "foo", np.nan], [2, "foo", np.nan]]
649
+ columns = ["rec", "meta", "nested_meta.leaf"]
650
+ expected = DataFrame(ex_data, columns=columns).astype(
651
+ {"nested_meta.leaf": object}
652
+ )
653
+ tm.assert_frame_equal(result, expected)
654
+
655
+ # If errors="raise" and nested metadata is null, we should raise with the
656
+ # key of the first missing level
657
+ with pytest.raises(KeyError, match="'leaf' not found"):
658
+ json_normalize(
659
+ data,
660
+ record_path="value",
661
+ meta=["meta", ["nested_meta", "leaf"]],
662
+ errors="raise",
663
+ )
664
+
665
+ def test_missing_meta_multilevel_record_path_errors_raise(self, missing_metadata):
666
+ # GH41876
667
+ # Ensure errors='raise' works as intended even when a record_path of length
668
+ # greater than one is passed in
669
+ msg = (
670
+ "Key 'name' not found. To replace missing values of "
671
+ "'name' with np.nan, pass in errors='ignore'"
672
+ )
673
+ with pytest.raises(KeyError, match=msg):
674
+ json_normalize(
675
+ data=missing_metadata,
676
+ record_path=["previous_residences", "cities"],
677
+ meta="name",
678
+ errors="raise",
679
+ )
680
+
681
+ def test_missing_meta_multilevel_record_path_errors_ignore(self, missing_metadata):
682
+ # GH41876
683
+ # Ensure errors='ignore' works as intended even when a record_path of length
684
+ # greater than one is passed in
685
+ result = json_normalize(
686
+ data=missing_metadata,
687
+ record_path=["previous_residences", "cities"],
688
+ meta="name",
689
+ errors="ignore",
690
+ )
691
+ ex_data = [
692
+ ["Foo York City", "Alice"],
693
+ ["Barmingham", np.nan],
694
+ ]
695
+ columns = ["city_name", "name"]
696
+ expected = DataFrame(ex_data, columns=columns)
697
+ tm.assert_frame_equal(result, expected)
698
+
699
+ def test_donot_drop_nonevalues(self):
700
+ # GH21356
701
+ data = [
702
+ {"info": None, "author_name": {"first": "Smith", "last_name": "Appleseed"}},
703
+ {
704
+ "info": {"created_at": "11/08/1993", "last_updated": "26/05/2012"},
705
+ "author_name": {"first": "Jane", "last_name": "Doe"},
706
+ },
707
+ ]
708
+ result = nested_to_record(data)
709
+ expected = [
710
+ {
711
+ "info": None,
712
+ "author_name.first": "Smith",
713
+ "author_name.last_name": "Appleseed",
714
+ },
715
+ {
716
+ "author_name.first": "Jane",
717
+ "author_name.last_name": "Doe",
718
+ "info.created_at": "11/08/1993",
719
+ "info.last_updated": "26/05/2012",
720
+ },
721
+ ]
722
+
723
+ assert result == expected
724
+
725
+ def test_nonetype_top_level_bottom_level(self):
726
+ # GH21158: If inner level json has a key with a null value
727
+ # make sure it does not do a new_d.pop twice and except
728
+ data = {
729
+ "id": None,
730
+ "location": {
731
+ "country": {
732
+ "state": {
733
+ "id": None,
734
+ "town.info": {
735
+ "id": None,
736
+ "region": None,
737
+ "x": 49.151580810546875,
738
+ "y": -33.148521423339844,
739
+ "z": 27.572303771972656,
740
+ },
741
+ }
742
+ }
743
+ },
744
+ }
745
+ result = nested_to_record(data)
746
+ expected = {
747
+ "id": None,
748
+ "location.country.state.id": None,
749
+ "location.country.state.town.info.id": None,
750
+ "location.country.state.town.info.region": None,
751
+ "location.country.state.town.info.x": 49.151580810546875,
752
+ "location.country.state.town.info.y": -33.148521423339844,
753
+ "location.country.state.town.info.z": 27.572303771972656,
754
+ }
755
+ assert result == expected
756
+
757
+ def test_nonetype_multiple_levels(self):
758
+ # GH21158: If inner level json has a key with a null value
759
+ # make sure it does not do a new_d.pop twice and except
760
+ data = {
761
+ "id": None,
762
+ "location": {
763
+ "id": None,
764
+ "country": {
765
+ "id": None,
766
+ "state": {
767
+ "id": None,
768
+ "town.info": {
769
+ "region": None,
770
+ "x": 49.151580810546875,
771
+ "y": -33.148521423339844,
772
+ "z": 27.572303771972656,
773
+ },
774
+ },
775
+ },
776
+ },
777
+ }
778
+ result = nested_to_record(data)
779
+ expected = {
780
+ "id": None,
781
+ "location.id": None,
782
+ "location.country.id": None,
783
+ "location.country.state.id": None,
784
+ "location.country.state.town.info.region": None,
785
+ "location.country.state.town.info.x": 49.151580810546875,
786
+ "location.country.state.town.info.y": -33.148521423339844,
787
+ "location.country.state.town.info.z": 27.572303771972656,
788
+ }
789
+ assert result == expected
790
+
791
+ @pytest.mark.parametrize(
792
+ "max_level, expected",
793
+ [
794
+ (
795
+ None,
796
+ [
797
+ {
798
+ "CreatedBy.Name": "User001",
799
+ "Lookup.TextField": "Some text",
800
+ "Lookup.UserField.Id": "ID001",
801
+ "Lookup.UserField.Name": "Name001",
802
+ "Image.a": "b",
803
+ }
804
+ ],
805
+ ),
806
+ (
807
+ 0,
808
+ [
809
+ {
810
+ "CreatedBy": {"Name": "User001"},
811
+ "Lookup": {
812
+ "TextField": "Some text",
813
+ "UserField": {"Id": "ID001", "Name": "Name001"},
814
+ },
815
+ "Image": {"a": "b"},
816
+ }
817
+ ],
818
+ ),
819
+ (
820
+ 1,
821
+ [
822
+ {
823
+ "CreatedBy.Name": "User001",
824
+ "Lookup.TextField": "Some text",
825
+ "Lookup.UserField": {"Id": "ID001", "Name": "Name001"},
826
+ "Image.a": "b",
827
+ }
828
+ ],
829
+ ),
830
+ ],
831
+ )
832
+ def test_with_max_level(self, max_level, expected, max_level_test_input_data):
833
+ # GH23843: Enhanced JSON normalize
834
+ output = nested_to_record(max_level_test_input_data, max_level=max_level)
835
+ assert output == expected
836
+
837
+ def test_with_large_max_level(self):
838
+ # GH23843: Enhanced JSON normalize
839
+ max_level = 100
840
+ input_data = [
841
+ {
842
+ "CreatedBy": {
843
+ "user": {
844
+ "name": {"firstname": "Leo", "LastName": "Thomson"},
845
+ "family_tree": {
846
+ "father": {
847
+ "name": "Father001",
848
+ "father": {
849
+ "Name": "Father002",
850
+ "father": {
851
+ "name": "Father003",
852
+ "father": {"Name": "Father004"},
853
+ },
854
+ },
855
+ }
856
+ },
857
+ }
858
+ }
859
+ }
860
+ ]
861
+ expected = [
862
+ {
863
+ "CreatedBy.user.name.firstname": "Leo",
864
+ "CreatedBy.user.name.LastName": "Thomson",
865
+ "CreatedBy.user.family_tree.father.name": "Father001",
866
+ "CreatedBy.user.family_tree.father.father.Name": "Father002",
867
+ "CreatedBy.user.family_tree.father.father.father.name": "Father003",
868
+ "CreatedBy.user.family_tree.father.father.father.father.Name": "Father004", # noqa: E501
869
+ }
870
+ ]
871
+ output = nested_to_record(input_data, max_level=max_level)
872
+ assert output == expected
873
+
874
+ def test_series_non_zero_index(self):
875
+ # GH 19020
876
+ data = {
877
+ 0: {"id": 1, "name": "Foo", "elements": {"a": 1}},
878
+ 1: {"id": 2, "name": "Bar", "elements": {"b": 2}},
879
+ 2: {"id": 3, "name": "Baz", "elements": {"c": 3}},
880
+ }
881
+ s = Series(data)
882
+ s.index = [1, 2, 3]
883
+ result = json_normalize(s)
884
+ expected = DataFrame(
885
+ {
886
+ "id": [1, 2, 3],
887
+ "name": ["Foo", "Bar", "Baz"],
888
+ "elements.a": [1.0, np.nan, np.nan],
889
+ "elements.b": [np.nan, 2.0, np.nan],
890
+ "elements.c": [np.nan, np.nan, 3.0],
891
+ }
892
+ )
893
+ tm.assert_frame_equal(result, expected)
videochat2/lib/python3.10/site-packages/pandas/tests/io/json/test_pandas.py ADDED
@@ -0,0 +1,1965 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import datetime
2
+ from datetime import timedelta
3
+ from decimal import Decimal
4
+ from io import StringIO
5
+ import json
6
+ import os
7
+ import sys
8
+ import time
9
+
10
+ import numpy as np
11
+ import pytest
12
+
13
+ from pandas.compat import IS64
14
+ import pandas.util._test_decorators as td
15
+
16
+ import pandas as pd
17
+ from pandas import (
18
+ NA,
19
+ DataFrame,
20
+ DatetimeIndex,
21
+ Series,
22
+ Timestamp,
23
+ read_json,
24
+ )
25
+ import pandas._testing as tm
26
+ from pandas.core.arrays import (
27
+ ArrowStringArray,
28
+ StringArray,
29
+ )
30
+
31
+
32
+ def assert_json_roundtrip_equal(result, expected, orient):
33
+ if orient in ("records", "values"):
34
+ expected = expected.reset_index(drop=True)
35
+ if orient == "values":
36
+ expected.columns = range(len(expected.columns))
37
+ tm.assert_frame_equal(result, expected)
38
+
39
+
40
+ class TestPandasContainer:
41
+ @pytest.fixture
42
+ def categorical_frame(self):
43
+ _seriesd = tm.getSeriesData()
44
+
45
+ _cat_frame = DataFrame(_seriesd)
46
+
47
+ cat = ["bah"] * 5 + ["bar"] * 5 + ["baz"] * 5 + ["foo"] * (len(_cat_frame) - 15)
48
+ _cat_frame.index = pd.CategoricalIndex(cat, name="E")
49
+ _cat_frame["E"] = list(reversed(cat))
50
+ _cat_frame["sort"] = np.arange(len(_cat_frame), dtype="int64")
51
+ return _cat_frame
52
+
53
+ @pytest.fixture
54
+ def datetime_series(self):
55
+ # Same as usual datetime_series, but with index freq set to None,
56
+ # since that doesn't round-trip, see GH#33711
57
+ ser = tm.makeTimeSeries()
58
+ ser.name = "ts"
59
+ ser.index = ser.index._with_freq(None)
60
+ return ser
61
+
62
+ @pytest.fixture
63
+ def datetime_frame(self):
64
+ # Same as usual datetime_frame, but with index freq set to None,
65
+ # since that doesn't round-trip, see GH#33711
66
+ df = DataFrame(tm.getTimeSeriesData())
67
+ df.index = df.index._with_freq(None)
68
+ return df
69
+
70
+ def test_frame_double_encoded_labels(self, orient):
71
+ df = DataFrame(
72
+ [["a", "b"], ["c", "d"]],
73
+ index=['index " 1', "index / 2"],
74
+ columns=["a \\ b", "y / z"],
75
+ )
76
+
77
+ result = read_json(df.to_json(orient=orient), orient=orient)
78
+ expected = df.copy()
79
+
80
+ assert_json_roundtrip_equal(result, expected, orient)
81
+
82
+ @pytest.mark.parametrize("orient", ["split", "records", "values"])
83
+ def test_frame_non_unique_index(self, orient):
84
+ df = DataFrame([["a", "b"], ["c", "d"]], index=[1, 1], columns=["x", "y"])
85
+ result = read_json(df.to_json(orient=orient), orient=orient)
86
+ expected = df.copy()
87
+
88
+ assert_json_roundtrip_equal(result, expected, orient)
89
+
90
+ @pytest.mark.parametrize("orient", ["index", "columns"])
91
+ def test_frame_non_unique_index_raises(self, orient):
92
+ df = DataFrame([["a", "b"], ["c", "d"]], index=[1, 1], columns=["x", "y"])
93
+ msg = f"DataFrame index must be unique for orient='{orient}'"
94
+ with pytest.raises(ValueError, match=msg):
95
+ df.to_json(orient=orient)
96
+
97
+ @pytest.mark.parametrize("orient", ["split", "values"])
98
+ @pytest.mark.parametrize(
99
+ "data",
100
+ [
101
+ [["a", "b"], ["c", "d"]],
102
+ [[1.5, 2.5], [3.5, 4.5]],
103
+ [[1, 2.5], [3, 4.5]],
104
+ [[Timestamp("20130101"), 3.5], [Timestamp("20130102"), 4.5]],
105
+ ],
106
+ )
107
+ def test_frame_non_unique_columns(self, orient, data):
108
+ df = DataFrame(data, index=[1, 2], columns=["x", "x"])
109
+
110
+ result = read_json(
111
+ df.to_json(orient=orient), orient=orient, convert_dates=["x"]
112
+ )
113
+ if orient == "values":
114
+ expected = DataFrame(data)
115
+ if expected.iloc[:, 0].dtype == "datetime64[ns]":
116
+ # orient == "values" by default will write Timestamp objects out
117
+ # in milliseconds; these are internally stored in nanosecond,
118
+ # so divide to get where we need
119
+ # TODO: a to_epoch method would also solve; see GH 14772
120
+ expected.iloc[:, 0] = expected.iloc[:, 0].view(np.int64) // 1000000
121
+ elif orient == "split":
122
+ expected = df
123
+ expected.columns = ["x", "x.1"]
124
+
125
+ tm.assert_frame_equal(result, expected)
126
+
127
+ @pytest.mark.parametrize("orient", ["index", "columns", "records"])
128
+ def test_frame_non_unique_columns_raises(self, orient):
129
+ df = DataFrame([["a", "b"], ["c", "d"]], index=[1, 2], columns=["x", "x"])
130
+
131
+ msg = f"DataFrame columns must be unique for orient='{orient}'"
132
+ with pytest.raises(ValueError, match=msg):
133
+ df.to_json(orient=orient)
134
+
135
+ def test_frame_default_orient(self, float_frame):
136
+ assert float_frame.to_json() == float_frame.to_json(orient="columns")
137
+
138
+ @pytest.mark.parametrize("dtype", [False, float])
139
+ @pytest.mark.parametrize("convert_axes", [True, False])
140
+ def test_roundtrip_simple(self, orient, convert_axes, dtype, float_frame):
141
+ data = float_frame.to_json(orient=orient)
142
+ result = read_json(data, orient=orient, convert_axes=convert_axes, dtype=dtype)
143
+
144
+ expected = float_frame
145
+
146
+ assert_json_roundtrip_equal(result, expected, orient)
147
+
148
+ @pytest.mark.parametrize("dtype", [False, np.int64])
149
+ @pytest.mark.parametrize("convert_axes", [True, False])
150
+ def test_roundtrip_intframe(self, orient, convert_axes, dtype, int_frame):
151
+ data = int_frame.to_json(orient=orient)
152
+ result = read_json(data, orient=orient, convert_axes=convert_axes, dtype=dtype)
153
+ expected = int_frame
154
+ assert_json_roundtrip_equal(result, expected, orient)
155
+
156
+ @pytest.mark.parametrize("dtype", [None, np.float64, int, "U3"])
157
+ @pytest.mark.parametrize("convert_axes", [True, False])
158
+ def test_roundtrip_str_axes(self, orient, convert_axes, dtype):
159
+ df = DataFrame(
160
+ np.zeros((200, 4)),
161
+ columns=[str(i) for i in range(4)],
162
+ index=[str(i) for i in range(200)],
163
+ dtype=dtype,
164
+ )
165
+
166
+ data = df.to_json(orient=orient)
167
+ result = read_json(data, orient=orient, convert_axes=convert_axes, dtype=dtype)
168
+
169
+ expected = df.copy()
170
+ if not dtype:
171
+ expected = expected.astype(np.int64)
172
+
173
+ # index columns, and records orients cannot fully preserve the string
174
+ # dtype for axes as the index and column labels are used as keys in
175
+ # JSON objects. JSON keys are by definition strings, so there's no way
176
+ # to disambiguate whether those keys actually were strings or numeric
177
+ # beforehand and numeric wins out.
178
+ if convert_axes and (orient in ("index", "columns")):
179
+ expected.columns = expected.columns.astype(np.int64)
180
+ expected.index = expected.index.astype(np.int64)
181
+ elif orient == "records" and convert_axes:
182
+ expected.columns = expected.columns.astype(np.int64)
183
+ elif convert_axes and orient == "split":
184
+ expected.columns = expected.columns.astype(np.int64)
185
+
186
+ assert_json_roundtrip_equal(result, expected, orient)
187
+
188
+ @pytest.mark.parametrize("convert_axes", [True, False])
189
+ def test_roundtrip_categorical(
190
+ self, request, orient, categorical_frame, convert_axes
191
+ ):
192
+ # TODO: create a better frame to test with and improve coverage
193
+ if orient in ("index", "columns"):
194
+ request.node.add_marker(
195
+ pytest.mark.xfail(
196
+ reason=f"Can't have duplicate index values for orient '{orient}')"
197
+ )
198
+ )
199
+
200
+ data = categorical_frame.to_json(orient=orient)
201
+
202
+ result = read_json(data, orient=orient, convert_axes=convert_axes)
203
+
204
+ expected = categorical_frame.copy()
205
+ expected.index = expected.index.astype(str) # Categorical not preserved
206
+ expected.index.name = None # index names aren't preserved in JSON
207
+ assert_json_roundtrip_equal(result, expected, orient)
208
+
209
+ @pytest.mark.parametrize("convert_axes", [True, False])
210
+ def test_roundtrip_empty(self, orient, convert_axes):
211
+ empty_frame = DataFrame()
212
+ data = empty_frame.to_json(orient=orient)
213
+ result = read_json(data, orient=orient, convert_axes=convert_axes)
214
+ if orient == "split":
215
+ idx = pd.Index([], dtype=(float if convert_axes else object))
216
+ expected = DataFrame(index=idx, columns=idx)
217
+ elif orient in ["index", "columns"]:
218
+ expected = DataFrame()
219
+ else:
220
+ expected = empty_frame.copy()
221
+
222
+ tm.assert_frame_equal(result, expected)
223
+
224
+ @pytest.mark.parametrize("convert_axes", [True, False])
225
+ def test_roundtrip_timestamp(self, orient, convert_axes, datetime_frame):
226
+ # TODO: improve coverage with date_format parameter
227
+ data = datetime_frame.to_json(orient=orient)
228
+ result = read_json(data, orient=orient, convert_axes=convert_axes)
229
+ expected = datetime_frame.copy()
230
+
231
+ if not convert_axes: # one off for ts handling
232
+ # DTI gets converted to epoch values
233
+ idx = expected.index.view(np.int64) // 1000000
234
+ if orient != "split": # TODO: handle consistently across orients
235
+ idx = idx.astype(str)
236
+
237
+ expected.index = idx
238
+
239
+ assert_json_roundtrip_equal(result, expected, orient)
240
+
241
+ @pytest.mark.parametrize("convert_axes", [True, False])
242
+ def test_roundtrip_mixed(self, orient, convert_axes):
243
+ index = pd.Index(["a", "b", "c", "d", "e"])
244
+ values = {
245
+ "A": [0.0, 1.0, 2.0, 3.0, 4.0],
246
+ "B": [0.0, 1.0, 0.0, 1.0, 0.0],
247
+ "C": ["foo1", "foo2", "foo3", "foo4", "foo5"],
248
+ "D": [True, False, True, False, True],
249
+ }
250
+
251
+ df = DataFrame(data=values, index=index)
252
+
253
+ data = df.to_json(orient=orient)
254
+ result = read_json(data, orient=orient, convert_axes=convert_axes)
255
+
256
+ expected = df.copy()
257
+ expected = expected.assign(**expected.select_dtypes("number").astype(np.int64))
258
+
259
+ assert_json_roundtrip_equal(result, expected, orient)
260
+
261
+ @pytest.mark.xfail(
262
+ reason="#50456 Column multiindex is stored and loaded differently",
263
+ raises=AssertionError,
264
+ )
265
+ @pytest.mark.parametrize(
266
+ "columns",
267
+ [
268
+ [["2022", "2022"], ["JAN", "FEB"]],
269
+ [["2022", "2023"], ["JAN", "JAN"]],
270
+ [["2022", "2022"], ["JAN", "JAN"]],
271
+ ],
272
+ )
273
+ def test_roundtrip_multiindex(self, columns):
274
+ df = DataFrame(
275
+ [[1, 2], [3, 4]],
276
+ columns=pd.MultiIndex.from_arrays(columns),
277
+ )
278
+
279
+ result = read_json(df.to_json(orient="split"), orient="split")
280
+
281
+ tm.assert_frame_equal(result, df)
282
+
283
+ @pytest.mark.parametrize(
284
+ "data,msg,orient",
285
+ [
286
+ ('{"key":b:a:d}', "Expected object or value", "columns"),
287
+ # too few indices
288
+ (
289
+ '{"columns":["A","B"],'
290
+ '"index":["2","3"],'
291
+ '"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}',
292
+ "|".join(
293
+ [
294
+ r"Length of values \(3\) does not match length of index \(2\)",
295
+ ]
296
+ ),
297
+ "split",
298
+ ),
299
+ # too many columns
300
+ (
301
+ '{"columns":["A","B","C"],'
302
+ '"index":["1","2","3"],'
303
+ '"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}',
304
+ "3 columns passed, passed data had 2 columns",
305
+ "split",
306
+ ),
307
+ # bad key
308
+ (
309
+ '{"badkey":["A","B"],'
310
+ '"index":["2","3"],'
311
+ '"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}',
312
+ r"unexpected key\(s\): badkey",
313
+ "split",
314
+ ),
315
+ ],
316
+ )
317
+ def test_frame_from_json_bad_data_raises(self, data, msg, orient):
318
+ with pytest.raises(ValueError, match=msg):
319
+ read_json(StringIO(data), orient=orient)
320
+
321
+ @pytest.mark.parametrize("dtype", [True, False])
322
+ @pytest.mark.parametrize("convert_axes", [True, False])
323
+ def test_frame_from_json_missing_data(self, orient, convert_axes, dtype):
324
+ num_df = DataFrame([[1, 2], [4, 5, 6]])
325
+ result = read_json(
326
+ num_df.to_json(orient=orient),
327
+ orient=orient,
328
+ convert_axes=convert_axes,
329
+ dtype=dtype,
330
+ )
331
+ assert np.isnan(result.iloc[0, 2])
332
+
333
+ obj_df = DataFrame([["1", "2"], ["4", "5", "6"]])
334
+ result = read_json(
335
+ obj_df.to_json(orient=orient),
336
+ orient=orient,
337
+ convert_axes=convert_axes,
338
+ dtype=dtype,
339
+ )
340
+ assert np.isnan(result.iloc[0, 2])
341
+
342
+ @pytest.mark.parametrize("dtype", [True, False])
343
+ def test_frame_read_json_dtype_missing_value(self, dtype):
344
+ # GH28501 Parse missing values using read_json with dtype=False
345
+ # to NaN instead of None
346
+ result = read_json("[null]", dtype=dtype)
347
+ expected = DataFrame([np.nan])
348
+
349
+ tm.assert_frame_equal(result, expected)
350
+
351
+ @pytest.mark.parametrize("inf", [np.inf, np.NINF])
352
+ @pytest.mark.parametrize("dtype", [True, False])
353
+ def test_frame_infinity(self, inf, dtype):
354
+ # infinities get mapped to nulls which get mapped to NaNs during
355
+ # deserialisation
356
+ df = DataFrame([[1, 2], [4, 5, 6]])
357
+ df.loc[0, 2] = inf
358
+ result = read_json(df.to_json(), dtype=dtype)
359
+ assert np.isnan(result.iloc[0, 2])
360
+
361
+ @pytest.mark.skipif(not IS64, reason="not compliant on 32-bit, xref #15865")
362
+ @pytest.mark.parametrize(
363
+ "value,precision,expected_val",
364
+ [
365
+ (0.95, 1, 1.0),
366
+ (1.95, 1, 2.0),
367
+ (-1.95, 1, -2.0),
368
+ (0.995, 2, 1.0),
369
+ (0.9995, 3, 1.0),
370
+ (0.99999999999999944, 15, 1.0),
371
+ ],
372
+ )
373
+ def test_frame_to_json_float_precision(self, value, precision, expected_val):
374
+ df = DataFrame([{"a_float": value}])
375
+ encoded = df.to_json(double_precision=precision)
376
+ assert encoded == f'{{"a_float":{{"0":{expected_val}}}}}'
377
+
378
+ def test_frame_to_json_except(self):
379
+ df = DataFrame([1, 2, 3])
380
+ msg = "Invalid value 'garbage' for option 'orient'"
381
+ with pytest.raises(ValueError, match=msg):
382
+ df.to_json(orient="garbage")
383
+
384
+ def test_frame_empty(self):
385
+ df = DataFrame(columns=["jim", "joe"])
386
+ assert not df._is_mixed_type
387
+ tm.assert_frame_equal(
388
+ read_json(df.to_json(), dtype=dict(df.dtypes)), df, check_index_type=False
389
+ )
390
+ # GH 7445
391
+ result = DataFrame({"test": []}, index=[]).to_json(orient="columns")
392
+ expected = '{"test":{}}'
393
+ assert result == expected
394
+
395
+ def test_frame_empty_mixedtype(self):
396
+ # mixed type
397
+ df = DataFrame(columns=["jim", "joe"])
398
+ df["joe"] = df["joe"].astype("i8")
399
+ assert df._is_mixed_type
400
+ tm.assert_frame_equal(
401
+ read_json(df.to_json(), dtype=dict(df.dtypes)), df, check_index_type=False
402
+ )
403
+
404
+ def test_frame_mixedtype_orient(self): # GH10289
405
+ vals = [
406
+ [10, 1, "foo", 0.1, 0.01],
407
+ [20, 2, "bar", 0.2, 0.02],
408
+ [30, 3, "baz", 0.3, 0.03],
409
+ [40, 4, "qux", 0.4, 0.04],
410
+ ]
411
+
412
+ df = DataFrame(
413
+ vals, index=list("abcd"), columns=["1st", "2nd", "3rd", "4th", "5th"]
414
+ )
415
+
416
+ assert df._is_mixed_type
417
+ right = df.copy()
418
+
419
+ for orient in ["split", "index", "columns"]:
420
+ inp = df.to_json(orient=orient)
421
+ left = read_json(inp, orient=orient, convert_axes=False)
422
+ tm.assert_frame_equal(left, right)
423
+
424
+ right.index = pd.RangeIndex(len(df))
425
+ inp = df.to_json(orient="records")
426
+ left = read_json(inp, orient="records", convert_axes=False)
427
+ tm.assert_frame_equal(left, right)
428
+
429
+ right.columns = pd.RangeIndex(df.shape[1])
430
+ inp = df.to_json(orient="values")
431
+ left = read_json(inp, orient="values", convert_axes=False)
432
+ tm.assert_frame_equal(left, right)
433
+
434
+ def test_v12_compat(self, datapath):
435
+ dti = pd.date_range("2000-01-03", "2000-01-07")
436
+ # freq doesn't roundtrip
437
+ dti = DatetimeIndex(np.asarray(dti), freq=None)
438
+ df = DataFrame(
439
+ [
440
+ [1.56808523, 0.65727391, 1.81021139, -0.17251653],
441
+ [-0.2550111, -0.08072427, -0.03202878, -0.17581665],
442
+ [1.51493992, 0.11805825, 1.629455, -1.31506612],
443
+ [-0.02765498, 0.44679743, 0.33192641, -0.27885413],
444
+ [0.05951614, -2.69652057, 1.28163262, 0.34703478],
445
+ ],
446
+ columns=["A", "B", "C", "D"],
447
+ index=dti,
448
+ )
449
+ df["date"] = Timestamp("19920106 18:21:32.12")
450
+ df.iloc[3, df.columns.get_loc("date")] = Timestamp("20130101")
451
+ df["modified"] = df["date"]
452
+ df.iloc[1, df.columns.get_loc("modified")] = pd.NaT
453
+
454
+ dirpath = datapath("io", "json", "data")
455
+ v12_json = os.path.join(dirpath, "tsframe_v012.json")
456
+ df_unser = read_json(v12_json)
457
+ tm.assert_frame_equal(df, df_unser)
458
+
459
+ df_iso = df.drop(["modified"], axis=1)
460
+ v12_iso_json = os.path.join(dirpath, "tsframe_iso_v012.json")
461
+ df_unser_iso = read_json(v12_iso_json)
462
+ tm.assert_frame_equal(df_iso, df_unser_iso)
463
+
464
+ def test_blocks_compat_GH9037(self):
465
+ index = pd.date_range("20000101", periods=10, freq="H")
466
+ # freq doesn't round-trip
467
+ index = DatetimeIndex(list(index), freq=None)
468
+
469
+ df_mixed = DataFrame(
470
+ {
471
+ "float_1": [
472
+ -0.92077639,
473
+ 0.77434435,
474
+ 1.25234727,
475
+ 0.61485564,
476
+ -0.60316077,
477
+ 0.24653374,
478
+ 0.28668979,
479
+ -2.51969012,
480
+ 0.95748401,
481
+ -1.02970536,
482
+ ],
483
+ "int_1": [
484
+ 19680418,
485
+ 75337055,
486
+ 99973684,
487
+ 65103179,
488
+ 79373900,
489
+ 40314334,
490
+ 21290235,
491
+ 4991321,
492
+ 41903419,
493
+ 16008365,
494
+ ],
495
+ "str_1": [
496
+ "78c608f1",
497
+ "64a99743",
498
+ "13d2ff52",
499
+ "ca7f4af2",
500
+ "97236474",
501
+ "bde7e214",
502
+ "1a6bde47",
503
+ "b1190be5",
504
+ "7a669144",
505
+ "8d64d068",
506
+ ],
507
+ "float_2": [
508
+ -0.0428278,
509
+ -1.80872357,
510
+ 3.36042349,
511
+ -0.7573685,
512
+ -0.48217572,
513
+ 0.86229683,
514
+ 1.08935819,
515
+ 0.93898739,
516
+ -0.03030452,
517
+ 1.43366348,
518
+ ],
519
+ "str_2": [
520
+ "14f04af9",
521
+ "d085da90",
522
+ "4bcfac83",
523
+ "81504caf",
524
+ "2ffef4a9",
525
+ "08e2f5c4",
526
+ "07e1af03",
527
+ "addbd4a7",
528
+ "1f6a09ba",
529
+ "4bfc4d87",
530
+ ],
531
+ "int_2": [
532
+ 86967717,
533
+ 98098830,
534
+ 51927505,
535
+ 20372254,
536
+ 12601730,
537
+ 20884027,
538
+ 34193846,
539
+ 10561746,
540
+ 24867120,
541
+ 76131025,
542
+ ],
543
+ },
544
+ index=index,
545
+ )
546
+
547
+ # JSON deserialisation always creates unicode strings
548
+ df_mixed.columns = df_mixed.columns.astype("unicode")
549
+
550
+ df_roundtrip = read_json(df_mixed.to_json(orient="split"), orient="split")
551
+ tm.assert_frame_equal(
552
+ df_mixed,
553
+ df_roundtrip,
554
+ check_index_type=True,
555
+ check_column_type=True,
556
+ by_blocks=True,
557
+ check_exact=True,
558
+ )
559
+
560
+ def test_frame_nonprintable_bytes(self):
561
+ # GH14256: failing column caused segfaults, if it is not the last one
562
+
563
+ class BinaryThing:
564
+ def __init__(self, hexed) -> None:
565
+ self.hexed = hexed
566
+ self.binary = bytes.fromhex(hexed)
567
+
568
+ def __str__(self) -> str:
569
+ return self.hexed
570
+
571
+ hexed = "574b4454ba8c5eb4f98a8f45"
572
+ binthing = BinaryThing(hexed)
573
+
574
+ # verify the proper conversion of printable content
575
+ df_printable = DataFrame({"A": [binthing.hexed]})
576
+ assert df_printable.to_json() == f'{{"A":{{"0":"{hexed}"}}}}'
577
+
578
+ # check if non-printable content throws appropriate Exception
579
+ df_nonprintable = DataFrame({"A": [binthing]})
580
+ msg = "Unsupported UTF-8 sequence length when encoding string"
581
+ with pytest.raises(OverflowError, match=msg):
582
+ df_nonprintable.to_json()
583
+
584
+ # the same with multiple columns threw segfaults
585
+ df_mixed = DataFrame({"A": [binthing], "B": [1]}, columns=["A", "B"])
586
+ with pytest.raises(OverflowError, match=msg):
587
+ df_mixed.to_json()
588
+
589
+ # default_handler should resolve exceptions for non-string types
590
+ result = df_nonprintable.to_json(default_handler=str)
591
+ expected = f'{{"A":{{"0":"{hexed}"}}}}'
592
+ assert result == expected
593
+ assert (
594
+ df_mixed.to_json(default_handler=str)
595
+ == f'{{"A":{{"0":"{hexed}"}},"B":{{"0":1}}}}'
596
+ )
597
+
598
+ def test_label_overflow(self):
599
+ # GH14256: buffer length not checked when writing label
600
+ result = DataFrame({"bar" * 100000: [1], "foo": [1337]}).to_json()
601
+ expected = f'{{"{"bar" * 100000}":{{"0":1}},"foo":{{"0":1337}}}}'
602
+ assert result == expected
603
+
604
+ def test_series_non_unique_index(self):
605
+ s = Series(["a", "b"], index=[1, 1])
606
+
607
+ msg = "Series index must be unique for orient='index'"
608
+ with pytest.raises(ValueError, match=msg):
609
+ s.to_json(orient="index")
610
+
611
+ tm.assert_series_equal(
612
+ s, read_json(s.to_json(orient="split"), orient="split", typ="series")
613
+ )
614
+ unserialized = read_json(
615
+ s.to_json(orient="records"), orient="records", typ="series"
616
+ )
617
+ tm.assert_numpy_array_equal(s.values, unserialized.values)
618
+
619
+ def test_series_default_orient(self, string_series):
620
+ assert string_series.to_json() == string_series.to_json(orient="index")
621
+
622
+ def test_series_roundtrip_simple(self, orient, string_series):
623
+ data = string_series.to_json(orient=orient)
624
+ result = read_json(data, typ="series", orient=orient)
625
+
626
+ expected = string_series
627
+ if orient in ("values", "records"):
628
+ expected = expected.reset_index(drop=True)
629
+ if orient != "split":
630
+ expected.name = None
631
+
632
+ tm.assert_series_equal(result, expected)
633
+
634
+ @pytest.mark.parametrize("dtype", [False, None])
635
+ def test_series_roundtrip_object(self, orient, dtype, object_series):
636
+ data = object_series.to_json(orient=orient)
637
+ result = read_json(data, typ="series", orient=orient, dtype=dtype)
638
+
639
+ expected = object_series
640
+ if orient in ("values", "records"):
641
+ expected = expected.reset_index(drop=True)
642
+ if orient != "split":
643
+ expected.name = None
644
+
645
+ tm.assert_series_equal(result, expected)
646
+
647
+ def test_series_roundtrip_empty(self, orient):
648
+ empty_series = Series([], index=[], dtype=np.float64)
649
+ data = empty_series.to_json(orient=orient)
650
+ result = read_json(data, typ="series", orient=orient)
651
+
652
+ expected = empty_series.reset_index(drop=True)
653
+ if orient in ("split"):
654
+ expected.index = expected.index.astype(np.float64)
655
+
656
+ tm.assert_series_equal(result, expected)
657
+
658
+ def test_series_roundtrip_timeseries(self, orient, datetime_series):
659
+ data = datetime_series.to_json(orient=orient)
660
+ result = read_json(data, typ="series", orient=orient)
661
+
662
+ expected = datetime_series
663
+ if orient in ("values", "records"):
664
+ expected = expected.reset_index(drop=True)
665
+ if orient != "split":
666
+ expected.name = None
667
+
668
+ tm.assert_series_equal(result, expected)
669
+
670
+ @pytest.mark.parametrize("dtype", [np.float64, int])
671
+ def test_series_roundtrip_numeric(self, orient, dtype):
672
+ s = Series(range(6), index=["a", "b", "c", "d", "e", "f"])
673
+ data = s.to_json(orient=orient)
674
+ result = read_json(data, typ="series", orient=orient)
675
+
676
+ expected = s.copy()
677
+ if orient in ("values", "records"):
678
+ expected = expected.reset_index(drop=True)
679
+
680
+ tm.assert_series_equal(result, expected)
681
+
682
+ def test_series_to_json_except(self):
683
+ s = Series([1, 2, 3])
684
+ msg = "Invalid value 'garbage' for option 'orient'"
685
+ with pytest.raises(ValueError, match=msg):
686
+ s.to_json(orient="garbage")
687
+
688
+ def test_series_from_json_precise_float(self):
689
+ s = Series([4.56, 4.56, 4.56])
690
+ result = read_json(s.to_json(), typ="series", precise_float=True)
691
+ tm.assert_series_equal(result, s, check_index_type=False)
692
+
693
+ def test_series_with_dtype(self):
694
+ # GH 21986
695
+ s = Series([4.56, 4.56, 4.56])
696
+ result = read_json(s.to_json(), typ="series", dtype=np.int64)
697
+ expected = Series([4] * 3)
698
+ tm.assert_series_equal(result, expected)
699
+
700
+ @pytest.mark.parametrize(
701
+ "dtype,expected",
702
+ [
703
+ (True, Series(["2000-01-01"], dtype="datetime64[ns]")),
704
+ (False, Series([946684800000])),
705
+ ],
706
+ )
707
+ def test_series_with_dtype_datetime(self, dtype, expected):
708
+ s = Series(["2000-01-01"], dtype="datetime64[ns]")
709
+ data = s.to_json()
710
+ result = read_json(data, typ="series", dtype=dtype)
711
+ tm.assert_series_equal(result, expected)
712
+
713
+ def test_frame_from_json_precise_float(self):
714
+ df = DataFrame([[4.56, 4.56, 4.56], [4.56, 4.56, 4.56]])
715
+ result = read_json(df.to_json(), precise_float=True)
716
+ tm.assert_frame_equal(result, df)
717
+
718
+ def test_typ(self):
719
+ s = Series(range(6), index=["a", "b", "c", "d", "e", "f"], dtype="int64")
720
+ result = read_json(s.to_json(), typ=None)
721
+ tm.assert_series_equal(result, s)
722
+
723
+ def test_reconstruction_index(self):
724
+ df = DataFrame([[1, 2, 3], [4, 5, 6]])
725
+ result = read_json(df.to_json())
726
+
727
+ tm.assert_frame_equal(result, df)
728
+
729
+ df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}, index=["A", "B", "C"])
730
+ result = read_json(df.to_json())
731
+ tm.assert_frame_equal(result, df)
732
+
733
+ def test_path(self, float_frame, int_frame, datetime_frame):
734
+ with tm.ensure_clean("test.json") as path:
735
+ for df in [float_frame, int_frame, datetime_frame]:
736
+ df.to_json(path)
737
+ read_json(path)
738
+
739
+ def test_axis_dates(self, datetime_series, datetime_frame):
740
+ # frame
741
+ json = datetime_frame.to_json()
742
+ result = read_json(json)
743
+ tm.assert_frame_equal(result, datetime_frame)
744
+
745
+ # series
746
+ json = datetime_series.to_json()
747
+ result = read_json(json, typ="series")
748
+ tm.assert_series_equal(result, datetime_series, check_names=False)
749
+ assert result.name is None
750
+
751
+ def test_convert_dates(self, datetime_series, datetime_frame):
752
+ # frame
753
+ df = datetime_frame
754
+ df["date"] = Timestamp("20130101")
755
+
756
+ json = df.to_json()
757
+ result = read_json(json)
758
+ tm.assert_frame_equal(result, df)
759
+
760
+ df["foo"] = 1.0
761
+ json = df.to_json(date_unit="ns")
762
+
763
+ result = read_json(json, convert_dates=False)
764
+ expected = df.copy()
765
+ expected["date"] = expected["date"].values.view("i8")
766
+ expected["foo"] = expected["foo"].astype("int64")
767
+ tm.assert_frame_equal(result, expected)
768
+
769
+ # series
770
+ ts = Series(Timestamp("20130101"), index=datetime_series.index)
771
+ json = ts.to_json()
772
+ result = read_json(json, typ="series")
773
+ tm.assert_series_equal(result, ts)
774
+
775
+ @pytest.mark.parametrize("date_format", ["epoch", "iso"])
776
+ @pytest.mark.parametrize("as_object", [True, False])
777
+ @pytest.mark.parametrize("date_typ", [datetime.date, datetime.datetime, Timestamp])
778
+ def test_date_index_and_values(self, date_format, as_object, date_typ):
779
+ data = [date_typ(year=2020, month=1, day=1), pd.NaT]
780
+ if as_object:
781
+ data.append("a")
782
+
783
+ ser = Series(data, index=data)
784
+ result = ser.to_json(date_format=date_format)
785
+
786
+ if date_format == "epoch":
787
+ expected = '{"1577836800000":1577836800000,"null":null}'
788
+ else:
789
+ expected = (
790
+ '{"2020-01-01T00:00:00.000":"2020-01-01T00:00:00.000","null":null}'
791
+ )
792
+
793
+ if as_object:
794
+ expected = expected.replace("}", ',"a":"a"}')
795
+
796
+ assert result == expected
797
+
798
+ @pytest.mark.parametrize(
799
+ "infer_word",
800
+ [
801
+ "trade_time",
802
+ "date",
803
+ "datetime",
804
+ "sold_at",
805
+ "modified",
806
+ "timestamp",
807
+ "timestamps",
808
+ ],
809
+ )
810
+ def test_convert_dates_infer(self, infer_word):
811
+ # GH10747
812
+ from pandas.io.json import dumps
813
+
814
+ data = [{"id": 1, infer_word: 1036713600000}, {"id": 2}]
815
+ expected = DataFrame(
816
+ [[1, Timestamp("2002-11-08")], [2, pd.NaT]], columns=["id", infer_word]
817
+ )
818
+ result = read_json(dumps(data))[["id", infer_word]]
819
+ tm.assert_frame_equal(result, expected)
820
+
821
+ @pytest.mark.parametrize(
822
+ "date,date_unit",
823
+ [
824
+ ("20130101 20:43:42.123", None),
825
+ ("20130101 20:43:42", "s"),
826
+ ("20130101 20:43:42.123", "ms"),
827
+ ("20130101 20:43:42.123456", "us"),
828
+ ("20130101 20:43:42.123456789", "ns"),
829
+ ],
830
+ )
831
+ def test_date_format_frame(self, date, date_unit, datetime_frame):
832
+ df = datetime_frame
833
+
834
+ df["date"] = Timestamp(date)
835
+ df.iloc[1, df.columns.get_loc("date")] = pd.NaT
836
+ df.iloc[5, df.columns.get_loc("date")] = pd.NaT
837
+ if date_unit:
838
+ json = df.to_json(date_format="iso", date_unit=date_unit)
839
+ else:
840
+ json = df.to_json(date_format="iso")
841
+ result = read_json(json)
842
+ expected = df.copy()
843
+ tm.assert_frame_equal(result, expected)
844
+
845
+ def test_date_format_frame_raises(self, datetime_frame):
846
+ df = datetime_frame
847
+ msg = "Invalid value 'foo' for option 'date_unit'"
848
+ with pytest.raises(ValueError, match=msg):
849
+ df.to_json(date_format="iso", date_unit="foo")
850
+
851
+ @pytest.mark.parametrize(
852
+ "date,date_unit",
853
+ [
854
+ ("20130101 20:43:42.123", None),
855
+ ("20130101 20:43:42", "s"),
856
+ ("20130101 20:43:42.123", "ms"),
857
+ ("20130101 20:43:42.123456", "us"),
858
+ ("20130101 20:43:42.123456789", "ns"),
859
+ ],
860
+ )
861
+ def test_date_format_series(self, date, date_unit, datetime_series):
862
+ ts = Series(Timestamp(date), index=datetime_series.index)
863
+ ts.iloc[1] = pd.NaT
864
+ ts.iloc[5] = pd.NaT
865
+ if date_unit:
866
+ json = ts.to_json(date_format="iso", date_unit=date_unit)
867
+ else:
868
+ json = ts.to_json(date_format="iso")
869
+ result = read_json(json, typ="series")
870
+ expected = ts.copy()
871
+ tm.assert_series_equal(result, expected)
872
+
873
+ def test_date_format_series_raises(self, datetime_series):
874
+ ts = Series(Timestamp("20130101 20:43:42.123"), index=datetime_series.index)
875
+ msg = "Invalid value 'foo' for option 'date_unit'"
876
+ with pytest.raises(ValueError, match=msg):
877
+ ts.to_json(date_format="iso", date_unit="foo")
878
+
879
+ @pytest.mark.parametrize("unit", ["s", "ms", "us", "ns"])
880
+ def test_date_unit(self, unit, datetime_frame):
881
+ df = datetime_frame
882
+ df["date"] = Timestamp("20130101 20:43:42")
883
+ dl = df.columns.get_loc("date")
884
+ df.iloc[1, dl] = Timestamp("19710101 20:43:42")
885
+ df.iloc[2, dl] = Timestamp("21460101 20:43:42")
886
+ df.iloc[4, dl] = pd.NaT
887
+
888
+ json = df.to_json(date_format="epoch", date_unit=unit)
889
+
890
+ # force date unit
891
+ result = read_json(json, date_unit=unit)
892
+ tm.assert_frame_equal(result, df)
893
+
894
+ # detect date unit
895
+ result = read_json(json, date_unit=None)
896
+ tm.assert_frame_equal(result, df)
897
+
898
+ def test_weird_nested_json(self):
899
+ # this used to core dump the parser
900
+ s = r"""{
901
+ "status": "success",
902
+ "data": {
903
+ "posts": [
904
+ {
905
+ "id": 1,
906
+ "title": "A blog post",
907
+ "body": "Some useful content"
908
+ },
909
+ {
910
+ "id": 2,
911
+ "title": "Another blog post",
912
+ "body": "More content"
913
+ }
914
+ ]
915
+ }
916
+ }"""
917
+
918
+ read_json(s)
919
+
920
+ def test_doc_example(self):
921
+ dfj2 = DataFrame(np.random.randn(5, 2), columns=list("AB"))
922
+ dfj2["date"] = Timestamp("20130101")
923
+ dfj2["ints"] = range(5)
924
+ dfj2["bools"] = True
925
+ dfj2.index = pd.date_range("20130101", periods=5)
926
+
927
+ json = dfj2.to_json()
928
+ result = read_json(json, dtype={"ints": np.int64, "bools": np.bool_})
929
+ tm.assert_frame_equal(result, result)
930
+
931
+ def test_round_trip_exception_(self, datapath):
932
+ # GH 3867
933
+ path = datapath("io", "json", "data", "teams.csv")
934
+ df = pd.read_csv(path)
935
+ s = df.to_json()
936
+ result = read_json(s)
937
+ tm.assert_frame_equal(result.reindex(index=df.index, columns=df.columns), df)
938
+
939
+ @pytest.mark.network
940
+ @tm.network(
941
+ url="https://api.github.com/repos/pandas-dev/pandas/issues?per_page=5",
942
+ check_before_test=True,
943
+ )
944
+ @pytest.mark.parametrize(
945
+ "field,dtype",
946
+ [
947
+ ["created_at", pd.DatetimeTZDtype(tz="UTC")],
948
+ ["closed_at", "datetime64[ns]"],
949
+ ["updated_at", pd.DatetimeTZDtype(tz="UTC")],
950
+ ],
951
+ )
952
+ def test_url(self, field, dtype):
953
+ url = "https://api.github.com/repos/pandas-dev/pandas/issues?per_page=5"
954
+ result = read_json(url, convert_dates=True)
955
+ assert result[field].dtype == dtype
956
+
957
+ def test_timedelta(self):
958
+ converter = lambda x: pd.to_timedelta(x, unit="ms")
959
+
960
+ ser = Series([timedelta(23), timedelta(seconds=5)])
961
+ assert ser.dtype == "timedelta64[ns]"
962
+
963
+ result = read_json(ser.to_json(), typ="series").apply(converter)
964
+ tm.assert_series_equal(result, ser)
965
+
966
+ ser = Series([timedelta(23), timedelta(seconds=5)], index=pd.Index([0, 1]))
967
+ assert ser.dtype == "timedelta64[ns]"
968
+ result = read_json(ser.to_json(), typ="series").apply(converter)
969
+ tm.assert_series_equal(result, ser)
970
+
971
+ frame = DataFrame([timedelta(23), timedelta(seconds=5)])
972
+ assert frame[0].dtype == "timedelta64[ns]"
973
+ tm.assert_frame_equal(frame, read_json(frame.to_json()).apply(converter))
974
+
975
+ def test_timedelta2(self):
976
+ frame = DataFrame(
977
+ {
978
+ "a": [timedelta(days=23), timedelta(seconds=5)],
979
+ "b": [1, 2],
980
+ "c": pd.date_range(start="20130101", periods=2),
981
+ }
982
+ )
983
+
984
+ result = read_json(frame.to_json(date_unit="ns"))
985
+ result["a"] = pd.to_timedelta(result.a, unit="ns")
986
+ result["c"] = pd.to_datetime(result.c)
987
+ tm.assert_frame_equal(frame, result)
988
+
989
+ def test_mixed_timedelta_datetime(self):
990
+ td = timedelta(23)
991
+ ts = Timestamp("20130101")
992
+ frame = DataFrame({"a": [td, ts]}, dtype=object)
993
+
994
+ expected = DataFrame(
995
+ {"a": [pd.Timedelta(td).as_unit("ns")._value, ts.as_unit("ns")._value]}
996
+ )
997
+ result = read_json(frame.to_json(date_unit="ns"), dtype={"a": "int64"})
998
+ tm.assert_frame_equal(result, expected, check_index_type=False)
999
+
1000
+ @pytest.mark.parametrize("as_object", [True, False])
1001
+ @pytest.mark.parametrize("date_format", ["iso", "epoch"])
1002
+ @pytest.mark.parametrize("timedelta_typ", [pd.Timedelta, timedelta])
1003
+ def test_timedelta_to_json(self, as_object, date_format, timedelta_typ):
1004
+ # GH28156: to_json not correctly formatting Timedelta
1005
+ data = [timedelta_typ(days=1), timedelta_typ(days=2), pd.NaT]
1006
+ if as_object:
1007
+ data.append("a")
1008
+
1009
+ ser = Series(data, index=data)
1010
+ if date_format == "iso":
1011
+ expected = (
1012
+ '{"P1DT0H0M0S":"P1DT0H0M0S","P2DT0H0M0S":"P2DT0H0M0S","null":null}'
1013
+ )
1014
+ else:
1015
+ expected = '{"86400000":86400000,"172800000":172800000,"null":null}'
1016
+
1017
+ if as_object:
1018
+ expected = expected.replace("}", ',"a":"a"}')
1019
+
1020
+ result = ser.to_json(date_format=date_format)
1021
+ assert result == expected
1022
+
1023
+ def test_default_handler(self):
1024
+ value = object()
1025
+ frame = DataFrame({"a": [7, value]})
1026
+ expected = DataFrame({"a": [7, str(value)]})
1027
+ result = read_json(frame.to_json(default_handler=str))
1028
+ tm.assert_frame_equal(expected, result, check_index_type=False)
1029
+
1030
+ def test_default_handler_indirect(self):
1031
+ from pandas.io.json import dumps
1032
+
1033
+ def default(obj):
1034
+ if isinstance(obj, complex):
1035
+ return [("mathjs", "Complex"), ("re", obj.real), ("im", obj.imag)]
1036
+ return str(obj)
1037
+
1038
+ df_list = [
1039
+ 9,
1040
+ DataFrame(
1041
+ {"a": [1, "STR", complex(4, -5)], "b": [float("nan"), None, "N/A"]},
1042
+ columns=["a", "b"],
1043
+ ),
1044
+ ]
1045
+ expected = (
1046
+ '[9,[[1,null],["STR",null],[[["mathjs","Complex"],'
1047
+ '["re",4.0],["im",-5.0]],"N\\/A"]]]'
1048
+ )
1049
+ assert dumps(df_list, default_handler=default, orient="values") == expected
1050
+
1051
+ def test_default_handler_numpy_unsupported_dtype(self):
1052
+ # GH12554 to_json raises 'Unhandled numpy dtype 15'
1053
+ df = DataFrame(
1054
+ {"a": [1, 2.3, complex(4, -5)], "b": [float("nan"), None, complex(1.2, 0)]},
1055
+ columns=["a", "b"],
1056
+ )
1057
+ expected = (
1058
+ '[["(1+0j)","(nan+0j)"],'
1059
+ '["(2.3+0j)","(nan+0j)"],'
1060
+ '["(4-5j)","(1.2+0j)"]]'
1061
+ )
1062
+ assert df.to_json(default_handler=str, orient="values") == expected
1063
+
1064
+ def test_default_handler_raises(self):
1065
+ msg = "raisin"
1066
+
1067
+ def my_handler_raises(obj):
1068
+ raise TypeError(msg)
1069
+
1070
+ with pytest.raises(TypeError, match=msg):
1071
+ DataFrame({"a": [1, 2, object()]}).to_json(
1072
+ default_handler=my_handler_raises
1073
+ )
1074
+ with pytest.raises(TypeError, match=msg):
1075
+ DataFrame({"a": [1, 2, complex(4, -5)]}).to_json(
1076
+ default_handler=my_handler_raises
1077
+ )
1078
+
1079
+ def test_categorical(self):
1080
+ # GH4377 df.to_json segfaults with non-ndarray blocks
1081
+ df = DataFrame({"A": ["a", "b", "c", "a", "b", "b", "a"]})
1082
+ df["B"] = df["A"]
1083
+ expected = df.to_json()
1084
+
1085
+ df["B"] = df["A"].astype("category")
1086
+ assert expected == df.to_json()
1087
+
1088
+ s = df["A"]
1089
+ sc = df["B"]
1090
+ assert s.to_json() == sc.to_json()
1091
+
1092
+ def test_datetime_tz(self):
1093
+ # GH4377 df.to_json segfaults with non-ndarray blocks
1094
+ tz_range = pd.date_range("20130101", periods=3, tz="US/Eastern")
1095
+ tz_naive = tz_range.tz_convert("utc").tz_localize(None)
1096
+
1097
+ df = DataFrame({"A": tz_range, "B": pd.date_range("20130101", periods=3)})
1098
+
1099
+ df_naive = df.copy()
1100
+ df_naive["A"] = tz_naive
1101
+ expected = df_naive.to_json()
1102
+ assert expected == df.to_json()
1103
+
1104
+ stz = Series(tz_range)
1105
+ s_naive = Series(tz_naive)
1106
+ assert stz.to_json() == s_naive.to_json()
1107
+
1108
+ def test_sparse(self):
1109
+ # GH4377 df.to_json segfaults with non-ndarray blocks
1110
+ df = DataFrame(np.random.randn(10, 4))
1111
+ df.loc[:8] = np.nan
1112
+
1113
+ sdf = df.astype("Sparse")
1114
+ expected = df.to_json()
1115
+ assert expected == sdf.to_json()
1116
+
1117
+ s = Series(np.random.randn(10))
1118
+ s.loc[:8] = np.nan
1119
+ ss = s.astype("Sparse")
1120
+
1121
+ expected = s.to_json()
1122
+ assert expected == ss.to_json()
1123
+
1124
+ @pytest.mark.parametrize(
1125
+ "ts",
1126
+ [
1127
+ Timestamp("2013-01-10 05:00:00Z"),
1128
+ Timestamp("2013-01-10 00:00:00", tz="US/Eastern"),
1129
+ Timestamp("2013-01-10 00:00:00-0500"),
1130
+ ],
1131
+ )
1132
+ def test_tz_is_utc(self, ts):
1133
+ from pandas.io.json import dumps
1134
+
1135
+ exp = '"2013-01-10T05:00:00.000Z"'
1136
+
1137
+ assert dumps(ts, iso_dates=True) == exp
1138
+ dt = ts.to_pydatetime()
1139
+ assert dumps(dt, iso_dates=True) == exp
1140
+
1141
+ def test_tz_is_naive(self):
1142
+ from pandas.io.json import dumps
1143
+
1144
+ ts = Timestamp("2013-01-10 05:00:00")
1145
+ exp = '"2013-01-10T05:00:00.000"'
1146
+
1147
+ assert dumps(ts, iso_dates=True) == exp
1148
+ dt = ts.to_pydatetime()
1149
+ assert dumps(dt, iso_dates=True) == exp
1150
+
1151
+ @pytest.mark.parametrize(
1152
+ "tz_range",
1153
+ [
1154
+ pd.date_range("2013-01-01 05:00:00Z", periods=2),
1155
+ pd.date_range("2013-01-01 00:00:00", periods=2, tz="US/Eastern"),
1156
+ pd.date_range("2013-01-01 00:00:00-0500", periods=2),
1157
+ ],
1158
+ )
1159
+ def test_tz_range_is_utc(self, tz_range):
1160
+ from pandas.io.json import dumps
1161
+
1162
+ exp = '["2013-01-01T05:00:00.000Z","2013-01-02T05:00:00.000Z"]'
1163
+ dfexp = (
1164
+ '{"DT":{'
1165
+ '"0":"2013-01-01T05:00:00.000Z",'
1166
+ '"1":"2013-01-02T05:00:00.000Z"}}'
1167
+ )
1168
+
1169
+ assert dumps(tz_range, iso_dates=True) == exp
1170
+ dti = DatetimeIndex(tz_range)
1171
+ # Ensure datetimes in object array are serialized correctly
1172
+ # in addition to the normal DTI case
1173
+ assert dumps(dti, iso_dates=True) == exp
1174
+ assert dumps(dti.astype(object), iso_dates=True) == exp
1175
+ df = DataFrame({"DT": dti})
1176
+ result = dumps(df, iso_dates=True)
1177
+ assert result == dfexp
1178
+ assert dumps(df.astype({"DT": object}), iso_dates=True)
1179
+
1180
+ def test_tz_range_is_naive(self):
1181
+ from pandas.io.json import dumps
1182
+
1183
+ dti = pd.date_range("2013-01-01 05:00:00", periods=2)
1184
+
1185
+ exp = '["2013-01-01T05:00:00.000","2013-01-02T05:00:00.000"]'
1186
+ dfexp = '{"DT":{"0":"2013-01-01T05:00:00.000","1":"2013-01-02T05:00:00.000"}}'
1187
+
1188
+ # Ensure datetimes in object array are serialized correctly
1189
+ # in addition to the normal DTI case
1190
+ assert dumps(dti, iso_dates=True) == exp
1191
+ assert dumps(dti.astype(object), iso_dates=True) == exp
1192
+ df = DataFrame({"DT": dti})
1193
+ result = dumps(df, iso_dates=True)
1194
+ assert result == dfexp
1195
+ assert dumps(df.astype({"DT": object}), iso_dates=True)
1196
+
1197
+ def test_read_inline_jsonl(self):
1198
+ # GH9180
1199
+ result = read_json('{"a": 1, "b": 2}\n{"b":2, "a" :1}\n', lines=True)
1200
+ expected = DataFrame([[1, 2], [1, 2]], columns=["a", "b"])
1201
+ tm.assert_frame_equal(result, expected)
1202
+
1203
+ @pytest.mark.single_cpu
1204
+ @td.skip_if_not_us_locale
1205
+ def test_read_s3_jsonl(self, s3_resource, s3so):
1206
+ # GH17200
1207
+
1208
+ result = read_json(
1209
+ "s3n://pandas-test/items.jsonl", lines=True, storage_options=s3so
1210
+ )
1211
+ expected = DataFrame([[1, 2], [1, 2]], columns=["a", "b"])
1212
+ tm.assert_frame_equal(result, expected)
1213
+
1214
+ def test_read_local_jsonl(self):
1215
+ # GH17200
1216
+ with tm.ensure_clean("tmp_items.json") as path:
1217
+ with open(path, "w") as infile:
1218
+ infile.write('{"a": 1, "b": 2}\n{"b":2, "a" :1}\n')
1219
+ result = read_json(path, lines=True)
1220
+ expected = DataFrame([[1, 2], [1, 2]], columns=["a", "b"])
1221
+ tm.assert_frame_equal(result, expected)
1222
+
1223
+ def test_read_jsonl_unicode_chars(self):
1224
+ # GH15132: non-ascii unicode characters
1225
+ # \u201d == RIGHT DOUBLE QUOTATION MARK
1226
+
1227
+ # simulate file handle
1228
+ json = '{"a": "foo”", "b": "bar"}\n{"a": "foo", "b": "bar"}\n'
1229
+ json = StringIO(json)
1230
+ result = read_json(json, lines=True)
1231
+ expected = DataFrame([["foo\u201d", "bar"], ["foo", "bar"]], columns=["a", "b"])
1232
+ tm.assert_frame_equal(result, expected)
1233
+
1234
+ # simulate string
1235
+ json = '{"a": "foo”", "b": "bar"}\n{"a": "foo", "b": "bar"}\n'
1236
+ result = read_json(json, lines=True)
1237
+ expected = DataFrame([["foo\u201d", "bar"], ["foo", "bar"]], columns=["a", "b"])
1238
+ tm.assert_frame_equal(result, expected)
1239
+
1240
+ @pytest.mark.parametrize("bigNum", [sys.maxsize + 1, -(sys.maxsize + 2)])
1241
+ def test_to_json_large_numbers(self, bigNum):
1242
+ # GH34473
1243
+ series = Series(bigNum, dtype=object, index=["articleId"])
1244
+ json = series.to_json()
1245
+ expected = '{"articleId":' + str(bigNum) + "}"
1246
+ assert json == expected
1247
+
1248
+ df = DataFrame(bigNum, dtype=object, index=["articleId"], columns=[0])
1249
+ json = df.to_json()
1250
+ expected = '{"0":{"articleId":' + str(bigNum) + "}}"
1251
+ assert json == expected
1252
+
1253
+ @pytest.mark.parametrize("bigNum", [-(2**63) - 1, 2**64])
1254
+ def test_read_json_large_numbers(self, bigNum):
1255
+ # GH20599, 26068
1256
+ json = StringIO('{"articleId":' + str(bigNum) + "}")
1257
+ msg = r"Value is too small|Value is too big"
1258
+ with pytest.raises(ValueError, match=msg):
1259
+ read_json(json)
1260
+
1261
+ json = StringIO('{"0":{"articleId":' + str(bigNum) + "}}")
1262
+ with pytest.raises(ValueError, match=msg):
1263
+ read_json(json)
1264
+
1265
+ def test_read_json_large_numbers2(self):
1266
+ # GH18842
1267
+ json = '{"articleId": "1404366058080022500245"}'
1268
+ json = StringIO(json)
1269
+ result = read_json(json, typ="series")
1270
+ expected = Series(1.404366e21, index=["articleId"])
1271
+ tm.assert_series_equal(result, expected)
1272
+
1273
+ json = '{"0": {"articleId": "1404366058080022500245"}}'
1274
+ json = StringIO(json)
1275
+ result = read_json(json)
1276
+ expected = DataFrame(1.404366e21, index=["articleId"], columns=[0])
1277
+ tm.assert_frame_equal(result, expected)
1278
+
1279
+ def test_to_jsonl(self):
1280
+ # GH9180
1281
+ df = DataFrame([[1, 2], [1, 2]], columns=["a", "b"])
1282
+ result = df.to_json(orient="records", lines=True)
1283
+ expected = '{"a":1,"b":2}\n{"a":1,"b":2}\n'
1284
+ assert result == expected
1285
+
1286
+ df = DataFrame([["foo}", "bar"], ['foo"', "bar"]], columns=["a", "b"])
1287
+ result = df.to_json(orient="records", lines=True)
1288
+ expected = '{"a":"foo}","b":"bar"}\n{"a":"foo\\"","b":"bar"}\n'
1289
+ assert result == expected
1290
+ tm.assert_frame_equal(read_json(result, lines=True), df)
1291
+
1292
+ # GH15096: escaped characters in columns and data
1293
+ df = DataFrame([["foo\\", "bar"], ['foo"', "bar"]], columns=["a\\", "b"])
1294
+ result = df.to_json(orient="records", lines=True)
1295
+ expected = '{"a\\\\":"foo\\\\","b":"bar"}\n{"a\\\\":"foo\\"","b":"bar"}\n'
1296
+ assert result == expected
1297
+ tm.assert_frame_equal(read_json(result, lines=True), df)
1298
+
1299
+ # TODO: there is a near-identical test for pytables; can we share?
1300
+ @pytest.mark.xfail(reason="GH#13774 encoding kwarg not supported", raises=TypeError)
1301
+ def test_latin_encoding(self):
1302
+ # GH 13774
1303
+ values = [
1304
+ [b"E\xc9, 17", b"", b"a", b"b", b"c"],
1305
+ [b"E\xc9, 17", b"a", b"b", b"c"],
1306
+ [b"EE, 17", b"", b"a", b"b", b"c"],
1307
+ [b"E\xc9, 17", b"\xf8\xfc", b"a", b"b", b"c"],
1308
+ [b"", b"a", b"b", b"c"],
1309
+ [b"\xf8\xfc", b"a", b"b", b"c"],
1310
+ [b"A\xf8\xfc", b"", b"a", b"b", b"c"],
1311
+ [np.nan, b"", b"b", b"c"],
1312
+ [b"A\xf8\xfc", np.nan, b"", b"b", b"c"],
1313
+ ]
1314
+
1315
+ values = [
1316
+ [x.decode("latin-1") if isinstance(x, bytes) else x for x in y]
1317
+ for y in values
1318
+ ]
1319
+
1320
+ examples = []
1321
+ for dtype in ["category", object]:
1322
+ for val in values:
1323
+ examples.append(Series(val, dtype=dtype))
1324
+
1325
+ def roundtrip(s, encoding="latin-1"):
1326
+ with tm.ensure_clean("test.json") as path:
1327
+ s.to_json(path, encoding=encoding)
1328
+ retr = read_json(path, encoding=encoding)
1329
+ tm.assert_series_equal(s, retr, check_categorical=False)
1330
+
1331
+ for s in examples:
1332
+ roundtrip(s)
1333
+
1334
+ def test_data_frame_size_after_to_json(self):
1335
+ # GH15344
1336
+ df = DataFrame({"a": [str(1)]})
1337
+
1338
+ size_before = df.memory_usage(index=True, deep=True).sum()
1339
+ df.to_json()
1340
+ size_after = df.memory_usage(index=True, deep=True).sum()
1341
+
1342
+ assert size_before == size_after
1343
+
1344
+ @pytest.mark.parametrize(
1345
+ "index", [None, [1, 2], [1.0, 2.0], ["a", "b"], ["1", "2"], ["1.", "2."]]
1346
+ )
1347
+ @pytest.mark.parametrize("columns", [["a", "b"], ["1", "2"], ["1.", "2."]])
1348
+ def test_from_json_to_json_table_index_and_columns(self, index, columns):
1349
+ # GH25433 GH25435
1350
+ expected = DataFrame([[1, 2], [3, 4]], index=index, columns=columns)
1351
+ dfjson = expected.to_json(orient="table")
1352
+ result = read_json(dfjson, orient="table")
1353
+ tm.assert_frame_equal(result, expected)
1354
+
1355
+ def test_from_json_to_json_table_dtypes(self):
1356
+ # GH21345
1357
+ expected = DataFrame({"a": [1, 2], "b": [3.0, 4.0], "c": ["5", "6"]})
1358
+ dfjson = expected.to_json(orient="table")
1359
+ result = read_json(dfjson, orient="table")
1360
+ tm.assert_frame_equal(result, expected)
1361
+
1362
+ @pytest.mark.parametrize("orient", ["split", "records", "index", "columns"])
1363
+ def test_to_json_from_json_columns_dtypes(self, orient):
1364
+ # GH21892 GH33205
1365
+ expected = DataFrame.from_dict(
1366
+ {
1367
+ "Integer": Series([1, 2, 3], dtype="int64"),
1368
+ "Float": Series([None, 2.0, 3.0], dtype="float64"),
1369
+ "Object": Series([None, "", "c"], dtype="object"),
1370
+ "Bool": Series([True, False, True], dtype="bool"),
1371
+ "Category": Series(["a", "b", None], dtype="category"),
1372
+ "Datetime": Series(
1373
+ ["2020-01-01", None, "2020-01-03"], dtype="datetime64[ns]"
1374
+ ),
1375
+ }
1376
+ )
1377
+ dfjson = expected.to_json(orient=orient)
1378
+ result = read_json(
1379
+ dfjson,
1380
+ orient=orient,
1381
+ dtype={
1382
+ "Integer": "int64",
1383
+ "Float": "float64",
1384
+ "Object": "object",
1385
+ "Bool": "bool",
1386
+ "Category": "category",
1387
+ "Datetime": "datetime64[ns]",
1388
+ },
1389
+ )
1390
+ tm.assert_frame_equal(result, expected)
1391
+
1392
+ @pytest.mark.parametrize("dtype", [True, {"b": int, "c": int}])
1393
+ def test_read_json_table_dtype_raises(self, dtype):
1394
+ # GH21345
1395
+ df = DataFrame({"a": [1, 2], "b": [3.0, 4.0], "c": ["5", "6"]})
1396
+ dfjson = df.to_json(orient="table")
1397
+ msg = "cannot pass both dtype and orient='table'"
1398
+ with pytest.raises(ValueError, match=msg):
1399
+ read_json(dfjson, orient="table", dtype=dtype)
1400
+
1401
+ def test_read_json_table_convert_axes_raises(self):
1402
+ # GH25433 GH25435
1403
+ df = DataFrame([[1, 2], [3, 4]], index=[1.0, 2.0], columns=["1.", "2."])
1404
+ dfjson = df.to_json(orient="table")
1405
+ msg = "cannot pass both convert_axes and orient='table'"
1406
+ with pytest.raises(ValueError, match=msg):
1407
+ read_json(dfjson, orient="table", convert_axes=True)
1408
+
1409
+ @pytest.mark.parametrize(
1410
+ "data, expected",
1411
+ [
1412
+ (
1413
+ DataFrame([[1, 2], [4, 5]], columns=["a", "b"]),
1414
+ {"columns": ["a", "b"], "data": [[1, 2], [4, 5]]},
1415
+ ),
1416
+ (
1417
+ DataFrame([[1, 2], [4, 5]], columns=["a", "b"]).rename_axis("foo"),
1418
+ {"columns": ["a", "b"], "data": [[1, 2], [4, 5]]},
1419
+ ),
1420
+ (
1421
+ DataFrame(
1422
+ [[1, 2], [4, 5]], columns=["a", "b"], index=[["a", "b"], ["c", "d"]]
1423
+ ),
1424
+ {"columns": ["a", "b"], "data": [[1, 2], [4, 5]]},
1425
+ ),
1426
+ (Series([1, 2, 3], name="A"), {"name": "A", "data": [1, 2, 3]}),
1427
+ (
1428
+ Series([1, 2, 3], name="A").rename_axis("foo"),
1429
+ {"name": "A", "data": [1, 2, 3]},
1430
+ ),
1431
+ (
1432
+ Series([1, 2], name="A", index=[["a", "b"], ["c", "d"]]),
1433
+ {"name": "A", "data": [1, 2]},
1434
+ ),
1435
+ ],
1436
+ )
1437
+ def test_index_false_to_json_split(self, data, expected):
1438
+ # GH 17394
1439
+ # Testing index=False in to_json with orient='split'
1440
+
1441
+ result = data.to_json(orient="split", index=False)
1442
+ result = json.loads(result)
1443
+
1444
+ assert result == expected
1445
+
1446
+ @pytest.mark.parametrize(
1447
+ "data",
1448
+ [
1449
+ (DataFrame([[1, 2], [4, 5]], columns=["a", "b"])),
1450
+ (DataFrame([[1, 2], [4, 5]], columns=["a", "b"]).rename_axis("foo")),
1451
+ (
1452
+ DataFrame(
1453
+ [[1, 2], [4, 5]], columns=["a", "b"], index=[["a", "b"], ["c", "d"]]
1454
+ )
1455
+ ),
1456
+ (Series([1, 2, 3], name="A")),
1457
+ (Series([1, 2, 3], name="A").rename_axis("foo")),
1458
+ (Series([1, 2], name="A", index=[["a", "b"], ["c", "d"]])),
1459
+ ],
1460
+ )
1461
+ def test_index_false_to_json_table(self, data):
1462
+ # GH 17394
1463
+ # Testing index=False in to_json with orient='table'
1464
+
1465
+ result = data.to_json(orient="table", index=False)
1466
+ result = json.loads(result)
1467
+
1468
+ expected = {
1469
+ "schema": pd.io.json.build_table_schema(data, index=False),
1470
+ "data": DataFrame(data).to_dict(orient="records"),
1471
+ }
1472
+
1473
+ assert result == expected
1474
+
1475
+ @pytest.mark.parametrize("orient", ["records", "index", "columns", "values"])
1476
+ def test_index_false_error_to_json(self, orient):
1477
+ # GH 17394
1478
+ # Testing error message from to_json with index=False
1479
+
1480
+ df = DataFrame([[1, 2], [4, 5]], columns=["a", "b"])
1481
+
1482
+ msg = "'index=False' is only valid when 'orient' is 'split' or 'table'"
1483
+ with pytest.raises(ValueError, match=msg):
1484
+ df.to_json(orient=orient, index=False)
1485
+
1486
+ @pytest.mark.parametrize("orient", ["split", "table"])
1487
+ @pytest.mark.parametrize("index", [True, False])
1488
+ def test_index_false_from_json_to_json(self, orient, index):
1489
+ # GH25170
1490
+ # Test index=False in from_json to_json
1491
+ expected = DataFrame({"a": [1, 2], "b": [3, 4]})
1492
+ dfjson = expected.to_json(orient=orient, index=index)
1493
+ result = read_json(dfjson, orient=orient)
1494
+ tm.assert_frame_equal(result, expected)
1495
+
1496
+ def test_read_timezone_information(self):
1497
+ # GH 25546
1498
+ result = read_json(
1499
+ '{"2019-01-01T11:00:00.000Z":88}', typ="series", orient="index"
1500
+ )
1501
+ expected = Series([88], index=DatetimeIndex(["2019-01-01 11:00:00"], tz="UTC"))
1502
+ tm.assert_series_equal(result, expected)
1503
+
1504
+ @pytest.mark.parametrize(
1505
+ "url",
1506
+ [
1507
+ "s3://example-fsspec/",
1508
+ "gcs://another-fsspec/file.json",
1509
+ "https://example-site.com/data",
1510
+ "some-protocol://data.txt",
1511
+ ],
1512
+ )
1513
+ def test_read_json_with_url_value(self, url):
1514
+ # GH 36271
1515
+ result = read_json(f'{{"url":{{"0":"{url}"}}}}')
1516
+ expected = DataFrame({"url": [url]})
1517
+ tm.assert_frame_equal(result, expected)
1518
+
1519
+ @pytest.mark.parametrize(
1520
+ "compression",
1521
+ ["", ".gz", ".bz2", ".tar"],
1522
+ )
1523
+ def test_read_json_with_very_long_file_path(self, compression):
1524
+ # GH 46718
1525
+ long_json_path = f'{"a" * 1000}.json{compression}'
1526
+ with pytest.raises(
1527
+ FileNotFoundError, match=f"File {long_json_path} does not exist"
1528
+ ):
1529
+ # path too long for Windows is handled in file_exists() but raises in
1530
+ # _get_data_from_filepath()
1531
+ read_json(long_json_path)
1532
+
1533
+ @pytest.mark.parametrize(
1534
+ "date_format,key", [("epoch", 86400000), ("iso", "P1DT0H0M0S")]
1535
+ )
1536
+ def test_timedelta_as_label(self, date_format, key):
1537
+ df = DataFrame([[1]], columns=[pd.Timedelta("1D")])
1538
+ expected = f'{{"{key}":{{"0":1}}}}'
1539
+ result = df.to_json(date_format=date_format)
1540
+
1541
+ assert result == expected
1542
+
1543
+ @pytest.mark.parametrize(
1544
+ "orient,expected",
1545
+ [
1546
+ ("index", "{\"('a', 'b')\":{\"('c', 'd')\":1}}"),
1547
+ ("columns", "{\"('c', 'd')\":{\"('a', 'b')\":1}}"),
1548
+ # TODO: the below have separate encoding procedures
1549
+ pytest.param(
1550
+ "split",
1551
+ "",
1552
+ marks=pytest.mark.xfail(
1553
+ reason="Produces JSON but not in a consistent manner"
1554
+ ),
1555
+ ),
1556
+ pytest.param(
1557
+ "table",
1558
+ "",
1559
+ marks=pytest.mark.xfail(
1560
+ reason="Produces JSON but not in a consistent manner"
1561
+ ),
1562
+ ),
1563
+ ],
1564
+ )
1565
+ def test_tuple_labels(self, orient, expected):
1566
+ # GH 20500
1567
+ df = DataFrame([[1]], index=[("a", "b")], columns=[("c", "d")])
1568
+ result = df.to_json(orient=orient)
1569
+ assert result == expected
1570
+
1571
+ @pytest.mark.parametrize("indent", [1, 2, 4])
1572
+ def test_to_json_indent(self, indent):
1573
+ # GH 12004
1574
+ df = DataFrame([["foo", "bar"], ["baz", "qux"]], columns=["a", "b"])
1575
+
1576
+ result = df.to_json(indent=indent)
1577
+ spaces = " " * indent
1578
+ expected = f"""{{
1579
+ {spaces}"a":{{
1580
+ {spaces}{spaces}"0":"foo",
1581
+ {spaces}{spaces}"1":"baz"
1582
+ {spaces}}},
1583
+ {spaces}"b":{{
1584
+ {spaces}{spaces}"0":"bar",
1585
+ {spaces}{spaces}"1":"qux"
1586
+ {spaces}}}
1587
+ }}"""
1588
+
1589
+ assert result == expected
1590
+
1591
+ @pytest.mark.parametrize(
1592
+ "orient,expected",
1593
+ [
1594
+ (
1595
+ "split",
1596
+ """{
1597
+ "columns":[
1598
+ "a",
1599
+ "b"
1600
+ ],
1601
+ "index":[
1602
+ 0,
1603
+ 1
1604
+ ],
1605
+ "data":[
1606
+ [
1607
+ "foo",
1608
+ "bar"
1609
+ ],
1610
+ [
1611
+ "baz",
1612
+ "qux"
1613
+ ]
1614
+ ]
1615
+ }""",
1616
+ ),
1617
+ (
1618
+ "records",
1619
+ """[
1620
+ {
1621
+ "a":"foo",
1622
+ "b":"bar"
1623
+ },
1624
+ {
1625
+ "a":"baz",
1626
+ "b":"qux"
1627
+ }
1628
+ ]""",
1629
+ ),
1630
+ (
1631
+ "index",
1632
+ """{
1633
+ "0":{
1634
+ "a":"foo",
1635
+ "b":"bar"
1636
+ },
1637
+ "1":{
1638
+ "a":"baz",
1639
+ "b":"qux"
1640
+ }
1641
+ }""",
1642
+ ),
1643
+ (
1644
+ "columns",
1645
+ """{
1646
+ "a":{
1647
+ "0":"foo",
1648
+ "1":"baz"
1649
+ },
1650
+ "b":{
1651
+ "0":"bar",
1652
+ "1":"qux"
1653
+ }
1654
+ }""",
1655
+ ),
1656
+ (
1657
+ "values",
1658
+ """[
1659
+ [
1660
+ "foo",
1661
+ "bar"
1662
+ ],
1663
+ [
1664
+ "baz",
1665
+ "qux"
1666
+ ]
1667
+ ]""",
1668
+ ),
1669
+ (
1670
+ "table",
1671
+ """{
1672
+ "schema":{
1673
+ "fields":[
1674
+ {
1675
+ "name":"index",
1676
+ "type":"integer"
1677
+ },
1678
+ {
1679
+ "name":"a",
1680
+ "type":"string"
1681
+ },
1682
+ {
1683
+ "name":"b",
1684
+ "type":"string"
1685
+ }
1686
+ ],
1687
+ "primaryKey":[
1688
+ "index"
1689
+ ],
1690
+ "pandas_version":"1.4.0"
1691
+ },
1692
+ "data":[
1693
+ {
1694
+ "index":0,
1695
+ "a":"foo",
1696
+ "b":"bar"
1697
+ },
1698
+ {
1699
+ "index":1,
1700
+ "a":"baz",
1701
+ "b":"qux"
1702
+ }
1703
+ ]
1704
+ }""",
1705
+ ),
1706
+ ],
1707
+ )
1708
+ def test_json_indent_all_orients(self, orient, expected):
1709
+ # GH 12004
1710
+ df = DataFrame([["foo", "bar"], ["baz", "qux"]], columns=["a", "b"])
1711
+ result = df.to_json(orient=orient, indent=4)
1712
+ assert result == expected
1713
+
1714
+ def test_json_negative_indent_raises(self):
1715
+ with pytest.raises(ValueError, match="must be a nonnegative integer"):
1716
+ DataFrame().to_json(indent=-1)
1717
+
1718
+ def test_emca_262_nan_inf_support(self):
1719
+ # GH 12213
1720
+ data = '["a", NaN, "NaN", Infinity, "Infinity", -Infinity, "-Infinity"]'
1721
+ result = read_json(data)
1722
+ expected = DataFrame(
1723
+ ["a", np.nan, "NaN", np.inf, "Infinity", -np.inf, "-Infinity"]
1724
+ )
1725
+ tm.assert_frame_equal(result, expected)
1726
+
1727
+ def test_frame_int_overflow(self):
1728
+ # GH 30320
1729
+ encoded_json = json.dumps([{"col": "31900441201190696999"}, {"col": "Text"}])
1730
+ expected = DataFrame({"col": ["31900441201190696999", "Text"]})
1731
+ result = read_json(encoded_json)
1732
+ tm.assert_frame_equal(result, expected)
1733
+
1734
+ @pytest.mark.parametrize(
1735
+ "dataframe,expected",
1736
+ [
1737
+ (
1738
+ DataFrame({"x": [1, 2, 3], "y": ["a", "b", "c"]}),
1739
+ '{"(0, \'x\')":1,"(0, \'y\')":"a","(1, \'x\')":2,'
1740
+ '"(1, \'y\')":"b","(2, \'x\')":3,"(2, \'y\')":"c"}',
1741
+ )
1742
+ ],
1743
+ )
1744
+ def test_json_multiindex(self, dataframe, expected):
1745
+ series = dataframe.stack()
1746
+ result = series.to_json(orient="index")
1747
+ assert result == expected
1748
+
1749
+ @pytest.mark.single_cpu
1750
+ def test_to_s3(self, s3_resource, s3so):
1751
+ # GH 28375
1752
+ mock_bucket_name, target_file = "pandas-test", "test.json"
1753
+ df = DataFrame({"x": [1, 2, 3], "y": [2, 4, 6]})
1754
+ df.to_json(f"s3://{mock_bucket_name}/{target_file}", storage_options=s3so)
1755
+ timeout = 5
1756
+ while True:
1757
+ if target_file in (
1758
+ obj.key for obj in s3_resource.Bucket("pandas-test").objects.all()
1759
+ ):
1760
+ break
1761
+ time.sleep(0.1)
1762
+ timeout -= 0.1
1763
+ assert timeout > 0, "Timed out waiting for file to appear on moto"
1764
+
1765
+ def test_json_pandas_nulls(self, nulls_fixture, request):
1766
+ # GH 31615
1767
+ if isinstance(nulls_fixture, Decimal):
1768
+ mark = pytest.mark.xfail(reason="not implemented")
1769
+ request.node.add_marker(mark)
1770
+
1771
+ result = DataFrame([[nulls_fixture]]).to_json()
1772
+ assert result == '{"0":{"0":null}}'
1773
+
1774
+ def test_readjson_bool_series(self):
1775
+ # GH31464
1776
+ result = read_json("[true, true, false]", typ="series")
1777
+ expected = Series([True, True, False])
1778
+ tm.assert_series_equal(result, expected)
1779
+
1780
+ def test_to_json_multiindex_escape(self):
1781
+ # GH 15273
1782
+ df = DataFrame(
1783
+ True,
1784
+ index=pd.date_range("2017-01-20", "2017-01-23"),
1785
+ columns=["foo", "bar"],
1786
+ ).stack()
1787
+ result = df.to_json()
1788
+ expected = (
1789
+ "{\"(Timestamp('2017-01-20 00:00:00'), 'foo')\":true,"
1790
+ "\"(Timestamp('2017-01-20 00:00:00'), 'bar')\":true,"
1791
+ "\"(Timestamp('2017-01-21 00:00:00'), 'foo')\":true,"
1792
+ "\"(Timestamp('2017-01-21 00:00:00'), 'bar')\":true,"
1793
+ "\"(Timestamp('2017-01-22 00:00:00'), 'foo')\":true,"
1794
+ "\"(Timestamp('2017-01-22 00:00:00'), 'bar')\":true,"
1795
+ "\"(Timestamp('2017-01-23 00:00:00'), 'foo')\":true,"
1796
+ "\"(Timestamp('2017-01-23 00:00:00'), 'bar')\":true}"
1797
+ )
1798
+ assert result == expected
1799
+
1800
+ def test_to_json_series_of_objects(self):
1801
+ class _TestObject:
1802
+ def __init__(self, a, b, _c, d) -> None:
1803
+ self.a = a
1804
+ self.b = b
1805
+ self._c = _c
1806
+ self.d = d
1807
+
1808
+ def e(self):
1809
+ return 5
1810
+
1811
+ # JSON keys should be all non-callable non-underscore attributes, see GH-42768
1812
+ series = Series([_TestObject(a=1, b=2, _c=3, d=4)])
1813
+ assert json.loads(series.to_json()) == {"0": {"a": 1, "b": 2, "d": 4}}
1814
+
1815
+ @pytest.mark.parametrize(
1816
+ "data,expected",
1817
+ [
1818
+ (
1819
+ Series({0: -6 + 8j, 1: 0 + 1j, 2: 9 - 5j}),
1820
+ '{"0":{"imag":8.0,"real":-6.0},'
1821
+ '"1":{"imag":1.0,"real":0.0},'
1822
+ '"2":{"imag":-5.0,"real":9.0}}',
1823
+ ),
1824
+ (
1825
+ Series({0: -9.39 + 0.66j, 1: 3.95 + 9.32j, 2: 4.03 - 0.17j}),
1826
+ '{"0":{"imag":0.66,"real":-9.39},'
1827
+ '"1":{"imag":9.32,"real":3.95},'
1828
+ '"2":{"imag":-0.17,"real":4.03}}',
1829
+ ),
1830
+ (
1831
+ DataFrame([[-2 + 3j, -1 - 0j], [4 - 3j, -0 - 10j]]),
1832
+ '{"0":{"0":{"imag":3.0,"real":-2.0},'
1833
+ '"1":{"imag":-3.0,"real":4.0}},'
1834
+ '"1":{"0":{"imag":0.0,"real":-1.0},'
1835
+ '"1":{"imag":-10.0,"real":0.0}}}',
1836
+ ),
1837
+ (
1838
+ DataFrame(
1839
+ [[-0.28 + 0.34j, -1.08 - 0.39j], [0.41 - 0.34j, -0.78 - 1.35j]]
1840
+ ),
1841
+ '{"0":{"0":{"imag":0.34,"real":-0.28},'
1842
+ '"1":{"imag":-0.34,"real":0.41}},'
1843
+ '"1":{"0":{"imag":-0.39,"real":-1.08},'
1844
+ '"1":{"imag":-1.35,"real":-0.78}}}',
1845
+ ),
1846
+ ],
1847
+ )
1848
+ def test_complex_data_tojson(self, data, expected):
1849
+ # GH41174
1850
+ result = data.to_json()
1851
+ assert result == expected
1852
+
1853
+ def test_json_uint64(self):
1854
+ # GH21073
1855
+ expected = (
1856
+ '{"columns":["col1"],"index":[0,1],'
1857
+ '"data":[[13342205958987758245],[12388075603347835679]]}'
1858
+ )
1859
+ df = DataFrame(data={"col1": [13342205958987758245, 12388075603347835679]})
1860
+ result = df.to_json(orient="split")
1861
+ assert result == expected
1862
+
1863
+ @pytest.mark.parametrize(
1864
+ "orient", ["split", "records", "values", "index", "columns"]
1865
+ )
1866
+ def test_read_json_dtype_backend(self, string_storage, dtype_backend, orient):
1867
+ # GH#50750
1868
+ pa = pytest.importorskip("pyarrow")
1869
+ df = DataFrame(
1870
+ {
1871
+ "a": Series([1, np.nan, 3], dtype="Int64"),
1872
+ "b": Series([1, 2, 3], dtype="Int64"),
1873
+ "c": Series([1.5, np.nan, 2.5], dtype="Float64"),
1874
+ "d": Series([1.5, 2.0, 2.5], dtype="Float64"),
1875
+ "e": [True, False, None],
1876
+ "f": [True, False, True],
1877
+ "g": ["a", "b", "c"],
1878
+ "h": ["a", "b", None],
1879
+ }
1880
+ )
1881
+
1882
+ if string_storage == "python":
1883
+ string_array = StringArray(np.array(["a", "b", "c"], dtype=np.object_))
1884
+ string_array_na = StringArray(np.array(["a", "b", NA], dtype=np.object_))
1885
+
1886
+ else:
1887
+ string_array = ArrowStringArray(pa.array(["a", "b", "c"]))
1888
+ string_array_na = ArrowStringArray(pa.array(["a", "b", None]))
1889
+
1890
+ out = df.to_json(orient=orient)
1891
+ with pd.option_context("mode.string_storage", string_storage):
1892
+ result = read_json(out, dtype_backend=dtype_backend, orient=orient)
1893
+
1894
+ expected = DataFrame(
1895
+ {
1896
+ "a": Series([1, np.nan, 3], dtype="Int64"),
1897
+ "b": Series([1, 2, 3], dtype="Int64"),
1898
+ "c": Series([1.5, np.nan, 2.5], dtype="Float64"),
1899
+ "d": Series([1.5, 2.0, 2.5], dtype="Float64"),
1900
+ "e": Series([True, False, NA], dtype="boolean"),
1901
+ "f": Series([True, False, True], dtype="boolean"),
1902
+ "g": string_array,
1903
+ "h": string_array_na,
1904
+ }
1905
+ )
1906
+
1907
+ if dtype_backend == "pyarrow":
1908
+ from pandas.arrays import ArrowExtensionArray
1909
+
1910
+ expected = DataFrame(
1911
+ {
1912
+ col: ArrowExtensionArray(pa.array(expected[col], from_pandas=True))
1913
+ for col in expected.columns
1914
+ }
1915
+ )
1916
+
1917
+ if orient == "values":
1918
+ expected.columns = list(range(0, 8))
1919
+
1920
+ tm.assert_frame_equal(result, expected)
1921
+
1922
+ @pytest.mark.parametrize("orient", ["split", "records", "index"])
1923
+ def test_read_json_nullable_series(self, string_storage, dtype_backend, orient):
1924
+ # GH#50750
1925
+ pa = pytest.importorskip("pyarrow")
1926
+ ser = Series([1, np.nan, 3], dtype="Int64")
1927
+
1928
+ out = ser.to_json(orient=orient)
1929
+ with pd.option_context("mode.string_storage", string_storage):
1930
+ result = read_json(
1931
+ out, dtype_backend=dtype_backend, orient=orient, typ="series"
1932
+ )
1933
+
1934
+ expected = Series([1, np.nan, 3], dtype="Int64")
1935
+
1936
+ if dtype_backend == "pyarrow":
1937
+ from pandas.arrays import ArrowExtensionArray
1938
+
1939
+ expected = Series(ArrowExtensionArray(pa.array(expected, from_pandas=True)))
1940
+
1941
+ tm.assert_series_equal(result, expected)
1942
+
1943
+ def test_invalid_dtype_backend(self):
1944
+ msg = (
1945
+ "dtype_backend numpy is invalid, only 'numpy_nullable' and "
1946
+ "'pyarrow' are allowed."
1947
+ )
1948
+ with pytest.raises(ValueError, match=msg):
1949
+ read_json("test", dtype_backend="numpy")
1950
+
1951
+
1952
+ def test_invalid_engine():
1953
+ # GH 48893
1954
+ ser = Series(range(1))
1955
+ out = ser.to_json()
1956
+ with pytest.raises(ValueError, match="The engine type foo"):
1957
+ read_json(out, engine="foo")
1958
+
1959
+
1960
+ def test_pyarrow_engine_lines_false():
1961
+ # GH 48893
1962
+ ser = Series(range(1))
1963
+ out = ser.to_json()
1964
+ with pytest.raises(ValueError, match="currently pyarrow engine only supports"):
1965
+ read_json(out, engine="pyarrow", lines=False)
videochat2/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (180 Bytes). View file
 
videochat2/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/common.cpython-310.pyc ADDED
Binary file (1.48 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/conftest.cpython-310.pyc ADDED
Binary file (395 Bytes). View file
 
videochat2/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_append.cpython-310.pyc ADDED
Binary file (21.7 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_categorical.cpython-310.pyc ADDED
Binary file (4.44 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_compat.cpython-310.pyc ADDED
Binary file (2.58 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_complex.cpython-310.pyc ADDED
Binary file (4.98 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_errors.cpython-310.pyc ADDED
Binary file (7.7 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_file_handling.cpython-310.pyc ADDED
Binary file (9.78 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_keys.cpython-310.pyc ADDED
Binary file (2.86 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_put.cpython-310.pyc ADDED
Binary file (9.6 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_pytables_missing.cpython-310.pyc ADDED
Binary file (773 Bytes). View file
 
videochat2/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_read.cpython-310.pyc ADDED
Binary file (9.5 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_retain_attributes.cpython-310.pyc ADDED
Binary file (2.25 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_round_trip.cpython-310.pyc ADDED
Binary file (14.2 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_select.cpython-310.pyc ADDED
Binary file (19.7 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_store.cpython-310.pyc ADDED
Binary file (25.7 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_subclass.cpython-310.pyc ADDED
Binary file (1.68 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_time_series.cpython-310.pyc ADDED
Binary file (1.89 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_timezones.cpython-310.pyc ADDED
Binary file (9.12 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/tests/io/pytables/common.py ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from contextlib import contextmanager
2
+ import pathlib
3
+ import tempfile
4
+ from typing import Generator
5
+
6
+ import pytest
7
+
8
+ from pandas.io.pytables import HDFStore
9
+
10
+ tables = pytest.importorskip("tables")
11
+ # set these parameters so we don't have file sharing
12
+ tables.parameters.MAX_NUMEXPR_THREADS = 1
13
+ tables.parameters.MAX_BLOSC_THREADS = 1
14
+ tables.parameters.MAX_THREADS = 1
15
+
16
+
17
+ def safe_close(store):
18
+ try:
19
+ if store is not None:
20
+ store.close()
21
+ except OSError:
22
+ pass
23
+
24
+
25
+ # contextmanager to ensure the file cleanup
26
+ @contextmanager
27
+ def ensure_clean_store(
28
+ path, mode="a", complevel=None, complib=None, fletcher32=False
29
+ ) -> Generator[HDFStore, None, None]:
30
+ with tempfile.TemporaryDirectory() as tmpdirname:
31
+ tmp_path = pathlib.Path(tmpdirname, path)
32
+ with HDFStore(
33
+ tmp_path,
34
+ mode=mode,
35
+ complevel=complevel,
36
+ complib=complib,
37
+ fletcher32=fletcher32,
38
+ ) as store:
39
+ yield store
40
+
41
+
42
+ def _maybe_remove(store, key):
43
+ """
44
+ For tests using tables, try removing the table to be sure there is
45
+ no content from previous tests using the same table name.
46
+ """
47
+ try:
48
+ store.remove(key)
49
+ except (ValueError, KeyError):
50
+ pass
videochat2/lib/python3.10/site-packages/pandas/tests/io/pytables/conftest.py ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ import uuid
2
+
3
+ import pytest
4
+
5
+
6
+ @pytest.fixture
7
+ def setup_path():
8
+ """Fixture for setup path"""
9
+ return f"tmp.__{uuid.uuid4()}__.h5"
videochat2/lib/python3.10/site-packages/pandas/tests/io/pytables/test_append.py ADDED
@@ -0,0 +1,910 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import datetime
2
+ from datetime import timedelta
3
+ import re
4
+ from warnings import catch_warnings
5
+
6
+ import numpy as np
7
+ import pytest
8
+
9
+ from pandas._libs.tslibs import Timestamp
10
+ import pandas.util._test_decorators as td
11
+
12
+ import pandas as pd
13
+ from pandas import (
14
+ DataFrame,
15
+ Series,
16
+ _testing as tm,
17
+ concat,
18
+ date_range,
19
+ read_hdf,
20
+ )
21
+ from pandas.tests.io.pytables.common import (
22
+ _maybe_remove,
23
+ ensure_clean_store,
24
+ )
25
+
26
+ pytestmark = pytest.mark.single_cpu
27
+
28
+
29
+ def test_append(setup_path):
30
+ with ensure_clean_store(setup_path) as store:
31
+ # this is allowed by almost always don't want to do it
32
+ # tables.NaturalNameWarning):
33
+ with catch_warnings(record=True):
34
+ df = tm.makeTimeDataFrame()
35
+ _maybe_remove(store, "df1")
36
+ store.append("df1", df[:10])
37
+ store.append("df1", df[10:])
38
+ tm.assert_frame_equal(store["df1"], df)
39
+
40
+ _maybe_remove(store, "df2")
41
+ store.put("df2", df[:10], format="table")
42
+ store.append("df2", df[10:])
43
+ tm.assert_frame_equal(store["df2"], df)
44
+
45
+ _maybe_remove(store, "df3")
46
+ store.append("/df3", df[:10])
47
+ store.append("/df3", df[10:])
48
+ tm.assert_frame_equal(store["df3"], df)
49
+
50
+ # this is allowed by almost always don't want to do it
51
+ # tables.NaturalNameWarning
52
+ _maybe_remove(store, "/df3 foo")
53
+ store.append("/df3 foo", df[:10])
54
+ store.append("/df3 foo", df[10:])
55
+ tm.assert_frame_equal(store["df3 foo"], df)
56
+
57
+ # dtype issues - mizxed type in a single object column
58
+ df = DataFrame(data=[[1, 2], [0, 1], [1, 2], [0, 0]])
59
+ df["mixed_column"] = "testing"
60
+ df.loc[2, "mixed_column"] = np.nan
61
+ _maybe_remove(store, "df")
62
+ store.append("df", df)
63
+ tm.assert_frame_equal(store["df"], df)
64
+
65
+ # uints - test storage of uints
66
+ uint_data = DataFrame(
67
+ {
68
+ "u08": Series(
69
+ np.random.randint(0, high=255, size=5), dtype=np.uint8
70
+ ),
71
+ "u16": Series(
72
+ np.random.randint(0, high=65535, size=5), dtype=np.uint16
73
+ ),
74
+ "u32": Series(
75
+ np.random.randint(0, high=2**30, size=5), dtype=np.uint32
76
+ ),
77
+ "u64": Series(
78
+ [2**58, 2**59, 2**60, 2**61, 2**62],
79
+ dtype=np.uint64,
80
+ ),
81
+ },
82
+ index=np.arange(5),
83
+ )
84
+ _maybe_remove(store, "uints")
85
+ store.append("uints", uint_data)
86
+ tm.assert_frame_equal(store["uints"], uint_data, check_index_type=True)
87
+
88
+ # uints - test storage of uints in indexable columns
89
+ _maybe_remove(store, "uints")
90
+ # 64-bit indices not yet supported
91
+ store.append("uints", uint_data, data_columns=["u08", "u16", "u32"])
92
+ tm.assert_frame_equal(store["uints"], uint_data, check_index_type=True)
93
+
94
+
95
+ def test_append_series(setup_path):
96
+ with ensure_clean_store(setup_path) as store:
97
+ # basic
98
+ ss = tm.makeStringSeries()
99
+ ts = tm.makeTimeSeries()
100
+ ns = Series(np.arange(100))
101
+
102
+ store.append("ss", ss)
103
+ result = store["ss"]
104
+ tm.assert_series_equal(result, ss)
105
+ assert result.name is None
106
+
107
+ store.append("ts", ts)
108
+ result = store["ts"]
109
+ tm.assert_series_equal(result, ts)
110
+ assert result.name is None
111
+
112
+ ns.name = "foo"
113
+ store.append("ns", ns)
114
+ result = store["ns"]
115
+ tm.assert_series_equal(result, ns)
116
+ assert result.name == ns.name
117
+
118
+ # select on the values
119
+ expected = ns[ns > 60]
120
+ result = store.select("ns", "foo>60")
121
+ tm.assert_series_equal(result, expected)
122
+
123
+ # select on the index and values
124
+ expected = ns[(ns > 70) & (ns.index < 90)]
125
+ result = store.select("ns", "foo>70 and index<90")
126
+ tm.assert_series_equal(result, expected, check_index_type=True)
127
+
128
+ # multi-index
129
+ mi = DataFrame(np.random.randn(5, 1), columns=["A"])
130
+ mi["B"] = np.arange(len(mi))
131
+ mi["C"] = "foo"
132
+ mi.loc[3:5, "C"] = "bar"
133
+ mi.set_index(["C", "B"], inplace=True)
134
+ s = mi.stack()
135
+ s.index = s.index.droplevel(2)
136
+ store.append("mi", s)
137
+ tm.assert_series_equal(store["mi"], s, check_index_type=True)
138
+
139
+
140
+ def test_append_some_nans(setup_path):
141
+ with ensure_clean_store(setup_path) as store:
142
+ df = DataFrame(
143
+ {
144
+ "A": Series(np.random.randn(20)).astype("int32"),
145
+ "A1": np.random.randn(20),
146
+ "A2": np.random.randn(20),
147
+ "B": "foo",
148
+ "C": "bar",
149
+ "D": Timestamp("20010101"),
150
+ "E": datetime.datetime(2001, 1, 2, 0, 0),
151
+ },
152
+ index=np.arange(20),
153
+ )
154
+ # some nans
155
+ _maybe_remove(store, "df1")
156
+ df.loc[0:15, ["A1", "B", "D", "E"]] = np.nan
157
+ store.append("df1", df[:10])
158
+ store.append("df1", df[10:])
159
+ tm.assert_frame_equal(store["df1"], df, check_index_type=True)
160
+
161
+ # first column
162
+ df1 = df.copy()
163
+ df1["A1"] = np.nan
164
+ _maybe_remove(store, "df1")
165
+ store.append("df1", df1[:10])
166
+ store.append("df1", df1[10:])
167
+ tm.assert_frame_equal(store["df1"], df1, check_index_type=True)
168
+
169
+ # 2nd column
170
+ df2 = df.copy()
171
+ df2["A2"] = np.nan
172
+ _maybe_remove(store, "df2")
173
+ store.append("df2", df2[:10])
174
+ store.append("df2", df2[10:])
175
+ tm.assert_frame_equal(store["df2"], df2, check_index_type=True)
176
+
177
+ # datetimes
178
+ df3 = df.copy()
179
+ df3["E"] = np.nan
180
+ _maybe_remove(store, "df3")
181
+ store.append("df3", df3[:10])
182
+ store.append("df3", df3[10:])
183
+ tm.assert_frame_equal(store["df3"], df3, check_index_type=True)
184
+
185
+
186
+ def test_append_all_nans(setup_path):
187
+ with ensure_clean_store(setup_path) as store:
188
+ df = DataFrame(
189
+ {"A1": np.random.randn(20), "A2": np.random.randn(20)},
190
+ index=np.arange(20),
191
+ )
192
+ df.loc[0:15, :] = np.nan
193
+
194
+ # nan some entire rows (dropna=True)
195
+ _maybe_remove(store, "df")
196
+ store.append("df", df[:10], dropna=True)
197
+ store.append("df", df[10:], dropna=True)
198
+ tm.assert_frame_equal(store["df"], df[-4:], check_index_type=True)
199
+
200
+ # nan some entire rows (dropna=False)
201
+ _maybe_remove(store, "df2")
202
+ store.append("df2", df[:10], dropna=False)
203
+ store.append("df2", df[10:], dropna=False)
204
+ tm.assert_frame_equal(store["df2"], df, check_index_type=True)
205
+
206
+ # tests the option io.hdf.dropna_table
207
+ with pd.option_context("io.hdf.dropna_table", False):
208
+ _maybe_remove(store, "df3")
209
+ store.append("df3", df[:10])
210
+ store.append("df3", df[10:])
211
+ tm.assert_frame_equal(store["df3"], df)
212
+
213
+ with pd.option_context("io.hdf.dropna_table", True):
214
+ _maybe_remove(store, "df4")
215
+ store.append("df4", df[:10])
216
+ store.append("df4", df[10:])
217
+ tm.assert_frame_equal(store["df4"], df[-4:])
218
+
219
+ # nan some entire rows (string are still written!)
220
+ df = DataFrame(
221
+ {
222
+ "A1": np.random.randn(20),
223
+ "A2": np.random.randn(20),
224
+ "B": "foo",
225
+ "C": "bar",
226
+ },
227
+ index=np.arange(20),
228
+ )
229
+
230
+ df.loc[0:15, :] = np.nan
231
+
232
+ _maybe_remove(store, "df")
233
+ store.append("df", df[:10], dropna=True)
234
+ store.append("df", df[10:], dropna=True)
235
+ tm.assert_frame_equal(store["df"], df, check_index_type=True)
236
+
237
+ _maybe_remove(store, "df2")
238
+ store.append("df2", df[:10], dropna=False)
239
+ store.append("df2", df[10:], dropna=False)
240
+ tm.assert_frame_equal(store["df2"], df, check_index_type=True)
241
+
242
+ # nan some entire rows (but since we have dates they are still
243
+ # written!)
244
+ df = DataFrame(
245
+ {
246
+ "A1": np.random.randn(20),
247
+ "A2": np.random.randn(20),
248
+ "B": "foo",
249
+ "C": "bar",
250
+ "D": Timestamp("20010101"),
251
+ "E": datetime.datetime(2001, 1, 2, 0, 0),
252
+ },
253
+ index=np.arange(20),
254
+ )
255
+
256
+ df.loc[0:15, :] = np.nan
257
+
258
+ _maybe_remove(store, "df")
259
+ store.append("df", df[:10], dropna=True)
260
+ store.append("df", df[10:], dropna=True)
261
+ tm.assert_frame_equal(store["df"], df, check_index_type=True)
262
+
263
+ _maybe_remove(store, "df2")
264
+ store.append("df2", df[:10], dropna=False)
265
+ store.append("df2", df[10:], dropna=False)
266
+ tm.assert_frame_equal(store["df2"], df, check_index_type=True)
267
+
268
+
269
+ def test_append_frame_column_oriented(setup_path):
270
+ with ensure_clean_store(setup_path) as store:
271
+ # column oriented
272
+ df = tm.makeTimeDataFrame()
273
+ df.index = df.index._with_freq(None) # freq doesn't round-trip
274
+
275
+ _maybe_remove(store, "df1")
276
+ store.append("df1", df.iloc[:, :2], axes=["columns"])
277
+ store.append("df1", df.iloc[:, 2:])
278
+ tm.assert_frame_equal(store["df1"], df)
279
+
280
+ result = store.select("df1", "columns=A")
281
+ expected = df.reindex(columns=["A"])
282
+ tm.assert_frame_equal(expected, result)
283
+
284
+ # selection on the non-indexable
285
+ result = store.select("df1", ("columns=A", "index=df.index[0:4]"))
286
+ expected = df.reindex(columns=["A"], index=df.index[0:4])
287
+ tm.assert_frame_equal(expected, result)
288
+
289
+ # this isn't supported
290
+ msg = re.escape(
291
+ "passing a filterable condition to a non-table indexer "
292
+ "[Filter: Not Initialized]"
293
+ )
294
+ with pytest.raises(TypeError, match=msg):
295
+ store.select("df1", "columns=A and index>df.index[4]")
296
+
297
+
298
+ def test_append_with_different_block_ordering(setup_path):
299
+ # GH 4096; using same frames, but different block orderings
300
+ with ensure_clean_store(setup_path) as store:
301
+ for i in range(10):
302
+ df = DataFrame(np.random.randn(10, 2), columns=list("AB"))
303
+ df["index"] = range(10)
304
+ df["index"] += i * 10
305
+ df["int64"] = Series([1] * len(df), dtype="int64")
306
+ df["int16"] = Series([1] * len(df), dtype="int16")
307
+
308
+ if i % 2 == 0:
309
+ del df["int64"]
310
+ df["int64"] = Series([1] * len(df), dtype="int64")
311
+ if i % 3 == 0:
312
+ a = df.pop("A")
313
+ df["A"] = a
314
+
315
+ df.set_index("index", inplace=True)
316
+
317
+ store.append("df", df)
318
+
319
+ # test a different ordering but with more fields (like invalid
320
+ # combinations)
321
+ with ensure_clean_store(setup_path) as store:
322
+ df = DataFrame(np.random.randn(10, 2), columns=list("AB"), dtype="float64")
323
+ df["int64"] = Series([1] * len(df), dtype="int64")
324
+ df["int16"] = Series([1] * len(df), dtype="int16")
325
+ store.append("df", df)
326
+
327
+ # store additional fields in different blocks
328
+ df["int16_2"] = Series([1] * len(df), dtype="int16")
329
+ msg = re.escape(
330
+ "cannot match existing table structure for [int16] on appending data"
331
+ )
332
+ with pytest.raises(ValueError, match=msg):
333
+ store.append("df", df)
334
+
335
+ # store multiple additional fields in different blocks
336
+ df["float_3"] = Series([1.0] * len(df), dtype="float64")
337
+ msg = re.escape(
338
+ "cannot match existing table structure for [A,B] on appending data"
339
+ )
340
+ with pytest.raises(ValueError, match=msg):
341
+ store.append("df", df)
342
+
343
+
344
+ def test_append_with_strings(setup_path):
345
+ with ensure_clean_store(setup_path) as store:
346
+ with catch_warnings(record=True):
347
+
348
+ def check_col(key, name, size):
349
+ assert (
350
+ getattr(store.get_storer(key).table.description, name).itemsize
351
+ == size
352
+ )
353
+
354
+ # avoid truncation on elements
355
+ df = DataFrame([[123, "asdqwerty"], [345, "dggnhebbsdfbdfb"]])
356
+ store.append("df_big", df)
357
+ tm.assert_frame_equal(store.select("df_big"), df)
358
+ check_col("df_big", "values_block_1", 15)
359
+
360
+ # appending smaller string ok
361
+ df2 = DataFrame([[124, "asdqy"], [346, "dggnhefbdfb"]])
362
+ store.append("df_big", df2)
363
+ expected = concat([df, df2])
364
+ tm.assert_frame_equal(store.select("df_big"), expected)
365
+ check_col("df_big", "values_block_1", 15)
366
+
367
+ # avoid truncation on elements
368
+ df = DataFrame([[123, "asdqwerty"], [345, "dggnhebbsdfbdfb"]])
369
+ store.append("df_big2", df, min_itemsize={"values": 50})
370
+ tm.assert_frame_equal(store.select("df_big2"), df)
371
+ check_col("df_big2", "values_block_1", 50)
372
+
373
+ # bigger string on next append
374
+ store.append("df_new", df)
375
+ df_new = DataFrame(
376
+ [[124, "abcdefqhij"], [346, "abcdefghijklmnopqrtsuvwxyz"]]
377
+ )
378
+ msg = (
379
+ r"Trying to store a string with len \[26\] in "
380
+ r"\[values_block_1\] column but\n"
381
+ r"this column has a limit of \[15\]!\n"
382
+ "Consider using min_itemsize to preset the sizes on these "
383
+ "columns"
384
+ )
385
+ with pytest.raises(ValueError, match=msg):
386
+ store.append("df_new", df_new)
387
+
388
+ # min_itemsize on Series index (GH 11412)
389
+ df = tm.makeMixedDataFrame().set_index("C")
390
+ store.append("ss", df["B"], min_itemsize={"index": 4})
391
+ tm.assert_series_equal(store.select("ss"), df["B"])
392
+
393
+ # same as above, with data_columns=True
394
+ store.append("ss2", df["B"], data_columns=True, min_itemsize={"index": 4})
395
+ tm.assert_series_equal(store.select("ss2"), df["B"])
396
+
397
+ # min_itemsize in index without appending (GH 10381)
398
+ store.put("ss3", df, format="table", min_itemsize={"index": 6})
399
+ # just make sure there is a longer string:
400
+ df2 = df.copy().reset_index().assign(C="longer").set_index("C")
401
+ store.append("ss3", df2)
402
+ tm.assert_frame_equal(store.select("ss3"), concat([df, df2]))
403
+
404
+ # same as above, with a Series
405
+ store.put("ss4", df["B"], format="table", min_itemsize={"index": 6})
406
+ store.append("ss4", df2["B"])
407
+ tm.assert_series_equal(store.select("ss4"), concat([df["B"], df2["B"]]))
408
+
409
+ # with nans
410
+ _maybe_remove(store, "df")
411
+ df = tm.makeTimeDataFrame()
412
+ df["string"] = "foo"
413
+ df.loc[df.index[1:4], "string"] = np.nan
414
+ df["string2"] = "bar"
415
+ df.loc[df.index[4:8], "string2"] = np.nan
416
+ df["string3"] = "bah"
417
+ df.loc[df.index[1:], "string3"] = np.nan
418
+ store.append("df", df)
419
+ result = store.select("df")
420
+ tm.assert_frame_equal(result, df)
421
+
422
+ with ensure_clean_store(setup_path) as store:
423
+ df = DataFrame({"A": "foo", "B": "bar"}, index=range(10))
424
+
425
+ # a min_itemsize that creates a data_column
426
+ _maybe_remove(store, "df")
427
+ store.append("df", df, min_itemsize={"A": 200})
428
+ check_col("df", "A", 200)
429
+ assert store.get_storer("df").data_columns == ["A"]
430
+
431
+ # a min_itemsize that creates a data_column2
432
+ _maybe_remove(store, "df")
433
+ store.append("df", df, data_columns=["B"], min_itemsize={"A": 200})
434
+ check_col("df", "A", 200)
435
+ assert store.get_storer("df").data_columns == ["B", "A"]
436
+
437
+ # a min_itemsize that creates a data_column2
438
+ _maybe_remove(store, "df")
439
+ store.append("df", df, data_columns=["B"], min_itemsize={"values": 200})
440
+ check_col("df", "B", 200)
441
+ check_col("df", "values_block_0", 200)
442
+ assert store.get_storer("df").data_columns == ["B"]
443
+
444
+ # infer the .typ on subsequent appends
445
+ _maybe_remove(store, "df")
446
+ store.append("df", df[:5], min_itemsize=200)
447
+ store.append("df", df[5:], min_itemsize=200)
448
+ tm.assert_frame_equal(store["df"], df)
449
+
450
+ # invalid min_itemsize keys
451
+ df = DataFrame(["foo", "foo", "foo", "barh", "barh", "barh"], columns=["A"])
452
+ _maybe_remove(store, "df")
453
+ msg = re.escape(
454
+ "min_itemsize has the key [foo] which is not an axis or data_column"
455
+ )
456
+ with pytest.raises(ValueError, match=msg):
457
+ store.append("df", df, min_itemsize={"foo": 20, "foobar": 20})
458
+
459
+
460
+ def test_append_with_empty_string(setup_path):
461
+ with ensure_clean_store(setup_path) as store:
462
+ # with all empty strings (GH 12242)
463
+ df = DataFrame({"x": ["a", "b", "c", "d", "e", "f", ""]})
464
+ store.append("df", df[:-1], min_itemsize={"x": 1})
465
+ store.append("df", df[-1:], min_itemsize={"x": 1})
466
+ tm.assert_frame_equal(store.select("df"), df)
467
+
468
+
469
+ def test_append_with_data_columns(setup_path):
470
+ with ensure_clean_store(setup_path) as store:
471
+ df = tm.makeTimeDataFrame()
472
+ df.iloc[0, df.columns.get_loc("B")] = 1.0
473
+ _maybe_remove(store, "df")
474
+ store.append("df", df[:2], data_columns=["B"])
475
+ store.append("df", df[2:])
476
+ tm.assert_frame_equal(store["df"], df)
477
+
478
+ # check that we have indices created
479
+ assert store._handle.root.df.table.cols.index.is_indexed is True
480
+ assert store._handle.root.df.table.cols.B.is_indexed is True
481
+
482
+ # data column searching
483
+ result = store.select("df", "B>0")
484
+ expected = df[df.B > 0]
485
+ tm.assert_frame_equal(result, expected)
486
+
487
+ # data column searching (with an indexable and a data_columns)
488
+ result = store.select("df", "B>0 and index>df.index[3]")
489
+ df_new = df.reindex(index=df.index[4:])
490
+ expected = df_new[df_new.B > 0]
491
+ tm.assert_frame_equal(result, expected)
492
+
493
+ # data column selection with a string data_column
494
+ df_new = df.copy()
495
+ df_new["string"] = "foo"
496
+ df_new.loc[df_new.index[1:4], "string"] = np.nan
497
+ df_new.loc[df_new.index[5:6], "string"] = "bar"
498
+ _maybe_remove(store, "df")
499
+ store.append("df", df_new, data_columns=["string"])
500
+ result = store.select("df", "string='foo'")
501
+ expected = df_new[df_new.string == "foo"]
502
+ tm.assert_frame_equal(result, expected)
503
+
504
+ # using min_itemsize and a data column
505
+ def check_col(key, name, size):
506
+ assert (
507
+ getattr(store.get_storer(key).table.description, name).itemsize == size
508
+ )
509
+
510
+ with ensure_clean_store(setup_path) as store:
511
+ _maybe_remove(store, "df")
512
+ store.append("df", df_new, data_columns=["string"], min_itemsize={"string": 30})
513
+ check_col("df", "string", 30)
514
+ _maybe_remove(store, "df")
515
+ store.append("df", df_new, data_columns=["string"], min_itemsize=30)
516
+ check_col("df", "string", 30)
517
+ _maybe_remove(store, "df")
518
+ store.append("df", df_new, data_columns=["string"], min_itemsize={"values": 30})
519
+ check_col("df", "string", 30)
520
+
521
+ with ensure_clean_store(setup_path) as store:
522
+ df_new["string2"] = "foobarbah"
523
+ df_new["string_block1"] = "foobarbah1"
524
+ df_new["string_block2"] = "foobarbah2"
525
+ _maybe_remove(store, "df")
526
+ store.append(
527
+ "df",
528
+ df_new,
529
+ data_columns=["string", "string2"],
530
+ min_itemsize={"string": 30, "string2": 40, "values": 50},
531
+ )
532
+ check_col("df", "string", 30)
533
+ check_col("df", "string2", 40)
534
+ check_col("df", "values_block_1", 50)
535
+
536
+ with ensure_clean_store(setup_path) as store:
537
+ # multiple data columns
538
+ df_new = df.copy()
539
+ df_new.iloc[0, df_new.columns.get_loc("A")] = 1.0
540
+ df_new.iloc[0, df_new.columns.get_loc("B")] = -1.0
541
+ df_new["string"] = "foo"
542
+
543
+ sl = df_new.columns.get_loc("string")
544
+ df_new.iloc[1:4, sl] = np.nan
545
+ df_new.iloc[5:6, sl] = "bar"
546
+
547
+ df_new["string2"] = "foo"
548
+ sl = df_new.columns.get_loc("string2")
549
+ df_new.iloc[2:5, sl] = np.nan
550
+ df_new.iloc[7:8, sl] = "bar"
551
+ _maybe_remove(store, "df")
552
+ store.append("df", df_new, data_columns=["A", "B", "string", "string2"])
553
+ result = store.select("df", "string='foo' and string2='foo' and A>0 and B<0")
554
+ expected = df_new[
555
+ (df_new.string == "foo")
556
+ & (df_new.string2 == "foo")
557
+ & (df_new.A > 0)
558
+ & (df_new.B < 0)
559
+ ]
560
+ tm.assert_frame_equal(result, expected, check_freq=False)
561
+ # FIXME: 2020-05-07 freq check randomly fails in the CI
562
+
563
+ # yield an empty frame
564
+ result = store.select("df", "string='foo' and string2='cool'")
565
+ expected = df_new[(df_new.string == "foo") & (df_new.string2 == "cool")]
566
+ tm.assert_frame_equal(result, expected)
567
+
568
+ with ensure_clean_store(setup_path) as store:
569
+ # doc example
570
+ df_dc = df.copy()
571
+ df_dc["string"] = "foo"
572
+ df_dc.loc[df_dc.index[4:6], "string"] = np.nan
573
+ df_dc.loc[df_dc.index[7:9], "string"] = "bar"
574
+ df_dc["string2"] = "cool"
575
+ df_dc["datetime"] = Timestamp("20010102")
576
+ df_dc.loc[df_dc.index[3:5], ["A", "B", "datetime"]] = np.nan
577
+
578
+ _maybe_remove(store, "df_dc")
579
+ store.append(
580
+ "df_dc", df_dc, data_columns=["B", "C", "string", "string2", "datetime"]
581
+ )
582
+ result = store.select("df_dc", "B>0")
583
+
584
+ expected = df_dc[df_dc.B > 0]
585
+ tm.assert_frame_equal(result, expected)
586
+
587
+ result = store.select("df_dc", ["B > 0", "C > 0", "string == foo"])
588
+ expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (df_dc.string == "foo")]
589
+ tm.assert_frame_equal(result, expected, check_freq=False)
590
+ # FIXME: 2020-12-07 intermittent build failures here with freq of
591
+ # None instead of BDay(4)
592
+
593
+ with ensure_clean_store(setup_path) as store:
594
+ # doc example part 2
595
+ np.random.seed(1234)
596
+ index = date_range("1/1/2000", periods=8)
597
+ df_dc = DataFrame(np.random.randn(8, 3), index=index, columns=["A", "B", "C"])
598
+ df_dc["string"] = "foo"
599
+ df_dc.loc[df_dc.index[4:6], "string"] = np.nan
600
+ df_dc.loc[df_dc.index[7:9], "string"] = "bar"
601
+ df_dc[["B", "C"]] = df_dc[["B", "C"]].abs()
602
+ df_dc["string2"] = "cool"
603
+
604
+ # on-disk operations
605
+ store.append("df_dc", df_dc, data_columns=["B", "C", "string", "string2"])
606
+
607
+ result = store.select("df_dc", "B>0")
608
+ expected = df_dc[df_dc.B > 0]
609
+ tm.assert_frame_equal(result, expected)
610
+
611
+ result = store.select("df_dc", ["B > 0", "C > 0", 'string == "foo"'])
612
+ expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (df_dc.string == "foo")]
613
+ tm.assert_frame_equal(result, expected)
614
+
615
+
616
+ def test_append_hierarchical(tmp_path, setup_path, multiindex_dataframe_random_data):
617
+ df = multiindex_dataframe_random_data
618
+ df.columns.name = None
619
+
620
+ with ensure_clean_store(setup_path) as store:
621
+ store.append("mi", df)
622
+ result = store.select("mi")
623
+ tm.assert_frame_equal(result, df)
624
+
625
+ # GH 3748
626
+ result = store.select("mi", columns=["A", "B"])
627
+ expected = df.reindex(columns=["A", "B"])
628
+ tm.assert_frame_equal(result, expected)
629
+
630
+ path = tmp_path / "test.hdf"
631
+ df.to_hdf(path, "df", format="table")
632
+ result = read_hdf(path, "df", columns=["A", "B"])
633
+ expected = df.reindex(columns=["A", "B"])
634
+ tm.assert_frame_equal(result, expected)
635
+
636
+
637
+ def test_append_misc(setup_path):
638
+ with ensure_clean_store(setup_path) as store:
639
+ df = tm.makeDataFrame()
640
+ store.append("df", df, chunksize=1)
641
+ result = store.select("df")
642
+ tm.assert_frame_equal(result, df)
643
+
644
+ store.append("df1", df, expectedrows=10)
645
+ result = store.select("df1")
646
+ tm.assert_frame_equal(result, df)
647
+
648
+
649
+ @pytest.mark.parametrize("chunksize", [10, 200, 1000])
650
+ def test_append_misc_chunksize(setup_path, chunksize):
651
+ # more chunksize in append tests
652
+ df = tm.makeDataFrame()
653
+ df["string"] = "foo"
654
+ df["float322"] = 1.0
655
+ df["float322"] = df["float322"].astype("float32")
656
+ df["bool"] = df["float322"] > 0
657
+ df["time1"] = Timestamp("20130101")
658
+ df["time2"] = Timestamp("20130102")
659
+ with ensure_clean_store(setup_path, mode="w") as store:
660
+ store.append("obj", df, chunksize=chunksize)
661
+ result = store.select("obj")
662
+ tm.assert_frame_equal(result, df)
663
+
664
+
665
+ def test_append_misc_empty_frame(setup_path):
666
+ # empty frame, GH4273
667
+ with ensure_clean_store(setup_path) as store:
668
+ # 0 len
669
+ df_empty = DataFrame(columns=list("ABC"))
670
+ store.append("df", df_empty)
671
+ with pytest.raises(KeyError, match="'No object named df in the file'"):
672
+ store.select("df")
673
+
674
+ # repeated append of 0/non-zero frames
675
+ df = DataFrame(np.random.rand(10, 3), columns=list("ABC"))
676
+ store.append("df", df)
677
+ tm.assert_frame_equal(store.select("df"), df)
678
+ store.append("df", df_empty)
679
+ tm.assert_frame_equal(store.select("df"), df)
680
+
681
+ # store
682
+ df = DataFrame(columns=list("ABC"))
683
+ store.put("df2", df)
684
+ tm.assert_frame_equal(store.select("df2"), df)
685
+
686
+
687
+ # TODO(ArrayManager) currently we rely on falling back to BlockManager, but
688
+ # the conversion from AM->BM converts the invalid object dtype column into
689
+ # a datetime64 column no longer raising an error
690
+ @td.skip_array_manager_not_yet_implemented
691
+ def test_append_raise(setup_path):
692
+ with ensure_clean_store(setup_path) as store:
693
+ # test append with invalid input to get good error messages
694
+
695
+ # list in column
696
+ df = tm.makeDataFrame()
697
+ df["invalid"] = [["a"]] * len(df)
698
+ assert df.dtypes["invalid"] == np.object_
699
+ msg = re.escape(
700
+ """Cannot serialize the column [invalid]
701
+ because its data contents are not [string] but [mixed] object dtype"""
702
+ )
703
+ with pytest.raises(TypeError, match=msg):
704
+ store.append("df", df)
705
+
706
+ # multiple invalid columns
707
+ df["invalid2"] = [["a"]] * len(df)
708
+ df["invalid3"] = [["a"]] * len(df)
709
+ with pytest.raises(TypeError, match=msg):
710
+ store.append("df", df)
711
+
712
+ # datetime with embedded nans as object
713
+ df = tm.makeDataFrame()
714
+ s = Series(datetime.datetime(2001, 1, 2), index=df.index)
715
+ s = s.astype(object)
716
+ s[0:5] = np.nan
717
+ df["invalid"] = s
718
+ assert df.dtypes["invalid"] == np.object_
719
+ msg = "too many timezones in this block, create separate data columns"
720
+ with pytest.raises(TypeError, match=msg):
721
+ store.append("df", df)
722
+
723
+ # directly ndarray
724
+ msg = "value must be None, Series, or DataFrame"
725
+ with pytest.raises(TypeError, match=msg):
726
+ store.append("df", np.arange(10))
727
+
728
+ # series directly
729
+ msg = re.escape(
730
+ "cannot properly create the storer for: "
731
+ "[group->df,value-><class 'pandas.core.series.Series'>]"
732
+ )
733
+ with pytest.raises(TypeError, match=msg):
734
+ store.append("df", Series(np.arange(10)))
735
+
736
+ # appending an incompatible table
737
+ df = tm.makeDataFrame()
738
+ store.append("df", df)
739
+
740
+ df["foo"] = "foo"
741
+ msg = re.escape(
742
+ "invalid combination of [non_index_axes] on appending data "
743
+ "[(1, ['A', 'B', 'C', 'D', 'foo'])] vs current table "
744
+ "[(1, ['A', 'B', 'C', 'D'])]"
745
+ )
746
+ with pytest.raises(ValueError, match=msg):
747
+ store.append("df", df)
748
+
749
+ # incompatible type (GH 41897)
750
+ _maybe_remove(store, "df")
751
+ df["foo"] = Timestamp("20130101")
752
+ store.append("df", df)
753
+ df["foo"] = "bar"
754
+ msg = re.escape(
755
+ "invalid combination of [values_axes] on appending data "
756
+ "[name->values_block_1,cname->values_block_1,"
757
+ "dtype->bytes24,kind->string,shape->(1, 30)] "
758
+ "vs current table "
759
+ "[name->values_block_1,cname->values_block_1,"
760
+ "dtype->datetime64,kind->datetime64,shape->None]"
761
+ )
762
+ with pytest.raises(ValueError, match=msg):
763
+ store.append("df", df)
764
+
765
+
766
+ def test_append_with_timedelta(setup_path):
767
+ # GH 3577
768
+ # append timedelta
769
+
770
+ df = DataFrame(
771
+ {
772
+ "A": Timestamp("20130101"),
773
+ "B": [
774
+ Timestamp("20130101") + timedelta(days=i, seconds=10) for i in range(10)
775
+ ],
776
+ }
777
+ )
778
+ df["C"] = df["A"] - df["B"]
779
+ df.loc[3:5, "C"] = np.nan
780
+
781
+ with ensure_clean_store(setup_path) as store:
782
+ # table
783
+ _maybe_remove(store, "df")
784
+ store.append("df", df, data_columns=True)
785
+ result = store.select("df")
786
+ tm.assert_frame_equal(result, df)
787
+
788
+ result = store.select("df", where="C<100000")
789
+ tm.assert_frame_equal(result, df)
790
+
791
+ result = store.select("df", where="C<pd.Timedelta('-3D')")
792
+ tm.assert_frame_equal(result, df.iloc[3:])
793
+
794
+ result = store.select("df", "C<'-3D'")
795
+ tm.assert_frame_equal(result, df.iloc[3:])
796
+
797
+ # a bit hacky here as we don't really deal with the NaT properly
798
+
799
+ result = store.select("df", "C<'-500000s'")
800
+ result = result.dropna(subset=["C"])
801
+ tm.assert_frame_equal(result, df.iloc[6:])
802
+
803
+ result = store.select("df", "C<'-3.5D'")
804
+ result = result.iloc[1:]
805
+ tm.assert_frame_equal(result, df.iloc[4:])
806
+
807
+ # fixed
808
+ _maybe_remove(store, "df2")
809
+ store.put("df2", df)
810
+ result = store.select("df2")
811
+ tm.assert_frame_equal(result, df)
812
+
813
+
814
+ def test_append_to_multiple(setup_path):
815
+ df1 = tm.makeTimeDataFrame()
816
+ df2 = tm.makeTimeDataFrame().rename(columns="{}_2".format)
817
+ df2["foo"] = "bar"
818
+ df = concat([df1, df2], axis=1)
819
+
820
+ with ensure_clean_store(setup_path) as store:
821
+ # exceptions
822
+ msg = "append_to_multiple requires a selector that is in passed dict"
823
+ with pytest.raises(ValueError, match=msg):
824
+ store.append_to_multiple(
825
+ {"df1": ["A", "B"], "df2": None}, df, selector="df3"
826
+ )
827
+
828
+ with pytest.raises(ValueError, match=msg):
829
+ store.append_to_multiple({"df1": None, "df2": None}, df, selector="df3")
830
+
831
+ msg = (
832
+ "append_to_multiple must have a dictionary specified as the way to "
833
+ "split the value"
834
+ )
835
+ with pytest.raises(ValueError, match=msg):
836
+ store.append_to_multiple("df1", df, "df1")
837
+
838
+ # regular operation
839
+ store.append_to_multiple({"df1": ["A", "B"], "df2": None}, df, selector="df1")
840
+ result = store.select_as_multiple(
841
+ ["df1", "df2"], where=["A>0", "B>0"], selector="df1"
842
+ )
843
+ expected = df[(df.A > 0) & (df.B > 0)]
844
+ tm.assert_frame_equal(result, expected)
845
+
846
+
847
+ def test_append_to_multiple_dropna(setup_path):
848
+ df1 = tm.makeTimeDataFrame()
849
+ df2 = tm.makeTimeDataFrame().rename(columns="{}_2".format)
850
+ df1.iloc[1, df1.columns.get_indexer(["A", "B"])] = np.nan
851
+ df = concat([df1, df2], axis=1)
852
+
853
+ with ensure_clean_store(setup_path) as store:
854
+ # dropna=True should guarantee rows are synchronized
855
+ store.append_to_multiple(
856
+ {"df1": ["A", "B"], "df2": None}, df, selector="df1", dropna=True
857
+ )
858
+ result = store.select_as_multiple(["df1", "df2"])
859
+ expected = df.dropna()
860
+ tm.assert_frame_equal(result, expected, check_index_type=True)
861
+ tm.assert_index_equal(store.select("df1").index, store.select("df2").index)
862
+
863
+
864
+ def test_append_to_multiple_dropna_false(setup_path):
865
+ df1 = tm.makeTimeDataFrame()
866
+ df2 = tm.makeTimeDataFrame().rename(columns="{}_2".format)
867
+ df1.iloc[1, df1.columns.get_indexer(["A", "B"])] = np.nan
868
+ df = concat([df1, df2], axis=1)
869
+
870
+ with ensure_clean_store(setup_path) as store, pd.option_context(
871
+ "io.hdf.dropna_table", True
872
+ ):
873
+ # dropna=False shouldn't synchronize row indexes
874
+ store.append_to_multiple(
875
+ {"df1a": ["A", "B"], "df2a": None}, df, selector="df1a", dropna=False
876
+ )
877
+
878
+ msg = "all tables must have exactly the same nrows!"
879
+ with pytest.raises(ValueError, match=msg):
880
+ store.select_as_multiple(["df1a", "df2a"])
881
+
882
+ assert not store.select("df1a").index.equals(store.select("df2a").index)
883
+
884
+
885
+ def test_append_to_multiple_min_itemsize(setup_path):
886
+ # GH 11238
887
+ df = DataFrame(
888
+ {
889
+ "IX": np.arange(1, 21),
890
+ "Num": np.arange(1, 21),
891
+ "BigNum": np.arange(1, 21) * 88,
892
+ "Str": ["a" for _ in range(20)],
893
+ "LongStr": ["abcde" for _ in range(20)],
894
+ }
895
+ )
896
+ expected = df.iloc[[0]]
897
+
898
+ with ensure_clean_store(setup_path) as store:
899
+ store.append_to_multiple(
900
+ {
901
+ "index": ["IX"],
902
+ "nums": ["Num", "BigNum"],
903
+ "strs": ["Str", "LongStr"],
904
+ },
905
+ df.iloc[[0]],
906
+ "index",
907
+ min_itemsize={"Str": 10, "LongStr": 100, "Num": 2},
908
+ )
909
+ result = store.select_as_multiple(["index", "nums", "strs"])
910
+ tm.assert_frame_equal(result, expected, check_index_type=True)
videochat2/lib/python3.10/site-packages/pandas/tests/io/pytables/test_complex.py ADDED
@@ -0,0 +1,199 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from warnings import catch_warnings
2
+
3
+ import numpy as np
4
+ import pytest
5
+
6
+ import pandas as pd
7
+ from pandas import (
8
+ DataFrame,
9
+ Series,
10
+ )
11
+ import pandas._testing as tm
12
+ from pandas.tests.io.pytables.common import ensure_clean_store
13
+
14
+ from pandas.io.pytables import read_hdf
15
+
16
+
17
+ def test_complex_fixed(tmp_path, setup_path):
18
+ df = DataFrame(
19
+ np.random.rand(4, 5).astype(np.complex64),
20
+ index=list("abcd"),
21
+ columns=list("ABCDE"),
22
+ )
23
+
24
+ path = tmp_path / setup_path
25
+ df.to_hdf(path, "df")
26
+ reread = read_hdf(path, "df")
27
+ tm.assert_frame_equal(df, reread)
28
+
29
+ df = DataFrame(
30
+ np.random.rand(4, 5).astype(np.complex128),
31
+ index=list("abcd"),
32
+ columns=list("ABCDE"),
33
+ )
34
+ path = tmp_path / setup_path
35
+ df.to_hdf(path, "df")
36
+ reread = read_hdf(path, "df")
37
+ tm.assert_frame_equal(df, reread)
38
+
39
+
40
+ def test_complex_table(tmp_path, setup_path):
41
+ df = DataFrame(
42
+ np.random.rand(4, 5).astype(np.complex64),
43
+ index=list("abcd"),
44
+ columns=list("ABCDE"),
45
+ )
46
+
47
+ path = tmp_path / setup_path
48
+ df.to_hdf(path, "df", format="table")
49
+ reread = read_hdf(path, "df")
50
+ tm.assert_frame_equal(df, reread)
51
+
52
+ df = DataFrame(
53
+ np.random.rand(4, 5).astype(np.complex128),
54
+ index=list("abcd"),
55
+ columns=list("ABCDE"),
56
+ )
57
+
58
+ path = tmp_path / setup_path
59
+ df.to_hdf(path, "df", format="table", mode="w")
60
+ reread = read_hdf(path, "df")
61
+ tm.assert_frame_equal(df, reread)
62
+
63
+
64
+ def test_complex_mixed_fixed(tmp_path, setup_path):
65
+ complex64 = np.array(
66
+ [1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j], dtype=np.complex64
67
+ )
68
+ complex128 = np.array(
69
+ [1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j], dtype=np.complex128
70
+ )
71
+ df = DataFrame(
72
+ {
73
+ "A": [1, 2, 3, 4],
74
+ "B": ["a", "b", "c", "d"],
75
+ "C": complex64,
76
+ "D": complex128,
77
+ "E": [1.0, 2.0, 3.0, 4.0],
78
+ },
79
+ index=list("abcd"),
80
+ )
81
+ path = tmp_path / setup_path
82
+ df.to_hdf(path, "df")
83
+ reread = read_hdf(path, "df")
84
+ tm.assert_frame_equal(df, reread)
85
+
86
+
87
+ def test_complex_mixed_table(tmp_path, setup_path):
88
+ complex64 = np.array(
89
+ [1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j], dtype=np.complex64
90
+ )
91
+ complex128 = np.array(
92
+ [1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j], dtype=np.complex128
93
+ )
94
+ df = DataFrame(
95
+ {
96
+ "A": [1, 2, 3, 4],
97
+ "B": ["a", "b", "c", "d"],
98
+ "C": complex64,
99
+ "D": complex128,
100
+ "E": [1.0, 2.0, 3.0, 4.0],
101
+ },
102
+ index=list("abcd"),
103
+ )
104
+
105
+ with ensure_clean_store(setup_path) as store:
106
+ store.append("df", df, data_columns=["A", "B"])
107
+ result = store.select("df", where="A>2")
108
+ tm.assert_frame_equal(df.loc[df.A > 2], result)
109
+
110
+ path = tmp_path / setup_path
111
+ df.to_hdf(path, "df", format="table")
112
+ reread = read_hdf(path, "df")
113
+ tm.assert_frame_equal(df, reread)
114
+
115
+
116
+ def test_complex_across_dimensions_fixed(tmp_path, setup_path):
117
+ with catch_warnings(record=True):
118
+ complex128 = np.array([1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j])
119
+ s = Series(complex128, index=list("abcd"))
120
+ df = DataFrame({"A": s, "B": s})
121
+
122
+ objs = [s, df]
123
+ comps = [tm.assert_series_equal, tm.assert_frame_equal]
124
+ for obj, comp in zip(objs, comps):
125
+ path = tmp_path / setup_path
126
+ obj.to_hdf(path, "obj", format="fixed")
127
+ reread = read_hdf(path, "obj")
128
+ comp(obj, reread)
129
+
130
+
131
+ def test_complex_across_dimensions(tmp_path, setup_path):
132
+ complex128 = np.array([1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j])
133
+ s = Series(complex128, index=list("abcd"))
134
+ df = DataFrame({"A": s, "B": s})
135
+
136
+ with catch_warnings(record=True):
137
+ objs = [df]
138
+ comps = [tm.assert_frame_equal]
139
+ for obj, comp in zip(objs, comps):
140
+ path = tmp_path / setup_path
141
+ obj.to_hdf(path, "obj", format="table")
142
+ reread = read_hdf(path, "obj")
143
+ comp(obj, reread)
144
+
145
+
146
+ def test_complex_indexing_error(setup_path):
147
+ complex128 = np.array(
148
+ [1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j], dtype=np.complex128
149
+ )
150
+ df = DataFrame(
151
+ {"A": [1, 2, 3, 4], "B": ["a", "b", "c", "d"], "C": complex128},
152
+ index=list("abcd"),
153
+ )
154
+
155
+ msg = (
156
+ "Columns containing complex values can be stored "
157
+ "but cannot be indexed when using table format. "
158
+ "Either use fixed format, set index=False, "
159
+ "or do not include the columns containing complex "
160
+ "values to data_columns when initializing the table."
161
+ )
162
+
163
+ with ensure_clean_store(setup_path) as store:
164
+ with pytest.raises(TypeError, match=msg):
165
+ store.append("df", df, data_columns=["C"])
166
+
167
+
168
+ def test_complex_series_error(tmp_path, setup_path):
169
+ complex128 = np.array([1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j])
170
+ s = Series(complex128, index=list("abcd"))
171
+
172
+ msg = (
173
+ "Columns containing complex values can be stored "
174
+ "but cannot be indexed when using table format. "
175
+ "Either use fixed format, set index=False, "
176
+ "or do not include the columns containing complex "
177
+ "values to data_columns when initializing the table."
178
+ )
179
+
180
+ path = tmp_path / setup_path
181
+ with pytest.raises(TypeError, match=msg):
182
+ s.to_hdf(path, "obj", format="t")
183
+
184
+ path = tmp_path / setup_path
185
+ s.to_hdf(path, "obj", format="t", index=False)
186
+ reread = read_hdf(path, "obj")
187
+ tm.assert_series_equal(s, reread)
188
+
189
+
190
+ def test_complex_append(setup_path):
191
+ df = DataFrame(
192
+ {"a": np.random.randn(100).astype(np.complex128), "b": np.random.randn(100)}
193
+ )
194
+
195
+ with ensure_clean_store(setup_path) as store:
196
+ store.append("df", df, data_columns=["b"])
197
+ store.append("df", df)
198
+ result = store.select("df")
199
+ tm.assert_frame_equal(pd.concat([df, df], axis=0), result)
videochat2/lib/python3.10/site-packages/pandas/tests/io/pytables/test_errors.py ADDED
@@ -0,0 +1,230 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import datetime
2
+ from io import BytesIO
3
+ import re
4
+ from warnings import catch_warnings
5
+
6
+ import numpy as np
7
+ import pytest
8
+
9
+ from pandas import (
10
+ CategoricalIndex,
11
+ DataFrame,
12
+ HDFStore,
13
+ MultiIndex,
14
+ _testing as tm,
15
+ date_range,
16
+ read_hdf,
17
+ )
18
+ from pandas.tests.io.pytables.common import ensure_clean_store
19
+
20
+ from pandas.io.pytables import (
21
+ Term,
22
+ _maybe_adjust_name,
23
+ )
24
+
25
+ pytestmark = pytest.mark.single_cpu
26
+
27
+
28
+ def test_pass_spec_to_storer(setup_path):
29
+ df = tm.makeDataFrame()
30
+
31
+ with ensure_clean_store(setup_path) as store:
32
+ store.put("df", df)
33
+ msg = (
34
+ "cannot pass a column specification when reading a Fixed format "
35
+ "store. this store must be selected in its entirety"
36
+ )
37
+ with pytest.raises(TypeError, match=msg):
38
+ store.select("df", columns=["A"])
39
+ msg = (
40
+ "cannot pass a where specification when reading from a Fixed "
41
+ "format store. this store must be selected in its entirety"
42
+ )
43
+ with pytest.raises(TypeError, match=msg):
44
+ store.select("df", where=[("columns=A")])
45
+
46
+
47
+ def test_table_index_incompatible_dtypes(setup_path):
48
+ df1 = DataFrame({"a": [1, 2, 3]})
49
+ df2 = DataFrame({"a": [4, 5, 6]}, index=date_range("1/1/2000", periods=3))
50
+
51
+ with ensure_clean_store(setup_path) as store:
52
+ store.put("frame", df1, format="table")
53
+ msg = re.escape("incompatible kind in col [integer - datetime64]")
54
+ with pytest.raises(TypeError, match=msg):
55
+ store.put("frame", df2, format="table", append=True)
56
+
57
+
58
+ def test_unimplemented_dtypes_table_columns(setup_path):
59
+ with ensure_clean_store(setup_path) as store:
60
+ dtypes = [("date", datetime.date(2001, 1, 2))]
61
+
62
+ # currently not supported dtypes ####
63
+ for n, f in dtypes:
64
+ df = tm.makeDataFrame()
65
+ df[n] = f
66
+ msg = re.escape(f"[{n}] is not implemented as a table column")
67
+ with pytest.raises(TypeError, match=msg):
68
+ store.append(f"df1_{n}", df)
69
+
70
+ # frame
71
+ df = tm.makeDataFrame()
72
+ df["obj1"] = "foo"
73
+ df["obj2"] = "bar"
74
+ df["datetime1"] = datetime.date(2001, 1, 2)
75
+ df = df._consolidate()
76
+
77
+ with ensure_clean_store(setup_path) as store:
78
+ # this fails because we have a date in the object block......
79
+ msg = re.escape(
80
+ """Cannot serialize the column [datetime1]
81
+ because its data contents are not [string] but [date] object dtype"""
82
+ )
83
+ with pytest.raises(TypeError, match=msg):
84
+ store.append("df_unimplemented", df)
85
+
86
+
87
+ def test_invalid_terms(tmp_path, setup_path):
88
+ with ensure_clean_store(setup_path) as store:
89
+ with catch_warnings(record=True):
90
+ df = tm.makeTimeDataFrame()
91
+ df["string"] = "foo"
92
+ df.loc[df.index[0:4], "string"] = "bar"
93
+
94
+ store.put("df", df, format="table")
95
+
96
+ # some invalid terms
97
+ msg = re.escape(
98
+ "__init__() missing 1 required positional argument: 'where'"
99
+ )
100
+ with pytest.raises(TypeError, match=msg):
101
+ Term()
102
+
103
+ # more invalid
104
+ msg = re.escape(
105
+ "cannot process expression [df.index[3]], "
106
+ "[2000-01-06 00:00:00] is not a valid condition"
107
+ )
108
+ with pytest.raises(ValueError, match=msg):
109
+ store.select("df", "df.index[3]")
110
+
111
+ msg = "invalid syntax"
112
+ with pytest.raises(SyntaxError, match=msg):
113
+ store.select("df", "index>")
114
+
115
+ # from the docs
116
+ path = tmp_path / setup_path
117
+ dfq = DataFrame(
118
+ np.random.randn(10, 4),
119
+ columns=list("ABCD"),
120
+ index=date_range("20130101", periods=10),
121
+ )
122
+ dfq.to_hdf(path, "dfq", format="table", data_columns=True)
123
+
124
+ # check ok
125
+ read_hdf(path, "dfq", where="index>Timestamp('20130104') & columns=['A', 'B']")
126
+ read_hdf(path, "dfq", where="A>0 or C>0")
127
+
128
+ # catch the invalid reference
129
+ path = tmp_path / setup_path
130
+ dfq = DataFrame(
131
+ np.random.randn(10, 4),
132
+ columns=list("ABCD"),
133
+ index=date_range("20130101", periods=10),
134
+ )
135
+ dfq.to_hdf(path, "dfq", format="table")
136
+
137
+ msg = (
138
+ r"The passed where expression: A>0 or C>0\n\s*"
139
+ r"contains an invalid variable reference\n\s*"
140
+ r"all of the variable references must be a reference to\n\s*"
141
+ r"an axis \(e.g. 'index' or 'columns'\), or a data_column\n\s*"
142
+ r"The currently defined references are: index,columns\n"
143
+ )
144
+ with pytest.raises(ValueError, match=msg):
145
+ read_hdf(path, "dfq", where="A>0 or C>0")
146
+
147
+
148
+ def test_append_with_diff_col_name_types_raises_value_error(setup_path):
149
+ df = DataFrame(np.random.randn(10, 1))
150
+ df2 = DataFrame({"a": np.random.randn(10)})
151
+ df3 = DataFrame({(1, 2): np.random.randn(10)})
152
+ df4 = DataFrame({("1", 2): np.random.randn(10)})
153
+ df5 = DataFrame({("1", 2, object): np.random.randn(10)})
154
+
155
+ with ensure_clean_store(setup_path) as store:
156
+ name = f"df_{tm.rands(10)}"
157
+ store.append(name, df)
158
+
159
+ for d in (df2, df3, df4, df5):
160
+ msg = re.escape(
161
+ "cannot match existing table structure for [0] on appending data"
162
+ )
163
+ with pytest.raises(ValueError, match=msg):
164
+ store.append(name, d)
165
+
166
+
167
+ def test_invalid_complib(setup_path):
168
+ df = DataFrame(np.random.rand(4, 5), index=list("abcd"), columns=list("ABCDE"))
169
+ with tm.ensure_clean(setup_path) as path:
170
+ msg = r"complib only supports \[.*\] compression."
171
+ with pytest.raises(ValueError, match=msg):
172
+ df.to_hdf(path, "df", complib="foolib")
173
+
174
+
175
+ @pytest.mark.parametrize(
176
+ "idx",
177
+ [
178
+ date_range("2019", freq="D", periods=3, tz="UTC"),
179
+ CategoricalIndex(list("abc")),
180
+ ],
181
+ )
182
+ def test_to_hdf_multiindex_extension_dtype(idx, tmp_path, setup_path):
183
+ # GH 7775
184
+ mi = MultiIndex.from_arrays([idx, idx])
185
+ df = DataFrame(0, index=mi, columns=["a"])
186
+ path = tmp_path / setup_path
187
+ with pytest.raises(NotImplementedError, match="Saving a MultiIndex"):
188
+ df.to_hdf(path, "df")
189
+
190
+
191
+ def test_unsuppored_hdf_file_error(datapath):
192
+ # GH 9539
193
+ data_path = datapath("io", "data", "legacy_hdf/incompatible_dataset.h5")
194
+ message = (
195
+ r"Dataset\(s\) incompatible with Pandas data types, "
196
+ "not table, or no datasets found in HDF5 file."
197
+ )
198
+
199
+ with pytest.raises(ValueError, match=message):
200
+ read_hdf(data_path)
201
+
202
+
203
+ def test_read_hdf_errors(setup_path, tmp_path):
204
+ df = DataFrame(np.random.rand(4, 5), index=list("abcd"), columns=list("ABCDE"))
205
+
206
+ path = tmp_path / setup_path
207
+ msg = r"File [\S]* does not exist"
208
+ with pytest.raises(OSError, match=msg):
209
+ read_hdf(path, "key")
210
+
211
+ df.to_hdf(path, "df")
212
+ store = HDFStore(path, mode="r")
213
+ store.close()
214
+
215
+ msg = "The HDFStore must be open for reading."
216
+ with pytest.raises(OSError, match=msg):
217
+ read_hdf(store, "df")
218
+
219
+
220
+ def test_read_hdf_generic_buffer_errors():
221
+ msg = "Support for generic buffers has not been implemented."
222
+ with pytest.raises(NotImplementedError, match=msg):
223
+ read_hdf(BytesIO(b""), "df")
224
+
225
+
226
+ @pytest.mark.parametrize("bad_version", [(1, 2), (1,), [], "12", "123"])
227
+ def test_maybe_adjust_name_bad_version_raises(bad_version):
228
+ msg = "Version is incorrect, expected sequence of 3 integers"
229
+ with pytest.raises(ValueError, match=msg):
230
+ _maybe_adjust_name("values_block_0", version=bad_version)