ZTWHHH commited on
Commit
3adc262
·
verified ·
1 Parent(s): 34c1e24

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. videochat2/lib/python3.10/site-packages/pandas/__pycache__/__init__.cpython-310.pyc +0 -0
  2. videochat2/lib/python3.10/site-packages/pandas/__pycache__/_typing.cpython-310.pyc +0 -0
  3. videochat2/lib/python3.10/site-packages/pandas/__pycache__/_version.cpython-310.pyc +0 -0
  4. videochat2/lib/python3.10/site-packages/pandas/__pycache__/conftest.cpython-310.pyc +0 -0
  5. videochat2/lib/python3.10/site-packages/pandas/__pycache__/testing.cpython-310.pyc +0 -0
  6. videochat2/lib/python3.10/site-packages/pandas/_config/__init__.py +40 -0
  7. videochat2/lib/python3.10/site-packages/pandas/_config/__pycache__/__init__.cpython-310.pyc +0 -0
  8. videochat2/lib/python3.10/site-packages/pandas/_config/__pycache__/config.cpython-310.pyc +0 -0
  9. videochat2/lib/python3.10/site-packages/pandas/_config/__pycache__/dates.cpython-310.pyc +0 -0
  10. videochat2/lib/python3.10/site-packages/pandas/_config/__pycache__/display.cpython-310.pyc +0 -0
  11. videochat2/lib/python3.10/site-packages/pandas/_config/__pycache__/localization.cpython-310.pyc +0 -0
  12. videochat2/lib/python3.10/site-packages/pandas/_config/config.py +909 -0
  13. videochat2/lib/python3.10/site-packages/pandas/_config/dates.py +25 -0
  14. videochat2/lib/python3.10/site-packages/pandas/_config/display.py +62 -0
  15. videochat2/lib/python3.10/site-packages/pandas/_config/localization.py +169 -0
  16. videochat2/lib/python3.10/site-packages/pandas/_testing/__init__.py +1168 -0
  17. videochat2/lib/python3.10/site-packages/pandas/_testing/__pycache__/__init__.cpython-310.pyc +0 -0
  18. videochat2/lib/python3.10/site-packages/pandas/_testing/__pycache__/_hypothesis.cpython-310.pyc +0 -0
  19. videochat2/lib/python3.10/site-packages/pandas/_testing/__pycache__/_io.cpython-310.pyc +0 -0
  20. videochat2/lib/python3.10/site-packages/pandas/_testing/__pycache__/_random.cpython-310.pyc +0 -0
  21. videochat2/lib/python3.10/site-packages/pandas/_testing/__pycache__/_warnings.cpython-310.pyc +0 -0
  22. videochat2/lib/python3.10/site-packages/pandas/_testing/__pycache__/asserters.cpython-310.pyc +0 -0
  23. videochat2/lib/python3.10/site-packages/pandas/_testing/__pycache__/compat.cpython-310.pyc +0 -0
  24. videochat2/lib/python3.10/site-packages/pandas/_testing/__pycache__/contexts.cpython-310.pyc +0 -0
  25. videochat2/lib/python3.10/site-packages/pandas/_testing/_hypothesis.py +89 -0
  26. videochat2/lib/python3.10/site-packages/pandas/_testing/_io.py +435 -0
  27. videochat2/lib/python3.10/site-packages/pandas/_testing/_random.py +29 -0
  28. videochat2/lib/python3.10/site-packages/pandas/_testing/_warnings.py +216 -0
  29. videochat2/lib/python3.10/site-packages/pandas/_testing/asserters.py +1378 -0
  30. videochat2/lib/python3.10/site-packages/pandas/_testing/compat.py +24 -0
  31. videochat2/lib/python3.10/site-packages/pandas/_testing/contexts.py +219 -0
  32. videochat2/lib/python3.10/site-packages/pandas/api/__init__.py +14 -0
  33. videochat2/lib/python3.10/site-packages/pandas/api/__pycache__/__init__.cpython-310.pyc +0 -0
  34. videochat2/lib/python3.10/site-packages/pandas/api/extensions/__init__.py +33 -0
  35. videochat2/lib/python3.10/site-packages/pandas/api/extensions/__pycache__/__init__.cpython-310.pyc +0 -0
  36. videochat2/lib/python3.10/site-packages/pandas/api/indexers/__init__.py +17 -0
  37. videochat2/lib/python3.10/site-packages/pandas/api/indexers/__pycache__/__init__.cpython-310.pyc +0 -0
  38. videochat2/lib/python3.10/site-packages/pandas/api/interchange/__init__.py +8 -0
  39. videochat2/lib/python3.10/site-packages/pandas/api/interchange/__pycache__/__init__.cpython-310.pyc +0 -0
  40. videochat2/lib/python3.10/site-packages/pandas/api/types/__init__.py +23 -0
  41. videochat2/lib/python3.10/site-packages/pandas/api/types/__pycache__/__init__.cpython-310.pyc +0 -0
  42. videochat2/lib/python3.10/site-packages/pandas/compat/__init__.py +169 -0
  43. videochat2/lib/python3.10/site-packages/pandas/compat/__pycache__/compressors.cpython-310.pyc +0 -0
  44. videochat2/lib/python3.10/site-packages/pandas/compat/_optional.py +173 -0
  45. videochat2/lib/python3.10/site-packages/pandas/compat/compressors.py +69 -0
  46. videochat2/lib/python3.10/site-packages/pandas/compat/pickle_compat.py +249 -0
  47. videochat2/lib/python3.10/site-packages/pandas/compat/pyarrow.py +22 -0
  48. videochat2/lib/python3.10/site-packages/pandas/tests/computation/__init__.py +0 -0
  49. videochat2/lib/python3.10/site-packages/pandas/tests/computation/__pycache__/__init__.cpython-310.pyc +0 -0
  50. videochat2/lib/python3.10/site-packages/pandas/tests/computation/__pycache__/test_compat.cpython-310.pyc +0 -0
videochat2/lib/python3.10/site-packages/pandas/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (6.58 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/__pycache__/_typing.cpython-310.pyc ADDED
Binary file (8.59 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/__pycache__/_version.cpython-310.pyc ADDED
Binary file (486 Bytes). View file
 
videochat2/lib/python3.10/site-packages/pandas/__pycache__/conftest.cpython-310.pyc ADDED
Binary file (45.5 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/__pycache__/testing.cpython-310.pyc ADDED
Binary file (410 Bytes). View file
 
videochat2/lib/python3.10/site-packages/pandas/_config/__init__.py ADDED
@@ -0,0 +1,40 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ pandas._config is considered explicitly upstream of everything else in pandas,
3
+ should have no intra-pandas dependencies.
4
+
5
+ importing `dates` and `display` ensures that keys needed by _libs
6
+ are initialized.
7
+ """
8
+ __all__ = [
9
+ "config",
10
+ "detect_console_encoding",
11
+ "get_option",
12
+ "set_option",
13
+ "reset_option",
14
+ "describe_option",
15
+ "option_context",
16
+ "options",
17
+ "using_copy_on_write",
18
+ ]
19
+ from pandas._config import config
20
+ from pandas._config import dates # pyright: ignore # noqa:F401
21
+ from pandas._config.config import (
22
+ _global_config,
23
+ describe_option,
24
+ get_option,
25
+ option_context,
26
+ options,
27
+ reset_option,
28
+ set_option,
29
+ )
30
+ from pandas._config.display import detect_console_encoding
31
+
32
+
33
+ def using_copy_on_write():
34
+ _mode_options = _global_config["mode"]
35
+ return _mode_options["copy_on_write"] and _mode_options["data_manager"] == "block"
36
+
37
+
38
+ def using_nullable_dtypes():
39
+ _mode_options = _global_config["mode"]
40
+ return _mode_options["nullable_dtypes"]
videochat2/lib/python3.10/site-packages/pandas/_config/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.14 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/_config/__pycache__/config.cpython-310.pyc ADDED
Binary file (25.6 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/_config/__pycache__/dates.cpython-310.pyc ADDED
Binary file (736 Bytes). View file
 
videochat2/lib/python3.10/site-packages/pandas/_config/__pycache__/display.cpython-310.pyc ADDED
Binary file (1.39 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/_config/__pycache__/localization.cpython-310.pyc ADDED
Binary file (4.76 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/_config/config.py ADDED
@@ -0,0 +1,909 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ The config module holds package-wide configurables and provides
3
+ a uniform API for working with them.
4
+
5
+ Overview
6
+ ========
7
+
8
+ This module supports the following requirements:
9
+ - options are referenced using keys in dot.notation, e.g. "x.y.option - z".
10
+ - keys are case-insensitive.
11
+ - functions should accept partial/regex keys, when unambiguous.
12
+ - options can be registered by modules at import time.
13
+ - options can be registered at init-time (via core.config_init)
14
+ - options have a default value, and (optionally) a description and
15
+ validation function associated with them.
16
+ - options can be deprecated, in which case referencing them
17
+ should produce a warning.
18
+ - deprecated options can optionally be rerouted to a replacement
19
+ so that accessing a deprecated option reroutes to a differently
20
+ named option.
21
+ - options can be reset to their default value.
22
+ - all option can be reset to their default value at once.
23
+ - all options in a certain sub - namespace can be reset at once.
24
+ - the user can set / get / reset or ask for the description of an option.
25
+ - a developer can register and mark an option as deprecated.
26
+ - you can register a callback to be invoked when the option value
27
+ is set or reset. Changing the stored value is considered misuse, but
28
+ is not verboten.
29
+
30
+ Implementation
31
+ ==============
32
+
33
+ - Data is stored using nested dictionaries, and should be accessed
34
+ through the provided API.
35
+
36
+ - "Registered options" and "Deprecated options" have metadata associated
37
+ with them, which are stored in auxiliary dictionaries keyed on the
38
+ fully-qualified key, e.g. "x.y.z.option".
39
+
40
+ - the config_init module is imported by the package's __init__.py file.
41
+ placing any register_option() calls there will ensure those options
42
+ are available as soon as pandas is loaded. If you use register_option
43
+ in a module, it will only be available after that module is imported,
44
+ which you should be aware of.
45
+
46
+ - `config_prefix` is a context_manager (for use with the `with` keyword)
47
+ which can save developers some typing, see the docstring.
48
+
49
+ """
50
+
51
+ from __future__ import annotations
52
+
53
+ from contextlib import (
54
+ ContextDecorator,
55
+ contextmanager,
56
+ )
57
+ import re
58
+ from typing import (
59
+ Any,
60
+ Callable,
61
+ Generator,
62
+ Generic,
63
+ Iterable,
64
+ NamedTuple,
65
+ cast,
66
+ )
67
+ import warnings
68
+
69
+ from pandas._typing import (
70
+ F,
71
+ T,
72
+ )
73
+ from pandas.util._exceptions import find_stack_level
74
+
75
+
76
+ class DeprecatedOption(NamedTuple):
77
+ key: str
78
+ msg: str | None
79
+ rkey: str | None
80
+ removal_ver: str | None
81
+
82
+
83
+ class RegisteredOption(NamedTuple):
84
+ key: str
85
+ defval: object
86
+ doc: str
87
+ validator: Callable[[object], Any] | None
88
+ cb: Callable[[str], Any] | None
89
+
90
+
91
+ # holds deprecated option metadata
92
+ _deprecated_options: dict[str, DeprecatedOption] = {}
93
+
94
+ # holds registered option metadata
95
+ _registered_options: dict[str, RegisteredOption] = {}
96
+
97
+ # holds the current values for registered options
98
+ _global_config: dict[str, Any] = {}
99
+
100
+ # keys which have a special meaning
101
+ _reserved_keys: list[str] = ["all"]
102
+
103
+
104
+ class OptionError(AttributeError, KeyError):
105
+ """
106
+ Exception raised for pandas.options.
107
+
108
+ Backwards compatible with KeyError checks.
109
+ """
110
+
111
+
112
+ #
113
+ # User API
114
+
115
+
116
+ def _get_single_key(pat: str, silent: bool) -> str:
117
+ keys = _select_options(pat)
118
+ if len(keys) == 0:
119
+ if not silent:
120
+ _warn_if_deprecated(pat)
121
+ raise OptionError(f"No such keys(s): {repr(pat)}")
122
+ if len(keys) > 1:
123
+ raise OptionError("Pattern matched multiple keys")
124
+ key = keys[0]
125
+
126
+ if not silent:
127
+ _warn_if_deprecated(key)
128
+
129
+ key = _translate_key(key)
130
+
131
+ return key
132
+
133
+
134
+ def _get_option(pat: str, silent: bool = False) -> Any:
135
+ key = _get_single_key(pat, silent)
136
+
137
+ # walk the nested dict
138
+ root, k = _get_root(key)
139
+ return root[k]
140
+
141
+
142
+ def _set_option(*args, **kwargs) -> None:
143
+ # must at least 1 arg deal with constraints later
144
+ nargs = len(args)
145
+ if not nargs or nargs % 2 != 0:
146
+ raise ValueError("Must provide an even number of non-keyword arguments")
147
+
148
+ # default to false
149
+ silent = kwargs.pop("silent", False)
150
+
151
+ if kwargs:
152
+ kwarg = list(kwargs.keys())[0]
153
+ raise TypeError(f'_set_option() got an unexpected keyword argument "{kwarg}"')
154
+
155
+ for k, v in zip(args[::2], args[1::2]):
156
+ key = _get_single_key(k, silent)
157
+
158
+ o = _get_registered_option(key)
159
+ if o and o.validator:
160
+ o.validator(v)
161
+
162
+ # walk the nested dict
163
+ root, k = _get_root(key)
164
+ root[k] = v
165
+
166
+ if o.cb:
167
+ if silent:
168
+ with warnings.catch_warnings(record=True):
169
+ o.cb(key)
170
+ else:
171
+ o.cb(key)
172
+
173
+
174
+ def _describe_option(pat: str = "", _print_desc: bool = True) -> str | None:
175
+ keys = _select_options(pat)
176
+ if len(keys) == 0:
177
+ raise OptionError("No such keys(s)")
178
+
179
+ s = "\n".join([_build_option_description(k) for k in keys])
180
+
181
+ if _print_desc:
182
+ print(s)
183
+ return None
184
+ return s
185
+
186
+
187
+ def _reset_option(pat: str, silent: bool = False) -> None:
188
+ keys = _select_options(pat)
189
+
190
+ if len(keys) == 0:
191
+ raise OptionError("No such keys(s)")
192
+
193
+ if len(keys) > 1 and len(pat) < 4 and pat != "all":
194
+ raise ValueError(
195
+ "You must specify at least 4 characters when "
196
+ "resetting multiple keys, use the special keyword "
197
+ '"all" to reset all the options to their default value'
198
+ )
199
+
200
+ for k in keys:
201
+ _set_option(k, _registered_options[k].defval, silent=silent)
202
+
203
+
204
+ def get_default_val(pat: str):
205
+ key = _get_single_key(pat, silent=True)
206
+ return _get_registered_option(key).defval
207
+
208
+
209
+ class DictWrapper:
210
+ """provide attribute-style access to a nested dict"""
211
+
212
+ def __init__(self, d: dict[str, Any], prefix: str = "") -> None:
213
+ object.__setattr__(self, "d", d)
214
+ object.__setattr__(self, "prefix", prefix)
215
+
216
+ def __setattr__(self, key: str, val: Any) -> None:
217
+ prefix = object.__getattribute__(self, "prefix")
218
+ if prefix:
219
+ prefix += "."
220
+ prefix += key
221
+ # you can't set new keys
222
+ # can you can't overwrite subtrees
223
+ if key in self.d and not isinstance(self.d[key], dict):
224
+ _set_option(prefix, val)
225
+ else:
226
+ raise OptionError("You can only set the value of existing options")
227
+
228
+ def __getattr__(self, key: str):
229
+ prefix = object.__getattribute__(self, "prefix")
230
+ if prefix:
231
+ prefix += "."
232
+ prefix += key
233
+ try:
234
+ v = object.__getattribute__(self, "d")[key]
235
+ except KeyError as err:
236
+ raise OptionError("No such option") from err
237
+ if isinstance(v, dict):
238
+ return DictWrapper(v, prefix)
239
+ else:
240
+ return _get_option(prefix)
241
+
242
+ def __dir__(self) -> Iterable[str]:
243
+ return list(self.d.keys())
244
+
245
+
246
+ # For user convenience, we'd like to have the available options described
247
+ # in the docstring. For dev convenience we'd like to generate the docstrings
248
+ # dynamically instead of maintaining them by hand. To this, we use the
249
+ # class below which wraps functions inside a callable, and converts
250
+ # __doc__ into a property function. The doctsrings below are templates
251
+ # using the py2.6+ advanced formatting syntax to plug in a concise list
252
+ # of options, and option descriptions.
253
+
254
+
255
+ class CallableDynamicDoc(Generic[T]):
256
+ def __init__(self, func: Callable[..., T], doc_tmpl: str) -> None:
257
+ self.__doc_tmpl__ = doc_tmpl
258
+ self.__func__ = func
259
+
260
+ def __call__(self, *args, **kwds) -> T:
261
+ return self.__func__(*args, **kwds)
262
+
263
+ # error: Signature of "__doc__" incompatible with supertype "object"
264
+ @property
265
+ def __doc__(self) -> str: # type: ignore[override]
266
+ opts_desc = _describe_option("all", _print_desc=False)
267
+ opts_list = pp_options_list(list(_registered_options.keys()))
268
+ return self.__doc_tmpl__.format(opts_desc=opts_desc, opts_list=opts_list)
269
+
270
+
271
+ _get_option_tmpl = """
272
+ get_option(pat)
273
+
274
+ Retrieves the value of the specified option.
275
+
276
+ Available options:
277
+
278
+ {opts_list}
279
+
280
+ Parameters
281
+ ----------
282
+ pat : str
283
+ Regexp which should match a single option.
284
+ Note: partial matches are supported for convenience, but unless you use the
285
+ full option name (e.g. x.y.z.option_name), your code may break in future
286
+ versions if new options with similar names are introduced.
287
+
288
+ Returns
289
+ -------
290
+ result : the value of the option
291
+
292
+ Raises
293
+ ------
294
+ OptionError : if no such option exists
295
+
296
+ Notes
297
+ -----
298
+ Please reference the :ref:`User Guide <options>` for more information.
299
+
300
+ The available options with its descriptions:
301
+
302
+ {opts_desc}
303
+ """
304
+
305
+ _set_option_tmpl = """
306
+ set_option(pat, value)
307
+
308
+ Sets the value of the specified option.
309
+
310
+ Available options:
311
+
312
+ {opts_list}
313
+
314
+ Parameters
315
+ ----------
316
+ pat : str
317
+ Regexp which should match a single option.
318
+ Note: partial matches are supported for convenience, but unless you use the
319
+ full option name (e.g. x.y.z.option_name), your code may break in future
320
+ versions if new options with similar names are introduced.
321
+ value : object
322
+ New value of option.
323
+
324
+ Returns
325
+ -------
326
+ None
327
+
328
+ Raises
329
+ ------
330
+ OptionError if no such option exists
331
+
332
+ Notes
333
+ -----
334
+ Please reference the :ref:`User Guide <options>` for more information.
335
+
336
+ The available options with its descriptions:
337
+
338
+ {opts_desc}
339
+ """
340
+
341
+ _describe_option_tmpl = """
342
+ describe_option(pat, _print_desc=False)
343
+
344
+ Prints the description for one or more registered options.
345
+
346
+ Call with no arguments to get a listing for all registered options.
347
+
348
+ Available options:
349
+
350
+ {opts_list}
351
+
352
+ Parameters
353
+ ----------
354
+ pat : str
355
+ Regexp pattern. All matching keys will have their description displayed.
356
+ _print_desc : bool, default True
357
+ If True (default) the description(s) will be printed to stdout.
358
+ Otherwise, the description(s) will be returned as a unicode string
359
+ (for testing).
360
+
361
+ Returns
362
+ -------
363
+ None by default, the description(s) as a unicode string if _print_desc
364
+ is False
365
+
366
+ Notes
367
+ -----
368
+ Please reference the :ref:`User Guide <options>` for more information.
369
+
370
+ The available options with its descriptions:
371
+
372
+ {opts_desc}
373
+ """
374
+
375
+ _reset_option_tmpl = """
376
+ reset_option(pat)
377
+
378
+ Reset one or more options to their default value.
379
+
380
+ Pass "all" as argument to reset all options.
381
+
382
+ Available options:
383
+
384
+ {opts_list}
385
+
386
+ Parameters
387
+ ----------
388
+ pat : str/regex
389
+ If specified only options matching `prefix*` will be reset.
390
+ Note: partial matches are supported for convenience, but unless you
391
+ use the full option name (e.g. x.y.z.option_name), your code may break
392
+ in future versions if new options with similar names are introduced.
393
+
394
+ Returns
395
+ -------
396
+ None
397
+
398
+ Notes
399
+ -----
400
+ Please reference the :ref:`User Guide <options>` for more information.
401
+
402
+ The available options with its descriptions:
403
+
404
+ {opts_desc}
405
+ """
406
+
407
+ # bind the functions with their docstrings into a Callable
408
+ # and use that as the functions exposed in pd.api
409
+ get_option = CallableDynamicDoc(_get_option, _get_option_tmpl)
410
+ set_option = CallableDynamicDoc(_set_option, _set_option_tmpl)
411
+ reset_option = CallableDynamicDoc(_reset_option, _reset_option_tmpl)
412
+ describe_option = CallableDynamicDoc(_describe_option, _describe_option_tmpl)
413
+ options = DictWrapper(_global_config)
414
+
415
+ #
416
+ # Functions for use by pandas developers, in addition to User - api
417
+
418
+
419
+ class option_context(ContextDecorator):
420
+ """
421
+ Context manager to temporarily set options in the `with` statement context.
422
+
423
+ You need to invoke as ``option_context(pat, val, [(pat, val), ...])``.
424
+
425
+ Examples
426
+ --------
427
+ >>> from pandas import option_context
428
+ >>> with option_context('display.max_rows', 10, 'display.max_columns', 5):
429
+ ... pass
430
+ """
431
+
432
+ def __init__(self, *args) -> None:
433
+ if len(args) % 2 != 0 or len(args) < 2:
434
+ raise ValueError(
435
+ "Need to invoke as option_context(pat, val, [(pat, val), ...])."
436
+ )
437
+
438
+ self.ops = list(zip(args[::2], args[1::2]))
439
+
440
+ def __enter__(self) -> None:
441
+ self.undo = [(pat, _get_option(pat, silent=True)) for pat, val in self.ops]
442
+
443
+ for pat, val in self.ops:
444
+ _set_option(pat, val, silent=True)
445
+
446
+ def __exit__(self, *args) -> None:
447
+ if self.undo:
448
+ for pat, val in self.undo:
449
+ _set_option(pat, val, silent=True)
450
+
451
+
452
+ def register_option(
453
+ key: str,
454
+ defval: object,
455
+ doc: str = "",
456
+ validator: Callable[[object], Any] | None = None,
457
+ cb: Callable[[str], Any] | None = None,
458
+ ) -> None:
459
+ """
460
+ Register an option in the package-wide pandas config object
461
+
462
+ Parameters
463
+ ----------
464
+ key : str
465
+ Fully-qualified key, e.g. "x.y.option - z".
466
+ defval : object
467
+ Default value of the option.
468
+ doc : str
469
+ Description of the option.
470
+ validator : Callable, optional
471
+ Function of a single argument, should raise `ValueError` if
472
+ called with a value which is not a legal value for the option.
473
+ cb
474
+ a function of a single argument "key", which is called
475
+ immediately after an option value is set/reset. key is
476
+ the full name of the option.
477
+
478
+ Raises
479
+ ------
480
+ ValueError if `validator` is specified and `defval` is not a valid value.
481
+
482
+ """
483
+ import keyword
484
+ import tokenize
485
+
486
+ key = key.lower()
487
+
488
+ if key in _registered_options:
489
+ raise OptionError(f"Option '{key}' has already been registered")
490
+ if key in _reserved_keys:
491
+ raise OptionError(f"Option '{key}' is a reserved key")
492
+
493
+ # the default value should be legal
494
+ if validator:
495
+ validator(defval)
496
+
497
+ # walk the nested dict, creating dicts as needed along the path
498
+ path = key.split(".")
499
+
500
+ for k in path:
501
+ if not re.match("^" + tokenize.Name + "$", k):
502
+ raise ValueError(f"{k} is not a valid identifier")
503
+ if keyword.iskeyword(k):
504
+ raise ValueError(f"{k} is a python keyword")
505
+
506
+ cursor = _global_config
507
+ msg = "Path prefix to option '{option}' is already an option"
508
+
509
+ for i, p in enumerate(path[:-1]):
510
+ if not isinstance(cursor, dict):
511
+ raise OptionError(msg.format(option=".".join(path[:i])))
512
+ if p not in cursor:
513
+ cursor[p] = {}
514
+ cursor = cursor[p]
515
+
516
+ if not isinstance(cursor, dict):
517
+ raise OptionError(msg.format(option=".".join(path[:-1])))
518
+
519
+ cursor[path[-1]] = defval # initialize
520
+
521
+ # save the option metadata
522
+ _registered_options[key] = RegisteredOption(
523
+ key=key, defval=defval, doc=doc, validator=validator, cb=cb
524
+ )
525
+
526
+
527
+ def deprecate_option(
528
+ key: str,
529
+ msg: str | None = None,
530
+ rkey: str | None = None,
531
+ removal_ver: str | None = None,
532
+ ) -> None:
533
+ """
534
+ Mark option `key` as deprecated, if code attempts to access this option,
535
+ a warning will be produced, using `msg` if given, or a default message
536
+ if not.
537
+ if `rkey` is given, any access to the key will be re-routed to `rkey`.
538
+
539
+ Neither the existence of `key` nor that if `rkey` is checked. If they
540
+ do not exist, any subsequence access will fail as usual, after the
541
+ deprecation warning is given.
542
+
543
+ Parameters
544
+ ----------
545
+ key : str
546
+ Name of the option to be deprecated.
547
+ must be a fully-qualified option name (e.g "x.y.z.rkey").
548
+ msg : str, optional
549
+ Warning message to output when the key is referenced.
550
+ if no message is given a default message will be emitted.
551
+ rkey : str, optional
552
+ Name of an option to reroute access to.
553
+ If specified, any referenced `key` will be
554
+ re-routed to `rkey` including set/get/reset.
555
+ rkey must be a fully-qualified option name (e.g "x.y.z.rkey").
556
+ used by the default message if no `msg` is specified.
557
+ removal_ver : str, optional
558
+ Specifies the version in which this option will
559
+ be removed. used by the default message if no `msg` is specified.
560
+
561
+ Raises
562
+ ------
563
+ OptionError
564
+ If the specified key has already been deprecated.
565
+ """
566
+ key = key.lower()
567
+
568
+ if key in _deprecated_options:
569
+ raise OptionError(f"Option '{key}' has already been defined as deprecated.")
570
+
571
+ _deprecated_options[key] = DeprecatedOption(key, msg, rkey, removal_ver)
572
+
573
+
574
+ #
575
+ # functions internal to the module
576
+
577
+
578
+ def _select_options(pat: str) -> list[str]:
579
+ """
580
+ returns a list of keys matching `pat`
581
+
582
+ if pat=="all", returns all registered options
583
+ """
584
+ # short-circuit for exact key
585
+ if pat in _registered_options:
586
+ return [pat]
587
+
588
+ # else look through all of them
589
+ keys = sorted(_registered_options.keys())
590
+ if pat == "all": # reserved key
591
+ return keys
592
+
593
+ return [k for k in keys if re.search(pat, k, re.I)]
594
+
595
+
596
+ def _get_root(key: str) -> tuple[dict[str, Any], str]:
597
+ path = key.split(".")
598
+ cursor = _global_config
599
+ for p in path[:-1]:
600
+ cursor = cursor[p]
601
+ return cursor, path[-1]
602
+
603
+
604
+ def _is_deprecated(key: str) -> bool:
605
+ """Returns True if the given option has been deprecated"""
606
+ key = key.lower()
607
+ return key in _deprecated_options
608
+
609
+
610
+ def _get_deprecated_option(key: str):
611
+ """
612
+ Retrieves the metadata for a deprecated option, if `key` is deprecated.
613
+
614
+ Returns
615
+ -------
616
+ DeprecatedOption (namedtuple) if key is deprecated, None otherwise
617
+ """
618
+ try:
619
+ d = _deprecated_options[key]
620
+ except KeyError:
621
+ return None
622
+ else:
623
+ return d
624
+
625
+
626
+ def _get_registered_option(key: str):
627
+ """
628
+ Retrieves the option metadata if `key` is a registered option.
629
+
630
+ Returns
631
+ -------
632
+ RegisteredOption (namedtuple) if key is deprecated, None otherwise
633
+ """
634
+ return _registered_options.get(key)
635
+
636
+
637
+ def _translate_key(key: str) -> str:
638
+ """
639
+ if key id deprecated and a replacement key defined, will return the
640
+ replacement key, otherwise returns `key` as - is
641
+ """
642
+ d = _get_deprecated_option(key)
643
+ if d:
644
+ return d.rkey or key
645
+ else:
646
+ return key
647
+
648
+
649
+ def _warn_if_deprecated(key: str) -> bool:
650
+ """
651
+ Checks if `key` is a deprecated option and if so, prints a warning.
652
+
653
+ Returns
654
+ -------
655
+ bool - True if `key` is deprecated, False otherwise.
656
+ """
657
+ d = _get_deprecated_option(key)
658
+ if d:
659
+ if d.msg:
660
+ warnings.warn(
661
+ d.msg,
662
+ FutureWarning,
663
+ stacklevel=find_stack_level(),
664
+ )
665
+ else:
666
+ msg = f"'{key}' is deprecated"
667
+ if d.removal_ver:
668
+ msg += f" and will be removed in {d.removal_ver}"
669
+ if d.rkey:
670
+ msg += f", please use '{d.rkey}' instead."
671
+ else:
672
+ msg += ", please refrain from using it."
673
+
674
+ warnings.warn(msg, FutureWarning, stacklevel=find_stack_level())
675
+ return True
676
+ return False
677
+
678
+
679
+ def _build_option_description(k: str) -> str:
680
+ """Builds a formatted description of a registered option and prints it"""
681
+ o = _get_registered_option(k)
682
+ d = _get_deprecated_option(k)
683
+
684
+ s = f"{k} "
685
+
686
+ if o.doc:
687
+ s += "\n".join(o.doc.strip().split("\n"))
688
+ else:
689
+ s += "No description available."
690
+
691
+ if o:
692
+ s += f"\n [default: {o.defval}] [currently: {_get_option(k, True)}]"
693
+
694
+ if d:
695
+ rkey = d.rkey or ""
696
+ s += "\n (Deprecated"
697
+ s += f", use `{rkey}` instead."
698
+ s += ")"
699
+
700
+ return s
701
+
702
+
703
+ def pp_options_list(keys: Iterable[str], width: int = 80, _print: bool = False):
704
+ """Builds a concise listing of available options, grouped by prefix"""
705
+ from itertools import groupby
706
+ from textwrap import wrap
707
+
708
+ def pp(name: str, ks: Iterable[str]) -> list[str]:
709
+ pfx = "- " + name + ".[" if name else ""
710
+ ls = wrap(
711
+ ", ".join(ks),
712
+ width,
713
+ initial_indent=pfx,
714
+ subsequent_indent=" ",
715
+ break_long_words=False,
716
+ )
717
+ if ls and ls[-1] and name:
718
+ ls[-1] = ls[-1] + "]"
719
+ return ls
720
+
721
+ ls: list[str] = []
722
+ singles = [x for x in sorted(keys) if x.find(".") < 0]
723
+ if singles:
724
+ ls += pp("", singles)
725
+ keys = [x for x in keys if x.find(".") >= 0]
726
+
727
+ for k, g in groupby(sorted(keys), lambda x: x[: x.rfind(".")]):
728
+ ks = [x[len(k) + 1 :] for x in list(g)]
729
+ ls += pp(k, ks)
730
+ s = "\n".join(ls)
731
+ if _print:
732
+ print(s)
733
+ else:
734
+ return s
735
+
736
+
737
+ #
738
+ # helpers
739
+
740
+
741
@contextmanager
def config_prefix(prefix) -> Generator[None, None, None]:
    """
    Context manager that prepends ``prefix`` to option keys for the block.

    While active, the module-level ``register_option`` / ``get_option`` /
    ``set_option`` functions are replaced by wrappers rewriting a key ``"k"``
    into ``"<prefix>.k"``.  The originals are restored on exit.

    Warning: not thread-safe, and it will not affect callers that imported
    the API functions with the "from x import y" construct.

    Example
    -------
    import pandas._config.config as cf
    with cf.config_prefix("display.font"):
        cf.register_option("color", "red")
        cf.register_option("size", " 5 pt")
        cf.set_option(size, " 6 pt")
        cf.get_option(size)
        ...

    registers "display.font.color", "display.font.size", sets the value of
    "display.font.size"... and so on.
    """
    # reset_option relies on set_option and on the raw key directly, so it is
    # deliberately left out of this monkey-patching scheme.

    global register_option, get_option, set_option

    def prefixed(func: F) -> F:
        def inner(key: str, *args, **kwds):
            pkey = f"{prefix}.{key}"
            return func(pkey, *args, **kwds)

        return cast(F, inner)

    # Save the unwrapped functions, patch, and restore unconditionally.
    saved = (register_option, get_option, set_option)
    set_option = prefixed(set_option)
    get_option = prefixed(get_option)
    register_option = prefixed(register_option)
    try:
        yield
    finally:
        register_option, get_option, set_option = saved
790
+
791
+
792
+ # These factories and methods are handy for use as the validator
793
+ # arg in register_option
794
+
795
+
796
def is_type_factory(_type: type[Any]) -> Callable[[Any], None]:
    """
    Build a validator requiring a value's type to be exactly ``_type``.

    Parameters
    ----------
    _type : type
        The type compared against with an exact check, so e.g. ``bool``
        values are rejected by the ``int`` validator.

    Returns
    -------
    validator : Callable[[Any], None]
        A function of a single argument ``x`` which raises ValueError if
        ``type(x)`` is not ``_type``.
    """

    def inner(x) -> None:
        # Exact type check on purpose (no isinstance): subclasses such as
        # bool-for-int must be rejected.  Use identity rather than ``!=``
        # (E721): type objects compare by identity.
        if type(x) is not _type:
            raise ValueError(f"Value must have type '{_type}'")

    return inner
815
+
816
+
817
def is_instance_factory(_type) -> Callable[[Any], None]:
    """
    Build a validator requiring values to be instances of ``_type``.

    Parameters
    ----------
    _type : type or tuple/list of types
        The type(s) accepted by the validator.

    Returns
    -------
    validator : Callable[[Any], None]
        A function of a single argument ``x`` which raises ValueError if
        ``x`` is not an instance of ``_type``.
    """
    if isinstance(_type, (tuple, list)):
        _type = tuple(_type)
        type_repr = "|".join(str(t) for t in _type)
    else:
        type_repr = f"'{_type}'"

    def check(value) -> None:
        if isinstance(value, _type):
            return
        raise ValueError(f"Value must be an instance of {type_repr}")

    return check
841
+
842
+
843
def is_one_of_factory(legal_values) -> Callable[[Any], None]:
    """
    Build a validator accepting only the given literal values, or any value
    accepted by one of the given callables.
    """
    callables = [entry for entry in legal_values if callable(entry)]
    plain = [entry for entry in legal_values if not callable(entry)]

    def check(value) -> None:
        if value in plain:
            return
        if any(predicate(value) for predicate in callables):
            return
        pp_values = "|".join(str(legal) for legal in plain)
        msg = f"Value must be one of {pp_values}"
        if callables:
            msg += " or a callable"
        raise ValueError(msg)

    return check
858
+
859
+
860
def is_nonnegative_int(value: object) -> None:
    """
    Verify that value is None or a nonnegative int.

    Parameters
    ----------
    value : None or int
        The `value` to be checked.

    Raises
    ------
    ValueError
        When the value is neither None nor a nonnegative integer.
    """
    acceptable = value is None or (isinstance(value, int) and value >= 0)
    if not acceptable:
        raise ValueError("Value must be a nonnegative integer or None")
883
+
884
+
885
# common type validators, for convenience
# usage: register_option(... , validator = is_int)
# These use is_type_factory, i.e. an *exact* type check (bool is not int).
is_int = is_type_factory(int)
is_bool = is_type_factory(bool)
is_float = is_type_factory(float)
is_str = is_type_factory(str)
# is_text uses an isinstance check, so str/bytes subclasses are accepted.
is_text = is_instance_factory((str, bytes))
892
+
893
+
894
def is_callable(obj) -> bool:
    """
    Validate that ``obj`` is callable.

    Parameters
    ----------
    obj : object
        The object to be checked.

    Returns
    -------
    bool
        True when ``obj`` is callable.

    Raises
    ------
    ValueError
        When ``obj`` is not callable.
    """
    if callable(obj):
        return True
    raise ValueError("Value must be a callable")
videochat2/lib/python3.10/site-packages/pandas/_config/dates.py ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ config for datetime formatting
3
+ """
4
+ from __future__ import annotations
5
+
6
+ from pandas._config import config as cf
7
+
8
+ pc_date_dayfirst_doc = """
9
+ : boolean
10
+ When True, prints and parses dates with the day first, eg 20/01/2005
11
+ """
12
+
13
+ pc_date_yearfirst_doc = """
14
+ : boolean
15
+ When True, prints and parses dates with the year first, eg 2005/01/20
16
+ """
17
+
18
+ with cf.config_prefix("display"):
19
+ # Needed upstream of `_libs` because these are used in tslibs.parsing
20
+ cf.register_option(
21
+ "date_dayfirst", False, pc_date_dayfirst_doc, validator=cf.is_bool
22
+ )
23
+ cf.register_option(
24
+ "date_yearfirst", False, pc_date_yearfirst_doc, validator=cf.is_bool
25
+ )
videochat2/lib/python3.10/site-packages/pandas/_config/display.py ADDED
@@ -0,0 +1,62 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Unopinionated display configuration.
3
+ """
4
+
5
+ from __future__ import annotations
6
+
7
+ import locale
8
+ import sys
9
+
10
+ from pandas._config import config as cf
11
+
12
# -----------------------------------------------------------------------------
# Global formatting options

# Interpreter default encoding captured at first call; kept for debugging
# because MPL backends may change it later (GH#3360).
_initial_defencoding: str | None = None


def detect_console_encoding() -> str:
    """
    Try to find the most capable encoding supported by the console.
    slightly modified from the way IPython handles the same issue.
    """
    global _initial_defencoding

    def too_weak(candidate: str | None) -> bool:
        # A missing answer or anything ascii-ish deserves a second opinion.
        return not candidate or "ascii" in candidate.lower()

    enc = None
    try:
        enc = sys.stdout.encoding or sys.stdin.encoding
    except (AttributeError, OSError):
        pass

    if too_weak(enc):
        try:
            enc = locale.getpreferredencoding()
        except locale.Error:
            # can be raised by locale.setlocale(), which is called by
            # getpreferredencoding (on some systems, see stdlib locale docs)
            pass

    # When all else fails this will usually be "ascii".
    if too_weak(enc):
        enc = sys.getdefaultencoding()

    # GH#3360: record the reported default encoding exactly once.
    if not _initial_defencoding:
        _initial_defencoding = sys.getdefaultencoding()

    return enc
50
+
51
+
52
# Docstring for the "display.encoding" option registered below.
pc_encoding_doc = """
: str/unicode
    Defaults to the detected encoding of the console.
    Specifies the encoding to be used for strings returned by to_string,
    these are generally strings meant to be displayed on the console.
"""

# The default is probed once, at import time, via detect_console_encoding().
with cf.config_prefix("display"):
    cf.register_option(
        "encoding", detect_console_encoding(), pc_encoding_doc, validator=cf.is_text
    )
videochat2/lib/python3.10/site-packages/pandas/_config/localization.py ADDED
@@ -0,0 +1,169 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Helpers for configuring locale settings.
3
+
4
+ Name `localization` is chosen to avoid overlap with builtin `locale` module.
5
+ """
6
+ from __future__ import annotations
7
+
8
+ from contextlib import contextmanager
9
+ import locale
10
+ import platform
11
+ import re
12
+ import subprocess
13
+ from typing import Generator
14
+
15
+ from pandas._config.config import options
16
+
17
+
18
@contextmanager
def set_locale(
    new_locale: str | tuple[str, str], lc_var: int = locale.LC_ALL
) -> Generator[str | tuple[str, str], None, None]:
    """
    Context manager for temporarily setting a locale.

    Parameters
    ----------
    new_locale : str or tuple
        A string of the form <language_country>.<encoding>. For example to
        set the current locale to US English with a UTF8 encoding, you would
        pass "en_US.UTF-8".
    lc_var : int, default `locale.LC_ALL`
        The category of the locale being set.

    Yields
    ------
    str or tuple
        The normalized "<code>.<encoding>" form when the platform reports
        one, otherwise ``new_locale`` unchanged.

    Notes
    -----
    This is useful when you want to run a particular block of code under a
    particular locale, without globally setting the locale. This probably
    isn't thread-safe.
    """
    # Query via setlocale, not getlocale: the two are not always consistent
    # with each other (GH#46595).
    previous = locale.setlocale(lc_var)

    try:
        locale.setlocale(lc_var, new_locale)
        code, encoding = locale.getlocale()
        if code is None or encoding is None:
            yield new_locale
        else:
            yield f"{code}.{encoding}"
    finally:
        locale.setlocale(lc_var, previous)
52
+
53
+
54
def can_set_locale(lc: str, lc_var: int = locale.LC_ALL) -> bool:
    """
    Report whether a locale can be set and read back without raising.

    Parameters
    ----------
    lc : str
        The locale to attempt to set.
    lc_var : int, default `locale.LC_ALL`
        The category of the locale being set.

    Returns
    -------
    bool
        Whether the passed locale can be set.
    """
    try:
        # locale.Error is what setlocale raises for unknown locales.
        with set_locale(lc, lc_var=lc_var):
            pass
    except (ValueError, locale.Error):
        return False
    return True
79
+
80
+
81
def _valid_locales(locales: list[str] | str, normalize: bool) -> list[str]:
    """
    Filter ``locales`` down to those that can actually be set.

    Parameters
    ----------
    locales : list of str or str
        Candidate locale names, iterated element-wise.
    normalize : bool
        Whether to call ``locale.normalize`` on each candidate first.

    Returns
    -------
    valid_locales : list
        The candidates (stripped, optionally normalized) that can be set
        without raising.
    """
    if normalize:
        candidates = (locale.normalize(loc.strip()) for loc in locales)
    else:
        candidates = (loc.strip() for loc in locales)
    return [loc for loc in candidates if can_set_locale(loc)]
106
+
107
+
108
def get_locales(
    prefix: str | None = None,
    normalize: bool = True,
) -> list[str]:
    """
    Get all the locales that are available on the system.

    Parameters
    ----------
    prefix : str
        If not ``None`` then return only those locales with the prefix
        provided. For example to get all English language locales (those that
        start with ``"en"``), pass ``prefix="en"``.
    normalize : bool
        Call ``locale.normalize`` on the resulting list of available locales.
        If ``True``, only locales that can be set without throwing an
        ``Exception`` are returned.

    Returns
    -------
    locales : list of strings
        A list of locale strings that can be set with ``locale.setlocale()``.
        For example::

            locale.setlocale(locale.LC_ALL, locale_string)

    On error will return an empty list (no locale available, e.g. Windows)

    """
    # "locale -a" is only defined on Linux/macOS; everything else gets [].
    if platform.system() in ("Linux", "Darwin"):
        raw_locales = subprocess.check_output(["locale", "-a"])
    else:
        # Other platforms e.g. windows platforms don't define "locale -a"
        # Note: is_platform_windows causes circular import here
        return []

    try:
        # raw_locales is "\n" separated list of locales
        # it may contain non-decodable parts, so split
        # extract what we can and then rejoin.
        split_raw_locales = raw_locales.split(b"\n")
        out_locales = []
        for x in split_raw_locales:
            try:
                # Decode using the configured display encoding first.
                out_locales.append(str(x, encoding=options.display.encoding))
            except UnicodeError:
                # 'locale -a' is used to populated 'raw_locales' and on
                # Redhat 7 Linux (and maybe others) prints locale names
                # using windows-1252 encoding. Bug only triggered by
                # a few special characters and when there is an
                # extensive list of installed locales.
                out_locales.append(str(x, encoding="windows-1252"))

    # NOTE(review): if this TypeError actually fires, ``out_locales`` below is
    # undefined and the function raises NameError — presumably unreachable in
    # practice; confirm before relying on this except clause.
    except TypeError:
        pass

    if prefix is None:
        return _valid_locales(out_locales, normalize)

    # Keep only locales whose name starts with ``prefix``.
    pattern = re.compile(f"{prefix}.*")
    found = pattern.findall("\n".join(out_locales))
    return _valid_locales(found, normalize)
videochat2/lib/python3.10/site-packages/pandas/_testing/__init__.py ADDED
@@ -0,0 +1,1168 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import collections
4
+ from datetime import datetime
5
+ from decimal import Decimal
6
+ import operator
7
+ import os
8
+ import re
9
+ import string
10
+ from sys import byteorder
11
+ from typing import (
12
+ TYPE_CHECKING,
13
+ Callable,
14
+ ContextManager,
15
+ Counter,
16
+ Iterable,
17
+ cast,
18
+ )
19
+
20
+ import numpy as np
21
+
22
+ from pandas._config.localization import (
23
+ can_set_locale,
24
+ get_locales,
25
+ set_locale,
26
+ )
27
+
28
+ from pandas._typing import (
29
+ Dtype,
30
+ Frequency,
31
+ NpDtype,
32
+ )
33
+ from pandas.compat import pa_version_under7p0
34
+
35
+ from pandas.core.dtypes.common import (
36
+ is_float_dtype,
37
+ is_integer_dtype,
38
+ is_sequence,
39
+ is_signed_integer_dtype,
40
+ is_unsigned_integer_dtype,
41
+ pandas_dtype,
42
+ )
43
+
44
+ import pandas as pd
45
+ from pandas import (
46
+ ArrowDtype,
47
+ Categorical,
48
+ CategoricalIndex,
49
+ DataFrame,
50
+ DatetimeIndex,
51
+ Index,
52
+ IntervalIndex,
53
+ MultiIndex,
54
+ RangeIndex,
55
+ Series,
56
+ bdate_range,
57
+ )
58
+ from pandas._testing._io import (
59
+ close,
60
+ network,
61
+ round_trip_localpath,
62
+ round_trip_pathlib,
63
+ round_trip_pickle,
64
+ write_to_compressed,
65
+ )
66
+ from pandas._testing._random import (
67
+ rands,
68
+ rands_array,
69
+ )
70
+ from pandas._testing._warnings import (
71
+ assert_produces_warning,
72
+ maybe_produces_warning,
73
+ )
74
+ from pandas._testing.asserters import (
75
+ assert_almost_equal,
76
+ assert_attr_equal,
77
+ assert_categorical_equal,
78
+ assert_class_equal,
79
+ assert_contains_all,
80
+ assert_copy,
81
+ assert_datetime_array_equal,
82
+ assert_dict_equal,
83
+ assert_equal,
84
+ assert_extension_array_equal,
85
+ assert_frame_equal,
86
+ assert_index_equal,
87
+ assert_indexing_slices_equivalent,
88
+ assert_interval_array_equal,
89
+ assert_is_sorted,
90
+ assert_is_valid_plot_return_object,
91
+ assert_metadata_equivalent,
92
+ assert_numpy_array_equal,
93
+ assert_period_array_equal,
94
+ assert_series_equal,
95
+ assert_sp_array_equal,
96
+ assert_timedelta_array_equal,
97
+ raise_assert_detail,
98
+ )
99
+ from pandas._testing.compat import (
100
+ get_dtype,
101
+ get_obj,
102
+ )
103
+ from pandas._testing.contexts import (
104
+ decompress_file,
105
+ ensure_clean,
106
+ ensure_safe_environment_variables,
107
+ raises_chained_assignment_error,
108
+ set_timezone,
109
+ use_numexpr,
110
+ with_csv_dialect,
111
+ )
112
+ from pandas.core.arrays import (
113
+ BaseMaskedArray,
114
+ ExtensionArray,
115
+ PandasArray,
116
+ )
117
+ from pandas.core.arrays._mixins import NDArrayBackedExtensionArray
118
+ from pandas.core.construction import extract_array
119
+
120
+ if TYPE_CHECKING:
121
+ from pandas import (
122
+ PeriodIndex,
123
+ TimedeltaIndex,
124
+ )
125
+ from pandas.core.arrays import ArrowExtensionArray
126
+
127
# Default sizes used by the make* fixture helpers below.
_N = 30
_K = 4

# Groups of dtype specifiers used to parametrize tests: numpy dtypes,
# pandas extension-array ("EA") dtypes, and their unions.
UNSIGNED_INT_NUMPY_DTYPES: list[NpDtype] = ["uint8", "uint16", "uint32", "uint64"]
UNSIGNED_INT_EA_DTYPES: list[Dtype] = ["UInt8", "UInt16", "UInt32", "UInt64"]
SIGNED_INT_NUMPY_DTYPES: list[NpDtype] = [int, "int8", "int16", "int32", "int64"]
SIGNED_INT_EA_DTYPES: list[Dtype] = ["Int8", "Int16", "Int32", "Int64"]
ALL_INT_NUMPY_DTYPES = UNSIGNED_INT_NUMPY_DTYPES + SIGNED_INT_NUMPY_DTYPES
ALL_INT_EA_DTYPES = UNSIGNED_INT_EA_DTYPES + SIGNED_INT_EA_DTYPES
ALL_INT_DTYPES: list[Dtype] = [*ALL_INT_NUMPY_DTYPES, *ALL_INT_EA_DTYPES]

FLOAT_NUMPY_DTYPES: list[NpDtype] = [float, "float32", "float64"]
FLOAT_EA_DTYPES: list[Dtype] = ["Float32", "Float64"]
ALL_FLOAT_DTYPES: list[Dtype] = [*FLOAT_NUMPY_DTYPES, *FLOAT_EA_DTYPES]

COMPLEX_DTYPES: list[Dtype] = [complex, "complex64", "complex128"]
STRING_DTYPES: list[Dtype] = [str, "str", "U"]

DATETIME64_DTYPES: list[Dtype] = ["datetime64[ns]", "M8[ns]"]
TIMEDELTA64_DTYPES: list[Dtype] = ["timedelta64[ns]", "m8[ns]"]

BOOL_DTYPES: list[Dtype] = [bool, "bool"]
BYTES_DTYPES: list[Dtype] = [bytes, "bytes"]
OBJECT_DTYPES: list[Dtype] = [object, "object"]

# "Real" = floats + ints (no complex).
ALL_REAL_NUMPY_DTYPES = FLOAT_NUMPY_DTYPES + ALL_INT_NUMPY_DTYPES
ALL_REAL_EXTENSION_DTYPES = FLOAT_EA_DTYPES + ALL_INT_EA_DTYPES
ALL_REAL_DTYPES: list[Dtype] = [*ALL_REAL_NUMPY_DTYPES, *ALL_REAL_EXTENSION_DTYPES]
ALL_NUMERIC_DTYPES: list[Dtype] = [*ALL_REAL_DTYPES, *COMPLEX_DTYPES]

ALL_NUMPY_DTYPES = (
    ALL_REAL_NUMPY_DTYPES
    + COMPLEX_DTYPES
    + STRING_DTYPES
    + DATETIME64_DTYPES
    + TIMEDELTA64_DTYPES
    + BOOL_DTYPES
    + OBJECT_DTYPES
    + BYTES_DTYPES
)

# Numpy dtypes narrower than the platform defaults (float64/int64/uint64).
NARROW_NP_DTYPES = [
    np.float16,
    np.float32,
    np.int8,
    np.int16,
    np.int32,
    np.uint8,
    np.uint16,
    np.uint32,
]

# "<" or ">" byte-order prefix matching this platform's native endianness.
ENDIAN = {"little": "<", "big": ">"}[byteorder]

# Every null-like sentinel pandas recognizes.
NULL_OBJECTS = [None, np.nan, pd.NaT, float("nan"), pd.NA, Decimal("NaN")]
# numpy NaT values across all datetime64/timedelta64 resolutions.
NP_NAT_OBJECTS = [
    cls("NaT", unit)
    for cls in [np.datetime64, np.timedelta64]
    for unit in [
        "Y",
        "M",
        "W",
        "D",
        "h",
        "m",
        "s",
        "ms",
        "us",
        "ns",
        "ps",
        "fs",
        "as",
    ]
]
201
+
202
# pyarrow dtype groups — only populated when a sufficiently new pyarrow
# (>= 7.0) is installed; otherwise the *_STR_REPR / ALL lists are empty.
if not pa_version_under7p0:
    import pyarrow as pa

    UNSIGNED_INT_PYARROW_DTYPES = [pa.uint8(), pa.uint16(), pa.uint32(), pa.uint64()]
    SIGNED_INT_PYARROW_DTYPES = [pa.int8(), pa.int16(), pa.int32(), pa.int64()]
    ALL_INT_PYARROW_DTYPES = UNSIGNED_INT_PYARROW_DTYPES + SIGNED_INT_PYARROW_DTYPES
    ALL_INT_PYARROW_DTYPES_STR_REPR = [
        str(ArrowDtype(typ)) for typ in ALL_INT_PYARROW_DTYPES
    ]

    # pa.float16 doesn't seem supported
    # https://github.com/apache/arrow/blob/master/python/pyarrow/src/arrow/python/helpers.cc#L86
    FLOAT_PYARROW_DTYPES = [pa.float32(), pa.float64()]
    FLOAT_PYARROW_DTYPES_STR_REPR = [
        str(ArrowDtype(typ)) for typ in FLOAT_PYARROW_DTYPES
    ]
    DECIMAL_PYARROW_DTYPES = [pa.decimal128(7, 3)]
    STRING_PYARROW_DTYPES = [pa.string()]
    BINARY_PYARROW_DTYPES = [pa.binary()]

    TIME_PYARROW_DTYPES = [
        pa.time32("s"),
        pa.time32("ms"),
        pa.time64("us"),
        pa.time64("ns"),
    ]
    DATE_PYARROW_DTYPES = [pa.date32(), pa.date64()]
    # Timestamps across all unit/timezone combinations.
    DATETIME_PYARROW_DTYPES = [
        pa.timestamp(unit=unit, tz=tz)
        for unit in ["s", "ms", "us", "ns"]
        for tz in [None, "UTC", "US/Pacific", "US/Eastern"]
    ]
    TIMEDELTA_PYARROW_DTYPES = [pa.duration(unit) for unit in ["s", "ms", "us", "ns"]]

    BOOL_PYARROW_DTYPES = [pa.bool_()]

    # TODO: Add container like pyarrow types:
    #  https://arrow.apache.org/docs/python/api/datatypes.html#factory-functions
    ALL_PYARROW_DTYPES = (
        ALL_INT_PYARROW_DTYPES
        + FLOAT_PYARROW_DTYPES
        + DECIMAL_PYARROW_DTYPES
        + STRING_PYARROW_DTYPES
        + BINARY_PYARROW_DTYPES
        + TIME_PYARROW_DTYPES
        + DATE_PYARROW_DTYPES
        + DATETIME_PYARROW_DTYPES
        + TIMEDELTA_PYARROW_DTYPES
        + BOOL_PYARROW_DTYPES
    )
else:
    FLOAT_PYARROW_DTYPES_STR_REPR = []
    ALL_INT_PYARROW_DTYPES_STR_REPR = []
    ALL_PYARROW_DTYPES = []


# Matches the empty string only.
EMPTY_STRING_PATTERN = re.compile("^$")
259
+
260
+
261
def reset_display_options() -> None:
    """
    Restore every "display.*" option to its default value.
    """
    pd.reset_option("^display.", silent=True)
266
+
267
+
268
# -----------------------------------------------------------------------------
# Comparators


def equalContents(arr1, arr2) -> bool:
    """
    Return True when arr1 and arr2 contain the same set of unique elements.
    """
    left = frozenset(arr1)
    right = frozenset(arr2)
    return left == right
277
+
278
+
279
def box_expected(expected, box_cls, transpose: bool = True):
    """
    Wrap the expected output of a test in ``box_cls``.

    Parameters
    ----------
    expected : np.ndarray, Index, Series
    box_cls : {Index, Series, DataFrame}
        Also accepts ``pd.array``, ``np.ndarray``/``np.array`` and this
        module's ``to_array`` helper.
    transpose : bool, default True
        For DataFrame output only: transpose to a single row and stack it
        twice (two rows avoid single-row special cases in datetime
        arithmetic while still aligning with same-length vectors).

    Returns
    -------
    subclass of box_cls
    """
    if box_cls is pd.array:
        if isinstance(expected, RangeIndex):
            # pd.array would return an IntegerArray
            return PandasArray(np.asarray(expected._values))
        return pd.array(expected, copy=False)
    if box_cls is Index:
        return Index(expected)
    if box_cls is Series:
        return Series(expected)
    if box_cls is DataFrame:
        frame = Series(expected).to_frame()
        if transpose:
            # For vector operations, we need a DataFrame to be a single-row,
            # not a single-column, in order to operate against non-DataFrame
            # vectors of the same length. But convert to two rows to avoid
            # single-row special cases in datetime arithmetic.
            frame = pd.concat([frame.T] * 2, ignore_index=True)
        return frame
    if box_cls is np.ndarray or box_cls is np.array:
        return np.array(expected)
    if box_cls is to_array:
        return to_array(expected)
    raise NotImplementedError(box_cls)
318
+
319
+
320
def to_array(obj):
    """
    Similar to pd.array, but does not cast numpy dtypes to nullable dtypes.
    """
    # temporary implementation until we get pd.array in place
    # Anything without a dtype (lists, tuples, ...) becomes a plain ndarray.
    if getattr(obj, "dtype", None) is None:
        return np.asarray(obj)
    return extract_array(obj, extract_numpy=True)
331
+
332
+
333
# -----------------------------------------------------------------------------
# Others


def getCols(k) -> str:
    """Return the first ``k`` uppercase ASCII letters, used as column labels."""
    return string.ascii_uppercase[:k]
339
+
340
+
341
# make index
def makeStringIndex(k: int = 10, name=None) -> Index:
    """Return an Index of ``k`` random 10-character strings."""
    return Index(rands_array(nchars=10, size=k), name=name)
344
+
345
+
346
def makeCategoricalIndex(
    k: int = 10, n: int = 3, name=None, **kwargs
) -> CategoricalIndex:
    """make a length k index or n categories"""
    # Draw n distinct random 4-char labels, then cycle codes 0..n-1 over k.
    x = rands_array(nchars=4, size=n, replace=False)
    return CategoricalIndex(
        Categorical.from_codes(np.arange(k) % n, categories=x), name=name, **kwargs
    )
354
+
355
+
356
def makeIntervalIndex(k: int = 10, name=None, **kwargs) -> IntervalIndex:
    """Return an IntervalIndex of ``k`` equal-width intervals spanning [0, 100]."""
    breaks = np.linspace(0, 100, num=(k + 1))
    return IntervalIndex.from_breaks(breaks, name=name, **kwargs)
360
+
361
+
362
def makeBoolIndex(k: int = 10, name=None) -> Index:
    """
    Return a boolean Index: [True] for k==1, [False, True] for k==2,
    otherwise [False, True] padded with False (note k==0 also hits the
    padded branch and yields length 2, matching historical behavior).
    """
    if k == 1:
        values = [True]
    elif k == 2:
        values = [False, True]
    else:
        values = [False, True] + [False] * (k - 2)
    return Index(values, name=name)
368
+
369
+
370
def makeNumericIndex(k: int = 10, *, name=None, dtype: Dtype | None) -> Index:
    """Return a length-``k`` Index of the requested numpy numeric dtype."""
    dtype = pandas_dtype(dtype)
    assert isinstance(dtype, np.dtype)

    if is_integer_dtype(dtype):
        values = np.arange(k, dtype=dtype)
        if is_unsigned_integer_dtype(dtype):
            # Shift by 2**(bits-1) so unsigned values exceed the signed range.
            values += 2 ** (dtype.itemsize * 8 - 1)
    elif is_float_dtype(dtype):
        # Sorted random floats, scaled by a random power of ten.
        values = np.random.random_sample(k) - np.random.random_sample(1)
        values.sort()
        values = values * (10 ** np.random.randint(0, 9))
    else:
        raise NotImplementedError(f"wrong dtype {dtype}")

    return Index(values, dtype=dtype, name=name)
386
+
387
+
388
def makeIntIndex(k: int = 10, *, name=None, dtype: Dtype = "int64") -> Index:
    """Return a length-``k`` Index of a signed integer dtype (default int64)."""
    dtype = pandas_dtype(dtype)
    if not is_signed_integer_dtype(dtype):
        raise TypeError(f"Wrong dtype {dtype}")
    return makeNumericIndex(k, name=name, dtype=dtype)
393
+
394
+
395
def makeUIntIndex(k: int = 10, *, name=None, dtype: Dtype = "uint64") -> Index:
    """Return a length-``k`` Index of an unsigned integer dtype (default uint64)."""
    dtype = pandas_dtype(dtype)
    if not is_unsigned_integer_dtype(dtype):
        raise TypeError(f"Wrong dtype {dtype}")
    return makeNumericIndex(k, name=name, dtype=dtype)
400
+
401
+
402
def makeRangeIndex(k: int = 10, name=None, **kwargs) -> RangeIndex:
    """Return a RangeIndex covering 0..k-1 with step 1."""
    return RangeIndex(start=0, stop=k, step=1, name=name, **kwargs)
404
+
405
+
406
def makeFloatIndex(k: int = 10, *, name=None, dtype: Dtype = "float64") -> Index:
    """Return a length-``k`` Index of a float dtype (default float64)."""
    dtype = pandas_dtype(dtype)
    if not is_float_dtype(dtype):
        raise TypeError(f"Wrong dtype {dtype}")
    return makeNumericIndex(k, name=name, dtype=dtype)
411
+
412
+
413
def makeDateIndex(
    k: int = 10, freq: Frequency = "B", name=None, **kwargs
) -> DatetimeIndex:
    """Return a DatetimeIndex of ``k`` stamps (default business-daily) from 2000-01-01."""
    start = datetime(2000, 1, 1)
    stamps = bdate_range(start, periods=k, freq=freq, name=name)
    return DatetimeIndex(stamps, name=name, **kwargs)
419
+
420
+
421
def makeTimedeltaIndex(
    k: int = 10, freq: Frequency = "D", name=None, **kwargs
) -> TimedeltaIndex:
    """Return a TimedeltaIndex of ``k`` periods (default daily) starting at "1 day"."""
    return pd.timedelta_range(start="1 day", periods=k, freq=freq, name=name, **kwargs)
425
+
426
+
427
def makePeriodIndex(k: int = 10, name=None, **kwargs) -> PeriodIndex:
    """Return a business-daily PeriodIndex of ``k`` periods starting 2000-01-01."""
    start = datetime(2000, 1, 1)
    return pd.period_range(start=start, periods=k, freq="B", name=name, **kwargs)
430
+
431
+
432
def makeMultiIndex(k: int = 10, names=None, **kwargs):
    """Return a 2-level MultiIndex ('foo'/'bar' x range) truncated to length ``k``."""
    half = (k // 2) + 1
    mi = MultiIndex.from_product([("foo", "bar"), range(half)], names=names, **kwargs)
    assert len(mi) >= k  # GH#38795
    return mi[:k]
438
+
439
+
440
def index_subclass_makers_generator():
    """Yield the maker function for each Index subclass defined in this module."""
    yield from (
        makeDateIndex,
        makePeriodIndex,
        makeTimedeltaIndex,
        makeRangeIndex,
        makeIntervalIndex,
        makeCategoricalIndex,
        makeMultiIndex,
    )
451
+
452
+
453
def all_timeseries_index_generator(k: int = 10) -> Iterable[Index]:
    """
    Generator which can be iterated over to get instances of all the classes
    which represent time-series.

    Parameters
    ----------
    k: length of each of the index instances
    """
    for maker in (makeDateIndex, makePeriodIndex, makeTimedeltaIndex):
        yield maker(k=k)
469
+
470
+
471
# make series
def make_rand_series(name=None, dtype=np.float64) -> Series:
    """Return a Series of ``_N`` random draws cast to ``dtype``, over a random string index."""
    index = makeStringIndex(_N)
    data = np.random.randn(_N)
    with np.errstate(invalid="ignore"):
        # Casting gaussian draws to some dtypes can warn about invalid
        # values; suppressed since any resulting values are acceptable here.
        data = data.astype(dtype, copy=False)
    return Series(data, index=index, name=name)
478
+
479
+
480
def makeFloatSeries(name=None) -> Series:
    """Return a Series of ``_N`` random float64 values over a random string index."""
    return make_rand_series(name=name)
482
+
483
+
484
def makeStringSeries(name=None) -> Series:
    """Return a Series of random floats over a random *string* index (the values are not strings)."""
    return make_rand_series(name=name)
486
+
487
+
488
def makeObjectSeries(name=None) -> Series:
    """Return a Series of random strings (object dtype) over a random string index."""
    data = makeStringIndex(_N)
    data = Index(data, dtype=object)
    index = makeStringIndex(_N)
    return Series(data, index=index, name=name)
493
+
494
+
495
def getSeriesData() -> dict[str, Series]:
    """Return ``_K`` random float Series sharing one string index, keyed 'A'..'D'."""
    index = makeStringIndex(_N)
    return {c: Series(np.random.randn(_N), index=index) for c in getCols(_K)}
498
+
499
+
500
def makeTimeSeries(nper=None, freq: Frequency = "B", name=None) -> Series:
    """Return a Series of random floats over a DatetimeIndex of ``nper`` (default ``_N``) periods."""
    if nper is None:
        nper = _N
    return Series(
        np.random.randn(nper), index=makeDateIndex(nper, freq=freq), name=name
    )
506
+
507
+
508
def makePeriodSeries(nper=None, name=None) -> Series:
    """Return a Series of random floats over a business-day PeriodIndex."""
    if nper is None:
        nper = _N
    return Series(np.random.randn(nper), index=makePeriodIndex(nper), name=name)
512
+
513
+
514
def getTimeSeriesData(nper=None, freq: Frequency = "B") -> dict[str, Series]:
    """Return ``_K`` random time series keyed by column letter ('A'..'D')."""
    return {c: makeTimeSeries(nper, freq) for c in getCols(_K)}
516
+
517
+
518
def getPeriodData(nper=None) -> dict[str, Series]:
    """Return ``_K`` random period-indexed series keyed by column letter."""
    return {c: makePeriodSeries(nper) for c in getCols(_K)}
520
+
521
+
522
# make frame
def makeTimeDataFrame(nper=None, freq: Frequency = "B") -> DataFrame:
    """Return a DataFrame of random floats over a DatetimeIndex."""
    data = getTimeSeriesData(nper, freq)
    return DataFrame(data)
526
+
527
+
528
def makeDataFrame() -> DataFrame:
    """Return a ``_N`` x ``_K`` DataFrame of random floats over a random string index."""
    data = getSeriesData()
    return DataFrame(data)
531
+
532
+
533
def getMixedTypeDict():
    """Return ``(index, data)`` for a small frame mixing float, string and datetime columns."""
    index = Index(["a", "b", "c", "d", "e"])

    columns = {
        "A": [0.0, 1.0, 2.0, 3.0, 4.0],
        "B": [0.0, 1.0, 0.0, 1.0, 0.0],
        "C": ["foo1", "foo2", "foo3", "foo4", "foo5"],
        "D": bdate_range("1/1/2009", periods=5),
    }

    return index, columns
544
+
545
+
546
def makeMixedDataFrame() -> DataFrame:
    """Return a small DataFrame with float, string and datetime columns."""
    return DataFrame(getMixedTypeDict()[1])
548
+
549
+
550
def makePeriodFrame(nper=None) -> DataFrame:
    """Return a DataFrame of random floats over a business-day PeriodIndex."""
    data = getPeriodData(nper)
    return DataFrame(data)
553
+
554
+
555
+ def makeCustomIndex(
556
+ nentries,
557
+ nlevels,
558
+ prefix: str = "#",
559
+ names: bool | str | list[str] | None = False,
560
+ ndupe_l=None,
561
+ idx_type=None,
562
+ ) -> Index:
563
+ """
564
+ Create an index/multindex with given dimensions, levels, names, etc'
565
+
566
+ nentries - number of entries in index
567
+ nlevels - number of levels (> 1 produces multindex)
568
+ prefix - a string prefix for labels
569
+ names - (Optional), bool or list of strings. if True will use default
570
+ names, if false will use no names, if a list is given, the name of
571
+ each level in the index will be taken from the list.
572
+ ndupe_l - (Optional), list of ints, the number of rows for which the
573
+ label will repeated at the corresponding level, you can specify just
574
+ the first few, the rest will use the default ndupe_l of 1.
575
+ len(ndupe_l) <= nlevels.
576
+ idx_type - "i"/"f"/"s"/"dt"/"p"/"td".
577
+ If idx_type is not None, `idx_nlevels` must be 1.
578
+ "i"/"f" creates an integer/float index,
579
+ "s" creates a string
580
+ "dt" create a datetime index.
581
+ "td" create a datetime index.
582
+
583
+ if unspecified, string labels will be generated.
584
+ """
585
+ if ndupe_l is None:
586
+ ndupe_l = [1] * nlevels
587
+ assert is_sequence(ndupe_l) and len(ndupe_l) <= nlevels
588
+ assert names is None or names is False or names is True or len(names) is nlevels
589
+ assert idx_type is None or (
590
+ idx_type in ("i", "f", "s", "u", "dt", "p", "td") and nlevels == 1
591
+ )
592
+
593
+ if names is True:
594
+ # build default names
595
+ names = [prefix + str(i) for i in range(nlevels)]
596
+ if names is False:
597
+ # pass None to index constructor for no name
598
+ names = None
599
+
600
+ # make singleton case uniform
601
+ if isinstance(names, str) and nlevels == 1:
602
+ names = [names]
603
+
604
+ # specific 1D index type requested?
605
+ idx_func_dict: dict[str, Callable[..., Index]] = {
606
+ "i": makeIntIndex,
607
+ "f": makeFloatIndex,
608
+ "s": makeStringIndex,
609
+ "dt": makeDateIndex,
610
+ "td": makeTimedeltaIndex,
611
+ "p": makePeriodIndex,
612
+ }
613
+ idx_func = idx_func_dict.get(idx_type)
614
+ if idx_func:
615
+ idx = idx_func(nentries)
616
+ # but we need to fill in the name
617
+ if names:
618
+ idx.name = names[0]
619
+ return idx
620
+ elif idx_type is not None:
621
+ raise ValueError(
622
+ f"{repr(idx_type)} is not a legal value for `idx_type`, "
623
+ "use 'i'/'f'/'s'/'dt'/'p'/'td'."
624
+ )
625
+
626
+ if len(ndupe_l) < nlevels:
627
+ ndupe_l.extend([1] * (nlevels - len(ndupe_l)))
628
+ assert len(ndupe_l) == nlevels
629
+
630
+ assert all(x > 0 for x in ndupe_l)
631
+
632
+ list_of_lists = []
633
+ for i in range(nlevels):
634
+
635
+ def keyfunc(x):
636
+ numeric_tuple = re.sub(r"[^\d_]_?", "", x).split("_")
637
+ return [int(num) for num in numeric_tuple]
638
+
639
+ # build a list of lists to create the index from
640
+ div_factor = nentries // ndupe_l[i] + 1
641
+
642
+ # Deprecated since version 3.9: collections.Counter now supports []. See PEP 585
643
+ # and Generic Alias Type.
644
+ cnt: Counter[str] = collections.Counter()
645
+ for j in range(div_factor):
646
+ label = f"{prefix}_l{i}_g{j}"
647
+ cnt[label] = ndupe_l[i]
648
+ # cute Counter trick
649
+ result = sorted(cnt.elements(), key=keyfunc)[:nentries]
650
+ list_of_lists.append(result)
651
+
652
+ tuples = list(zip(*list_of_lists))
653
+
654
+ # convert tuples to index
655
+ if nentries == 1:
656
+ # we have a single level of tuples, i.e. a regular Index
657
+ name = None if names is None else names[0]
658
+ index = Index(tuples[0], name=name)
659
+ elif nlevels == 1:
660
+ name = None if names is None else names[0]
661
+ index = Index((x[0] for x in tuples), name=name)
662
+ else:
663
+ index = MultiIndex.from_tuples(tuples, names=names)
664
+ return index
665
+
666
+
667
+ def makeCustomDataframe(
668
+ nrows,
669
+ ncols,
670
+ c_idx_names: bool | list[str] = True,
671
+ r_idx_names: bool | list[str] = True,
672
+ c_idx_nlevels: int = 1,
673
+ r_idx_nlevels: int = 1,
674
+ data_gen_f=None,
675
+ c_ndupe_l=None,
676
+ r_ndupe_l=None,
677
+ dtype=None,
678
+ c_idx_type=None,
679
+ r_idx_type=None,
680
+ ) -> DataFrame:
681
+ """
682
+ Create a DataFrame using supplied parameters.
683
+
684
+ Parameters
685
+ ----------
686
+ nrows, ncols - number of data rows/cols
687
+ c_idx_names, r_idx_names - False/True/list of strings, yields No names ,
688
+ default names or uses the provided names for the levels of the
689
+ corresponding index. You can provide a single string when
690
+ c_idx_nlevels ==1.
691
+ c_idx_nlevels - number of levels in columns index. > 1 will yield MultiIndex
692
+ r_idx_nlevels - number of levels in rows index. > 1 will yield MultiIndex
693
+ data_gen_f - a function f(row,col) which return the data value
694
+ at that position, the default generator used yields values of the form
695
+ "RxCy" based on position.
696
+ c_ndupe_l, r_ndupe_l - list of integers, determines the number
697
+ of duplicates for each label at a given level of the corresponding
698
+ index. The default `None` value produces a multiplicity of 1 across
699
+ all levels, i.e. a unique index. Will accept a partial list of length
700
+ N < idx_nlevels, for just the first N levels. If ndupe doesn't divide
701
+ nrows/ncol, the last label might have lower multiplicity.
702
+ dtype - passed to the DataFrame constructor as is, in case you wish to
703
+ have more control in conjunction with a custom `data_gen_f`
704
+ r_idx_type, c_idx_type - "i"/"f"/"s"/"dt"/"td".
705
+ If idx_type is not None, `idx_nlevels` must be 1.
706
+ "i"/"f" creates an integer/float index,
707
+ "s" creates a string index
708
+ "dt" create a datetime index.
709
+ "td" create a timedelta index.
710
+
711
+ if unspecified, string labels will be generated.
712
+
713
+ Examples
714
+ --------
715
+ # 5 row, 3 columns, default names on both, single index on both axis
716
+ >> makeCustomDataframe(5,3)
717
+
718
+ # make the data a random int between 1 and 100
719
+ >> mkdf(5,3,data_gen_f=lambda r,c:randint(1,100))
720
+
721
+ # 2-level multiindex on rows with each label duplicated
722
+ # twice on first level, default names on both axis, single
723
+ # index on both axis
724
+ >> a=makeCustomDataframe(5,3,r_idx_nlevels=2,r_ndupe_l=[2])
725
+
726
+ # DatetimeIndex on row, index with unicode labels on columns
727
+ # no names on either axis
728
+ >> a=makeCustomDataframe(5,3,c_idx_names=False,r_idx_names=False,
729
+ r_idx_type="dt",c_idx_type="u")
730
+
731
+ # 4-level multindex on rows with names provided, 2-level multindex
732
+ # on columns with default labels and default names.
733
+ >> a=makeCustomDataframe(5,3,r_idx_nlevels=4,
734
+ r_idx_names=["FEE","FIH","FOH","FUM"],
735
+ c_idx_nlevels=2)
736
+
737
+ >> a=mkdf(5,3,r_idx_nlevels=2,c_idx_nlevels=4)
738
+ """
739
+ assert c_idx_nlevels > 0
740
+ assert r_idx_nlevels > 0
741
+ assert r_idx_type is None or (
742
+ r_idx_type in ("i", "f", "s", "dt", "p", "td") and r_idx_nlevels == 1
743
+ )
744
+ assert c_idx_type is None or (
745
+ c_idx_type in ("i", "f", "s", "dt", "p", "td") and c_idx_nlevels == 1
746
+ )
747
+
748
+ columns = makeCustomIndex(
749
+ ncols,
750
+ nlevels=c_idx_nlevels,
751
+ prefix="C",
752
+ names=c_idx_names,
753
+ ndupe_l=c_ndupe_l,
754
+ idx_type=c_idx_type,
755
+ )
756
+ index = makeCustomIndex(
757
+ nrows,
758
+ nlevels=r_idx_nlevels,
759
+ prefix="R",
760
+ names=r_idx_names,
761
+ ndupe_l=r_ndupe_l,
762
+ idx_type=r_idx_type,
763
+ )
764
+
765
+ # by default, generate data based on location
766
+ if data_gen_f is None:
767
+ data_gen_f = lambda r, c: f"R{r}C{c}"
768
+
769
+ data = [[data_gen_f(r, c) for c in range(ncols)] for r in range(nrows)]
770
+
771
+ return DataFrame(data, index, columns, dtype=dtype)
772
+
773
+
774
+ def _create_missing_idx(nrows, ncols, density: float, random_state=None):
775
+ if random_state is None:
776
+ random_state = np.random
777
+ else:
778
+ random_state = np.random.RandomState(random_state)
779
+
780
+ # below is cribbed from scipy.sparse
781
+ size = round((1 - density) * nrows * ncols)
782
+ # generate a few more to ensure unique values
783
+ min_rows = 5
784
+ fac = 1.02
785
+ extra_size = min(size + min_rows, fac * size)
786
+
787
+ def _gen_unique_rand(rng, _extra_size):
788
+ ind = rng.rand(int(_extra_size))
789
+ return np.unique(np.floor(ind * nrows * ncols))[:size]
790
+
791
+ ind = _gen_unique_rand(random_state, extra_size)
792
+ while ind.size < size:
793
+ extra_size *= 1.05
794
+ ind = _gen_unique_rand(random_state, extra_size)
795
+
796
+ j = np.floor(ind * 1.0 / nrows).astype(int)
797
+ i = (ind - j * nrows).astype(int)
798
+ return i.tolist(), j.tolist()
799
+
800
+
801
+ def makeMissingDataframe(density: float = 0.9, random_state=None) -> DataFrame:
802
+ df = makeDataFrame()
803
+ i, j = _create_missing_idx(*df.shape, density=density, random_state=random_state)
804
+ df.iloc[i, j] = np.nan
805
+ return df
806
+
807
+
808
+ class SubclassedSeries(Series):
809
+ _metadata = ["testattr", "name"]
810
+
811
+ @property
812
+ def _constructor(self):
813
+ # For testing, those properties return a generic callable, and not
814
+ # the actual class. In this case that is equivalent, but it is to
815
+ # ensure we don't rely on the property returning a class
816
+ # See https://github.com/pandas-dev/pandas/pull/46018 and
817
+ # https://github.com/pandas-dev/pandas/issues/32638 and linked issues
818
+ return lambda *args, **kwargs: SubclassedSeries(*args, **kwargs)
819
+
820
+ @property
821
+ def _constructor_expanddim(self):
822
+ return lambda *args, **kwargs: SubclassedDataFrame(*args, **kwargs)
823
+
824
+
825
+ class SubclassedDataFrame(DataFrame):
826
+ _metadata = ["testattr"]
827
+
828
+ @property
829
+ def _constructor(self):
830
+ return lambda *args, **kwargs: SubclassedDataFrame(*args, **kwargs)
831
+
832
+ @property
833
+ def _constructor_sliced(self):
834
+ return lambda *args, **kwargs: SubclassedSeries(*args, **kwargs)
835
+
836
+
837
+ class SubclassedCategorical(Categorical):
838
+ @property
839
+ def _constructor(self):
840
+ return SubclassedCategorical
841
+
842
+
843
+ def _make_skipna_wrapper(alternative, skipna_alternative=None):
844
+ """
845
+ Create a function for calling on an array.
846
+
847
+ Parameters
848
+ ----------
849
+ alternative : function
850
+ The function to be called on the array with no NaNs.
851
+ Only used when 'skipna_alternative' is None.
852
+ skipna_alternative : function
853
+ The function to be called on the original array
854
+
855
+ Returns
856
+ -------
857
+ function
858
+ """
859
+ if skipna_alternative:
860
+
861
+ def skipna_wrapper(x):
862
+ return skipna_alternative(x.values)
863
+
864
+ else:
865
+
866
+ def skipna_wrapper(x):
867
+ nona = x.dropna()
868
+ if len(nona) == 0:
869
+ return np.nan
870
+ return alternative(nona)
871
+
872
+ return skipna_wrapper
873
+
874
+
875
+ def convert_rows_list_to_csv_str(rows_list: list[str]) -> str:
876
+ """
877
+ Convert list of CSV rows to single CSV-formatted string for current OS.
878
+
879
+ This method is used for creating expected value of to_csv() method.
880
+
881
+ Parameters
882
+ ----------
883
+ rows_list : List[str]
884
+ Each element represents the row of csv.
885
+
886
+ Returns
887
+ -------
888
+ str
889
+ Expected output of to_csv() in current OS.
890
+ """
891
+ sep = os.linesep
892
+ return sep.join(rows_list) + sep
893
+
894
+
895
+ def external_error_raised(expected_exception: type[Exception]) -> ContextManager:
896
+ """
897
+ Helper function to mark pytest.raises that have an external error message.
898
+
899
+ Parameters
900
+ ----------
901
+ expected_exception : Exception
902
+ Expected error to raise.
903
+
904
+ Returns
905
+ -------
906
+ Callable
907
+ Regular `pytest.raises` function with `match` equal to `None`.
908
+ """
909
+ import pytest
910
+
911
+ return pytest.raises(expected_exception, match=None)
912
+
913
+
914
+ cython_table = pd.core.common._cython_table.items()
915
+
916
+
917
+ def get_cython_table_params(ndframe, func_names_and_expected):
918
+ """
919
+ Combine frame, functions from com._cython_table
920
+ keys and expected result.
921
+
922
+ Parameters
923
+ ----------
924
+ ndframe : DataFrame or Series
925
+ func_names_and_expected : Sequence of two items
926
+ The first item is a name of a NDFrame method ('sum', 'prod') etc.
927
+ The second item is the expected return value.
928
+
929
+ Returns
930
+ -------
931
+ list
932
+ List of three items (DataFrame, function, expected result)
933
+ """
934
+ results = []
935
+ for func_name, expected in func_names_and_expected:
936
+ results.append((ndframe, func_name, expected))
937
+ results += [
938
+ (ndframe, func, expected)
939
+ for func, name in cython_table
940
+ if name == func_name
941
+ ]
942
+ return results
943
+
944
+
945
+ def get_op_from_name(op_name: str) -> Callable:
946
+ """
947
+ The operator function for a given op name.
948
+
949
+ Parameters
950
+ ----------
951
+ op_name : str
952
+ The op name, in form of "add" or "__add__".
953
+
954
+ Returns
955
+ -------
956
+ function
957
+ A function performing the operation.
958
+ """
959
+ short_opname = op_name.strip("_")
960
+ try:
961
+ op = getattr(operator, short_opname)
962
+ except AttributeError:
963
+ # Assume it is the reverse operator
964
+ rop = getattr(operator, short_opname[1:])
965
+ op = lambda x, y: rop(y, x)
966
+
967
+ return op
968
+
969
+
970
+ # -----------------------------------------------------------------------------
971
+ # Indexing test helpers
972
+
973
+
974
+ def getitem(x):
975
+ return x
976
+
977
+
978
+ def setitem(x):
979
+ return x
980
+
981
+
982
+ def loc(x):
983
+ return x.loc
984
+
985
+
986
+ def iloc(x):
987
+ return x.iloc
988
+
989
+
990
+ def at(x):
991
+ return x.at
992
+
993
+
994
+ def iat(x):
995
+ return x.iat
996
+
997
+
998
+ # -----------------------------------------------------------------------------
999
+
1000
+
1001
+ def shares_memory(left, right) -> bool:
1002
+ """
1003
+ Pandas-compat for np.shares_memory.
1004
+ """
1005
+ if isinstance(left, np.ndarray) and isinstance(right, np.ndarray):
1006
+ return np.shares_memory(left, right)
1007
+ elif isinstance(left, np.ndarray):
1008
+ # Call with reversed args to get to unpacking logic below.
1009
+ return shares_memory(right, left)
1010
+
1011
+ if isinstance(left, RangeIndex):
1012
+ return False
1013
+ if isinstance(left, MultiIndex):
1014
+ return shares_memory(left._codes, right)
1015
+ if isinstance(left, (Index, Series)):
1016
+ return shares_memory(left._values, right)
1017
+
1018
+ if isinstance(left, NDArrayBackedExtensionArray):
1019
+ return shares_memory(left._ndarray, right)
1020
+ if isinstance(left, pd.core.arrays.SparseArray):
1021
+ return shares_memory(left.sp_values, right)
1022
+ if isinstance(left, pd.core.arrays.IntervalArray):
1023
+ return shares_memory(left._left, right) or shares_memory(left._right, right)
1024
+
1025
+ if isinstance(left, ExtensionArray) and left.dtype == "string[pyarrow]":
1026
+ # https://github.com/pandas-dev/pandas/pull/43930#discussion_r736862669
1027
+ left = cast("ArrowExtensionArray", left)
1028
+ if isinstance(right, ExtensionArray) and right.dtype == "string[pyarrow]":
1029
+ right = cast("ArrowExtensionArray", right)
1030
+ left_pa_data = left._data
1031
+ right_pa_data = right._data
1032
+ left_buf1 = left_pa_data.chunk(0).buffers()[1]
1033
+ right_buf1 = right_pa_data.chunk(0).buffers()[1]
1034
+ return left_buf1 == right_buf1
1035
+
1036
+ if isinstance(left, BaseMaskedArray) and isinstance(right, BaseMaskedArray):
1037
+ # By convention, we'll say these share memory if they share *either*
1038
+ # the _data or the _mask
1039
+ return np.shares_memory(left._data, right._data) or np.shares_memory(
1040
+ left._mask, right._mask
1041
+ )
1042
+
1043
+ if isinstance(left, DataFrame) and len(left._mgr.arrays) == 1:
1044
+ arr = left._mgr.arrays[0]
1045
+ return shares_memory(arr, right)
1046
+
1047
+ raise NotImplementedError(type(left), type(right))
1048
+
1049
+
1050
+ __all__ = [
1051
+ "ALL_INT_EA_DTYPES",
1052
+ "ALL_INT_NUMPY_DTYPES",
1053
+ "ALL_NUMPY_DTYPES",
1054
+ "ALL_REAL_NUMPY_DTYPES",
1055
+ "all_timeseries_index_generator",
1056
+ "assert_almost_equal",
1057
+ "assert_attr_equal",
1058
+ "assert_categorical_equal",
1059
+ "assert_class_equal",
1060
+ "assert_contains_all",
1061
+ "assert_copy",
1062
+ "assert_datetime_array_equal",
1063
+ "assert_dict_equal",
1064
+ "assert_equal",
1065
+ "assert_extension_array_equal",
1066
+ "assert_frame_equal",
1067
+ "assert_index_equal",
1068
+ "assert_indexing_slices_equivalent",
1069
+ "assert_interval_array_equal",
1070
+ "assert_is_sorted",
1071
+ "assert_is_valid_plot_return_object",
1072
+ "assert_metadata_equivalent",
1073
+ "assert_numpy_array_equal",
1074
+ "assert_period_array_equal",
1075
+ "assert_produces_warning",
1076
+ "assert_series_equal",
1077
+ "assert_sp_array_equal",
1078
+ "assert_timedelta_array_equal",
1079
+ "at",
1080
+ "BOOL_DTYPES",
1081
+ "box_expected",
1082
+ "BYTES_DTYPES",
1083
+ "can_set_locale",
1084
+ "close",
1085
+ "COMPLEX_DTYPES",
1086
+ "convert_rows_list_to_csv_str",
1087
+ "DATETIME64_DTYPES",
1088
+ "decompress_file",
1089
+ "EMPTY_STRING_PATTERN",
1090
+ "ENDIAN",
1091
+ "ensure_clean",
1092
+ "ensure_safe_environment_variables",
1093
+ "equalContents",
1094
+ "external_error_raised",
1095
+ "FLOAT_EA_DTYPES",
1096
+ "FLOAT_NUMPY_DTYPES",
1097
+ "getCols",
1098
+ "get_cython_table_params",
1099
+ "get_dtype",
1100
+ "getitem",
1101
+ "get_locales",
1102
+ "getMixedTypeDict",
1103
+ "get_obj",
1104
+ "get_op_from_name",
1105
+ "getPeriodData",
1106
+ "getSeriesData",
1107
+ "getTimeSeriesData",
1108
+ "iat",
1109
+ "iloc",
1110
+ "index_subclass_makers_generator",
1111
+ "loc",
1112
+ "makeBoolIndex",
1113
+ "makeCategoricalIndex",
1114
+ "makeCustomDataframe",
1115
+ "makeCustomIndex",
1116
+ "makeDataFrame",
1117
+ "makeDateIndex",
1118
+ "makeFloatIndex",
1119
+ "makeFloatSeries",
1120
+ "makeIntervalIndex",
1121
+ "makeIntIndex",
1122
+ "makeMissingDataframe",
1123
+ "makeMixedDataFrame",
1124
+ "makeMultiIndex",
1125
+ "makeNumericIndex",
1126
+ "makeObjectSeries",
1127
+ "makePeriodFrame",
1128
+ "makePeriodIndex",
1129
+ "makePeriodSeries",
1130
+ "make_rand_series",
1131
+ "makeRangeIndex",
1132
+ "makeStringIndex",
1133
+ "makeStringSeries",
1134
+ "makeTimeDataFrame",
1135
+ "makeTimedeltaIndex",
1136
+ "makeTimeSeries",
1137
+ "makeUIntIndex",
1138
+ "maybe_produces_warning",
1139
+ "NARROW_NP_DTYPES",
1140
+ "network",
1141
+ "NP_NAT_OBJECTS",
1142
+ "NULL_OBJECTS",
1143
+ "OBJECT_DTYPES",
1144
+ "raise_assert_detail",
1145
+ "rands",
1146
+ "reset_display_options",
1147
+ "raises_chained_assignment_error",
1148
+ "round_trip_localpath",
1149
+ "round_trip_pathlib",
1150
+ "round_trip_pickle",
1151
+ "setitem",
1152
+ "set_locale",
1153
+ "set_timezone",
1154
+ "shares_memory",
1155
+ "SIGNED_INT_EA_DTYPES",
1156
+ "SIGNED_INT_NUMPY_DTYPES",
1157
+ "STRING_DTYPES",
1158
+ "SubclassedCategorical",
1159
+ "SubclassedDataFrame",
1160
+ "SubclassedSeries",
1161
+ "TIMEDELTA64_DTYPES",
1162
+ "to_array",
1163
+ "UNSIGNED_INT_EA_DTYPES",
1164
+ "UNSIGNED_INT_NUMPY_DTYPES",
1165
+ "use_numexpr",
1166
+ "with_csv_dialect",
1167
+ "write_to_compressed",
1168
+ ]
videochat2/lib/python3.10/site-packages/pandas/_testing/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (31.1 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/_testing/__pycache__/_hypothesis.cpython-310.pyc ADDED
Binary file (1.74 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/_testing/__pycache__/_io.cpython-310.pyc ADDED
Binary file (11.1 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/_testing/__pycache__/_random.cpython-310.pyc ADDED
Binary file (1.01 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/_testing/__pycache__/_warnings.cpython-310.pyc ADDED
Binary file (6.32 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/_testing/__pycache__/asserters.cpython-310.pyc ADDED
Binary file (32.2 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/_testing/__pycache__/compat.cpython-310.pyc ADDED
Binary file (819 Bytes). View file
 
videochat2/lib/python3.10/site-packages/pandas/_testing/__pycache__/contexts.cpython-310.pyc ADDED
Binary file (5.6 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/_testing/_hypothesis.py ADDED
@@ -0,0 +1,89 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Hypothesis data generator helpers.
3
+ """
4
+ from datetime import datetime
5
+
6
+ from hypothesis import strategies as st
7
+ from hypothesis.extra.dateutil import timezones as dateutil_timezones
8
+ from hypothesis.extra.pytz import timezones as pytz_timezones
9
+
10
+ from pandas.compat import is_platform_windows
11
+
12
+ import pandas as pd
13
+
14
+ from pandas.tseries.offsets import (
15
+ BMonthBegin,
16
+ BMonthEnd,
17
+ BQuarterBegin,
18
+ BQuarterEnd,
19
+ BYearBegin,
20
+ BYearEnd,
21
+ MonthBegin,
22
+ MonthEnd,
23
+ QuarterBegin,
24
+ QuarterEnd,
25
+ YearBegin,
26
+ YearEnd,
27
+ )
28
+
29
+ OPTIONAL_INTS = st.lists(st.one_of(st.integers(), st.none()), max_size=10, min_size=3)
30
+
31
+ OPTIONAL_FLOATS = st.lists(st.one_of(st.floats(), st.none()), max_size=10, min_size=3)
32
+
33
+ OPTIONAL_TEXT = st.lists(st.one_of(st.none(), st.text()), max_size=10, min_size=3)
34
+
35
+ OPTIONAL_DICTS = st.lists(
36
+ st.one_of(st.none(), st.dictionaries(st.text(), st.integers())),
37
+ max_size=10,
38
+ min_size=3,
39
+ )
40
+
41
+ OPTIONAL_LISTS = st.lists(
42
+ st.one_of(st.none(), st.lists(st.text(), max_size=10, min_size=3)),
43
+ max_size=10,
44
+ min_size=3,
45
+ )
46
+
47
+ OPTIONAL_ONE_OF_ALL = st.one_of(
48
+ OPTIONAL_DICTS, OPTIONAL_FLOATS, OPTIONAL_INTS, OPTIONAL_LISTS, OPTIONAL_TEXT
49
+ )
50
+
51
+ if is_platform_windows():
52
+ DATETIME_NO_TZ = st.datetimes(min_value=datetime(1900, 1, 1))
53
+ else:
54
+ DATETIME_NO_TZ = st.datetimes()
55
+
56
+ DATETIME_JAN_1_1900_OPTIONAL_TZ = st.datetimes(
57
+ min_value=pd.Timestamp(1900, 1, 1).to_pydatetime(),
58
+ max_value=pd.Timestamp(1900, 1, 1).to_pydatetime(),
59
+ timezones=st.one_of(st.none(), dateutil_timezones(), pytz_timezones()),
60
+ )
61
+
62
+ DATETIME_IN_PD_TIMESTAMP_RANGE_NO_TZ = st.datetimes(
63
+ min_value=pd.Timestamp.min.to_pydatetime(warn=False),
64
+ max_value=pd.Timestamp.max.to_pydatetime(warn=False),
65
+ )
66
+
67
+ INT_NEG_999_TO_POS_999 = st.integers(-999, 999)
68
+
69
+ # The strategy for each type is registered in conftest.py, as they don't carry
70
+ # enough runtime information (e.g. type hints) to infer how to build them.
71
+ YQM_OFFSET = st.one_of(
72
+ *map(
73
+ st.from_type,
74
+ [
75
+ MonthBegin,
76
+ MonthEnd,
77
+ BMonthBegin,
78
+ BMonthEnd,
79
+ QuarterBegin,
80
+ QuarterEnd,
81
+ BQuarterBegin,
82
+ BQuarterEnd,
83
+ YearBegin,
84
+ YearEnd,
85
+ BYearBegin,
86
+ BYearEnd,
87
+ ],
88
+ )
89
+ )
videochat2/lib/python3.10/site-packages/pandas/_testing/_io.py ADDED
@@ -0,0 +1,435 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import bz2
4
+ from functools import wraps
5
+ import gzip
6
+ import io
7
+ import socket
8
+ import tarfile
9
+ from typing import (
10
+ TYPE_CHECKING,
11
+ Any,
12
+ Callable,
13
+ )
14
+ import zipfile
15
+
16
+ from pandas._typing import (
17
+ FilePath,
18
+ ReadPickleBuffer,
19
+ )
20
+ from pandas.compat import get_lzma_file
21
+ from pandas.compat._optional import import_optional_dependency
22
+
23
+ import pandas as pd
24
+ from pandas._testing._random import rands
25
+ from pandas._testing.contexts import ensure_clean
26
+
27
+ from pandas.io.common import urlopen
28
+
29
+ if TYPE_CHECKING:
30
+ from pandas import (
31
+ DataFrame,
32
+ Series,
33
+ )
34
+
35
+ # skip tests on exceptions with these messages
36
+ _network_error_messages = (
37
+ # 'urlopen error timed out',
38
+ # 'timeout: timed out',
39
+ # 'socket.timeout: timed out',
40
+ "timed out",
41
+ "Server Hangup",
42
+ "HTTP Error 503: Service Unavailable",
43
+ "502: Proxy Error",
44
+ "HTTP Error 502: internal error",
45
+ "HTTP Error 502",
46
+ "HTTP Error 503",
47
+ "HTTP Error 403",
48
+ "HTTP Error 400",
49
+ "Temporary failure in name resolution",
50
+ "Name or service not known",
51
+ "Connection refused",
52
+ "certificate verify",
53
+ )
54
+
55
+ # or this e.errno/e.reason.errno
56
+ _network_errno_vals = (
57
+ 101, # Network is unreachable
58
+ 111, # Connection refused
59
+ 110, # Connection timed out
60
+ 104, # Connection reset Error
61
+ 54, # Connection reset by peer
62
+ 60, # urllib.error.URLError: [Errno 60] Connection timed out
63
+ )
64
+
65
+ # Both of the above shouldn't mask real issues such as 404's
66
+ # or refused connections (changed DNS).
67
+ # But some tests (test_data yahoo) contact incredibly flakey
68
+ # servers.
69
+
70
+ # and conditionally raise on exception types in _get_default_network_errors
71
+
72
+
73
+ def _get_default_network_errors():
74
+ # Lazy import for http.client & urllib.error
75
+ # because it imports many things from the stdlib
76
+ import http.client
77
+ import urllib.error
78
+
79
+ return (
80
+ OSError,
81
+ http.client.HTTPException,
82
+ TimeoutError,
83
+ urllib.error.URLError,
84
+ socket.timeout,
85
+ )
86
+
87
+
88
+ def optional_args(decorator):
89
+ """
90
+ allows a decorator to take optional positional and keyword arguments.
91
+ Assumes that taking a single, callable, positional argument means that
92
+ it is decorating a function, i.e. something like this::
93
+
94
+ @my_decorator
95
+ def function(): pass
96
+
97
+ Calls decorator with decorator(f, *args, **kwargs)
98
+ """
99
+
100
+ @wraps(decorator)
101
+ def wrapper(*args, **kwargs):
102
+ def dec(f):
103
+ return decorator(f, *args, **kwargs)
104
+
105
+ is_decorating = not kwargs and len(args) == 1 and callable(args[0])
106
+ if is_decorating:
107
+ f = args[0]
108
+ args = ()
109
+ return dec(f)
110
+ else:
111
+ return dec
112
+
113
+ return wrapper
114
+
115
+
116
+ # error: Untyped decorator makes function "network" untyped
117
+ @optional_args # type: ignore[misc]
118
+ def network(
119
+ t,
120
+ url: str = "https://www.google.com",
121
+ raise_on_error: bool = False,
122
+ check_before_test: bool = False,
123
+ error_classes=None,
124
+ skip_errnos=_network_errno_vals,
125
+ _skip_on_messages=_network_error_messages,
126
+ ):
127
+ """
128
+ Label a test as requiring network connection and, if an error is
129
+ encountered, only raise if it does not find a network connection.
130
+
131
+ In comparison to ``network``, this assumes an added contract to your test:
132
+ you must assert that, under normal conditions, your test will ONLY fail if
133
+ it does not have network connectivity.
134
+
135
+ You can call this in 3 ways: as a standard decorator, with keyword
136
+ arguments, or with a positional argument that is the url to check.
137
+
138
+ Parameters
139
+ ----------
140
+ t : callable
141
+ The test requiring network connectivity.
142
+ url : path
143
+ The url to test via ``pandas.io.common.urlopen`` to check
144
+ for connectivity. Defaults to 'https://www.google.com'.
145
+ raise_on_error : bool
146
+ If True, never catches errors.
147
+ check_before_test : bool
148
+ If True, checks connectivity before running the test case.
149
+ error_classes : tuple or Exception
150
+ error classes to ignore. If not in ``error_classes``, raises the error.
151
+ defaults to OSError. Be careful about changing the error classes here.
152
+ skip_errnos : iterable of int
153
+ Any exception that has .errno or .reason.erno set to one
154
+ of these values will be skipped with an appropriate
155
+ message.
156
+ _skip_on_messages: iterable of string
157
+ any exception e for which one of the strings is
158
+ a substring of str(e) will be skipped with an appropriate
159
+ message. Intended to suppress errors where an errno isn't available.
160
+
161
+ Notes
162
+ -----
163
+ * ``raise_on_error`` supersedes ``check_before_test``
164
+
165
+ Returns
166
+ -------
167
+ t : callable
168
+ The decorated test ``t``, with checks for connectivity errors.
169
+
170
+ Example
171
+ -------
172
+
173
+ Tests decorated with @network will fail if it's possible to make a network
174
+ connection to another URL (defaults to google.com)::
175
+
176
+ >>> from pandas import _testing as tm
177
+ >>> @tm.network
178
+ ... def test_network():
179
+ ... with pd.io.common.urlopen("rabbit://bonanza.com"):
180
+ ... pass
181
+ >>> test_network() # doctest: +SKIP
182
+ Traceback
183
+ ...
184
+ URLError: <urlopen error unknown url type: rabbit>
185
+
186
+ You can specify alternative URLs::
187
+
188
+ >>> @tm.network("https://www.yahoo.com")
189
+ ... def test_something_with_yahoo():
190
+ ... raise OSError("Failure Message")
191
+ >>> test_something_with_yahoo() # doctest: +SKIP
192
+ Traceback (most recent call last):
193
+ ...
194
+ OSError: Failure Message
195
+
196
+ If you set check_before_test, it will check the url first and not run the
197
+ test on failure::
198
+
199
+ >>> @tm.network("failing://url.blaher", check_before_test=True)
200
+ ... def test_something():
201
+ ... print("I ran!")
202
+ ... raise ValueError("Failure")
203
+ >>> test_something() # doctest: +SKIP
204
+ Traceback (most recent call last):
205
+ ...
206
+
207
+ Errors not related to networking will always be raised.
208
+ """
209
+ import pytest
210
+
211
+ if error_classes is None:
212
+ error_classes = _get_default_network_errors()
213
+
214
+ t.network = True
215
+
216
+ @wraps(t)
217
+ def wrapper(*args, **kwargs):
218
+ if (
219
+ check_before_test
220
+ and not raise_on_error
221
+ and not can_connect(url, error_classes)
222
+ ):
223
+ pytest.skip(
224
+ f"May not have network connectivity because cannot connect to {url}"
225
+ )
226
+ try:
227
+ return t(*args, **kwargs)
228
+ except Exception as err:
229
+ errno = getattr(err, "errno", None)
230
+ if not errno and hasattr(errno, "reason"):
231
+ # error: "Exception" has no attribute "reason"
232
+ errno = getattr(err.reason, "errno", None) # type: ignore[attr-defined]
233
+
234
+ if errno in skip_errnos:
235
+ pytest.skip(f"Skipping test due to known errno and error {err}")
236
+
237
+ e_str = str(err)
238
+
239
+ if any(m.lower() in e_str.lower() for m in _skip_on_messages):
240
+ pytest.skip(
241
+ f"Skipping test because exception message is known and error {err}"
242
+ )
243
+
244
+ if not isinstance(err, error_classes) or raise_on_error:
245
+ raise
246
+ pytest.skip(f"Skipping test due to lack of connectivity and error {err}")
247
+
248
+ return wrapper
249
+
250
+
251
def can_connect(url, error_classes=None) -> bool:
    """
    Try to connect to the given url. True if succeeds, False if OSError
    raised

    Parameters
    ----------
    url : basestring
        The URL to try to connect to

    Returns
    -------
    connectable : bool
        Return True if no OSError (unable to connect) or URLError (bad url) was
        raised
    """
    if error_classes is None:
        error_classes = _get_default_network_errors()

    try:
        # Timeout just in case rate-limiting is applied
        with urlopen(url, timeout=20) as response:
            reachable = response.status == 200
    except error_classes:
        return False
    return reachable
279
+
280
+
281
+ # ------------------------------------------------------------------
282
+ # File-IO
283
+
284
+
285
def round_trip_pickle(
    obj: Any, path: FilePath | ReadPickleBuffer | None = None
) -> DataFrame | Series:
    """
    Pickle an object and then read it again.

    Parameters
    ----------
    obj : any object
        The object to pickle and then re-read.
    path : str, path object or file-like object, default None
        The path where the pickled object is written and then read.

    Returns
    -------
    pandas object
        The original object that was pickled and then re-read.
    """
    # Fall back to a randomized filename so parallel test runs don't collide.
    target = path if path is not None else f"__{rands(10)}__.pickle"
    with ensure_clean(target) as temp_path:
        pd.to_pickle(obj, temp_path)
        return pd.read_pickle(temp_path)
309
+
310
+
311
def round_trip_pathlib(writer, reader, path: str | None = None):
    """
    Write an object to file specified by a pathlib.Path and read it back.

    Parameters
    ----------
    writer : callable bound to pandas object
        IO writing function (e.g. DataFrame.to_csv )
    reader : callable
        IO reading function (e.g. pd.read_csv )
    path : str, default None
        The path where the object is written and then read.

    Returns
    -------
    pandas object
        The original object that was serialized and then re-read.
    """
    # pathlib is part of the standard library (Python >= 3.4), so the previous
    # ``pytest.importorskip("pathlib")`` could never skip and only added a
    # pointless runtime dependency on pytest.
    from pathlib import Path

    if path is None:
        path = "___pathlib___"
    with ensure_clean(path) as path:
        writer(Path(path))
        obj = reader(Path(path))
        return obj
338
+
339
+
340
def round_trip_localpath(writer, reader, path: str | None = None):
    """
    Write an object to file specified by a py.path LocalPath and read it back.

    Parameters
    ----------
    writer : callable bound to pandas object
        IO writing function (e.g. DataFrame.to_csv )
    reader : callable
        IO reading function (e.g. pd.read_csv )
    path : str, default None
        The path where the object is written and then read.

    Returns
    -------
    pandas object
        The original object that was serialized and then re-read.
    """
    import pytest

    # ``py`` is an optional third-party package; skip the test if absent.
    LocalPath = pytest.importorskip("py.path").local

    if path is None:
        path = "___localpath___"
    with ensure_clean(path) as temp_path:
        writer(LocalPath(temp_path))
        return reader(LocalPath(temp_path))
367
+
368
+
369
def write_to_compressed(compression, path, data, dest: str = "test"):
    """
    Write data to a compressed file.

    Parameters
    ----------
    compression : {'gzip', 'bz2', 'zip', 'xz', 'zstd'}
        The compression type to use.
    path : str
        The file path to write the data.
    data : str
        The data to write.
    dest : str, default "test"
        The destination file (for ZIP only)

    Raises
    ------
    ValueError : An invalid compression value was passed in.
    """
    args: tuple[Any, ...] = (data,)
    mode = "wb"
    method = "write"
    compress_method: Callable

    if compression == "zip":
        compress_method = zipfile.ZipFile
        mode = "w"
        args = (dest, data)
        method = "writestr"
    elif compression == "tar":
        compress_method = tarfile.TarFile
        mode = "w"
        # Renamed from ``file``/``bytes``: the latter shadowed the ``bytes``
        # builtin inside this function.
        tarinfo = tarfile.TarInfo(name=dest)
        fileobj = io.BytesIO(data)
        tarinfo.size = len(data)
        args = (tarinfo, fileobj)
        method = "addfile"
    elif compression == "gzip":
        compress_method = gzip.GzipFile
    elif compression == "bz2":
        compress_method = bz2.BZ2File
    elif compression == "zstd":
        compress_method = import_optional_dependency("zstandard").open
    elif compression == "xz":
        compress_method = get_lzma_file()
    else:
        raise ValueError(f"Unrecognized compression type: {compression}")

    with compress_method(path, mode=mode) as f:
        getattr(f, method)(*args)
419
+
420
+
421
+ # ------------------------------------------------------------------
422
+ # Plotting
423
+
424
+
425
def close(fignum=None) -> None:
    """Close the matplotlib figure ``fignum``, or every open figure if None."""
    from matplotlib.pyplot import (
        close as _close,
        get_fignums,
    )

    if fignum is not None:
        _close(fignum)
    else:
        for num in get_fignums():
            _close(num)
videochat2/lib/python3.10/site-packages/pandas/_testing/_random.py ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import string
2
+
3
+ import numpy as np
4
+
5
+ from pandas._typing import NpDtype
6
+
7
# Pool of candidate characters (ASCII letters + digits) stored as an array of
# single-character unicode strings, so ``view((np.str_, n))`` can regroup them.
RANDS_CHARS = np.array(list(string.ascii_letters + string.digits), dtype=(np.str_, 1))


def rands_array(nchars, size, dtype: NpDtype = "O", replace: bool = True) -> np.ndarray:
    """
    Generate an array of random alphanumeric strings.

    Note: despite the historical wording ("byte strings"), the elements are
    unicode ``str`` objects of length ``nchars``.

    Parameters
    ----------
    nchars : int
        Length of each generated string.
    size : int or tuple of int
        Shape of the returned array.
    dtype : NpDtype, default "O"
        dtype of the returned array.
    replace : bool, default True
        Whether characters are sampled with replacement.
    """
    retval = (
        np.random.choice(RANDS_CHARS, size=nchars * np.prod(size), replace=replace)
        # Regroup the flat run of single characters into fixed-width strings.
        .view((np.str_, nchars))
        .reshape(size)
    )
    return retval.astype(dtype)
20
+
21
+
22
def rands(nchars) -> str:
    """
    Generate one random alphanumeric string of length ``nchars``.

    Note: the result is a unicode ``str`` (the historical "byte string"
    wording was inaccurate).

    See `rands_array` if you want to create an array of random strings.

    """
    return "".join(np.random.choice(RANDS_CHARS, nchars))
videochat2/lib/python3.10/site-packages/pandas/_testing/_warnings.py ADDED
@@ -0,0 +1,216 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ from contextlib import (
4
+ contextmanager,
5
+ nullcontext,
6
+ )
7
+ import re
8
+ import sys
9
+ from typing import (
10
+ Generator,
11
+ Literal,
12
+ Sequence,
13
+ Type,
14
+ cast,
15
+ )
16
+ import warnings
17
+
18
+
19
@contextmanager
def assert_produces_warning(
    expected_warning: type[Warning] | bool | tuple[type[Warning], ...] | None = Warning,
    filter_level: Literal[
        "error", "ignore", "always", "default", "module", "once"
    ] = "always",
    check_stacklevel: bool = True,
    raise_on_extra_warnings: bool = True,
    match: str | None = None,
) -> Generator[list[warnings.WarningMessage], None, None]:
    """
    Context manager for running code expected to either raise a specific warning,
    multiple specific warnings, or not raise any warnings. Verifies that the code
    raises the expected warning(s), and that it does not raise any other unexpected
    warnings. It is basically a wrapper around ``warnings.catch_warnings``.

    Parameters
    ----------
    expected_warning : {Warning, False, tuple[Warning, ...], None}, default Warning
        The type of Exception raised. ``exception.Warning`` is the base
        class for all warnings. To raise multiple types of exceptions,
        pass them as a tuple. To check that no warning is returned,
        specify ``False`` or ``None``.
    filter_level : str or None, default "always"
        Specifies whether warnings are ignored, displayed, or turned
        into errors.
        Valid values are:

        * "error" - turns matching warnings into exceptions
        * "ignore" - discard the warning
        * "always" - always emit a warning
        * "default" - print the warning the first time it is generated
          from each location
        * "module" - print the warning the first time it is generated
          from each module
        * "once" - print the warning the first time it is generated

    check_stacklevel : bool, default True
        If True, displays the line that called the function containing
        the warning to show where the function is called. Otherwise, the
        line that implements the function is displayed.
    raise_on_extra_warnings : bool, default True
        Whether extra warnings not of the type `expected_warning` should
        cause the test to fail.
    match : str, optional
        Match warning message.

    Examples
    --------
    >>> import warnings
    >>> with assert_produces_warning():
    ...     warnings.warn(UserWarning())
    ...
    >>> with assert_produces_warning(False):
    ...     warnings.warn(RuntimeWarning())
    ...
    Traceback (most recent call last):
        ...
    AssertionError: Caused unexpected warning(s): ['RuntimeWarning'].
    >>> with assert_produces_warning(UserWarning):
    ...     warnings.warn(RuntimeWarning())
    Traceback (most recent call last):
        ...
    AssertionError: Did not see expected warning of class 'UserWarning'.

    .. warning:: This is *not* thread-safe.
    """
    __tracebackhide__ = True

    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter(filter_level)
        try:
            yield w
        finally:
            # Checks run even when the body raised, so a failing test still
            # reports missing/extra warnings.
            if expected_warning:
                expected_warning = cast(Type[Warning], expected_warning)
                _assert_caught_expected_warning(
                    caught_warnings=w,
                    expected_warning=expected_warning,
                    match=match,
                    check_stacklevel=check_stacklevel,
                )
            if raise_on_extra_warnings:
                _assert_caught_no_extra_warnings(
                    caught_warnings=w,
                    expected_warning=expected_warning,
                )
+
107
+
108
def maybe_produces_warning(warning: type[Warning], condition: bool, **kwargs):
    """
    Return a context manager that possibly checks a warning based on the condition.

    When ``condition`` is false, a no-op ``nullcontext`` is returned instead.
    """
    return assert_produces_warning(warning, **kwargs) if condition else nullcontext()
+ return nullcontext()
116
+
117
+
118
+ def _assert_caught_expected_warning(
119
+ *,
120
+ caught_warnings: Sequence[warnings.WarningMessage],
121
+ expected_warning: type[Warning],
122
+ match: str | None,
123
+ check_stacklevel: bool,
124
+ ) -> None:
125
+ """Assert that there was the expected warning among the caught warnings."""
126
+ saw_warning = False
127
+ matched_message = False
128
+ unmatched_messages = []
129
+
130
+ for actual_warning in caught_warnings:
131
+ if issubclass(actual_warning.category, expected_warning):
132
+ saw_warning = True
133
+
134
+ if check_stacklevel:
135
+ _assert_raised_with_correct_stacklevel(actual_warning)
136
+
137
+ if match is not None:
138
+ if re.search(match, str(actual_warning.message)):
139
+ matched_message = True
140
+ else:
141
+ unmatched_messages.append(actual_warning.message)
142
+
143
+ if not saw_warning:
144
+ raise AssertionError(
145
+ f"Did not see expected warning of class "
146
+ f"{repr(expected_warning.__name__)}"
147
+ )
148
+
149
+ if match and not matched_message:
150
+ raise AssertionError(
151
+ f"Did not see warning {repr(expected_warning.__name__)} "
152
+ f"matching '{match}'. The emitted warning messages are "
153
+ f"{unmatched_messages}"
154
+ )
155
+
156
+
157
def _assert_caught_no_extra_warnings(
    *,
    caught_warnings: Sequence[warnings.WarningMessage],
    expected_warning: type[Warning] | bool | tuple[type[Warning], ...] | None,
) -> None:
    """Assert that no extra warnings apart from the expected ones are caught."""
    extra_warnings = []

    for caught in caught_warnings:
        if not _is_unexpected_warning(caught, expected_warning):
            continue
        # GH#38630 pytest.filterwarnings does not suppress these.
        if caught.category == ResourceWarning:
            # GH 44732: Don't make the CI flaky by filtering SSL-related
            # ResourceWarning from dependencies
            if "unclosed <ssl.SSLSocket" in str(caught.message):
                continue
            # GH 44844: Matplotlib leaves font files open during the entire
            # process upon import. Don't make CI flaky if ResourceWarning
            # raised due to these open files.
            if any("matplotlib" in mod for mod in sys.modules):
                continue
        extra_warnings.append(
            (
                caught.category.__name__,
                caught.message,
                caught.filename,
                caught.lineno,
            )
        )

    if extra_warnings:
        raise AssertionError(f"Caused unexpected warning(s): {repr(extra_warnings)}")
+
190
+
191
+ def _is_unexpected_warning(
192
+ actual_warning: warnings.WarningMessage,
193
+ expected_warning: type[Warning] | bool | tuple[type[Warning], ...] | None,
194
+ ) -> bool:
195
+ """Check if the actual warning issued is unexpected."""
196
+ if actual_warning and not expected_warning:
197
+ return True
198
+ expected_warning = cast(Type[Warning], expected_warning)
199
+ return bool(not issubclass(actual_warning.category, expected_warning))
200
+
201
+
202
def _assert_raised_with_correct_stacklevel(
    actual_warning: warnings.WarningMessage,
) -> None:
    # Assert the warning's stacklevel pointed at the user's calling frame,
    # i.e. the recorded filename matches the file four frames up from here.
    from inspect import (
        getframeinfo,
        stack,
    )

    # NOTE(review): the index 4 is tightly coupled to the exact call chain
    # through assert_produces_warning / _assert_caught_expected_warning and the
    # contextmanager machinery -- confirm before reusing this helper elsewhere.
    caller = getframeinfo(stack()[4][0])
    msg = (
        "Warning not set with correct stacklevel. "
        f"File where warning is raised: {actual_warning.filename} != "
        f"{caller.filename}. Warning message: {actual_warning.message}"
    )
    assert actual_warning.filename == caller.filename, msg
videochat2/lib/python3.10/site-packages/pandas/_testing/asserters.py ADDED
@@ -0,0 +1,1378 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import operator
4
+ from typing import (
5
+ Literal,
6
+ cast,
7
+ )
8
+
9
+ import numpy as np
10
+
11
+ from pandas._libs.missing import is_matching_na
12
+ from pandas._libs.sparse import SparseIndex
13
+ import pandas._libs.testing as _testing
14
+ from pandas._libs.tslibs.np_datetime import compare_mismatched_resolutions
15
+
16
+ from pandas.core.dtypes.common import (
17
+ is_bool,
18
+ is_categorical_dtype,
19
+ is_extension_array_dtype,
20
+ is_integer_dtype,
21
+ is_interval_dtype,
22
+ is_number,
23
+ is_numeric_dtype,
24
+ needs_i8_conversion,
25
+ )
26
+ from pandas.core.dtypes.dtypes import (
27
+ CategoricalDtype,
28
+ DatetimeTZDtype,
29
+ PandasDtype,
30
+ )
31
+ from pandas.core.dtypes.missing import array_equivalent
32
+
33
+ import pandas as pd
34
+ from pandas import (
35
+ Categorical,
36
+ DataFrame,
37
+ DatetimeIndex,
38
+ Index,
39
+ IntervalIndex,
40
+ MultiIndex,
41
+ PeriodIndex,
42
+ RangeIndex,
43
+ Series,
44
+ TimedeltaIndex,
45
+ )
46
+ from pandas.core.algorithms import take_nd
47
+ from pandas.core.arrays import (
48
+ DatetimeArray,
49
+ ExtensionArray,
50
+ IntervalArray,
51
+ PeriodArray,
52
+ TimedeltaArray,
53
+ )
54
+ from pandas.core.arrays.datetimelike import DatetimeLikeArrayMixin
55
+ from pandas.core.arrays.string_ import StringDtype
56
+ from pandas.core.indexes.api import safe_sort_index
57
+
58
+ from pandas.io.formats.printing import pprint_thing
59
+
60
+
61
def assert_almost_equal(
    left,
    right,
    check_dtype: bool | Literal["equiv"] = "equiv",
    rtol: float = 1.0e-5,
    atol: float = 1.0e-8,
    **kwargs,
) -> None:
    """
    Check that the left and right objects are approximately equal.

    By approximately equal, we refer to objects that are numbers or that
    contain numbers which may be equivalent to specific levels of precision.

    Parameters
    ----------
    left : object
    right : object
    check_dtype : bool or {'equiv'}, default 'equiv'
        Check dtype if both a and b are the same type. If 'equiv' is passed in,
        then `RangeIndex` and `Index` with int64 dtype are also considered
        equivalent when doing type checking.
    rtol : float, default 1e-5
        Relative tolerance.

        .. versionadded:: 1.1.0
    atol : float, default 1e-8
        Absolute tolerance.

        .. versionadded:: 1.1.0
    """
    # Dispatch to the specialized asserter for pandas containers, always with
    # check_exact=False so the rtol/atol tolerances apply.
    if isinstance(left, Index):
        assert_index_equal(
            left,
            right,
            check_exact=False,
            exact=check_dtype,
            rtol=rtol,
            atol=atol,
            **kwargs,
        )

    elif isinstance(left, Series):
        assert_series_equal(
            left,
            right,
            check_exact=False,
            check_dtype=check_dtype,
            rtol=rtol,
            atol=atol,
            **kwargs,
        )

    elif isinstance(left, DataFrame):
        assert_frame_equal(
            left,
            right,
            check_exact=False,
            check_dtype=check_dtype,
            rtol=rtol,
            atol=atol,
            **kwargs,
        )

    else:
        # Other sequences.
        if check_dtype:
            if is_number(left) and is_number(right):
                # Do not compare numeric classes, like np.float64 and float.
                pass
            elif is_bool(left) and is_bool(right):
                # Do not compare bool classes, like np.bool_ and bool.
                pass
            else:
                if isinstance(left, np.ndarray) or isinstance(right, np.ndarray):
                    obj = "numpy array"
                else:
                    obj = "Input"
                assert_class_equal(left, right, obj=obj)

        # if we have "equiv", this becomes True
        _testing.assert_almost_equal(
            left, right, check_dtype=bool(check_dtype), rtol=rtol, atol=atol, **kwargs
        )
145
+
146
+
147
+ def _check_isinstance(left, right, cls):
148
+ """
149
+ Helper method for our assert_* methods that ensures that
150
+ the two objects being compared have the right type before
151
+ proceeding with the comparison.
152
+
153
+ Parameters
154
+ ----------
155
+ left : The first object being compared.
156
+ right : The second object being compared.
157
+ cls : The class type to check against.
158
+
159
+ Raises
160
+ ------
161
+ AssertionError : Either `left` or `right` is not an instance of `cls`.
162
+ """
163
+ cls_name = cls.__name__
164
+
165
+ if not isinstance(left, cls):
166
+ raise AssertionError(
167
+ f"{cls_name} Expected type {cls}, found {type(left)} instead"
168
+ )
169
+ if not isinstance(right, cls):
170
+ raise AssertionError(
171
+ f"{cls_name} Expected type {cls}, found {type(right)} instead"
172
+ )
173
+
174
+
175
def assert_dict_equal(left, right, compare_keys: bool = True) -> None:
    """
    Check that two dicts are equal, raising ``AssertionError`` otherwise.

    ``compare_keys`` is forwarded to the C-level helper in
    ``pandas._libs.testing`` (presumably controlling whether the key sets
    must match exactly -- the implementation is not visible here).
    """
    _check_isinstance(left, right, dict)
    _testing.assert_dict_equal(left, right, compare_keys=compare_keys)
178
+
179
+
180
def assert_index_equal(
    left: Index,
    right: Index,
    exact: bool | str = "equiv",
    check_names: bool = True,
    check_exact: bool = True,
    check_categorical: bool = True,
    check_order: bool = True,
    rtol: float = 1.0e-5,
    atol: float = 1.0e-8,
    obj: str = "Index",
) -> None:
    """
    Check that left and right Index are equal.

    Parameters
    ----------
    left : Index
    right : Index
    exact : bool or {'equiv'}, default 'equiv'
        Whether to check the Index class, dtype and inferred_type
        are identical. If 'equiv', then RangeIndex can be substituted for
        Index with an int64 dtype as well.
    check_names : bool, default True
        Whether to check the names attribute.
    check_exact : bool, default True
        Whether to compare number exactly.
    check_categorical : bool, default True
        Whether to compare internal Categorical exactly.
    check_order : bool, default True
        Whether to compare the order of index entries as well as their values.
        If True, both indexes must contain the same elements, in the same order.
        If False, both indexes must contain the same elements, but in any order.

        .. versionadded:: 1.2.0
    rtol : float, default 1e-5
        Relative tolerance. Only used when check_exact is False.

        .. versionadded:: 1.1.0
    atol : float, default 1e-8
        Absolute tolerance. Only used when check_exact is False.

        .. versionadded:: 1.1.0
    obj : str, default 'Index'
        Specify object name being compared, internally used to show appropriate
        assertion message.

    Examples
    --------
    >>> from pandas import testing as tm
    >>> a = pd.Index([1, 2, 3])
    >>> b = pd.Index([1, 2, 3])
    >>> tm.assert_index_equal(a, b)
    """
    __tracebackhide__ = True

    def _check_types(left, right, obj: str = "Index") -> None:
        # Compare class, inferred_type and dtype; no-op when ``exact`` is falsy.
        if not exact:
            return

        assert_class_equal(left, right, exact=exact, obj=obj)
        assert_attr_equal("inferred_type", left, right, obj=obj)

        # Skip exact dtype checking when `check_categorical` is False
        if is_categorical_dtype(left.dtype) and is_categorical_dtype(right.dtype):
            if check_categorical:
                assert_attr_equal("dtype", left, right, obj=obj)
                assert_index_equal(left.categories, right.categories, exact=exact)
            return

        assert_attr_equal("dtype", left, right, obj=obj)

    def _get_ilevel_values(index, level):
        # accept level number only
        unique = index.levels[level]
        level_codes = index.codes[level]
        filled = take_nd(unique._values, level_codes, fill_value=unique._na_value)
        return unique._shallow_copy(filled, name=index.names[level])

    # instance validation
    _check_isinstance(left, right, Index)

    # class / dtype comparison
    _check_types(left, right, obj=obj)

    # level comparison
    if left.nlevels != right.nlevels:
        msg1 = f"{obj} levels are different"
        msg2 = f"{left.nlevels}, {left}"
        msg3 = f"{right.nlevels}, {right}"
        raise_assert_detail(obj, msg1, msg2, msg3)

    # length comparison
    if len(left) != len(right):
        msg1 = f"{obj} length are different"
        msg2 = f"{len(left)}, {left}"
        msg3 = f"{len(right)}, {right}"
        raise_assert_detail(obj, msg1, msg2, msg3)

    # If order doesn't matter then sort the index entries
    if not check_order:
        left = safe_sort_index(left)
        right = safe_sort_index(right)

    # MultiIndex special comparison for little-friendly error messages
    if left.nlevels > 1:
        left = cast(MultiIndex, left)
        right = cast(MultiIndex, right)

        for level in range(left.nlevels):
            # cannot use get_level_values here because it can change dtype
            llevel = _get_ilevel_values(left, level)
            rlevel = _get_ilevel_values(right, level)

            lobj = f"MultiIndex level [{level}]"
            assert_index_equal(
                llevel,
                rlevel,
                exact=exact,
                check_names=check_names,
                check_exact=check_exact,
                rtol=rtol,
                atol=atol,
                obj=lobj,
            )
            # get_level_values may change dtype
            _check_types(left.levels[level], right.levels[level], obj=obj)

    # skip exact index checking when `check_categorical` is False
    if check_exact and check_categorical:
        if not left.equals(right):
            mismatch = left._values != right._values

            if is_extension_array_dtype(mismatch):
                # treat NA comparison results as mismatches for the percentage
                mismatch = cast("ExtensionArray", mismatch).fillna(True)

            diff = np.sum(mismatch.astype(int)) * 100.0 / len(left)
            msg = f"{obj} values are different ({np.round(diff, 5)} %)"
            raise_assert_detail(obj, msg, left, right)
    else:
        # if we have "equiv", this becomes True
        exact_bool = bool(exact)
        _testing.assert_almost_equal(
            left.values,
            right.values,
            rtol=rtol,
            atol=atol,
            check_dtype=exact_bool,
            obj=obj,
            lobj=left,
            robj=right,
        )

    # metadata comparison
    if check_names:
        assert_attr_equal("names", left, right, obj=obj)
    if isinstance(left, PeriodIndex) or isinstance(right, PeriodIndex):
        assert_attr_equal("freq", left, right, obj=obj)
    if isinstance(left, IntervalIndex) or isinstance(right, IntervalIndex):
        assert_interval_array_equal(left._values, right._values)

    if check_categorical:
        if is_categorical_dtype(left.dtype) or is_categorical_dtype(right.dtype):
            assert_categorical_equal(left._values, right._values, obj=f"{obj} category")
344
+
345
+
346
def assert_class_equal(
    left, right, exact: bool | str = True, obj: str = "Input"
) -> None:
    """
    Checks classes are equal.
    """
    __tracebackhide__ = True

    def repr_class(x):
        # Show an Index's values in the failure message; otherwise just the
        # class name.
        return x if isinstance(x, Index) else type(x).__name__

    def is_class_equiv(idx: Index) -> bool:
        """Classes that are a RangeIndex (sub-)instance or exactly an `Index` .

        This only checks class equivalence. There is a separate check that the
        dtype is int64.
        """
        return type(idx) is Index or isinstance(idx, RangeIndex)

    if type(left) == type(right):
        return

    if exact == "equiv" and is_class_equiv(left) and is_class_equiv(right):
        return

    msg = f"{obj} classes are different"
    raise_assert_detail(obj, msg, repr_class(left), repr_class(right))
378
+
379
+
380
def assert_attr_equal(attr: str, left, right, obj: str = "Attributes") -> None:
    """
    Check attributes are equal. Both objects must have attribute.

    Parameters
    ----------
    attr : str
        Attribute name being compared.
    left : object
    right : object
    obj : str, default 'Attributes'
        Specify object name being compared, internally used to show appropriate
        assertion message
    """
    __tracebackhide__ = True

    lval = getattr(left, attr)
    rval = getattr(right, attr)

    if lval is rval or is_matching_na(lval, rval):
        # identical objects, or matching NA sentinels (np.nan, NaT, pd.NA, ...)
        return None

    try:
        result = lval == rval
    except TypeError:
        # datetimetz on rhs may raise TypeError
        result = False
    if (lval is pd.NA) ^ (rval is pd.NA):
        # exactly one side is pd.NA -> never considered equal
        result = False
    elif not isinstance(result, bool):
        # array-like comparison result: require every element equal
        result = result.all()

    if not result:
        raise_assert_detail(obj, f'Attribute "{attr}" are different', lval, rval)
    return None
417
+
418
+
419
def assert_is_valid_plot_return_object(objs) -> None:
    """Assert ``objs`` is a valid return value from a pandas plotting call."""
    import matplotlib.pyplot as plt

    if isinstance(objs, (Series, np.ndarray)):
        # Array-like of axes: every element must be an Axes (or dict).
        for el in objs.ravel():
            msg = (
                "one of 'objs' is not a matplotlib Axes instance, "
                f"type encountered {repr(type(el).__name__)}"
            )
            assert isinstance(el, (plt.Axes, dict)), msg
        return

    msg = (
        "objs is neither an ndarray of Artist instances nor a single "
        "ArtistArtist instance, tuple, or dict, 'objs' is a "
        f"{repr(type(objs).__name__)}"
    )
    assert isinstance(objs, (plt.Artist, tuple, dict)), msg
436
+
437
+
438
def assert_is_sorted(seq) -> None:
    """Assert that the sequence is sorted."""
    values = seq.values if isinstance(seq, (Index, Series)) else seq
    # sorting does not change precisions
    assert_numpy_array_equal(values, np.sort(np.array(values)))
444
+
445
+
446
def assert_categorical_equal(
    left,
    right,
    check_dtype: bool = True,
    check_category_order: bool = True,
    obj: str = "Categorical",
) -> None:
    """
    Test that Categoricals are equivalent.

    Parameters
    ----------
    left : Categorical
    right : Categorical
    check_dtype : bool, default True
        Check that integer dtype of the codes are the same.
    check_category_order : bool, default True
        Whether the order of the categories should be compared, which
        implies identical integer codes. If False, only the resulting
        values are compared. The ordered attribute is
        checked regardless.
    obj : str, default 'Categorical'
        Specify object name being compared, internally used to show appropriate
        assertion message.
    """
    _check_isinstance(left, right, Categorical)

    # Allow RangeIndex/Index(int64) equivalence only when one side actually
    # has RangeIndex categories; otherwise require exact Index matches.
    exact: bool | str
    has_range = isinstance(left.categories, RangeIndex) or isinstance(
        right.categories, RangeIndex
    )
    exact = "equiv" if has_range else True

    if check_category_order:
        assert_index_equal(
            left.categories, right.categories, obj=f"{obj}.categories", exact=exact
        )
        assert_numpy_array_equal(
            left.codes, right.codes, check_dtype=check_dtype, obj=f"{obj}.codes"
        )
    else:
        try:
            sorted_left = left.categories.sort_values()
            sorted_right = right.categories.sort_values()
        except TypeError:
            # e.g. '<' not supported between instances of 'int' and 'str'
            sorted_left, sorted_right = left.categories, right.categories
        assert_index_equal(
            sorted_left, sorted_right, obj=f"{obj}.categories", exact=exact
        )
        assert_index_equal(
            left.categories.take(left.codes),
            right.categories.take(right.codes),
            obj=f"{obj}.values",
            exact=exact,
        )

    assert_attr_equal("ordered", left, right, obj=obj)
505
+
506
+
507
def assert_interval_array_equal(
    left, right, exact: bool | Literal["equiv"] = "equiv", obj: str = "IntervalArray"
) -> None:
    """
    Test that two IntervalArrays are equivalent.

    Parameters
    ----------
    left, right : IntervalArray
        The IntervalArrays to compare.
    exact : bool or {'equiv'}, default 'equiv'
        Whether to check the Index class, dtype and inferred_type
        are identical. If 'equiv', then RangeIndex can be substituted for
        Index with an int64 dtype as well.
    obj : str, default 'IntervalArray'
        Specify object name being compared, internally used to show appropriate
        assertion message
    """
    _check_isinstance(left, right, IntervalArray)

    kwargs = {}
    if left._left.dtype.kind in ["m", "M"]:
        # We have a DatetimeArray or TimedeltaArray
        kwargs["check_freq"] = False

    assert_equal(left._left, right._left, obj=f"{obj}.left", **kwargs)
    # BUG FIX: the right endpoints were previously labelled "{obj}.left" in
    # assertion messages, making failures point at the wrong side.
    assert_equal(left._right, right._right, obj=f"{obj}.right", **kwargs)

    assert_attr_equal("closed", left, right, obj=obj)
536
+
537
+
538
def assert_period_array_equal(left, right, obj: str = "PeriodArray") -> None:
    """
    Check that two PeriodArrays are equal: same underlying ndarray values
    and the same ``freq`` attribute.
    """
    _check_isinstance(left, right, PeriodArray)

    ndarray_label = f"{obj}._ndarray"
    assert_numpy_array_equal(left._ndarray, right._ndarray, obj=ndarray_label)
    assert_attr_equal("freq", left, right, obj=obj)
543
+
544
+
545
def assert_datetime_array_equal(
    left, right, obj: str = "DatetimeArray", check_freq: bool = True
) -> None:
    """
    Check that two DatetimeArrays are equal: same underlying ndarray values,
    same ``tz``, and (when ``check_freq``) the same ``freq``.
    """
    __tracebackhide__ = True
    _check_isinstance(left, right, DatetimeArray)

    assert_numpy_array_equal(left._ndarray, right._ndarray, obj=f"{obj}._ndarray")

    # freq (optional) is checked before tz, matching the historical ordering
    # of the failure messages.
    attrs = ["freq", "tz"] if check_freq else ["tz"]
    for attr in attrs:
        assert_attr_equal(attr, left, right, obj=obj)
555
+
556
+
557
def assert_timedelta_array_equal(
    left, right, obj: str = "TimedeltaArray", check_freq: bool = True
) -> None:
    """
    Check that two TimedeltaArrays are equal: same underlying ndarray values
    and (when ``check_freq``) the same ``freq``.
    """
    __tracebackhide__ = True
    _check_isinstance(left, right, TimedeltaArray)

    ndarray_label = f"{obj}._ndarray"
    assert_numpy_array_equal(left._ndarray, right._ndarray, obj=ndarray_label)
    if check_freq:
        assert_attr_equal("freq", left, right, obj=obj)
565
+
566
+
567
def raise_assert_detail(
    obj, message, left, right, diff=None, first_diff=None, index_values=None
):
    """
    Raise an AssertionError with a standardized multi-line comparison report.

    The report always contains the object name, the failure message, and the
    left/right values; the index, diff and first-difference sections are
    included only when supplied.
    """
    __tracebackhide__ = True

    def _render(value):
        # ndarrays get pretty-printed; a handful of dtype objects are shown
        # via repr(); anything else is interpolated as-is.
        if isinstance(value, np.ndarray):
            return pprint_thing(value)
        if isinstance(value, (CategoricalDtype, PandasDtype, StringDtype)):
            return repr(value)
        return value

    parts = [f"{obj} are different", "", f"{message}"]
    if isinstance(index_values, np.ndarray):
        parts.append(f"[index]: {pprint_thing(index_values)}")
    parts.append(f"[left]: {_render(left)}")
    parts.append(f"[right]: {_render(right)}")
    if diff is not None:
        parts.append(f"[diff]: {diff}")
    if first_diff is not None:
        parts.append(f"{first_diff}")

    raise AssertionError("\n".join(parts))
600
+
601
+
602
def assert_numpy_array_equal(
    left,
    right,
    strict_nan: bool = False,
    check_dtype: bool | Literal["equiv"] = True,
    err_msg=None,
    check_same=None,
    obj: str = "numpy array",
    index_values=None,
) -> None:
    """
    Check that 'np.ndarray' is equivalent.

    Parameters
    ----------
    left, right : numpy.ndarray or iterable
        The two arrays to be compared.
    strict_nan : bool, default False
        If True, consider NaN and None to be different.
    check_dtype : bool, default True
        Check dtype if both a and b are np.ndarray.
    err_msg : str, default None
        If provided, used as assertion message.
    check_same : None|'copy'|'same', default None
        Ensure left and right refer/do not refer to the same memory area.
    obj : str, default 'numpy array'
        Specify object name being compared, internally used to show appropriate
        assertion message.
    index_values : numpy.ndarray, default None
        optional index (shared by both left and right), used in output.
    """
    __tracebackhide__ = True

    # instance validation
    # Show a detailed error message when classes are different
    assert_class_equal(left, right, obj=obj)
    # both classes must be an np.ndarray
    _check_isinstance(left, right, np.ndarray)

    def _get_base(obj):
        # Follow .base so that a view is compared against the memory it
        # aliases rather than the view object itself.
        return obj.base if getattr(obj, "base", None) is not None else obj

    left_base = _get_base(left)
    right_base = _get_base(right)

    # Memory-identity checks: 'same' requires both arrays to share their
    # underlying buffer; 'copy' requires that they do NOT share it.
    if check_same == "same":
        if left_base is not right_base:
            raise AssertionError(f"{repr(left_base)} is not {repr(right_base)}")
    elif check_same == "copy":
        if left_base is right_base:
            raise AssertionError(f"{repr(left_base)} is {repr(right_base)}")

    def _raise(left, right, err_msg):
        # Build and raise the detailed failure message; only called once a
        # mismatch has already been detected.
        if err_msg is None:
            if left.shape != right.shape:
                raise_assert_detail(
                    obj, f"{obj} shapes are different", left.shape, right.shape
                )

            diff = 0
            # Iterates along the first axis, so for multi-dimensional input
            # this counts mismatching sub-arrays (divided by total element
            # count below).
            for left_arr, right_arr in zip(left, right):
                # count up differences
                if not array_equivalent(left_arr, right_arr, strict_nan=strict_nan):
                    diff += 1

            diff = diff * 100.0 / left.size
            msg = f"{obj} values are different ({np.round(diff, 5)} %)"
            raise_assert_detail(obj, msg, left, right, index_values=index_values)

        raise AssertionError(err_msg)

    # compare shape and values
    if not array_equivalent(left, right, strict_nan=strict_nan):
        _raise(left, right, err_msg)

    if check_dtype:
        if isinstance(left, np.ndarray) and isinstance(right, np.ndarray):
            assert_attr_equal("dtype", left, right, obj=obj)
680
+
681
+
682
def assert_extension_array_equal(
    left,
    right,
    check_dtype: bool | Literal["equiv"] = True,
    index_values=None,
    check_exact: bool = False,
    rtol: float = 1.0e-5,
    atol: float = 1.0e-8,
    obj: str = "ExtensionArray",
) -> None:
    """
    Check that left and right ExtensionArrays are equal.

    Parameters
    ----------
    left, right : ExtensionArray
        The two arrays to compare.
    check_dtype : bool, default True
        Whether to check if the ExtensionArray dtypes are identical.
    index_values : numpy.ndarray, default None
        Optional index (shared by both left and right), used in output.
    check_exact : bool, default False
        Whether to compare number exactly.
    rtol : float, default 1e-5
        Relative tolerance. Only used when check_exact is False.

        .. versionadded:: 1.1.0
    atol : float, default 1e-8
        Absolute tolerance. Only used when check_exact is False.

        .. versionadded:: 1.1.0
    obj : str, default 'ExtensionArray'
        Specify object name being compared, internally used to show appropriate
        assertion message.

        .. versionadded:: 2.0.0

    Notes
    -----
    Missing values are checked separately from valid values.
    A mask of missing values is computed for each and checked to match.
    The remaining all-valid values are cast to object dtype and checked.

    Examples
    --------
    >>> from pandas import testing as tm
    >>> a = pd.Series([1, 2, 3, 4])
    >>> b, c = a.array, a.array
    >>> tm.assert_extension_array_equal(b, c)
    """
    assert isinstance(left, ExtensionArray), "left is not an ExtensionArray"
    assert isinstance(right, ExtensionArray), "right is not an ExtensionArray"
    if check_dtype:
        assert_attr_equal("dtype", left, right, obj=f"Attributes of {obj}")

    if (
        isinstance(left, DatetimeLikeArrayMixin)
        and isinstance(right, DatetimeLikeArrayMixin)
        and type(right) == type(left)
    ):
        # GH 52449
        # When dtypes are not required to match, datetime64/timedelta64 data
        # with different resolutions may still represent the same moments;
        # compare with resolution-aware equality before falling back to i8.
        if not check_dtype and left.dtype.kind in "mM":
            if not isinstance(left.dtype, np.dtype):
                l_unit = cast(DatetimeTZDtype, left.dtype).unit
            else:
                l_unit = np.datetime_data(left.dtype)[0]
            if not isinstance(right.dtype, np.dtype):
                # BUG FIX: this previously read the unit from *left*.dtype
                # (copy-paste error), which made l_unit always equal r_unit
                # and silently skipped the mismatched-resolution comparison.
                r_unit = cast(DatetimeTZDtype, right.dtype).unit
            else:
                r_unit = np.datetime_data(right.dtype)[0]
            if (
                l_unit != r_unit
                and compare_mismatched_resolutions(
                    left._ndarray, right._ndarray, operator.eq
                ).all()
            ):
                return
        # Avoid slow object-dtype comparisons
        # np.asarray for case where we have a np.MaskedArray
        assert_numpy_array_equal(
            np.asarray(left.asi8),
            np.asarray(right.asi8),
            index_values=index_values,
            obj=obj,
        )
        return

    # NA masks must match exactly before comparing the valid values.
    left_na = np.asarray(left.isna())
    right_na = np.asarray(right.isna())
    assert_numpy_array_equal(
        left_na, right_na, obj=f"{obj} NA mask", index_values=index_values
    )

    # Compare only the valid entries, cast to object dtype.
    left_valid = left[~left_na].to_numpy(dtype=object)
    right_valid = right[~right_na].to_numpy(dtype=object)
    if check_exact:
        assert_numpy_array_equal(
            left_valid, right_valid, obj=obj, index_values=index_values
        )
    else:
        _testing.assert_almost_equal(
            left_valid,
            right_valid,
            check_dtype=bool(check_dtype),
            rtol=rtol,
            atol=atol,
            obj=obj,
            index_values=index_values,
        )
791
+
792
+
793
# This could be refactored to use the NDFrame.equals method
def assert_series_equal(
    left,
    right,
    check_dtype: bool | Literal["equiv"] = True,
    check_index_type: bool | Literal["equiv"] = "equiv",
    check_series_type: bool = True,
    check_names: bool = True,
    check_exact: bool = False,
    check_datetimelike_compat: bool = False,
    check_categorical: bool = True,
    check_category_order: bool = True,
    check_freq: bool = True,
    check_flags: bool = True,
    rtol: float = 1.0e-5,
    atol: float = 1.0e-8,
    obj: str = "Series",
    *,
    check_index: bool = True,
    check_like: bool = False,
) -> None:
    """
    Check that left and right Series are equal.

    Parameters
    ----------
    left : Series
    right : Series
    check_dtype : bool, default True
        Whether to check the Series dtype is identical.
    check_index_type : bool or {'equiv'}, default 'equiv'
        Whether to check the Index class, dtype and inferred_type
        are identical.
    check_series_type : bool, default True
        Whether to check the Series class is identical.
    check_names : bool, default True
        Whether to check the Series and Index names attribute.
    check_exact : bool, default False
        Whether to compare number exactly.
    check_datetimelike_compat : bool, default False
        Compare datetime-like which is comparable ignoring dtype.
    check_categorical : bool, default True
        Whether to compare internal Categorical exactly.
    check_category_order : bool, default True
        Whether to compare category order of internal Categoricals.

        .. versionadded:: 1.0.2
    check_freq : bool, default True
        Whether to check the `freq` attribute on a DatetimeIndex or TimedeltaIndex.

        .. versionadded:: 1.1.0
    check_flags : bool, default True
        Whether to check the `flags` attribute.

        .. versionadded:: 1.2.0

    rtol : float, default 1e-5
        Relative tolerance. Only used when check_exact is False.

        .. versionadded:: 1.1.0
    atol : float, default 1e-8
        Absolute tolerance. Only used when check_exact is False.

        .. versionadded:: 1.1.0
    obj : str, default 'Series'
        Specify object name being compared, internally used to show appropriate
        assertion message.
    check_index : bool, default True
        Whether to check index equivalence. If False, then compare only values.

        .. versionadded:: 1.3.0
    check_like : bool, default False
        If True, ignore the order of the index. Must be False if check_index is False.
        Note: same labels must be with the same data.

        .. versionadded:: 1.5.0

    Examples
    --------
    >>> from pandas import testing as tm
    >>> a = pd.Series([1, 2, 3, 4])
    >>> b = pd.Series([1, 2, 3, 4])
    >>> tm.assert_series_equal(a, b)
    """
    __tracebackhide__ = True

    if not check_index and check_like:
        raise ValueError("check_like must be False if check_index is False")

    # instance validation
    _check_isinstance(left, right, Series)

    if check_series_type:
        assert_class_equal(left, right, obj=obj)

    # length comparison
    if len(left) != len(right):
        msg1 = f"{len(left)}, {left.index}"
        msg2 = f"{len(right)}, {right.index}"
        raise_assert_detail(obj, "Series length are different", msg1, msg2)

    if check_flags:
        assert left.flags == right.flags, f"{repr(left.flags)} != {repr(right.flags)}"

    if check_index:
        # GH #38183
        assert_index_equal(
            left.index,
            right.index,
            exact=check_index_type,
            check_names=check_names,
            check_exact=check_exact,
            check_categorical=check_categorical,
            check_order=not check_like,
            rtol=rtol,
            atol=atol,
            obj=f"{obj}.index",
        )

    if check_like:
        # Align left to right's label order so the positional value
        # comparisons below line up.
        left = left.reindex_like(right)

    if check_freq and isinstance(left.index, (DatetimeIndex, TimedeltaIndex)):
        lidx = left.index
        ridx = right.index
        assert lidx.freq == ridx.freq, (lidx.freq, ridx.freq)

    if check_dtype:
        # We want to skip exact dtype checking when `check_categorical`
        # is False. We'll still raise if only one is a `Categorical`,
        # regardless of `check_categorical`
        if (
            isinstance(left.dtype, CategoricalDtype)
            and isinstance(right.dtype, CategoricalDtype)
            and not check_categorical
        ):
            pass
        else:
            assert_attr_equal("dtype", left, right, obj=f"Attributes of {obj}")

    # Value comparison: dispatched on the dtype combination. NOTE: the
    # branch order below is significant.
    if check_exact and is_numeric_dtype(left.dtype) and is_numeric_dtype(right.dtype):
        left_values = left._values
        right_values = right._values
        # Only check exact if dtype is numeric
        if isinstance(left_values, ExtensionArray) and isinstance(
            right_values, ExtensionArray
        ):
            assert_extension_array_equal(
                left_values,
                right_values,
                check_dtype=check_dtype,
                index_values=np.asarray(left.index),
                obj=str(obj),
            )
        else:
            assert_numpy_array_equal(
                left_values,
                right_values,
                check_dtype=check_dtype,
                obj=str(obj),
                index_values=np.asarray(left.index),
            )
    elif check_datetimelike_compat and (
        needs_i8_conversion(left.dtype) or needs_i8_conversion(right.dtype)
    ):
        # we want to check only if we have compat dtypes
        # e.g. integer and M|m are NOT compat, but we can simply check
        # the values in that case

        # datetimelike may have different objects (e.g. datetime.datetime
        # vs Timestamp) but will compare equal
        if not Index(left._values).equals(Index(right._values)):
            msg = (
                f"[datetimelike_compat=True] {left._values} "
                f"is not equal to {right._values}."
            )
            raise AssertionError(msg)
    elif is_interval_dtype(left.dtype) and is_interval_dtype(right.dtype):
        assert_interval_array_equal(left.array, right.array)
    elif isinstance(left.dtype, CategoricalDtype) or isinstance(
        right.dtype, CategoricalDtype
    ):
        _testing.assert_almost_equal(
            left._values,
            right._values,
            rtol=rtol,
            atol=atol,
            check_dtype=bool(check_dtype),
            obj=str(obj),
            index_values=np.asarray(left.index),
        )
    elif is_extension_array_dtype(left.dtype) and is_extension_array_dtype(right.dtype):
        assert_extension_array_equal(
            left._values,
            right._values,
            rtol=rtol,
            atol=atol,
            check_dtype=check_dtype,
            index_values=np.asarray(left.index),
            obj=str(obj),
        )
    elif is_extension_array_dtype_and_needs_i8_conversion(
        left.dtype, right.dtype
    ) or is_extension_array_dtype_and_needs_i8_conversion(right.dtype, left.dtype):
        # Mixed extension-array / datetimelike dtype combination (GH #37609).
        assert_extension_array_equal(
            left._values,
            right._values,
            check_dtype=check_dtype,
            index_values=np.asarray(left.index),
            obj=str(obj),
        )
    elif needs_i8_conversion(left.dtype) and needs_i8_conversion(right.dtype):
        # DatetimeArray or TimedeltaArray
        assert_extension_array_equal(
            left._values,
            right._values,
            check_dtype=check_dtype,
            index_values=np.asarray(left.index),
            obj=str(obj),
        )
    else:
        _testing.assert_almost_equal(
            left._values,
            right._values,
            rtol=rtol,
            atol=atol,
            check_dtype=bool(check_dtype),
            obj=str(obj),
            index_values=np.asarray(left.index),
        )

    # metadata comparison
    if check_names:
        assert_attr_equal("name", left, right, obj=obj)

    if check_categorical:
        if isinstance(left.dtype, CategoricalDtype) or isinstance(
            right.dtype, CategoricalDtype
        ):
            assert_categorical_equal(
                left._values,
                right._values,
                obj=f"{obj} category",
                check_category_order=check_category_order,
            )
1038
+
1039
+
1040
# This could be refactored to use the NDFrame.equals method
def assert_frame_equal(
    left,
    right,
    check_dtype: bool | Literal["equiv"] = True,
    check_index_type: bool | Literal["equiv"] = "equiv",
    check_column_type: bool | Literal["equiv"] = "equiv",
    check_frame_type: bool = True,
    check_names: bool = True,
    by_blocks: bool = False,
    check_exact: bool = False,
    check_datetimelike_compat: bool = False,
    check_categorical: bool = True,
    check_like: bool = False,
    check_freq: bool = True,
    check_flags: bool = True,
    rtol: float = 1.0e-5,
    atol: float = 1.0e-8,
    obj: str = "DataFrame",
) -> None:
    """
    Check that left and right DataFrame are equal.

    This function is intended to compare two DataFrames and output any
    differences. It is mostly intended for use in unit tests.
    Additional parameters allow varying the strictness of the
    equality checks performed.

    Parameters
    ----------
    left : DataFrame
        First DataFrame to compare.
    right : DataFrame
        Second DataFrame to compare.
    check_dtype : bool, default True
        Whether to check the DataFrame dtype is identical.
    check_index_type : bool or {'equiv'}, default 'equiv'
        Whether to check the Index class, dtype and inferred_type
        are identical.
    check_column_type : bool or {'equiv'}, default 'equiv'
        Whether to check the columns class, dtype and inferred_type
        are identical. Is passed as the ``exact`` argument of
        :func:`assert_index_equal`.
    check_frame_type : bool, default True
        Whether to check the DataFrame class is identical.
    check_names : bool, default True
        Whether to check that the `names` attribute for both the `index`
        and `column` attributes of the DataFrame is identical.
    by_blocks : bool, default False
        Specify how to compare internal data. If False, compare by columns.
        If True, compare by blocks.
    check_exact : bool, default False
        Whether to compare number exactly.
    check_datetimelike_compat : bool, default False
        Compare datetime-like which is comparable ignoring dtype.
    check_categorical : bool, default True
        Whether to compare internal Categorical exactly.
    check_like : bool, default False
        If True, ignore the order of index & columns.
        Note: index labels must match their respective rows
        (same as in columns) - same labels must be with the same data.
    check_freq : bool, default True
        Whether to check the `freq` attribute on a DatetimeIndex or TimedeltaIndex.

        .. versionadded:: 1.1.0
    check_flags : bool, default True
        Whether to check the `flags` attribute.
    rtol : float, default 1e-5
        Relative tolerance. Only used when check_exact is False.

        .. versionadded:: 1.1.0
    atol : float, default 1e-8
        Absolute tolerance. Only used when check_exact is False.

        .. versionadded:: 1.1.0
    obj : str, default 'DataFrame'
        Specify object name being compared, internally used to show appropriate
        assertion message.

    See Also
    --------
    assert_series_equal : Equivalent method for asserting Series equality.
    DataFrame.equals : Check DataFrame equality.

    Examples
    --------
    This example shows comparing two DataFrames that are equal
    but with columns of differing dtypes.

    >>> from pandas.testing import assert_frame_equal
    >>> df1 = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})
    >>> df2 = pd.DataFrame({'a': [1, 2], 'b': [3.0, 4.0]})

    df1 equals itself.

    >>> assert_frame_equal(df1, df1)

    df1 differs from df2 as column 'b' is of a different type.

    >>> assert_frame_equal(df1, df2)
    Traceback (most recent call last):
    ...
    AssertionError: Attributes of DataFrame.iloc[:, 1] (column name="b") are different

    Attribute "dtype" are different
    [left]: int64
    [right]: float64

    Ignore differing dtypes in columns with check_dtype.

    >>> assert_frame_equal(df1, df2, check_dtype=False)
    """
    __tracebackhide__ = True

    # instance validation
    _check_isinstance(left, right, DataFrame)

    if check_frame_type:
        assert isinstance(left, type(right))
        # assert_class_equal(left, right, obj=obj)

    # shape comparison
    if left.shape != right.shape:
        raise_assert_detail(
            obj, f"{obj} shape mismatch", f"{repr(left.shape)}", f"{repr(right.shape)}"
        )

    if check_flags:
        assert left.flags == right.flags, f"{repr(left.flags)} != {repr(right.flags)}"

    # index comparison
    assert_index_equal(
        left.index,
        right.index,
        exact=check_index_type,
        check_names=check_names,
        check_exact=check_exact,
        check_categorical=check_categorical,
        check_order=not check_like,
        rtol=rtol,
        atol=atol,
        obj=f"{obj}.index",
    )

    # column comparison
    assert_index_equal(
        left.columns,
        right.columns,
        exact=check_column_type,
        check_names=check_names,
        check_exact=check_exact,
        check_categorical=check_categorical,
        check_order=not check_like,
        rtol=rtol,
        atol=atol,
        obj=f"{obj}.columns",
    )

    if check_like:
        # Align row/column order so the positional comparisons below line up.
        left = left.reindex_like(right)

    # compare by blocks
    if by_blocks:
        rblocks = right._to_dict_of_blocks()
        lblocks = left._to_dict_of_blocks()
        # Require the same set of block dtypes on both sides, then compare
        # each per-dtype sub-frame recursively.
        for dtype in list(set(list(lblocks.keys()) + list(rblocks.keys()))):
            assert dtype in lblocks
            assert dtype in rblocks
            assert_frame_equal(
                lblocks[dtype], rblocks[dtype], check_dtype=check_dtype, obj=obj
            )

    # compare by columns
    else:
        for i, col in enumerate(left.columns):
            # We have already checked that columns match, so we can do
            # fast location-based lookups
            lcol = left._ixs(i, axis=1)
            rcol = right._ixs(i, axis=1)

            # GH #38183
            # use check_index=False, because we do not want to run
            # assert_index_equal for each column,
            # as we already checked it for the whole dataframe before.
            assert_series_equal(
                lcol,
                rcol,
                check_dtype=check_dtype,
                check_index_type=check_index_type,
                check_exact=check_exact,
                check_names=check_names,
                check_datetimelike_compat=check_datetimelike_compat,
                check_categorical=check_categorical,
                check_freq=check_freq,
                obj=f'{obj}.iloc[:, {i}] (column name="{col}")',
                rtol=rtol,
                atol=atol,
                check_index=False,
                check_flags=False,
            )
1240
+
1241
+
1242
def assert_equal(left, right, **kwargs) -> None:
    """
    Wrapper for tm.assert_*_equal to dispatch to the appropriate test function.

    Parameters
    ----------
    left, right : Index, Series, DataFrame, ExtensionArray, or np.ndarray
        The two items to be compared.
    **kwargs
        All keyword arguments are passed through to the underlying assert method.
    """
    __tracebackhide__ = True

    # NOTE: the branch order matters. Concrete array types (IntervalArray,
    # PeriodArray, DatetimeArray, TimedeltaArray) must be tested before the
    # generic ExtensionArray branch; str and np.ndarray handle the remaining
    # non-pandas inputs.
    if isinstance(left, Index):
        assert_index_equal(left, right, **kwargs)
        if isinstance(left, (DatetimeIndex, TimedeltaIndex)):
            # freq is asserted explicitly here; presumably not covered by
            # assert_index_equal — TODO confirm against that function.
            assert left.freq == right.freq, (left.freq, right.freq)
    elif isinstance(left, Series):
        assert_series_equal(left, right, **kwargs)
    elif isinstance(left, DataFrame):
        assert_frame_equal(left, right, **kwargs)
    elif isinstance(left, IntervalArray):
        assert_interval_array_equal(left, right, **kwargs)
    elif isinstance(left, PeriodArray):
        assert_period_array_equal(left, right, **kwargs)
    elif isinstance(left, DatetimeArray):
        assert_datetime_array_equal(left, right, **kwargs)
    elif isinstance(left, TimedeltaArray):
        assert_timedelta_array_equal(left, right, **kwargs)
    elif isinstance(left, ExtensionArray):
        assert_extension_array_equal(left, right, **kwargs)
    elif isinstance(left, np.ndarray):
        assert_numpy_array_equal(left, right, **kwargs)
    elif isinstance(left, str):
        # Plain equality for strings; no kwargs are supported here.
        assert kwargs == {}
        assert left == right
    else:
        # Fallback for scalars and other objects.
        assert kwargs == {}
        assert_almost_equal(left, right)
1281
+
1282
+
1283
def assert_sp_array_equal(left, right) -> None:
    """
    Check that the left and right SparseArray are equal.

    Compares the stored sparse values, the sparse index, the fill value,
    the dtype, and finally the densified values.

    Parameters
    ----------
    left : SparseArray
    right : SparseArray
    """
    _check_isinstance(left, right, pd.arrays.SparseArray)

    assert_numpy_array_equal(left.sp_values, right.sp_values)

    # SparseIndex comparison
    assert isinstance(left.sp_index, SparseIndex)
    assert isinstance(right.sp_index, SparseIndex)

    if not left.sp_index.equals(right.sp_index):
        raise_assert_detail(
            "SparseArray.index", "index are not equal", left.sp_index, right.sp_index
        )

    assert_attr_equal("fill_value", left, right)
    assert_attr_equal("dtype", left, right)
    assert_numpy_array_equal(left.to_dense(), right.to_dense())
1314
+
1315
+
1316
def assert_contains_all(iterable, dic) -> None:
    """Assert that every element of ``iterable`` is contained in ``dic``."""
    for element in iterable:
        assert element in dic, f"Did not contain item: {repr(element)}"
1319
+
1320
+
1321
def assert_copy(iter1, iter2, **eql_kwargs) -> None:
    """
    Check that paired elements of two iterables compare equal (via
    assert_almost_equal) while being distinct objects.

    Note: this does not recurse — items contained *inside* the elements may
    still be shared between the two iterables.
    """
    for first, second in zip(iter1, iter2):
        assert_almost_equal(first, second, **eql_kwargs)
        assert first is not second, (
            f"Expected object {repr(type(first))} and object {repr(type(second))} to be "
            "different objects, but they were the same object."
        )
1337
+
1338
+
1339
def is_extension_array_dtype_and_needs_i8_conversion(left_dtype, right_dtype) -> bool:
    """
    Check for the combination of an ExtensionArray dtype on the left with a
    dtype on the right that should be converted to int64.

    Returns
    -------
    bool

    Related to issue #37609
    """
    if not is_extension_array_dtype(left_dtype):
        return False
    return needs_i8_conversion(right_dtype)
1351
+
1352
+
1353
def assert_indexing_slices_equivalent(ser: Series, l_slc: slice, i_slc: slice) -> None:
    """
    Check that label-based and position-based slicing agree: ``ser.loc[l_slc]``
    (and, for non-integer indexes, plain ``ser[l_slc]``) must equal
    ``ser.iloc[i_slc]``.
    """
    expected = ser.iloc[i_slc]

    candidates = [ser.loc[l_slc]]
    if not is_integer_dtype(ser.index):
        # For integer indices, .loc and plain getitem are position-based,
        # so plain getitem is only comparable for non-integer indexes.
        candidates.append(ser[l_slc])

    for result in candidates:
        assert_series_equal(result, expected)
1365
+
1366
+
1367
+ def assert_metadata_equivalent(
1368
+ left: DataFrame | Series, right: DataFrame | Series | None = None
1369
+ ) -> None:
1370
+ """
1371
+ Check that ._metadata attributes are equivalent.
1372
+ """
1373
+ for attr in left._metadata:
1374
+ val = getattr(left, attr, None)
1375
+ if right is None:
1376
+ assert val is None
1377
+ else:
1378
+ assert val == getattr(right, attr, None)
videochat2/lib/python3.10/site-packages/pandas/_testing/compat.py ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Helpers for sharing tests between DataFrame/Series
3
+ """
4
+ from pandas._typing import DtypeObj
5
+
6
+ from pandas import DataFrame
7
+
8
+
9
def get_dtype(obj) -> DtypeObj:
    """
    Return the dtype of a Series, or the dtype of the (assumed single)
    column of a DataFrame.
    """
    if isinstance(obj, DataFrame):
        # Note: we are assuming only one column
        return obj.dtypes.iat[0]
    return obj.dtype
15
+
16
+
17
def get_obj(df: DataFrame, klass):
    """
    For sharing tests using frame_or_series: return ``df`` unchanged when
    ``klass`` is DataFrame, otherwise its first column as a Series.
    """
    return df if klass is DataFrame else df._ixs(0, axis=1)
videochat2/lib/python3.10/site-packages/pandas/_testing/contexts.py ADDED
@@ -0,0 +1,219 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ from contextlib import contextmanager
4
+ import os
5
+ from pathlib import Path
6
+ import tempfile
7
+ from typing import (
8
+ IO,
9
+ Any,
10
+ Generator,
11
+ )
12
+ import uuid
13
+
14
+ from pandas._typing import (
15
+ BaseBuffer,
16
+ CompressionOptions,
17
+ FilePath,
18
+ )
19
+ from pandas.compat import PYPY
20
+ from pandas.errors import ChainedAssignmentError
21
+
22
+ from pandas import set_option
23
+
24
+ from pandas.io.common import get_handle
25
+
26
+
27
@contextmanager
def decompress_file(
    path: FilePath | BaseBuffer, compression: CompressionOptions
) -> Generator[IO[bytes], None, None]:
    """
    Open a compressed file and return a file object.

    Parameters
    ----------
    path : str
        The path where the file is read from.

    compression : {'gzip', 'bz2', 'zip', 'xz', 'zstd', None}
        Name of the decompression to use

    Yields
    ------
    file object
        A binary handle to the decompressed stream; closed automatically when
        the context exits (via get_handle's own context management).
    """
    with get_handle(path, "rb", compression=compression, is_text=False) as handle:
        yield handle.handle
48
+
49
+
50
@contextmanager
def set_timezone(tz: str) -> Generator[None, None, None]:
    """
    Context manager for temporarily setting a timezone.

    Sets the ``TZ`` environment variable (and calls ``time.tzset()``) on
    entry, and restores the previous value — including "unset" — on exit.

    Parameters
    ----------
    tz : str
        A string representing a valid timezone.

    Examples
    --------
    >>> from datetime import datetime
    >>> from dateutil.tz import tzlocal
    >>> tzlocal().tzname(datetime(2021, 1, 1))  # doctest: +SKIP
    'IST'

    >>> with set_timezone('US/Eastern'):
    ...     tzlocal().tzname(datetime(2021, 1, 1))
    ...
    'EST'
    """
    import time

    def _apply(zone) -> None:
        # None means "no explicit TZ": remove the variable if present.
        if zone is None:
            os.environ.pop("TZ", None)
        else:
            os.environ["TZ"] = zone
        time.tzset()

    previous = os.environ.get("TZ")
    _apply(tz)
    try:
        yield
    finally:
        _apply(previous)
90
+
91
+
92
@contextmanager
def ensure_clean(
    filename=None, return_filelike: bool = False, **kwargs: Any
) -> Generator[Any, None, None]:
    """
    Yield a unique temporary file path (or open handle) and delete it on exit.

    tempfile.mkstemp is deliberately avoided so no file handle is kept open;
    on Windows a file can only be deleted when nothing holds a handle to it.

    Parameters
    ----------
    filename : str (optional)
        suffix of the created file.
    return_filelike : bool (default False)
        if True, returns a file-like which is *always* cleaned. Necessary for
        savefig and other functions which want to append extensions.
    **kwargs
        Additional keywords are passed to open().

    """
    suffix = "" if filename is None else filename
    # A uuid prefix makes the name unique even across concurrent test runs.
    tmp_path = Path(tempfile.gettempdir()) / (str(uuid.uuid4()) + suffix)
    tmp_path.touch()

    yielded: str | IO = str(tmp_path)
    if return_filelike:
        kwargs.setdefault("mode", "w+b")
        yielded = open(tmp_path, **kwargs)

    try:
        yield yielded
    finally:
        if not isinstance(yielded, str):
            yielded.close()
        if tmp_path.is_file():
            tmp_path.unlink()
135
+
136
+
137
@contextmanager
def ensure_safe_environment_variables() -> Generator[None, None, None]:
    """
    Snapshot ``os.environ`` and restore it on exit.

    Environment variables set or removed inside the block therefore neither
    persist nor leak into global state.
    """
    snapshot = os.environ.copy()
    try:
        yield
    finally:
        # Restore by clearing first so variables added inside the block
        # are removed, not merely overwritten.
        os.environ.clear()
        os.environ.update(snapshot)
151
+
152
+
153
@contextmanager
def with_csv_dialect(name, **kwargs) -> Generator[None, None, None]:
    """
    Temporarily register a CSV dialect, unregistering it on exit.

    Parameters
    ----------
    name : str
        The name of the dialect.
    kwargs : mapping
        The parameters for the dialect.

    Raises
    ------
    ValueError : the name of the dialect conflicts with a builtin one.

    See Also
    --------
    csv : Python's CSV library.
    """
    import csv

    # Overriding a stdlib dialect would silently change unrelated parsing.
    if name in {"excel", "excel-tab", "unix"}:
        raise ValueError("Cannot override builtin dialect.")

    csv.register_dialect(name, **kwargs)
    try:
        yield
    finally:
        csv.unregister_dialect(name)
185
+
186
+
187
@contextmanager
def use_numexpr(use, min_elements=None) -> Generator[None, None, None]:
    """
    Temporarily set the "compute.use_numexpr" option and the minimum element
    count at which numexpr is used, restoring both on exit.

    Parameters
    ----------
    use : bool
        Value for the "compute.use_numexpr" option inside the block.
    min_elements : int, optional
        Value for ``expressions._MIN_ELEMENTS``; defaults to the current one.
    """
    # Imported lazily so merely importing this module does not pull in the
    # computation machinery.
    from pandas.core.computation import expressions as expr

    if min_elements is None:
        min_elements = expr._MIN_ELEMENTS

    # Save current state so it can be restored even if the body raises.
    olduse = expr.USE_NUMEXPR
    oldmin = expr._MIN_ELEMENTS
    set_option("compute.use_numexpr", use)
    expr._MIN_ELEMENTS = min_elements
    try:
        yield
    finally:
        expr._MIN_ELEMENTS = oldmin
        set_option("compute.use_numexpr", olduse)
203
+
204
+
205
def raises_chained_assignment_error():
    """
    Return a context manager asserting a ChainedAssignmentError warning.

    On PyPy the chained-assignment detection does not apply, so a no-op
    context manager is returned instead.
    """
    if not PYPY:
        from pandas._testing import assert_produces_warning

        return assert_produces_warning(
            ChainedAssignmentError,
            match=(
                "A value is trying to be set on a copy of a DataFrame or Series "
                "through chained assignment"
            ),
        )

    from contextlib import nullcontext

    return nullcontext()
videochat2/lib/python3.10/site-packages/pandas/api/__init__.py ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """ public toolkit API """
2
+ from pandas.api import (
3
+ extensions,
4
+ indexers,
5
+ interchange,
6
+ types,
7
+ )
8
+
9
+ __all__ = [
10
+ "interchange",
11
+ "extensions",
12
+ "indexers",
13
+ "types",
14
+ ]
videochat2/lib/python3.10/site-packages/pandas/api/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (346 Bytes). View file
 
videochat2/lib/python3.10/site-packages/pandas/api/extensions/__init__.py ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Public API for extending pandas objects.
3
+ """
4
+
5
+ from pandas._libs.lib import no_default
6
+
7
+ from pandas.core.dtypes.base import (
8
+ ExtensionDtype,
9
+ register_extension_dtype,
10
+ )
11
+
12
+ from pandas.core.accessor import (
13
+ register_dataframe_accessor,
14
+ register_index_accessor,
15
+ register_series_accessor,
16
+ )
17
+ from pandas.core.algorithms import take
18
+ from pandas.core.arrays import (
19
+ ExtensionArray,
20
+ ExtensionScalarOpsMixin,
21
+ )
22
+
23
+ __all__ = [
24
+ "no_default",
25
+ "ExtensionDtype",
26
+ "register_extension_dtype",
27
+ "register_dataframe_accessor",
28
+ "register_index_accessor",
29
+ "register_series_accessor",
30
+ "take",
31
+ "ExtensionArray",
32
+ "ExtensionScalarOpsMixin",
33
+ ]
videochat2/lib/python3.10/site-packages/pandas/api/extensions/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (733 Bytes). View file
 
videochat2/lib/python3.10/site-packages/pandas/api/indexers/__init__.py ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Public API for Rolling Window Indexers.
3
+ """
4
+
5
+ from pandas.core.indexers import check_array_indexer
6
+ from pandas.core.indexers.objects import (
7
+ BaseIndexer,
8
+ FixedForwardWindowIndexer,
9
+ VariableOffsetWindowIndexer,
10
+ )
11
+
12
+ __all__ = [
13
+ "check_array_indexer",
14
+ "BaseIndexer",
15
+ "FixedForwardWindowIndexer",
16
+ "VariableOffsetWindowIndexer",
17
+ ]
videochat2/lib/python3.10/site-packages/pandas/api/indexers/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (476 Bytes). View file
 
videochat2/lib/python3.10/site-packages/pandas/api/interchange/__init__.py ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Public API for DataFrame interchange protocol.
3
+ """
4
+
5
+ from pandas.core.interchange.dataframe_protocol import DataFrame
6
+ from pandas.core.interchange.from_dataframe import from_dataframe
7
+
8
+ __all__ = ["from_dataframe", "DataFrame"]
videochat2/lib/python3.10/site-packages/pandas/api/interchange/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (425 Bytes). View file
 
videochat2/lib/python3.10/site-packages/pandas/api/types/__init__.py ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Public toolkit API.
3
+ """
4
+
5
+ from pandas._libs.lib import infer_dtype
6
+
7
+ from pandas.core.dtypes.api import * # noqa: F401, F403
8
+ from pandas.core.dtypes.concat import union_categoricals
9
+ from pandas.core.dtypes.dtypes import (
10
+ CategoricalDtype,
11
+ DatetimeTZDtype,
12
+ IntervalDtype,
13
+ PeriodDtype,
14
+ )
15
+
16
+ __all__ = [
17
+ "infer_dtype",
18
+ "union_categoricals",
19
+ "CategoricalDtype",
20
+ "DatetimeTZDtype",
21
+ "IntervalDtype",
22
+ "PeriodDtype",
23
+ ]
videochat2/lib/python3.10/site-packages/pandas/api/types/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (558 Bytes). View file
 
videochat2/lib/python3.10/site-packages/pandas/compat/__init__.py ADDED
@@ -0,0 +1,169 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ compat
3
+ ======
4
+
5
+ Cross-compatible functions for different versions of Python.
6
+
7
+ Other items:
8
+ * platform checker
9
+ """
10
+ from __future__ import annotations
11
+
12
+ import os
13
+ import platform
14
+ import sys
15
+
16
+ from pandas._typing import F
17
+ from pandas.compat._constants import (
18
+ IS64,
19
+ PY39,
20
+ PY310,
21
+ PY311,
22
+ PYPY,
23
+ )
24
+ import pandas.compat.compressors
25
+ from pandas.compat.numpy import (
26
+ is_numpy_dev,
27
+ np_version_under1p21,
28
+ )
29
+ from pandas.compat.pyarrow import (
30
+ pa_version_under7p0,
31
+ pa_version_under8p0,
32
+ pa_version_under9p0,
33
+ pa_version_under11p0,
34
+ )
35
+
36
+
37
def set_function_name(f: F, name: str, cls) -> F:
    """
    Rebind ``f``'s introspection attributes so it presents itself as the
    method ``cls.name``, and return ``f`` itself.
    """
    f.__qualname__ = f"{cls.__name__}.{name}"
    f.__name__ = name
    f.__module__ = cls.__module__
    return f
45
+
46
+
47
def is_platform_little_endian() -> bool:
    """
    Report whether the interpreter runs on a little-endian platform.

    Returns
    -------
    bool
        True when ``sys.byteorder`` is "little".
    """
    return sys.byteorder == "little"
57
+
58
+
59
def is_platform_windows() -> bool:
    """
    Report whether the interpreter runs on Windows (including Cygwin).

    Returns
    -------
    bool
        True when ``sys.platform`` is "win32" or "cygwin".
    """
    return sys.platform in ("win32", "cygwin")
69
+
70
+
71
def is_platform_linux() -> bool:
    """
    Report whether the interpreter runs on Linux.

    Returns
    -------
    bool
        True when ``sys.platform`` is "linux".
    """
    plat = sys.platform
    return plat == "linux"
81
+
82
+
83
def is_platform_mac() -> bool:
    """
    Report whether the interpreter runs on macOS.

    Returns
    -------
    bool
        True when ``sys.platform`` is "darwin".
    """
    plat = sys.platform
    return plat == "darwin"
93
+
94
+
95
def is_platform_arm() -> bool:
    """
    Report whether the running machine uses an ARM architecture.

    Returns
    -------
    bool
        True for "arm64"/"aarch64" machines and any "armv*" variant.
    """
    machine = platform.machine()
    return machine in ("arm64", "aarch64") or machine.startswith("armv")
107
+
108
+
109
def is_platform_power() -> bool:
    """
    Report whether the running machine uses a Power (PPC64) architecture.

    Returns
    -------
    bool
        True for "ppc64" and "ppc64le" machines.
    """
    machine = platform.machine()
    return machine in ("ppc64", "ppc64le")
119
+
120
+
121
def is_ci_environment() -> bool:
    """
    Report whether we are running under continuous integration.

    Detection is based solely on the ``PANDAS_CI`` environment variable.

    Returns
    -------
    bool
        True when ``PANDAS_CI`` is set to "1".
    """
    flag = os.environ.get("PANDAS_CI", "0")
    return flag == "1"
132
+
133
+
134
def get_lzma_file() -> type[pandas.compat.compressors.LZMAFile]:
    """
    Importing the `LZMAFile` class from the `lzma` module.

    Returns
    -------
    class
        The `LZMAFile` class from the `lzma` module.

    Raises
    ------
    RuntimeError
        If the `lzma` module was not imported correctly, or didn't exist.
    """
    # ``has_lzma`` is determined at import time of pandas.compat.compressors;
    # it is False when this Python build lacks the lzma extension module.
    if not pandas.compat.compressors.has_lzma:
        raise RuntimeError(
            "lzma module not available. "
            "A Python re-install with the proper dependencies, "
            "might be required to solve this issue."
        )
    return pandas.compat.compressors.LZMAFile
155
+
156
+
157
+ __all__ = [
158
+ "is_numpy_dev",
159
+ "np_version_under1p21",
160
+ "pa_version_under7p0",
161
+ "pa_version_under8p0",
162
+ "pa_version_under9p0",
163
+ "pa_version_under11p0",
164
+ "IS64",
165
+ "PY39",
166
+ "PY310",
167
+ "PY311",
168
+ "PYPY",
169
+ ]
videochat2/lib/python3.10/site-packages/pandas/compat/__pycache__/compressors.cpython-310.pyc ADDED
Binary file (1.67 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/compat/_optional.py ADDED
@@ -0,0 +1,173 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import importlib
4
+ import sys
5
+ import types
6
+ import warnings
7
+
8
+ from pandas.util._exceptions import find_stack_level
9
+
10
+ from pandas.util.version import Version
11
+
12
+ # Update install.rst & setup.cfg when updating versions!
13
+
14
+ VERSIONS = {
15
+ "bs4": "4.9.3",
16
+ "blosc": "1.21.0",
17
+ "bottleneck": "1.3.2",
18
+ "brotli": "0.7.0",
19
+ "fastparquet": "0.6.3",
20
+ "fsspec": "2021.07.0",
21
+ "html5lib": "1.1",
22
+ "hypothesis": "6.34.2",
23
+ "gcsfs": "2021.07.0",
24
+ "jinja2": "3.0.0",
25
+ "lxml.etree": "4.6.3",
26
+ "matplotlib": "3.6.1",
27
+ "numba": "0.53.1",
28
+ "numexpr": "2.7.3",
29
+ "odfpy": "1.4.1",
30
+ "openpyxl": "3.0.7",
31
+ "pandas_gbq": "0.15.0",
32
+ "psycopg2": "2.8.6", # (dt dec pq3 ext lo64)
33
+ "pymysql": "1.0.2",
34
+ "pyarrow": "7.0.0",
35
+ "pyreadstat": "1.1.2",
36
+ "pytest": "7.3.2",
37
+ "pyxlsb": "1.0.8",
38
+ "s3fs": "2021.08.0",
39
+ "scipy": "1.7.1",
40
+ "snappy": "0.6.0",
41
+ "sqlalchemy": "1.4.16",
42
+ "tables": "3.6.1",
43
+ "tabulate": "0.8.9",
44
+ "xarray": "0.21.0",
45
+ "xlrd": "2.0.1",
46
+ "xlsxwriter": "1.4.3",
47
+ "zstandard": "0.15.2",
48
+ "tzdata": "2022.1",
49
+ "qtpy": "2.2.0",
50
+ "pyqt5": "5.15.1",
51
+ }
52
+
53
+ # A mapping from import name to package name (on PyPI) for packages where
54
+ # these two names are different.
55
+
56
+ INSTALL_MAPPING = {
57
+ "bs4": "beautifulsoup4",
58
+ "bottleneck": "Bottleneck",
59
+ "brotli": "brotlipy",
60
+ "jinja2": "Jinja2",
61
+ "lxml.etree": "lxml",
62
+ "odf": "odfpy",
63
+ "pandas_gbq": "pandas-gbq",
64
+ "snappy": "python-snappy",
65
+ "sqlalchemy": "SQLAlchemy",
66
+ "tables": "pytables",
67
+ }
68
+
69
+
70
def get_version(module: types.ModuleType) -> str:
    """
    Return the version string of an imported module.

    Falls back to the capitalized ``__VERSION__`` attribute used by xlrd.
    brotli and snappy expose no version attribute at all, so an empty string
    is returned for them; any other module without a version raises
    ImportError. psycopg2's build-flag suffix is stripped.
    """
    version = getattr(module, "__version__", None)
    if version is None:
        # xlrd stores its version under a capitalized name
        version = getattr(module, "__VERSION__", None)

    if version is None:
        if module.__name__ in ("brotli", "snappy"):
            # Neither package exposes a version attribute
            # (for snappy see https://github.com/andrix/python-snappy/pull/119)
            return ""
        raise ImportError(f"Can't determine version for {module.__name__}")

    if module.__name__ == "psycopg2":
        # psycopg2 appends " (dt dec pq3 ext lo64)" to its version
        version = version.split()[0]
    return version
89
+
90
+
91
def import_optional_dependency(
    name: str,
    extra: str = "",
    errors: str = "raise",
    min_version: str | None = None,
):
    """
    Import an optional dependency.

    By default, if a dependency is missing an ImportError with a nice
    message will be raised. If a dependency is present, but too old,
    we raise.

    Parameters
    ----------
    name : str
        The module name.
    extra : str
        Additional text to include in the ImportError message.
    errors : str {'raise', 'warn', 'ignore'}
        What to do when a dependency is not found or its version is too old.

        * raise : Raise an ImportError
        * warn : Only applicable when a module's version is too old.
          Warns that the version is too old and returns None
        * ignore: If the module is not installed, return None, otherwise,
          return the module, even if the version is too old.
          It's expected that users validate the version locally when
          using ``errors="ignore"`` (see. ``io/html.py``)
    min_version : str, default None
        Specify a minimum version that is different from the global pandas
        minimum version required.

    Returns
    -------
    maybe_module : Optional[ModuleType]
        The imported module, when found and the version is correct.
        None is returned when the package is not found and ``errors``
        is ``'ignore'``, or when the package's version is too old and
        ``errors`` is ``'warn'``.
    """

    assert errors in {"warn", "raise", "ignore"}

    # Some import names differ from the PyPI package name (e.g. bs4 ->
    # beautifulsoup4); use the installable name in user-facing messages.
    package_name = INSTALL_MAPPING.get(name)
    install_name = package_name if package_name is not None else name

    msg = (
        f"Missing optional dependency '{install_name}'. {extra} "
        f"Use pip or conda to install {install_name}."
    )
    try:
        module = importlib.import_module(name)
    except ImportError:
        if errors == "raise":
            raise ImportError(msg)
        return None

    # Handle submodules: if we have submodule, grab parent module from sys.modules
    parent = name.split(".")[0]
    if parent != name:
        install_name = parent
        module_to_get = sys.modules[install_name]
    else:
        module_to_get = module
    # Version requirements are keyed by the top-level package name.
    minimum_version = min_version if min_version is not None else VERSIONS.get(parent)
    if minimum_version:
        version = get_version(module_to_get)
        if version and Version(version) < Version(minimum_version):
            msg = (
                f"Pandas requires version '{minimum_version}' or newer of '{parent}' "
                f"(version '{version}' currently installed)."
            )
            if errors == "warn":
                warnings.warn(
                    msg,
                    UserWarning,
                    stacklevel=find_stack_level(),
                )
                return None
            elif errors == "raise":
                raise ImportError(msg)

    return module
videochat2/lib/python3.10/site-packages/pandas/compat/compressors.py ADDED
@@ -0,0 +1,69 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Patched ``BZ2File`` and ``LZMAFile`` to handle pickle protocol 5.
3
+ """
4
+
5
+ from __future__ import annotations
6
+
7
+ import bz2
8
+ from pickle import PickleBuffer
9
+
10
+ from pandas.compat._constants import PY310
11
+
12
+ try:
13
+ import lzma
14
+
15
+ has_lzma = True
16
+ except ImportError:
17
+ has_lzma = False
18
+
19
+
20
def flatten_buffer(
    b: bytes | bytearray | memoryview | PickleBuffer,
) -> bytes | bytearray | memoryview:
    """
    Return some 1-D `uint8` typed buffer.

    bytes/bytearray pass through untouched; everything else is wrapped in a
    PickleBuffer and exposed zero-copy when contiguous. Non-contiguous
    buffers are copied into fresh bytes.
    """
    if isinstance(b, (bytes, bytearray)):
        return b

    buffer = b if isinstance(b, PickleBuffer) else PickleBuffer(b)

    try:
        # zero-copy 1-D uint8 C-contiguous view of the underlying memory
        return buffer.raw()
    except BufferError:
        # buffer is not contiguous: fall back to an in-memory copy
        return memoryview(buffer).tobytes("A")
42
+
43
+
44
class BZ2File(bz2.BZ2File):
    # The write() patch is only needed before Python 3.10, where
    # bz2.BZ2File.write mishandled buffer objects whose len() is not the
    # byte count (e.g. PickleBuffer with multi-byte items).
    if not PY310:

        def write(self, b) -> int:
            # Workaround issue where `bz2.BZ2File` expects `len`
            # to return the number of bytes in `b` by converting
            # `b` into something that meets that constraint with
            # minimal copying.
            #
            # Note: This is fixed in Python 3.10.
            return super().write(flatten_buffer(b))
55
+
56
+
57
# LZMAFile is only defined when the interpreter was built with lzma support
# (see the guarded import above); get_lzma_file() raises otherwise.
if has_lzma:

    class LZMAFile(lzma.LZMAFile):
        if not PY310:

            def write(self, b) -> int:
                # Workaround issue where `lzma.LZMAFile` expects `len`
                # to return the number of bytes in `b` by converting
                # `b` into something that meets that constraint with
                # minimal copying.
                #
                # Note: This is fixed in Python 3.10.
                return super().write(flatten_buffer(b))
videochat2/lib/python3.10/site-packages/pandas/compat/pickle_compat.py ADDED
@@ -0,0 +1,249 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Support pre-0.12 series pickle compatibility.
3
+ """
4
+ from __future__ import annotations
5
+
6
+ import contextlib
7
+ import copy
8
+ import io
9
+ import pickle as pkl
10
+ from typing import Generator
11
+
12
+ import numpy as np
13
+
14
+ from pandas._libs.arrays import NDArrayBacked
15
+ from pandas._libs.tslibs import BaseOffset
16
+
17
+ from pandas import Index
18
+ from pandas.core.arrays import (
19
+ DatetimeArray,
20
+ PeriodArray,
21
+ TimedeltaArray,
22
+ )
23
+ from pandas.core.internals import BlockManager
24
+
25
+
26
def load_reduce(self):
    """
    REDUCE-opcode handler with retries for legacy pandas pickles.

    ``self`` is an Unpickler; the top of its stack holds a callable and the
    argument tuple. When calling fails with a known TypeError, an object is
    constructed via a compat path instead.
    """
    stack = self.stack
    args = stack.pop()
    func = stack[-1]

    try:
        stack[-1] = func(*args)
        return
    except TypeError as err:
        # If we have a deprecated function,
        # try to replace and try again.

        msg = "_reconstruct: First argument must be a sub-type of ndarray"

        if msg in str(err):
            # legacy ndarray-subclass reconstruction: allocate bare instance
            try:
                cls = args[0]
                stack[-1] = object.__new__(cls)
                return
            except TypeError:
                pass
        elif args and isinstance(args[0], type) and issubclass(args[0], BaseOffset):
            # TypeError: object.__new__(Day) is not safe, use Day.__new__()
            cls = args[0]
            stack[-1] = cls.__new__(*args)
            return
        elif args and issubclass(args[0], PeriodArray):
            # PeriodArray must be created through NDArrayBacked.__new__
            cls = args[0]
            stack[-1] = NDArrayBacked.__new__(*args)
            return

        # unrecognized failure: propagate the original TypeError
        raise
58
+
59
+
60
+ # If classes are moved, provide compat here.
61
+ _class_locations_map = {
62
+ ("pandas.core.sparse.array", "SparseArray"): ("pandas.core.arrays", "SparseArray"),
63
+ # 15477
64
+ ("pandas.core.base", "FrozenNDArray"): ("numpy", "ndarray"),
65
+ ("pandas.core.indexes.frozen", "FrozenNDArray"): ("numpy", "ndarray"),
66
+ ("pandas.core.base", "FrozenList"): ("pandas.core.indexes.frozen", "FrozenList"),
67
+ # 10890
68
+ ("pandas.core.series", "TimeSeries"): ("pandas.core.series", "Series"),
69
+ ("pandas.sparse.series", "SparseTimeSeries"): (
70
+ "pandas.core.sparse.series",
71
+ "SparseSeries",
72
+ ),
73
+ # 12588, extensions moving
74
+ ("pandas._sparse", "BlockIndex"): ("pandas._libs.sparse", "BlockIndex"),
75
+ ("pandas.tslib", "Timestamp"): ("pandas._libs.tslib", "Timestamp"),
76
+ # 18543 moving period
77
+ ("pandas._period", "Period"): ("pandas._libs.tslibs.period", "Period"),
78
+ ("pandas._libs.period", "Period"): ("pandas._libs.tslibs.period", "Period"),
79
+ # 18014 moved __nat_unpickle from _libs.tslib-->_libs.tslibs.nattype
80
+ ("pandas.tslib", "__nat_unpickle"): (
81
+ "pandas._libs.tslibs.nattype",
82
+ "__nat_unpickle",
83
+ ),
84
+ ("pandas._libs.tslib", "__nat_unpickle"): (
85
+ "pandas._libs.tslibs.nattype",
86
+ "__nat_unpickle",
87
+ ),
88
+ # 15998 top-level dirs moving
89
+ ("pandas.sparse.array", "SparseArray"): (
90
+ "pandas.core.arrays.sparse",
91
+ "SparseArray",
92
+ ),
93
+ ("pandas.indexes.base", "_new_Index"): ("pandas.core.indexes.base", "_new_Index"),
94
+ ("pandas.indexes.base", "Index"): ("pandas.core.indexes.base", "Index"),
95
+ ("pandas.indexes.numeric", "Int64Index"): (
96
+ "pandas.core.indexes.base",
97
+ "Index", # updated in 50775
98
+ ),
99
+ ("pandas.indexes.range", "RangeIndex"): ("pandas.core.indexes.range", "RangeIndex"),
100
+ ("pandas.indexes.multi", "MultiIndex"): ("pandas.core.indexes.multi", "MultiIndex"),
101
+ ("pandas.tseries.index", "_new_DatetimeIndex"): (
102
+ "pandas.core.indexes.datetimes",
103
+ "_new_DatetimeIndex",
104
+ ),
105
+ ("pandas.tseries.index", "DatetimeIndex"): (
106
+ "pandas.core.indexes.datetimes",
107
+ "DatetimeIndex",
108
+ ),
109
+ ("pandas.tseries.period", "PeriodIndex"): (
110
+ "pandas.core.indexes.period",
111
+ "PeriodIndex",
112
+ ),
113
+ # 19269, arrays moving
114
+ ("pandas.core.categorical", "Categorical"): ("pandas.core.arrays", "Categorical"),
115
+ # 19939, add timedeltaindex, float64index compat from 15998 move
116
+ ("pandas.tseries.tdi", "TimedeltaIndex"): (
117
+ "pandas.core.indexes.timedeltas",
118
+ "TimedeltaIndex",
119
+ ),
120
+ ("pandas.indexes.numeric", "Float64Index"): (
121
+ "pandas.core.indexes.base",
122
+ "Index", # updated in 50775
123
+ ),
124
+ # 50775, remove Int64Index, UInt64Index & Float64Index from codebase
125
+ ("pandas.core.indexes.numeric", "Int64Index"): (
126
+ "pandas.core.indexes.base",
127
+ "Index",
128
+ ),
129
+ ("pandas.core.indexes.numeric", "UInt64Index"): (
130
+ "pandas.core.indexes.base",
131
+ "Index",
132
+ ),
133
+ ("pandas.core.indexes.numeric", "Float64Index"): (
134
+ "pandas.core.indexes.base",
135
+ "Index",
136
+ ),
137
+ }
138
+
139
+
140
+ # our Unpickler sub-class to override methods and some dispatcher
141
+ # functions for compat and uses a non-public class of the pickle module.
142
+
143
+
144
class Unpickler(pkl._Unpickler):
    """Unpickler that remaps renamed/moved pandas classes on load."""

    def find_class(self, module, name):
        # override superclass: translate legacy (module, name) pairs through
        # _class_locations_map before delegating to the stock lookup.
        key = (module, name)
        module, name = _class_locations_map.get(key, key)
        return super().find_class(module, name)
150
+
151
+
152
+ Unpickler.dispatch = copy.copy(Unpickler.dispatch)
153
+ Unpickler.dispatch[pkl.REDUCE[0]] = load_reduce
154
+
155
+
156
def load_newobj(self) -> None:
    """
    NEWOBJ-opcode handler with compat shims for pandas classes whose
    ``__new__`` signatures changed since the pickle was written.
    """
    args = self.stack.pop()
    cls = self.stack[-1]

    # compat
    if issubclass(cls, Index):
        # Index subclasses: allocate bare; state is applied later
        obj = object.__new__(cls)
    elif issubclass(cls, DatetimeArray) and not args:
        # empty pickles need a placeholder datetime64[ns] backing array
        arr = np.array([], dtype="M8[ns]")
        obj = cls.__new__(cls, arr, arr.dtype)
    elif issubclass(cls, TimedeltaArray) and not args:
        # same for timedelta64[ns]
        arr = np.array([], dtype="m8[ns]")
        obj = cls.__new__(cls, arr, arr.dtype)
    elif cls is BlockManager and not args:
        # BlockManager.__new__ requires (blocks, axes, verify_integrity)
        obj = cls.__new__(cls, (), [], False)
    else:
        obj = cls.__new__(cls, *args)

    self.stack[-1] = obj
175
+
176
+
177
+ Unpickler.dispatch[pkl.NEWOBJ[0]] = load_newobj
178
+
179
+
180
def load_newobj_ex(self) -> None:
    """
    NEWOBJ_EX-opcode handler; Index subclasses are allocated bare so their
    legacy state can be applied afterwards.
    """
    kwargs = self.stack.pop()
    args = self.stack.pop()
    cls = self.stack.pop()

    # compat
    if issubclass(cls, Index):
        obj = object.__new__(cls)
    else:
        obj = cls.__new__(cls, *args, **kwargs)
    self.append(obj)
191
+
192
+
193
+ try:
194
+ Unpickler.dispatch[pkl.NEWOBJ_EX[0]] = load_newobj_ex
195
+ except (AttributeError, KeyError):
196
+ pass
197
+
198
+
199
def load(fh, encoding: str | None = None, is_verbose: bool = False):
    """
    Load a pickle, with a provided encoding,

    Parameters
    ----------
    fh : a filelike object
    encoding : an optional encoding
    is_verbose : show exception output
    """
    try:
        # always read from the start of the buffer
        fh.seek(0)
        if encoding is not None:
            up = Unpickler(fh, encoding=encoding)
        else:
            up = Unpickler(fh)
        # "Unpickler" has no attribute "is_verbose" [attr-defined]
        up.is_verbose = is_verbose  # type: ignore[attr-defined]

        return up.load()
    except (ValueError, TypeError):
        # re-raise unchanged; the except clause only documents the
        # failure modes callers are expected to handle
        raise
221
+
222
+
223
def loads(
    bytes_object: bytes,
    *,
    fix_imports: bool = True,
    encoding: str = "ASCII",
    errors: str = "strict",
):
    """
    Analogous to pickle._loads.

    Deserializes ``bytes_object`` using the compat Unpickler above, so
    renamed/moved pandas classes resolve correctly.
    """
    fd = io.BytesIO(bytes_object)
    return Unpickler(
        fd, fix_imports=fix_imports, encoding=encoding, errors=errors
    ).load()
237
+
238
+
239
@contextlib.contextmanager
def patch_pickle() -> Generator[None, None, None]:
    """
    Temporarily replace ``pickle.loads`` with the compat ``loads`` above,
    restoring the original on exit.
    """
    saved_loads = pkl.loads
    try:
        setattr(pkl, "loads", loads)
        yield
    finally:
        setattr(pkl, "loads", saved_loads)
videochat2/lib/python3.10/site-packages/pandas/compat/pyarrow.py ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """ support pyarrow compatibility across versions """
2
+
3
+ from __future__ import annotations
4
+
5
+ from pandas.util.version import Version
6
+
7
+ try:
8
+ import pyarrow as pa
9
+
10
+ _pa_version = pa.__version__
11
+ _palv = Version(_pa_version)
12
+ pa_version_under7p0 = _palv < Version("7.0.0")
13
+ pa_version_under8p0 = _palv < Version("8.0.0")
14
+ pa_version_under9p0 = _palv < Version("9.0.0")
15
+ pa_version_under10p0 = _palv < Version("10.0.0")
16
+ pa_version_under11p0 = _palv < Version("11.0.0")
17
+ except ImportError:
18
+ pa_version_under7p0 = True
19
+ pa_version_under8p0 = True
20
+ pa_version_under9p0 = True
21
+ pa_version_under10p0 = True
22
+ pa_version_under11p0 = True
videochat2/lib/python3.10/site-packages/pandas/tests/computation/__init__.py ADDED
File without changes
videochat2/lib/python3.10/site-packages/pandas/tests/computation/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (180 Bytes). View file
 
videochat2/lib/python3.10/site-packages/pandas/tests/computation/__pycache__/test_compat.cpython-310.pyc ADDED
Binary file (1.09 kB). View file