ZTWHHH commited on
Commit
5e52a41
·
verified ·
1 Parent(s): 192ac60

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +5 -0
  2. videochat2/lib/python3.10/site-packages/nvidia/cudnn/lib/libcudnn_cnn.so.9 +3 -0
  3. videochat2/lib/python3.10/site-packages/pandas/_libs/hashtable.cpython-310-x86_64-linux-gnu.so +3 -0
  4. videochat2/lib/python3.10/site-packages/pandas/_libs/internals.cpython-310-x86_64-linux-gnu.so +3 -0
  5. videochat2/lib/python3.10/site-packages/pandas/_libs/reshape.cpython-310-x86_64-linux-gnu.so +3 -0
  6. videochat2/lib/python3.10/site-packages/pandas/_libs/sparse.cpython-310-x86_64-linux-gnu.so +3 -0
  7. videochat2/lib/python3.10/site-packages/pandas/core/computation/__pycache__/__init__.cpython-310.pyc +0 -0
  8. videochat2/lib/python3.10/site-packages/pandas/core/computation/__pycache__/align.cpython-310.pyc +0 -0
  9. videochat2/lib/python3.10/site-packages/pandas/core/computation/__pycache__/api.cpython-310.pyc +0 -0
  10. videochat2/lib/python3.10/site-packages/pandas/core/computation/__pycache__/check.cpython-310.pyc +0 -0
  11. videochat2/lib/python3.10/site-packages/pandas/core/computation/__pycache__/common.cpython-310.pyc +0 -0
  12. videochat2/lib/python3.10/site-packages/pandas/core/computation/__pycache__/engines.cpython-310.pyc +0 -0
  13. videochat2/lib/python3.10/site-packages/pandas/core/computation/__pycache__/eval.cpython-310.pyc +0 -0
  14. videochat2/lib/python3.10/site-packages/pandas/core/computation/__pycache__/expr.cpython-310.pyc +0 -0
  15. videochat2/lib/python3.10/site-packages/pandas/core/computation/__pycache__/expressions.cpython-310.pyc +0 -0
  16. videochat2/lib/python3.10/site-packages/pandas/core/computation/__pycache__/ops.cpython-310.pyc +0 -0
  17. videochat2/lib/python3.10/site-packages/pandas/core/computation/__pycache__/parsing.cpython-310.pyc +0 -0
  18. videochat2/lib/python3.10/site-packages/pandas/core/computation/__pycache__/pytables.cpython-310.pyc +0 -0
  19. videochat2/lib/python3.10/site-packages/pandas/core/computation/__pycache__/scope.cpython-310.pyc +0 -0
  20. videochat2/lib/python3.10/site-packages/pandas/core/computation/align.py +213 -0
  21. videochat2/lib/python3.10/site-packages/pandas/core/computation/check.py +12 -0
  22. videochat2/lib/python3.10/site-packages/pandas/core/computation/eval.py +413 -0
  23. videochat2/lib/python3.10/site-packages/pandas/core/computation/expr.py +840 -0
  24. videochat2/lib/python3.10/site-packages/pandas/io/__init__.py +12 -0
  25. videochat2/lib/python3.10/site-packages/pandas/io/_util.py +23 -0
  26. videochat2/lib/python3.10/site-packages/pandas/io/api.py +65 -0
  27. videochat2/lib/python3.10/site-packages/pandas/io/clipboards.py +178 -0
  28. videochat2/lib/python3.10/site-packages/pandas/io/common.py +1253 -0
  29. videochat2/lib/python3.10/site-packages/pandas/io/feather_format.py +162 -0
  30. videochat2/lib/python3.10/site-packages/pandas/io/formats/__init__.py +8 -0
  31. videochat2/lib/python3.10/site-packages/pandas/io/formats/__pycache__/_color_data.cpython-310.pyc +0 -0
  32. videochat2/lib/python3.10/site-packages/pandas/io/formats/__pycache__/console.cpython-310.pyc +0 -0
  33. videochat2/lib/python3.10/site-packages/pandas/io/formats/__pycache__/css.cpython-310.pyc +0 -0
  34. videochat2/lib/python3.10/site-packages/pandas/io/formats/__pycache__/csvs.cpython-310.pyc +0 -0
  35. videochat2/lib/python3.10/site-packages/pandas/io/formats/__pycache__/excel.cpython-310.pyc +0 -0
  36. videochat2/lib/python3.10/site-packages/pandas/io/formats/__pycache__/format.cpython-310.pyc +0 -0
  37. videochat2/lib/python3.10/site-packages/pandas/io/formats/__pycache__/html.cpython-310.pyc +0 -0
  38. videochat2/lib/python3.10/site-packages/pandas/io/formats/__pycache__/info.cpython-310.pyc +0 -0
  39. videochat2/lib/python3.10/site-packages/pandas/io/formats/__pycache__/latex.cpython-310.pyc +0 -0
  40. videochat2/lib/python3.10/site-packages/pandas/io/formats/__pycache__/string.cpython-310.pyc +0 -0
  41. videochat2/lib/python3.10/site-packages/pandas/io/formats/__pycache__/style_render.cpython-310.pyc +0 -0
  42. videochat2/lib/python3.10/site-packages/pandas/io/formats/__pycache__/xml.cpython-310.pyc +0 -0
  43. videochat2/lib/python3.10/site-packages/pandas/io/formats/_color_data.py +157 -0
  44. videochat2/lib/python3.10/site-packages/pandas/io/formats/console.py +94 -0
  45. videochat2/lib/python3.10/site-packages/pandas/io/formats/css.py +418 -0
  46. videochat2/lib/python3.10/site-packages/pandas/io/formats/csvs.py +319 -0
  47. videochat2/lib/python3.10/site-packages/pandas/io/formats/excel.py +950 -0
  48. videochat2/lib/python3.10/site-packages/pandas/io/formats/format.py +2240 -0
  49. videochat2/lib/python3.10/site-packages/pandas/io/formats/html.py +633 -0
  50. videochat2/lib/python3.10/site-packages/pandas/io/formats/info.py +1101 -0
.gitattributes CHANGED
@@ -1250,3 +1250,8 @@ vlmpy310/lib/python3.10/site-packages/pandas/io/__pycache__/stata.cpython-310.py
1250
  vlmpy310/lib/python3.10/site-packages/pyparsing/__pycache__/core.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
1251
  videochat2/lib/python3.10/site-packages/pandas/_libs/missing.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
1252
  videochat2/lib/python3.10/site-packages/pandas/_libs/lib.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
1250
  vlmpy310/lib/python3.10/site-packages/pyparsing/__pycache__/core.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
1251
  videochat2/lib/python3.10/site-packages/pandas/_libs/missing.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
1252
  videochat2/lib/python3.10/site-packages/pandas/_libs/lib.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
1253
+ videochat2/lib/python3.10/site-packages/pandas/_libs/sparse.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
1254
+ videochat2/lib/python3.10/site-packages/pandas/_libs/hashtable.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
1255
+ videochat2/lib/python3.10/site-packages/pandas/_libs/reshape.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
1256
+ videochat2/lib/python3.10/site-packages/nvidia/cudnn/lib/libcudnn_cnn.so.9 filter=lfs diff=lfs merge=lfs -text
1257
+ videochat2/lib/python3.10/site-packages/pandas/_libs/internals.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
videochat2/lib/python3.10/site-packages/nvidia/cudnn/lib/libcudnn_cnn.so.9 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0e41dfebac04b6fc31d662991041f352b31aae4c96b18898df4ece6d59694f59
3
+ size 4691408
videochat2/lib/python3.10/site-packages/pandas/_libs/hashtable.cpython-310-x86_64-linux-gnu.so ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:eabda53825851f060fd89e436b6a9b3162e86be935fed98ea89ac4eb13105658
3
+ size 1816936
videochat2/lib/python3.10/site-packages/pandas/_libs/internals.cpython-310-x86_64-linux-gnu.so ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f2c0722e97a56826fd84dbdc3426241228586cec959c374e570095bd372521a1
3
+ size 360744
videochat2/lib/python3.10/site-packages/pandas/_libs/reshape.cpython-310-x86_64-linux-gnu.so ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b8f09860bf8a46cf3738b4e0a70b93b77bb5dae52621ce57157b7d28905ebe7e
3
+ size 271656
videochat2/lib/python3.10/site-packages/pandas/_libs/sparse.cpython-310-x86_64-linux-gnu.so ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3e9e3f94b082428c3bd1c47a80c1506b0a79f0253c388769285e0768dcebb951
3
+ size 866216
videochat2/lib/python3.10/site-packages/pandas/core/computation/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (179 Bytes). View file
 
videochat2/lib/python3.10/site-packages/pandas/core/computation/__pycache__/align.cpython-310.pyc ADDED
Binary file (6.11 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/core/computation/__pycache__/api.cpython-310.pyc ADDED
Binary file (253 Bytes). View file
 
videochat2/lib/python3.10/site-packages/pandas/core/computation/__pycache__/check.cpython-310.pyc ADDED
Binary file (449 Bytes). View file
 
videochat2/lib/python3.10/site-packages/pandas/core/computation/__pycache__/common.cpython-310.pyc ADDED
Binary file (1.35 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/core/computation/__pycache__/engines.cpython-310.pyc ADDED
Binary file (4.37 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/core/computation/__pycache__/eval.cpython-310.pyc ADDED
Binary file (11.9 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/core/computation/__pycache__/expr.cpython-310.pyc ADDED
Binary file (23.2 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/core/computation/__pycache__/expressions.cpython-310.pyc ADDED
Binary file (6.05 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/core/computation/__pycache__/ops.cpython-310.pyc ADDED
Binary file (17.2 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/core/computation/__pycache__/parsing.cpython-310.pyc ADDED
Binary file (6.01 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/core/computation/__pycache__/pytables.cpython-310.pyc ADDED
Binary file (19.2 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/core/computation/__pycache__/scope.cpython-310.pyc ADDED
Binary file (8.83 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/core/computation/align.py ADDED
@@ -0,0 +1,213 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Core eval alignment algorithms.
3
+ """
4
+ from __future__ import annotations
5
+
6
+ from functools import (
7
+ partial,
8
+ wraps,
9
+ )
10
+ from typing import (
11
+ TYPE_CHECKING,
12
+ Callable,
13
+ Sequence,
14
+ )
15
+ import warnings
16
+
17
+ import numpy as np
18
+
19
+ from pandas.errors import PerformanceWarning
20
+ from pandas.util._exceptions import find_stack_level
21
+
22
+ from pandas.core.dtypes.generic import (
23
+ ABCDataFrame,
24
+ ABCSeries,
25
+ )
26
+
27
+ from pandas.core.base import PandasObject
28
+ import pandas.core.common as com
29
+ from pandas.core.computation.common import result_type_many
30
+
31
+ if TYPE_CHECKING:
32
+ from pandas._typing import F
33
+
34
+ from pandas.core.generic import NDFrame
35
+ from pandas.core.indexes.api import Index
36
+
37
+
38
+ def _align_core_single_unary_op(
39
+ term,
40
+ ) -> tuple[partial | type[NDFrame], dict[str, Index] | None]:
41
+ typ: partial | type[NDFrame]
42
+ axes: dict[str, Index] | None = None
43
+
44
+ if isinstance(term.value, np.ndarray):
45
+ typ = partial(np.asanyarray, dtype=term.value.dtype)
46
+ else:
47
+ typ = type(term.value)
48
+ if hasattr(term.value, "axes"):
49
+ axes = _zip_axes_from_type(typ, term.value.axes)
50
+
51
+ return typ, axes
52
+
53
+
54
+ def _zip_axes_from_type(
55
+ typ: type[NDFrame], new_axes: Sequence[Index]
56
+ ) -> dict[str, Index]:
57
+ return {name: new_axes[i] for i, name in enumerate(typ._AXIS_ORDERS)}
58
+
59
+
60
+ def _any_pandas_objects(terms) -> bool:
61
+ """
62
+ Check a sequence of terms for instances of PandasObject.
63
+ """
64
+ return any(isinstance(term.value, PandasObject) for term in terms)
65
+
66
+
67
+ def _filter_special_cases(f) -> Callable[[F], F]:
68
+ @wraps(f)
69
+ def wrapper(terms):
70
+ # single unary operand
71
+ if len(terms) == 1:
72
+ return _align_core_single_unary_op(terms[0])
73
+
74
+ term_values = (term.value for term in terms)
75
+
76
+ # we don't have any pandas objects
77
+ if not _any_pandas_objects(terms):
78
+ return result_type_many(*term_values), None
79
+
80
+ return f(terms)
81
+
82
+ return wrapper
83
+
84
+
85
+ @_filter_special_cases
86
+ def _align_core(terms):
87
+ term_index = [i for i, term in enumerate(terms) if hasattr(term.value, "axes")]
88
+ term_dims = [terms[i].value.ndim for i in term_index]
89
+
90
+ from pandas import Series
91
+
92
+ ndims = Series(dict(zip(term_index, term_dims)))
93
+
94
+ # initial axes are the axes of the largest-axis'd term
95
+ biggest = terms[ndims.idxmax()].value
96
+ typ = biggest._constructor
97
+ axes = biggest.axes
98
+ naxes = len(axes)
99
+ gt_than_one_axis = naxes > 1
100
+
101
+ for value in (terms[i].value for i in term_index):
102
+ is_series = isinstance(value, ABCSeries)
103
+ is_series_and_gt_one_axis = is_series and gt_than_one_axis
104
+
105
+ for axis, items in enumerate(value.axes):
106
+ if is_series_and_gt_one_axis:
107
+ ax, itm = naxes - 1, value.index
108
+ else:
109
+ ax, itm = axis, items
110
+
111
+ if not axes[ax].is_(itm):
112
+ axes[ax] = axes[ax].join(itm, how="outer")
113
+
114
+ for i, ndim in ndims.items():
115
+ for axis, items in zip(range(ndim), axes):
116
+ ti = terms[i].value
117
+
118
+ if hasattr(ti, "reindex"):
119
+ transpose = isinstance(ti, ABCSeries) and naxes > 1
120
+ reindexer = axes[naxes - 1] if transpose else items
121
+
122
+ term_axis_size = len(ti.axes[axis])
123
+ reindexer_size = len(reindexer)
124
+
125
+ ordm = np.log10(max(1, abs(reindexer_size - term_axis_size)))
126
+ if ordm >= 1 and reindexer_size >= 10000:
127
+ w = (
128
+ f"Alignment difference on axis {axis} is larger "
129
+ f"than an order of magnitude on term {repr(terms[i].name)}, "
130
+ f"by more than {ordm:.4g}; performance may suffer."
131
+ )
132
+ warnings.warn(
133
+ w, category=PerformanceWarning, stacklevel=find_stack_level()
134
+ )
135
+
136
+ f = partial(ti.reindex, reindexer, axis=axis, copy=False)
137
+
138
+ terms[i].update(f())
139
+
140
+ terms[i].update(terms[i].value.values)
141
+
142
+ return typ, _zip_axes_from_type(typ, axes)
143
+
144
+
145
+ def align_terms(terms):
146
+ """
147
+ Align a set of terms.
148
+ """
149
+ try:
150
+ # flatten the parse tree (a nested list, really)
151
+ terms = list(com.flatten(terms))
152
+ except TypeError:
153
+ # can't iterate so it must just be a constant or single variable
154
+ if isinstance(terms.value, (ABCSeries, ABCDataFrame)):
155
+ typ = type(terms.value)
156
+ return typ, _zip_axes_from_type(typ, terms.value.axes)
157
+ return np.result_type(terms.type), None
158
+
159
+ # if all resolved variables are numeric scalars
160
+ if all(term.is_scalar for term in terms):
161
+ return result_type_many(*(term.value for term in terms)).type, None
162
+
163
+ # perform the main alignment
164
+ typ, axes = _align_core(terms)
165
+ return typ, axes
166
+
167
+
168
+ def reconstruct_object(typ, obj, axes, dtype):
169
+ """
170
+ Reconstruct an object given its type, raw value, and possibly empty
171
+ (None) axes.
172
+
173
+ Parameters
174
+ ----------
175
+ typ : object
176
+ A type
177
+ obj : object
178
+ The value to use in the type constructor
179
+ axes : dict
180
+ The axes to use to construct the resulting pandas object
181
+
182
+ Returns
183
+ -------
184
+ ret : typ
185
+ An object of type ``typ`` with the value `obj` and possible axes
186
+ `axes`.
187
+ """
188
+ try:
189
+ typ = typ.type
190
+ except AttributeError:
191
+ pass
192
+
193
+ res_t = np.result_type(obj.dtype, dtype)
194
+
195
+ if not isinstance(typ, partial) and issubclass(typ, PandasObject):
196
+ return typ(obj, dtype=res_t, **axes)
197
+
198
+ # special case for pathological things like ~True/~False
199
+ if hasattr(res_t, "type") and typ == np.bool_ and res_t != np.bool_:
200
+ ret_value = res_t.type(obj)
201
+ else:
202
+ ret_value = typ(obj).astype(res_t)
203
+ # The condition is to distinguish 0-dim array (returned in case of
204
+ # scalar) and 1 element array
205
+ # e.g. np.array(0) and np.array([0])
206
+ if (
207
+ len(obj.shape) == 1
208
+ and len(obj) == 1
209
+ and not isinstance(ret_value, np.ndarray)
210
+ ):
211
+ ret_value = np.array([ret_value]).astype(res_t)
212
+
213
+ return ret_value
videochat2/lib/python3.10/site-packages/pandas/core/computation/check.py ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ from pandas.compat._optional import import_optional_dependency
4
+
5
+ ne = import_optional_dependency("numexpr", errors="warn")
6
+ NUMEXPR_INSTALLED = ne is not None
7
+ if NUMEXPR_INSTALLED:
8
+ NUMEXPR_VERSION = ne.__version__
9
+ else:
10
+ NUMEXPR_VERSION = None
11
+
12
+ __all__ = ["NUMEXPR_INSTALLED", "NUMEXPR_VERSION"]
videochat2/lib/python3.10/site-packages/pandas/core/computation/eval.py ADDED
@@ -0,0 +1,413 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Top level ``eval`` module.
3
+ """
4
+ from __future__ import annotations
5
+
6
+ import tokenize
7
+ from typing import TYPE_CHECKING
8
+ import warnings
9
+
10
+ from pandas.util._exceptions import find_stack_level
11
+ from pandas.util._validators import validate_bool_kwarg
12
+
13
+ from pandas.core.dtypes.common import is_extension_array_dtype
14
+
15
+ from pandas.core.computation.engines import ENGINES
16
+ from pandas.core.computation.expr import (
17
+ PARSERS,
18
+ Expr,
19
+ )
20
+ from pandas.core.computation.parsing import tokenize_string
21
+ from pandas.core.computation.scope import ensure_scope
22
+ from pandas.core.generic import NDFrame
23
+
24
+ from pandas.io.formats.printing import pprint_thing
25
+
26
+ if TYPE_CHECKING:
27
+ from pandas.core.computation.ops import BinOp
28
+
29
+
30
+ def _check_engine(engine: str | None) -> str:
31
+ """
32
+ Make sure a valid engine is passed.
33
+
34
+ Parameters
35
+ ----------
36
+ engine : str
37
+ String to validate.
38
+
39
+ Raises
40
+ ------
41
+ KeyError
42
+ * If an invalid engine is passed.
43
+ ImportError
44
+ * If numexpr was requested but doesn't exist.
45
+
46
+ Returns
47
+ -------
48
+ str
49
+ Engine name.
50
+ """
51
+ from pandas.core.computation.check import NUMEXPR_INSTALLED
52
+ from pandas.core.computation.expressions import USE_NUMEXPR
53
+
54
+ if engine is None:
55
+ engine = "numexpr" if USE_NUMEXPR else "python"
56
+
57
+ if engine not in ENGINES:
58
+ valid_engines = list(ENGINES.keys())
59
+ raise KeyError(
60
+ f"Invalid engine '{engine}' passed, valid engines are {valid_engines}"
61
+ )
62
+
63
+ # TODO: validate this in a more general way (thinking of future engines
64
+ # that won't necessarily be import-able)
65
+ # Could potentially be done on engine instantiation
66
+ if engine == "numexpr" and not NUMEXPR_INSTALLED:
67
+ raise ImportError(
68
+ "'numexpr' is not installed or an unsupported version. Cannot use "
69
+ "engine='numexpr' for query/eval if 'numexpr' is not installed"
70
+ )
71
+
72
+ return engine
73
+
74
+
75
+ def _check_parser(parser: str):
76
+ """
77
+ Make sure a valid parser is passed.
78
+
79
+ Parameters
80
+ ----------
81
+ parser : str
82
+
83
+ Raises
84
+ ------
85
+ KeyError
86
+ * If an invalid parser is passed
87
+ """
88
+ if parser not in PARSERS:
89
+ raise KeyError(
90
+ f"Invalid parser '{parser}' passed, valid parsers are {PARSERS.keys()}"
91
+ )
92
+
93
+
94
+ def _check_resolvers(resolvers):
95
+ if resolvers is not None:
96
+ for resolver in resolvers:
97
+ if not hasattr(resolver, "__getitem__"):
98
+ name = type(resolver).__name__
99
+ raise TypeError(
100
+ f"Resolver of type '{name}' does not "
101
+ "implement the __getitem__ method"
102
+ )
103
+
104
+
105
+ def _check_expression(expr):
106
+ """
107
+ Make sure an expression is not an empty string
108
+
109
+ Parameters
110
+ ----------
111
+ expr : object
112
+ An object that can be converted to a string
113
+
114
+ Raises
115
+ ------
116
+ ValueError
117
+ * If expr is an empty string
118
+ """
119
+ if not expr:
120
+ raise ValueError("expr cannot be an empty string")
121
+
122
+
123
+ def _convert_expression(expr) -> str:
124
+ """
125
+ Convert an object to an expression.
126
+
127
+ This function converts an object to an expression (a unicode string) and
128
+ checks to make sure it isn't empty after conversion. This is used to
129
+ convert operators to their string representation for recursive calls to
130
+ :func:`~pandas.eval`.
131
+
132
+ Parameters
133
+ ----------
134
+ expr : object
135
+ The object to be converted to a string.
136
+
137
+ Returns
138
+ -------
139
+ str
140
+ The string representation of an object.
141
+
142
+ Raises
143
+ ------
144
+ ValueError
145
+ * If the expression is empty.
146
+ """
147
+ s = pprint_thing(expr)
148
+ _check_expression(s)
149
+ return s
150
+
151
+
152
+ def _check_for_locals(expr: str, stack_level: int, parser: str):
153
+ at_top_of_stack = stack_level == 0
154
+ not_pandas_parser = parser != "pandas"
155
+
156
+ if not_pandas_parser:
157
+ msg = "The '@' prefix is only supported by the pandas parser"
158
+ elif at_top_of_stack:
159
+ msg = (
160
+ "The '@' prefix is not allowed in top-level eval calls.\n"
161
+ "please refer to your variables by name without the '@' prefix."
162
+ )
163
+
164
+ if at_top_of_stack or not_pandas_parser:
165
+ for toknum, tokval in tokenize_string(expr):
166
+ if toknum == tokenize.OP and tokval == "@":
167
+ raise SyntaxError(msg)
168
+
169
+
170
+ def eval(
171
+ expr: str | BinOp, # we leave BinOp out of the docstr bc it isn't for users
172
+ parser: str = "pandas",
173
+ engine: str | None = None,
174
+ local_dict=None,
175
+ global_dict=None,
176
+ resolvers=(),
177
+ level: int = 0,
178
+ target=None,
179
+ inplace: bool = False,
180
+ ):
181
+ """
182
+ Evaluate a Python expression as a string using various backends.
183
+
184
+ The following arithmetic operations are supported: ``+``, ``-``, ``*``,
185
+ ``/``, ``**``, ``%``, ``//`` (python engine only) along with the following
186
+ boolean operations: ``|`` (or), ``&`` (and), and ``~`` (not).
187
+ Additionally, the ``'pandas'`` parser allows the use of :keyword:`and`,
188
+ :keyword:`or`, and :keyword:`not` with the same semantics as the
189
+ corresponding bitwise operators. :class:`~pandas.Series` and
190
+ :class:`~pandas.DataFrame` objects are supported and behave as they would
191
+ with plain ol' Python evaluation.
192
+
193
+ Parameters
194
+ ----------
195
+ expr : str
196
+ The expression to evaluate. This string cannot contain any Python
197
+ `statements
198
+ <https://docs.python.org/3/reference/simple_stmts.html#simple-statements>`__,
199
+ only Python `expressions
200
+ <https://docs.python.org/3/reference/simple_stmts.html#expression-statements>`__.
201
+ parser : {'pandas', 'python'}, default 'pandas'
202
+ The parser to use to construct the syntax tree from the expression. The
203
+ default of ``'pandas'`` parses code slightly different than standard
204
+ Python. Alternatively, you can parse an expression using the
205
+ ``'python'`` parser to retain strict Python semantics. See the
206
+ :ref:`enhancing performance <enhancingperf.eval>` documentation for
207
+ more details.
208
+ engine : {'python', 'numexpr'}, default 'numexpr'
209
+
210
+ The engine used to evaluate the expression. Supported engines are
211
+
212
+ - None : tries to use ``numexpr``, falls back to ``python``
213
+ - ``'numexpr'`` : This default engine evaluates pandas objects using
214
+ numexpr for large speed ups in complex expressions with large frames.
215
+ - ``'python'`` : Performs operations as if you had ``eval``'d in top
216
+ level python. This engine is generally not that useful.
217
+
218
+ More backends may be available in the future.
219
+ local_dict : dict or None, optional
220
+ A dictionary of local variables, taken from locals() by default.
221
+ global_dict : dict or None, optional
222
+ A dictionary of global variables, taken from globals() by default.
223
+ resolvers : list of dict-like or None, optional
224
+ A list of objects implementing the ``__getitem__`` special method that
225
+ you can use to inject an additional collection of namespaces to use for
226
+ variable lookup. For example, this is used in the
227
+ :meth:`~DataFrame.query` method to inject the
228
+ ``DataFrame.index`` and ``DataFrame.columns``
229
+ variables that refer to their respective :class:`~pandas.DataFrame`
230
+ instance attributes.
231
+ level : int, optional
232
+ The number of prior stack frames to traverse and add to the current
233
+ scope. Most users will **not** need to change this parameter.
234
+ target : object, optional, default None
235
+ This is the target object for assignment. It is used when there is
236
+ variable assignment in the expression. If so, then `target` must
237
+ support item assignment with string keys, and if a copy is being
238
+ returned, it must also support `.copy()`.
239
+ inplace : bool, default False
240
+ If `target` is provided, and the expression mutates `target`, whether
241
+ to modify `target` inplace. Otherwise, return a copy of `target` with
242
+ the mutation.
243
+
244
+ Returns
245
+ -------
246
+ ndarray, numeric scalar, DataFrame, Series, or None
247
+ The completion value of evaluating the given code or None if ``inplace=True``.
248
+
249
+ Raises
250
+ ------
251
+ ValueError
252
+ There are many instances where such an error can be raised:
253
+
254
+ - `target=None`, but the expression is multiline.
255
+ - The expression is multiline, but not all them have item assignment.
256
+ An example of such an arrangement is this:
257
+
258
+ a = b + 1
259
+ a + 2
260
+
261
+ Here, there are expressions on different lines, making it multiline,
262
+ but the last line has no variable assigned to the output of `a + 2`.
263
+ - `inplace=True`, but the expression is missing item assignment.
264
+ - Item assignment is provided, but the `target` does not support
265
+ string item assignment.
266
+ - Item assignment is provided and `inplace=False`, but the `target`
267
+ does not support the `.copy()` method
268
+
269
+ See Also
270
+ --------
271
+ DataFrame.query : Evaluates a boolean expression to query the columns
272
+ of a frame.
273
+ DataFrame.eval : Evaluate a string describing operations on
274
+ DataFrame columns.
275
+
276
+ Notes
277
+ -----
278
+ The ``dtype`` of any objects involved in an arithmetic ``%`` operation are
279
+ recursively cast to ``float64``.
280
+
281
+ See the :ref:`enhancing performance <enhancingperf.eval>` documentation for
282
+ more details.
283
+
284
+ Examples
285
+ --------
286
+ >>> df = pd.DataFrame({"animal": ["dog", "pig"], "age": [10, 20]})
287
+ >>> df
288
+ animal age
289
+ 0 dog 10
290
+ 1 pig 20
291
+
292
+ We can add a new column using ``pd.eval``:
293
+
294
+ >>> pd.eval("double_age = df.age * 2", target=df)
295
+ animal age double_age
296
+ 0 dog 10 20
297
+ 1 pig 20 40
298
+ """
299
+ inplace = validate_bool_kwarg(inplace, "inplace")
300
+
301
+ exprs: list[str | BinOp]
302
+ if isinstance(expr, str):
303
+ _check_expression(expr)
304
+ exprs = [e.strip() for e in expr.splitlines() if e.strip() != ""]
305
+ else:
306
+ # ops.BinOp; for internal compat, not intended to be passed by users
307
+ exprs = [expr]
308
+ multi_line = len(exprs) > 1
309
+
310
+ if multi_line and target is None:
311
+ raise ValueError(
312
+ "multi-line expressions are only valid in the "
313
+ "context of data, use DataFrame.eval"
314
+ )
315
+ engine = _check_engine(engine)
316
+ _check_parser(parser)
317
+ _check_resolvers(resolvers)
318
+
319
+ ret = None
320
+ first_expr = True
321
+ target_modified = False
322
+
323
+ for expr in exprs:
324
+ expr = _convert_expression(expr)
325
+ _check_for_locals(expr, level, parser)
326
+
327
+ # get our (possibly passed-in) scope
328
+ env = ensure_scope(
329
+ level + 1,
330
+ global_dict=global_dict,
331
+ local_dict=local_dict,
332
+ resolvers=resolvers,
333
+ target=target,
334
+ )
335
+
336
+ parsed_expr = Expr(expr, engine=engine, parser=parser, env=env)
337
+
338
+ if engine == "numexpr" and (
339
+ is_extension_array_dtype(parsed_expr.terms.return_type)
340
+ or getattr(parsed_expr.terms, "operand_types", None) is not None
341
+ and any(
342
+ is_extension_array_dtype(elem)
343
+ for elem in parsed_expr.terms.operand_types
344
+ )
345
+ ):
346
+ warnings.warn(
347
+ "Engine has switched to 'python' because numexpr does not support "
348
+ "extension array dtypes. Please set your engine to python manually.",
349
+ RuntimeWarning,
350
+ stacklevel=find_stack_level(),
351
+ )
352
+ engine = "python"
353
+
354
+ # construct the engine and evaluate the parsed expression
355
+ eng = ENGINES[engine]
356
+ eng_inst = eng(parsed_expr)
357
+ ret = eng_inst.evaluate()
358
+
359
+ if parsed_expr.assigner is None:
360
+ if multi_line:
361
+ raise ValueError(
362
+ "Multi-line expressions are only valid "
363
+ "if all expressions contain an assignment"
364
+ )
365
+ if inplace:
366
+ raise ValueError("Cannot operate inplace if there is no assignment")
367
+
368
+ # assign if needed
369
+ assigner = parsed_expr.assigner
370
+ if env.target is not None and assigner is not None:
371
+ target_modified = True
372
+
373
+ # if returning a copy, copy only on the first assignment
374
+ if not inplace and first_expr:
375
+ try:
376
+ target = env.target.copy()
377
+ except AttributeError as err:
378
+ raise ValueError("Cannot return a copy of the target") from err
379
+ else:
380
+ target = env.target
381
+
382
+ # TypeError is most commonly raised (e.g. int, list), but you
383
+ # get IndexError if you try to do this assignment on np.ndarray.
384
+ # we will ignore numpy warnings here; e.g. if trying
385
+ # to use a non-numeric indexer
386
+ try:
387
+ with warnings.catch_warnings(record=True):
388
+ # TODO: Filter the warnings we actually care about here.
389
+ if inplace and isinstance(target, NDFrame):
390
+ target.loc[:, assigner] = ret
391
+ else:
392
+ target[assigner] = ret
393
+ except (TypeError, IndexError) as err:
394
+ raise ValueError("Cannot assign expression output to target") from err
395
+
396
+ if not resolvers:
397
+ resolvers = ({assigner: ret},)
398
+ else:
399
+ # existing resolver needs updated to handle
400
+ # case of mutating existing column in copy
401
+ for resolver in resolvers:
402
+ if assigner in resolver:
403
+ resolver[assigner] = ret
404
+ break
405
+ else:
406
+ resolvers += ({assigner: ret},)
407
+
408
+ ret = None
409
+ first_expr = False
410
+
411
+ # We want to exclude `inplace=None` as being False.
412
+ if inplace is False:
413
+ return target if target_modified else ret
videochat2/lib/python3.10/site-packages/pandas/core/computation/expr.py ADDED
@@ -0,0 +1,840 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ :func:`~pandas.eval` parsers.
3
+ """
4
+ from __future__ import annotations
5
+
6
+ import ast
7
+ from functools import (
8
+ partial,
9
+ reduce,
10
+ )
11
+ from keyword import iskeyword
12
+ import tokenize
13
+ from typing import (
14
+ Callable,
15
+ TypeVar,
16
+ )
17
+
18
+ import numpy as np
19
+
20
+ from pandas.compat import PY39
21
+ from pandas.errors import UndefinedVariableError
22
+
23
+ import pandas.core.common as com
24
+ from pandas.core.computation.ops import (
25
+ ARITH_OPS_SYMS,
26
+ BOOL_OPS_SYMS,
27
+ CMP_OPS_SYMS,
28
+ LOCAL_TAG,
29
+ MATHOPS,
30
+ REDUCTIONS,
31
+ UNARY_OPS_SYMS,
32
+ BinOp,
33
+ Constant,
34
+ Div,
35
+ FuncNode,
36
+ Op,
37
+ Term,
38
+ UnaryOp,
39
+ is_term,
40
+ )
41
+ from pandas.core.computation.parsing import (
42
+ clean_backtick_quoted_toks,
43
+ tokenize_string,
44
+ )
45
+ from pandas.core.computation.scope import Scope
46
+
47
+ from pandas.io.formats import printing
48
+
49
+
50
+ def _rewrite_assign(tok: tuple[int, str]) -> tuple[int, str]:
51
+ """
52
+ Rewrite the assignment operator for PyTables expressions that use ``=``
53
+ as a substitute for ``==``.
54
+
55
+ Parameters
56
+ ----------
57
+ tok : tuple of int, str
58
+ ints correspond to the all caps constants in the tokenize module
59
+
60
+ Returns
61
+ -------
62
+ tuple of int, str
63
+ Either the input or token or the replacement values
64
+ """
65
+ toknum, tokval = tok
66
+ return toknum, "==" if tokval == "=" else tokval
67
+
68
+
69
+ def _replace_booleans(tok: tuple[int, str]) -> tuple[int, str]:
70
+ """
71
+ Replace ``&`` with ``and`` and ``|`` with ``or`` so that bitwise
72
+ precedence is changed to boolean precedence.
73
+
74
+ Parameters
75
+ ----------
76
+ tok : tuple of int, str
77
+ ints correspond to the all caps constants in the tokenize module
78
+
79
+ Returns
80
+ -------
81
+ tuple of int, str
82
+ Either the input or token or the replacement values
83
+ """
84
+ toknum, tokval = tok
85
+ if toknum == tokenize.OP:
86
+ if tokval == "&":
87
+ return tokenize.NAME, "and"
88
+ elif tokval == "|":
89
+ return tokenize.NAME, "or"
90
+ return toknum, tokval
91
+ return toknum, tokval
92
+
93
+
94
+ def _replace_locals(tok: tuple[int, str]) -> tuple[int, str]:
95
+ """
96
+ Replace local variables with a syntactically valid name.
97
+
98
+ Parameters
99
+ ----------
100
+ tok : tuple of int, str
101
+ ints correspond to the all caps constants in the tokenize module
102
+
103
+ Returns
104
+ -------
105
+ tuple of int, str
106
+ Either the input or token or the replacement values
107
+
108
+ Notes
109
+ -----
110
+ This is somewhat of a hack in that we rewrite a string such as ``'@a'`` as
111
+ ``'__pd_eval_local_a'`` by telling the tokenizer that ``__pd_eval_local_``
112
+ is a ``tokenize.OP`` and to replace the ``'@'`` symbol with it.
113
+ """
114
+ toknum, tokval = tok
115
+ if toknum == tokenize.OP and tokval == "@":
116
+ return tokenize.OP, LOCAL_TAG
117
+ return toknum, tokval
118
+
119
+
120
+ def _compose2(f, g):
121
+ """
122
+ Compose 2 callables.
123
+ """
124
+ return lambda *args, **kwargs: f(g(*args, **kwargs))
125
+
126
+
127
+ def _compose(*funcs):
128
+ """
129
+ Compose 2 or more callables.
130
+ """
131
+ assert len(funcs) > 1, "At least 2 callables must be passed to compose"
132
+ return reduce(_compose2, funcs)
133
+
134
+
135
+ def _preparse(
136
+ source: str,
137
+ f=_compose(
138
+ _replace_locals, _replace_booleans, _rewrite_assign, clean_backtick_quoted_toks
139
+ ),
140
+ ) -> str:
141
+ """
142
+ Compose a collection of tokenization functions.
143
+
144
+ Parameters
145
+ ----------
146
+ source : str
147
+ A Python source code string
148
+ f : callable
149
+ This takes a tuple of (toknum, tokval) as its argument and returns a
150
+ tuple with the same structure but possibly different elements. Defaults
151
+ to the composition of ``_rewrite_assign``, ``_replace_booleans``, and
152
+ ``_replace_locals``.
153
+
154
+ Returns
155
+ -------
156
+ str
157
+ Valid Python source code
158
+
159
+ Notes
160
+ -----
161
+ The `f` parameter can be any callable that takes *and* returns input of the
162
+ form ``(toknum, tokval)``, where ``toknum`` is one of the constants from
163
+ the ``tokenize`` module and ``tokval`` is a string.
164
+ """
165
+ assert callable(f), "f must be callable"
166
+ return tokenize.untokenize(f(x) for x in tokenize_string(source))
167
+
168
+
169
+ def _is_type(t):
170
+ """
171
+ Factory for a type checking function of type ``t`` or tuple of types.
172
+ """
173
+ return lambda x: isinstance(x.value, t)
174
+
175
+
176
+ _is_list = _is_type(list)
177
+ _is_str = _is_type(str)
178
+
179
+
180
+ # partition all AST nodes
181
+ _all_nodes = frozenset(
182
+ node
183
+ for node in (getattr(ast, name) for name in dir(ast))
184
+ if isinstance(node, type) and issubclass(node, ast.AST)
185
+ )
186
+
187
+
188
+ def _filter_nodes(superclass, all_nodes=_all_nodes):
189
+ """
190
+ Filter out AST nodes that are subclasses of ``superclass``.
191
+ """
192
+ node_names = (node.__name__ for node in all_nodes if issubclass(node, superclass))
193
+ return frozenset(node_names)
194
+
195
+
196
+ _all_node_names = frozenset(map(lambda x: x.__name__, _all_nodes))
197
+ _mod_nodes = _filter_nodes(ast.mod)
198
+ _stmt_nodes = _filter_nodes(ast.stmt)
199
+ _expr_nodes = _filter_nodes(ast.expr)
200
+ _expr_context_nodes = _filter_nodes(ast.expr_context)
201
+ _boolop_nodes = _filter_nodes(ast.boolop)
202
+ _operator_nodes = _filter_nodes(ast.operator)
203
+ _unary_op_nodes = _filter_nodes(ast.unaryop)
204
+ _cmp_op_nodes = _filter_nodes(ast.cmpop)
205
+ _comprehension_nodes = _filter_nodes(ast.comprehension)
206
+ _handler_nodes = _filter_nodes(ast.excepthandler)
207
+ _arguments_nodes = _filter_nodes(ast.arguments)
208
+ _keyword_nodes = _filter_nodes(ast.keyword)
209
+ _alias_nodes = _filter_nodes(ast.alias)
210
+
211
+ if not PY39:
212
+ _slice_nodes = _filter_nodes(ast.slice)
213
+
214
+
215
+ # nodes that we don't support directly but are needed for parsing
216
+ _hacked_nodes = frozenset(["Assign", "Module", "Expr"])
217
+
218
+
219
+ _unsupported_expr_nodes = frozenset(
220
+ [
221
+ "Yield",
222
+ "GeneratorExp",
223
+ "IfExp",
224
+ "DictComp",
225
+ "SetComp",
226
+ "Repr",
227
+ "Lambda",
228
+ "Set",
229
+ "AST",
230
+ "Is",
231
+ "IsNot",
232
+ ]
233
+ )
234
+
235
+ # these nodes are low priority or won't ever be supported (e.g., AST)
236
+ _unsupported_nodes = (
237
+ _stmt_nodes
238
+ | _mod_nodes
239
+ | _handler_nodes
240
+ | _arguments_nodes
241
+ | _keyword_nodes
242
+ | _alias_nodes
243
+ | _expr_context_nodes
244
+ | _unsupported_expr_nodes
245
+ ) - _hacked_nodes
246
+
247
+ # we're adding a different assignment in some cases to be equality comparison
248
+ # and we don't want `stmt` and friends in their so get only the class whose
249
+ # names are capitalized
250
+ _base_supported_nodes = (_all_node_names - _unsupported_nodes) | _hacked_nodes
251
+ intersection = _unsupported_nodes & _base_supported_nodes
252
+ _msg = f"cannot both support and not support {intersection}"
253
+ assert not intersection, _msg
254
+
255
+
256
+ def _node_not_implemented(node_name: str) -> Callable[..., None]:
257
+ """
258
+ Return a function that raises a NotImplementedError with a passed node name.
259
+ """
260
+
261
+ def f(self, *args, **kwargs):
262
+ raise NotImplementedError(f"'{node_name}' nodes are not implemented")
263
+
264
+ return f
265
+
266
+
267
+ # should be bound by BaseExprVisitor but that creates a circular dependency:
268
+ # _T is used in disallow, but disallow is used to define BaseExprVisitor
269
+ # https://github.com/microsoft/pyright/issues/2315
270
+ _T = TypeVar("_T")
271
+
272
+
273
+ def disallow(nodes: set[str]) -> Callable[[type[_T]], type[_T]]:
274
+ """
275
+ Decorator to disallow certain nodes from parsing. Raises a
276
+ NotImplementedError instead.
277
+
278
+ Returns
279
+ -------
280
+ callable
281
+ """
282
+
283
+ def disallowed(cls: type[_T]) -> type[_T]:
284
+ # error: "Type[_T]" has no attribute "unsupported_nodes"
285
+ cls.unsupported_nodes = () # type: ignore[attr-defined]
286
+ for node in nodes:
287
+ new_method = _node_not_implemented(node)
288
+ name = f"visit_{node}"
289
+ # error: "Type[_T]" has no attribute "unsupported_nodes"
290
+ cls.unsupported_nodes += (name,) # type: ignore[attr-defined]
291
+ setattr(cls, name, new_method)
292
+ return cls
293
+
294
+ return disallowed
295
+
296
+
297
+ def _op_maker(op_class, op_symbol):
298
+ """
299
+ Return a function to create an op class with its symbol already passed.
300
+
301
+ Returns
302
+ -------
303
+ callable
304
+ """
305
+
306
+ def f(self, node, *args, **kwargs):
307
+ """
308
+ Return a partial function with an Op subclass with an operator already passed.
309
+
310
+ Returns
311
+ -------
312
+ callable
313
+ """
314
+ return partial(op_class, op_symbol, *args, **kwargs)
315
+
316
+ return f
317
+
318
+
319
+ _op_classes = {"binary": BinOp, "unary": UnaryOp}
320
+
321
+
322
+ def add_ops(op_classes):
323
+ """
324
+ Decorator to add default implementation of ops.
325
+ """
326
+
327
+ def f(cls):
328
+ for op_attr_name, op_class in op_classes.items():
329
+ ops = getattr(cls, f"{op_attr_name}_ops")
330
+ ops_map = getattr(cls, f"{op_attr_name}_op_nodes_map")
331
+ for op in ops:
332
+ op_node = ops_map[op]
333
+ if op_node is not None:
334
+ made_op = _op_maker(op_class, op)
335
+ setattr(cls, f"visit_{op_node}", made_op)
336
+ return cls
337
+
338
+ return f
339
+
340
+
341
+ @disallow(_unsupported_nodes)
342
+ @add_ops(_op_classes)
343
+ class BaseExprVisitor(ast.NodeVisitor):
344
+ """
345
+ Custom ast walker. Parsers of other engines should subclass this class
346
+ if necessary.
347
+
348
+ Parameters
349
+ ----------
350
+ env : Scope
351
+ engine : str
352
+ parser : str
353
+ preparser : callable
354
+ """
355
+
356
+ const_type: type[Term] = Constant
357
+ term_type = Term
358
+
359
+ binary_ops = CMP_OPS_SYMS + BOOL_OPS_SYMS + ARITH_OPS_SYMS
360
+ binary_op_nodes = (
361
+ "Gt",
362
+ "Lt",
363
+ "GtE",
364
+ "LtE",
365
+ "Eq",
366
+ "NotEq",
367
+ "In",
368
+ "NotIn",
369
+ "BitAnd",
370
+ "BitOr",
371
+ "And",
372
+ "Or",
373
+ "Add",
374
+ "Sub",
375
+ "Mult",
376
+ None,
377
+ "Pow",
378
+ "FloorDiv",
379
+ "Mod",
380
+ )
381
+ binary_op_nodes_map = dict(zip(binary_ops, binary_op_nodes))
382
+
383
+ unary_ops = UNARY_OPS_SYMS
384
+ unary_op_nodes = "UAdd", "USub", "Invert", "Not"
385
+ unary_op_nodes_map = dict(zip(unary_ops, unary_op_nodes))
386
+
387
+ rewrite_map = {
388
+ ast.Eq: ast.In,
389
+ ast.NotEq: ast.NotIn,
390
+ ast.In: ast.In,
391
+ ast.NotIn: ast.NotIn,
392
+ }
393
+
394
+ unsupported_nodes: tuple[str, ...]
395
+
396
+ def __init__(self, env, engine, parser, preparser=_preparse) -> None:
397
+ self.env = env
398
+ self.engine = engine
399
+ self.parser = parser
400
+ self.preparser = preparser
401
+ self.assigner = None
402
+
403
+ def visit(self, node, **kwargs):
404
+ if isinstance(node, str):
405
+ clean = self.preparser(node)
406
+ try:
407
+ node = ast.fix_missing_locations(ast.parse(clean))
408
+ except SyntaxError as e:
409
+ if any(iskeyword(x) for x in clean.split()):
410
+ e.msg = "Python keyword not valid identifier in numexpr query"
411
+ raise e
412
+
413
+ method = f"visit_{type(node).__name__}"
414
+ visitor = getattr(self, method)
415
+ return visitor(node, **kwargs)
416
+
417
+ def visit_Module(self, node, **kwargs):
418
+ if len(node.body) != 1:
419
+ raise SyntaxError("only a single expression is allowed")
420
+ expr = node.body[0]
421
+ return self.visit(expr, **kwargs)
422
+
423
+ def visit_Expr(self, node, **kwargs):
424
+ return self.visit(node.value, **kwargs)
425
+
426
+ def _rewrite_membership_op(self, node, left, right):
427
+ # the kind of the operator (is actually an instance)
428
+ op_instance = node.op
429
+ op_type = type(op_instance)
430
+
431
+ # must be two terms and the comparison operator must be ==/!=/in/not in
432
+ if is_term(left) and is_term(right) and op_type in self.rewrite_map:
433
+ left_list, right_list = map(_is_list, (left, right))
434
+ left_str, right_str = map(_is_str, (left, right))
435
+
436
+ # if there are any strings or lists in the expression
437
+ if left_list or right_list or left_str or right_str:
438
+ op_instance = self.rewrite_map[op_type]()
439
+
440
+ # pop the string variable out of locals and replace it with a list
441
+ # of one string, kind of a hack
442
+ if right_str:
443
+ name = self.env.add_tmp([right.value])
444
+ right = self.term_type(name, self.env)
445
+
446
+ if left_str:
447
+ name = self.env.add_tmp([left.value])
448
+ left = self.term_type(name, self.env)
449
+
450
+ op = self.visit(op_instance)
451
+ return op, op_instance, left, right
452
+
453
+ def _maybe_transform_eq_ne(self, node, left=None, right=None):
454
+ if left is None:
455
+ left = self.visit(node.left, side="left")
456
+ if right is None:
457
+ right = self.visit(node.right, side="right")
458
+ op, op_class, left, right = self._rewrite_membership_op(node, left, right)
459
+ return op, op_class, left, right
460
+
461
+ def _maybe_downcast_constants(self, left, right):
462
+ f32 = np.dtype(np.float32)
463
+ if (
464
+ left.is_scalar
465
+ and hasattr(left, "value")
466
+ and not right.is_scalar
467
+ and right.return_type == f32
468
+ ):
469
+ # right is a float32 array, left is a scalar
470
+ name = self.env.add_tmp(np.float32(left.value))
471
+ left = self.term_type(name, self.env)
472
+ if (
473
+ right.is_scalar
474
+ and hasattr(right, "value")
475
+ and not left.is_scalar
476
+ and left.return_type == f32
477
+ ):
478
+ # left is a float32 array, right is a scalar
479
+ name = self.env.add_tmp(np.float32(right.value))
480
+ right = self.term_type(name, self.env)
481
+
482
+ return left, right
483
+
484
+ def _maybe_eval(self, binop, eval_in_python):
485
+ # eval `in` and `not in` (for now) in "partial" python space
486
+ # things that can be evaluated in "eval" space will be turned into
487
+ # temporary variables. for example,
488
+ # [1,2] in a + 2 * b
489
+ # in that case a + 2 * b will be evaluated using numexpr, and the "in"
490
+ # call will be evaluated using isin (in python space)
491
+ return binop.evaluate(
492
+ self.env, self.engine, self.parser, self.term_type, eval_in_python
493
+ )
494
+
495
+ def _maybe_evaluate_binop(
496
+ self,
497
+ op,
498
+ op_class,
499
+ lhs,
500
+ rhs,
501
+ eval_in_python=("in", "not in"),
502
+ maybe_eval_in_python=("==", "!=", "<", ">", "<=", ">="),
503
+ ):
504
+ res = op(lhs, rhs)
505
+
506
+ if res.has_invalid_return_type:
507
+ raise TypeError(
508
+ f"unsupported operand type(s) for {res.op}: "
509
+ f"'{lhs.type}' and '{rhs.type}'"
510
+ )
511
+
512
+ if self.engine != "pytables" and (
513
+ res.op in CMP_OPS_SYMS
514
+ and getattr(lhs, "is_datetime", False)
515
+ or getattr(rhs, "is_datetime", False)
516
+ ):
517
+ # all date ops must be done in python bc numexpr doesn't work
518
+ # well with NaT
519
+ return self._maybe_eval(res, self.binary_ops)
520
+
521
+ if res.op in eval_in_python:
522
+ # "in"/"not in" ops are always evaluated in python
523
+ return self._maybe_eval(res, eval_in_python)
524
+ elif self.engine != "pytables":
525
+ if (
526
+ getattr(lhs, "return_type", None) == object
527
+ or getattr(rhs, "return_type", None) == object
528
+ ):
529
+ # evaluate "==" and "!=" in python if either of our operands
530
+ # has an object return type
531
+ return self._maybe_eval(res, eval_in_python + maybe_eval_in_python)
532
+ return res
533
+
534
+ def visit_BinOp(self, node, **kwargs):
535
+ op, op_class, left, right = self._maybe_transform_eq_ne(node)
536
+ left, right = self._maybe_downcast_constants(left, right)
537
+ return self._maybe_evaluate_binop(op, op_class, left, right)
538
+
539
+ def visit_Div(self, node, **kwargs):
540
+ return lambda lhs, rhs: Div(lhs, rhs)
541
+
542
+ def visit_UnaryOp(self, node, **kwargs):
543
+ op = self.visit(node.op)
544
+ operand = self.visit(node.operand)
545
+ return op(operand)
546
+
547
+ def visit_Name(self, node, **kwargs):
548
+ return self.term_type(node.id, self.env, **kwargs)
549
+
550
+ def visit_NameConstant(self, node, **kwargs) -> Term:
551
+ return self.const_type(node.value, self.env)
552
+
553
+ def visit_Num(self, node, **kwargs) -> Term:
554
+ return self.const_type(node.n, self.env)
555
+
556
+ def visit_Constant(self, node, **kwargs) -> Term:
557
+ return self.const_type(node.n, self.env)
558
+
559
+ def visit_Str(self, node, **kwargs):
560
+ name = self.env.add_tmp(node.s)
561
+ return self.term_type(name, self.env)
562
+
563
+ def visit_List(self, node, **kwargs):
564
+ name = self.env.add_tmp([self.visit(e)(self.env) for e in node.elts])
565
+ return self.term_type(name, self.env)
566
+
567
+ visit_Tuple = visit_List
568
+
569
+ def visit_Index(self, node, **kwargs):
570
+ """df.index[4]"""
571
+ return self.visit(node.value)
572
+
573
+ def visit_Subscript(self, node, **kwargs):
574
+ from pandas import eval as pd_eval
575
+
576
+ value = self.visit(node.value)
577
+ slobj = self.visit(node.slice)
578
+ result = pd_eval(
579
+ slobj, local_dict=self.env, engine=self.engine, parser=self.parser
580
+ )
581
+ try:
582
+ # a Term instance
583
+ v = value.value[result]
584
+ except AttributeError:
585
+ # an Op instance
586
+ lhs = pd_eval(
587
+ value, local_dict=self.env, engine=self.engine, parser=self.parser
588
+ )
589
+ v = lhs[result]
590
+ name = self.env.add_tmp(v)
591
+ return self.term_type(name, env=self.env)
592
+
593
+ def visit_Slice(self, node, **kwargs):
594
+ """df.index[slice(4,6)]"""
595
+ lower = node.lower
596
+ if lower is not None:
597
+ lower = self.visit(lower).value
598
+ upper = node.upper
599
+ if upper is not None:
600
+ upper = self.visit(upper).value
601
+ step = node.step
602
+ if step is not None:
603
+ step = self.visit(step).value
604
+
605
+ return slice(lower, upper, step)
606
+
607
+ def visit_Assign(self, node, **kwargs):
608
+ """
609
+ support a single assignment node, like
610
+
611
+ c = a + b
612
+
613
+ set the assigner at the top level, must be a Name node which
614
+ might or might not exist in the resolvers
615
+
616
+ """
617
+ if len(node.targets) != 1:
618
+ raise SyntaxError("can only assign a single expression")
619
+ if not isinstance(node.targets[0], ast.Name):
620
+ raise SyntaxError("left hand side of an assignment must be a single name")
621
+ if self.env.target is None:
622
+ raise ValueError("cannot assign without a target object")
623
+
624
+ try:
625
+ assigner = self.visit(node.targets[0], **kwargs)
626
+ except UndefinedVariableError:
627
+ assigner = node.targets[0].id
628
+
629
+ self.assigner = getattr(assigner, "name", assigner)
630
+ if self.assigner is None:
631
+ raise SyntaxError(
632
+ "left hand side of an assignment must be a single resolvable name"
633
+ )
634
+
635
+ return self.visit(node.value, **kwargs)
636
+
637
+ def visit_Attribute(self, node, **kwargs):
638
+ attr = node.attr
639
+ value = node.value
640
+
641
+ ctx = node.ctx
642
+ if isinstance(ctx, ast.Load):
643
+ # resolve the value
644
+ resolved = self.visit(value).value
645
+ try:
646
+ v = getattr(resolved, attr)
647
+ name = self.env.add_tmp(v)
648
+ return self.term_type(name, self.env)
649
+ except AttributeError:
650
+ # something like datetime.datetime where scope is overridden
651
+ if isinstance(value, ast.Name) and value.id == attr:
652
+ return resolved
653
+ raise
654
+
655
+ raise ValueError(f"Invalid Attribute context {type(ctx).__name__}")
656
+
657
+ def visit_Call(self, node, side=None, **kwargs):
658
+ if isinstance(node.func, ast.Attribute) and node.func.attr != "__call__":
659
+ res = self.visit_Attribute(node.func)
660
+ elif not isinstance(node.func, ast.Name):
661
+ raise TypeError("Only named functions are supported")
662
+ else:
663
+ try:
664
+ res = self.visit(node.func)
665
+ except UndefinedVariableError:
666
+ # Check if this is a supported function name
667
+ try:
668
+ res = FuncNode(node.func.id)
669
+ except ValueError:
670
+ # Raise original error
671
+ raise
672
+
673
+ if res is None:
674
+ # error: "expr" has no attribute "id"
675
+ raise ValueError(
676
+ f"Invalid function call {node.func.id}" # type: ignore[attr-defined]
677
+ )
678
+ if hasattr(res, "value"):
679
+ res = res.value
680
+
681
+ if isinstance(res, FuncNode):
682
+ new_args = [self.visit(arg) for arg in node.args]
683
+
684
+ if node.keywords:
685
+ raise TypeError(
686
+ f'Function "{res.name}" does not support keyword arguments'
687
+ )
688
+
689
+ return res(*new_args)
690
+
691
+ else:
692
+ new_args = [self.visit(arg)(self.env) for arg in node.args]
693
+
694
+ for key in node.keywords:
695
+ if not isinstance(key, ast.keyword):
696
+ # error: "expr" has no attribute "id"
697
+ raise ValueError(
698
+ "keyword error in function call " # type: ignore[attr-defined]
699
+ f"'{node.func.id}'"
700
+ )
701
+
702
+ if key.arg:
703
+ kwargs[key.arg] = self.visit(key.value)(self.env)
704
+
705
+ name = self.env.add_tmp(res(*new_args, **kwargs))
706
+ return self.term_type(name=name, env=self.env)
707
+
708
+ def translate_In(self, op):
709
+ return op
710
+
711
+ def visit_Compare(self, node, **kwargs):
712
+ ops = node.ops
713
+ comps = node.comparators
714
+
715
+ # base case: we have something like a CMP b
716
+ if len(comps) == 1:
717
+ op = self.translate_In(ops[0])
718
+ binop = ast.BinOp(op=op, left=node.left, right=comps[0])
719
+ return self.visit(binop)
720
+
721
+ # recursive case: we have a chained comparison, a CMP b CMP c, etc.
722
+ left = node.left
723
+ values = []
724
+ for op, comp in zip(ops, comps):
725
+ new_node = self.visit(
726
+ ast.Compare(comparators=[comp], left=left, ops=[self.translate_In(op)])
727
+ )
728
+ left = comp
729
+ values.append(new_node)
730
+ return self.visit(ast.BoolOp(op=ast.And(), values=values))
731
+
732
+ def _try_visit_binop(self, bop):
733
+ if isinstance(bop, (Op, Term)):
734
+ return bop
735
+ return self.visit(bop)
736
+
737
+ def visit_BoolOp(self, node, **kwargs):
738
+ def visitor(x, y):
739
+ lhs = self._try_visit_binop(x)
740
+ rhs = self._try_visit_binop(y)
741
+
742
+ op, op_class, lhs, rhs = self._maybe_transform_eq_ne(node, lhs, rhs)
743
+ return self._maybe_evaluate_binop(op, node.op, lhs, rhs)
744
+
745
+ operands = node.values
746
+ return reduce(visitor, operands)
747
+
748
+
749
+ _python_not_supported = frozenset(["Dict", "BoolOp", "In", "NotIn"])
750
+ _numexpr_supported_calls = frozenset(REDUCTIONS + MATHOPS)
751
+
752
+
753
+ @disallow(
754
+ (_unsupported_nodes | _python_not_supported)
755
+ - (_boolop_nodes | frozenset(["BoolOp", "Attribute", "In", "NotIn", "Tuple"]))
756
+ )
757
+ class PandasExprVisitor(BaseExprVisitor):
758
+ def __init__(
759
+ self,
760
+ env,
761
+ engine,
762
+ parser,
763
+ preparser=partial(
764
+ _preparse,
765
+ f=_compose(_replace_locals, _replace_booleans, clean_backtick_quoted_toks),
766
+ ),
767
+ ) -> None:
768
+ super().__init__(env, engine, parser, preparser)
769
+
770
+
771
+ @disallow(_unsupported_nodes | _python_not_supported | frozenset(["Not"]))
772
+ class PythonExprVisitor(BaseExprVisitor):
773
+ def __init__(
774
+ self, env, engine, parser, preparser=lambda source, f=None: source
775
+ ) -> None:
776
+ super().__init__(env, engine, parser, preparser=preparser)
777
+
778
+
779
+ class Expr:
780
+ """
781
+ Object encapsulating an expression.
782
+
783
+ Parameters
784
+ ----------
785
+ expr : str
786
+ engine : str, optional, default 'numexpr'
787
+ parser : str, optional, default 'pandas'
788
+ env : Scope, optional, default None
789
+ level : int, optional, default 2
790
+ """
791
+
792
+ env: Scope
793
+ engine: str
794
+ parser: str
795
+
796
+ def __init__(
797
+ self,
798
+ expr,
799
+ engine: str = "numexpr",
800
+ parser: str = "pandas",
801
+ env: Scope | None = None,
802
+ level: int = 0,
803
+ ) -> None:
804
+ self.expr = expr
805
+ self.env = env or Scope(level=level + 1)
806
+ self.engine = engine
807
+ self.parser = parser
808
+ self._visitor = PARSERS[parser](self.env, self.engine, self.parser)
809
+ self.terms = self.parse()
810
+
811
+ @property
812
+ def assigner(self):
813
+ return getattr(self._visitor, "assigner", None)
814
+
815
+ def __call__(self):
816
+ return self.terms(self.env)
817
+
818
+ def __repr__(self) -> str:
819
+ return printing.pprint_thing(self.terms)
820
+
821
+ def __len__(self) -> int:
822
+ return len(self.expr)
823
+
824
+ def parse(self):
825
+ """
826
+ Parse an expression.
827
+ """
828
+ return self._visitor.visit(self.expr)
829
+
830
+ @property
831
+ def names(self):
832
+ """
833
+ Get the names in an expression.
834
+ """
835
+ if is_term(self.terms):
836
+ return frozenset([self.terms.name])
837
+ return frozenset(term.name for term in com.flatten(self.terms))
838
+
839
+
840
+ PARSERS = {"python": PythonExprVisitor, "pandas": PandasExprVisitor}
videochat2/lib/python3.10/site-packages/pandas/io/__init__.py ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import TYPE_CHECKING
2
+
3
+ if TYPE_CHECKING:
4
+ # import modules that have public classes/functions
5
+ from pandas.io import (
6
+ formats,
7
+ json,
8
+ stata,
9
+ )
10
+
11
+ # and mark only those modules as public
12
+ __all__ = ["formats", "json", "stata"]
videochat2/lib/python3.10/site-packages/pandas/io/_util.py ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ from pandas.compat._optional import import_optional_dependency
4
+
5
+ import pandas as pd
6
+
7
+
8
+ def _arrow_dtype_mapping() -> dict:
9
+ pa = import_optional_dependency("pyarrow")
10
+ return {
11
+ pa.int8(): pd.Int8Dtype(),
12
+ pa.int16(): pd.Int16Dtype(),
13
+ pa.int32(): pd.Int32Dtype(),
14
+ pa.int64(): pd.Int64Dtype(),
15
+ pa.uint8(): pd.UInt8Dtype(),
16
+ pa.uint16(): pd.UInt16Dtype(),
17
+ pa.uint32(): pd.UInt32Dtype(),
18
+ pa.uint64(): pd.UInt64Dtype(),
19
+ pa.bool_(): pd.BooleanDtype(),
20
+ pa.string(): pd.StringDtype(),
21
+ pa.float32(): pd.Float32Dtype(),
22
+ pa.float64(): pd.Float64Dtype(),
23
+ }
videochat2/lib/python3.10/site-packages/pandas/io/api.py ADDED
@@ -0,0 +1,65 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Data IO api
3
+ """
4
+
5
+ from pandas.io.clipboards import read_clipboard
6
+ from pandas.io.excel import (
7
+ ExcelFile,
8
+ ExcelWriter,
9
+ read_excel,
10
+ )
11
+ from pandas.io.feather_format import read_feather
12
+ from pandas.io.gbq import read_gbq
13
+ from pandas.io.html import read_html
14
+ from pandas.io.json import read_json
15
+ from pandas.io.orc import read_orc
16
+ from pandas.io.parquet import read_parquet
17
+ from pandas.io.parsers import (
18
+ read_csv,
19
+ read_fwf,
20
+ read_table,
21
+ )
22
+ from pandas.io.pickle import (
23
+ read_pickle,
24
+ to_pickle,
25
+ )
26
+ from pandas.io.pytables import (
27
+ HDFStore,
28
+ read_hdf,
29
+ )
30
+ from pandas.io.sas import read_sas
31
+ from pandas.io.spss import read_spss
32
+ from pandas.io.sql import (
33
+ read_sql,
34
+ read_sql_query,
35
+ read_sql_table,
36
+ )
37
+ from pandas.io.stata import read_stata
38
+ from pandas.io.xml import read_xml
39
+
40
+ __all__ = [
41
+ "ExcelFile",
42
+ "ExcelWriter",
43
+ "HDFStore",
44
+ "read_clipboard",
45
+ "read_csv",
46
+ "read_excel",
47
+ "read_feather",
48
+ "read_fwf",
49
+ "read_gbq",
50
+ "read_hdf",
51
+ "read_html",
52
+ "read_json",
53
+ "read_orc",
54
+ "read_parquet",
55
+ "read_pickle",
56
+ "read_sas",
57
+ "read_spss",
58
+ "read_sql",
59
+ "read_sql_query",
60
+ "read_sql_table",
61
+ "read_stata",
62
+ "read_table",
63
+ "read_xml",
64
+ "to_pickle",
65
+ ]
videochat2/lib/python3.10/site-packages/pandas/io/clipboards.py ADDED
@@ -0,0 +1,178 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """ io on the clipboard """
2
+ from __future__ import annotations
3
+
4
+ from io import StringIO
5
+ from typing import TYPE_CHECKING
6
+ import warnings
7
+
8
+ from pandas._libs import lib
9
+ from pandas.util._exceptions import find_stack_level
10
+ from pandas.util._validators import check_dtype_backend
11
+
12
+ from pandas.core.dtypes.generic import ABCDataFrame
13
+
14
+ from pandas import (
15
+ get_option,
16
+ option_context,
17
+ )
18
+
19
+ if TYPE_CHECKING:
20
+ from pandas._typing import DtypeBackend
21
+
22
+
23
+ def read_clipboard(
24
+ sep: str = r"\s+",
25
+ dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,
26
+ **kwargs,
27
+ ): # pragma: no cover
28
+ r"""
29
+ Read text from clipboard and pass to read_csv.
30
+
31
+ Parameters
32
+ ----------
33
+ sep : str, default '\s+'
34
+ A string or regex delimiter. The default of '\s+' denotes
35
+ one or more whitespace characters.
36
+
37
+ dtype_backend : {"numpy_nullable", "pyarrow"}, defaults to NumPy backed DataFrames
38
+ Which dtype_backend to use, e.g. whether a DataFrame should have NumPy
39
+ arrays, nullable dtypes are used for all dtypes that have a nullable
40
+ implementation when "numpy_nullable" is set, pyarrow is used for all
41
+ dtypes if "pyarrow" is set.
42
+
43
+ The dtype_backends are still experimential.
44
+
45
+ .. versionadded:: 2.0
46
+
47
+ **kwargs
48
+ See read_csv for the full argument list.
49
+
50
+ Returns
51
+ -------
52
+ DataFrame
53
+ A parsed DataFrame object.
54
+ """
55
+ encoding = kwargs.pop("encoding", "utf-8")
56
+
57
+ # only utf-8 is valid for passed value because that's what clipboard
58
+ # supports
59
+ if encoding is not None and encoding.lower().replace("-", "") != "utf8":
60
+ raise NotImplementedError("reading from clipboard only supports utf-8 encoding")
61
+
62
+ check_dtype_backend(dtype_backend)
63
+
64
+ from pandas.io.clipboard import clipboard_get
65
+ from pandas.io.parsers import read_csv
66
+
67
+ text = clipboard_get()
68
+
69
+ # Try to decode (if needed, as "text" might already be a string here).
70
+ try:
71
+ text = text.decode(kwargs.get("encoding") or get_option("display.encoding"))
72
+ except AttributeError:
73
+ pass
74
+
75
+ # Excel copies into clipboard with \t separation
76
+ # inspect no more then the 10 first lines, if they
77
+ # all contain an equal number (>0) of tabs, infer
78
+ # that this came from excel and set 'sep' accordingly
79
+ lines = text[:10000].split("\n")[:-1][:10]
80
+
81
+ # Need to remove leading white space, since read_csv
82
+ # accepts:
83
+ # a b
84
+ # 0 1 2
85
+ # 1 3 4
86
+
87
+ counts = {x.lstrip(" ").count("\t") for x in lines}
88
+ if len(lines) > 1 and len(counts) == 1 and counts.pop() != 0:
89
+ sep = "\t"
90
+ # check the number of leading tabs in the first line
91
+ # to account for index columns
92
+ index_length = len(lines[0]) - len(lines[0].lstrip(" \t"))
93
+ if index_length != 0:
94
+ kwargs.setdefault("index_col", list(range(index_length)))
95
+
96
+ # Edge case where sep is specified to be None, return to default
97
+ if sep is None and kwargs.get("delim_whitespace") is None:
98
+ sep = r"\s+"
99
+
100
+ # Regex separator currently only works with python engine.
101
+ # Default to python if separator is multi-character (regex)
102
+ if len(sep) > 1 and kwargs.get("engine") is None:
103
+ kwargs["engine"] = "python"
104
+ elif len(sep) > 1 and kwargs.get("engine") == "c":
105
+ warnings.warn(
106
+ "read_clipboard with regex separator does not work properly with c engine.",
107
+ stacklevel=find_stack_level(),
108
+ )
109
+
110
+ return read_csv(StringIO(text), sep=sep, dtype_backend=dtype_backend, **kwargs)
111
+
112
+
113
+ def to_clipboard(
114
+ obj, excel: bool | None = True, sep: str | None = None, **kwargs
115
+ ) -> None: # pragma: no cover
116
+ """
117
+ Attempt to write text representation of object to the system clipboard
118
+ The clipboard can be then pasted into Excel for example.
119
+
120
+ Parameters
121
+ ----------
122
+ obj : the object to write to the clipboard
123
+ excel : bool, defaults to True
124
+ if True, use the provided separator, writing in a csv
125
+ format for allowing easy pasting into excel.
126
+ if False, write a string representation of the object
127
+ to the clipboard
128
+ sep : optional, defaults to tab
129
+ other keywords are passed to to_csv
130
+
131
+ Notes
132
+ -----
133
+ Requirements for your platform
134
+ - Linux: xclip, or xsel (with PyQt4 modules)
135
+ - Windows:
136
+ - OS X:
137
+ """
138
+ encoding = kwargs.pop("encoding", "utf-8")
139
+
140
+ # testing if an invalid encoding is passed to clipboard
141
+ if encoding is not None and encoding.lower().replace("-", "") != "utf8":
142
+ raise ValueError("clipboard only supports utf-8 encoding")
143
+
144
+ from pandas.io.clipboard import clipboard_set
145
+
146
+ if excel is None:
147
+ excel = True
148
+
149
+ if excel:
150
+ try:
151
+ if sep is None:
152
+ sep = "\t"
153
+ buf = StringIO()
154
+
155
+ # clipboard_set (pyperclip) expects unicode
156
+ obj.to_csv(buf, sep=sep, encoding="utf-8", **kwargs)
157
+ text = buf.getvalue()
158
+
159
+ clipboard_set(text)
160
+ return
161
+ except TypeError:
162
+ warnings.warn(
163
+ "to_clipboard in excel mode requires a single character separator.",
164
+ stacklevel=find_stack_level(),
165
+ )
166
+ elif sep is not None:
167
+ warnings.warn(
168
+ "to_clipboard with excel=False ignores the sep argument.",
169
+ stacklevel=find_stack_level(),
170
+ )
171
+
172
+ if isinstance(obj, ABCDataFrame):
173
+ # str(df) has various unhelpful defaults, like truncation
174
+ with option_context("display.max_colwidth", None):
175
+ objstr = obj.to_string(**kwargs)
176
+ else:
177
+ objstr = str(obj)
178
+ clipboard_set(objstr)
videochat2/lib/python3.10/site-packages/pandas/io/common.py ADDED
@@ -0,0 +1,1253 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Common IO api utilities"""
2
+ from __future__ import annotations
3
+
4
+ from abc import (
5
+ ABC,
6
+ abstractmethod,
7
+ )
8
+ import codecs
9
+ from collections import defaultdict
10
+ import dataclasses
11
+ import functools
12
+ import gzip
13
+ from io import (
14
+ BufferedIOBase,
15
+ BytesIO,
16
+ RawIOBase,
17
+ StringIO,
18
+ TextIOBase,
19
+ TextIOWrapper,
20
+ )
21
+ import mmap
22
+ import os
23
+ from pathlib import Path
24
+ import re
25
+ import tarfile
26
+ from typing import (
27
+ IO,
28
+ Any,
29
+ AnyStr,
30
+ DefaultDict,
31
+ Generic,
32
+ Hashable,
33
+ Literal,
34
+ Mapping,
35
+ Sequence,
36
+ TypeVar,
37
+ cast,
38
+ overload,
39
+ )
40
+ from urllib.parse import (
41
+ urljoin,
42
+ urlparse as parse_url,
43
+ uses_netloc,
44
+ uses_params,
45
+ uses_relative,
46
+ )
47
+ import warnings
48
+ import zipfile
49
+
50
+ from pandas._typing import (
51
+ BaseBuffer,
52
+ CompressionDict,
53
+ CompressionOptions,
54
+ FilePath,
55
+ ReadBuffer,
56
+ ReadCsvBuffer,
57
+ StorageOptions,
58
+ WriteBuffer,
59
+ )
60
+ from pandas.compat import get_lzma_file
61
+ from pandas.compat._optional import import_optional_dependency
62
+ from pandas.compat.compressors import BZ2File as _BZ2File
63
+ from pandas.util._decorators import doc
64
+ from pandas.util._exceptions import find_stack_level
65
+
66
+ from pandas.core.dtypes.common import (
67
+ is_bool,
68
+ is_file_like,
69
+ is_integer,
70
+ is_list_like,
71
+ )
72
+
73
+ from pandas.core.indexes.api import MultiIndex
74
+ from pandas.core.shared_docs import _shared_docs
75
+
76
+ _VALID_URLS = set(uses_relative + uses_netloc + uses_params)
77
+ _VALID_URLS.discard("")
78
+ _RFC_3986_PATTERN = re.compile(r"^[A-Za-z][A-Za-z0-9+\-+.]*://")
79
+
80
+ BaseBufferT = TypeVar("BaseBufferT", bound=BaseBuffer)
81
+
82
+
83
+ @dataclasses.dataclass
84
+ class IOArgs:
85
+ """
86
+ Return value of io/common.py:_get_filepath_or_buffer.
87
+ """
88
+
89
+ filepath_or_buffer: str | BaseBuffer
90
+ encoding: str
91
+ mode: str
92
+ compression: CompressionDict
93
+ should_close: bool = False
94
+
95
+
96
+ @dataclasses.dataclass
97
+ class IOHandles(Generic[AnyStr]):
98
+ """
99
+ Return value of io/common.py:get_handle
100
+
101
+ Can be used as a context manager.
102
+
103
+ This is used to easily close created buffers and to handle corner cases when
104
+ TextIOWrapper is inserted.
105
+
106
+ handle: The file handle to be used.
107
+ created_handles: All file handles that are created by get_handle
108
+ is_wrapped: Whether a TextIOWrapper needs to be detached.
109
+ """
110
+
111
+ # handle might not implement the IO-interface
112
+ handle: IO[AnyStr]
113
+ compression: CompressionDict
114
+ created_handles: list[IO[bytes] | IO[str]] = dataclasses.field(default_factory=list)
115
+ is_wrapped: bool = False
116
+
117
+ def close(self) -> None:
118
+ """
119
+ Close all created buffers.
120
+
121
+ Note: If a TextIOWrapper was inserted, it is flushed and detached to
122
+ avoid closing the potentially user-created buffer.
123
+ """
124
+ if self.is_wrapped:
125
+ assert isinstance(self.handle, TextIOWrapper)
126
+ self.handle.flush()
127
+ self.handle.detach()
128
+ self.created_handles.remove(self.handle)
129
+ for handle in self.created_handles:
130
+ handle.close()
131
+ self.created_handles = []
132
+ self.is_wrapped = False
133
+
134
+ def __enter__(self) -> IOHandles[AnyStr]:
135
+ return self
136
+
137
+ def __exit__(self, *args: Any) -> None:
138
+ self.close()
139
+
140
+
141
+ def is_url(url: object) -> bool:
142
+ """
143
+ Check to see if a URL has a valid protocol.
144
+
145
+ Parameters
146
+ ----------
147
+ url : str or unicode
148
+
149
+ Returns
150
+ -------
151
+ isurl : bool
152
+ If `url` has a valid protocol return True otherwise False.
153
+ """
154
+ if not isinstance(url, str):
155
+ return False
156
+ return parse_url(url).scheme in _VALID_URLS
157
+
158
+
159
+ @overload
160
+ def _expand_user(filepath_or_buffer: str) -> str:
161
+ ...
162
+
163
+
164
+ @overload
165
+ def _expand_user(filepath_or_buffer: BaseBufferT) -> BaseBufferT:
166
+ ...
167
+
168
+
169
+ def _expand_user(filepath_or_buffer: str | BaseBufferT) -> str | BaseBufferT:
170
+ """
171
+ Return the argument with an initial component of ~ or ~user
172
+ replaced by that user's home directory.
173
+
174
+ Parameters
175
+ ----------
176
+ filepath_or_buffer : object to be converted if possible
177
+
178
+ Returns
179
+ -------
180
+ expanded_filepath_or_buffer : an expanded filepath or the
181
+ input if not expandable
182
+ """
183
+ if isinstance(filepath_or_buffer, str):
184
+ return os.path.expanduser(filepath_or_buffer)
185
+ return filepath_or_buffer
186
+
187
+
188
+ def validate_header_arg(header: object) -> None:
189
+ if header is None:
190
+ return
191
+ if is_integer(header):
192
+ header = cast(int, header)
193
+ if header < 0:
194
+ # GH 27779
195
+ raise ValueError(
196
+ "Passing negative integer to header is invalid. "
197
+ "For no header, use header=None instead"
198
+ )
199
+ return
200
+ if is_list_like(header, allow_sets=False):
201
+ header = cast(Sequence, header)
202
+ if not all(map(is_integer, header)):
203
+ raise ValueError("header must be integer or list of integers")
204
+ if any(i < 0 for i in header):
205
+ raise ValueError("cannot specify multi-index header with negative integers")
206
+ return
207
+ if is_bool(header):
208
+ raise TypeError(
209
+ "Passing a bool to header is invalid. Use header=None for no header or "
210
+ "header=int or list-like of ints to specify "
211
+ "the row(s) making up the column names"
212
+ )
213
+ # GH 16338
214
+ raise ValueError("header must be integer or list of integers")
215
+
216
+
217
+ @overload
218
+ def stringify_path(filepath_or_buffer: FilePath, convert_file_like: bool = ...) -> str:
219
+ ...
220
+
221
+
222
+ @overload
223
+ def stringify_path(
224
+ filepath_or_buffer: BaseBufferT, convert_file_like: bool = ...
225
+ ) -> BaseBufferT:
226
+ ...
227
+
228
+
229
+ def stringify_path(
230
+ filepath_or_buffer: FilePath | BaseBufferT,
231
+ convert_file_like: bool = False,
232
+ ) -> str | BaseBufferT:
233
+ """
234
+ Attempt to convert a path-like object to a string.
235
+
236
+ Parameters
237
+ ----------
238
+ filepath_or_buffer : object to be converted
239
+
240
+ Returns
241
+ -------
242
+ str_filepath_or_buffer : maybe a string version of the object
243
+
244
+ Notes
245
+ -----
246
+ Objects supporting the fspath protocol (python 3.6+) are coerced
247
+ according to its __fspath__ method.
248
+
249
+ Any other object is passed through unchanged, which includes bytes,
250
+ strings, buffers, or anything else that's not even path-like.
251
+ """
252
+ if not convert_file_like and is_file_like(filepath_or_buffer):
253
+ # GH 38125: some fsspec objects implement os.PathLike but have already opened a
254
+ # file. This prevents opening the file a second time. infer_compression calls
255
+ # this function with convert_file_like=True to infer the compression.
256
+ return cast(BaseBufferT, filepath_or_buffer)
257
+
258
+ if isinstance(filepath_or_buffer, os.PathLike):
259
+ filepath_or_buffer = filepath_or_buffer.__fspath__()
260
+ return _expand_user(filepath_or_buffer)
261
+
262
+
263
+ def urlopen(*args, **kwargs):
264
+ """
265
+ Lazy-import wrapper for stdlib urlopen, as that imports a big chunk of
266
+ the stdlib.
267
+ """
268
+ import urllib.request
269
+
270
+ return urllib.request.urlopen(*args, **kwargs)
271
+
272
+
273
+ def is_fsspec_url(url: FilePath | BaseBuffer) -> bool:
274
+ """
275
+ Returns true if the given URL looks like
276
+ something fsspec can handle
277
+ """
278
+ return (
279
+ isinstance(url, str)
280
+ and bool(_RFC_3986_PATTERN.match(url))
281
+ and not url.startswith(("http://", "https://"))
282
+ )
283
+
284
+
285
+ @doc(
286
+ storage_options=_shared_docs["storage_options"],
287
+ compression_options=_shared_docs["compression_options"] % "filepath_or_buffer",
288
+ )
289
+ def _get_filepath_or_buffer(
290
+ filepath_or_buffer: FilePath | BaseBuffer,
291
+ encoding: str = "utf-8",
292
+ compression: CompressionOptions = None,
293
+ mode: str = "r",
294
+ storage_options: StorageOptions = None,
295
+ ) -> IOArgs:
296
+ """
297
+ If the filepath_or_buffer is a url, translate and return the buffer.
298
+ Otherwise passthrough.
299
+
300
+ Parameters
301
+ ----------
302
+ filepath_or_buffer : a url, filepath (str, py.path.local or pathlib.Path),
303
+ or buffer
304
+ {compression_options}
305
+
306
+ .. versionchanged:: 1.4.0 Zstandard support.
307
+
308
+ encoding : the encoding to use to decode bytes, default is 'utf-8'
309
+ mode : str, optional
310
+
311
+ {storage_options}
312
+
313
+ .. versionadded:: 1.2.0
314
+
315
+ ..versionchange:: 1.2.0
316
+
317
+ Returns the dataclass IOArgs.
318
+ """
319
+ filepath_or_buffer = stringify_path(filepath_or_buffer)
320
+
321
+ # handle compression dict
322
+ compression_method, compression = get_compression_method(compression)
323
+ compression_method = infer_compression(filepath_or_buffer, compression_method)
324
+
325
+ # GH21227 internal compression is not used for non-binary handles.
326
+ if compression_method and hasattr(filepath_or_buffer, "write") and "b" not in mode:
327
+ warnings.warn(
328
+ "compression has no effect when passing a non-binary object as input.",
329
+ RuntimeWarning,
330
+ stacklevel=find_stack_level(),
331
+ )
332
+ compression_method = None
333
+
334
+ compression = dict(compression, method=compression_method)
335
+
336
+ # bz2 and xz do not write the byte order mark for utf-16 and utf-32
337
+ # print a warning when writing such files
338
+ if (
339
+ "w" in mode
340
+ and compression_method in ["bz2", "xz"]
341
+ and encoding in ["utf-16", "utf-32"]
342
+ ):
343
+ warnings.warn(
344
+ f"{compression} will not write the byte order mark for {encoding}",
345
+ UnicodeWarning,
346
+ stacklevel=find_stack_level(),
347
+ )
348
+
349
+ # Use binary mode when converting path-like objects to file-like objects (fsspec)
350
+ # except when text mode is explicitly requested. The original mode is returned if
351
+ # fsspec is not used.
352
+ fsspec_mode = mode
353
+ if "t" not in fsspec_mode and "b" not in fsspec_mode:
354
+ fsspec_mode += "b"
355
+
356
+ if isinstance(filepath_or_buffer, str) and is_url(filepath_or_buffer):
357
+ # TODO: fsspec can also handle HTTP via requests, but leaving this
358
+ # unchanged. using fsspec appears to break the ability to infer if the
359
+ # server responded with gzipped data
360
+ storage_options = storage_options or {}
361
+
362
+ # waiting until now for importing to match intended lazy logic of
363
+ # urlopen function defined elsewhere in this module
364
+ import urllib.request
365
+
366
+ # assuming storage_options is to be interpreted as headers
367
+ req_info = urllib.request.Request(filepath_or_buffer, headers=storage_options)
368
+ with urlopen(req_info) as req:
369
+ content_encoding = req.headers.get("Content-Encoding", None)
370
+ if content_encoding == "gzip":
371
+ # Override compression based on Content-Encoding header
372
+ compression = {"method": "gzip"}
373
+ reader = BytesIO(req.read())
374
+ return IOArgs(
375
+ filepath_or_buffer=reader,
376
+ encoding=encoding,
377
+ compression=compression,
378
+ should_close=True,
379
+ mode=fsspec_mode,
380
+ )
381
+
382
+ if is_fsspec_url(filepath_or_buffer):
383
+ assert isinstance(
384
+ filepath_or_buffer, str
385
+ ) # just to appease mypy for this branch
386
+ # two special-case s3-like protocols; these have special meaning in Hadoop,
387
+ # but are equivalent to just "s3" from fsspec's point of view
388
+ # cc #11071
389
+ if filepath_or_buffer.startswith("s3a://"):
390
+ filepath_or_buffer = filepath_or_buffer.replace("s3a://", "s3://")
391
+ if filepath_or_buffer.startswith("s3n://"):
392
+ filepath_or_buffer = filepath_or_buffer.replace("s3n://", "s3://")
393
+ fsspec = import_optional_dependency("fsspec")
394
+
395
+ # If botocore is installed we fallback to reading with anon=True
396
+ # to allow reads from public buckets
397
+ err_types_to_retry_with_anon: list[Any] = []
398
+ try:
399
+ import_optional_dependency("botocore")
400
+ from botocore.exceptions import (
401
+ ClientError,
402
+ NoCredentialsError,
403
+ )
404
+
405
+ err_types_to_retry_with_anon = [
406
+ ClientError,
407
+ NoCredentialsError,
408
+ PermissionError,
409
+ ]
410
+ except ImportError:
411
+ pass
412
+
413
+ try:
414
+ file_obj = fsspec.open(
415
+ filepath_or_buffer, mode=fsspec_mode, **(storage_options or {})
416
+ ).open()
417
+ # GH 34626 Reads from Public Buckets without Credentials needs anon=True
418
+ except tuple(err_types_to_retry_with_anon):
419
+ if storage_options is None:
420
+ storage_options = {"anon": True}
421
+ else:
422
+ # don't mutate user input.
423
+ storage_options = dict(storage_options)
424
+ storage_options["anon"] = True
425
+ file_obj = fsspec.open(
426
+ filepath_or_buffer, mode=fsspec_mode, **(storage_options or {})
427
+ ).open()
428
+
429
+ return IOArgs(
430
+ filepath_or_buffer=file_obj,
431
+ encoding=encoding,
432
+ compression=compression,
433
+ should_close=True,
434
+ mode=fsspec_mode,
435
+ )
436
+ elif storage_options:
437
+ raise ValueError(
438
+ "storage_options passed with file object or non-fsspec file path"
439
+ )
440
+
441
+ if isinstance(filepath_or_buffer, (str, bytes, mmap.mmap)):
442
+ return IOArgs(
443
+ filepath_or_buffer=_expand_user(filepath_or_buffer),
444
+ encoding=encoding,
445
+ compression=compression,
446
+ should_close=False,
447
+ mode=mode,
448
+ )
449
+
450
+ # is_file_like requires (read | write) & __iter__ but __iter__ is only
451
+ # needed for read_csv(engine=python)
452
+ if not (
453
+ hasattr(filepath_or_buffer, "read") or hasattr(filepath_or_buffer, "write")
454
+ ):
455
+ msg = f"Invalid file path or buffer object type: {type(filepath_or_buffer)}"
456
+ raise ValueError(msg)
457
+
458
+ return IOArgs(
459
+ filepath_or_buffer=filepath_or_buffer,
460
+ encoding=encoding,
461
+ compression=compression,
462
+ should_close=False,
463
+ mode=mode,
464
+ )
465
+
466
+
467
+ def file_path_to_url(path: str) -> str:
468
+ """
469
+ converts an absolute native path to a FILE URL.
470
+
471
+ Parameters
472
+ ----------
473
+ path : a path in native format
474
+
475
+ Returns
476
+ -------
477
+ a valid FILE URL
478
+ """
479
+ # lazify expensive import (~30ms)
480
+ from urllib.request import pathname2url
481
+
482
+ return urljoin("file:", pathname2url(path))
483
+
484
+
485
+ extension_to_compression = {
486
+ ".tar": "tar",
487
+ ".tar.gz": "tar",
488
+ ".tar.bz2": "tar",
489
+ ".tar.xz": "tar",
490
+ ".gz": "gzip",
491
+ ".bz2": "bz2",
492
+ ".zip": "zip",
493
+ ".xz": "xz",
494
+ ".zst": "zstd",
495
+ }
496
+ _supported_compressions = set(extension_to_compression.values())
497
+
498
+
499
+ def get_compression_method(
500
+ compression: CompressionOptions,
501
+ ) -> tuple[str | None, CompressionDict]:
502
+ """
503
+ Simplifies a compression argument to a compression method string and
504
+ a mapping containing additional arguments.
505
+
506
+ Parameters
507
+ ----------
508
+ compression : str or mapping
509
+ If string, specifies the compression method. If mapping, value at key
510
+ 'method' specifies compression method.
511
+
512
+ Returns
513
+ -------
514
+ tuple of ({compression method}, Optional[str]
515
+ {compression arguments}, Dict[str, Any])
516
+
517
+ Raises
518
+ ------
519
+ ValueError on mapping missing 'method' key
520
+ """
521
+ compression_method: str | None
522
+ if isinstance(compression, Mapping):
523
+ compression_args = dict(compression)
524
+ try:
525
+ compression_method = compression_args.pop("method")
526
+ except KeyError as err:
527
+ raise ValueError("If mapping, compression must have key 'method'") from err
528
+ else:
529
+ compression_args = {}
530
+ compression_method = compression
531
+ return compression_method, compression_args
532
+
533
+
534
+ @doc(compression_options=_shared_docs["compression_options"] % "filepath_or_buffer")
535
+ def infer_compression(
536
+ filepath_or_buffer: FilePath | BaseBuffer, compression: str | None
537
+ ) -> str | None:
538
+ """
539
+ Get the compression method for filepath_or_buffer. If compression='infer',
540
+ the inferred compression method is returned. Otherwise, the input
541
+ compression method is returned unchanged, unless it's invalid, in which
542
+ case an error is raised.
543
+
544
+ Parameters
545
+ ----------
546
+ filepath_or_buffer : str or file handle
547
+ File path or object.
548
+ {compression_options}
549
+
550
+ .. versionchanged:: 1.4.0 Zstandard support.
551
+
552
+ Returns
553
+ -------
554
+ string or None
555
+
556
+ Raises
557
+ ------
558
+ ValueError on invalid compression specified.
559
+ """
560
+ if compression is None:
561
+ return None
562
+
563
+ # Infer compression
564
+ if compression == "infer":
565
+ # Convert all path types (e.g. pathlib.Path) to strings
566
+ filepath_or_buffer = stringify_path(filepath_or_buffer, convert_file_like=True)
567
+ if not isinstance(filepath_or_buffer, str):
568
+ # Cannot infer compression of a buffer, assume no compression
569
+ return None
570
+
571
+ # Infer compression from the filename/URL extension
572
+ for extension, compression in extension_to_compression.items():
573
+ if filepath_or_buffer.lower().endswith(extension):
574
+ return compression
575
+ return None
576
+
577
+ # Compression has been specified. Check that it's valid
578
+ if compression in _supported_compressions:
579
+ return compression
580
+
581
+ valid = ["infer", None] + sorted(_supported_compressions)
582
+ msg = (
583
+ f"Unrecognized compression type: {compression}\n"
584
+ f"Valid compression types are {valid}"
585
+ )
586
+ raise ValueError(msg)
587
+
588
+
589
+ def check_parent_directory(path: Path | str) -> None:
590
+ """
591
+ Check if parent directory of a file exists, raise OSError if it does not
592
+
593
+ Parameters
594
+ ----------
595
+ path: Path or str
596
+ Path to check parent directory of
597
+ """
598
+ parent = Path(path).parent
599
+ if not parent.is_dir():
600
+ raise OSError(rf"Cannot save file into a non-existent directory: '{parent}'")
601
+
602
+
603
+ @overload
604
+ def get_handle(
605
+ path_or_buf: FilePath | BaseBuffer,
606
+ mode: str,
607
+ *,
608
+ encoding: str | None = ...,
609
+ compression: CompressionOptions = ...,
610
+ memory_map: bool = ...,
611
+ is_text: Literal[False],
612
+ errors: str | None = ...,
613
+ storage_options: StorageOptions = ...,
614
+ ) -> IOHandles[bytes]:
615
+ ...
616
+
617
+
618
+ @overload
619
+ def get_handle(
620
+ path_or_buf: FilePath | BaseBuffer,
621
+ mode: str,
622
+ *,
623
+ encoding: str | None = ...,
624
+ compression: CompressionOptions = ...,
625
+ memory_map: bool = ...,
626
+ is_text: Literal[True] = ...,
627
+ errors: str | None = ...,
628
+ storage_options: StorageOptions = ...,
629
+ ) -> IOHandles[str]:
630
+ ...
631
+
632
+
633
+ @overload
634
+ def get_handle(
635
+ path_or_buf: FilePath | BaseBuffer,
636
+ mode: str,
637
+ *,
638
+ encoding: str | None = ...,
639
+ compression: CompressionOptions = ...,
640
+ memory_map: bool = ...,
641
+ is_text: bool = ...,
642
+ errors: str | None = ...,
643
+ storage_options: StorageOptions = ...,
644
+ ) -> IOHandles[str] | IOHandles[bytes]:
645
+ ...
646
+
647
+
648
+ @doc(compression_options=_shared_docs["compression_options"] % "path_or_buf")
649
+ def get_handle(
650
+ path_or_buf: FilePath | BaseBuffer,
651
+ mode: str,
652
+ *,
653
+ encoding: str | None = None,
654
+ compression: CompressionOptions = None,
655
+ memory_map: bool = False,
656
+ is_text: bool = True,
657
+ errors: str | None = None,
658
+ storage_options: StorageOptions = None,
659
+ ) -> IOHandles[str] | IOHandles[bytes]:
660
+ """
661
+ Get file handle for given path/buffer and mode.
662
+
663
+ Parameters
664
+ ----------
665
+ path_or_buf : str or file handle
666
+ File path or object.
667
+ mode : str
668
+ Mode to open path_or_buf with.
669
+ encoding : str or None
670
+ Encoding to use.
671
+ {compression_options}
672
+
673
+ .. versionchanged:: 1.0.0
674
+ May now be a dict with key 'method' as compression mode
675
+ and other keys as compression options if compression
676
+ mode is 'zip'.
677
+
678
+ .. versionchanged:: 1.1.0
679
+ Passing compression options as keys in dict is now
680
+ supported for compression modes 'gzip', 'bz2', 'zstd' and 'zip'.
681
+
682
+ .. versionchanged:: 1.4.0 Zstandard support.
683
+
684
+ memory_map : bool, default False
685
+ See parsers._parser_params for more information. Only used by read_csv.
686
+ is_text : bool, default True
687
+ Whether the type of the content passed to the file/buffer is string or
688
+ bytes. This is not the same as `"b" not in mode`. If a string content is
689
+ passed to a binary file/buffer, a wrapper is inserted.
690
+ errors : str, default 'strict'
691
+ Specifies how encoding and decoding errors are to be handled.
692
+ See the errors argument for :func:`open` for a full list
693
+ of options.
694
+ storage_options: StorageOptions = None
695
+ Passed to _get_filepath_or_buffer
696
+
697
+ .. versionchanged:: 1.2.0
698
+
699
+ Returns the dataclass IOHandles
700
+ """
701
+ # Windows does not default to utf-8. Set to utf-8 for a consistent behavior
702
+ encoding = encoding or "utf-8"
703
+
704
+ errors = errors or "strict"
705
+
706
+ # read_csv does not know whether the buffer is opened in binary/text mode
707
+ if _is_binary_mode(path_or_buf, mode) and "b" not in mode:
708
+ mode += "b"
709
+
710
+ # validate encoding and errors
711
+ codecs.lookup(encoding)
712
+ if isinstance(errors, str):
713
+ codecs.lookup_error(errors)
714
+
715
+ # open URLs
716
+ ioargs = _get_filepath_or_buffer(
717
+ path_or_buf,
718
+ encoding=encoding,
719
+ compression=compression,
720
+ mode=mode,
721
+ storage_options=storage_options,
722
+ )
723
+
724
+ handle = ioargs.filepath_or_buffer
725
+ handles: list[BaseBuffer]
726
+
727
+ # memory mapping needs to be the first step
728
+ # only used for read_csv
729
+ handle, memory_map, handles = _maybe_memory_map(handle, memory_map)
730
+
731
+ is_path = isinstance(handle, str)
732
+ compression_args = dict(ioargs.compression)
733
+ compression = compression_args.pop("method")
734
+
735
+ # Only for write methods
736
+ if "r" not in mode and is_path:
737
+ check_parent_directory(str(handle))
738
+
739
+ if compression:
740
+ if compression != "zstd":
741
+ # compression libraries do not like an explicit text-mode
742
+ ioargs.mode = ioargs.mode.replace("t", "")
743
+ elif compression == "zstd" and "b" not in ioargs.mode:
744
+ # python-zstandard defaults to text mode, but we always expect
745
+ # compression libraries to use binary mode.
746
+ ioargs.mode += "b"
747
+
748
+ # GZ Compression
749
+ if compression == "gzip":
750
+ if isinstance(handle, str):
751
+ # error: Incompatible types in assignment (expression has type
752
+ # "GzipFile", variable has type "Union[str, BaseBuffer]")
753
+ handle = gzip.GzipFile( # type: ignore[assignment]
754
+ filename=handle,
755
+ mode=ioargs.mode,
756
+ **compression_args,
757
+ )
758
+ else:
759
+ handle = gzip.GzipFile(
760
+ # No overload variant of "GzipFile" matches argument types
761
+ # "Union[str, BaseBuffer]", "str", "Dict[str, Any]"
762
+ fileobj=handle, # type: ignore[call-overload]
763
+ mode=ioargs.mode,
764
+ **compression_args,
765
+ )
766
+
767
+ # BZ Compression
768
+ elif compression == "bz2":
769
+ # Overload of "BZ2File" to handle pickle protocol 5
770
+ # "Union[str, BaseBuffer]", "str", "Dict[str, Any]"
771
+ handle = _BZ2File( # type: ignore[call-overload]
772
+ handle,
773
+ mode=ioargs.mode,
774
+ **compression_args,
775
+ )
776
+
777
+ # ZIP Compression
778
+ elif compression == "zip":
779
+ # error: Argument 1 to "_BytesZipFile" has incompatible type
780
+ # "Union[str, BaseBuffer]"; expected "Union[Union[str, PathLike[str]],
781
+ # ReadBuffer[bytes], WriteBuffer[bytes]]"
782
+ handle = _BytesZipFile(
783
+ handle, ioargs.mode, **compression_args # type: ignore[arg-type]
784
+ )
785
+ if handle.buffer.mode == "r":
786
+ handles.append(handle)
787
+ zip_names = handle.buffer.namelist()
788
+ if len(zip_names) == 1:
789
+ handle = handle.buffer.open(zip_names.pop())
790
+ elif not zip_names:
791
+ raise ValueError(f"Zero files found in ZIP file {path_or_buf}")
792
+ else:
793
+ raise ValueError(
794
+ "Multiple files found in ZIP file. "
795
+ f"Only one file per ZIP: {zip_names}"
796
+ )
797
+
798
+ # TAR Encoding
799
+ elif compression == "tar":
800
+ compression_args.setdefault("mode", ioargs.mode)
801
+ if isinstance(handle, str):
802
+ handle = _BytesTarFile(name=handle, **compression_args)
803
+ else:
804
+ # error: Argument "fileobj" to "_BytesTarFile" has incompatible
805
+ # type "BaseBuffer"; expected "Union[ReadBuffer[bytes],
806
+ # WriteBuffer[bytes], None]"
807
+ handle = _BytesTarFile(
808
+ fileobj=handle, **compression_args # type: ignore[arg-type]
809
+ )
810
+ assert isinstance(handle, _BytesTarFile)
811
+ if "r" in handle.buffer.mode:
812
+ handles.append(handle)
813
+ files = handle.buffer.getnames()
814
+ if len(files) == 1:
815
+ file = handle.buffer.extractfile(files[0])
816
+ assert file is not None
817
+ handle = file
818
+ elif not files:
819
+ raise ValueError(f"Zero files found in TAR archive {path_or_buf}")
820
+ else:
821
+ raise ValueError(
822
+ "Multiple files found in TAR archive. "
823
+ f"Only one file per TAR archive: {files}"
824
+ )
825
+
826
+ # XZ Compression
827
+ elif compression == "xz":
828
+ # error: Argument 1 to "LZMAFile" has incompatible type "Union[str,
829
+ # BaseBuffer]"; expected "Optional[Union[Union[str, bytes, PathLike[str],
830
+ # PathLike[bytes]], IO[bytes]]]"
831
+ handle = get_lzma_file()(handle, ioargs.mode) # type: ignore[arg-type]
832
+
833
+ # Zstd Compression
834
+ elif compression == "zstd":
835
+ zstd = import_optional_dependency("zstandard")
836
+ if "r" in ioargs.mode:
837
+ open_args = {"dctx": zstd.ZstdDecompressor(**compression_args)}
838
+ else:
839
+ open_args = {"cctx": zstd.ZstdCompressor(**compression_args)}
840
+ handle = zstd.open(
841
+ handle,
842
+ mode=ioargs.mode,
843
+ **open_args,
844
+ )
845
+
846
+ # Unrecognized Compression
847
+ else:
848
+ msg = f"Unrecognized compression type: {compression}"
849
+ raise ValueError(msg)
850
+
851
+ assert not isinstance(handle, str)
852
+ handles.append(handle)
853
+
854
+ elif isinstance(handle, str):
855
+ # Check whether the filename is to be opened in binary mode.
856
+ # Binary mode does not support 'encoding' and 'newline'.
857
+ if ioargs.encoding and "b" not in ioargs.mode:
858
+ # Encoding
859
+ handle = open(
860
+ handle,
861
+ ioargs.mode,
862
+ encoding=ioargs.encoding,
863
+ errors=errors,
864
+ newline="",
865
+ )
866
+ else:
867
+ # Binary mode
868
+ handle = open(handle, ioargs.mode)
869
+ handles.append(handle)
870
+
871
+ # Convert BytesIO or file objects passed with an encoding
872
+ is_wrapped = False
873
+ if not is_text and ioargs.mode == "rb" and isinstance(handle, TextIOBase):
874
+ # not added to handles as it does not open/buffer resources
875
+ handle = _BytesIOWrapper(
876
+ handle,
877
+ encoding=ioargs.encoding,
878
+ )
879
+ elif is_text and (
880
+ compression or memory_map or _is_binary_mode(handle, ioargs.mode)
881
+ ):
882
+ if (
883
+ not hasattr(handle, "readable")
884
+ or not hasattr(handle, "writable")
885
+ or not hasattr(handle, "seekable")
886
+ ):
887
+ handle = _IOWrapper(handle)
888
+ # error: Argument 1 to "TextIOWrapper" has incompatible type
889
+ # "_IOWrapper"; expected "IO[bytes]"
890
+ handle = TextIOWrapper(
891
+ handle, # type: ignore[arg-type]
892
+ encoding=ioargs.encoding,
893
+ errors=errors,
894
+ newline="",
895
+ )
896
+ handles.append(handle)
897
+ # only marked as wrapped when the caller provided a handle
898
+ is_wrapped = not (
899
+ isinstance(ioargs.filepath_or_buffer, str) or ioargs.should_close
900
+ )
901
+
902
+ if "r" in ioargs.mode and not hasattr(handle, "read"):
903
+ raise TypeError(
904
+ "Expected file path name or file-like object, "
905
+ f"got {type(ioargs.filepath_or_buffer)} type"
906
+ )
907
+
908
+ handles.reverse() # close the most recently added buffer first
909
+ if ioargs.should_close:
910
+ assert not isinstance(ioargs.filepath_or_buffer, str)
911
+ handles.append(ioargs.filepath_or_buffer)
912
+
913
+ return IOHandles(
914
+ # error: Argument "handle" to "IOHandles" has incompatible type
915
+ # "Union[TextIOWrapper, GzipFile, BaseBuffer, typing.IO[bytes],
916
+ # typing.IO[Any]]"; expected "pandas._typing.IO[Any]"
917
+ handle=handle, # type: ignore[arg-type]
918
+ # error: Argument "created_handles" to "IOHandles" has incompatible type
919
+ # "List[BaseBuffer]"; expected "List[Union[IO[bytes], IO[str]]]"
920
+ created_handles=handles, # type: ignore[arg-type]
921
+ is_wrapped=is_wrapped,
922
+ compression=ioargs.compression,
923
+ )
924
+
925
+
926
# error: Definition of "__enter__" in base class "IOBase" is incompatible
# with definition in base class "BinaryIO"
class _BufferedWriter(BytesIO, ABC):  # type: ignore[misc]
    """
    Some objects do not support multiple .write() calls (TarFile and ZipFile).
    This wrapper writes to the underlying buffer on close.

    Subclasses set ``self.buffer`` (the archive object) in ``__init__`` and
    implement ``write_to_buffer`` to flush the accumulated bytes into it in
    a single operation.
    """

    @abstractmethod
    def write_to_buffer(self) -> None:
        # Flush everything buffered in this BytesIO into ``self.buffer``.
        ...

    def close(self) -> None:
        if self.closed:
            # already closed; close() must stay idempotent
            return
        if self.getvalue():
            # something was written: rewind so write_to_buffer() can read the
            # whole payload, then hand it to the archive in one call
            self.seek(0)
            # error: "_BufferedWriter" has no attribute "buffer"
            with self.buffer:  # type: ignore[attr-defined]
                self.write_to_buffer()
        else:
            # nothing was written; still close the underlying archive
            # error: "_BufferedWriter" has no attribute "buffer"
            self.buffer.close()  # type: ignore[attr-defined]
        super().close()
952
+
953
+
954
class _BytesTarFile(_BufferedWriter):
    """
    Buffered writer that stores its payload as a single member of a tarball.

    Bytes written to this object are buffered (via ``_BufferedWriter``) and
    added to ``self.buffer`` -- the ``TarFile`` -- as one member on close.
    """

    def __init__(
        self,
        name: str | None = None,
        mode: Literal["r", "a", "w", "x"] = "r",
        fileobj: ReadBuffer[bytes] | WriteBuffer[bytes] | None = None,
        archive_name: str | None = None,
        **kwargs,
    ) -> None:
        super().__init__()
        self.archive_name = archive_name
        self.name = name
        # error: Argument "fileobj" to "open" of "TarFile" has incompatible
        # type "Union[ReadBuffer[bytes], WriteBuffer[bytes], None]"; expected
        # "Optional[IO[bytes]]"
        self.buffer = tarfile.TarFile.open(
            name=name,
            mode=self.extend_mode(mode),
            fileobj=fileobj,  # type: ignore[arg-type]
            **kwargs,
        )

    def extend_mode(self, mode: str) -> str:
        """Append the compression implied by the file suffix to a "w" mode."""
        mode = mode.replace("b", "")
        if mode != "w":
            # tarfile only accepts "w:gz"-style modes when writing
            return mode
        if self.name is not None:
            suffix = Path(self.name).suffix
            if suffix in (".gz", ".xz", ".bz2"):
                mode = f"{mode}:{suffix[1:]}"
        return mode

    def infer_filename(self) -> str | None:
        """
        If an explicit archive_name is not given, we still want the file inside the
        archive not to be named something.tar, because that causes confusion (GH39465).
        """
        if self.name is None:
            return None

        filename = Path(self.name)
        if filename.suffix == ".tar":
            return filename.with_suffix("").name
        # BUGFIX: Path.suffix only ever contains the *last* extension (".gz",
        # never ".tar.gz"), so the old ``filename.suffix in (".tar.gz", ...)``
        # comparison was unreachable.  Check both suffixes explicitly so that
        # "df.tar.gz" yields the member name "df" as intended.
        if (
            filename.suffix in (".gz", ".bz2", ".xz")
            and filename.with_suffix("").suffix == ".tar"
        ):
            return filename.with_suffix("").with_suffix("").name
        return filename.name

    def write_to_buffer(self) -> None:
        # TarFile needs a non-empty string
        archive_name = self.archive_name or self.infer_filename() or "tar"
        tarinfo = tarfile.TarInfo(name=archive_name)
        tarinfo.size = len(self.getvalue())
        self.buffer.addfile(tarinfo, self)
1007
+
1008
+
1009
class _BytesZipFile(_BufferedWriter):
    """
    Buffered writer that stores its payload as a single member of a ZipFile.
    """

    def __init__(
        self,
        file: FilePath | ReadBuffer[bytes] | WriteBuffer[bytes],
        mode: str,
        archive_name: str | None = None,
        **kwargs,
    ) -> None:
        super().__init__()
        self.archive_name = archive_name

        # ZipFile wants a text mode; default to deflate compression.
        kwargs.setdefault("compression", zipfile.ZIP_DEFLATED)
        # error: Argument 1 to "ZipFile" has incompatible type "Union[
        # Union[str, PathLike[str]], ReadBuffer[bytes], WriteBuffer[bytes]]";
        # expected "Union[Union[str, PathLike[str]], IO[bytes]]"
        self.buffer = zipfile.ZipFile(  # type: ignore[arg-type]
            file, mode.replace("b", ""), **kwargs
        )

    def infer_filename(self) -> str | None:
        """
        If an explicit archive_name is not given, we still want the file inside the zip
        file not to be named something.zip, because that causes confusion (GH39465).
        """
        zip_path = self.buffer.filename
        if not isinstance(zip_path, (os.PathLike, str)):
            return None
        inner = Path(zip_path)
        if inner.suffix == ".zip":
            inner = inner.with_suffix("")
        return inner.name

    def write_to_buffer(self) -> None:
        # ZipFile needs a non-empty string
        member_name = self.archive_name or self.infer_filename() or "zip"
        self.buffer.writestr(member_name, self.getvalue())
1043
+
1044
+
1045
+ class _IOWrapper:
1046
+ # TextIOWrapper is overly strict: it request that the buffer has seekable, readable,
1047
+ # and writable. If we have a read-only buffer, we shouldn't need writable and vice
1048
+ # versa. Some buffers, are seek/read/writ-able but they do not have the "-able"
1049
+ # methods, e.g., tempfile.SpooledTemporaryFile.
1050
+ # If a buffer does not have the above "-able" methods, we simple assume they are
1051
+ # seek/read/writ-able.
1052
+ def __init__(self, buffer: BaseBuffer) -> None:
1053
+ self.buffer = buffer
1054
+
1055
+ def __getattr__(self, name: str):
1056
+ return getattr(self.buffer, name)
1057
+
1058
+ def readable(self) -> bool:
1059
+ if hasattr(self.buffer, "readable"):
1060
+ return self.buffer.readable()
1061
+ return True
1062
+
1063
+ def seekable(self) -> bool:
1064
+ if hasattr(self.buffer, "seekable"):
1065
+ return self.buffer.seekable()
1066
+ return True
1067
+
1068
+ def writable(self) -> bool:
1069
+ if hasattr(self.buffer, "writable"):
1070
+ return self.buffer.writable()
1071
+ return True
1072
+
1073
+
1074
+ class _BytesIOWrapper:
1075
+ # Wrapper that wraps a StringIO buffer and reads bytes from it
1076
+ # Created for compat with pyarrow read_csv
1077
+ def __init__(self, buffer: StringIO | TextIOBase, encoding: str = "utf-8") -> None:
1078
+ self.buffer = buffer
1079
+ self.encoding = encoding
1080
+ # Because a character can be represented by more than 1 byte,
1081
+ # it is possible that reading will produce more bytes than n
1082
+ # We store the extra bytes in this overflow variable, and append the
1083
+ # overflow to the front of the bytestring the next time reading is performed
1084
+ self.overflow = b""
1085
+
1086
+ def __getattr__(self, attr: str):
1087
+ return getattr(self.buffer, attr)
1088
+
1089
+ def read(self, n: int | None = -1) -> bytes:
1090
+ assert self.buffer is not None
1091
+ bytestring = self.buffer.read(n).encode(self.encoding)
1092
+ # When n=-1/n greater than remaining bytes: Read entire file/rest of file
1093
+ combined_bytestring = self.overflow + bytestring
1094
+ if n is None or n < 0 or n >= len(combined_bytestring):
1095
+ self.overflow = b""
1096
+ return combined_bytestring
1097
+ else:
1098
+ to_return = combined_bytestring[:n]
1099
+ self.overflow = combined_bytestring[n:]
1100
+ return to_return
1101
+
1102
+
1103
def _maybe_memory_map(
    handle: str | BaseBuffer, memory_map: bool
) -> tuple[str | BaseBuffer, bool, list[BaseBuffer]]:
    """
    Try to memory map file/buffer.

    Returns ``(handle, memory_map, handles)``: the (possibly mmap-wrapped)
    handle, whether mapping actually happened, and the list of buffers the
    caller must eventually close.
    """
    handles: list[BaseBuffer] = []
    # mapping is only possible for a path or a fileno-backed buffer
    memory_map &= hasattr(handle, "fileno") or isinstance(handle, str)
    if not memory_map:
        return handle, memory_map, handles

    # mmap used by only read_csv
    handle = cast(ReadCsvBuffer, handle)

    # need to open the file first
    if isinstance(handle, str):
        handle = open(handle, "rb")
        handles.append(handle)

    try:
        # open mmap and adds *-able
        # error: Argument 1 to "_IOWrapper" has incompatible type "mmap";
        # expected "BaseBuffer"
        wrapped = _IOWrapper(
            mmap.mmap(
                handle.fileno(), 0, access=mmap.ACCESS_READ  # type: ignore[arg-type]
            )
        )
    finally:
        # close the file we opened above even if mmap creation failed;
        # NOTE(review): assumes the mmap keeps the mapped data accessible
        # after the file object is closed (standard mmap behavior)
        for handle in reversed(handles):
            # error: "BaseBuffer" has no attribute "close"
            handle.close()  # type: ignore[attr-defined]

    # only the mmap wrapper remains for the caller to manage
    return wrapped, memory_map, [wrapped]
1135
+
1136
+
1137
def file_exists(filepath_or_buffer: FilePath | BaseBuffer) -> bool:
    """Test whether file exists."""
    path = stringify_path(filepath_or_buffer)
    # only string paths can be checked on disk; buffers never "exist"
    if not isinstance(path, str):
        return False
    try:
        return os.path.exists(path)
    except (TypeError, ValueError):
        # gh-5874: an overly long filepath may raise here instead of
        # returning False
        return False
1149
+
1150
+
1151
def _is_binary_mode(handle: FilePath | BaseBuffer, mode: str) -> bool:
    """Whether the handle is opened in binary mode"""
    # an explicit "t" or "b" in the requested mode string wins
    if "t" in mode or "b" in mode:
        return "b" in mode

    # codecs stream objects expect str even though their modes contain "b"
    if issubclass(
        type(handle),
        (codecs.StreamWriter, codecs.StreamReader, codecs.StreamReaderWriter),
    ):
        return False

    # otherwise: a known binary IO class, or a "b" in the handle's own mode
    # (falling back to the requested mode when the handle carries none)
    return isinstance(handle, _get_binary_io_classes()) or "b" in getattr(
        handle, "mode", mode
    )
1170
+
1171
+
1172
@functools.lru_cache
def _get_binary_io_classes() -> tuple[type, ...]:
    """IO classes that expect bytes"""
    # cached: the zstandard probe below is not free and the answer never
    # changes within a process
    binary_classes: tuple[type, ...] = (BufferedIOBase, RawIOBase)

    # python-zstandard doesn't use any of the builtin base classes; instead we
    # have to use the `zstd.ZstdDecompressionReader` class for isinstance checks.
    # Unfortunately `zstd.ZstdDecompressionReader` isn't exposed by python-zstandard
    # so we have to get it from a `zstd.ZstdDecompressor` instance.
    # See also https://github.com/indygreg/python-zstandard/pull/165.
    zstd = import_optional_dependency("zstandard", errors="ignore")
    if zstd is not None:
        with zstd.ZstdDecompressor().stream_reader(b"") as reader:
            binary_classes += (type(reader),)

    return binary_classes
1188
+
1189
+
1190
def is_potential_multi_index(
    columns: Sequence[Hashable] | MultiIndex,
    index_col: bool | Sequence[int] | None = None,
) -> bool:
    """
    Check whether or not the `columns` parameter
    could be converted into a MultiIndex.

    Parameters
    ----------
    columns : array-like
        Object which may or may not be convertible into a MultiIndex
    index_col : None, bool or list, optional
        Column or columns to use as the (possibly hierarchical) index

    Returns
    -------
    bool : Whether or not columns could become a MultiIndex
    """
    # None / bool index_col carries no column labels to exclude
    if index_col is None or isinstance(index_col, bool):
        index_columns: list = []
    else:
        index_columns = list(index_col)

    if not len(columns) or isinstance(columns, MultiIndex):
        return False
    # every remaining label must be a tuple for a MultiIndex to be possible
    return all(
        isinstance(label, tuple) for label in columns if label not in index_columns
    )
1217
+
1218
+
1219
def dedup_names(
    names: Sequence[Hashable], is_potential_multiindex: bool
) -> Sequence[Hashable]:
    """
    Rename column names if duplicates exist.

    Currently the renaming is done by appending a period and an autonumeric,
    but a custom pattern may be supported in the future.

    Examples
    --------
    >>> dedup_names(["x", "y", "x", "x"], is_potential_multiindex=False)
    ['x', 'y', 'x.1', 'x.2']
    """
    deduped = list(names)  # copy so the input is never mutated
    seen: DefaultDict[Hashable, int] = defaultdict(int)

    for pos, label in enumerate(deduped):
        occurrences = seen[label]

        # keep renaming until the candidate itself is unused so far
        while occurrences > 0:
            seen[label] = occurrences + 1

            if is_potential_multiindex:
                # for mypy
                assert isinstance(label, tuple)
                # only the last level gets the ".<n>" suffix
                label = label[:-1] + (f"{label[-1]}.{occurrences}",)
            else:
                label = f"{label}.{occurrences}"
            occurrences = seen[label]

        deduped[pos] = label
        seen[label] = occurrences + 1

    return deduped
videochat2/lib/python3.10/site-packages/pandas/io/feather_format.py ADDED
@@ -0,0 +1,162 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """ feather-format compat """
2
+ from __future__ import annotations
3
+
4
+ from typing import (
5
+ Hashable,
6
+ Sequence,
7
+ )
8
+
9
+ from pandas._libs import lib
10
+ from pandas._typing import (
11
+ DtypeBackend,
12
+ FilePath,
13
+ ReadBuffer,
14
+ StorageOptions,
15
+ WriteBuffer,
16
+ )
17
+ from pandas.compat._optional import import_optional_dependency
18
+ from pandas.util._decorators import doc
19
+ from pandas.util._validators import check_dtype_backend
20
+
21
+ import pandas as pd
22
+ from pandas.core.api import (
23
+ DataFrame,
24
+ RangeIndex,
25
+ )
26
+ from pandas.core.shared_docs import _shared_docs
27
+
28
+ from pandas.io.common import get_handle
29
+
30
+
31
@doc(storage_options=_shared_docs["storage_options"])
def to_feather(
    df: DataFrame,
    path: FilePath | WriteBuffer[bytes],
    storage_options: StorageOptions = None,
    **kwargs,
) -> None:
    """
    Write a DataFrame to the binary Feather format.

    Parameters
    ----------
    df : DataFrame
    path : str, path object, or file-like object
    {storage_options}

        .. versionadded:: 1.2.0

    **kwargs :
        Additional keywords passed to `pyarrow.feather.write_feather`.

        .. versionadded:: 1.1.0

    Raises
    ------
    ValueError
        If ``df`` is not a DataFrame, has a non-default or named index,
        or has non-string column names.
    """
    import_optional_dependency("pyarrow")
    from pyarrow import feather

    if not isinstance(df, DataFrame):
        raise ValueError("feather only support IO with DataFrames")

    valid_types = {"string", "unicode"}

    # validate index
    # --------------

    # validate that we have only a default index
    # raise on anything else as we don't serialize the index

    if not df.index.dtype == "int64":
        typ = type(df.index)
        raise ValueError(
            f"feather does not support serializing {typ} "
            "for the index; you can .reset_index() to make the index into column(s)"
        )

    if not df.index.equals(RangeIndex.from_range(range(len(df)))):
        raise ValueError(
            "feather does not support serializing a non-default index for the index; "
            "you can .reset_index() to make the index into column(s)"
        )

    if df.index.name is not None:
        raise ValueError(
            "feather does not serialize index meta-data on a default index"
        )

    # validate columns
    # ----------------

    # must have value column names (strings only)
    if df.columns.inferred_type not in valid_types:
        raise ValueError("feather must have string column names")

    # get_handle resolves paths/URLs/compression; feather only needs a
    # binary writable handle
    with get_handle(
        path, "wb", storage_options=storage_options, is_text=False
    ) as handles:
        feather.write_feather(df, handles.handle, **kwargs)
97
+
98
+
99
@doc(storage_options=_shared_docs["storage_options"])
def read_feather(
    path: FilePath | ReadBuffer[bytes],
    columns: Sequence[Hashable] | None = None,
    use_threads: bool = True,
    storage_options: StorageOptions = None,
    dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,
):
    """
    Load a feather-format object from the file path.

    Parameters
    ----------
    path : str, path object, or file-like object
        String, path object (implementing ``os.PathLike[str]``), or file-like
        object implementing a binary ``read()`` function. The string could be a URL.
        Valid URL schemes include http, ftp, s3, and file. For file URLs, a host is
        expected. A local file could be: ``file://localhost/path/to/table.feather``.
    columns : sequence, default None
        If not provided, all columns are read.
    use_threads : bool, default True
        Whether to parallelize reading using multiple threads.
    {storage_options}

        .. versionadded:: 1.2.0

    dtype_backend : {{"numpy_nullable", "pyarrow"}}, defaults to NumPy backed DataFrames
        Which dtype_backend to use, e.g. whether a DataFrame should have NumPy
        arrays, nullable dtypes are used for all dtypes that have a nullable
        implementation when "numpy_nullable" is set, pyarrow is used for all
        dtypes if "pyarrow" is set.

        The dtype_backends are still experimental.

        .. versionadded:: 2.0

    Returns
    -------
    type of object stored in file
    """
    import_optional_dependency("pyarrow")
    from pyarrow import feather

    # raises on an invalid dtype_backend value
    check_dtype_backend(dtype_backend)

    with get_handle(
        path, "rb", storage_options=storage_options, is_text=False
    ) as handles:
        if dtype_backend is lib.no_default:
            # legacy path: pyarrow builds the NumPy-backed DataFrame directly
            return feather.read_feather(
                handles.handle, columns=columns, use_threads=bool(use_threads)
            )

        # otherwise read an Arrow table and convert with the requested backend
        pa_table = feather.read_table(
            handles.handle, columns=columns, use_threads=bool(use_threads)
        )

        if dtype_backend == "numpy_nullable":
            from pandas.io._util import _arrow_dtype_mapping

            return pa_table.to_pandas(types_mapper=_arrow_dtype_mapping().get)

        elif dtype_backend == "pyarrow":
            return pa_table.to_pandas(types_mapper=pd.ArrowDtype)
videochat2/lib/python3.10/site-packages/pandas/io/formats/__init__.py ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ from typing import TYPE_CHECKING
2
+
3
+ if TYPE_CHECKING:
4
+ # import modules that have public classes/functions
5
+ from pandas.io.formats import style
6
+
7
+ # and mark only those modules as public
8
+ __all__ = ["style"]
videochat2/lib/python3.10/site-packages/pandas/io/formats/__pycache__/_color_data.cpython-310.pyc ADDED
Binary file (4.51 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/io/formats/__pycache__/console.cpython-310.pyc ADDED
Binary file (1.91 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/io/formats/__pycache__/css.cpython-310.pyc ADDED
Binary file (10.6 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/io/formats/__pycache__/csvs.cpython-310.pyc ADDED
Binary file (9.82 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/io/formats/__pycache__/excel.cpython-310.pyc ADDED
Binary file (24.5 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/io/formats/__pycache__/format.cpython-310.pyc ADDED
Binary file (63.6 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/io/formats/__pycache__/html.cpython-310.pyc ADDED
Binary file (15.9 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/io/formats/__pycache__/info.cpython-310.pyc ADDED
Binary file (36.4 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/io/formats/__pycache__/latex.cpython-310.pyc ADDED
Binary file (26.3 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/io/formats/__pycache__/string.cpython-310.pyc ADDED
Binary file (6.46 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/io/formats/__pycache__/style_render.cpython-310.pyc ADDED
Binary file (70.9 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/io/formats/__pycache__/xml.cpython-310.pyc ADDED
Binary file (15.4 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/io/formats/_color_data.py ADDED
@@ -0,0 +1,157 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # GH37967: Enable the use of CSS named colors, as defined in
2
+ # matplotlib.colors.CSS4_COLORS, when exporting to Excel.
3
+ # This data has been copied here, instead of being imported from matplotlib,
4
+ # not to have ``to_excel`` methods require matplotlib.
5
+ # source: matplotlib._color_data (3.3.3)
6
+ from __future__ import annotations
7
+
8
+ CSS4_COLORS = {
9
+ "aliceblue": "F0F8FF",
10
+ "antiquewhite": "FAEBD7",
11
+ "aqua": "00FFFF",
12
+ "aquamarine": "7FFFD4",
13
+ "azure": "F0FFFF",
14
+ "beige": "F5F5DC",
15
+ "bisque": "FFE4C4",
16
+ "black": "000000",
17
+ "blanchedalmond": "FFEBCD",
18
+ "blue": "0000FF",
19
+ "blueviolet": "8A2BE2",
20
+ "brown": "A52A2A",
21
+ "burlywood": "DEB887",
22
+ "cadetblue": "5F9EA0",
23
+ "chartreuse": "7FFF00",
24
+ "chocolate": "D2691E",
25
+ "coral": "FF7F50",
26
+ "cornflowerblue": "6495ED",
27
+ "cornsilk": "FFF8DC",
28
+ "crimson": "DC143C",
29
+ "cyan": "00FFFF",
30
+ "darkblue": "00008B",
31
+ "darkcyan": "008B8B",
32
+ "darkgoldenrod": "B8860B",
33
+ "darkgray": "A9A9A9",
34
+ "darkgreen": "006400",
35
+ "darkgrey": "A9A9A9",
36
+ "darkkhaki": "BDB76B",
37
+ "darkmagenta": "8B008B",
38
+ "darkolivegreen": "556B2F",
39
+ "darkorange": "FF8C00",
40
+ "darkorchid": "9932CC",
41
+ "darkred": "8B0000",
42
+ "darksalmon": "E9967A",
43
+ "darkseagreen": "8FBC8F",
44
+ "darkslateblue": "483D8B",
45
+ "darkslategray": "2F4F4F",
46
+ "darkslategrey": "2F4F4F",
47
+ "darkturquoise": "00CED1",
48
+ "darkviolet": "9400D3",
49
+ "deeppink": "FF1493",
50
+ "deepskyblue": "00BFFF",
51
+ "dimgray": "696969",
52
+ "dimgrey": "696969",
53
+ "dodgerblue": "1E90FF",
54
+ "firebrick": "B22222",
55
+ "floralwhite": "FFFAF0",
56
+ "forestgreen": "228B22",
57
+ "fuchsia": "FF00FF",
58
+ "gainsboro": "DCDCDC",
59
+ "ghostwhite": "F8F8FF",
60
+ "gold": "FFD700",
61
+ "goldenrod": "DAA520",
62
+ "gray": "808080",
63
+ "green": "008000",
64
+ "greenyellow": "ADFF2F",
65
+ "grey": "808080",
66
+ "honeydew": "F0FFF0",
67
+ "hotpink": "FF69B4",
68
+ "indianred": "CD5C5C",
69
+ "indigo": "4B0082",
70
+ "ivory": "FFFFF0",
71
+ "khaki": "F0E68C",
72
+ "lavender": "E6E6FA",
73
+ "lavenderblush": "FFF0F5",
74
+ "lawngreen": "7CFC00",
75
+ "lemonchiffon": "FFFACD",
76
+ "lightblue": "ADD8E6",
77
+ "lightcoral": "F08080",
78
+ "lightcyan": "E0FFFF",
79
+ "lightgoldenrodyellow": "FAFAD2",
80
+ "lightgray": "D3D3D3",
81
+ "lightgreen": "90EE90",
82
+ "lightgrey": "D3D3D3",
83
+ "lightpink": "FFB6C1",
84
+ "lightsalmon": "FFA07A",
85
+ "lightseagreen": "20B2AA",
86
+ "lightskyblue": "87CEFA",
87
+ "lightslategray": "778899",
88
+ "lightslategrey": "778899",
89
+ "lightsteelblue": "B0C4DE",
90
+ "lightyellow": "FFFFE0",
91
+ "lime": "00FF00",
92
+ "limegreen": "32CD32",
93
+ "linen": "FAF0E6",
94
+ "magenta": "FF00FF",
95
+ "maroon": "800000",
96
+ "mediumaquamarine": "66CDAA",
97
+ "mediumblue": "0000CD",
98
+ "mediumorchid": "BA55D3",
99
+ "mediumpurple": "9370DB",
100
+ "mediumseagreen": "3CB371",
101
+ "mediumslateblue": "7B68EE",
102
+ "mediumspringgreen": "00FA9A",
103
+ "mediumturquoise": "48D1CC",
104
+ "mediumvioletred": "C71585",
105
+ "midnightblue": "191970",
106
+ "mintcream": "F5FFFA",
107
+ "mistyrose": "FFE4E1",
108
+ "moccasin": "FFE4B5",
109
+ "navajowhite": "FFDEAD",
110
+ "navy": "000080",
111
+ "oldlace": "FDF5E6",
112
+ "olive": "808000",
113
+ "olivedrab": "6B8E23",
114
+ "orange": "FFA500",
115
+ "orangered": "FF4500",
116
+ "orchid": "DA70D6",
117
+ "palegoldenrod": "EEE8AA",
118
+ "palegreen": "98FB98",
119
+ "paleturquoise": "AFEEEE",
120
+ "palevioletred": "DB7093",
121
+ "papayawhip": "FFEFD5",
122
+ "peachpuff": "FFDAB9",
123
+ "peru": "CD853F",
124
+ "pink": "FFC0CB",
125
+ "plum": "DDA0DD",
126
+ "powderblue": "B0E0E6",
127
+ "purple": "800080",
128
+ "rebeccapurple": "663399",
129
+ "red": "FF0000",
130
+ "rosybrown": "BC8F8F",
131
+ "royalblue": "4169E1",
132
+ "saddlebrown": "8B4513",
133
+ "salmon": "FA8072",
134
+ "sandybrown": "F4A460",
135
+ "seagreen": "2E8B57",
136
+ "seashell": "FFF5EE",
137
+ "sienna": "A0522D",
138
+ "silver": "C0C0C0",
139
+ "skyblue": "87CEEB",
140
+ "slateblue": "6A5ACD",
141
+ "slategray": "708090",
142
+ "slategrey": "708090",
143
+ "snow": "FFFAFA",
144
+ "springgreen": "00FF7F",
145
+ "steelblue": "4682B4",
146
+ "tan": "D2B48C",
147
+ "teal": "008080",
148
+ "thistle": "D8BFD8",
149
+ "tomato": "FF6347",
150
+ "turquoise": "40E0D0",
151
+ "violet": "EE82EE",
152
+ "wheat": "F5DEB3",
153
+ "white": "FFFFFF",
154
+ "whitesmoke": "F5F5F5",
155
+ "yellow": "FFFF00",
156
+ "yellowgreen": "9ACD32",
157
+ }
videochat2/lib/python3.10/site-packages/pandas/io/formats/console.py ADDED
@@ -0,0 +1,94 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Internal module for console introspection
3
+ """
4
+ from __future__ import annotations
5
+
6
+ from shutil import get_terminal_size
7
+
8
+
9
def get_console_size() -> tuple[int | None, int | None]:
    """
    Return console size as tuple = (width, height).

    Returns (None,None) in non-interactive session.
    """
    from pandas import get_option

    display_width = get_option("display.width")
    display_height = get_option("display.max_rows")

    # Three environments to consider:
    #  - interactive shell terminal: terminal size is detectable
    #  - interactive non-shell frontend (ipynb/ipqtconsole): size is NOT
    #    detectable, so fall back to the config defaults
    #  - non-interactive script: terminal size should be disregarded
    #
    # In addition, width/height options have defaults, but a user setting of
    # 'None' means "auto-detect" -- which only works in a shell terminal.

    if in_interactive_session():
        if in_ipython_frontend():
            # sane defaults for interactive non-shell terminal
            # match default for width,height in config_init
            from pandas._config.config import get_default_val

            terminal_width = get_default_val("display.width")
            terminal_height = get_default_val("display.max_rows")
        else:
            # pure terminal
            terminal_width, terminal_height = get_terminal_size()
    else:
        terminal_width, terminal_height = None, None

    # Note: if the user sets width/height to None (auto-detection) and we're
    # in a script (non-interactive), this returns (None, None) -- the caller
    # needs to handle that.
    return display_width or terminal_width, display_height or terminal_height
48
+
49
+
50
+ # ----------------------------------------------------------------------
51
+ # Detect our environment
52
+
53
+
54
def in_interactive_session() -> bool:
    """
    Check if we're running in an interactive shell.

    Returns
    -------
    bool
        True if running under python/ipython interactive shell.
    """
    from pandas import get_option

    def check_main():
        try:
            import __main__ as main
        except ModuleNotFoundError:
            # no __main__ at all (e.g. embedded interpreter): defer to the
            # simulation option so tests can force "interactive" behavior
            return get_option("mode.sim_interactive")
        # a REPL session has no __main__.__file__; a script does
        return not hasattr(main, "__file__") or get_option("mode.sim_interactive")

    try:
        # __IPYTHON__ is injected into builtins by IPython itself; the name
        # lookup raises NameError outside IPython, which the except handles.
        # error: Name '__IPYTHON__' is not defined
        return __IPYTHON__ or check_main()  # type: ignore[name-defined]
    except NameError:
        return check_main()
77
+
78
+
79
def in_ipython_frontend() -> bool:
    """
    Check if we're inside an IPython zmq frontend.

    Returns
    -------
    bool
        True only when an IPython shell with a zmq-based frontend is active.
    """
    try:
        # get_ipython is injected into builtins by IPython; outside IPython
        # the name does not exist and the NameError tells us so.
        # error: Name 'get_ipython' is not defined
        shell = get_ipython()  # type: ignore[name-defined]
    except NameError:
        return False
    return "zmq" in str(type(shell)).lower()
videochat2/lib/python3.10/site-packages/pandas/io/formats/css.py ADDED
@@ -0,0 +1,418 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Utilities for interpreting CSS from Stylers for formatting non-HTML outputs.
3
+ """
4
+ from __future__ import annotations
5
+
6
+ import re
7
+ from typing import (
8
+ Callable,
9
+ Generator,
10
+ Iterable,
11
+ Iterator,
12
+ )
13
+ import warnings
14
+
15
+ from pandas.errors import CSSWarning
16
+ from pandas.util._exceptions import find_stack_level
17
+
18
+
19
+ def _side_expander(prop_fmt: str) -> Callable:
20
+ """
21
+ Wrapper to expand shorthand property into top, right, bottom, left properties
22
+
23
+ Parameters
24
+ ----------
25
+ side : str
26
+ The border side to expand into properties
27
+
28
+ Returns
29
+ -------
30
+ function: Return to call when a 'border(-{side}): {value}' string is encountered
31
+ """
32
+
33
+ def expand(self, prop, value: str) -> Generator[tuple[str, str], None, None]:
34
+ """
35
+ Expand shorthand property into side-specific property (top, right, bottom, left)
36
+
37
+ Parameters
38
+ ----------
39
+ prop (str): CSS property name
40
+ value (str): String token for property
41
+
42
+ Yields
43
+ ------
44
+ Tuple (str, str): Expanded property, value
45
+ """
46
+ tokens = value.split()
47
+ try:
48
+ mapping = self.SIDE_SHORTHANDS[len(tokens)]
49
+ except KeyError:
50
+ warnings.warn(
51
+ f'Could not expand "{prop}: {value}"',
52
+ CSSWarning,
53
+ stacklevel=find_stack_level(),
54
+ )
55
+ return
56
+ for key, idx in zip(self.SIDES, mapping):
57
+ yield prop_fmt.format(key), tokens[idx]
58
+
59
+ return expand
60
+
61
+
62
def _border_expander(side: str = "") -> Callable:
    """
    Wrapper to expand 'border' property into border color, style, and width properties

    Parameters
    ----------
    side : str
        The border side to expand into properties

    Returns
    -------
    function: Return to call when a 'border(-{side}): {value}' string is encountered
    """
    # normalize "top" -> "-top" so property names concatenate cleanly;
    # an empty side yields plain "border-color" etc.
    if side != "":
        side = f"-{side}"

    def expand(self, prop, value: str) -> Generator[tuple[str, str], None, None]:
        """
        Expand border into color, style, and width tuples

        Parameters
        ----------
        prop : str
            CSS property name passed to styler
        value : str
            Value passed to styler for property

        Yields
        ------
        Tuple (str, str): Expanded property, value
        """
        tokens = value.split()
        if len(tokens) == 0 or len(tokens) > 3:
            # warn but fall through: expansion still proceeds with whatever
            # tokens were given plus the defaults below
            warnings.warn(
                f'Too many tokens provided to "{prop}" (expected 1-3)',
                CSSWarning,
                stacklevel=find_stack_level(),
            )

        # TODO: Can we use current color as initial value to comply with CSS standards?
        border_declarations = {
            f"border{side}-color": "black",
            f"border{side}-style": "none",
            f"border{side}-width": "medium",
        }
        # classify each token: a known style keyword -> style, a token
        # containing a known width unit/keyword -> width, otherwise color
        for token in tokens:
            if token.lower() in self.BORDER_STYLES:
                border_declarations[f"border{side}-style"] = token
            elif any(ratio in token.lower() for ratio in self.BORDER_WIDTH_RATIOS):
                border_declarations[f"border{side}-width"] = token
            else:
                border_declarations[f"border{side}-color"] = token
            # TODO: Warn user if item entered more than once (e.g. "border: red green")

        # Per CSS, "border" will reset previous "border-*" definitions
        yield from self.atomize(border_declarations.items())

    return expand
120
+
121
+
122
class CSSResolver:
    """
    A callable for parsing and resolving CSS to atomic properties.
    """

    # Unit-conversion table: maps a CSS length unit to a
    # (next_unit, multiplier) pair. ``size_to_pt`` follows the chain
    # until it reaches "pt" (e.g. q -> mm -> in -> pt).
    # "!!default" is an internal sentinel emitted when a size cannot be
    # parsed (see ``size_to_pt._error``).
    UNIT_RATIOS = {
        "pt": ("pt", 1),
        "em": ("em", 1),
        "rem": ("pt", 12),
        "ex": ("em", 0.5),
        # 'ch':
        "px": ("pt", 0.75),
        "pc": ("pt", 12),
        "in": ("pt", 72),
        "cm": ("in", 1 / 2.54),
        "mm": ("in", 1 / 25.4),
        "q": ("mm", 0.25),
        "!!default": ("em", 0),
    }

    # Font sizes additionally understand percentages and the CSS
    # absolute-size ("small", "x-large", ...) and relative-size
    # ("smaller", "larger") keywords.
    FONT_SIZE_RATIOS = UNIT_RATIOS.copy()
    FONT_SIZE_RATIOS.update(
        {
            "%": ("em", 0.01),
            "xx-small": ("rem", 0.5),
            "x-small": ("rem", 0.625),
            "small": ("rem", 0.8),
            "medium": ("rem", 1),
            "large": ("rem", 1.125),
            "x-large": ("rem", 1.5),
            "xx-large": ("rem", 2),
            "smaller": ("em", 1 / 1.2),
            "larger": ("em", 1.2),
            "!!default": ("em", 1),
        }
    )

    # Margins/paddings also accept the keyword "none" (treated as 0pt).
    MARGIN_RATIOS = UNIT_RATIOS.copy()
    MARGIN_RATIOS.update({"none": ("pt", 0)})

    # Border widths accept the CSS keywords none/thin/medium/thick.
    BORDER_WIDTH_RATIOS = UNIT_RATIOS.copy()
    BORDER_WIDTH_RATIOS.update(
        {
            "none": ("pt", 0),
            "thick": ("px", 4),
            "medium": ("px", 2),
            "thin": ("px", 1),
            # Default: medium only if solid
        }
    )

    # Recognised border-style tokens: the CSS set plus Excel-specific
    # names (e.g. "mediumdashdot", "slantdashdot").
    BORDER_STYLES = [
        "none",
        "hidden",
        "dotted",
        "dashed",
        "solid",
        "double",
        "groove",
        "ridge",
        "inset",
        "outset",
        "mediumdashdot",
        "dashdotdot",
        "hair",
        "mediumdashdotdot",
        "dashdot",
        "slantdashdot",
        "mediumdashed",
    ]

    # For "margin: a [b [c [d]]]"-style shorthands: maps the number of
    # tokens supplied to the token index used for each of the four
    # sides, in SIDES order (top, right, bottom, left).
    SIDE_SHORTHANDS = {
        1: [0, 0, 0, 0],
        2: [0, 1, 0, 1],
        3: [0, 1, 2, 1],
        4: [0, 1, 2, 3],
    }

    SIDES = ("top", "right", "bottom", "left")

    # Maps each shorthand property ("border", "margin", ...) to an
    # expander function that yields atomic per-side declarations.
    # ``_border_expander`` / ``_side_expander`` are module-level
    # factories defined above this class.
    CSS_EXPANSIONS = {
        **{
            (f"border-{prop}" if prop else "border"): _border_expander(prop)
            for prop in ["", "top", "right", "bottom", "left"]
        },
        **{
            f"border-{prop}": _side_expander(f"border-{{:s}}-{prop}")
            for prop in ["color", "style", "width"]
        },
        **{
            "margin": _side_expander("margin-{:s}"),
            "padding": _side_expander("padding-{:s}"),
        },
    }

    def __call__(
        self,
        declarations: str | Iterable[tuple[str, str]],
        inherited: dict[str, str] | None = None,
    ) -> dict[str, str]:
        """
        The given declarations to atomic properties.

        Parameters
        ----------
        declarations_str : str | Iterable[tuple[str, str]]
            A CSS string or set of CSS declaration tuples
            e.g. "font-weight: bold; background: blue" or
            {("font-weight", "bold"), ("background", "blue")}
        inherited : dict, optional
            Atomic properties indicating the inherited style context in which
            declarations_str is to be resolved. ``inherited`` should already
            be resolved, i.e. valid output of this method.

        Returns
        -------
        dict
            Atomic CSS 2.2 properties.

        Examples
        --------
        >>> resolve = CSSResolver()
        >>> inherited = {'font-family': 'serif', 'font-weight': 'bold'}
        >>> out = resolve('''
        ...               border-color: BLUE RED;
        ...               font-size: 1em;
        ...               font-size: 2em;
        ...               font-weight: normal;
        ...               font-weight: inherit;
        ...               ''', inherited)
        >>> sorted(out.items())  # doctest: +NORMALIZE_WHITESPACE
        [('border-bottom-color', 'blue'),
         ('border-left-color', 'red'),
         ('border-right-color', 'red'),
         ('border-top-color', 'blue'),
         ('font-family', 'serif'),
         ('font-size', '24pt'),
         ('font-weight', 'bold')]
        """
        if isinstance(declarations, str):
            declarations = self.parse(declarations)
        props = dict(self.atomize(declarations))
        if inherited is None:
            inherited = {}

        props = self._update_initial(props, inherited)
        props = self._update_font_size(props, inherited)
        return self._update_other_units(props)

    def _update_initial(
        self,
        props: dict[str, str],
        inherited: dict[str, str],
    ) -> dict[str, str]:
        """Fill in inherited values and drop 'initial'/None properties."""
        # 1. resolve inherited, initial
        for prop, val in inherited.items():
            if prop not in props:
                props[prop] = val

        new_props = props.copy()
        for prop, val in props.items():
            if val == "inherit":
                # "inherit" falls back to "initial" when the parent scope
                # does not define the property either
                val = inherited.get(prop, "initial")

            if val in ("initial", None):
                # we do not define a complete initial stylesheet
                del new_props[prop]
            else:
                new_props[prop] = val
        return new_props

    def _update_font_size(
        self,
        props: dict[str, str],
        inherited: dict[str, str],
    ) -> dict[str, str]:
        """Convert a relative font-size to an absolute "...pt" string."""
        # 2. resolve relative font size
        if props.get("font-size"):
            props["font-size"] = self.size_to_pt(
                props["font-size"],
                # em/% sizes are relative to the *inherited* font size
                self._get_font_size(inherited),
                conversions=self.FONT_SIZE_RATIOS,
            )
        return props

    def _get_font_size(self, props: dict[str, str]) -> float | None:
        """Return the resolved font-size in points, or None if unset."""
        if props.get("font-size"):
            font_size_string = props["font-size"]
            return self._get_float_font_size_from_pt(font_size_string)
        return None

    def _get_float_font_size_from_pt(self, font_size_string: str) -> float:
        """Parse a size string already resolved to points, e.g. "12pt" -> 12.0."""
        assert font_size_string.endswith("pt")
        return float(font_size_string.rstrip("pt"))

    def _update_other_units(self, props: dict[str, str]) -> dict[str, str]:
        """Convert border widths, margins and paddings to "...pt" strings."""
        font_size = self._get_font_size(props)
        # 3. TODO: resolve other font-relative units
        for side in self.SIDES:
            prop = f"border-{side}-width"
            if prop in props:
                props[prop] = self.size_to_pt(
                    props[prop],
                    em_pt=font_size,
                    conversions=self.BORDER_WIDTH_RATIOS,
                )

            for prop in [f"margin-{side}", f"padding-{side}"]:
                if prop in props:
                    # TODO: support %
                    props[prop] = self.size_to_pt(
                        props[prop],
                        em_pt=font_size,
                        conversions=self.MARGIN_RATIOS,
                    )
        return props

    def size_to_pt(self, in_val, em_pt=None, conversions=UNIT_RATIOS):
        """
        Convert a CSS size string to an absolute "...pt" string.

        Parameters
        ----------
        in_val : str
            Size expression, e.g. "1em", "50%", "medium".
        em_pt : float, optional
            Point size that 1em resolves to; when None, em is treated
            as rem (i.e. relative to the root size).
        conversions : dict
            Unit-ratio table to follow (see ``UNIT_RATIOS``).

        Returns
        -------
        str
            The size in points, e.g. "12pt"; unparseable input warns
            with ``CSSWarning`` and falls back to the table's
            "!!default" entry.
        """

        def _error():
            # Warn and substitute the table's "!!default" fallback size.
            warnings.warn(
                f"Unhandled size: {repr(in_val)}",
                CSSWarning,
                stacklevel=find_stack_level(),
            )
            return self.size_to_pt("1!!default", conversions=conversions)

        # Split into a numeric prefix and a unit/keyword suffix.
        match = re.match(r"^(\S*?)([a-zA-Z%!].*)", in_val)
        if match is None:
            return _error()

        val, unit = match.groups()
        if val == "":
            # hack for 'large' etc.
            val = 1
        else:
            try:
                val = float(val)
            except ValueError:
                return _error()

        # Follow the conversion chain until the unit is "pt".
        while unit != "pt":
            if unit == "em":
                if em_pt is None:
                    # no context font size: treat em as rem
                    unit = "rem"
                else:
                    val *= em_pt
                    unit = "pt"
                continue

            try:
                unit, mul = conversions[unit]
            except KeyError:
                return _error()
            val *= mul

        # Format as an integer when possible, else as a float.
        val = round(val, 5)
        if int(val) == val:
            size_fmt = f"{int(val):d}pt"
        else:
            size_fmt = f"{val:f}pt"
        return size_fmt

    def atomize(self, declarations: Iterable) -> Generator[tuple[str, str], None, None]:
        """Expand shorthand declarations into atomic (prop, value) pairs."""
        for prop, value in declarations:
            prop = prop.lower()
            value = value.lower()
            if prop in self.CSS_EXPANSIONS:
                expand = self.CSS_EXPANSIONS[prop]
                yield from expand(self, prop, value)
            else:
                yield prop, value

    def parse(self, declarations_str: str) -> Iterator[tuple[str, str]]:
        """
        Generates (prop, value) pairs from declarations.

        In a future version may generate parsed tokens from tinycss/tinycss2

        Parameters
        ----------
        declarations_str : str
        """
        for decl in declarations_str.split(";"):
            if not decl.strip():
                continue
            prop, sep, val = decl.partition(":")
            prop = prop.strip().lower()
            # TODO: don't lowercase case sensitive parts of values (strings)
            val = val.strip().lower()
            if sep:
                yield prop, val
            else:
                # a declaration with no ":" is malformed; warn and skip it
                warnings.warn(
                    f"Ill-formatted attribute: expected a colon in {repr(decl)}",
                    CSSWarning,
                    stacklevel=find_stack_level(),
                )
videochat2/lib/python3.10/site-packages/pandas/io/formats/csvs.py ADDED
@@ -0,0 +1,319 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Module for formatting output data into CSV files.
3
+ """
4
+
5
+ from __future__ import annotations
6
+
7
+ import csv as csvlib
8
+ import os
9
+ from typing import (
10
+ TYPE_CHECKING,
11
+ Any,
12
+ Hashable,
13
+ Iterator,
14
+ Sequence,
15
+ cast,
16
+ )
17
+
18
+ import numpy as np
19
+
20
+ from pandas._libs import writers as libwriters
21
+ from pandas._typing import (
22
+ CompressionOptions,
23
+ FilePath,
24
+ FloatFormatType,
25
+ IndexLabel,
26
+ StorageOptions,
27
+ WriteBuffer,
28
+ )
29
+ from pandas.util._decorators import cache_readonly
30
+
31
+ from pandas.core.dtypes.generic import (
32
+ ABCDatetimeIndex,
33
+ ABCIndex,
34
+ ABCMultiIndex,
35
+ ABCPeriodIndex,
36
+ )
37
+ from pandas.core.dtypes.missing import notna
38
+
39
+ from pandas.core.indexes.api import Index
40
+
41
+ from pandas.io.common import get_handle
42
+
43
+ if TYPE_CHECKING:
44
+ from pandas.io.formats.format import DataFrameFormatter
45
+
46
+
47
class CSVFormatter:
    """
    Write the frame held by a ``DataFrameFormatter`` out as CSV.

    ``save`` opens the target (via ``get_handle``), builds a
    ``csv.writer`` and streams header rows plus chunked body rows
    through ``libwriters.write_csv_rows``.
    """

    # rendered column labels (set in __init__ by _initialize_columns)
    cols: np.ndarray

    def __init__(
        self,
        formatter: DataFrameFormatter,
        path_or_buf: FilePath | WriteBuffer[str] | WriteBuffer[bytes] = "",
        sep: str = ",",
        cols: Sequence[Hashable] | None = None,
        index_label: IndexLabel | None = None,
        mode: str = "w",
        encoding: str | None = None,
        errors: str = "strict",
        compression: CompressionOptions = "infer",
        quoting: int | None = None,
        lineterminator: str | None = "\n",
        chunksize: int | None = None,
        quotechar: str | None = '"',
        date_format: str | None = None,
        doublequote: bool = True,
        escapechar: str | None = None,
        storage_options: StorageOptions = None,
    ) -> None:
        self.fmt = formatter

        self.obj = self.fmt.frame

        self.filepath_or_buffer = path_or_buf
        self.encoding = encoding
        self.compression: CompressionOptions = compression
        self.mode = mode
        self.storage_options = storage_options

        self.sep = sep
        self.index_label = self._initialize_index_label(index_label)
        self.errors = errors
        # QUOTE_MINIMAL when quoting is None or 0
        self.quoting = quoting or csvlib.QUOTE_MINIMAL
        self.quotechar = self._initialize_quotechar(quotechar)
        self.doublequote = doublequote
        self.escapechar = escapechar
        self.lineterminator = lineterminator or os.linesep
        self.date_format = date_format
        # NOTE: _initialize_columns may re-assign self.obj (column subset),
        # and _initialize_chunksize depends on self.cols — keep this order.
        self.cols = self._initialize_columns(cols)
        self.chunksize = self._initialize_chunksize(chunksize)

    @property
    def na_rep(self) -> str:
        """String representation used for missing values."""
        return self.fmt.na_rep

    @property
    def float_format(self) -> FloatFormatType | None:
        """Formatter applied to floats, if any."""
        return self.fmt.float_format

    @property
    def decimal(self) -> str:
        """Character used as the decimal separator."""
        return self.fmt.decimal

    @property
    def header(self) -> bool | Sequence[str]:
        """Whether to write column headers, or explicit header aliases."""
        return self.fmt.header

    @property
    def index(self) -> bool:
        """Whether to write the index as the leading column(s)."""
        return self.fmt.index

    def _initialize_index_label(self, index_label: IndexLabel | None) -> IndexLabel:
        """Normalize ``index_label`` to False or a list-like of labels."""
        if index_label is not False:
            if index_label is None:
                return self._get_index_label_from_obj()
            elif not isinstance(index_label, (list, tuple, np.ndarray, ABCIndex)):
                # given a string for a DF with Index
                return [index_label]
        return index_label

    def _get_index_label_from_obj(self) -> Sequence[Hashable]:
        """Derive index labels from the frame's own index name(s)."""
        if isinstance(self.obj.index, ABCMultiIndex):
            return self._get_index_label_multiindex()
        else:
            return self._get_index_label_flat()

    def _get_index_label_multiindex(self) -> Sequence[Hashable]:
        """One label per MultiIndex level; unnamed levels become ""."""
        return [name or "" for name in self.obj.index.names]

    def _get_index_label_flat(self) -> Sequence[Hashable]:
        """Single-element label list; unnamed index becomes [""]."""
        index_label = self.obj.index.name
        return [""] if index_label is None else [index_label]

    def _initialize_quotechar(self, quotechar: str | None) -> str | None:
        """Drop the quote character when quoting is disabled entirely."""
        if self.quoting != csvlib.QUOTE_NONE:
            # prevents crash in _csv
            return quotechar
        return None

    @property
    def has_mi_columns(self) -> bool:
        """True when the frame's columns are a MultiIndex."""
        return bool(isinstance(self.obj.columns, ABCMultiIndex))

    def _initialize_columns(self, cols: Sequence[Hashable] | None) -> np.ndarray:
        """Select the requested columns and render their labels as strings."""
        # validate mi options
        if self.has_mi_columns:
            if cols is not None:
                msg = "cannot specify cols with a MultiIndex on the columns"
                raise TypeError(msg)

        if cols is not None:
            if isinstance(cols, ABCIndex):
                cols = cols._format_native_types(**self._number_format)
            else:
                cols = list(cols)
            # restrict the frame to the requested column subset
            self.obj = self.obj.loc[:, cols]

        # update columns to include possible multiplicity of dupes
        # and make sure cols is just a list of labels
        new_cols = self.obj.columns
        return new_cols._format_native_types(**self._number_format)

    def _initialize_chunksize(self, chunksize: int | None) -> int:
        """Default chunksize targets ~100k cells per chunk, minimum 1 row."""
        if chunksize is None:
            return (100000 // (len(self.cols) or 1)) or 1
        return int(chunksize)

    @property
    def _number_format(self) -> dict[str, Any]:
        """Dictionary used for storing number formatting settings."""
        return {
            "na_rep": self.na_rep,
            "float_format": self.float_format,
            "date_format": self.date_format,
            "quoting": self.quoting,
            "decimal": self.decimal,
        }

    @cache_readonly
    def data_index(self) -> Index:
        """Index to write: datetimes pre-formatted, MI levels pruned."""
        data_index = self.obj.index
        if (
            isinstance(data_index, (ABCDatetimeIndex, ABCPeriodIndex))
            and self.date_format is not None
        ):
            data_index = Index(
                [x.strftime(self.date_format) if notna(x) else "" for x in data_index]
            )
        elif isinstance(data_index, ABCMultiIndex):
            data_index = data_index.remove_unused_levels()
        return data_index

    @property
    def nlevels(self) -> int:
        """Number of leading index columns written (0 when index=False)."""
        if self.index:
            return getattr(self.data_index, "nlevels", 1)
        else:
            return 0

    @property
    def _has_aliases(self) -> bool:
        """True when ``header`` is a list-like of replacement column names."""
        return isinstance(self.header, (tuple, list, np.ndarray, ABCIndex))

    @property
    def _need_to_save_header(self) -> bool:
        """True when any header row must be written."""
        return bool(self._has_aliases or self.header)

    @property
    def write_cols(self) -> Sequence[Hashable]:
        """Column labels to write: header aliases if given, else rendered cols."""
        if self._has_aliases:
            assert not isinstance(self.header, bool)
            if len(self.header) != len(self.cols):
                raise ValueError(
                    f"Writing {len(self.cols)} cols but got {len(self.header)} aliases"
                )
            return self.header
        else:
            # self.cols is an ndarray derived from Index._format_native_types,
            # so its entries are strings, i.e. hashable
            return cast(Sequence[Hashable], self.cols)

    @property
    def encoded_labels(self) -> list[Hashable]:
        """The single flat header row: index labels followed by column labels."""
        encoded_labels: list[Hashable] = []

        if self.index and self.index_label:
            assert isinstance(self.index_label, Sequence)
            encoded_labels = list(self.index_label)

        if not self.has_mi_columns or self._has_aliases:
            encoded_labels += list(self.write_cols)

        return encoded_labels

    def save(self) -> None:
        """
        Create the writer & save.
        """
        # apply compression and byte/text conversion
        with get_handle(
            self.filepath_or_buffer,
            self.mode,
            encoding=self.encoding,
            errors=self.errors,
            compression=self.compression,
            storage_options=self.storage_options,
        ) as handles:
            # Note: self.encoding is irrelevant here
            self.writer = csvlib.writer(
                handles.handle,
                lineterminator=self.lineterminator,
                delimiter=self.sep,
                quoting=self.quoting,
                doublequote=self.doublequote,
                escapechar=self.escapechar,
                quotechar=self.quotechar,
            )

            self._save()

    def _save(self) -> None:
        """Write header (if any) then the body rows."""
        if self._need_to_save_header:
            self._save_header()
        self._save_body()

    def _save_header(self) -> None:
        """Write one flat header row, or one row per MultiIndex column level."""
        if not self.has_mi_columns or self._has_aliases:
            self.writer.writerow(self.encoded_labels)
        else:
            for row in self._generate_multiindex_header_rows():
                self.writer.writerow(row)

    def _generate_multiindex_header_rows(self) -> Iterator[list[Hashable]]:
        """Yield header rows for MultiIndex columns, one per level."""
        columns = self.obj.columns
        for i in range(columns.nlevels):
            # we need at least 1 index column to write our col names
            col_line = []
            if self.index:
                # name is the first column
                col_line.append(columns.names[i])

                if isinstance(self.index_label, list) and len(self.index_label) > 1:
                    col_line.extend([""] * (len(self.index_label) - 1))

            col_line.extend(columns._get_level_values(i))
            yield col_line

        # Write out the index line if it's not empty.
        # Otherwise, we will print out an extraneous
        # blank line between the mi and the data rows.
        if self.encoded_labels and set(self.encoded_labels) != {""}:
            yield self.encoded_labels + [""] * len(columns)

    def _save_body(self) -> None:
        """Write the data rows in chunks of ``self.chunksize``."""
        nrows = len(self.data_index)
        chunks = (nrows // self.chunksize) + 1
        for i in range(chunks):
            start_i = i * self.chunksize
            end_i = min(start_i + self.chunksize, nrows)
            if start_i >= end_i:
                break
            self._save_chunk(start_i, end_i)

    def _save_chunk(self, start_i: int, end_i: int) -> None:
        """Render rows [start_i, end_i) to strings and write them."""
        # create the data for a chunk
        slicer = slice(start_i, end_i)
        df = self.obj.iloc[slicer]

        res = df._mgr.to_native_types(**self._number_format)
        data = [res.iget_values(i) for i in range(len(res.items))]

        ix = self.data_index[slicer]._format_native_types(**self._number_format)
        libwriters.write_csv_rows(
            data,
            ix,
            self.nlevels,
            self.cols,
            self.writer,
        )
videochat2/lib/python3.10/site-packages/pandas/io/formats/excel.py ADDED
@@ -0,0 +1,950 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Utilities for conversion to writer-agnostic Excel representation.
3
+ """
4
+ from __future__ import annotations
5
+
6
+ from functools import (
7
+ lru_cache,
8
+ reduce,
9
+ )
10
+ import itertools
11
+ import re
12
+ from typing import (
13
+ Any,
14
+ Callable,
15
+ Hashable,
16
+ Iterable,
17
+ Mapping,
18
+ Sequence,
19
+ cast,
20
+ )
21
+ import warnings
22
+
23
+ import numpy as np
24
+
25
+ from pandas._libs.lib import is_list_like
26
+ from pandas._typing import (
27
+ IndexLabel,
28
+ StorageOptions,
29
+ )
30
+ from pandas.util._decorators import doc
31
+ from pandas.util._exceptions import find_stack_level
32
+
33
+ from pandas.core.dtypes import missing
34
+ from pandas.core.dtypes.common import (
35
+ is_float,
36
+ is_scalar,
37
+ )
38
+
39
+ from pandas import (
40
+ DataFrame,
41
+ Index,
42
+ MultiIndex,
43
+ PeriodIndex,
44
+ )
45
+ import pandas.core.common as com
46
+ from pandas.core.shared_docs import _shared_docs
47
+
48
+ from pandas.io.formats._color_data import CSS4_COLORS
49
+ from pandas.io.formats.css import (
50
+ CSSResolver,
51
+ CSSWarning,
52
+ )
53
+ from pandas.io.formats.format import get_level_lengths
54
+ from pandas.io.formats.printing import pprint_thing
55
+
56
+
57
+ class ExcelCell:
58
+ __fields__ = ("row", "col", "val", "style", "mergestart", "mergeend")
59
+ __slots__ = __fields__
60
+
61
+ def __init__(
62
+ self,
63
+ row: int,
64
+ col: int,
65
+ val,
66
+ style=None,
67
+ mergestart: int | None = None,
68
+ mergeend: int | None = None,
69
+ ) -> None:
70
+ self.row = row
71
+ self.col = col
72
+ self.val = val
73
+ self.style = style
74
+ self.mergestart = mergestart
75
+ self.mergeend = mergeend
76
+
77
+
78
class CssExcelCell(ExcelCell):
    """
    An ``ExcelCell`` whose style may be derived from CSS declarations.

    When both ``css_styles`` and ``css_converter`` are supplied, the CSS
    declarations found at ``(css_row, css_col)`` are deduplicated and fed
    through ``css_converter`` to produce the style; otherwise the given
    ``style`` is used unchanged.
    """

    def __init__(
        self,
        row: int,
        col: int,
        val,
        style: dict | None,
        css_styles: dict[tuple[int, int], list[tuple[str, Any]]] | None,
        css_row: int,
        css_col: int,
        css_converter: Callable | None,
        **kwargs,
    ) -> None:
        if css_styles and css_converter:
            # Deduplicate case-insensitively: later declarations for the
            # same property win, matching dict-comprehension semantics.
            deduped: dict = {}
            for prop, prop_val in css_styles[css_row, css_col]:
                deduped[prop.lower()] = prop_val
            # A frozenset key is order-invariant, so the converter's cache
            # hits regardless of declaration order.
            style = css_converter(frozenset(deduped.items()))

        super().__init__(row=row, col=col, val=val, style=style, **kwargs)
101
+
102
+
103
+ class CSSToExcelConverter:
104
+ """
105
+ A callable for converting CSS declarations to ExcelWriter styles
106
+
107
+ Supports parts of CSS 2.2, with minimal CSS 3.0 support (e.g. text-shadow),
108
+ focusing on font styling, backgrounds, borders and alignment.
109
+
110
+ Operates by first computing CSS styles in a fairly generic
111
+ way (see :meth:`compute_css`) then determining Excel style
112
+ properties from CSS properties (see :meth:`build_xlstyle`).
113
+
114
+ Parameters
115
+ ----------
116
+ inherited : str, optional
117
+ CSS declarations understood to be the containing scope for the
118
+ CSS processed by :meth:`__call__`.
119
+ """
120
+
121
+ NAMED_COLORS = CSS4_COLORS
122
+
123
+ VERTICAL_MAP = {
124
+ "top": "top",
125
+ "text-top": "top",
126
+ "middle": "center",
127
+ "baseline": "bottom",
128
+ "bottom": "bottom",
129
+ "text-bottom": "bottom",
130
+ # OpenXML also has 'justify', 'distributed'
131
+ }
132
+
133
+ BOLD_MAP = {
134
+ "bold": True,
135
+ "bolder": True,
136
+ "600": True,
137
+ "700": True,
138
+ "800": True,
139
+ "900": True,
140
+ "normal": False,
141
+ "lighter": False,
142
+ "100": False,
143
+ "200": False,
144
+ "300": False,
145
+ "400": False,
146
+ "500": False,
147
+ }
148
+
149
+ ITALIC_MAP = {
150
+ "normal": False,
151
+ "italic": True,
152
+ "oblique": True,
153
+ }
154
+
155
+ FAMILY_MAP = {
156
+ "serif": 1, # roman
157
+ "sans-serif": 2, # swiss
158
+ "cursive": 4, # script
159
+ "fantasy": 5, # decorative
160
+ }
161
+
162
+ BORDER_STYLE_MAP = {
163
+ style.lower(): style
164
+ for style in [
165
+ "dashed",
166
+ "mediumDashDot",
167
+ "dashDotDot",
168
+ "hair",
169
+ "dotted",
170
+ "mediumDashDotDot",
171
+ "double",
172
+ "dashDot",
173
+ "slantDashDot",
174
+ "mediumDashed",
175
+ ]
176
+ }
177
+
178
+ # NB: Most of the methods here could be classmethods, as only __init__
179
+ # and __call__ make use of instance attributes. We leave them as
180
+ # instancemethods so that users can easily experiment with extensions
181
+ # without monkey-patching.
182
+ inherited: dict[str, str] | None
183
+
184
+ def __init__(self, inherited: str | None = None) -> None:
185
+ if inherited is not None:
186
+ self.inherited = self.compute_css(inherited)
187
+ else:
188
+ self.inherited = None
189
+ # We should avoid lru_cache on the __call__ method.
190
+ # Otherwise once the method __call__ has been called
191
+ # garbage collection no longer deletes the instance.
192
+ self._call_cached = lru_cache(maxsize=None)(self._call_uncached)
193
+
194
+ compute_css = CSSResolver()
195
+
196
+ def __call__(
197
+ self, declarations: str | frozenset[tuple[str, str]]
198
+ ) -> dict[str, dict[str, str]]:
199
+ """
200
+ Convert CSS declarations to ExcelWriter style.
201
+
202
+ Parameters
203
+ ----------
204
+ declarations : str | frozenset[tuple[str, str]]
205
+ CSS string or set of CSS declaration tuples.
206
+ e.g. "font-weight: bold; background: blue" or
207
+ {("font-weight", "bold"), ("background", "blue")}
208
+
209
+ Returns
210
+ -------
211
+ xlstyle : dict
212
+ A style as interpreted by ExcelWriter when found in
213
+ ExcelCell.style.
214
+ """
215
+ return self._call_cached(declarations)
216
+
217
+ def _call_uncached(
218
+ self, declarations: str | frozenset[tuple[str, str]]
219
+ ) -> dict[str, dict[str, str]]:
220
+ properties = self.compute_css(declarations, self.inherited)
221
+ return self.build_xlstyle(properties)
222
+
223
+ def build_xlstyle(self, props: Mapping[str, str]) -> dict[str, dict[str, str]]:
224
+ out = {
225
+ "alignment": self.build_alignment(props),
226
+ "border": self.build_border(props),
227
+ "fill": self.build_fill(props),
228
+ "font": self.build_font(props),
229
+ "number_format": self.build_number_format(props),
230
+ }
231
+
232
+ # TODO: handle cell width and height: needs support in pandas.io.excel
233
+
234
+ def remove_none(d: dict[str, str | None]) -> None:
235
+ """Remove key where value is None, through nested dicts"""
236
+ for k, v in list(d.items()):
237
+ if v is None:
238
+ del d[k]
239
+ elif isinstance(v, dict):
240
+ remove_none(v)
241
+ if not v:
242
+ del d[k]
243
+
244
+ remove_none(out)
245
+ return out
246
+
247
+ def build_alignment(self, props: Mapping[str, str]) -> dict[str, bool | str | None]:
248
+ # TODO: text-indent, padding-left -> alignment.indent
249
+ return {
250
+ "horizontal": props.get("text-align"),
251
+ "vertical": self._get_vertical_alignment(props),
252
+ "wrap_text": self._get_is_wrap_text(props),
253
+ }
254
+
255
+ def _get_vertical_alignment(self, props: Mapping[str, str]) -> str | None:
256
+ vertical_align = props.get("vertical-align")
257
+ if vertical_align:
258
+ return self.VERTICAL_MAP.get(vertical_align)
259
+ return None
260
+
261
+ def _get_is_wrap_text(self, props: Mapping[str, str]) -> bool | None:
262
+ if props.get("white-space") is None:
263
+ return None
264
+ return bool(props["white-space"] not in ("nowrap", "pre", "pre-line"))
265
+
266
+ def build_border(
267
+ self, props: Mapping[str, str]
268
+ ) -> dict[str, dict[str, str | None]]:
269
+ return {
270
+ side: {
271
+ "style": self._border_style(
272
+ props.get(f"border-{side}-style"),
273
+ props.get(f"border-{side}-width"),
274
+ self.color_to_excel(props.get(f"border-{side}-color")),
275
+ ),
276
+ "color": self.color_to_excel(props.get(f"border-{side}-color")),
277
+ }
278
+ for side in ["top", "right", "bottom", "left"]
279
+ }
280
+
281
+ def _border_style(self, style: str | None, width: str | None, color: str | None):
282
+ # convert styles and widths to openxml, one of:
283
+ # 'dashDot'
284
+ # 'dashDotDot'
285
+ # 'dashed'
286
+ # 'dotted'
287
+ # 'double'
288
+ # 'hair'
289
+ # 'medium'
290
+ # 'mediumDashDot'
291
+ # 'mediumDashDotDot'
292
+ # 'mediumDashed'
293
+ # 'slantDashDot'
294
+ # 'thick'
295
+ # 'thin'
296
+ if width is None and style is None and color is None:
297
+ # Return None will remove "border" from style dictionary
298
+ return None
299
+
300
+ if width is None and style is None:
301
+ # Return "none" will keep "border" in style dictionary
302
+ return "none"
303
+
304
+ if style in ("none", "hidden"):
305
+ return "none"
306
+
307
+ width_name = self._get_width_name(width)
308
+ if width_name is None:
309
+ return "none"
310
+
311
+ if style in (None, "groove", "ridge", "inset", "outset", "solid"):
312
+ # not handled
313
+ return width_name
314
+
315
+ if style == "double":
316
+ return "double"
317
+ if style == "dotted":
318
+ if width_name in ("hair", "thin"):
319
+ return "dotted"
320
+ return "mediumDashDotDot"
321
+ if style == "dashed":
322
+ if width_name in ("hair", "thin"):
323
+ return "dashed"
324
+ return "mediumDashed"
325
+ elif style in self.BORDER_STYLE_MAP:
326
+ # Excel-specific styles
327
+ return self.BORDER_STYLE_MAP[style]
328
+ else:
329
+ warnings.warn(
330
+ f"Unhandled border style format: {repr(style)}",
331
+ CSSWarning,
332
+ stacklevel=find_stack_level(),
333
+ )
334
+ return "none"
335
+
336
+ def _get_width_name(self, width_input: str | None) -> str | None:
337
+ width = self._width_to_float(width_input)
338
+ if width < 1e-5:
339
+ return None
340
+ elif width < 1.3:
341
+ return "thin"
342
+ elif width < 2.8:
343
+ return "medium"
344
+ return "thick"
345
+
346
+ def _width_to_float(self, width: str | None) -> float:
347
+ if width is None:
348
+ width = "2pt"
349
+ return self._pt_to_float(width)
350
+
351
+ def _pt_to_float(self, pt_string: str) -> float:
352
+ assert pt_string.endswith("pt")
353
+ return float(pt_string.rstrip("pt"))
354
+
355
+ def build_fill(self, props: Mapping[str, str]):
356
+ # TODO: perhaps allow for special properties
357
+ # -excel-pattern-bgcolor and -excel-pattern-type
358
+ fill_color = props.get("background-color")
359
+ if fill_color not in (None, "transparent", "none"):
360
+ return {"fgColor": self.color_to_excel(fill_color), "patternType": "solid"}
361
+
362
+ def build_number_format(self, props: Mapping[str, str]) -> dict[str, str | None]:
363
+ fc = props.get("number-format")
364
+ fc = fc.replace("§", ";") if isinstance(fc, str) else fc
365
+ return {"format_code": fc}
366
+
367
+ def build_font(
368
+ self, props: Mapping[str, str]
369
+ ) -> dict[str, bool | float | str | None]:
370
+ font_names = self._get_font_names(props)
371
+ decoration = self._get_decoration(props)
372
+ return {
373
+ "name": font_names[0] if font_names else None,
374
+ "family": self._select_font_family(font_names),
375
+ "size": self._get_font_size(props),
376
+ "bold": self._get_is_bold(props),
377
+ "italic": self._get_is_italic(props),
378
+ "underline": ("single" if "underline" in decoration else None),
379
+ "strike": ("line-through" in decoration) or None,
380
+ "color": self.color_to_excel(props.get("color")),
381
+ # shadow if nonzero digit before shadow color
382
+ "shadow": self._get_shadow(props),
383
+ }
384
+
385
+ def _get_is_bold(self, props: Mapping[str, str]) -> bool | None:
386
+ weight = props.get("font-weight")
387
+ if weight:
388
+ return self.BOLD_MAP.get(weight)
389
+ return None
390
+
391
+ def _get_is_italic(self, props: Mapping[str, str]) -> bool | None:
392
+ font_style = props.get("font-style")
393
+ if font_style:
394
+ return self.ITALIC_MAP.get(font_style)
395
+ return None
396
+
397
+ def _get_decoration(self, props: Mapping[str, str]) -> Sequence[str]:
398
+ decoration = props.get("text-decoration")
399
+ if decoration is not None:
400
+ return decoration.split()
401
+ else:
402
+ return ()
403
+
404
+ def _get_underline(self, decoration: Sequence[str]) -> str | None:
405
+ if "underline" in decoration:
406
+ return "single"
407
+ return None
408
+
409
+ def _get_shadow(self, props: Mapping[str, str]) -> bool | None:
410
+ if "text-shadow" in props:
411
+ return bool(re.search("^[^#(]*[1-9]", props["text-shadow"]))
412
+ return None
413
+
414
+ def _get_font_names(self, props: Mapping[str, str]) -> Sequence[str]:
415
+ font_names_tmp = re.findall(
416
+ r"""(?x)
417
+ (
418
+ "(?:[^"]|\\")+"
419
+ |
420
+ '(?:[^']|\\')+'
421
+ |
422
+ [^'",]+
423
+ )(?=,|\s*$)
424
+ """,
425
+ props.get("font-family", ""),
426
+ )
427
+
428
+ font_names = []
429
+ for name in font_names_tmp:
430
+ if name[:1] == '"':
431
+ name = name[1:-1].replace('\\"', '"')
432
+ elif name[:1] == "'":
433
+ name = name[1:-1].replace("\\'", "'")
434
+ else:
435
+ name = name.strip()
436
+ if name:
437
+ font_names.append(name)
438
+ return font_names
439
+
440
+ def _get_font_size(self, props: Mapping[str, str]) -> float | None:
441
+ size = props.get("font-size")
442
+ if size is None:
443
+ return size
444
+ return self._pt_to_float(size)
445
+
446
+ def _select_font_family(self, font_names) -> int | None:
447
+ family = None
448
+ for name in font_names:
449
+ family = self.FAMILY_MAP.get(name)
450
+ if family:
451
+ break
452
+
453
+ return family
454
+
455
+ def color_to_excel(self, val: str | None) -> str | None:
456
+ if val is None:
457
+ return None
458
+
459
+ if self._is_hex_color(val):
460
+ return self._convert_hex_to_excel(val)
461
+
462
+ try:
463
+ return self.NAMED_COLORS[val]
464
+ except KeyError:
465
+ warnings.warn(
466
+ f"Unhandled color format: {repr(val)}",
467
+ CSSWarning,
468
+ stacklevel=find_stack_level(),
469
+ )
470
+ return None
471
+
472
+ def _is_hex_color(self, color_string: str) -> bool:
473
+ return bool(color_string.startswith("#"))
474
+
475
+ def _convert_hex_to_excel(self, color_string: str) -> str:
476
+ code = color_string.lstrip("#")
477
+ if self._is_shorthand_color(color_string):
478
+ return (code[0] * 2 + code[1] * 2 + code[2] * 2).upper()
479
+ else:
480
+ return code.upper()
481
+
482
+ def _is_shorthand_color(self, color_string: str) -> bool:
483
+ """Check if color code is shorthand.
484
+
485
+ #FFF is a shorthand as opposed to full #FFFFFF.
486
+ """
487
+ code = color_string.lstrip("#")
488
+ if len(code) == 3:
489
+ return True
490
+ elif len(code) == 6:
491
+ return False
492
+ else:
493
+ raise ValueError(f"Unexpected color {color_string}")
494
+
495
+
496
+ class ExcelFormatter:
497
+ """
498
+ Class for formatting a DataFrame to a list of ExcelCells,
499
+
500
+ Parameters
501
+ ----------
502
+ df : DataFrame or Styler
503
+ na_rep: na representation
504
+ float_format : str, default None
505
+ Format string for floating point numbers
506
+ cols : sequence, optional
507
+ Columns to write
508
+ header : bool or sequence of str, default True
509
+ Write out column names. If a list of string is given it is
510
+ assumed to be aliases for the column names
511
+ index : bool, default True
512
+ output row names (index)
513
+ index_label : str or sequence, default None
514
+ Column label for index column(s) if desired. If None is given, and
515
+ `header` and `index` are True, then the index names are used. A
516
+ sequence should be given if the DataFrame uses MultiIndex.
517
+ merge_cells : bool, default False
518
+ Format MultiIndex and Hierarchical Rows as merged cells.
519
+ inf_rep : str, default `'inf'`
520
+ representation for np.inf values (which aren't representable in Excel)
521
+ A `'-'` sign will be added in front of -inf.
522
+ style_converter : callable, optional
523
+ This translates Styler styles (CSS) into ExcelWriter styles.
524
+ Defaults to ``CSSToExcelConverter()``.
525
+ It should have signature css_declarations string -> excel style.
526
+ This is only called for body cells.
527
+ """
528
+
529
+ max_rows = 2**20
530
+ max_cols = 2**14
531
+
532
+ def __init__(
533
+ self,
534
+ df,
535
+ na_rep: str = "",
536
+ float_format: str | None = None,
537
+ cols: Sequence[Hashable] | None = None,
538
+ header: Sequence[Hashable] | bool = True,
539
+ index: bool = True,
540
+ index_label: IndexLabel | None = None,
541
+ merge_cells: bool = False,
542
+ inf_rep: str = "inf",
543
+ style_converter: Callable | None = None,
544
+ ) -> None:
545
+ self.rowcounter = 0
546
+ self.na_rep = na_rep
547
+ if not isinstance(df, DataFrame):
548
+ self.styler = df
549
+ self.styler._compute() # calculate applied styles
550
+ df = df.data
551
+ if style_converter is None:
552
+ style_converter = CSSToExcelConverter()
553
+ self.style_converter: Callable | None = style_converter
554
+ else:
555
+ self.styler = None
556
+ self.style_converter = None
557
+ self.df = df
558
+ if cols is not None:
559
+ # all missing, raise
560
+ if not len(Index(cols).intersection(df.columns)):
561
+ raise KeyError("passes columns are not ALL present dataframe")
562
+
563
+ if len(Index(cols).intersection(df.columns)) != len(set(cols)):
564
+ # Deprecated in GH#17295, enforced in 1.0.0
565
+ raise KeyError("Not all names specified in 'columns' are found")
566
+
567
+ self.df = df.reindex(columns=cols)
568
+
569
+ self.columns = self.df.columns
570
+ self.float_format = float_format
571
+ self.index = index
572
+ self.index_label = index_label
573
+ self.header = header
574
+ self.merge_cells = merge_cells
575
+ self.inf_rep = inf_rep
576
+
577
+ @property
578
+ def header_style(self) -> dict[str, dict[str, str | bool]]:
579
+ return {
580
+ "font": {"bold": True},
581
+ "borders": {
582
+ "top": "thin",
583
+ "right": "thin",
584
+ "bottom": "thin",
585
+ "left": "thin",
586
+ },
587
+ "alignment": {"horizontal": "center", "vertical": "top"},
588
+ }
589
+
590
+ def _format_value(self, val):
591
+ if is_scalar(val) and missing.isna(val):
592
+ val = self.na_rep
593
+ elif is_float(val):
594
+ if missing.isposinf_scalar(val):
595
+ val = self.inf_rep
596
+ elif missing.isneginf_scalar(val):
597
+ val = f"-{self.inf_rep}"
598
+ elif self.float_format is not None:
599
+ val = float(self.float_format % val)
600
+ if getattr(val, "tzinfo", None) is not None:
601
+ raise ValueError(
602
+ "Excel does not support datetimes with "
603
+ "timezones. Please ensure that datetimes "
604
+ "are timezone unaware before writing to Excel."
605
+ )
606
+ return val
607
+
608
+ def _format_header_mi(self) -> Iterable[ExcelCell]:
609
+ if self.columns.nlevels > 1:
610
+ if not self.index:
611
+ raise NotImplementedError(
612
+ "Writing to Excel with MultiIndex columns and no "
613
+ "index ('index'=False) is not yet implemented."
614
+ )
615
+
616
+ if not (self._has_aliases or self.header):
617
+ return
618
+
619
+ columns = self.columns
620
+ level_strs = columns.format(
621
+ sparsify=self.merge_cells, adjoin=False, names=False
622
+ )
623
+ level_lengths = get_level_lengths(level_strs)
624
+ coloffset = 0
625
+ lnum = 0
626
+
627
+ if self.index and isinstance(self.df.index, MultiIndex):
628
+ coloffset = len(self.df.index[0]) - 1
629
+
630
+ if self.merge_cells:
631
+ # Format multi-index as a merged cells.
632
+ for lnum, name in enumerate(columns.names):
633
+ yield ExcelCell(
634
+ row=lnum,
635
+ col=coloffset,
636
+ val=name,
637
+ style=self.header_style,
638
+ )
639
+
640
+ for lnum, (spans, levels, level_codes) in enumerate(
641
+ zip(level_lengths, columns.levels, columns.codes)
642
+ ):
643
+ values = levels.take(level_codes)
644
+ for i, span_val in spans.items():
645
+ mergestart, mergeend = None, None
646
+ if span_val > 1:
647
+ mergestart, mergeend = lnum, coloffset + i + span_val
648
+ yield CssExcelCell(
649
+ row=lnum,
650
+ col=coloffset + i + 1,
651
+ val=values[i],
652
+ style=self.header_style,
653
+ css_styles=getattr(self.styler, "ctx_columns", None),
654
+ css_row=lnum,
655
+ css_col=i,
656
+ css_converter=self.style_converter,
657
+ mergestart=mergestart,
658
+ mergeend=mergeend,
659
+ )
660
+ else:
661
+ # Format in legacy format with dots to indicate levels.
662
+ for i, values in enumerate(zip(*level_strs)):
663
+ v = ".".join(map(pprint_thing, values))
664
+ yield CssExcelCell(
665
+ row=lnum,
666
+ col=coloffset + i + 1,
667
+ val=v,
668
+ style=self.header_style,
669
+ css_styles=getattr(self.styler, "ctx_columns", None),
670
+ css_row=lnum,
671
+ css_col=i,
672
+ css_converter=self.style_converter,
673
+ )
674
+
675
+ self.rowcounter = lnum
676
+
677
+ def _format_header_regular(self) -> Iterable[ExcelCell]:
678
+ if self._has_aliases or self.header:
679
+ coloffset = 0
680
+
681
+ if self.index:
682
+ coloffset = 1
683
+ if isinstance(self.df.index, MultiIndex):
684
+ coloffset = len(self.df.index.names)
685
+
686
+ colnames = self.columns
687
+ if self._has_aliases:
688
+ self.header = cast(Sequence, self.header)
689
+ if len(self.header) != len(self.columns):
690
+ raise ValueError(
691
+ f"Writing {len(self.columns)} cols "
692
+ f"but got {len(self.header)} aliases"
693
+ )
694
+ colnames = self.header
695
+
696
+ for colindex, colname in enumerate(colnames):
697
+ yield CssExcelCell(
698
+ row=self.rowcounter,
699
+ col=colindex + coloffset,
700
+ val=colname,
701
+ style=self.header_style,
702
+ css_styles=getattr(self.styler, "ctx_columns", None),
703
+ css_row=0,
704
+ css_col=colindex,
705
+ css_converter=self.style_converter,
706
+ )
707
+
708
+ def _format_header(self) -> Iterable[ExcelCell]:
709
+ gen: Iterable[ExcelCell]
710
+
711
+ if isinstance(self.columns, MultiIndex):
712
+ gen = self._format_header_mi()
713
+ else:
714
+ gen = self._format_header_regular()
715
+
716
+ gen2: Iterable[ExcelCell] = ()
717
+
718
+ if self.df.index.names:
719
+ row = [x if x is not None else "" for x in self.df.index.names] + [
720
+ ""
721
+ ] * len(self.columns)
722
+ if reduce(lambda x, y: x and y, map(lambda x: x != "", row)):
723
+ gen2 = (
724
+ ExcelCell(self.rowcounter, colindex, val, self.header_style)
725
+ for colindex, val in enumerate(row)
726
+ )
727
+ self.rowcounter += 1
728
+ return itertools.chain(gen, gen2)
729
+
730
+ def _format_body(self) -> Iterable[ExcelCell]:
731
+ if isinstance(self.df.index, MultiIndex):
732
+ return self._format_hierarchical_rows()
733
+ else:
734
+ return self._format_regular_rows()
735
+
736
+ def _format_regular_rows(self) -> Iterable[ExcelCell]:
737
+ if self._has_aliases or self.header:
738
+ self.rowcounter += 1
739
+
740
+ # output index and index_label?
741
+ if self.index:
742
+ # check aliases
743
+ # if list only take first as this is not a MultiIndex
744
+ if self.index_label and isinstance(
745
+ self.index_label, (list, tuple, np.ndarray, Index)
746
+ ):
747
+ index_label = self.index_label[0]
748
+ # if string good to go
749
+ elif self.index_label and isinstance(self.index_label, str):
750
+ index_label = self.index_label
751
+ else:
752
+ index_label = self.df.index.names[0]
753
+
754
+ if isinstance(self.columns, MultiIndex):
755
+ self.rowcounter += 1
756
+
757
+ if index_label and self.header is not False:
758
+ yield ExcelCell(self.rowcounter - 1, 0, index_label, self.header_style)
759
+
760
+ # write index_values
761
+ index_values = self.df.index
762
+ if isinstance(self.df.index, PeriodIndex):
763
+ index_values = self.df.index.to_timestamp()
764
+
765
+ for idx, idxval in enumerate(index_values):
766
+ yield CssExcelCell(
767
+ row=self.rowcounter + idx,
768
+ col=0,
769
+ val=idxval,
770
+ style=self.header_style,
771
+ css_styles=getattr(self.styler, "ctx_index", None),
772
+ css_row=idx,
773
+ css_col=0,
774
+ css_converter=self.style_converter,
775
+ )
776
+ coloffset = 1
777
+ else:
778
+ coloffset = 0
779
+
780
+ yield from self._generate_body(coloffset)
781
+
782
+ def _format_hierarchical_rows(self) -> Iterable[ExcelCell]:
783
+ if self._has_aliases or self.header:
784
+ self.rowcounter += 1
785
+
786
+ gcolidx = 0
787
+
788
+ if self.index:
789
+ index_labels = self.df.index.names
790
+ # check for aliases
791
+ if self.index_label and isinstance(
792
+ self.index_label, (list, tuple, np.ndarray, Index)
793
+ ):
794
+ index_labels = self.index_label
795
+
796
+ # MultiIndex columns require an extra row
797
+ # with index names (blank if None) for
798
+ # unambiguous round-trip, unless not merging,
799
+ # in which case the names all go on one row Issue #11328
800
+ if isinstance(self.columns, MultiIndex) and self.merge_cells:
801
+ self.rowcounter += 1
802
+
803
+ # if index labels are not empty go ahead and dump
804
+ if com.any_not_none(*index_labels) and self.header is not False:
805
+ for cidx, name in enumerate(index_labels):
806
+ yield ExcelCell(self.rowcounter - 1, cidx, name, self.header_style)
807
+
808
+ if self.merge_cells:
809
+ # Format hierarchical rows as merged cells.
810
+ level_strs = self.df.index.format(
811
+ sparsify=True, adjoin=False, names=False
812
+ )
813
+ level_lengths = get_level_lengths(level_strs)
814
+
815
+ for spans, levels, level_codes in zip(
816
+ level_lengths, self.df.index.levels, self.df.index.codes
817
+ ):
818
+ values = levels.take(
819
+ level_codes,
820
+ allow_fill=levels._can_hold_na,
821
+ fill_value=levels._na_value,
822
+ )
823
+
824
+ for i, span_val in spans.items():
825
+ mergestart, mergeend = None, None
826
+ if span_val > 1:
827
+ mergestart = self.rowcounter + i + span_val - 1
828
+ mergeend = gcolidx
829
+ yield CssExcelCell(
830
+ row=self.rowcounter + i,
831
+ col=gcolidx,
832
+ val=values[i],
833
+ style=self.header_style,
834
+ css_styles=getattr(self.styler, "ctx_index", None),
835
+ css_row=i,
836
+ css_col=gcolidx,
837
+ css_converter=self.style_converter,
838
+ mergestart=mergestart,
839
+ mergeend=mergeend,
840
+ )
841
+ gcolidx += 1
842
+
843
+ else:
844
+ # Format hierarchical rows with non-merged values.
845
+ for indexcolvals in zip(*self.df.index):
846
+ for idx, indexcolval in enumerate(indexcolvals):
847
+ yield CssExcelCell(
848
+ row=self.rowcounter + idx,
849
+ col=gcolidx,
850
+ val=indexcolval,
851
+ style=self.header_style,
852
+ css_styles=getattr(self.styler, "ctx_index", None),
853
+ css_row=idx,
854
+ css_col=gcolidx,
855
+ css_converter=self.style_converter,
856
+ )
857
+ gcolidx += 1
858
+
859
+ yield from self._generate_body(gcolidx)
860
+
861
+ @property
862
+ def _has_aliases(self) -> bool:
863
+ """Whether the aliases for column names are present."""
864
+ return is_list_like(self.header)
865
+
866
+ def _generate_body(self, coloffset: int) -> Iterable[ExcelCell]:
867
+ # Write the body of the frame data series by series.
868
+ for colidx in range(len(self.columns)):
869
+ series = self.df.iloc[:, colidx]
870
+ for i, val in enumerate(series):
871
+ yield CssExcelCell(
872
+ row=self.rowcounter + i,
873
+ col=colidx + coloffset,
874
+ val=val,
875
+ style=None,
876
+ css_styles=getattr(self.styler, "ctx", None),
877
+ css_row=i,
878
+ css_col=colidx,
879
+ css_converter=self.style_converter,
880
+ )
881
+
882
+ def get_formatted_cells(self) -> Iterable[ExcelCell]:
883
+ for cell in itertools.chain(self._format_header(), self._format_body()):
884
+ cell.val = self._format_value(cell.val)
885
+ yield cell
886
+
887
+ @doc(storage_options=_shared_docs["storage_options"])
888
+ def write(
889
+ self,
890
+ writer,
891
+ sheet_name: str = "Sheet1",
892
+ startrow: int = 0,
893
+ startcol: int = 0,
894
+ freeze_panes: tuple[int, int] | None = None,
895
+ engine: str | None = None,
896
+ storage_options: StorageOptions = None,
897
+ ) -> None:
898
+ """
899
+ writer : path-like, file-like, or ExcelWriter object
900
+ File path or existing ExcelWriter
901
+ sheet_name : str, default 'Sheet1'
902
+ Name of sheet which will contain DataFrame
903
+ startrow :
904
+ upper left cell row to dump data frame
905
+ startcol :
906
+ upper left cell column to dump data frame
907
+ freeze_panes : tuple of integer (length 2), default None
908
+ Specifies the one-based bottommost row and rightmost column that
909
+ is to be frozen
910
+ engine : string, default None
911
+ write engine to use if writer is a path - you can also set this
912
+ via the options ``io.excel.xlsx.writer``,
913
+ or ``io.excel.xlsm.writer``.
914
+
915
+ {storage_options}
916
+
917
+ .. versionadded:: 1.2.0
918
+ """
919
+ from pandas.io.excel import ExcelWriter
920
+
921
+ num_rows, num_cols = self.df.shape
922
+ if num_rows > self.max_rows or num_cols > self.max_cols:
923
+ raise ValueError(
924
+ f"This sheet is too large! Your sheet size is: {num_rows}, {num_cols} "
925
+ f"Max sheet size is: {self.max_rows}, {self.max_cols}"
926
+ )
927
+
928
+ formatted_cells = self.get_formatted_cells()
929
+ if isinstance(writer, ExcelWriter):
930
+ need_save = False
931
+ else:
932
+ # error: Cannot instantiate abstract class 'ExcelWriter' with abstract
933
+ # attributes 'engine', 'save', 'supported_extensions' and 'write_cells'
934
+ writer = ExcelWriter( # type: ignore[abstract]
935
+ writer, engine=engine, storage_options=storage_options
936
+ )
937
+ need_save = True
938
+
939
+ try:
940
+ writer._write_cells(
941
+ formatted_cells,
942
+ sheet_name,
943
+ startrow=startrow,
944
+ startcol=startcol,
945
+ freeze_panes=freeze_panes,
946
+ )
947
+ finally:
948
+ # make sure to close opened file handles
949
+ if need_save:
950
+ writer.close()
videochat2/lib/python3.10/site-packages/pandas/io/formats/format.py ADDED
@@ -0,0 +1,2240 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Internal module for formatting output data in csv, html, xml,
3
+ and latex files. This module also applies to display formatting.
4
+ """
5
+ from __future__ import annotations
6
+
7
+ from contextlib import contextmanager
8
+ from csv import (
9
+ QUOTE_NONE,
10
+ QUOTE_NONNUMERIC,
11
+ )
12
+ from decimal import Decimal
13
+ from functools import partial
14
+ from io import StringIO
15
+ import math
16
+ import re
17
+ from shutil import get_terminal_size
18
+ from typing import (
19
+ IO,
20
+ TYPE_CHECKING,
21
+ Any,
22
+ Callable,
23
+ Final,
24
+ Generator,
25
+ Hashable,
26
+ Iterable,
27
+ List,
28
+ Mapping,
29
+ Sequence,
30
+ cast,
31
+ )
32
+ from unicodedata import east_asian_width
33
+
34
+ import numpy as np
35
+
36
+ from pandas._config.config import (
37
+ get_option,
38
+ set_option,
39
+ )
40
+
41
+ from pandas._libs import lib
42
+ from pandas._libs.missing import NA
43
+ from pandas._libs.tslibs import (
44
+ NaT,
45
+ Timedelta,
46
+ Timestamp,
47
+ get_unit_from_dtype,
48
+ iNaT,
49
+ periods_per_day,
50
+ )
51
+ from pandas._libs.tslibs.nattype import NaTType
52
+ from pandas._typing import (
53
+ ArrayLike,
54
+ Axes,
55
+ ColspaceArgType,
56
+ ColspaceType,
57
+ CompressionOptions,
58
+ FilePath,
59
+ FloatFormatType,
60
+ FormattersType,
61
+ IndexLabel,
62
+ StorageOptions,
63
+ WriteBuffer,
64
+ )
65
+
66
+ from pandas.core.dtypes.common import (
67
+ is_categorical_dtype,
68
+ is_complex_dtype,
69
+ is_datetime64_dtype,
70
+ is_extension_array_dtype,
71
+ is_float,
72
+ is_float_dtype,
73
+ is_integer,
74
+ is_integer_dtype,
75
+ is_list_like,
76
+ is_numeric_dtype,
77
+ is_scalar,
78
+ is_timedelta64_dtype,
79
+ )
80
+ from pandas.core.dtypes.dtypes import DatetimeTZDtype
81
+ from pandas.core.dtypes.missing import (
82
+ isna,
83
+ notna,
84
+ )
85
+
86
+ from pandas.core.arrays import (
87
+ Categorical,
88
+ DatetimeArray,
89
+ TimedeltaArray,
90
+ )
91
+ from pandas.core.arrays.string_ import StringDtype
92
+ from pandas.core.base import PandasObject
93
+ import pandas.core.common as com
94
+ from pandas.core.construction import extract_array
95
+ from pandas.core.indexes.api import (
96
+ Index,
97
+ MultiIndex,
98
+ PeriodIndex,
99
+ ensure_index,
100
+ )
101
+ from pandas.core.indexes.datetimes import DatetimeIndex
102
+ from pandas.core.indexes.timedeltas import TimedeltaIndex
103
+ from pandas.core.reshape.concat import concat
104
+
105
+ from pandas.io.common import (
106
+ check_parent_directory,
107
+ stringify_path,
108
+ )
109
+ from pandas.io.formats import printing
110
+
111
+ if TYPE_CHECKING:
112
+ from pandas import (
113
+ DataFrame,
114
+ Series,
115
+ )
116
+
117
+
118
+ common_docstring: Final = """
119
+ Parameters
120
+ ----------
121
+ buf : str, Path or StringIO-like, optional, default None
122
+ Buffer to write to. If None, the output is returned as a string.
123
+ columns : sequence, optional, default None
124
+ The subset of columns to write. Writes all columns by default.
125
+ col_space : %(col_space_type)s, optional
126
+ %(col_space)s.
127
+ header : %(header_type)s, optional
128
+ %(header)s.
129
+ index : bool, optional, default True
130
+ Whether to print index (row) labels.
131
+ na_rep : str, optional, default 'NaN'
132
+ String representation of ``NaN`` to use.
133
+ formatters : list, tuple or dict of one-param. functions, optional
134
+ Formatter functions to apply to columns' elements by position or
135
+ name.
136
+ The result of each function must be a unicode string.
137
+ List/tuple must be of length equal to the number of columns.
138
+ float_format : one-parameter function, optional, default None
139
+ Formatter function to apply to columns' elements if they are
140
+ floats. This function must return a unicode string and will be
141
+ applied only to the non-``NaN`` elements, with ``NaN`` being
142
+ handled by ``na_rep``.
143
+
144
+ .. versionchanged:: 1.2.0
145
+
146
+ sparsify : bool, optional, default True
147
+ Set to False for a DataFrame with a hierarchical index to print
148
+ every multiindex key at each row.
149
+ index_names : bool, optional, default True
150
+ Prints the names of the indexes.
151
+ justify : str, default None
152
+ How to justify the column labels. If None uses the option from
153
+ the print configuration (controlled by set_option), 'right' out
154
+ of the box. Valid values are
155
+
156
+ * left
157
+ * right
158
+ * center
159
+ * justify
160
+ * justify-all
161
+ * start
162
+ * end
163
+ * inherit
164
+ * match-parent
165
+ * initial
166
+ * unset.
167
+ max_rows : int, optional
168
+ Maximum number of rows to display in the console.
169
+ max_cols : int, optional
170
+ Maximum number of columns to display in the console.
171
+ show_dimensions : bool, default False
172
+ Display DataFrame dimensions (number of rows by number of columns).
173
+ decimal : str, default '.'
174
+ Character recognized as decimal separator, e.g. ',' in Europe.
175
+ """
176
+
177
+ _VALID_JUSTIFY_PARAMETERS = (
178
+ "left",
179
+ "right",
180
+ "center",
181
+ "justify",
182
+ "justify-all",
183
+ "start",
184
+ "end",
185
+ "inherit",
186
+ "match-parent",
187
+ "initial",
188
+ "unset",
189
+ )
190
+
191
+ return_docstring: Final = """
192
+ Returns
193
+ -------
194
+ str or None
195
+ If buf is None, returns the result as a string. Otherwise returns
196
+ None.
197
+ """
198
+
199
+
200
+ class CategoricalFormatter:
201
+ def __init__(
202
+ self,
203
+ categorical: Categorical,
204
+ buf: IO[str] | None = None,
205
+ length: bool = True,
206
+ na_rep: str = "NaN",
207
+ footer: bool = True,
208
+ ) -> None:
209
+ self.categorical = categorical
210
+ self.buf = buf if buf is not None else StringIO("")
211
+ self.na_rep = na_rep
212
+ self.length = length
213
+ self.footer = footer
214
+ self.quoting = QUOTE_NONNUMERIC
215
+
216
+ def _get_footer(self) -> str:
217
+ footer = ""
218
+
219
+ if self.length:
220
+ if footer:
221
+ footer += ", "
222
+ footer += f"Length: {len(self.categorical)}"
223
+
224
+ level_info = self.categorical._repr_categories_info()
225
+
226
+ # Levels are added in a newline
227
+ if footer:
228
+ footer += "\n"
229
+ footer += level_info
230
+
231
+ return str(footer)
232
+
233
+ def _get_formatted_values(self) -> list[str]:
234
+ return format_array(
235
+ self.categorical._internal_get_values(),
236
+ None,
237
+ float_format=None,
238
+ na_rep=self.na_rep,
239
+ quoting=self.quoting,
240
+ )
241
+
242
+ def to_string(self) -> str:
243
+ categorical = self.categorical
244
+
245
+ if len(categorical) == 0:
246
+ if self.footer:
247
+ return self._get_footer()
248
+ else:
249
+ return ""
250
+
251
+ fmt_values = self._get_formatted_values()
252
+
253
+ fmt_values = [i.strip() for i in fmt_values]
254
+ values = ", ".join(fmt_values)
255
+ result = ["[" + values + "]"]
256
+ if self.footer:
257
+ footer = self._get_footer()
258
+ if footer:
259
+ result.append(footer)
260
+
261
+ return str("\n".join(result))
262
+
263
+
264
+ class SeriesFormatter:
265
+ def __init__(
266
+ self,
267
+ series: Series,
268
+ buf: IO[str] | None = None,
269
+ length: bool | str = True,
270
+ header: bool = True,
271
+ index: bool = True,
272
+ na_rep: str = "NaN",
273
+ name: bool = False,
274
+ float_format: str | None = None,
275
+ dtype: bool = True,
276
+ max_rows: int | None = None,
277
+ min_rows: int | None = None,
278
+ ) -> None:
279
+ self.series = series
280
+ self.buf = buf if buf is not None else StringIO()
281
+ self.name = name
282
+ self.na_rep = na_rep
283
+ self.header = header
284
+ self.length = length
285
+ self.index = index
286
+ self.max_rows = max_rows
287
+ self.min_rows = min_rows
288
+
289
+ if float_format is None:
290
+ float_format = get_option("display.float_format")
291
+ self.float_format = float_format
292
+ self.dtype = dtype
293
+ self.adj = get_adjustment()
294
+
295
+ self._chk_truncate()
296
+
297
+ def _chk_truncate(self) -> None:
298
+ self.tr_row_num: int | None
299
+
300
+ min_rows = self.min_rows
301
+ max_rows = self.max_rows
302
+ # truncation determined by max_rows, actual truncated number of rows
303
+ # used below by min_rows
304
+ is_truncated_vertically = max_rows and (len(self.series) > max_rows)
305
+ series = self.series
306
+ if is_truncated_vertically:
307
+ max_rows = cast(int, max_rows)
308
+ if min_rows:
309
+ # if min_rows is set (not None or 0), set max_rows to minimum
310
+ # of both
311
+ max_rows = min(min_rows, max_rows)
312
+ if max_rows == 1:
313
+ row_num = max_rows
314
+ series = series.iloc[:max_rows]
315
+ else:
316
+ row_num = max_rows // 2
317
+ series = concat((series.iloc[:row_num], series.iloc[-row_num:]))
318
+ self.tr_row_num = row_num
319
+ else:
320
+ self.tr_row_num = None
321
+ self.tr_series = series
322
+ self.is_truncated_vertically = is_truncated_vertically
323
+
324
+ def _get_footer(self) -> str:
325
+ name = self.series.name
326
+ footer = ""
327
+
328
+ if getattr(self.series.index, "freq", None) is not None:
329
+ assert isinstance(
330
+ self.series.index, (DatetimeIndex, PeriodIndex, TimedeltaIndex)
331
+ )
332
+ footer += f"Freq: {self.series.index.freqstr}"
333
+
334
+ if self.name is not False and name is not None:
335
+ if footer:
336
+ footer += ", "
337
+
338
+ series_name = printing.pprint_thing(name, escape_chars=("\t", "\r", "\n"))
339
+ footer += f"Name: {series_name}"
340
+
341
+ if self.length is True or (
342
+ self.length == "truncate" and self.is_truncated_vertically
343
+ ):
344
+ if footer:
345
+ footer += ", "
346
+ footer += f"Length: {len(self.series)}"
347
+
348
+ if self.dtype is not False and self.dtype is not None:
349
+ dtype_name = getattr(self.tr_series.dtype, "name", None)
350
+ if dtype_name:
351
+ if footer:
352
+ footer += ", "
353
+ footer += f"dtype: {printing.pprint_thing(dtype_name)}"
354
+
355
+ # level infos are added to the end and in a new line, like it is done
356
+ # for Categoricals
357
+ if is_categorical_dtype(self.tr_series.dtype):
358
+ level_info = self.tr_series._values._repr_categories_info()
359
+ if footer:
360
+ footer += "\n"
361
+ footer += level_info
362
+
363
+ return str(footer)
364
+
365
+ def _get_formatted_index(self) -> tuple[list[str], bool]:
366
+ index = self.tr_series.index
367
+
368
+ if isinstance(index, MultiIndex):
369
+ have_header = any(name for name in index.names)
370
+ fmt_index = index.format(names=True)
371
+ else:
372
+ have_header = index.name is not None
373
+ fmt_index = index.format(name=True)
374
+ return fmt_index, have_header
375
+
376
    def _get_formatted_values(self) -> list[str]:
        """Format the (possibly truncated) Series values as a list of strings."""
        return format_array(
            self.tr_series._values,
            None,
            float_format=self.float_format,
            na_rep=self.na_rep,
            # pad with a leading space only when the index is shown
            leading_space=self.index,
        )
384
+
385
+ def to_string(self) -> str:
386
+ series = self.tr_series
387
+ footer = self._get_footer()
388
+
389
+ if len(series) == 0:
390
+ return f"{type(self.series).__name__}([], {footer})"
391
+
392
+ fmt_index, have_header = self._get_formatted_index()
393
+ fmt_values = self._get_formatted_values()
394
+
395
+ if self.is_truncated_vertically:
396
+ n_header_rows = 0
397
+ row_num = self.tr_row_num
398
+ row_num = cast(int, row_num)
399
+ width = self.adj.len(fmt_values[row_num - 1])
400
+ if width > 3:
401
+ dot_str = "..."
402
+ else:
403
+ dot_str = ".."
404
+ # Series uses mode=center because it has single value columns
405
+ # DataFrame uses mode=left
406
+ dot_str = self.adj.justify([dot_str], width, mode="center")[0]
407
+ fmt_values.insert(row_num + n_header_rows, dot_str)
408
+ fmt_index.insert(row_num + 1, "")
409
+
410
+ if self.index:
411
+ result = self.adj.adjoin(3, *[fmt_index[1:], fmt_values])
412
+ else:
413
+ result = self.adj.adjoin(3, fmt_values)
414
+
415
+ if self.header and have_header:
416
+ result = fmt_index[0] + "\n" + result
417
+
418
+ if footer:
419
+ result += "\n" + footer
420
+
421
+ return str("".join(result))
422
+
423
+
424
class TextAdjustment:
    """
    Measure and pad strings assuming every character occupies one column.

    Subclasses (see EastAsianTextAdjustment) override ``len``/``justify``
    to account for wide characters.
    """

    def __init__(self) -> None:
        self.encoding = get_option("display.encoding")

    def len(self, text: str) -> int:
        """Display width of *text*: the plain character count."""
        return len(text)

    def justify(self, texts: Any, max_len: int, mode: str = "right") -> list[str]:
        """Pad every string in *texts* out to *max_len* columns."""
        return printing.justify(texts, max_len, mode=mode)

    def adjoin(self, space: int, *lists, **kwargs) -> str:
        """Glue the given string columns together, *space* blanks apart."""
        return printing.adjoin(
            space,
            *lists,
            strlen=self.len,
            justfunc=self.justify,
            **kwargs,
        )
438
+
439
+
440
class EastAsianTextAdjustment(TextAdjustment):
    """
    Width-aware text adjustment: East Asian wide/fullwidth characters
    count as two display columns.
    """

    def __init__(self) -> None:
        super().__init__()
        # Ambiguous-width characters render as 1 or 2 columns depending on
        # the user's terminal; controlled by a display option.
        self.ambiguous_width = (
            2 if get_option("display.unicode.ambiguous_as_wide") else 1
        )

        # East Asian Width categories, per https://unicode.org/reports/tr11/
        # Ambiguous ("A") is absent on purpose: it falls through to
        # self.ambiguous_width.
        self._EAW_MAP = {"Na": 1, "N": 1, "W": 2, "F": 2, "H": 1}

    def len(self, text: str) -> int:
        """
        Calculate display width considering unicode East Asian Width
        """
        if not isinstance(text, str):
            return len(text)

        widths = self._EAW_MAP
        fallback = self.ambiguous_width
        return sum(widths.get(east_asian_width(ch), fallback) for ch in text)

    def justify(
        self, texts: Iterable[str], max_len: int, mode: str = "right"
    ) -> list[str]:
        """Pad to *max_len* display columns, compensating for wide chars."""

        def _pad_target(s: str) -> int:
            # str.ljust/center/rjust count characters, not columns, so widen
            # the target by (char count - display width).
            return max_len - self.len(s) + len(s)

        if mode == "left":
            return [s.ljust(_pad_target(s)) for s in texts]
        if mode == "center":
            return [s.center(_pad_target(s)) for s in texts]
        return [s.rjust(_pad_target(s)) for s in texts]
477
+
478
+
479
def get_adjustment() -> TextAdjustment:
    """Return the text-adjustment helper matching the current display options."""
    if get_option("display.unicode.east_asian_width"):
        return EastAsianTextAdjustment()
    return TextAdjustment()
485
+
486
+
487
def get_dataframe_repr_params() -> dict[str, Any]:
    """Get the parameters used to repr(dataFrame) calls using DataFrame.to_string.

    Supplying these parameters to DataFrame.to_string is equivalent to calling
    ``repr(DataFrame)``. This is useful if you want to adjust the repr output.

    .. versionadded:: 1.4.0

    Example
    -------
    >>> import pandas as pd
    >>>
    >>> df = pd.DataFrame([[1, 2], [3, 4]])
    >>> repr_params = pd.io.formats.format.get_dataframe_repr_params()
    >>> repr(df) == df.to_string(**repr_params)
    True
    """
    from pandas.io.formats import console

    # Wrap wide frames to the console width only when expand_frame_repr is on.
    line_width = (
        console.get_console_size()[0]
        if get_option("display.expand_frame_repr")
        else None
    )
    return {
        "max_rows": get_option("display.max_rows"),
        "min_rows": get_option("display.min_rows"),
        "max_cols": get_option("display.max_columns"),
        "max_colwidth": get_option("display.max_colwidth"),
        "show_dimensions": get_option("display.show_dimensions"),
        "line_width": line_width,
    }
518
+
519
+
520
def get_series_repr_params() -> dict[str, Any]:
    """Get the parameters used to repr(Series) calls using Series.to_string.

    Supplying these parameters to Series.to_string is equivalent to calling
    ``repr(series)``. This is useful if you want to adjust the series repr output.

    .. versionadded:: 1.4.0

    Example
    -------
    >>> import pandas as pd
    >>>
    >>> ser = pd.Series([1, 2, 3, 4])
    >>> repr_params = pd.io.formats.format.get_series_repr_params()
    >>> repr(ser) == ser.to_string(**repr_params)
    True
    """
    width, height = get_terminal_size()
    # display.max_rows == 0 means "fit to terminal": use the terminal height
    # for both the max and min row counts.
    if get_option("display.max_rows") == 0:
        max_rows = height
        min_rows = height
    else:
        max_rows = get_option("display.max_rows")
        min_rows = get_option("display.min_rows")

    return {
        "name": True,
        "dtype": True,
        "min_rows": min_rows,
        "max_rows": max_rows,
        "length": get_option("display.show_dimensions"),
    }
556
+
557
+
558
class DataFrameFormatter:
    """Class for processing dataframe formatting options and data."""

    __doc__ = __doc__ if __doc__ else ""
    __doc__ += common_docstring + return_docstring

    def __init__(
        self,
        frame: DataFrame,
        columns: Sequence[Hashable] | None = None,
        col_space: ColspaceArgType | None = None,
        header: bool | Sequence[str] = True,
        index: bool = True,
        na_rep: str = "NaN",
        formatters: FormattersType | None = None,
        justify: str | None = None,
        float_format: FloatFormatType | None = None,
        sparsify: bool | None = None,
        index_names: bool = True,
        max_rows: int | None = None,
        min_rows: int | None = None,
        max_cols: int | None = None,
        show_dimensions: bool | str = False,
        decimal: str = ".",
        bold_rows: bool = False,
        escape: bool = True,
    ) -> None:
        self.frame = frame
        self.columns = self._initialize_columns(columns)
        self.col_space = self._initialize_colspace(col_space)
        self.header = header
        self.index = index
        self.na_rep = na_rep
        self.formatters = self._initialize_formatters(formatters)
        self.justify = self._initialize_justify(justify)
        self.float_format = float_format
        self.sparsify = self._initialize_sparsify(sparsify)
        self.show_index_names = index_names
        self.decimal = decimal
        self.bold_rows = bold_rows
        self.escape = escape
        self.max_rows = max_rows
        self.min_rows = min_rows
        self.max_cols = max_cols
        self.show_dimensions = show_dimensions

        # how many columns/rows actually fit on screen (None = unlimited);
        # must be computed before truncate() below
        self.max_cols_fitted = self._calc_max_cols_fitted()
        self.max_rows_fitted = self._calc_max_rows_fitted()

        # tr_frame is the (possibly truncated) frame that gets rendered
        self.tr_frame = self.frame
        self.truncate()
        self.adj = get_adjustment()

    def get_strcols(self) -> list[list[str]]:
        """
        Render a DataFrame to a list of columns (as lists of strings).
        """
        strcols = self._get_strcols_without_index()

        if self.index:
            # the index becomes the leftmost column
            str_index = self._get_formatted_index(self.tr_frame)
            strcols.insert(0, str_index)

        return strcols

    @property
    def should_show_dimensions(self) -> bool:
        # show "[n rows x m columns]" always, or only when truncated
        return self.show_dimensions is True or (
            self.show_dimensions == "truncate" and self.is_truncated
        )

    @property
    def is_truncated(self) -> bool:
        return bool(self.is_truncated_horizontally or self.is_truncated_vertically)

    @property
    def is_truncated_horizontally(self) -> bool:
        return bool(self.max_cols_fitted and (len(self.columns) > self.max_cols_fitted))

    @property
    def is_truncated_vertically(self) -> bool:
        return bool(self.max_rows_fitted and (len(self.frame) > self.max_rows_fitted))

    @property
    def dimensions_info(self) -> str:
        # dimensions of the FULL frame, not the truncated one
        return f"\n\n[{len(self.frame)} rows x {len(self.frame.columns)} columns]"

    @property
    def has_index_names(self) -> bool:
        return _has_names(self.frame.index)

    @property
    def has_column_names(self) -> bool:
        return _has_names(self.frame.columns)

    @property
    def show_row_idx_names(self) -> bool:
        return all((self.has_index_names, self.index, self.show_index_names))

    @property
    def show_col_idx_names(self) -> bool:
        return all((self.has_column_names, self.show_index_names, self.header))

    @property
    def max_rows_displayed(self) -> int:
        return min(self.max_rows or len(self.frame), len(self.frame))

    def _initialize_sparsify(self, sparsify: bool | None) -> bool:
        """Resolve the sparsify flag, defaulting to the display option."""
        if sparsify is None:
            return get_option("display.multi_sparse")
        return sparsify

    def _initialize_formatters(
        self, formatters: FormattersType | None
    ) -> FormattersType:
        """Validate the formatters argument; list-likes must match column count."""
        if formatters is None:
            return {}
        elif len(self.frame.columns) == len(formatters) or isinstance(formatters, dict):
            return formatters
        else:
            raise ValueError(
                f"Formatters length({len(formatters)}) should match "
                f"DataFrame number of columns({len(self.frame.columns)})"
            )

    def _initialize_justify(self, justify: str | None) -> str:
        """Resolve the header justification, defaulting to the display option."""
        if justify is None:
            return get_option("display.colheader_justify")
        else:
            return justify

    def _initialize_columns(self, columns: Sequence[Hashable] | None) -> Index:
        """Resolve the columns to render; subsets self.frame when given."""
        if columns is not None:
            # GH 47231 - columns doesn't have to be `Sequence[str]`
            # Will fix in later PR
            cols = ensure_index(cast(Axes, columns))
            self.frame = self.frame[cols]
            return cols
        else:
            return self.frame.columns

    def _initialize_colspace(self, col_space: ColspaceArgType | None) -> ColspaceType:
        """Normalize col_space (scalar, mapping or list-like) to a dict."""
        result: ColspaceType

        if col_space is None:
            result = {}
        elif isinstance(col_space, (int, str)):
            # scalar applies to the index column ("") and every data column
            result = {"": col_space}
            result.update({column: col_space for column in self.frame.columns})
        elif isinstance(col_space, Mapping):
            for column in col_space.keys():
                if column not in self.frame.columns and column != "":
                    raise ValueError(
                        f"Col_space is defined for an unknown column: {column}"
                    )
            result = col_space
        else:
            # list-like: must align positionally with the columns
            if len(self.frame.columns) != len(col_space):
                raise ValueError(
                    f"Col_space length({len(col_space)}) should match "
                    f"DataFrame number of columns({len(self.frame.columns)})"
                )
            result = dict(zip(self.frame.columns, col_space))
        return result

    def _calc_max_cols_fitted(self) -> int | None:
        """Number of columns fitting the screen."""
        if not self._is_in_terminal():
            return self.max_cols

        width, _ = get_terminal_size()
        if self._is_screen_narrow(width):
            return width
        else:
            return self.max_cols

    def _calc_max_rows_fitted(self) -> int | None:
        """Number of rows with data fitting the screen."""
        max_rows: int | None

        if self._is_in_terminal():
            _, height = get_terminal_size()
            if self.max_rows == 0:
                # rows available to fill with actual data
                return height - self._get_number_of_auxillary_rows()

            if self._is_screen_short(height):
                max_rows = height
            else:
                max_rows = self.max_rows
        else:
            max_rows = self.max_rows

        return self._adjust_max_rows(max_rows)

    def _adjust_max_rows(self, max_rows: int | None) -> int | None:
        """Adjust max_rows using display logic.

        See description here:
        https://pandas.pydata.org/docs/dev/user_guide/options.html#frequently-used-options

        GH #37359
        """
        if max_rows:
            if (len(self.frame) > max_rows) and self.min_rows:
                # if truncated, set max_rows showed to min_rows
                max_rows = min(self.min_rows, max_rows)
        return max_rows

    def _is_in_terminal(self) -> bool:
        """Check if the output is to be shown in terminal."""
        # max_cols/max_rows == 0 is the "auto-detect terminal size" sentinel
        return bool(self.max_cols == 0 or self.max_rows == 0)

    def _is_screen_narrow(self, max_width) -> bool:
        return bool(self.max_cols == 0 and len(self.frame.columns) > max_width)

    def _is_screen_short(self, max_height) -> bool:
        return bool(self.max_rows == 0 and len(self.frame) > max_height)

    def _get_number_of_auxillary_rows(self) -> int:
        """Get number of rows occupied by prompt, dots and dimension info."""
        dot_row = 1
        prompt_row = 1
        num_rows = dot_row + prompt_row

        if self.show_dimensions:
            num_rows += len(self.dimensions_info.splitlines())

        if self.header:
            num_rows += 1

        return num_rows

    def truncate(self) -> None:
        """
        Check whether the frame should be truncated. If so, slice the frame up.
        """
        if self.is_truncated_horizontally:
            self._truncate_horizontally()

        if self.is_truncated_vertically:
            self._truncate_vertically()

    def _truncate_horizontally(self) -> None:
        """Remove columns, which are not to be displayed and adjust formatters.

        Attributes affected:
            - tr_frame
            - formatters
            - tr_col_num
        """
        assert self.max_cols_fitted is not None
        col_num = self.max_cols_fitted // 2
        if col_num >= 1:
            # keep the first and last col_num columns around the "..."
            left = self.tr_frame.iloc[:, :col_num]
            right = self.tr_frame.iloc[:, -col_num:]
            self.tr_frame = concat((left, right), axis=1)

            # truncate formatter
            if isinstance(self.formatters, (list, tuple)):
                self.formatters = [
                    *self.formatters[:col_num],
                    *self.formatters[-col_num:],
                ]
        else:
            # fewer than 2 columns fit: just take the head
            col_num = cast(int, self.max_cols)
            self.tr_frame = self.tr_frame.iloc[:, :col_num]
        self.tr_col_num = col_num

    def _truncate_vertically(self) -> None:
        """Remove rows, which are not to be displayed.

        Attributes affected:
            - tr_frame
            - tr_row_num
        """
        assert self.max_rows_fitted is not None
        row_num = self.max_rows_fitted // 2
        if row_num >= 1:
            # keep the first and last row_num rows around the "..."
            head = self.tr_frame.iloc[:row_num, :]
            tail = self.tr_frame.iloc[-row_num:, :]
            self.tr_frame = concat((head, tail))
        else:
            # fewer than 2 rows fit: just take the head
            row_num = cast(int, self.max_rows)
            self.tr_frame = self.tr_frame.iloc[:row_num, :]
        self.tr_row_num = row_num

    def _get_strcols_without_index(self) -> list[list[str]]:
        """Render the data columns (headers included) without the index column."""
        strcols: list[list[str]] = []

        # no header at all: just the formatted values per column
        if not is_list_like(self.header) and not self.header:
            for i, c in enumerate(self.tr_frame):
                fmt_values = self.format_col(i)
                fmt_values = _make_fixed_width(
                    strings=fmt_values,
                    justify=self.justify,
                    minimum=int(self.col_space.get(c, 0)),
                    adj=self.adj,
                )
                strcols.append(fmt_values)
            return strcols

        if is_list_like(self.header):
            # cast here since can't be bool if is_list_like
            self.header = cast(List[str], self.header)
            if len(self.header) != len(self.columns):
                raise ValueError(
                    f"Writing {len(self.columns)} cols "
                    f"but got {len(self.header)} aliases"
                )
            str_columns = [[label] for label in self.header]
        else:
            str_columns = self._get_formatted_column_labels(self.tr_frame)

        if self.show_row_idx_names:
            # blank cell under each column label, aligning with the index-name row
            for x in str_columns:
                x.append("")

        for i, c in enumerate(self.tr_frame):
            cheader = str_columns[i]
            header_colwidth = max(
                int(self.col_space.get(c, 0)), *(self.adj.len(x) for x in cheader)
            )
            fmt_values = self.format_col(i)
            fmt_values = _make_fixed_width(
                fmt_values, self.justify, minimum=header_colwidth, adj=self.adj
            )

            # column width is the max of the widest value and the header
            max_len = max(max(self.adj.len(x) for x in fmt_values), header_colwidth)
            cheader = self.adj.justify(cheader, max_len, mode=self.justify)
            strcols.append(cheader + fmt_values)

        return strcols

    def format_col(self, i: int) -> list[str]:
        """Format the values of column ``i`` of the truncated frame."""
        frame = self.tr_frame
        formatter = self._get_formatter(i)
        return format_array(
            frame.iloc[:, i]._values,
            formatter,
            float_format=self.float_format,
            na_rep=self.na_rep,
            space=self.col_space.get(frame.columns[i]),
            decimal=self.decimal,
            leading_space=self.index,
        )

    def _get_formatter(self, i: str | int) -> Callable | None:
        """Look up the user formatter for column ``i`` (position or label)."""
        if isinstance(self.formatters, (list, tuple)):
            if is_integer(i):
                i = cast(int, i)
                return self.formatters[i]
            else:
                return None
        else:
            # dict formatters are keyed by label; translate a position to a
            # label unless the position itself is a column label
            if is_integer(i) and i not in self.columns:
                i = self.columns[i]
            return self.formatters.get(i, None)

    def _get_formatted_column_labels(self, frame: DataFrame) -> list[list[str]]:
        """Format the column labels, adding a leading space for numeric dtypes."""
        from pandas.core.indexes.multi import sparsify_labels

        columns = frame.columns

        if isinstance(columns, MultiIndex):
            fmt_columns = columns.format(sparsify=False, adjoin=False)
            fmt_columns = list(zip(*fmt_columns))
            dtypes = self.frame.dtypes._values

            # if we have a Float level, they don't use leading space at all
            restrict_formatting = any(level.is_floating for level in columns.levels)
            need_leadsp = dict(zip(fmt_columns, map(is_numeric_dtype, dtypes)))

            def space_format(x, y):
                if (
                    y not in self.formatters
                    and need_leadsp[x]
                    and not restrict_formatting
                ):
                    return " " + y
                return y

            str_columns = list(
                zip(*([space_format(x, y) for y in x] for x in fmt_columns))
            )
            if self.sparsify and len(str_columns):
                str_columns = sparsify_labels(str_columns)

            str_columns = [list(x) for x in zip(*str_columns)]
        else:
            fmt_columns = columns.format()
            dtypes = self.frame.dtypes
            need_leadsp = dict(zip(fmt_columns, map(is_numeric_dtype, dtypes)))
            str_columns = [
                [" " + x if not self._get_formatter(i) and need_leadsp[x] else x]
                for i, x in enumerate(fmt_columns)
            ]
            # self.str_columns = str_columns
        return str_columns

    def _get_formatted_index(self, frame: DataFrame) -> list[str]:
        """Format the index as the leftmost string column (header rows included)."""
        # Note: this is only used by to_string() and to_latex(), not by
        # to_html(). so safe to cast col_space here.
        col_space = {k: cast(int, v) for k, v in self.col_space.items()}
        index = frame.index
        columns = frame.columns
        fmt = self._get_formatter("__index__")

        if isinstance(index, MultiIndex):
            fmt_index = index.format(
                sparsify=self.sparsify,
                adjoin=False,
                names=self.show_row_idx_names,
                formatter=fmt,
            )
        else:
            fmt_index = [index.format(name=self.show_row_idx_names, formatter=fmt)]

        fmt_index = [
            tuple(
                _make_fixed_width(
                    list(x), justify="left", minimum=col_space.get("", 0), adj=self.adj
                )
            )
            for x in fmt_index
        ]

        adjoined = self.adj.adjoin(1, *fmt_index).split("\n")

        # empty space for columns
        if self.show_col_idx_names:
            col_header = [str(x) for x in self._get_column_name_list()]
        else:
            col_header = [""] * columns.nlevels

        if self.header:
            return col_header + adjoined
        else:
            return adjoined

    def _get_column_name_list(self) -> list[Hashable]:
        """Column index name(s), with None rendered as the empty string."""
        names: list[Hashable] = []
        columns = self.frame.columns
        if isinstance(columns, MultiIndex):
            names.extend("" if name is None else name for name in columns.names)
        else:
            names.append("" if columns.name is None else columns.name)
        return names
1006
+
1007
+
1008
class DataFrameRenderer:
    """Class for creating dataframe output in multiple formats.

    Called in pandas.core.generic.NDFrame:
        - to_csv
        - to_latex

    Called in pandas.core.frame.DataFrame:
        - to_html
        - to_string

    Parameters
    ----------
    fmt : DataFrameFormatter
        Formatter with the formatting options.
    """

    def __init__(self, fmt: DataFrameFormatter) -> None:
        self.fmt = fmt

    def to_latex(
        self,
        buf: FilePath | WriteBuffer[str] | None = None,
        column_format: str | None = None,
        longtable: bool = False,
        encoding: str | None = None,
        multicolumn: bool = False,
        multicolumn_format: str | None = None,
        multirow: bool = False,
        caption: str | tuple[str, str] | None = None,
        label: str | None = None,
        position: str | None = None,
    ) -> str | None:
        """
        Render a DataFrame to a LaTeX tabular/longtable environment output.

        Returns the LaTeX string when ``buf`` is None, else writes to ``buf``
        and returns None.
        """
        from pandas.io.formats.latex import LatexFormatter

        latex_formatter = LatexFormatter(
            self.fmt,
            longtable=longtable,
            column_format=column_format,
            multicolumn=multicolumn,
            multicolumn_format=multicolumn_format,
            multirow=multirow,
            caption=caption,
            label=label,
            position=position,
        )
        string = latex_formatter.to_string()
        return save_to_buffer(string, buf=buf, encoding=encoding)

    def to_html(
        self,
        buf: FilePath | WriteBuffer[str] | None = None,
        encoding: str | None = None,
        classes: str | list | tuple | None = None,
        notebook: bool = False,
        border: int | bool | None = None,
        table_id: str | None = None,
        render_links: bool = False,
    ) -> str | None:
        """
        Render a DataFrame to a html table.

        Parameters
        ----------
        buf : str, path object, file-like object, or None, default None
            String, path object (implementing ``os.PathLike[str]``), or file-like
            object implementing a string ``write()`` function. If None, the result is
            returned as a string.
        encoding : str, default “utf-8”
            Set character encoding.
        classes : str or list-like
            classes to include in the `class` attribute of the opening
            ``<table>`` tag, in addition to the default "dataframe".
        notebook : {True, False}, optional, default False
            Whether the generated HTML is for IPython Notebook.
        border : int
            A ``border=border`` attribute is included in the opening
            ``<table>`` tag. Default ``pd.options.display.html.border``.
        table_id : str, optional
            A css id is included in the opening `<table>` tag if specified.
        render_links : bool, default False
            Convert URLs to HTML links.
        """
        from pandas.io.formats.html import (
            HTMLFormatter,
            NotebookFormatter,
        )

        # notebook output wraps the table in IPython-specific markup
        Klass = NotebookFormatter if notebook else HTMLFormatter

        html_formatter = Klass(
            self.fmt,
            classes=classes,
            border=border,
            table_id=table_id,
            render_links=render_links,
        )
        string = html_formatter.to_string()
        return save_to_buffer(string, buf=buf, encoding=encoding)

    def to_string(
        self,
        buf: FilePath | WriteBuffer[str] | None = None,
        encoding: str | None = None,
        line_width: int | None = None,
    ) -> str | None:
        """
        Render a DataFrame to a console-friendly tabular output.

        Parameters
        ----------
        buf : str, path object, file-like object, or None, default None
            String, path object (implementing ``os.PathLike[str]``), or file-like
            object implementing a string ``write()`` function. If None, the result is
            returned as a string.
        encoding: str, default “utf-8”
            Set character encoding.
        line_width : int, optional
            Width to wrap a line in characters.
        """
        from pandas.io.formats.string import StringFormatter

        string_formatter = StringFormatter(self.fmt, line_width=line_width)
        string = string_formatter.to_string()
        return save_to_buffer(string, buf=buf, encoding=encoding)

    def to_csv(
        self,
        path_or_buf: FilePath | WriteBuffer[bytes] | WriteBuffer[str] | None = None,
        encoding: str | None = None,
        sep: str = ",",
        columns: Sequence[Hashable] | None = None,
        index_label: IndexLabel | None = None,
        mode: str = "w",
        compression: CompressionOptions = "infer",
        quoting: int | None = None,
        quotechar: str = '"',
        lineterminator: str | None = None,
        chunksize: int | None = None,
        date_format: str | None = None,
        doublequote: bool = True,
        escapechar: str | None = None,
        errors: str = "strict",
        storage_options: StorageOptions = None,
    ) -> str | None:
        """
        Render dataframe as comma-separated file.

        Returns the CSV text when ``path_or_buf`` is None, else writes to
        ``path_or_buf`` and returns None.
        """
        from pandas.io.formats.csvs import CSVFormatter

        # with no destination, write into an in-memory buffer and return it
        if path_or_buf is None:
            created_buffer = True
            path_or_buf = StringIO()
        else:
            created_buffer = False

        csv_formatter = CSVFormatter(
            path_or_buf=path_or_buf,
            lineterminator=lineterminator,
            sep=sep,
            encoding=encoding,
            errors=errors,
            compression=compression,
            quoting=quoting,
            cols=columns,
            index_label=index_label,
            mode=mode,
            chunksize=chunksize,
            quotechar=quotechar,
            date_format=date_format,
            doublequote=doublequote,
            escapechar=escapechar,
            storage_options=storage_options,
            formatter=self.fmt,
        )
        csv_formatter.save()

        if created_buffer:
            assert isinstance(path_or_buf, StringIO)
            content = path_or_buf.getvalue()
            path_or_buf.close()
            return content

        return None
1195
+
1196
+
1197
def save_to_buffer(
    string: str,
    buf: FilePath | WriteBuffer[str] | None = None,
    encoding: str | None = None,
) -> str | None:
    """
    Perform serialization. Write to buf or return as string if buf is None.
    """
    with get_buffer(buf, encoding=encoding) as fd:
        fd.write(string)
        if buf is not None:
            # written to the caller-supplied destination; nothing to return
            return None
        # error: "WriteBuffer[str]" has no attribute "getvalue"
        return fd.getvalue()  # type: ignore[attr-defined]
1211
+
1212
+
1213
@contextmanager
def get_buffer(
    buf: FilePath | WriteBuffer[str] | None, encoding: str | None = None
) -> Generator[WriteBuffer[str], None, None] | Generator[StringIO, None, None]:
    """
    Context manager to open, yield and close buffer for filenames or Path-like
    objects, otherwise yield buf unchanged.

    Raises
    ------
    ValueError
        If ``encoding`` is given but ``buf`` is an already-open writable
        object (the encoding could not be applied).
    TypeError
        If ``buf`` is neither a path nor an object with a ``write`` method.
    """
    if buf is not None:
        buf = stringify_path(buf)
    else:
        # no destination: collect output in memory
        buf = StringIO()

    if encoding is None:
        encoding = "utf-8"
    elif not isinstance(buf, str):
        # an explicit encoding only makes sense when we open the file ourselves
        raise ValueError("buf is not a file name and encoding is specified.")

    if hasattr(buf, "write"):
        # Incompatible types in "yield" (actual type "Union[str, WriteBuffer[str],
        # StringIO]", expected type "Union[WriteBuffer[str], StringIO]")
        yield buf  # type: ignore[misc]
    elif isinstance(buf, str):
        check_parent_directory(str(buf))
        with open(buf, "w", encoding=encoding, newline="") as f:
            # GH#30034 open instead of codecs.open prevents a file leak
            # if we have an invalid encoding argument.
            # newline="" is needed to roundtrip correctly on
            # windows test_to_latex_filename
            yield f
    else:
        raise TypeError("buf is not a file name and it has no write method")
1245
+
1246
+
1247
+ # ----------------------------------------------------------------------
1248
+ # Array formatters
1249
+
1250
+
1251
def format_array(
    values: Any,
    formatter: Callable | None,
    float_format: FloatFormatType | None = None,
    na_rep: str = "NaN",
    digits: int | None = None,
    space: str | int | None = None,
    justify: str = "right",
    decimal: str = ".",
    leading_space: bool | None = True,
    quoting: int | None = None,
    fallback_formatter: Callable | None = None,
) -> list[str]:
    """
    Format an array for printing.

    Parameters
    ----------
    values
    formatter
    float_format
    na_rep
    digits
    space
    justify
    decimal
    leading_space : bool, optional, default True
        Whether the array should be formatted with a leading space.
        When an array as a column of a Series or DataFrame, we do want
        the leading space to pad between columns.

        When formatting an Index subclass
        (e.g. IntervalIndex._format_native_types), we don't want the
        leading space since it should be left-aligned.
    fallback_formatter

    Returns
    -------
    List[str]
    """
    # Dispatch on dtype; the branch ORDER matters — e.g. the extension-array
    # check must precede the float/integer checks so that masked/nullable
    # dtypes are handled by ExtensionArrayFormatter.
    fmt_klass: type[GenericArrayFormatter]
    if is_datetime64_dtype(values.dtype):
        fmt_klass = Datetime64Formatter
    elif isinstance(values.dtype, DatetimeTZDtype):
        fmt_klass = Datetime64TZFormatter
    elif is_timedelta64_dtype(values.dtype):
        fmt_klass = Timedelta64Formatter
    elif is_extension_array_dtype(values.dtype):
        fmt_klass = ExtensionArrayFormatter
    elif is_float_dtype(values.dtype) or is_complex_dtype(values.dtype):
        fmt_klass = FloatArrayFormatter
    elif is_integer_dtype(values.dtype):
        fmt_klass = IntArrayFormatter
    else:
        fmt_klass = GenericArrayFormatter

    if space is None:
        space = 12

    if float_format is None:
        float_format = get_option("display.float_format")

    if digits is None:
        digits = get_option("display.precision")

    fmt_obj = fmt_klass(
        values,
        digits=digits,
        na_rep=na_rep,
        float_format=float_format,
        formatter=formatter,
        space=space,
        justify=justify,
        decimal=decimal,
        leading_space=leading_space,
        quoting=quoting,
        fallback_formatter=fallback_formatter,
    )

    return fmt_obj.get_result()
1331
+
1332
+
1333
+ class GenericArrayFormatter:
1334
    def __init__(
        self,
        values: Any,
        digits: int = 7,
        formatter: Callable | None = None,
        na_rep: str = "NaN",
        space: str | int = 12,
        float_format: FloatFormatType | None = None,
        justify: str = "right",
        decimal: str = ".",
        quoting: int | None = None,
        fixed_width: bool = True,
        leading_space: bool | None = True,
        fallback_formatter: Callable | None = None,
    ) -> None:
        """
        Store the array and all formatting options; no formatting happens
        here — see ``get_result``.
        """
        self.values = values
        self.digits = digits
        self.na_rep = na_rep
        self.space = space
        self.formatter = formatter
        self.float_format = float_format
        self.justify = justify
        self.decimal = decimal
        self.quoting = quoting
        self.fixed_width = fixed_width
        self.leading_space = leading_space
        self.fallback_formatter = fallback_formatter
1361
+
1362
+ def get_result(self) -> list[str]:
1363
+ fmt_values = self._format_strings()
1364
+ return _make_fixed_width(fmt_values, self.justify)
1365
+
1366
+ def _format_strings(self) -> list[str]:
1367
+ if self.float_format is None:
1368
+ float_format = get_option("display.float_format")
1369
+ if float_format is None:
1370
+ precision = get_option("display.precision")
1371
+ float_format = lambda x: _trim_zeros_single_float(
1372
+ f"{x: .{precision:d}f}"
1373
+ )
1374
+ else:
1375
+ float_format = self.float_format
1376
+
1377
+ if self.formatter is not None:
1378
+ formatter = self.formatter
1379
+ elif self.fallback_formatter is not None:
1380
+ formatter = self.fallback_formatter
1381
+ else:
1382
+ quote_strings = self.quoting is not None and self.quoting != QUOTE_NONE
1383
+ formatter = partial(
1384
+ printing.pprint_thing,
1385
+ escape_chars=("\t", "\r", "\n"),
1386
+ quote_strings=quote_strings,
1387
+ )
1388
+
1389
+ def _format(x):
1390
+ if self.na_rep is not None and is_scalar(x) and isna(x):
1391
+ try:
1392
+ # try block for np.isnat specifically
1393
+ # determine na_rep if x is None or NaT-like
1394
+ if x is None:
1395
+ return "None"
1396
+ elif x is NA:
1397
+ return str(NA)
1398
+ elif x is NaT or np.isnat(x):
1399
+ return "NaT"
1400
+ except (TypeError, ValueError):
1401
+ # np.isnat only handles datetime or timedelta objects
1402
+ pass
1403
+ return self.na_rep
1404
+ elif isinstance(x, PandasObject):
1405
+ return str(x)
1406
+ elif isinstance(x, StringDtype):
1407
+ return repr(x)
1408
+ else:
1409
+ # object dtype
1410
+ return str(formatter(x))
1411
+
1412
+ vals = extract_array(self.values, extract_numpy=True)
1413
+ if not isinstance(vals, np.ndarray):
1414
+ raise TypeError(
1415
+ "ExtensionArray formatting should use ExtensionArrayFormatter"
1416
+ )
1417
+ inferred = lib.map_infer(vals, is_float)
1418
+ is_float_type = (
1419
+ inferred
1420
+ # vals may have 2 or more dimensions
1421
+ & np.all(notna(vals), axis=tuple(range(1, len(vals.shape))))
1422
+ )
1423
+ leading_space = self.leading_space
1424
+ if leading_space is None:
1425
+ leading_space = is_float_type.any()
1426
+
1427
+ fmt_values = []
1428
+ for i, v in enumerate(vals):
1429
+ if (not is_float_type[i] or self.formatter is not None) and leading_space:
1430
+ fmt_values.append(f" {_format(v)}")
1431
+ elif is_float_type[i]:
1432
+ fmt_values.append(float_format(v))
1433
+ else:
1434
+ if leading_space is False:
1435
+ # False specifically, so that the default is
1436
+ # to include a space if we get here.
1437
+ tpl = "{v}"
1438
+ else:
1439
+ tpl = " {v}"
1440
+ fmt_values.append(tpl.format(v=_format(v)))
1441
+
1442
+ return fmt_values
1443
+
1444
+
1445
class FloatArrayFormatter(GenericArrayFormatter):
    """
    Formatter for float (and complex) arrays.

    Handles fixed-width alignment, a chop threshold for near-zero values,
    and automatic fallback to engineering (scientific) notation when values
    are too large or too small to render in fixed precision.
    """

    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)

        # float_format is expected to be a string
        # formatter should be used to pass a function
        if self.float_format is not None and self.formatter is None:
            # GH21625, GH22270
            self.fixed_width = False
            if callable(self.float_format):
                self.formatter = self.float_format
                self.float_format = None

    def _value_formatter(
        self,
        float_format: FloatFormatType | None = None,
        threshold: float | None = None,
    ) -> Callable:
        """Returns a function to be applied on each value to format it"""
        # the float_format parameter supersedes self.float_format
        if float_format is None:
            float_format = self.float_format

        # we are going to compose different functions, to first convert to
        # a string, then replace the decimal symbol, and finally chop according
        # to the threshold

        # when there is no float_format, we use str instead of '%g'
        # because str(0.0) = '0.0' while '%g' % 0.0 = '0'
        if float_format:

            def base_formatter(v):
                assert float_format is not None  # for mypy
                # error: "str" not callable
                # error: Unexpected keyword argument "value" for "__call__" of
                # "EngFormatter"
                return (
                    float_format(value=v)  # type: ignore[operator,call-arg]
                    if notna(v)
                    else self.na_rep
                )

        else:

            def base_formatter(v):
                return str(v) if notna(v) else self.na_rep

        if self.decimal != ".":

            def decimal_formatter(v):
                # replace only the first "." (count=1) so any later text
                # in the rendered string is left untouched
                return base_formatter(v).replace(".", self.decimal, 1)

        else:
            decimal_formatter = base_formatter

        if threshold is None:
            return decimal_formatter

        def formatter(value):
            # values at or below the chop threshold render as 0.0
            if notna(value):
                if abs(value) > threshold:
                    return decimal_formatter(value)
                else:
                    return decimal_formatter(0.0)
            else:
                return self.na_rep

        return formatter

    def get_result_as_array(self) -> np.ndarray:
        """
        Returns the float values converted into strings using
        the parameters given at initialisation, as a numpy array
        """

        def format_with_na_rep(values: ArrayLike, formatter: Callable, na_rep: str):
            # apply `formatter` element-wise, substituting `na_rep` for
            # missing cells; shape is preserved
            mask = isna(values)
            formatted = np.array(
                [
                    formatter(val) if not m else na_rep
                    for val, m in zip(values.ravel(), mask.ravel())
                ]
            ).reshape(values.shape)
            return formatted

        if self.formatter is not None:
            return format_with_na_rep(self.values, self.formatter, self.na_rep)

        if self.fixed_width:
            threshold = get_option("display.chop_threshold")
        else:
            threshold = None

        # if we have a fixed_width, we'll need to try different float_format
        def format_values_with(float_format):
            formatter = self._value_formatter(float_format, threshold)

            # default formatter leaves a space to the left when formatting
            # floats, must be consistent for left-justifying NaNs (GH #25061)
            if self.justify == "left":
                na_rep = " " + self.na_rep
            else:
                na_rep = self.na_rep

            # separate the wheat from the chaff
            values = self.values
            is_complex = is_complex_dtype(values)
            values = format_with_na_rep(values, formatter, na_rep)

            if self.fixed_width:
                if is_complex:
                    result = _trim_zeros_complex(values, self.decimal)
                else:
                    result = _trim_zeros_float(values, self.decimal)
                return np.asarray(result, dtype="object")

            return values

        # There is a special default string when we are fixed-width
        # The default is otherwise to use str instead of a formatting string
        float_format: FloatFormatType | None
        if self.float_format is None:
            if self.fixed_width:
                if self.leading_space is True:
                    fmt_str = "{value: .{digits:d}f}"
                else:
                    fmt_str = "{value:.{digits:d}f}"
                float_format = partial(fmt_str.format, digits=self.digits)
            else:
                float_format = self.float_format
        else:
            # self.float_format is a %-style format string here
            float_format = lambda value: self.float_format % value

        formatted_values = format_values_with(float_format)

        if not self.fixed_width:
            return formatted_values

        # we need do convert to engineering format if some values are too small
        # and would appear as 0, or if some values are too big and take too
        # much space

        if len(formatted_values) > 0:
            maxlen = max(len(x) for x in formatted_values)
            too_long = maxlen > self.digits + 6
        else:
            too_long = False

        with np.errstate(invalid="ignore"):
            abs_vals = np.abs(self.values)
            # this is pretty arbitrary for now
            # large values: more that 8 characters including decimal symbol
            # and first digit, hence > 1e6
            has_large_values = (abs_vals > 1e6).any()
            has_small_values = (
                (abs_vals < 10 ** (-self.digits)) & (abs_vals > 0)
            ).any()

        if has_small_values or (too_long and has_large_values):
            # re-render everything in scientific notation
            if self.leading_space is True:
                fmt_str = "{value: .{digits:d}e}"
            else:
                fmt_str = "{value:.{digits:d}e}"
            float_format = partial(fmt_str.format, digits=self.digits)
            formatted_values = format_values_with(float_format)

        return formatted_values

    def _format_strings(self) -> list[str]:
        """Return the formatted values as a Python list."""
        return list(self.get_result_as_array())
1615
+
1616
+
1617
class IntArrayFormatter(GenericArrayFormatter):
    """Formatter for arrays of integers."""

    def _format_strings(self) -> list[str]:
        """
        Render each integer with ``{:d}`` formatting.

        A leading space is included unless ``leading_space`` is explicitly
        False, so positive values align with negative ones.
        """
        # Fix: the original wrote ``f"{x:d}".format(x=x)`` — the trailing
        # ``str.format`` call is a no-op on the already-rendered f-string
        # (it contains no replacement fields), so it is dropped.
        if self.leading_space is False:
            formatter_str = lambda x: f"{x:d}"
        else:
            formatter_str = lambda x: f"{x: d}"
        formatter = self.formatter or formatter_str
        fmt_values = [formatter(x) for x in self.values]
        return fmt_values
1626
+
1627
+
1628
class Datetime64Formatter(GenericArrayFormatter):
    """Formatter for timezone-naive datetime64 values."""

    def __init__(
        self,
        values: np.ndarray | Series | DatetimeIndex | DatetimeArray,
        nat_rep: str = "NaT",
        date_format: None = None,
        **kwargs,
    ) -> None:
        # nat_rep: string substituted for NaT entries
        # date_format: strftime-style format string; None uses the default
        super().__init__(values, **kwargs)
        self.nat_rep = nat_rep
        self.date_format = date_format

    def _format_strings(self) -> list[str]:
        """we by definition have DO NOT have a TZ"""
        values = self.values

        if not isinstance(values, DatetimeIndex):
            values = DatetimeIndex(values)

        if self.formatter is not None and callable(self.formatter):
            # a user-supplied formatter takes precedence over native rendering
            return [self.formatter(x) for x in values]

        # delegate to the DatetimeArray's native string rendering
        fmt_values = values._data._format_native_types(
            na_rep=self.nat_rep, date_format=self.date_format
        )
        return fmt_values.tolist()
1654
+
1655
+
1656
class ExtensionArrayFormatter(GenericArrayFormatter):
    """
    Formatter for ExtensionArray-backed values.

    Materialises the array and re-enters ``format_array`` so the standard
    dtype dispatch applies, using the ExtensionArray's own boxed-element
    formatter as a fallback.
    """

    def _format_strings(self) -> list[str]:
        values = extract_array(self.values, extract_numpy=True)

        formatter = self.formatter
        fallback_formatter = None
        if formatter is None:
            # let the ExtensionArray supply its own per-element formatter
            fallback_formatter = values._formatter(boxed=True)

        if isinstance(values, Categorical):
            # Categorical is special for now, so that we can preserve tzinfo
            array = values._internal_get_values()
        else:
            array = np.asarray(values)

        # recurse into format_array on the materialised ndarray
        fmt_values = format_array(
            array,
            formatter,
            float_format=self.float_format,
            na_rep=self.na_rep,
            digits=self.digits,
            space=self.space,
            justify=self.justify,
            decimal=self.decimal,
            leading_space=self.leading_space,
            quoting=self.quoting,
            fallback_formatter=fallback_formatter,
        )
        return fmt_values
1685
+
1686
+
1687
def format_percentiles(
    percentiles: (np.ndarray | Sequence[float]),
) -> list[str]:
    """
    Outputs rounded and formatted percentiles.

    Parameters
    ----------
    percentiles : list-like, containing floats from interval [0,1]

    Returns
    -------
    formatted : list of strings

    Notes
    -----
    Rounding precision is chosen so that: (1) if any two elements of
    ``percentiles`` differ, they remain different after rounding
    (2) no entry is *rounded* to 0% or 100%.
    Any non-integer is always rounded to at least 1 decimal place.

    Examples
    --------
    Keeps all entries different after rounding:

    >>> format_percentiles([0.01999, 0.02001, 0.5, 0.666666, 0.9999])
    ['1.999%', '2.001%', '50%', '66.667%', '99.99%']

    No element is rounded to 0% or 100% (unless already equal to it).
    Duplicates are allowed:

    >>> format_percentiles([0, 0.5, 0.02001, 0.5, 0.666666, 0.9999])
    ['0%', '50%', '2.0%', '50%', '66.67%', '99.99%']
    """
    percentiles = np.asarray(percentiles)

    # It checks for np.NaN as well
    with np.errstate(invalid="ignore"):
        in_unit_interval = (
            is_numeric_dtype(percentiles)
            and np.all(percentiles >= 0)
            and np.all(percentiles <= 1)
        )
    if not in_unit_interval:
        raise ValueError("percentiles should all be in the interval [0,1]")

    percentiles = 100 * percentiles
    rounded = percentiles.round().astype(int)

    # entries that are (numerically) whole percents
    int_idx = np.isclose(rounded, percentiles)

    if np.all(int_idx):
        # every entry is a whole percent: no decimals needed anywhere
        return [i + "%" for i in rounded.astype(str)]

    unique_pcts = np.unique(percentiles)
    to_begin = unique_pcts[0] if unique_pcts[0] > 0 else None
    to_end = 100 - unique_pcts[-1] if unique_pcts[-1] < 100 else None

    # Least precision that keeps percentiles unique after rounding
    prec = -np.floor(
        np.log10(np.min(np.ediff1d(unique_pcts, to_begin=to_begin, to_end=to_end)))
    ).astype(int)
    prec = max(1, prec)

    out = np.empty_like(percentiles, dtype=object)
    out[int_idx] = percentiles[int_idx].round().astype(int).astype(str)
    out[~int_idx] = percentiles[~int_idx].round(prec).astype(str)
    return [i + "%" for i in out]
1755
+
1756
+
1757
def is_dates_only(values: np.ndarray | DatetimeArray | Index | DatetimeIndex) -> bool:
    """
    Return True if every non-NaT value is tz-naive and falls exactly on a
    day boundary (so only the date portion needs to be displayed).
    """
    if not isinstance(values, Index):
        values = values.ravel()

    if not isinstance(values, (DatetimeArray, DatetimeIndex)):
        values = DatetimeIndex(values)

    # timezone-aware values always render with time information
    if values.tz is not None:
        return False

    values_int = values.asi8
    consider_values = values_int != iNaT
    # error: Argument 1 to "py_get_unit_from_dtype" has incompatible type
    # "Union[dtype[Any], ExtensionDtype]"; expected "dtype[Any]"
    reso = get_unit_from_dtype(values.dtype)  # type: ignore[arg-type]
    ppd = periods_per_day(reso)

    # TODO: can we reuse is_date_array_normalized? would need a skipna kwd
    # any non-NaT value with an intraday remainder means times must be shown
    intraday = np.logical_and(consider_values, values_int % ppd != 0)
    return not intraday.any()
1780
+
1781
+
1782
def _format_datetime64(x: NaTType | Timestamp, nat_rep: str = "NaT") -> str:
    """Render a single Timestamp (or NaT) including its time component."""
    if x is NaT:
        return nat_rep

    # Timestamp.__str__ falls back to datetime.datetime.__str__ = isoformat(sep=' ')
    # so it already uses string formatting rather than strftime (faster).
    return str(x)
1789
+
1790
+
1791
def _format_datetime64_dateonly(
    x: NaTType | Timestamp,
    nat_rep: str = "NaT",
    date_format: str | None = None,
) -> str:
    """Render only the date portion of a Timestamp (or NaT)."""
    if isinstance(x, NaTType):
        return nat_rep

    if not date_format:
        # Timestamp._date_repr relies on string formatting (faster than strftime)
        return x._date_repr
    return x.strftime(date_format)
1804
+
1805
+
1806
def get_format_datetime64(
    is_dates_only_: bool, nat_rep: str = "NaT", date_format: str | None = None
) -> Callable:
    """Return a formatter callable taking a datetime64 as input and providing
    a string as output"""

    if not is_dates_only_:
        # full date + time rendering; date_format does not apply here
        return lambda x: _format_datetime64(x, nat_rep=nat_rep)
    return lambda x: _format_datetime64_dateonly(
        x, nat_rep=nat_rep, date_format=date_format
    )
1818
+
1819
+
1820
def get_format_datetime64_from_values(
    values: np.ndarray | DatetimeArray | DatetimeIndex, date_format: str | None
) -> str | None:
    """given values and a date_format, return a string format"""
    if isinstance(values, np.ndarray) and values.ndim > 1:
        # We don't actually care about the order of values, and DatetimeIndex
        # only accepts 1D values
        values = values.ravel()

    if is_dates_only(values):
        # Only dates and no timezone: provide a default format
        return date_format or "%Y-%m-%d"
    return date_format
1834
+
1835
+
1836
class Datetime64TZFormatter(Datetime64Formatter):
    """Formatter for timezone-aware datetime64 values."""

    def _format_strings(self) -> list[str]:
        """we by definition have a TZ"""
        # box to Timestamp objects so tzinfo is preserved per element
        boxed = self.values.astype(object)
        formatter = self.formatter or get_format_datetime64(
            is_dates_only(boxed), date_format=self.date_format
        )
        return [formatter(x) for x in boxed]
1847
+
1848
+
1849
class Timedelta64Formatter(GenericArrayFormatter):
    """Formatter for timedelta64 values."""

    def __init__(
        self,
        values: np.ndarray | TimedeltaIndex,
        nat_rep: str = "NaT",
        box: bool = False,
        **kwargs,
    ) -> None:
        # nat_rep: string substituted for NaT entries
        # box: if True, each rendered value is wrapped in quotes
        super().__init__(values, **kwargs)
        self.nat_rep = nat_rep
        self.box = box

    def _format_strings(self) -> list[str]:
        fmt = self.formatter or get_format_timedelta64(
            self.values, nat_rep=self.nat_rep, box=self.box
        )
        return [fmt(v) for v in self.values]
1866
+
1867
+
1868
def get_format_timedelta64(
    values: np.ndarray | TimedeltaIndex | TimedeltaArray,
    nat_rep: str | float = "NaT",
    box: bool = False,
) -> Callable:
    """
    Return a formatter function for a range of timedeltas.
    These will all have the same format argument

    If box, then show the return in quotes
    """
    values_int = values.view(np.int64)
    consider_values = values_int != iNaT

    one_day_nanos = 86400 * 10**9
    # error: Unsupported operand types for % ("ExtensionArray" and "int")
    not_midnight = values_int % one_day_nanos != 0  # type: ignore[operator]
    # error: Argument 1 to "__call__" of "ufunc" has incompatible type
    # "Union[Any, ExtensionArray, ndarray]"; expected
    # "Union[Union[int, float, complex, str, bytes, generic],
    # Sequence[Union[int, float, complex, str, bytes, generic]],
    # Sequence[Sequence[Any]], _SupportsArray]"
    intraday = np.logical_and(consider_values, not_midnight)  # type: ignore[arg-type]

    # "long" spells out the sub-day components; omit it when every non-NaT
    # value sits exactly on a day boundary
    format = "long" if intraday.sum() > 0 else None

    def _formatter(x):
        if x is None or (is_scalar(x) and isna(x)):
            return nat_rep

        if not isinstance(x, Timedelta):
            x = Timedelta(x)

        # Timedelta._repr_base uses string formatting (faster than strftime)
        result = x._repr_base(format=format)
        return f"'{result}'" if box else result

    return _formatter
1913
+
1914
+
1915
def _make_fixed_width(
    strings: list[str],
    justify: str = "right",
    minimum: int | None = None,
    adj: TextAdjustment | None = None,
) -> list[str]:
    """
    Pad (and, when over ``display.max_colwidth``, truncate with "...") all
    strings to a common display width.
    """
    if len(strings) == 0 or justify == "all":
        return strings

    adjustment = get_adjustment() if adj is None else adj

    max_len = max(adjustment.len(x) for x in strings)
    if minimum is not None:
        max_len = max(minimum, max_len)

    conf_max = get_option("display.max_colwidth")
    if conf_max is not None and max_len > conf_max:
        max_len = conf_max

    def just(x: str) -> str:
        # truncate over-wide cells, keeping room for the "..." marker
        if conf_max is not None:
            if (conf_max > 3) & (adjustment.len(x) > max_len):
                x = x[: max_len - 3] + "..."
        return x

    truncated = [just(x) for x in strings]
    return adjustment.justify(truncated, max_len, mode=justify)
1947
+
1948
+
1949
def _trim_zeros_complex(str_complexes: np.ndarray, decimal: str = ".") -> list[str]:
    """
    Separates the real and imaginary parts from the complex number, and
    executes the _trim_zeros_float method on each of those.
    """
    # split on the sign/j separators so the real and imaginary parts are
    # trimmed independently, then re-join into one string per element
    trimmed = [
        "".join(_trim_zeros_float(re.split(r"([j+-])", x), decimal))
        for x in str_complexes
    ]

    # pad strings to the length of the longest trimmed string for alignment
    # NOTE(review): the slice arithmetic below assumes each trimmed string has
    # the shape "<real><sign><imag>j" with real and imaginary parts of equal
    # width (so the midpoint indices land on the sign) — confirm against the
    # fixed-width float formatting that produces these strings.
    lengths = [len(s) for s in trimmed]
    max_length = max(lengths)
    padded = [
        s[: -((k - 1) // 2 + 1)]  # real part
        + (max_length - k) // 2 * "0"  # pad the real part with zeros
        + s[-((k - 1) // 2 + 1) : -((k - 1) // 2)]  # + / -
        + s[-((k - 1) // 2) : -1]  # imaginary part
        + (max_length - k) // 2 * "0"  # pad the imaginary part with zeros
        + s[-1]  # trailing "j"
        for s, k in zip(trimmed, lengths)
    ]
    return padded
1972
+
1973
+
1974
+ def _trim_zeros_single_float(str_float: str) -> str:
1975
+ """
1976
+ Trims trailing zeros after a decimal point,
1977
+ leaving just one if necessary.
1978
+ """
1979
+ str_float = str_float.rstrip("0")
1980
+ if str_float.endswith("."):
1981
+ str_float += "0"
1982
+
1983
+ return str_float
1984
+
1985
+
1986
+ def _trim_zeros_float(
1987
+ str_floats: np.ndarray | list[str], decimal: str = "."
1988
+ ) -> list[str]:
1989
+ """
1990
+ Trims the maximum number of trailing zeros equally from
1991
+ all numbers containing decimals, leaving just one if
1992
+ necessary.
1993
+ """
1994
+ trimmed = str_floats
1995
+ number_regex = re.compile(rf"^\s*[\+-]?[0-9]+\{decimal}[0-9]*$")
1996
+
1997
+ def is_number_with_decimal(x) -> bool:
1998
+ return re.match(number_regex, x) is not None
1999
+
2000
+ def should_trim(values: np.ndarray | list[str]) -> bool:
2001
+ """
2002
+ Determine if an array of strings should be trimmed.
2003
+
2004
+ Returns True if all numbers containing decimals (defined by the
2005
+ above regular expression) within the array end in a zero, otherwise
2006
+ returns False.
2007
+ """
2008
+ numbers = [x for x in values if is_number_with_decimal(x)]
2009
+ return len(numbers) > 0 and all(x.endswith("0") for x in numbers)
2010
+
2011
+ while should_trim(trimmed):
2012
+ trimmed = [x[:-1] if is_number_with_decimal(x) else x for x in trimmed]
2013
+
2014
+ # leave one 0 after the decimal points if need be.
2015
+ result = [
2016
+ x + "0" if is_number_with_decimal(x) and x.endswith(decimal) else x
2017
+ for x in trimmed
2018
+ ]
2019
+ return result
2020
+
2021
+
2022
def _has_names(index: Index) -> bool:
    """Return True if the index (or any level of a MultiIndex) is named."""
    if isinstance(index, MultiIndex):
        return com.any_not_none(*index.names)
    return index.name is not None
2027
+
2028
+
2029
class EngFormatter:
    """
    Formats float values according to engineering format.

    Based on matplotlib.ticker.EngFormatter
    """

    # The SI engineering prefixes
    ENG_PREFIXES = {
        -24: "y",
        -21: "z",
        -18: "a",
        -15: "f",
        -12: "p",
        -9: "n",
        -6: "u",
        -3: "m",
        0: "",
        3: "k",
        6: "M",
        9: "G",
        12: "T",
        15: "P",
        18: "E",
        21: "Z",
        24: "Y",
    }

    def __init__(
        self, accuracy: int | None = None, use_eng_prefix: bool = False
    ) -> None:
        # accuracy: digits after the decimal point; None -> general format
        # use_eng_prefix: SI letter prefixes ("k", "M", ...) vs "E+NN" suffix
        self.accuracy = accuracy
        self.use_eng_prefix = use_eng_prefix

    def __call__(self, num: float) -> str:
        """
        Formats a number in engineering notation, appending a letter
        representing the power of 1000 of the original number. Some examples:
        >>> format_eng = EngFormatter(accuracy=0, use_eng_prefix=True)
        >>> format_eng(0)
        ' 0'
        >>> format_eng = EngFormatter(accuracy=1, use_eng_prefix=True)
        >>> format_eng(1_000_000)
        ' 1.0M'
        >>> format_eng = EngFormatter(accuracy=2, use_eng_prefix=False)
        >>> format_eng("-1e-6")
        '-1.00E-06'

        @param num: the value to represent
        @type num: either a numeric value or a string that can be converted to
                   a numeric value (as per decimal.Decimal constructor)

        @return: engineering formatted string
        """
        dnum = Decimal(str(num))

        if Decimal.is_nan(dnum):
            return "NaN"
        if Decimal.is_infinite(dnum):
            return "inf"

        sign = 1
        if dnum < 0:  # pragma: no cover
            sign = -1
            dnum = -dnum

        # nearest multiple of 3 at or below the value's decimal exponent
        if dnum != 0:
            pow10 = Decimal(int(math.floor(dnum.log10() / 3) * 3))
        else:
            pow10 = Decimal(0)

        # clamp the exponent to the range covered by the prefix table
        pow10 = pow10.min(max(self.ENG_PREFIXES.keys()))
        pow10 = pow10.max(min(self.ENG_PREFIXES.keys()))
        int_pow10 = int(pow10)

        if self.use_eng_prefix:
            prefix = self.ENG_PREFIXES[int_pow10]
        elif int_pow10 < 0:
            prefix = f"E-{-int_pow10:02d}"
        else:
            prefix = f"E+{int_pow10:02d}"

        mant = sign * dnum / (10**pow10)

        if self.accuracy is None:  # pragma: no cover
            format_str = "{mant: g}{prefix}"
        else:
            format_str = f"{{mant: .{self.accuracy:d}f}}{{prefix}}"

        return format_str.format(mant=mant, prefix=prefix)
2124
+
2125
+
2126
def set_eng_float_format(accuracy: int = 3, use_eng_prefix: bool = False) -> None:
    """
    Format float representation in DataFrame with SI notation.

    Parameters
    ----------
    accuracy : int, default 3
        Number of decimal digits after the floating point.
    use_eng_prefix : bool, default False
        Whether to represent a value with SI prefixes.

    Returns
    -------
    None

    Examples
    --------
    >>> df = pd.DataFrame([1e-9, 1e-3, 1, 1e3, 1e6])
    >>> df
                  0
    0  1.000000e-09
    1  1.000000e-03
    2  1.000000e+00
    3  1.000000e+03
    4  1.000000e+06

    >>> pd.set_eng_float_format(accuracy=1)
    >>> df
             0
    0  1.0E-09
    1  1.0E-03
    2  1.0E+00
    3  1.0E+03
    4  1.0E+06

    >>> pd.set_eng_float_format(use_eng_prefix=True)
    >>> df
            0
    0  1.000n
    1  1.000m
    2   1.000
    3  1.000k
    4  1.000M

    >>> pd.set_eng_float_format(accuracy=1, use_eng_prefix=True)
    >>> df
          0
    0  1.0n
    1  1.0m
    2   1.0
    3  1.0k
    4  1.0M

    >>> pd.set_option("display.float_format", None)  # unset option
    """
    # install an EngFormatter as the global float formatter
    eng_formatter = EngFormatter(accuracy=accuracy, use_eng_prefix=use_eng_prefix)
    set_option("display.float_format", eng_formatter)
2182
+
2183
+
2184
def get_level_lengths(
    levels: Any, sentinel: bool | object | str = ""
) -> list[dict[int, int]]:
    """
    For each index in each level the function returns lengths of indexes.

    Parameters
    ----------
    levels : list of lists
        List of values on for level.
    sentinel : string, optional
        Value which states that no new index starts on there.

    Returns
    -------
    Returns list of maps. For each level returns map of indexes (key is index
    in row and value is length of index).
    """
    if len(levels) == 0:
        return []

    # control[i] stays True while position i is still inside a sentinel run
    # carried over from all previously processed levels
    control = [True] * len(levels[0])

    result = []
    for level in levels:
        last_index = 0
        lengths: dict[int, int] = {}

        for i, key in enumerate(level):
            if not (control[i] and key == sentinel):
                # a new index starts here: close off the previous span
                control[i] = False
                lengths[last_index] = i - last_index
                last_index = i

        # close off the final span
        lengths[last_index] = len(level) - last_index
        result.append(lengths)

    return result
2225
+
2226
+
2227
def buffer_put_lines(buf: WriteBuffer[str], lines: list[str]) -> None:
    """
    Appends lines to a buffer.

    Parameters
    ----------
    buf
        The buffer to write to
    lines
        The lines to append.
    """
    if any(isinstance(line, str) for line in lines):
        # normalise every entry to str so join cannot fail on mixed input
        lines = [str(line) for line in lines]
    buf.write("\n".join(lines))
videochat2/lib/python3.10/site-packages/pandas/io/formats/html.py ADDED
@@ -0,0 +1,633 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Module for formatting output data in HTML.
3
+ """
4
+ from __future__ import annotations
5
+
6
+ from textwrap import dedent
7
+ from typing import (
8
+ Any,
9
+ Final,
10
+ Hashable,
11
+ Iterable,
12
+ Mapping,
13
+ cast,
14
+ )
15
+
16
+ from pandas._config import get_option
17
+
18
+ from pandas._libs import lib
19
+
20
+ from pandas import (
21
+ MultiIndex,
22
+ option_context,
23
+ )
24
+
25
+ from pandas.io.common import is_url
26
+ from pandas.io.formats.format import (
27
+ DataFrameFormatter,
28
+ get_level_lengths,
29
+ )
30
+ from pandas.io.formats.printing import pprint_thing
31
+
32
+
33
+ class HTMLFormatter:
34
+ """
35
+ Internal class for formatting output data in html.
36
+ This class is intended for shared functionality between
37
+ DataFrame.to_html() and DataFrame._repr_html_().
38
+ Any logic in common with other output formatting methods
39
+ should ideally be inherited from classes in format.py
40
+ and this class responsible for only producing html markup.
41
+ """
42
+
43
+ indent_delta: Final = 2
44
+
45
+ def __init__(
46
+ self,
47
+ formatter: DataFrameFormatter,
48
+ classes: str | list[str] | tuple[str, ...] | None = None,
49
+ border: int | bool | None = None,
50
+ table_id: str | None = None,
51
+ render_links: bool = False,
52
+ ) -> None:
53
+ self.fmt = formatter
54
+ self.classes = classes
55
+
56
+ self.frame = self.fmt.frame
57
+ self.columns = self.fmt.tr_frame.columns
58
+ self.elements: list[str] = []
59
+ self.bold_rows = self.fmt.bold_rows
60
+ self.escape = self.fmt.escape
61
+ self.show_dimensions = self.fmt.show_dimensions
62
+ if border is None or border is True:
63
+ border = cast(int, get_option("display.html.border"))
64
+ elif not border:
65
+ border = None
66
+
67
+ self.border = border
68
+ self.table_id = table_id
69
+ self.render_links = render_links
70
+
71
+ self.col_space = {
72
+ column: f"{value}px" if isinstance(value, int) else value
73
+ for column, value in self.fmt.col_space.items()
74
+ }
75
+
76
+ def to_string(self) -> str:
77
+ lines = self.render()
78
+ if any(isinstance(x, str) for x in lines):
79
+ lines = [str(x) for x in lines]
80
+ return "\n".join(lines)
81
+
82
+ def render(self) -> list[str]:
83
+ self._write_table()
84
+
85
+ if self.should_show_dimensions:
86
+ by = chr(215) # ×
87
+ self.write(
88
+ f"<p>{len(self.frame)} rows {by} {len(self.frame.columns)} columns</p>"
89
+ )
90
+
91
+ return self.elements
92
+
93
+ @property
94
+ def should_show_dimensions(self) -> bool:
95
+ return self.fmt.should_show_dimensions
96
+
97
+ @property
98
+ def show_row_idx_names(self) -> bool:
99
+ return self.fmt.show_row_idx_names
100
+
101
+ @property
102
+ def show_col_idx_names(self) -> bool:
103
+ return self.fmt.show_col_idx_names
104
+
105
    @property
    def row_levels(self) -> int:
        """Number of leading index cells each body row needs."""
        if self.fmt.index:
            # showing (row) index
            return self.frame.index.nlevels
        elif self.show_col_idx_names:
            # see gh-22579
            # Column misalignment also occurs for
            # a standard index when the columns index is named.
            # If the row index is not displayed a column of
            # blank cells need to be included before the DataFrame values.
            return 1
        # not showing (row) index
        return 0
    def _get_columns_formatted_values(self) -> Iterable:
        # Raw column labels; overridden in NotebookFormatter to pre-format.
        return self.columns
    @property
    def is_truncated(self) -> bool:
        """Whether the displayed frame is truncated in either direction."""
        return self.fmt.is_truncated
    @property
    def ncols(self) -> int:
        """Number of columns actually rendered (after truncation)."""
        return len(self.fmt.tr_frame.columns)
    def write(self, s: Any, indent: int = 0) -> None:
        """Append *s* (pretty-printed) to the output, indented by *indent*."""
        rs = pprint_thing(s)
        self.elements.append(" " * indent + rs)
    def write_th(
        self, s: Any, header: bool = False, indent: int = 0, tags: str | None = None
    ) -> None:
        """
        Method for writing a formatted <th> cell.

        If col_space is set on the formatter then that is used for
        the value of min-width.

        Parameters
        ----------
        s : object
            The data to be written inside the cell.
        header : bool, default False
            Set to True if the <th> is for use inside <thead>. This will
            cause min-width to be set if there is one.
        indent : int, default 0
            The indentation level of the cell.
        tags : str, default None
            Tags to include in the cell.

        Returns
        -------
        A written <th> cell.
        """
        col_space = self.col_space.get(s, None)

        if header and col_space is not None:
            tags = tags or ""
            # NOTE(review): no separating space is inserted before "style=";
            # if a non-empty *tags* is ever passed together with a col_space,
            # the attributes would run together — confirm callers never do.
            tags += f'style="min-width: {col_space};"'

        self._write_cell(s, kind="th", indent=indent, tags=tags)
+ def write_td(self, s: Any, indent: int = 0, tags: str | None = None) -> None:
169
+ self._write_cell(s, kind="td", indent=indent, tags=tags)
170
+
171
+ def _write_cell(
172
+ self, s: Any, kind: str = "td", indent: int = 0, tags: str | None = None
173
+ ) -> None:
174
+ if tags is not None:
175
+ start_tag = f"<{kind} {tags}>"
176
+ else:
177
+ start_tag = f"<{kind}>"
178
+
179
+ if self.escape:
180
+ # escape & first to prevent double escaping of &
181
+ esc = {"&": r"&amp;", "<": r"&lt;", ">": r"&gt;"}
182
+ else:
183
+ esc = {}
184
+
185
+ rs = pprint_thing(s, escape_chars=esc).strip()
186
+
187
+ if self.render_links and is_url(rs):
188
+ rs_unescaped = pprint_thing(s, escape_chars={}).strip()
189
+ start_tag += f'<a href="{rs_unescaped}" target="_blank">'
190
+ end_a = "</a>"
191
+ else:
192
+ end_a = ""
193
+
194
+ self.write(f"{start_tag}{rs}{end_a}</{kind}>", indent)
195
+
196
    def write_tr(
        self,
        line: Iterable,
        indent: int = 0,
        indent_delta: int = 0,
        header: bool = False,
        align: str | None = None,
        tags: dict[int, str] | None = None,
        nindex_levels: int = 0,
    ) -> None:
        """
        Write one <tr> row.

        Cells are <th> when *header* is True or (for the first
        *nindex_levels* cells) when bold_rows is enabled; otherwise <td>.
        *tags* maps a cell position to extra attribute text for that cell.
        """
        if tags is None:
            tags = {}

        if align is None:
            self.write("<tr>", indent)
        else:
            self.write(f'<tr style="text-align: {align};">', indent)
        indent += indent_delta

        for i, s in enumerate(line):
            val_tag = tags.get(i, None)
            if header or (self.bold_rows and i < nindex_levels):
                self.write_th(s, indent=indent, header=header, tags=val_tag)
            else:
                self.write_td(s, indent, tags=val_tag)

        indent -= indent_delta
        self.write("</tr>", indent)
    def _write_table(self, indent: int = 0) -> None:
        """
        Write the <table> element: opening tag with classes/border/id,
        then header and body, then the closing tag.

        Raises
        ------
        TypeError
            If ``self.classes`` is neither a string nor a list/tuple.
        """
        _classes = ["dataframe"]  # Default class.
        use_mathjax = get_option("display.html.use_mathjax")
        if not use_mathjax:
            # Keep MathJax from typesetting cell contents in notebooks.
            _classes.append("tex2jax_ignore")
        if self.classes is not None:
            if isinstance(self.classes, str):
                self.classes = self.classes.split()
            if not isinstance(self.classes, (list, tuple)):
                raise TypeError(
                    "classes must be a string, list, "
                    f"or tuple, not {type(self.classes)}"
                )
            _classes.extend(self.classes)

        if self.table_id is None:
            id_section = ""
        else:
            id_section = f' id="{self.table_id}"'

        if self.border is None:
            border_attr = ""
        else:
            border_attr = f' border="{self.border}"'

        self.write(
            f'<table{border_attr} class="{" ".join(_classes)}"{id_section}>',
            indent,
        )

        # A header row is needed either for column labels or for a
        # dedicated row-index-names row.
        if self.fmt.header or self.show_row_idx_names:
            self._write_header(indent + self.indent_delta)

        self._write_body(indent + self.indent_delta)

        self.write("</table>", indent)
    def _write_col_header(self, indent: int) -> None:
        """
        Write the column-label header row(s).

        For a MultiIndex columns axis, one row per level is written with
        colspan-merged cells; horizontal truncation splices a "..." column
        into the bookkeeping. For a flat columns axis a single row is
        written.
        """
        row: list[Hashable]
        is_truncated_horizontally = self.fmt.is_truncated_horizontally
        if isinstance(self.columns, MultiIndex):
            template = 'colspan="{span:d}" halign="left"'

            sentinel: lib.NoDefault | bool
            if self.fmt.sparsify:
                # GH3547
                sentinel = lib.no_default
            else:
                sentinel = False
            levels = self.columns.format(sparsify=sentinel, adjoin=False, names=False)
            level_lengths = get_level_lengths(levels, sentinel)
            inner_lvl = len(level_lengths) - 1
            for lnum, (records, values) in enumerate(zip(level_lengths, levels)):
                if is_truncated_horizontally:
                    # modify the header lines
                    ins_col = self.fmt.tr_col_num
                    if self.fmt.sparsify:
                        recs_new = {}
                        # Increment tags after ... col.
                        for tag, span in list(records.items()):
                            if tag >= ins_col:
                                recs_new[tag + 1] = span
                            elif tag + span > ins_col:
                                recs_new[tag] = span + 1
                                if lnum == inner_lvl:
                                    values = (
                                        values[:ins_col] + ("...",) + values[ins_col:]
                                    )
                                else:
                                    # sparse col headers do not receive a ...
                                    values = (
                                        values[:ins_col]
                                        + (values[ins_col - 1],)
                                        + values[ins_col:]
                                    )
                            else:
                                recs_new[tag] = span
                            # if ins_col lies between tags, all col headers
                            # get ...
                            if tag + span == ins_col:
                                recs_new[ins_col] = 1
                                values = values[:ins_col] + ("...",) + values[ins_col:]
                        records = recs_new
                        inner_lvl = len(level_lengths) - 1
                        if lnum == inner_lvl:
                            records[ins_col] = 1
                    else:
                        recs_new = {}
                        for tag, span in list(records.items()):
                            if tag >= ins_col:
                                recs_new[tag + 1] = span
                            else:
                                recs_new[tag] = span
                        recs_new[ins_col] = 1
                        records = recs_new
                        values = values[:ins_col] + ["..."] + values[ins_col:]

                # see gh-22579
                # Column Offset Bug with to_html(index=False) with
                # MultiIndex Columns and Index.
                # Initially fill row with blank cells before column names.
                # TODO: Refactor to remove code duplication with code
                # block below for standard columns index.
                row = [""] * (self.row_levels - 1)
                if self.fmt.index or self.show_col_idx_names:
                    # see gh-22747
                    # If to_html(index_names=False) do not show columns
                    # index names.
                    # TODO: Refactor to use _get_column_name_list from
                    # DataFrameFormatter class and create a
                    # _get_formatted_column_labels function for code
                    # parity with DataFrameFormatter class.
                    if self.fmt.show_index_names:
                        name = self.columns.names[lnum]
                        row.append(pprint_thing(name or ""))
                    else:
                        row.append("")

                tags = {}
                j = len(row)
                for i, v in enumerate(values):
                    if i in records:
                        if records[i] > 1:
                            tags[j] = template.format(span=records[i])
                    else:
                        continue
                    j += 1
                    row.append(v)
                self.write_tr(row, indent, self.indent_delta, tags=tags, header=True)
        else:
            # see gh-22579
            # Column misalignment also occurs for
            # a standard index when the columns index is named.
            # Initially fill row with blank cells before column names.
            # TODO: Refactor to remove code duplication with code block
            # above for columns MultiIndex.
            row = [""] * (self.row_levels - 1)
            if self.fmt.index or self.show_col_idx_names:
                # see gh-22747
                # If to_html(index_names=False) do not show columns
                # index names.
                # TODO: Refactor to use _get_column_name_list from
                # DataFrameFormatter class.
                if self.fmt.show_index_names:
                    row.append(self.columns.name or "")
                else:
                    row.append("")
            row.extend(self._get_columns_formatted_values())
            align = self.fmt.justify

            if is_truncated_horizontally:
                ins_col = self.row_levels + self.fmt.tr_col_num
                row.insert(ins_col, "...")

            self.write_tr(row, indent, self.indent_delta, header=True, align=align)
    def _write_row_header(self, indent: int) -> None:
        """
        Write the row that carries the index names, padded with blank
        cells across the data columns (plus one for the "..." column
        when horizontally truncated).
        """
        is_truncated_horizontally = self.fmt.is_truncated_horizontally
        row = [x if x is not None else "" for x in self.frame.index.names] + [""] * (
            self.ncols + (1 if is_truncated_horizontally else 0)
        )
        self.write_tr(row, indent, self.indent_delta, header=True)
    def _write_header(self, indent: int) -> None:
        """Write <thead>: column labels and/or the index-names row."""
        self.write("<thead>", indent)

        if self.fmt.header:
            self._write_col_header(indent + self.indent_delta)

        if self.show_row_idx_names:
            self._write_row_header(indent + self.indent_delta)

        self.write("</thead>", indent)
    def _get_formatted_values(self) -> dict[int, list[str]]:
        """Format every displayed column, with column-width capping disabled."""
        # HTML output should never ellipsis-truncate individual cell values.
        with option_context("display.max_colwidth", None):
            fmt_values = {i: self.fmt.format_col(i) for i in range(self.ncols)}
        return fmt_values
    def _write_body(self, indent: int) -> None:
        """Write <tbody>, dispatching on whether the row index is hierarchical."""
        self.write("<tbody>", indent)
        fmt_values = self._get_formatted_values()

        # write values
        if self.fmt.index and isinstance(self.frame.index, MultiIndex):
            self._write_hierarchical_rows(fmt_values, indent + self.indent_delta)
        else:
            self._write_regular_rows(fmt_values, indent + self.indent_delta)

        self.write("</tbody>", indent)
    def _write_regular_rows(
        self, fmt_values: Mapping[int, list[str]], indent: int
    ) -> None:
        """
        Write body rows for a flat (non-MultiIndex) row index, inserting
        "..." separator rows/cells where the frame is truncated.
        """
        is_truncated_horizontally = self.fmt.is_truncated_horizontally
        is_truncated_vertically = self.fmt.is_truncated_vertically

        nrows = len(self.fmt.tr_frame)

        if self.fmt.index:
            # Honor a user-supplied formatter for the index, if any.
            fmt = self.fmt._get_formatter("__index__")
            if fmt is not None:
                index_values = self.fmt.tr_frame.index.map(fmt)
            else:
                index_values = self.fmt.tr_frame.index.format()

        row: list[str] = []
        for i in range(nrows):
            if is_truncated_vertically and i == (self.fmt.tr_row_num):
                # Emit the "..." separator row; its width mirrors the
                # previous row, so this relies on at least one prior row.
                str_sep_row = ["..."] * len(row)
                self.write_tr(
                    str_sep_row,
                    indent,
                    self.indent_delta,
                    tags=None,
                    nindex_levels=self.row_levels,
                )

            row = []
            if self.fmt.index:
                row.append(index_values[i])
            # see gh-22579
            # Column misalignment also occurs for
            # a standard index when the columns index is named.
            # Add blank cell before data cells.
            elif self.show_col_idx_names:
                row.append("")
            row.extend(fmt_values[j][i] for j in range(self.ncols))

            if is_truncated_horizontally:
                dot_col_ix = self.fmt.tr_col_num + self.row_levels
                row.insert(dot_col_ix, "...")
            self.write_tr(
                row, indent, self.indent_delta, tags=None, nindex_levels=self.row_levels
            )
    def _write_hierarchical_rows(
        self, fmt_values: Mapping[int, list[str]], indent: int
    ) -> None:
        """
        Write body rows for a MultiIndex row index.

        With sparsify on, repeated index labels are merged via rowspan
        tags, and vertical truncation splices a "..." row into both the
        rowspan bookkeeping and the formatted values. Without sparsify,
        rows are written flat, one tuple of index labels per row.
        """
        template = 'rowspan="{span}" valign="top"'

        is_truncated_horizontally = self.fmt.is_truncated_horizontally
        is_truncated_vertically = self.fmt.is_truncated_vertically
        frame = self.fmt.tr_frame
        nrows = len(frame)

        assert isinstance(frame.index, MultiIndex)
        idx_values = frame.index.format(sparsify=False, adjoin=False, names=False)
        idx_values = list(zip(*idx_values))

        if self.fmt.sparsify:
            # GH3547
            sentinel = lib.no_default
            levels = frame.index.format(sparsify=sentinel, adjoin=False, names=False)

            level_lengths = get_level_lengths(levels, sentinel)
            inner_lvl = len(level_lengths) - 1
            if is_truncated_vertically:
                # Insert ... row and adjust idx_values and
                # level_lengths to take this into account.
                ins_row = self.fmt.tr_row_num
                inserted = False
                for lnum, records in enumerate(level_lengths):
                    rec_new = {}
                    for tag, span in list(records.items()):
                        if tag >= ins_row:
                            rec_new[tag + 1] = span
                        elif tag + span > ins_row:
                            rec_new[tag] = span + 1

                            # GH 14882 - Make sure insertion done once
                            if not inserted:
                                dot_row = list(idx_values[ins_row - 1])
                                dot_row[-1] = "..."
                                idx_values.insert(ins_row, tuple(dot_row))
                                inserted = True
                            else:
                                dot_row = list(idx_values[ins_row])
                                dot_row[inner_lvl - lnum] = "..."
                                idx_values[ins_row] = tuple(dot_row)
                        else:
                            rec_new[tag] = span
                        # If ins_row lies between tags, all cols idx cols
                        # receive ...
                        if tag + span == ins_row:
                            rec_new[ins_row] = 1
                            if lnum == 0:
                                idx_values.insert(
                                    ins_row, tuple(["..."] * len(level_lengths))
                                )

                            # GH 14882 - Place ... in correct level
                            elif inserted:
                                dot_row = list(idx_values[ins_row])
                                dot_row[inner_lvl - lnum] = "..."
                                idx_values[ins_row] = tuple(dot_row)
                    level_lengths[lnum] = rec_new

                level_lengths[inner_lvl][ins_row] = 1
                for ix_col in fmt_values:
                    fmt_values[ix_col].insert(ins_row, "...")
                nrows += 1

            for i in range(nrows):
                row = []
                tags = {}

                # Cells swallowed by a rowspan from a previous row.
                sparse_offset = 0
                j = 0
                for records, v in zip(level_lengths, idx_values[i]):
                    if i in records:
                        if records[i] > 1:
                            tags[j] = template.format(span=records[i])
                    else:
                        sparse_offset += 1
                        continue

                    j += 1
                    row.append(v)

                row.extend(fmt_values[j][i] for j in range(self.ncols))
                if is_truncated_horizontally:
                    row.insert(
                        self.row_levels - sparse_offset + self.fmt.tr_col_num, "..."
                    )
                self.write_tr(
                    row,
                    indent,
                    self.indent_delta,
                    tags=tags,
                    nindex_levels=len(levels) - sparse_offset,
                )
        else:
            row = []
            for i in range(len(frame)):
                if is_truncated_vertically and i == (self.fmt.tr_row_num):
                    str_sep_row = ["..."] * len(row)
                    self.write_tr(
                        str_sep_row,
                        indent,
                        self.indent_delta,
                        tags=None,
                        nindex_levels=self.row_levels,
                    )

                # NOTE: recomputed per row; could be hoisted, kept as-is.
                idx_values = list(
                    zip(*frame.index.format(sparsify=False, adjoin=False, names=False))
                )
                row = []
                row.extend(idx_values[i])
                row.extend(fmt_values[j][i] for j in range(self.ncols))
                if is_truncated_horizontally:
                    row.insert(self.row_levels + self.fmt.tr_col_num, "...")
                self.write_tr(
                    row,
                    indent,
                    self.indent_delta,
                    tags=None,
                    nindex_levels=frame.index.nlevels,
                )
+
587
class NotebookFormatter(HTMLFormatter):
    """
    Internal class for formatting output data in html for display in Jupyter
    Notebooks. This class is intended for functionality specific to
    DataFrame._repr_html_() and DataFrame.to_html(notebook=True)
    """

    def _get_formatted_values(self) -> dict[int, list[str]]:
        # Unlike the base class, no max_colwidth override is applied here.
        return {i: self.fmt.format_col(i) for i in range(self.ncols)}

    def _get_columns_formatted_values(self) -> list[str]:
        # Pre-format column labels instead of emitting them raw.
        return self.columns.format()

    def write_style(self) -> None:
        """Write a <style scoped> block tailoring table CSS for notebooks."""
        # We use the "scoped" attribute here so that the desired
        # style properties for the data frame are not then applied
        # throughout the entire notebook.
        template_first = """\
            <style scoped>"""
        template_last = """\
            </style>"""
        template_select = """\
                .dataframe %s {
                    %s: %s;
                }"""
        element_props = [
            ("tbody tr th:only-of-type", "vertical-align", "middle"),
            ("tbody tr th", "vertical-align", "top"),
        ]
        if isinstance(self.columns, MultiIndex):
            element_props.append(("thead tr th", "text-align", "left"))
            if self.show_row_idx_names:
                element_props.append(
                    ("thead tr:last-of-type th", "text-align", "right")
                )
        else:
            element_props.append(("thead th", "text-align", "right"))
        template_mid = "\n\n".join(map(lambda t: template_select % t, element_props))
        template = dedent("\n".join((template_first, template_mid, template_last)))
        self.write(template)

    def render(self) -> list[str]:
        """Render as <div> + scoped style + table, for notebook display."""
        self.write("<div>")
        self.write_style()
        super().render()
        self.write("</div>")
        return self.elements
videochat2/lib/python3.10/site-packages/pandas/io/formats/info.py ADDED
@@ -0,0 +1,1101 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ from abc import (
4
+ ABC,
5
+ abstractmethod,
6
+ )
7
+ import sys
8
+ from textwrap import dedent
9
+ from typing import (
10
+ TYPE_CHECKING,
11
+ Iterable,
12
+ Iterator,
13
+ Mapping,
14
+ Sequence,
15
+ )
16
+
17
+ from pandas._config import get_option
18
+
19
+ from pandas._typing import (
20
+ Dtype,
21
+ WriteBuffer,
22
+ )
23
+
24
+ from pandas.io.formats import format as fmt
25
+ from pandas.io.formats.printing import pprint_thing
26
+
27
+ if TYPE_CHECKING:
28
+ from pandas import (
29
+ DataFrame,
30
+ Index,
31
+ Series,
32
+ )
33
+
34
+
35
+ frame_max_cols_sub = dedent(
36
+ """\
37
+ max_cols : int, optional
38
+ When to switch from the verbose to the truncated output. If the
39
+ DataFrame has more than `max_cols` columns, the truncated output
40
+ is used. By default, the setting in
41
+ ``pandas.options.display.max_info_columns`` is used."""
42
+ )
43
+
44
+
45
+ show_counts_sub = dedent(
46
+ """\
47
+ show_counts : bool, optional
48
+ Whether to show the non-null counts. By default, this is shown
49
+ only if the DataFrame is smaller than
50
+ ``pandas.options.display.max_info_rows`` and
51
+ ``pandas.options.display.max_info_columns``. A value of True always
52
+ shows the counts, and False never shows the counts."""
53
+ )
54
+
55
+
56
+ frame_examples_sub = dedent(
57
+ """\
58
+ >>> int_values = [1, 2, 3, 4, 5]
59
+ >>> text_values = ['alpha', 'beta', 'gamma', 'delta', 'epsilon']
60
+ >>> float_values = [0.0, 0.25, 0.5, 0.75, 1.0]
61
+ >>> df = pd.DataFrame({"int_col": int_values, "text_col": text_values,
62
+ ... "float_col": float_values})
63
+ >>> df
64
+ int_col text_col float_col
65
+ 0 1 alpha 0.00
66
+ 1 2 beta 0.25
67
+ 2 3 gamma 0.50
68
+ 3 4 delta 0.75
69
+ 4 5 epsilon 1.00
70
+
71
+ Prints information of all columns:
72
+
73
+ >>> df.info(verbose=True)
74
+ <class 'pandas.core.frame.DataFrame'>
75
+ RangeIndex: 5 entries, 0 to 4
76
+ Data columns (total 3 columns):
77
+ # Column Non-Null Count Dtype
78
+ --- ------ -------------- -----
79
+ 0 int_col 5 non-null int64
80
+ 1 text_col 5 non-null object
81
+ 2 float_col 5 non-null float64
82
+ dtypes: float64(1), int64(1), object(1)
83
+ memory usage: 248.0+ bytes
84
+
85
+ Prints a summary of columns count and its dtypes but not per column
86
+ information:
87
+
88
+ >>> df.info(verbose=False)
89
+ <class 'pandas.core.frame.DataFrame'>
90
+ RangeIndex: 5 entries, 0 to 4
91
+ Columns: 3 entries, int_col to float_col
92
+ dtypes: float64(1), int64(1), object(1)
93
+ memory usage: 248.0+ bytes
94
+
95
+ Pipe output of DataFrame.info to buffer instead of sys.stdout, get
96
+ buffer content and writes to a text file:
97
+
98
+ >>> import io
99
+ >>> buffer = io.StringIO()
100
+ >>> df.info(buf=buffer)
101
+ >>> s = buffer.getvalue()
102
+ >>> with open("df_info.txt", "w",
103
+ ... encoding="utf-8") as f: # doctest: +SKIP
104
+ ... f.write(s)
105
+ 260
106
+
107
+ The `memory_usage` parameter allows deep introspection mode, specially
108
+ useful for big DataFrames and fine-tune memory optimization:
109
+
110
+ >>> random_strings_array = np.random.choice(['a', 'b', 'c'], 10 ** 6)
111
+ >>> df = pd.DataFrame({
112
+ ... 'column_1': np.random.choice(['a', 'b', 'c'], 10 ** 6),
113
+ ... 'column_2': np.random.choice(['a', 'b', 'c'], 10 ** 6),
114
+ ... 'column_3': np.random.choice(['a', 'b', 'c'], 10 ** 6)
115
+ ... })
116
+ >>> df.info()
117
+ <class 'pandas.core.frame.DataFrame'>
118
+ RangeIndex: 1000000 entries, 0 to 999999
119
+ Data columns (total 3 columns):
120
+ # Column Non-Null Count Dtype
121
+ --- ------ -------------- -----
122
+ 0 column_1 1000000 non-null object
123
+ 1 column_2 1000000 non-null object
124
+ 2 column_3 1000000 non-null object
125
+ dtypes: object(3)
126
+ memory usage: 22.9+ MB
127
+
128
+ >>> df.info(memory_usage='deep')
129
+ <class 'pandas.core.frame.DataFrame'>
130
+ RangeIndex: 1000000 entries, 0 to 999999
131
+ Data columns (total 3 columns):
132
+ # Column Non-Null Count Dtype
133
+ --- ------ -------------- -----
134
+ 0 column_1 1000000 non-null object
135
+ 1 column_2 1000000 non-null object
136
+ 2 column_3 1000000 non-null object
137
+ dtypes: object(3)
138
+ memory usage: 165.9 MB"""
139
+ )
140
+
141
+
142
+ frame_see_also_sub = dedent(
143
+ """\
144
+ DataFrame.describe: Generate descriptive statistics of DataFrame
145
+ columns.
146
+ DataFrame.memory_usage: Memory usage of DataFrame columns."""
147
+ )
148
+
149
+
150
+ frame_sub_kwargs = {
151
+ "klass": "DataFrame",
152
+ "type_sub": " and columns",
153
+ "max_cols_sub": frame_max_cols_sub,
154
+ "show_counts_sub": show_counts_sub,
155
+ "examples_sub": frame_examples_sub,
156
+ "see_also_sub": frame_see_also_sub,
157
+ "version_added_sub": "",
158
+ }
159
+
160
+
161
+ series_examples_sub = dedent(
162
+ """\
163
+ >>> int_values = [1, 2, 3, 4, 5]
164
+ >>> text_values = ['alpha', 'beta', 'gamma', 'delta', 'epsilon']
165
+ >>> s = pd.Series(text_values, index=int_values)
166
+ >>> s.info()
167
+ <class 'pandas.core.series.Series'>
168
+ Index: 5 entries, 1 to 5
169
+ Series name: None
170
+ Non-Null Count Dtype
171
+ -------------- -----
172
+ 5 non-null object
173
+ dtypes: object(1)
174
+ memory usage: 80.0+ bytes
175
+
176
+ Prints a summary excluding information about its values:
177
+
178
+ >>> s.info(verbose=False)
179
+ <class 'pandas.core.series.Series'>
180
+ Index: 5 entries, 1 to 5
181
+ dtypes: object(1)
182
+ memory usage: 80.0+ bytes
183
+
184
+ Pipe output of Series.info to buffer instead of sys.stdout, get
185
+ buffer content and writes to a text file:
186
+
187
+ >>> import io
188
+ >>> buffer = io.StringIO()
189
+ >>> s.info(buf=buffer)
190
+ >>> s = buffer.getvalue()
191
+ >>> with open("df_info.txt", "w",
192
+ ... encoding="utf-8") as f: # doctest: +SKIP
193
+ ... f.write(s)
194
+ 260
195
+
196
+ The `memory_usage` parameter allows deep introspection mode, specially
197
+ useful for big Series and fine-tune memory optimization:
198
+
199
+ >>> random_strings_array = np.random.choice(['a', 'b', 'c'], 10 ** 6)
200
+ >>> s = pd.Series(np.random.choice(['a', 'b', 'c'], 10 ** 6))
201
+ >>> s.info()
202
+ <class 'pandas.core.series.Series'>
203
+ RangeIndex: 1000000 entries, 0 to 999999
204
+ Series name: None
205
+ Non-Null Count Dtype
206
+ -------------- -----
207
+ 1000000 non-null object
208
+ dtypes: object(1)
209
+ memory usage: 7.6+ MB
210
+
211
+ >>> s.info(memory_usage='deep')
212
+ <class 'pandas.core.series.Series'>
213
+ RangeIndex: 1000000 entries, 0 to 999999
214
+ Series name: None
215
+ Non-Null Count Dtype
216
+ -------------- -----
217
+ 1000000 non-null object
218
+ dtypes: object(1)
219
+ memory usage: 55.3 MB"""
220
+ )
221
+
222
+
223
+ series_see_also_sub = dedent(
224
+ """\
225
+ Series.describe: Generate descriptive statistics of Series.
226
+ Series.memory_usage: Memory usage of Series."""
227
+ )
228
+
229
+
230
+ series_sub_kwargs = {
231
+ "klass": "Series",
232
+ "type_sub": "",
233
+ "max_cols_sub": "",
234
+ "show_counts_sub": show_counts_sub,
235
+ "examples_sub": series_examples_sub,
236
+ "see_also_sub": series_see_also_sub,
237
+ "version_added_sub": "\n.. versionadded:: 1.4.0\n",
238
+ }
239
+
240
+
241
+ INFO_DOCSTRING = dedent(
242
+ """
243
+ Print a concise summary of a {klass}.
244
+
245
+ This method prints information about a {klass} including
246
+ the index dtype{type_sub}, non-null values and memory usage.
247
+ {version_added_sub}\
248
+
249
+ Parameters
250
+ ----------
251
+ verbose : bool, optional
252
+ Whether to print the full summary. By default, the setting in
253
+ ``pandas.options.display.max_info_columns`` is followed.
254
+ buf : writable buffer, defaults to sys.stdout
255
+ Where to send the output. By default, the output is printed to
256
+ sys.stdout. Pass a writable buffer if you need to further process
257
+ the output.
258
+ {max_cols_sub}
259
+ memory_usage : bool, str, optional
260
+ Specifies whether total memory usage of the {klass}
261
+ elements (including the index) should be displayed. By default,
262
+ this follows the ``pandas.options.display.memory_usage`` setting.
263
+
264
+ True always show memory usage. False never shows memory usage.
265
+ A value of 'deep' is equivalent to "True with deep introspection".
266
+ Memory usage is shown in human-readable units (base-2
267
+ representation). Without deep introspection a memory estimation is
268
+ made based in column dtype and number of rows assuming values
269
+ consume the same memory amount for corresponding dtypes. With deep
270
+ memory introspection, a real memory usage calculation is performed
271
+ at the cost of computational resources. See the
272
+ :ref:`Frequently Asked Questions <df-memory-usage>` for more
273
+ details.
274
+ {show_counts_sub}
275
+
276
+ Returns
277
+ -------
278
+ None
279
+ This method prints a summary of a {klass} and returns None.
280
+
281
+ See Also
282
+ --------
283
+ {see_also_sub}
284
+
285
+ Examples
286
+ --------
287
+ {examples_sub}
288
+ """
289
+ )
290
+
291
+
292
+ def _put_str(s: str | Dtype, space: int) -> str:
293
+ """
294
+ Make string of specified length, padding to the right if necessary.
295
+
296
+ Parameters
297
+ ----------
298
+ s : Union[str, Dtype]
299
+ String to be formatted.
300
+ space : int
301
+ Length to force string to be of.
302
+
303
+ Returns
304
+ -------
305
+ str
306
+ String coerced to given length.
307
+
308
+ Examples
309
+ --------
310
+ >>> pd.io.formats.info._put_str("panda", 6)
311
+ 'panda '
312
+ >>> pd.io.formats.info._put_str("panda", 4)
313
+ 'pand'
314
+ """
315
+ return str(s)[:space].ljust(space)
316
+
317
+
318
+ def _sizeof_fmt(num: float, size_qualifier: str) -> str:
319
+ """
320
+ Return size in human readable format.
321
+
322
+ Parameters
323
+ ----------
324
+ num : int
325
+ Size in bytes.
326
+ size_qualifier : str
327
+ Either empty, or '+' (if lower bound).
328
+
329
+ Returns
330
+ -------
331
+ str
332
+ Size in human readable format.
333
+
334
+ Examples
335
+ --------
336
+ >>> _sizeof_fmt(23028, '')
337
+ '22.5 KB'
338
+
339
+ >>> _sizeof_fmt(23028, '+')
340
+ '22.5+ KB'
341
+ """
342
+ for x in ["bytes", "KB", "MB", "GB", "TB"]:
343
+ if num < 1024.0:
344
+ return f"{num:3.1f}{size_qualifier} {x}"
345
+ num /= 1024.0
346
+ return f"{num:3.1f}{size_qualifier} PB"
347
+
348
+
349
+ def _initialize_memory_usage(
350
+ memory_usage: bool | str | None = None,
351
+ ) -> bool | str:
352
+ """Get memory usage based on inputs and display options."""
353
+ if memory_usage is None:
354
+ memory_usage = get_option("display.memory_usage")
355
+ return memory_usage
356
+
357
+
358
class BaseInfo(ABC):
    """
    Base class for DataFrameInfo and SeriesInfo.

    Parameters
    ----------
    data : DataFrame or Series
        Either dataframe or series.
    memory_usage : bool or str, optional
        If "deep", introspect the data deeply by interrogating object dtypes
        for system-level memory consumption, and include it in the returned
        values.
    """

    # Set by concrete subclasses' __init__.
    data: DataFrame | Series
    memory_usage: bool | str

    @property
    @abstractmethod
    def dtypes(self) -> Iterable[Dtype]:
        """
        Dtypes.

        Returns
        -------
        dtypes : sequence
            Dtype of each of the DataFrame's columns (or one series column).
        """

    @property
    @abstractmethod
    def dtype_counts(self) -> Mapping[str, int]:
        """Mapping dtype - number of counts."""

    @property
    @abstractmethod
    def non_null_counts(self) -> Sequence[int]:
        """Sequence of non-null counts for all columns or column (if series)."""

    @property
    @abstractmethod
    def memory_usage_bytes(self) -> int:
        """
        Memory usage in bytes.

        Returns
        -------
        memory_usage_bytes : int
            Object's total memory usage in bytes.
        """

    @property
    def memory_usage_string(self) -> str:
        """Memory usage in a form of human readable string."""
        return f"{_sizeof_fmt(self.memory_usage_bytes, self.size_qualifier)}\n"

    @property
    def size_qualifier(self) -> str:
        """'+' when the reported size is only a lower bound, else ''."""
        size_qualifier = ""
        if self.memory_usage:
            if self.memory_usage != "deep":
                # size_qualifier is just a best effort; not guaranteed to catch
                # all cases (e.g., it misses categorical data even with object
                # categories)
                if (
                    "object" in self.dtype_counts
                    or self.data.index._is_memory_usage_qualified()
                ):
                    size_qualifier = "+"
        return size_qualifier

    @abstractmethod
    def render(
        self,
        *,
        buf: WriteBuffer[str] | None,
        max_cols: int | None,
        verbose: bool | None,
        show_counts: bool | None,
    ) -> None:
        # Concrete subclasses format the info summary into *buf*.
        pass
class DataFrameInfo(BaseInfo):
    """
    Class storing dataframe-specific info.
    """

    def __init__(
        self,
        data: DataFrame,
        memory_usage: bool | str | None = None,
    ) -> None:
        self.data: DataFrame = data
        # Resolve None against the display.memory_usage option.
        self.memory_usage = _initialize_memory_usage(memory_usage)

    @property
    def dtype_counts(self) -> Mapping[str, int]:
        """Mapping of dtype name -> number of columns with that dtype."""
        return _get_dataframe_dtype_counts(self.data)

    @property
    def dtypes(self) -> Iterable[Dtype]:
        """
        Dtypes.

        Returns
        -------
        dtypes
            Dtype of each of the DataFrame's columns.
        """
        return self.data.dtypes

    @property
    def ids(self) -> Index:
        """
        Column names.

        Returns
        -------
        ids : Index
            DataFrame's column names.
        """
        return self.data.columns

    @property
    def col_count(self) -> int:
        """Number of columns to be summarized."""
        return len(self.ids)

    @property
    def non_null_counts(self) -> Sequence[int]:
        """Sequence of non-null counts for all columns or column (if series)."""
        return self.data.count()

    @property
    def memory_usage_bytes(self) -> int:
        """Total memory usage (index included; deep when requested)."""
        deep = self.memory_usage == "deep"
        return self.data.memory_usage(index=True, deep=deep).sum()

    def render(
        self,
        *,
        buf: WriteBuffer[str] | None,
        max_cols: int | None,
        verbose: bool | None,
        show_counts: bool | None,
    ) -> None:
        """Format the info summary and write it to *buf* (stdout if None)."""
        printer = DataFrameInfoPrinter(
            info=self,
            max_cols=max_cols,
            verbose=verbose,
            show_counts=show_counts,
        )
        printer.to_buffer(buf)
class SeriesInfo(BaseInfo):
    """
    Class storing series-specific info.
    """

    def __init__(
        self,
        data: Series,
        memory_usage: bool | str | None = None,
    ) -> None:
        self.data: Series = data
        self.memory_usage = _initialize_memory_usage(memory_usage)

    def render(
        self,
        *,
        buf: WriteBuffer[str] | None = None,
        max_cols: int | None = None,
        verbose: bool | None = None,
        show_counts: bool | None = None,
    ) -> None:
        """Delegate rendering to a SeriesInfoPrinter; `max_cols` is rejected."""
        # max_cols has no meaning for a single column of data.
        if max_cols is not None:
            raise ValueError(
                "Argument `max_cols` can only be passed "
                "in DataFrame.info, not Series.info"
            )
        SeriesInfoPrinter(
            info=self,
            verbose=verbose,
            show_counts=show_counts,
        ).to_buffer(buf)

    @property
    def non_null_counts(self) -> Sequence[int]:
        """One-element list holding the series' non-null count."""
        return [self.data.count()]

    @property
    def dtypes(self) -> Iterable[Dtype]:
        """One-element list holding the series' dtype."""
        return [self.data.dtypes]

    @property
    def dtype_counts(self) -> Mapping[str, int]:
        """Mapping dtype -> count, computed via a one-column frame."""
        from pandas.core.frame import DataFrame

        return _get_dataframe_dtype_counts(DataFrame(self.data))

    @property
    def memory_usage_bytes(self) -> int:
        """
        Memory usage in bytes.

        Returns
        -------
        memory_usage_bytes : int
            Object's total memory usage in bytes, index included.
        """
        use_deep = self.memory_usage == "deep"
        return self.data.memory_usage(index=True, deep=use_deep)


class InfoPrinterAbstract:
    """
    Class for printing dataframe or series info.
    """

    def to_buffer(self, buf: WriteBuffer[str] | None = None) -> None:
        """Save dataframe info into buffer (stdout when *buf* is None)."""
        builder = self._create_table_builder()
        rendered = builder.get_lines()
        if buf is None:  # pragma: no cover
            buf = sys.stdout
        fmt.buffer_put_lines(buf, rendered)

    @abstractmethod
    def _create_table_builder(self) -> TableBuilderAbstract:
        """Create instance of table builder."""


class DataFrameInfoPrinter(InfoPrinterAbstract):
    """
    Class for printing dataframe info.

    Parameters
    ----------
    info : DataFrameInfo
        Instance of DataFrameInfo.
    max_cols : int, optional
        When to switch from the verbose to the truncated output.
    verbose : bool, optional
        Whether to print the full summary.
    show_counts : bool, optional
        Whether to show the non-null counts.
    """

    def __init__(
        self,
        info: DataFrameInfo,
        max_cols: int | None = None,
        verbose: bool | None = None,
        show_counts: bool | None = None,
    ) -> None:
        self.info = info
        self.data = info.data
        self.verbose = verbose
        self.max_cols = self._initialize_max_cols(max_cols)
        self.show_counts = self._initialize_show_counts(show_counts)

    @property
    def max_rows(self) -> int:
        """Maximum info rows to be displayed."""
        return get_option("display.max_info_rows", len(self.data) + 1)

    @property
    def exceeds_info_cols(self) -> bool:
        """True when there are more columns than the configured maximum."""
        return bool(self.col_count > self.max_cols)

    @property
    def exceeds_info_rows(self) -> bool:
        """True when there are more rows than the configured maximum."""
        return bool(len(self.data) > self.max_rows)

    @property
    def col_count(self) -> int:
        """Number of columns to be summarized."""
        return self.info.col_count

    def _initialize_max_cols(self, max_cols: int | None) -> int:
        # Fall back to the display option (plus one) when not given.
        if max_cols is not None:
            return max_cols
        return get_option("display.max_info_columns", self.col_count + 1)

    def _initialize_show_counts(self, show_counts: bool | None) -> bool:
        if show_counts is not None:
            return show_counts
        # Default: show counts only when the table is small enough.
        return bool(not self.exceeds_info_cols and not self.exceeds_info_rows)

    def _create_table_builder(self) -> DataFrameTableBuilder:
        """
        Create instance of table builder based on verbosity and display settings.
        """
        # verbose=None falls back to the column-count heuristic; only an
        # explicit False (not merely falsy) skips that heuristic entirely.
        if self.verbose:
            wants_verbose = True
        elif self.verbose is False:
            wants_verbose = False
        else:
            wants_verbose = not self.exceeds_info_cols
        if wants_verbose:
            return DataFrameTableBuilderVerbose(
                info=self.info,
                with_counts=self.show_counts,
            )
        return DataFrameTableBuilderNonVerbose(info=self.info)


class SeriesInfoPrinter(InfoPrinterAbstract):
    """Class for printing series info.

    Parameters
    ----------
    info : SeriesInfo
        Instance of SeriesInfo.
    verbose : bool, optional
        Whether to print the full summary.
    show_counts : bool, optional
        Whether to show the non-null counts.
    """

    def __init__(
        self,
        info: SeriesInfo,
        verbose: bool | None = None,
        show_counts: bool | None = None,
    ) -> None:
        self.info = info
        self.data = info.data
        self.verbose = verbose
        self.show_counts = self._initialize_show_counts(show_counts)

    def _create_table_builder(self) -> SeriesTableBuilder:
        """
        Create instance of table builder based on verbosity.
        """
        # Unlike dataframes, a series defaults (verbose=None) to verbose
        # output; any other falsy value selects the non-verbose builder.
        wants_verbose = self.verbose or self.verbose is None
        if not wants_verbose:
            return SeriesTableBuilderNonVerbose(info=self.info)
        return SeriesTableBuilderVerbose(
            info=self.info,
            with_counts=self.show_counts,
        )

    def _initialize_show_counts(self, show_counts: bool | None) -> bool:
        # Counts are shown by default for a series.
        return True if show_counts is None else show_counts


class TableBuilderAbstract(ABC):
    """
    Abstract builder for info table.
    """

    _lines: list[str]
    info: BaseInfo

    @abstractmethod
    def get_lines(self) -> list[str]:
        """Product in a form of list of lines (strings)."""

    @property
    def data(self) -> DataFrame | Series:
        """Object whose info is being summarized."""
        return self.info.data

    @property
    def dtypes(self) -> Iterable[Dtype]:
        """Dtypes of each of the DataFrame's columns."""
        return self.info.dtypes

    @property
    def dtype_counts(self) -> Mapping[str, int]:
        """Mapping dtype - number of counts."""
        return self.info.dtype_counts

    @property
    def display_memory_usage(self) -> bool:
        """Whether to display memory usage."""
        return bool(self.info.memory_usage)

    @property
    def memory_usage_string(self) -> str:
        """Memory usage string with proper size qualifier."""
        return self.info.memory_usage_string

    @property
    def non_null_counts(self) -> Sequence[int]:
        """Non-null counts, one per summarized column."""
        return self.info.non_null_counts

    def add_object_type_line(self) -> None:
        """Add line with string representation of dataframe to the table."""
        self._lines.append(str(type(self.data)))

    def add_index_range_line(self) -> None:
        """Add line with range of indices to the table."""
        self._lines.append(self.data.index._summary())

    def add_dtypes_line(self) -> None:
        """Add summary line with dtypes present in dataframe."""
        parts = [
            f"{name}({count:d})"
            for name, count in sorted(self.dtype_counts.items())
        ]
        self._lines.append(f"dtypes: {', '.join(parts)}")


class DataFrameTableBuilder(TableBuilderAbstract):
    """
    Abstract builder for dataframe info table.

    Parameters
    ----------
    info : DataFrameInfo.
        Instance of DataFrameInfo.
    """

    def __init__(self, *, info: DataFrameInfo) -> None:
        self.info: DataFrameInfo = info

    def get_lines(self) -> list[str]:
        """Assemble and return all lines of the info table."""
        self._lines = []
        if self.col_count == 0:
            self._fill_empty_info()
        else:
            self._fill_non_empty_info()
        return self._lines

    def _fill_empty_info(self) -> None:
        """Add lines to the info table, pertaining to empty dataframe."""
        self.add_object_type_line()
        self.add_index_range_line()
        self._lines.append(f"Empty {type(self.data).__name__}\n")

    @abstractmethod
    def _fill_non_empty_info(self) -> None:
        """Add lines to the info table, pertaining to non-empty dataframe."""

    @property
    def data(self) -> DataFrame:
        """DataFrame."""
        return self.info.data

    @property
    def ids(self) -> Index:
        """Dataframe columns."""
        return self.info.ids

    @property
    def col_count(self) -> int:
        """Number of dataframe columns to be summarized."""
        return self.info.col_count

    def add_memory_usage_line(self) -> None:
        """Add line containing memory usage."""
        self._lines.append(f"memory usage: {self.memory_usage_string}")


class DataFrameTableBuilderNonVerbose(DataFrameTableBuilder):
    """
    Dataframe info table builder for non-verbose output.
    """

    def _fill_non_empty_info(self) -> None:
        """Add lines to the info table, pertaining to non-empty dataframe."""
        for step in (
            self.add_object_type_line,
            self.add_index_range_line,
            self.add_columns_summary_line,
            self.add_dtypes_line,
        ):
            step()
        if self.display_memory_usage:
            self.add_memory_usage_line()

    def add_columns_summary_line(self) -> None:
        """Add a one-line summary of the column index."""
        self._lines.append(self.ids._summary(name="Columns"))


class TableBuilderVerboseMixin(TableBuilderAbstract):
    """
    Mixin for verbose info output.
    """

    SPACING: str = " " * 2
    strrows: Sequence[Sequence[str]]
    gross_column_widths: Sequence[int]
    with_counts: bool

    @property
    @abstractmethod
    def headers(self) -> Sequence[str]:
        """Headers names of the columns in verbose table."""

    @property
    def header_column_widths(self) -> Sequence[int]:
        """Widths of header columns (only titles)."""
        return [len(title) for title in self.headers]

    def _get_gross_column_widths(self) -> Sequence[int]:
        """Get widths of columns containing both headers and actual content."""
        body_widths = self._get_body_column_widths()
        # Each column is as wide as the wider of its title and its content.
        return [
            max(pair) for pair in zip(self.header_column_widths, body_widths)
        ]

    def _get_body_column_widths(self) -> Sequence[int]:
        """Get widths of table content columns."""
        columns: Sequence[Sequence[str]] = list(zip(*self.strrows))
        return [max(len(cell) for cell in column) for column in columns]

    def _gen_rows(self) -> Iterator[Sequence[str]]:
        """
        Generator function yielding rows content.

        Each element represents a row comprising a sequence of strings.
        """
        if self.with_counts:
            return self._gen_rows_with_counts()
        return self._gen_rows_without_counts()

    @abstractmethod
    def _gen_rows_with_counts(self) -> Iterator[Sequence[str]]:
        """Iterator with string representation of body data with counts."""

    @abstractmethod
    def _gen_rows_without_counts(self) -> Iterator[Sequence[str]]:
        """Iterator with string representation of body data without counts."""

    def add_header_line(self) -> None:
        """Append the column-title line, padded to the gross widths."""
        cells = [
            _put_str(title, width)
            for title, width in zip(self.headers, self.gross_column_widths)
        ]
        self._lines.append(self.SPACING.join(cells))

    def add_separator_line(self) -> None:
        """Append the dashed separator line under the headers."""
        cells = [
            _put_str("-" * header_width, gross_width)
            for header_width, gross_width in zip(
                self.header_column_widths, self.gross_column_widths
            )
        ]
        self._lines.append(self.SPACING.join(cells))

    def add_body_lines(self) -> None:
        """Append one padded line per body row."""
        for row in self.strrows:
            cells = [
                _put_str(cell, gross_width)
                for cell, gross_width in zip(row, self.gross_column_widths)
            ]
            self._lines.append(self.SPACING.join(cells))

    def _gen_non_null_counts(self) -> Iterator[str]:
        """Iterator with string representation of non-null counts."""
        for count in self.non_null_counts:
            yield f"{count} non-null"

    def _gen_dtypes(self) -> Iterator[str]:
        """Iterator with string representation of column dtypes."""
        for dtype in self.dtypes:
            yield pprint_thing(dtype)


class DataFrameTableBuilderVerbose(DataFrameTableBuilder, TableBuilderVerboseMixin):
    """
    Dataframe info table builder for verbose output.
    """

    def __init__(
        self,
        *,
        info: DataFrameInfo,
        with_counts: bool,
    ) -> None:
        self.info = info
        self.with_counts = with_counts
        # Rows are materialized eagerly so column widths can be derived.
        self.strrows: Sequence[Sequence[str]] = list(self._gen_rows())
        self.gross_column_widths: Sequence[int] = self._get_gross_column_widths()

    def _fill_non_empty_info(self) -> None:
        """Add lines to the info table, pertaining to non-empty dataframe."""
        self.add_object_type_line()
        self.add_index_range_line()
        self.add_columns_summary_line()
        self.add_header_line()
        self.add_separator_line()
        self.add_body_lines()
        self.add_dtypes_line()
        if self.display_memory_usage:
            self.add_memory_usage_line()

    @property
    def headers(self) -> Sequence[str]:
        """Headers names of the columns in verbose table."""
        if self.with_counts:
            return [" # ", "Column", "Non-Null Count", "Dtype"]
        return [" # ", "Column", "Dtype"]

    def add_columns_summary_line(self) -> None:
        """Add a line stating the total number of data columns."""
        self._lines.append(f"Data columns (total {self.col_count} columns):")

    def _gen_rows_without_counts(self) -> Iterator[Sequence[str]]:
        """Iterator with string representation of body data without counts."""
        cols = (
            self._gen_line_numbers(),
            self._gen_columns(),
            self._gen_dtypes(),
        )
        yield from zip(*cols)

    def _gen_rows_with_counts(self) -> Iterator[Sequence[str]]:
        """Iterator with string representation of body data with counts."""
        cols = (
            self._gen_line_numbers(),
            self._gen_columns(),
            self._gen_non_null_counts(),
            self._gen_dtypes(),
        )
        yield from zip(*cols)

    def _gen_line_numbers(self) -> Iterator[str]:
        """Iterator with string representation of column numbers."""
        for position, _ in enumerate(self.ids):
            yield f" {position}"

    def _gen_columns(self) -> Iterator[str]:
        """Iterator with string representation of column names."""
        for name in self.ids:
            yield pprint_thing(name)


class SeriesTableBuilder(TableBuilderAbstract):
    """
    Abstract builder for series info table.

    Parameters
    ----------
    info : SeriesInfo.
        Instance of SeriesInfo.
    """

    def __init__(self, *, info: SeriesInfo) -> None:
        self.info: SeriesInfo = info

    def get_lines(self) -> list[str]:
        """Assemble and return all lines of the info table."""
        self._lines = []
        self._fill_non_empty_info()
        return self._lines

    @property
    def data(self) -> Series:
        """Series."""
        return self.info.data

    def add_memory_usage_line(self) -> None:
        """Add line containing memory usage."""
        self._lines.append(f"memory usage: {self.memory_usage_string}")

    @abstractmethod
    def _fill_non_empty_info(self) -> None:
        """Add lines to the info table, pertaining to non-empty series."""


class SeriesTableBuilderNonVerbose(SeriesTableBuilder):
    """
    Series info table builder for non-verbose output.
    """

    def _fill_non_empty_info(self) -> None:
        """Add lines to the info table, pertaining to non-empty series."""
        for step in (
            self.add_object_type_line,
            self.add_index_range_line,
            self.add_dtypes_line,
        ):
            step()
        if self.display_memory_usage:
            self.add_memory_usage_line()


class SeriesTableBuilderVerbose(SeriesTableBuilder, TableBuilderVerboseMixin):
    """
    Series info table builder for verbose output.
    """

    def __init__(
        self,
        *,
        info: SeriesInfo,
        with_counts: bool,
    ) -> None:
        self.info = info
        self.with_counts = with_counts
        # Rows are materialized eagerly so column widths can be derived.
        self.strrows: Sequence[Sequence[str]] = list(self._gen_rows())
        self.gross_column_widths: Sequence[int] = self._get_gross_column_widths()

    def _fill_non_empty_info(self) -> None:
        """Add lines to the info table, pertaining to non-empty series."""
        self.add_object_type_line()
        self.add_index_range_line()
        self.add_series_name_line()
        self.add_header_line()
        self.add_separator_line()
        self.add_body_lines()
        self.add_dtypes_line()
        if self.display_memory_usage:
            self.add_memory_usage_line()

    def add_series_name_line(self) -> None:
        """Add line with the series' name."""
        self._lines.append(f"Series name: {self.data.name}")

    @property
    def headers(self) -> Sequence[str]:
        """Headers names of the columns in verbose table."""
        return ["Non-Null Count", "Dtype"] if self.with_counts else ["Dtype"]

    def _gen_rows_without_counts(self) -> Iterator[Sequence[str]]:
        """Iterator with string representation of body data without counts."""
        yield from self._gen_dtypes()

    def _gen_rows_with_counts(self) -> Iterator[Sequence[str]]:
        """Iterator with string representation of body data with counts."""
        yield from zip(
            self._gen_non_null_counts(),
            self._gen_dtypes(),
        )


+ def _get_dataframe_dtype_counts(df: DataFrame) -> Mapping[str, int]:
1097
+ """
1098
+ Create mapping between datatypes and their number of occurrences.
1099
+ """
1100
+ # groupby dtype.name to collect e.g. Categorical columns
1101
+ return df.dtypes.value_counts().groupby(lambda x: x.name).sum()