ZTWHHH commited on
Commit
ff171e7
·
verified ·
1 Parent(s): 5e52a41

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. videochat2/lib/python3.10/site-packages/pandas/io/__pycache__/__init__.cpython-310.pyc +0 -0
  2. videochat2/lib/python3.10/site-packages/pandas/io/__pycache__/_util.cpython-310.pyc +0 -0
  3. videochat2/lib/python3.10/site-packages/pandas/io/__pycache__/api.cpython-310.pyc +0 -0
  4. videochat2/lib/python3.10/site-packages/pandas/io/__pycache__/clipboards.cpython-310.pyc +0 -0
  5. videochat2/lib/python3.10/site-packages/pandas/io/__pycache__/common.cpython-310.pyc +0 -0
  6. videochat2/lib/python3.10/site-packages/pandas/io/__pycache__/feather_format.cpython-310.pyc +0 -0
  7. videochat2/lib/python3.10/site-packages/pandas/io/__pycache__/gbq.cpython-310.pyc +0 -0
  8. videochat2/lib/python3.10/site-packages/pandas/io/__pycache__/html.cpython-310.pyc +0 -0
  9. videochat2/lib/python3.10/site-packages/pandas/io/__pycache__/orc.cpython-310.pyc +0 -0
  10. videochat2/lib/python3.10/site-packages/pandas/io/__pycache__/parquet.cpython-310.pyc +0 -0
  11. videochat2/lib/python3.10/site-packages/pandas/io/__pycache__/pickle.cpython-310.pyc +0 -0
  12. videochat2/lib/python3.10/site-packages/pandas/io/__pycache__/spss.cpython-310.pyc +0 -0
  13. videochat2/lib/python3.10/site-packages/pandas/io/__pycache__/sql.cpython-310.pyc +0 -0
  14. videochat2/lib/python3.10/site-packages/pandas/io/__pycache__/xml.cpython-310.pyc +0 -0
  15. videochat2/lib/python3.10/site-packages/pandas/io/clipboard/__init__.py +678 -0
  16. videochat2/lib/python3.10/site-packages/pandas/io/clipboard/__pycache__/__init__.cpython-310.pyc +0 -0
  17. videochat2/lib/python3.10/site-packages/pandas/io/excel/__init__.py +19 -0
  18. videochat2/lib/python3.10/site-packages/pandas/io/excel/__pycache__/__init__.cpython-310.pyc +0 -0
  19. videochat2/lib/python3.10/site-packages/pandas/io/excel/__pycache__/_base.cpython-310.pyc +0 -0
  20. videochat2/lib/python3.10/site-packages/pandas/io/excel/__pycache__/_odfreader.cpython-310.pyc +0 -0
  21. videochat2/lib/python3.10/site-packages/pandas/io/excel/__pycache__/_odswriter.cpython-310.pyc +0 -0
  22. videochat2/lib/python3.10/site-packages/pandas/io/excel/__pycache__/_openpyxl.cpython-310.pyc +0 -0
  23. videochat2/lib/python3.10/site-packages/pandas/io/excel/__pycache__/_pyxlsb.cpython-310.pyc +0 -0
  24. videochat2/lib/python3.10/site-packages/pandas/io/excel/__pycache__/_util.cpython-310.pyc +0 -0
  25. videochat2/lib/python3.10/site-packages/pandas/io/excel/__pycache__/_xlrd.cpython-310.pyc +0 -0
  26. videochat2/lib/python3.10/site-packages/pandas/io/excel/__pycache__/_xlsxwriter.cpython-310.pyc +0 -0
  27. videochat2/lib/python3.10/site-packages/pandas/io/excel/_base.py +1594 -0
  28. videochat2/lib/python3.10/site-packages/pandas/io/excel/_odfreader.py +249 -0
  29. videochat2/lib/python3.10/site-packages/pandas/io/excel/_odswriter.py +337 -0
  30. videochat2/lib/python3.10/site-packages/pandas/io/excel/_openpyxl.py +626 -0
  31. videochat2/lib/python3.10/site-packages/pandas/io/excel/_pyxlsb.py +112 -0
  32. videochat2/lib/python3.10/site-packages/pandas/io/excel/_util.py +332 -0
  33. videochat2/lib/python3.10/site-packages/pandas/io/excel/_xlrd.py +126 -0
  34. videochat2/lib/python3.10/site-packages/pandas/io/excel/_xlsxwriter.py +275 -0
  35. videochat2/lib/python3.10/site-packages/pandas/io/formats/__pycache__/__init__.cpython-310.pyc +0 -0
  36. videochat2/lib/python3.10/site-packages/pandas/io/formats/__pycache__/printing.cpython-310.pyc +0 -0
  37. videochat2/lib/python3.10/site-packages/pandas/io/parsers/__init__.py +9 -0
  38. videochat2/lib/python3.10/site-packages/pandas/io/parsers/__pycache__/__init__.cpython-310.pyc +0 -0
  39. videochat2/lib/python3.10/site-packages/pandas/io/parsers/__pycache__/arrow_parser_wrapper.cpython-310.pyc +0 -0
  40. videochat2/lib/python3.10/site-packages/pandas/io/parsers/__pycache__/base_parser.cpython-310.pyc +0 -0
  41. videochat2/lib/python3.10/site-packages/pandas/io/parsers/__pycache__/c_parser_wrapper.cpython-310.pyc +0 -0
  42. videochat2/lib/python3.10/site-packages/pandas/io/parsers/__pycache__/python_parser.cpython-310.pyc +0 -0
  43. videochat2/lib/python3.10/site-packages/pandas/io/parsers/__pycache__/readers.cpython-310.pyc +0 -0
  44. videochat2/lib/python3.10/site-packages/pandas/io/parsers/arrow_parser_wrapper.py +164 -0
  45. videochat2/lib/python3.10/site-packages/pandas/io/parsers/base_parser.py +1388 -0
  46. videochat2/lib/python3.10/site-packages/pandas/io/parsers/c_parser_wrapper.py +423 -0
  47. videochat2/lib/python3.10/site-packages/pandas/io/parsers/python_parser.py +1351 -0
  48. videochat2/lib/python3.10/site-packages/pandas/io/parsers/readers.py +2127 -0
  49. videochat2/lib/python3.10/site-packages/pandas/io/sas/__init__.py +3 -0
  50. videochat2/lib/python3.10/site-packages/pandas/io/sas/__pycache__/__init__.cpython-310.pyc +0 -0
videochat2/lib/python3.10/site-packages/pandas/io/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (312 Bytes). View file
 
videochat2/lib/python3.10/site-packages/pandas/io/__pycache__/_util.cpython-310.pyc ADDED
Binary file (886 Bytes). View file
 
videochat2/lib/python3.10/site-packages/pandas/io/__pycache__/api.cpython-310.pyc ADDED
Binary file (1.3 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/io/__pycache__/clipboards.cpython-310.pyc ADDED
Binary file (4.6 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/io/__pycache__/common.cpython-310.pyc ADDED
Binary file (27.5 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/io/__pycache__/feather_format.cpython-310.pyc ADDED
Binary file (4.51 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/io/__pycache__/gbq.cpython-310.pyc ADDED
Binary file (7.82 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/io/__pycache__/html.cpython-310.pyc ADDED
Binary file (35.9 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/io/__pycache__/orc.cpython-310.pyc ADDED
Binary file (6.5 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/io/__pycache__/parquet.cpython-310.pyc ADDED
Binary file (14.3 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/io/__pycache__/pickle.cpython-310.pyc ADDED
Binary file (5.75 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/io/__pycache__/spss.cpython-310.pyc ADDED
Binary file (2.15 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/io/__pycache__/sql.cpython-310.pyc ADDED
Binary file (66.7 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/io/__pycache__/xml.cpython-310.pyc ADDED
Binary file (32.1 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/io/clipboard/__init__.py ADDED
@@ -0,0 +1,678 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Pyperclip
3
+
4
+ A cross-platform clipboard module for Python,
5
+ with copy & paste functions for plain text.
6
+ By Al Sweigart al@inventwithpython.com
7
+ BSD License
8
+
9
+ Usage:
10
+ import pyperclip
11
+ pyperclip.copy('The text to be copied to the clipboard.')
12
+ spam = pyperclip.paste()
13
+
14
+ if not pyperclip.is_available():
15
+ print("Copy functionality unavailable!")
16
+
17
+ On Windows, no additional modules are needed.
18
+ On Mac, the pyobjc module is used, falling back to the pbcopy and pbpaste cli
19
+ commands. (These commands should come with OS X.).
20
+ On Linux, install xclip or xsel via package manager. For example, in Debian:
21
+ sudo apt-get install xclip
22
+ sudo apt-get install xsel
23
+
24
+ Otherwise on Linux, you will need the PyQt5 modules installed.
25
+
26
+ This module does not work with PyGObject yet.
27
+
28
+ Cygwin is currently not supported.
29
+
30
+ Security Note: This module runs programs with these names:
31
+ - which
32
+ - where
33
+ - pbcopy
34
+ - pbpaste
35
+ - xclip
36
+ - xsel
37
+ - klipper
38
+ - qdbus
39
+ A malicious user could rename or add programs with these names, tricking
40
+ Pyperclip into running them with whatever permissions the Python process has.
41
+
42
+ """
43
+
44
+ __version__ = "1.7.0"
45
+
46
+
47
+ import contextlib
48
+ import ctypes
49
+ from ctypes import (
50
+ c_size_t,
51
+ c_wchar,
52
+ c_wchar_p,
53
+ get_errno,
54
+ sizeof,
55
+ )
56
+ import os
57
+ import platform
58
+ from shutil import which
59
+ import subprocess
60
+ import time
61
+ import warnings
62
+
63
+ from pandas.errors import (
64
+ PyperclipException,
65
+ PyperclipWindowsException,
66
+ )
67
+ from pandas.util._exceptions import find_stack_level
68
+
69
+ # `import PyQt4` sys.exit()s if DISPLAY is not in the environment.
70
+ # Thus, we need to detect the presence of $DISPLAY manually
71
+ # and not load PyQt4 if it is absent.
72
+ HAS_DISPLAY = os.getenv("DISPLAY")
73
+
74
+ EXCEPT_MSG = """
75
+ Pyperclip could not find a copy/paste mechanism for your system.
76
+ For more information, please visit
77
+ https://pyperclip.readthedocs.io/en/latest/#not-implemented-error
78
+ """
79
+
80
+ ENCODING = "utf-8"
81
+
82
+ # The "which" unix command finds where a command is.
83
+ if platform.system() == "Windows":
84
+ WHICH_CMD = "where"
85
+ else:
86
+ WHICH_CMD = "which"
87
+
88
+
89
def _executable_exists(name):
    """Return True if *name* resolves to a runnable executable.

    Shells out to the platform's lookup command (``which`` on POSIX,
    ``where`` on Windows, selected at import time) and treats a zero
    exit status as "found".  Output is discarded via PIPE.
    """
    status = subprocess.call(
        [WHICH_CMD, name], stdout=subprocess.PIPE, stderr=subprocess.PIPE
    )
    return status == 0
96
+
97
+
98
+ def _stringifyText(text) -> str:
99
+ acceptedTypes = (str, int, float, bool)
100
+ if not isinstance(text, acceptedTypes):
101
+ raise PyperclipException(
102
+ f"only str, int, float, and bool values "
103
+ f"can be copied to the clipboard, not {type(text).__name__}"
104
+ )
105
+ return str(text)
106
+
107
+
108
def init_osx_pbcopy_clipboard():
    """Build a (copy, paste) pair backed by macOS's pbcopy/pbpaste CLIs."""

    def copy_osx_pbcopy(text):
        text = _stringifyText(text)  # Converts non-str values to str.
        with subprocess.Popen(
            ["pbcopy", "w"], stdin=subprocess.PIPE, close_fds=True
        ) as proc:
            proc.communicate(input=text.encode(ENCODING))

    def paste_osx_pbcopy():
        with subprocess.Popen(
            ["pbpaste", "r"], stdout=subprocess.PIPE, close_fds=True
        ) as proc:
            out = proc.communicate()[0]
        return out.decode(ENCODING)

    return copy_osx_pbcopy, paste_osx_pbcopy
124
+
125
+
126
def init_osx_pyobjc_clipboard():
    """Build a (copy, paste) pair backed by pyobjc's AppKit pasteboard.

    Assumes ``AppKit`` and ``Foundation`` have already been imported by
    determine_clipboard() before either returned function is called.
    """

    def copy_osx_pyobjc(text):
        """Copy string argument to clipboard"""
        text = _stringifyText(text)  # Converts non-str values to str.
        ns_str = Foundation.NSString.stringWithString_(text).nsstring()
        payload = ns_str.dataUsingEncoding_(Foundation.NSUTF8StringEncoding)
        board = AppKit.NSPasteboard.generalPasteboard()
        board.declareTypes_owner_([AppKit.NSStringPboardType], None)
        board.setData_forType_(payload, AppKit.NSStringPboardType)

    def paste_osx_pyobjc():
        """Returns contents of clipboard"""
        board = AppKit.NSPasteboard.generalPasteboard()
        return board.stringForType_(AppKit.NSStringPboardType)

    return copy_osx_pyobjc, paste_osx_pyobjc
143
+
144
+
145
def init_qt_clipboard():
    """Build a (copy, paste) pair backed by a Qt QApplication clipboard.

    Raises ImportError if no Qt binding (qtpy, PyQt5, PyQt4) is
    installed.  $DISPLAY should exist before this is called.
    """
    global QApplication

    # Try to import from qtpy, but if that fails try PyQt5 then PyQt4
    try:
        from qtpy.QtWidgets import QApplication
    except ImportError:
        try:
            from PyQt5.QtWidgets import QApplication
        except ImportError:
            from PyQt4.QtGui import QApplication

    # Reuse an existing QApplication if one is running; otherwise make one.
    app = QApplication.instance()
    if app is None:
        app = QApplication([])

    def copy_qt(text):
        text = _stringifyText(text)  # Converts non-str values to str.
        app.clipboard().setText(text)

    def paste_qt() -> str:
        return str(app.clipboard().text())

    return copy_qt, paste_qt
172
+
173
+
174
def init_xclip_clipboard():
    """Build a (copy, paste) pair backed by the ``xclip`` CLI.

    Both returned functions accept ``primary=True`` to target the X
    primary selection instead of the clipboard selection.
    """
    DEFAULT_SELECTION = "c"
    PRIMARY_SELECTION = "p"

    def copy_xclip(text, primary=False):
        text = _stringifyText(text)  # Converts non-str values to str.
        selection = PRIMARY_SELECTION if primary else DEFAULT_SELECTION
        with subprocess.Popen(
            ["xclip", "-selection", selection], stdin=subprocess.PIPE, close_fds=True
        ) as proc:
            proc.communicate(input=text.encode(ENCODING))

    def paste_xclip(primary=False):
        selection = PRIMARY_SELECTION if primary else DEFAULT_SELECTION
        with subprocess.Popen(
            ["xclip", "-selection", selection, "-o"],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            close_fds=True,
        ) as proc:
            out = proc.communicate()[0]
        # Intentionally ignore extraneous output on stderr when clipboard is empty
        return out.decode(ENCODING)

    return copy_xclip, paste_xclip
203
+
204
+
205
def init_xsel_clipboard():
    """Build a (copy, paste) pair backed by the ``xsel`` CLI.

    ``primary=True`` selects the X primary selection (-p) rather than
    the clipboard selection (-b).
    """
    DEFAULT_SELECTION = "-b"
    PRIMARY_SELECTION = "-p"

    def copy_xsel(text, primary=False):
        text = _stringifyText(text)  # Converts non-str values to str.
        selection_flag = PRIMARY_SELECTION if primary else DEFAULT_SELECTION
        with subprocess.Popen(
            ["xsel", selection_flag, "-i"], stdin=subprocess.PIPE, close_fds=True
        ) as proc:
            proc.communicate(input=text.encode(ENCODING))

    def paste_xsel(primary=False):
        selection_flag = PRIMARY_SELECTION if primary else DEFAULT_SELECTION
        with subprocess.Popen(
            ["xsel", selection_flag, "-o"], stdout=subprocess.PIPE, close_fds=True
        ) as proc:
            out = proc.communicate()[0]
        return out.decode(ENCODING)

    return copy_xsel, paste_xsel
230
+
231
+
232
def init_klipper_clipboard():
    """Build a (copy, paste) pair backed by KDE's Klipper via ``qdbus``."""

    def copy_klipper(text):
        text = _stringifyText(text)  # Converts non-str values to str.
        with subprocess.Popen(
            [
                "qdbus",
                "org.kde.klipper",
                "/klipper",
                "setClipboardContents",
                text.encode(ENCODING),
            ],
            stdin=subprocess.PIPE,
            close_fds=True,
        ) as proc:
            proc.communicate(input=None)

    def paste_klipper():
        with subprocess.Popen(
            ["qdbus", "org.kde.klipper", "/klipper", "getClipboardContents"],
            stdout=subprocess.PIPE,
            close_fds=True,
        ) as proc:
            out = proc.communicate()[0]

        # Workaround for https://bugs.kde.org/show_bug.cgi?id=342874
        # TODO: https://github.com/asweigart/pyperclip/issues/43
        contents = out.decode(ENCODING)
        # even if blank, Klipper will append a newline at the end
        assert len(contents) > 0
        # make sure that newline is there
        assert contents.endswith("\n")
        if contents.endswith("\n"):
            contents = contents[:-1]
        return contents

    return copy_klipper, paste_klipper
268
+
269
+
270
def init_dev_clipboard_clipboard():
    """Build a (copy, paste) pair backed by Cygwin's /dev/clipboard device."""

    def copy_dev_clipboard(text):
        text = _stringifyText(text)  # Converts non-str values to str.
        if text == "":
            warnings.warn(
                "Pyperclip cannot copy a blank string to the clipboard on Cygwin. "
                "This is effectively a no-op.",
                stacklevel=find_stack_level(),
            )
        if "\r" in text:
            warnings.warn(
                "Pyperclip cannot handle \\r characters on Cygwin.",
                stacklevel=find_stack_level(),
            )

        with open("/dev/clipboard", "w") as fd:
            fd.write(text)

    def paste_dev_clipboard() -> str:
        with open("/dev/clipboard") as fd:
            return fd.read()

    return copy_dev_clipboard, paste_dev_clipboard
294
+
295
+
296
def init_no_clipboard():
    """Build a (copy, paste) pair for when no mechanism is available.

    Each returned object is falsy (so ``if copy:`` tests fail) and
    raises PyperclipException when called.
    """

    class ClipboardUnavailable:
        def __call__(self, *args, **kwargs):
            raise PyperclipException(EXCEPT_MSG)

        def __bool__(self) -> bool:
            return False

    return ClipboardUnavailable(), ClipboardUnavailable()
305
+
306
+
307
+ # Windows-related clipboard functions:
308
class CheckedCall:
    """Wrap a ctypes function, raising when a call reports failure.

    A falsy return value combined with a nonzero ``errno`` becomes a
    PyperclipWindowsException.  Attribute assignments (``argtypes``,
    ``restype``) are forwarded to the wrapped function.
    """

    def __init__(self, f) -> None:
        # Bypass our own __setattr__, which forwards writes to the wrapped f.
        super().__setattr__("f", f)

    def __call__(self, *args):
        result = self.f(*args)
        if not result and get_errno():
            raise PyperclipWindowsException("Error calling " + self.f.__name__)
        return result

    def __setattr__(self, key, value):
        setattr(self.f, key, value)
320
+
321
+
322
def init_windows_clipboard():
    """Build a (copy, paste) pair using the native Windows clipboard API.

    Wires up ctypes prototypes for the user32/kernel32 calls, then
    defines copy/paste closures over them.  Only usable on Windows
    (relies on ``ctypes.windll``).
    """
    global HGLOBAL, LPVOID, DWORD, LPCSTR, INT
    global HWND, HINSTANCE, HMENU, BOOL, UINT, HANDLE
    from ctypes.wintypes import (
        BOOL,
        DWORD,
        HANDLE,
        HGLOBAL,
        HINSTANCE,
        HMENU,
        HWND,
        INT,
        LPCSTR,
        LPVOID,
        UINT,
    )

    windll = ctypes.windll
    msvcrt = ctypes.CDLL("msvcrt")

    safeCreateWindowExA = CheckedCall(windll.user32.CreateWindowExA)
    safeCreateWindowExA.argtypes = [
        DWORD,
        LPCSTR,
        LPCSTR,
        DWORD,
        INT,
        INT,
        INT,
        INT,
        HWND,
        HMENU,
        HINSTANCE,
        LPVOID,
    ]
    safeCreateWindowExA.restype = HWND

    safeDestroyWindow = CheckedCall(windll.user32.DestroyWindow)
    safeDestroyWindow.argtypes = [HWND]
    safeDestroyWindow.restype = BOOL

    # Deliberately NOT wrapped in CheckedCall: failure is retried below.
    OpenClipboard = windll.user32.OpenClipboard
    OpenClipboard.argtypes = [HWND]
    OpenClipboard.restype = BOOL

    safeCloseClipboard = CheckedCall(windll.user32.CloseClipboard)
    safeCloseClipboard.argtypes = []
    safeCloseClipboard.restype = BOOL

    safeEmptyClipboard = CheckedCall(windll.user32.EmptyClipboard)
    safeEmptyClipboard.argtypes = []
    safeEmptyClipboard.restype = BOOL

    safeGetClipboardData = CheckedCall(windll.user32.GetClipboardData)
    safeGetClipboardData.argtypes = [UINT]
    safeGetClipboardData.restype = HANDLE

    safeSetClipboardData = CheckedCall(windll.user32.SetClipboardData)
    safeSetClipboardData.argtypes = [UINT, HANDLE]
    safeSetClipboardData.restype = HANDLE

    safeGlobalAlloc = CheckedCall(windll.kernel32.GlobalAlloc)
    safeGlobalAlloc.argtypes = [UINT, c_size_t]
    safeGlobalAlloc.restype = HGLOBAL

    safeGlobalLock = CheckedCall(windll.kernel32.GlobalLock)
    safeGlobalLock.argtypes = [HGLOBAL]
    safeGlobalLock.restype = LPVOID

    safeGlobalUnlock = CheckedCall(windll.kernel32.GlobalUnlock)
    safeGlobalUnlock.argtypes = [HGLOBAL]
    safeGlobalUnlock.restype = BOOL

    wcslen = CheckedCall(msvcrt.wcslen)
    wcslen.argtypes = [c_wchar_p]
    wcslen.restype = UINT

    GMEM_MOVEABLE = 0x0002
    CF_UNICODETEXT = 13

    @contextlib.contextmanager
    def window():
        """
        Context that provides a valid Windows hwnd.
        """
        # we really just need the hwnd, so setting "STATIC"
        # as predefined lpClass is just fine.
        hwnd = safeCreateWindowExA(
            0, b"STATIC", None, 0, 0, 0, 0, 0, None, None, None, None
        )
        try:
            yield hwnd
        finally:
            safeDestroyWindow(hwnd)

    @contextlib.contextmanager
    def clipboard(hwnd):
        """
        Context manager that opens the clipboard and prevents
        other applications from modifying the clipboard content.
        """
        # We may not get the clipboard handle immediately because
        # some other application is accessing it (?)
        # We try for at least 500ms to get the clipboard.
        deadline = time.time() + 0.5
        success = False
        while time.time() < deadline:
            success = OpenClipboard(hwnd)
            if success:
                break
            time.sleep(0.01)
        if not success:
            raise PyperclipWindowsException("Error calling OpenClipboard")

        try:
            yield
        finally:
            safeCloseClipboard()

    def copy_windows(text):
        # This function is heavily based on
        # http://msdn.com/ms649016#_win32_Copying_Information_to_the_Clipboard
        text = _stringifyText(text)  # Converts non-str values to str.

        with window() as hwnd:
            # http://msdn.com/ms649048
            # If an application calls OpenClipboard with hwnd set to NULL,
            # EmptyClipboard sets the clipboard owner to NULL;
            # this causes SetClipboardData to fail.
            # => We need a valid hwnd to copy something.
            with clipboard(hwnd):
                safeEmptyClipboard()

                if text:
                    # http://msdn.com/ms649051
                    # If the hMem parameter identifies a memory object,
                    # the object must have been allocated using the
                    # function with the GMEM_MOVEABLE flag.
                    count = wcslen(text) + 1
                    handle = safeGlobalAlloc(GMEM_MOVEABLE, count * sizeof(c_wchar))
                    locked_handle = safeGlobalLock(handle)

                    ctypes.memmove(
                        c_wchar_p(locked_handle),
                        c_wchar_p(text),
                        count * sizeof(c_wchar),
                    )

                    safeGlobalUnlock(handle)
                    safeSetClipboardData(CF_UNICODETEXT, handle)

    def paste_windows():
        with clipboard(None):
            handle = safeGetClipboardData(CF_UNICODETEXT)
            if not handle:
                # GetClipboardData may return NULL with errno == NO_ERROR
                # if the clipboard is empty.
                # (Also, it may return a handle to an empty buffer,
                # but technically that's not empty)
                return ""
            return c_wchar_p(handle).value

    return copy_windows, paste_windows
486
+
487
+
488
def init_wsl_clipboard():
    """Build a (copy, paste) pair for WSL via clip.exe / powershell.exe."""

    def copy_wsl(text):
        text = _stringifyText(text)  # Converts non-str values to str.
        with subprocess.Popen(["clip.exe"], stdin=subprocess.PIPE, close_fds=True) as p:
            p.communicate(input=text.encode(ENCODING))

    def paste_wsl():
        with subprocess.Popen(
            ["powershell.exe", "-command", "Get-Clipboard"],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            close_fds=True,
        ) as p:
            out = p.communicate()[0]
        # WSL appends "\r\n" to the contents.
        return out[:-2].decode(ENCODING)

    return copy_wsl, paste_wsl
506
+
507
+
508
+ # Automatic detection of clipboard mechanisms
509
+ # and importing is done in determine_clipboard():
510
def determine_clipboard():
    """
    Determine the OS/platform and set the copy() and paste() functions
    accordingly.

    Checks run in priority order: Cygwin, Windows, WSL, macOS, then
    X11 tools (xsel, xclip, klipper), then Qt bindings, falling back to
    the raising stubs from init_no_clipboard().
    """
    global Foundation, AppKit, qtpy, PyQt4, PyQt5

    # Setup for the CYGWIN platform:
    if (
        "cygwin" in platform.system().lower()
    ):  # Cygwin has a variety of values returned by platform.system(),
        # such as 'CYGWIN_NT-6.1'
        # FIXME(pyperclip#55): pyperclip currently does not support Cygwin,
        # see https://github.com/asweigart/pyperclip/issues/55
        if os.path.exists("/dev/clipboard"):
            warnings.warn(
                "Pyperclip's support for Cygwin is not perfect, "
                "see https://github.com/asweigart/pyperclip/issues/55",
                stacklevel=find_stack_level(),
            )
            return init_dev_clipboard_clipboard()

    # Setup for the WINDOWS platform:
    elif os.name == "nt" or platform.system() == "Windows":
        return init_windows_clipboard()

    if platform.system() == "Linux":
        # Presence of wslconfig.exe is used as a WSL marker here.
        if which("wslconfig.exe"):
            return init_wsl_clipboard()

    # Setup for the macOS platform:
    if os.name == "mac" or platform.system() == "Darwin":
        try:
            import AppKit
            import Foundation  # check if pyobjc is installed
        except ImportError:
            return init_osx_pbcopy_clipboard()
        else:
            return init_osx_pyobjc_clipboard()

    # Setup for the LINUX platform:
    if HAS_DISPLAY:
        if _executable_exists("xsel"):
            return init_xsel_clipboard()
        if _executable_exists("xclip"):
            return init_xclip_clipboard()
        if _executable_exists("klipper") and _executable_exists("qdbus"):
            return init_klipper_clipboard()

        try:
            # qtpy is a small abstraction layer that lets you write applications
            # using a single api call to either PyQt or PySide.
            # https://pypi.python.org/project/QtPy
            import qtpy  # check if qtpy is installed
        except ImportError:
            # If qtpy isn't installed, fall back on importing PyQt4.
            try:
                import PyQt5  # check if PyQt5 is installed
            except ImportError:
                try:
                    import PyQt4  # check if PyQt4 is installed
                except ImportError:
                    pass  # We want to fail fast for all non-ImportError exceptions.
                else:
                    return init_qt_clipboard()
            else:
                return init_qt_clipboard()
        else:
            return init_qt_clipboard()

    return init_no_clipboard()
581
+
582
+
583
def set_clipboard(clipboard):
    """
    Explicitly sets the clipboard mechanism. The "clipboard mechanism" is how
    the copy() and paste() functions interact with the operating system to
    implement the copy/paste feature. The clipboard parameter must be one of:
        - pbcopy
        - pyobjc (default on macOS)
        - qt
        - xclip
        - xsel
        - klipper
        - windows (default on Windows)
        - no (this is what is set when no clipboard mechanism can be found)

    Raises ValueError for any other name.
    """
    global copy, paste

    clipboard_types = {
        "pbcopy": init_osx_pbcopy_clipboard,
        "pyobjc": init_osx_pyobjc_clipboard,
        "qt": init_qt_clipboard,  # TODO - split this into 'qtpy', 'pyqt4', and 'pyqt5'
        "xclip": init_xclip_clipboard,
        "xsel": init_xsel_clipboard,
        "klipper": init_klipper_clipboard,
        "windows": init_windows_clipboard,
        "no": init_no_clipboard,
    }

    if clipboard not in clipboard_types:
        allowed = [repr(name) for name in clipboard_types]
        raise ValueError(f"Argument must be one of {', '.join(allowed)}")

    # Sets pyperclip's copy() and paste() functions:
    copy, paste = clipboard_types[clipboard]()
618
+
619
+
620
def lazy_load_stub_copy(text):
    """
    A stub for copy() that resolves the real clipboard mechanism on first
    use, then forwards the call to it.

    Importing this module does not run determine_clipboard() up front, so a
    user who wants a specific (or lighter-weight) mechanism can call
    set_clipboard() first. If copy()/paste() are used without that, this
    stub installs whatever determine_clipboard() selects automatically.
    """
    global copy, paste
    copy, paste = determine_clipboard()
    return copy(text)
640
+
641
+
642
def lazy_load_stub_paste():
    """
    A stub for paste() that resolves the real clipboard mechanism on first
    use, then forwards the call to it.

    Importing this module does not run determine_clipboard() up front, so a
    user who wants a specific (or lighter-weight) mechanism can call
    set_clipboard() first. If copy()/paste() are used without that, this
    stub installs whatever determine_clipboard() selects automatically.
    """
    global copy, paste
    copy, paste = determine_clipboard()
    return paste()
662
+
663
+
664
def is_available() -> bool:
    """Report whether a real clipboard mechanism has been loaded.

    False while ``copy``/``paste`` are still the lazy-loading stubs,
    i.e. before any clipboard access or explicit set_clipboard() call.
    """
    return not (copy == lazy_load_stub_copy or paste == lazy_load_stub_paste)
666
+
667
+
668
# Initially, copy() and paste() are set to lazy loading wrappers which will
# set `copy` and `paste` to real functions the first time they're used, unless
# set_clipboard() or determine_clipboard() is called first.
copy, paste = lazy_load_stub_copy, lazy_load_stub_paste


__all__ = ["copy", "paste", "set_clipboard", "determine_clipboard"]

# pandas aliases
clipboard_get = paste
clipboard_set = copy
videochat2/lib/python3.10/site-packages/pandas/io/clipboard/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (17.4 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/io/excel/__init__.py ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from pandas.io.excel._base import (
    ExcelFile,
    ExcelWriter,
    read_excel,
)
from pandas.io.excel._odswriter import ODSWriter as _ODSWriter
from pandas.io.excel._openpyxl import OpenpyxlWriter as _OpenpyxlWriter
from pandas.io.excel._util import register_writer
from pandas.io.excel._xlsxwriter import XlsxWriter as _XlsxWriter

__all__ = ["read_excel", "ExcelWriter", "ExcelFile"]


# Register the bundled writer classes so ExcelWriter can resolve an
# engine from a filename extension.
register_writer(_OpenpyxlWriter)

register_writer(_XlsxWriter)


register_writer(_ODSWriter)
videochat2/lib/python3.10/site-packages/pandas/io/excel/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (624 Bytes). View file
 
videochat2/lib/python3.10/site-packages/pandas/io/excel/__pycache__/_base.cpython-310.pyc ADDED
Binary file (46 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/io/excel/__pycache__/_odfreader.cpython-310.pyc ADDED
Binary file (6.91 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/io/excel/__pycache__/_odswriter.cpython-310.pyc ADDED
Binary file (8.41 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/io/excel/__pycache__/_openpyxl.cpython-310.pyc ADDED
Binary file (17.4 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/io/excel/__pycache__/_pyxlsb.cpython-310.pyc ADDED
Binary file (3.82 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/io/excel/__pycache__/_util.cpython-310.pyc ADDED
Binary file (8.36 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/io/excel/__pycache__/_xlrd.cpython-310.pyc ADDED
Binary file (3.83 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/io/excel/__pycache__/_xlsxwriter.cpython-310.pyc ADDED
Binary file (5.98 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/io/excel/_base.py ADDED
@@ -0,0 +1,1594 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import abc
4
+ import datetime
5
+ from functools import partial
6
+ from io import BytesIO
7
+ import os
8
+ from textwrap import fill
9
+ from types import TracebackType
10
+ from typing import (
11
+ IO,
12
+ Any,
13
+ Callable,
14
+ Hashable,
15
+ Iterable,
16
+ List,
17
+ Literal,
18
+ Mapping,
19
+ Sequence,
20
+ Union,
21
+ cast,
22
+ overload,
23
+ )
24
+ import zipfile
25
+
26
+ from pandas._config import config
27
+
28
+ from pandas._libs import lib
29
+ from pandas._libs.parsers import STR_NA_VALUES
30
+ from pandas._typing import (
31
+ DtypeArg,
32
+ DtypeBackend,
33
+ FilePath,
34
+ IntStrT,
35
+ ReadBuffer,
36
+ StorageOptions,
37
+ WriteExcelBuffer,
38
+ )
39
+ from pandas.compat._optional import (
40
+ get_version,
41
+ import_optional_dependency,
42
+ )
43
+ from pandas.errors import EmptyDataError
44
+ from pandas.util._decorators import (
45
+ Appender,
46
+ doc,
47
+ )
48
+ from pandas.util._validators import check_dtype_backend
49
+
50
+ from pandas.core.dtypes.common import (
51
+ is_bool,
52
+ is_float,
53
+ is_integer,
54
+ is_list_like,
55
+ )
56
+
57
+ from pandas.core.frame import DataFrame
58
+ from pandas.core.shared_docs import _shared_docs
59
+ from pandas.util.version import Version
60
+
61
+ from pandas.io.common import (
62
+ IOHandles,
63
+ get_handle,
64
+ stringify_path,
65
+ validate_header_arg,
66
+ )
67
+ from pandas.io.excel._util import (
68
+ fill_mi_header,
69
+ get_default_engine,
70
+ get_writer,
71
+ maybe_convert_usecols,
72
+ pop_header_name,
73
+ )
74
+ from pandas.io.parsers import TextParser
75
+ from pandas.io.parsers.readers import validate_integer
76
+
77
+ _read_excel_doc = (
78
+ """
79
+ Read an Excel file into a pandas DataFrame.
80
+
81
+ Supports `xls`, `xlsx`, `xlsm`, `xlsb`, `odf`, `ods` and `odt` file extensions
82
+ read from a local filesystem or URL. Supports an option to read
83
+ a single sheet or a list of sheets.
84
+
85
+ Parameters
86
+ ----------
87
+ io : str, bytes, ExcelFile, xlrd.Book, path object, or file-like object
88
+ Any valid string path is acceptable. The string could be a URL. Valid
89
+ URL schemes include http, ftp, s3, and file. For file URLs, a host is
90
+ expected. A local file could be: ``file://localhost/path/to/table.xlsx``.
91
+
92
+ If you want to pass in a path object, pandas accepts any ``os.PathLike``.
93
+
94
+ By file-like object, we refer to objects with a ``read()`` method,
95
+ such as a file handle (e.g. via builtin ``open`` function)
96
+ or ``StringIO``.
97
+ sheet_name : str, int, list, or None, default 0
98
+ Strings are used for sheet names. Integers are used in zero-indexed
99
+ sheet positions (chart sheets do not count as a sheet position).
100
+ Lists of strings/integers are used to request multiple sheets.
101
+ Specify None to get all worksheets.
102
+
103
+ Available cases:
104
+
105
+ * Defaults to ``0``: 1st sheet as a `DataFrame`
106
+ * ``1``: 2nd sheet as a `DataFrame`
107
+ * ``"Sheet1"``: Load sheet with name "Sheet1"
108
+ * ``[0, 1, "Sheet5"]``: Load first, second and sheet named "Sheet5"
109
+ as a dict of `DataFrame`
110
+ * None: All worksheets.
111
+
112
+ header : int, list of int, default 0
113
+ Row (0-indexed) to use for the column labels of the parsed
114
+ DataFrame. If a list of integers is passed those row positions will
115
+ be combined into a ``MultiIndex``. Use None if there is no header.
116
+ names : array-like, default None
117
+ List of column names to use. If file contains no header row,
118
+ then you should explicitly pass header=None.
119
+ index_col : int, list of int, default None
120
+ Column (0-indexed) to use as the row labels of the DataFrame.
121
+ Pass None if there is no such column. If a list is passed,
122
+ those columns will be combined into a ``MultiIndex``. If a
123
+ subset of data is selected with ``usecols``, index_col
124
+ is based on the subset.
125
+
126
+ Missing values will be forward filled to allow roundtripping with
127
+ ``to_excel`` for ``merged_cells=True``. To avoid forward filling the
128
+ missing values use ``set_index`` after reading the data instead of
129
+ ``index_col``.
130
+ usecols : str, list-like, or callable, default None
131
+ * If None, then parse all columns.
132
+ * If str, then indicates comma separated list of Excel column letters
133
+ and column ranges (e.g. "A:E" or "A,C,E:F"). Ranges are inclusive of
134
+ both sides.
135
+ * If list of int, then indicates list of column numbers to be parsed
136
+ (0-indexed).
137
+ * If list of string, then indicates list of column names to be parsed.
138
+ * If callable, then evaluate each column name against it and parse the
139
+ column if the callable returns ``True``.
140
+
141
+ Returns a subset of the columns according to behavior above.
142
+ dtype : Type name or dict of column -> type, default None
143
+ Data type for data or columns. E.g. {{'a': np.float64, 'b': np.int32}}
144
+ Use `object` to preserve data as stored in Excel and not interpret dtype.
145
+ If converters are specified, they will be applied INSTEAD
146
+ of dtype conversion.
147
+ engine : str, default None
148
+ If io is not a buffer or path, this must be set to identify io.
149
+ Supported engines: "xlrd", "openpyxl", "odf", "pyxlsb".
150
+ Engine compatibility :
151
+
152
+ - "xlrd" supports old-style Excel files (.xls).
153
+ - "openpyxl" supports newer Excel file formats.
154
+ - "odf" supports OpenDocument file formats (.odf, .ods, .odt).
155
+ - "pyxlsb" supports Binary Excel files.
156
+
157
+ .. versionchanged:: 1.2.0
158
+ The engine `xlrd <https://xlrd.readthedocs.io/en/latest/>`_
159
+ now only supports old-style ``.xls`` files.
160
+ When ``engine=None``, the following logic will be
161
+ used to determine the engine:
162
+
163
+ - If ``path_or_buffer`` is an OpenDocument format (.odf, .ods, .odt),
164
+ then `odf <https://pypi.org/project/odfpy/>`_ will be used.
165
+ - Otherwise if ``path_or_buffer`` is an xls format,
166
+ ``xlrd`` will be used.
167
+ - Otherwise if ``path_or_buffer`` is in xlsb format,
168
+ ``pyxlsb`` will be used.
169
+
170
+ .. versionadded:: 1.3.0
171
+ - Otherwise ``openpyxl`` will be used.
172
+
173
+ .. versionchanged:: 1.3.0
174
+
175
+ converters : dict, default None
176
+ Dict of functions for converting values in certain columns. Keys can
177
+ either be integers or column labels, values are functions that take one
178
+ input argument, the Excel cell content, and return the transformed
179
+ content.
180
+ true_values : list, default None
181
+ Values to consider as True.
182
+ false_values : list, default None
183
+ Values to consider as False.
184
+ skiprows : list-like, int, or callable, optional
185
+ Line numbers to skip (0-indexed) or number of lines to skip (int) at the
186
+ start of the file. If callable, the callable function will be evaluated
187
+ against the row indices, returning True if the row should be skipped and
188
+ False otherwise. An example of a valid callable argument would be ``lambda
189
+ x: x in [0, 2]``.
190
+ nrows : int, default None
191
+ Number of rows to parse.
192
+ na_values : scalar, str, list-like, or dict, default None
193
+ Additional strings to recognize as NA/NaN. If dict passed, specific
194
+ per-column NA values. By default the following values are interpreted
195
+ as NaN: '"""
196
+ + fill("', '".join(sorted(STR_NA_VALUES)), 70, subsequent_indent=" ")
197
+ + """'.
198
+ keep_default_na : bool, default True
199
+ Whether or not to include the default NaN values when parsing the data.
200
+ Depending on whether `na_values` is passed in, the behavior is as follows:
201
+
202
+ * If `keep_default_na` is True, and `na_values` are specified, `na_values`
203
+ is appended to the default NaN values used for parsing.
204
+ * If `keep_default_na` is True, and `na_values` are not specified, only
205
+ the default NaN values are used for parsing.
206
+ * If `keep_default_na` is False, and `na_values` are specified, only
207
+ the NaN values specified `na_values` are used for parsing.
208
+ * If `keep_default_na` is False, and `na_values` are not specified, no
209
+ strings will be parsed as NaN.
210
+
211
+ Note that if `na_filter` is passed in as False, the `keep_default_na` and
212
+ `na_values` parameters will be ignored.
213
+ na_filter : bool, default True
214
+ Detect missing value markers (empty strings and the value of na_values). In
215
+ data without any NAs, passing na_filter=False can improve the performance
216
+ of reading a large file.
217
+ verbose : bool, default False
218
+ Indicate number of NA values placed in non-numeric columns.
219
+ parse_dates : bool, list-like, or dict, default False
220
+ The behavior is as follows:
221
+
222
+ * bool. If True -> try parsing the index.
223
+ * list of int or names. e.g. If [1, 2, 3] -> try parsing columns 1, 2, 3
224
+ each as a separate date column.
225
+ * list of lists. e.g. If [[1, 3]] -> combine columns 1 and 3 and parse as
226
+ a single date column.
227
+ * dict, e.g. {{'foo' : [1, 3]}} -> parse columns 1, 3 as date and call
228
+ result 'foo'
229
+
230
+ If a column or index contains an unparsable date, the entire column or
231
+ index will be returned unaltered as an object data type. If you don`t want to
232
+ parse some cells as date just change their type in Excel to "Text".
233
+ For non-standard datetime parsing, use ``pd.to_datetime`` after ``pd.read_excel``.
234
+
235
+ Note: A fast-path exists for iso8601-formatted dates.
236
+ date_parser : function, optional
237
+ Function to use for converting a sequence of string columns to an array of
238
+ datetime instances. The default uses ``dateutil.parser.parser`` to do the
239
+ conversion. Pandas will try to call `date_parser` in three different ways,
240
+ advancing to the next if an exception occurs: 1) Pass one or more arrays
241
+ (as defined by `parse_dates`) as arguments; 2) concatenate (row-wise) the
242
+ string values from the columns defined by `parse_dates` into a single array
243
+ and pass that; and 3) call `date_parser` once for each row using one or
244
+ more strings (corresponding to the columns defined by `parse_dates`) as
245
+ arguments.
246
+
247
+ .. deprecated:: 2.0.0
248
+ Use ``date_format`` instead, or read in as ``object`` and then apply
249
+ :func:`to_datetime` as-needed.
250
+ date_format : str or dict of column -> format, default ``None``
251
+ If used in conjunction with ``parse_dates``, will parse dates according to this
252
+ format. For anything more complex,
253
+ please read in as ``object`` and then apply :func:`to_datetime` as-needed.
254
+
255
+ .. versionadded:: 2.0.0
256
+ thousands : str, default None
257
+ Thousands separator for parsing string columns to numeric. Note that
258
+ this parameter is only necessary for columns stored as TEXT in Excel,
259
+ any numeric columns will automatically be parsed, regardless of display
260
+ format.
261
+ decimal : str, default '.'
262
+ Character to recognize as decimal point for parsing string columns to numeric.
263
+ Note that this parameter is only necessary for columns stored as TEXT in Excel,
264
+ any numeric columns will automatically be parsed, regardless of display
265
+ format.(e.g. use ',' for European data).
266
+
267
+ .. versionadded:: 1.4.0
268
+
269
+ comment : str, default None
270
+ Comments out remainder of line. Pass a character or characters to this
271
+ argument to indicate comments in the input file. Any data between the
272
+ comment string and the end of the current line is ignored.
273
+ skipfooter : int, default 0
274
+ Rows at the end to skip (0-indexed).
275
+ {storage_options}
276
+
277
+ .. versionadded:: 1.2.0
278
+
279
+ dtype_backend : {{"numpy_nullable", "pyarrow"}}, defaults to NumPy backed DataFrames
280
+ Which dtype_backend to use, e.g. whether a DataFrame should have NumPy
281
+ arrays, nullable dtypes are used for all dtypes that have a nullable
282
+ implementation when "numpy_nullable" is set, pyarrow is used for all
283
+ dtypes if "pyarrow" is set.
284
+
285
+ The dtype_backends are still experimential.
286
+
287
+ .. versionadded:: 2.0
288
+
289
+ Returns
290
+ -------
291
+ DataFrame or dict of DataFrames
292
+ DataFrame from the passed in Excel file. See notes in sheet_name
293
+ argument for more information on when a dict of DataFrames is returned.
294
+
295
+ See Also
296
+ --------
297
+ DataFrame.to_excel : Write DataFrame to an Excel file.
298
+ DataFrame.to_csv : Write DataFrame to a comma-separated values (csv) file.
299
+ read_csv : Read a comma-separated values (csv) file into DataFrame.
300
+ read_fwf : Read a table of fixed-width formatted lines into DataFrame.
301
+
302
+ Examples
303
+ --------
304
+ The file can be read using the file name as string or an open file object:
305
+
306
+ >>> pd.read_excel('tmp.xlsx', index_col=0) # doctest: +SKIP
307
+ Name Value
308
+ 0 string1 1
309
+ 1 string2 2
310
+ 2 #Comment 3
311
+
312
+ >>> pd.read_excel(open('tmp.xlsx', 'rb'),
313
+ ... sheet_name='Sheet3') # doctest: +SKIP
314
+ Unnamed: 0 Name Value
315
+ 0 0 string1 1
316
+ 1 1 string2 2
317
+ 2 2 #Comment 3
318
+
319
+ Index and header can be specified via the `index_col` and `header` arguments
320
+
321
+ >>> pd.read_excel('tmp.xlsx', index_col=None, header=None) # doctest: +SKIP
322
+ 0 1 2
323
+ 0 NaN Name Value
324
+ 1 0.0 string1 1
325
+ 2 1.0 string2 2
326
+ 3 2.0 #Comment 3
327
+
328
+ Column types are inferred but can be explicitly specified
329
+
330
+ >>> pd.read_excel('tmp.xlsx', index_col=0,
331
+ ... dtype={{'Name': str, 'Value': float}}) # doctest: +SKIP
332
+ Name Value
333
+ 0 string1 1.0
334
+ 1 string2 2.0
335
+ 2 #Comment 3.0
336
+
337
+ True, False, and NA values, and thousands separators have defaults,
338
+ but can be explicitly specified, too. Supply the values you would like
339
+ as strings or lists of strings!
340
+
341
+ >>> pd.read_excel('tmp.xlsx', index_col=0,
342
+ ... na_values=['string1', 'string2']) # doctest: +SKIP
343
+ Name Value
344
+ 0 NaN 1
345
+ 1 NaN 2
346
+ 2 #Comment 3
347
+
348
+ Comment lines in the excel input file can be skipped using the `comment` kwarg
349
+
350
+ >>> pd.read_excel('tmp.xlsx', index_col=0, comment='#') # doctest: +SKIP
351
+ Name Value
352
+ 0 string1 1.0
353
+ 1 string2 2.0
354
+ 2 None NaN
355
+ """
356
+ )
357
+
358
+
359
+ @overload
360
+ def read_excel(
361
+ io,
362
+ # sheet name is str or int -> DataFrame
363
+ sheet_name: str | int = ...,
364
+ *,
365
+ header: int | Sequence[int] | None = ...,
366
+ names: list[str] | None = ...,
367
+ index_col: int | Sequence[int] | None = ...,
368
+ usecols: int
369
+ | str
370
+ | Sequence[int]
371
+ | Sequence[str]
372
+ | Callable[[str], bool]
373
+ | None = ...,
374
+ dtype: DtypeArg | None = ...,
375
+ engine: Literal["xlrd", "openpyxl", "odf", "pyxlsb"] | None = ...,
376
+ converters: dict[str, Callable] | dict[int, Callable] | None = ...,
377
+ true_values: Iterable[Hashable] | None = ...,
378
+ false_values: Iterable[Hashable] | None = ...,
379
+ skiprows: Sequence[int] | int | Callable[[int], object] | None = ...,
380
+ nrows: int | None = ...,
381
+ na_values=...,
382
+ keep_default_na: bool = ...,
383
+ na_filter: bool = ...,
384
+ verbose: bool = ...,
385
+ parse_dates: list | dict | bool = ...,
386
+ date_parser: Callable | lib.NoDefault = ...,
387
+ date_format: dict[Hashable, str] | str | None = ...,
388
+ thousands: str | None = ...,
389
+ decimal: str = ...,
390
+ comment: str | None = ...,
391
+ skipfooter: int = ...,
392
+ storage_options: StorageOptions = ...,
393
+ dtype_backend: DtypeBackend | lib.NoDefault = ...,
394
+ ) -> DataFrame:
395
+ ...
396
+
397
+
398
+ @overload
399
+ def read_excel(
400
+ io,
401
+ # sheet name is list or None -> dict[IntStrT, DataFrame]
402
+ sheet_name: list[IntStrT] | None,
403
+ *,
404
+ header: int | Sequence[int] | None = ...,
405
+ names: list[str] | None = ...,
406
+ index_col: int | Sequence[int] | None = ...,
407
+ usecols: int
408
+ | str
409
+ | Sequence[int]
410
+ | Sequence[str]
411
+ | Callable[[str], bool]
412
+ | None = ...,
413
+ dtype: DtypeArg | None = ...,
414
+ engine: Literal["xlrd", "openpyxl", "odf", "pyxlsb"] | None = ...,
415
+ converters: dict[str, Callable] | dict[int, Callable] | None = ...,
416
+ true_values: Iterable[Hashable] | None = ...,
417
+ false_values: Iterable[Hashable] | None = ...,
418
+ skiprows: Sequence[int] | int | Callable[[int], object] | None = ...,
419
+ nrows: int | None = ...,
420
+ na_values=...,
421
+ keep_default_na: bool = ...,
422
+ na_filter: bool = ...,
423
+ verbose: bool = ...,
424
+ parse_dates: list | dict | bool = ...,
425
+ date_parser: Callable | lib.NoDefault = ...,
426
+ date_format: dict[Hashable, str] | str | None = ...,
427
+ thousands: str | None = ...,
428
+ decimal: str = ...,
429
+ comment: str | None = ...,
430
+ skipfooter: int = ...,
431
+ storage_options: StorageOptions = ...,
432
+ dtype_backend: DtypeBackend | lib.NoDefault = ...,
433
+ ) -> dict[IntStrT, DataFrame]:
434
+ ...
435
+
436
+
437
+ @doc(storage_options=_shared_docs["storage_options"])
438
+ @Appender(_read_excel_doc)
439
+ def read_excel(
440
+ io,
441
+ sheet_name: str | int | list[IntStrT] | None = 0,
442
+ *,
443
+ header: int | Sequence[int] | None = 0,
444
+ names: list[str] | None = None,
445
+ index_col: int | Sequence[int] | None = None,
446
+ usecols: int
447
+ | str
448
+ | Sequence[int]
449
+ | Sequence[str]
450
+ | Callable[[str], bool]
451
+ | None = None,
452
+ dtype: DtypeArg | None = None,
453
+ engine: Literal["xlrd", "openpyxl", "odf", "pyxlsb"] | None = None,
454
+ converters: dict[str, Callable] | dict[int, Callable] | None = None,
455
+ true_values: Iterable[Hashable] | None = None,
456
+ false_values: Iterable[Hashable] | None = None,
457
+ skiprows: Sequence[int] | int | Callable[[int], object] | None = None,
458
+ nrows: int | None = None,
459
+ na_values=None,
460
+ keep_default_na: bool = True,
461
+ na_filter: bool = True,
462
+ verbose: bool = False,
463
+ parse_dates: list | dict | bool = False,
464
+ date_parser: Callable | lib.NoDefault = lib.no_default,
465
+ date_format: dict[Hashable, str] | str | None = None,
466
+ thousands: str | None = None,
467
+ decimal: str = ".",
468
+ comment: str | None = None,
469
+ skipfooter: int = 0,
470
+ storage_options: StorageOptions = None,
471
+ dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,
472
+ ) -> DataFrame | dict[IntStrT, DataFrame]:
473
+ check_dtype_backend(dtype_backend)
474
+
475
+ should_close = False
476
+ if not isinstance(io, ExcelFile):
477
+ should_close = True
478
+ io = ExcelFile(io, storage_options=storage_options, engine=engine)
479
+ elif engine and engine != io.engine:
480
+ raise ValueError(
481
+ "Engine should not be specified when passing "
482
+ "an ExcelFile - ExcelFile already has the engine set"
483
+ )
484
+
485
+ try:
486
+ data = io.parse(
487
+ sheet_name=sheet_name,
488
+ header=header,
489
+ names=names,
490
+ index_col=index_col,
491
+ usecols=usecols,
492
+ dtype=dtype,
493
+ converters=converters,
494
+ true_values=true_values,
495
+ false_values=false_values,
496
+ skiprows=skiprows,
497
+ nrows=nrows,
498
+ na_values=na_values,
499
+ keep_default_na=keep_default_na,
500
+ na_filter=na_filter,
501
+ verbose=verbose,
502
+ parse_dates=parse_dates,
503
+ date_parser=date_parser,
504
+ date_format=date_format,
505
+ thousands=thousands,
506
+ decimal=decimal,
507
+ comment=comment,
508
+ skipfooter=skipfooter,
509
+ dtype_backend=dtype_backend,
510
+ )
511
+ finally:
512
+ # make sure to close opened file handles
513
+ if should_close:
514
+ io.close()
515
+ return data
516
+
517
+
518
+ class BaseExcelReader(metaclass=abc.ABCMeta):
519
+ def __init__(
520
+ self, filepath_or_buffer, storage_options: StorageOptions = None
521
+ ) -> None:
522
+ # First argument can also be bytes, so create a buffer
523
+ if isinstance(filepath_or_buffer, bytes):
524
+ filepath_or_buffer = BytesIO(filepath_or_buffer)
525
+
526
+ self.handles = IOHandles(
527
+ handle=filepath_or_buffer, compression={"method": None}
528
+ )
529
+ if not isinstance(filepath_or_buffer, (ExcelFile, self._workbook_class)):
530
+ self.handles = get_handle(
531
+ filepath_or_buffer, "rb", storage_options=storage_options, is_text=False
532
+ )
533
+
534
+ if isinstance(self.handles.handle, self._workbook_class):
535
+ self.book = self.handles.handle
536
+ elif hasattr(self.handles.handle, "read"):
537
+ # N.B. xlrd.Book has a read attribute too
538
+ self.handles.handle.seek(0)
539
+ try:
540
+ self.book = self.load_workbook(self.handles.handle)
541
+ except Exception:
542
+ self.close()
543
+ raise
544
+ else:
545
+ raise ValueError(
546
+ "Must explicitly set engine if not passing in buffer or path for io."
547
+ )
548
+
549
+ @property
550
+ @abc.abstractmethod
551
+ def _workbook_class(self):
552
+ pass
553
+
554
+ @abc.abstractmethod
555
+ def load_workbook(self, filepath_or_buffer):
556
+ pass
557
+
558
+ def close(self) -> None:
559
+ if hasattr(self, "book"):
560
+ if hasattr(self.book, "close"):
561
+ # pyxlsb: opens a TemporaryFile
562
+ # openpyxl: https://stackoverflow.com/questions/31416842/
563
+ # openpyxl-does-not-close-excel-workbook-in-read-only-mode
564
+ self.book.close()
565
+ elif hasattr(self.book, "release_resources"):
566
+ # xlrd
567
+ # https://github.com/python-excel/xlrd/blob/2.0.1/xlrd/book.py#L548
568
+ self.book.release_resources()
569
+ self.handles.close()
570
+
571
+ @property
572
+ @abc.abstractmethod
573
+ def sheet_names(self) -> list[str]:
574
+ pass
575
+
576
+ @abc.abstractmethod
577
+ def get_sheet_by_name(self, name: str):
578
+ pass
579
+
580
+ @abc.abstractmethod
581
+ def get_sheet_by_index(self, index: int):
582
+ pass
583
+
584
+ @abc.abstractmethod
585
+ def get_sheet_data(self, sheet, rows: int | None = None):
586
+ pass
587
+
588
+ def raise_if_bad_sheet_by_index(self, index: int) -> None:
589
+ n_sheets = len(self.sheet_names)
590
+ if index >= n_sheets:
591
+ raise ValueError(
592
+ f"Worksheet index {index} is invalid, {n_sheets} worksheets found"
593
+ )
594
+
595
+ def raise_if_bad_sheet_by_name(self, name: str) -> None:
596
+ if name not in self.sheet_names:
597
+ raise ValueError(f"Worksheet named '{name}' not found")
598
+
599
+ def _check_skiprows_func(
600
+ self,
601
+ skiprows: Callable,
602
+ rows_to_use: int,
603
+ ) -> int:
604
+ """
605
+ Determine how many file rows are required to obtain `nrows` data
606
+ rows when `skiprows` is a function.
607
+
608
+ Parameters
609
+ ----------
610
+ skiprows : function
611
+ The function passed to read_excel by the user.
612
+ rows_to_use : int
613
+ The number of rows that will be needed for the header and
614
+ the data.
615
+
616
+ Returns
617
+ -------
618
+ int
619
+ """
620
+ i = 0
621
+ rows_used_so_far = 0
622
+ while rows_used_so_far < rows_to_use:
623
+ if not skiprows(i):
624
+ rows_used_so_far += 1
625
+ i += 1
626
+ return i
627
+
628
+ def _calc_rows(
629
+ self,
630
+ header: int | Sequence[int] | None,
631
+ index_col: int | Sequence[int] | None,
632
+ skiprows: Sequence[int] | int | Callable[[int], object] | None,
633
+ nrows: int | None,
634
+ ) -> int | None:
635
+ """
636
+ If nrows specified, find the number of rows needed from the
637
+ file, otherwise return None.
638
+
639
+
640
+ Parameters
641
+ ----------
642
+ header : int, list of int, or None
643
+ See read_excel docstring.
644
+ index_col : int, list of int, or None
645
+ See read_excel docstring.
646
+ skiprows : list-like, int, callable, or None
647
+ See read_excel docstring.
648
+ nrows : int or None
649
+ See read_excel docstring.
650
+
651
+ Returns
652
+ -------
653
+ int or None
654
+ """
655
+ if nrows is None:
656
+ return None
657
+ if header is None:
658
+ header_rows = 1
659
+ elif is_integer(header):
660
+ header = cast(int, header)
661
+ header_rows = 1 + header
662
+ else:
663
+ header = cast(Sequence, header)
664
+ header_rows = 1 + header[-1]
665
+ # If there is a MultiIndex header and an index then there is also
666
+ # a row containing just the index name(s)
667
+ if is_list_like(header) and index_col is not None:
668
+ header = cast(Sequence, header)
669
+ if len(header) > 1:
670
+ header_rows += 1
671
+ if skiprows is None:
672
+ return header_rows + nrows
673
+ if is_integer(skiprows):
674
+ skiprows = cast(int, skiprows)
675
+ return header_rows + nrows + skiprows
676
+ if is_list_like(skiprows):
677
+
678
+ def f(skiprows: Sequence, x: int) -> bool:
679
+ return x in skiprows
680
+
681
+ skiprows = cast(Sequence, skiprows)
682
+ return self._check_skiprows_func(partial(f, skiprows), header_rows + nrows)
683
+ if callable(skiprows):
684
+ return self._check_skiprows_func(
685
+ skiprows,
686
+ header_rows + nrows,
687
+ )
688
+ # else unexpected skiprows type: read_excel will not optimize
689
+ # the number of rows read from file
690
+ return None
691
+
692
+ def parse(
693
+ self,
694
+ sheet_name: str | int | list[int] | list[str] | None = 0,
695
+ header: int | Sequence[int] | None = 0,
696
+ names=None,
697
+ index_col: int | Sequence[int] | None = None,
698
+ usecols=None,
699
+ dtype: DtypeArg | None = None,
700
+ true_values: Iterable[Hashable] | None = None,
701
+ false_values: Iterable[Hashable] | None = None,
702
+ skiprows: Sequence[int] | int | Callable[[int], object] | None = None,
703
+ nrows: int | None = None,
704
+ na_values=None,
705
+ verbose: bool = False,
706
+ parse_dates: list | dict | bool = False,
707
+ date_parser: Callable | lib.NoDefault = lib.no_default,
708
+ date_format: dict[Hashable, str] | str | None = None,
709
+ thousands: str | None = None,
710
+ decimal: str = ".",
711
+ comment: str | None = None,
712
+ skipfooter: int = 0,
713
+ dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,
714
+ **kwds,
715
+ ):
716
+ validate_header_arg(header)
717
+ validate_integer("nrows", nrows)
718
+
719
+ ret_dict = False
720
+
721
+ # Keep sheetname to maintain backwards compatibility.
722
+ sheets: list[int] | list[str]
723
+ if isinstance(sheet_name, list):
724
+ sheets = sheet_name
725
+ ret_dict = True
726
+ elif sheet_name is None:
727
+ sheets = self.sheet_names
728
+ ret_dict = True
729
+ elif isinstance(sheet_name, str):
730
+ sheets = [sheet_name]
731
+ else:
732
+ sheets = [sheet_name]
733
+
734
+ # handle same-type duplicates.
735
+ sheets = cast(Union[List[int], List[str]], list(dict.fromkeys(sheets).keys()))
736
+
737
+ output = {}
738
+
739
+ last_sheetname = None
740
+ for asheetname in sheets:
741
+ last_sheetname = asheetname
742
+ if verbose:
743
+ print(f"Reading sheet {asheetname}")
744
+
745
+ if isinstance(asheetname, str):
746
+ sheet = self.get_sheet_by_name(asheetname)
747
+ else: # assume an integer if not a string
748
+ sheet = self.get_sheet_by_index(asheetname)
749
+
750
+ file_rows_needed = self._calc_rows(header, index_col, skiprows, nrows)
751
+ data = self.get_sheet_data(sheet, file_rows_needed)
752
+ if hasattr(sheet, "close"):
753
+ # pyxlsb opens two TemporaryFiles
754
+ sheet.close()
755
+ usecols = maybe_convert_usecols(usecols)
756
+
757
+ if not data:
758
+ output[asheetname] = DataFrame()
759
+ continue
760
+
761
+ is_list_header = False
762
+ is_len_one_list_header = False
763
+ if is_list_like(header):
764
+ assert isinstance(header, Sequence)
765
+ is_list_header = True
766
+ if len(header) == 1:
767
+ is_len_one_list_header = True
768
+
769
+ if is_len_one_list_header:
770
+ header = cast(Sequence[int], header)[0]
771
+
772
+ # forward fill and pull out names for MultiIndex column
773
+ header_names = None
774
+ if header is not None and is_list_like(header):
775
+ assert isinstance(header, Sequence)
776
+
777
+ header_names = []
778
+ control_row = [True] * len(data[0])
779
+
780
+ for row in header:
781
+ if is_integer(skiprows):
782
+ assert isinstance(skiprows, int)
783
+ row += skiprows
784
+
785
+ if row > len(data) - 1:
786
+ raise ValueError(
787
+ f"header index {row} exceeds maximum index "
788
+ f"{len(data) - 1} of data.",
789
+ )
790
+
791
+ data[row], control_row = fill_mi_header(data[row], control_row)
792
+
793
+ if index_col is not None:
794
+ header_name, _ = pop_header_name(data[row], index_col)
795
+ header_names.append(header_name)
796
+
797
+ # If there is a MultiIndex header and an index then there is also
798
+ # a row containing just the index name(s)
799
+ has_index_names = False
800
+ if is_list_header and not is_len_one_list_header and index_col is not None:
801
+ index_col_list: Sequence[int]
802
+ if isinstance(index_col, int):
803
+ index_col_list = [index_col]
804
+ else:
805
+ assert isinstance(index_col, Sequence)
806
+ index_col_list = index_col
807
+
808
+ # We have to handle mi without names. If any of the entries in the data
809
+ # columns are not empty, this is a regular row
810
+ assert isinstance(header, Sequence)
811
+ if len(header) < len(data):
812
+ potential_index_names = data[len(header)]
813
+ potential_data = [
814
+ x
815
+ for i, x in enumerate(potential_index_names)
816
+ if not control_row[i] and i not in index_col_list
817
+ ]
818
+ has_index_names = all(x == "" or x is None for x in potential_data)
819
+
820
+ if is_list_like(index_col):
821
+ # Forward fill values for MultiIndex index.
822
+ if header is None:
823
+ offset = 0
824
+ elif isinstance(header, int):
825
+ offset = 1 + header
826
+ else:
827
+ offset = 1 + max(header)
828
+
829
+ # GH34673: if MultiIndex names present and not defined in the header,
830
+ # offset needs to be incremented so that forward filling starts
831
+ # from the first MI value instead of the name
832
+ if has_index_names:
833
+ offset += 1
834
+
835
+ # Check if we have an empty dataset
836
+ # before trying to collect data.
837
+ if offset < len(data):
838
+ assert isinstance(index_col, Sequence)
839
+
840
+ for col in index_col:
841
+ last = data[offset][col]
842
+
843
+ for row in range(offset + 1, len(data)):
844
+ if data[row][col] == "" or data[row][col] is None:
845
+ data[row][col] = last
846
+ else:
847
+ last = data[row][col]
848
+
849
+ # GH 12292 : error when read one empty column from excel file
850
+ try:
851
+ parser = TextParser(
852
+ data,
853
+ names=names,
854
+ header=header,
855
+ index_col=index_col,
856
+ has_index_names=has_index_names,
857
+ dtype=dtype,
858
+ true_values=true_values,
859
+ false_values=false_values,
860
+ skiprows=skiprows,
861
+ nrows=nrows,
862
+ na_values=na_values,
863
+ skip_blank_lines=False, # GH 39808
864
+ parse_dates=parse_dates,
865
+ date_parser=date_parser,
866
+ date_format=date_format,
867
+ thousands=thousands,
868
+ decimal=decimal,
869
+ comment=comment,
870
+ skipfooter=skipfooter,
871
+ usecols=usecols,
872
+ dtype_backend=dtype_backend,
873
+ **kwds,
874
+ )
875
+
876
+ output[asheetname] = parser.read(nrows=nrows)
877
+
878
+ if header_names:
879
+ output[asheetname].columns = output[asheetname].columns.set_names(
880
+ header_names
881
+ )
882
+
883
+ except EmptyDataError:
884
+ # No Data, return an empty DataFrame
885
+ output[asheetname] = DataFrame()
886
+
887
+ except Exception as err:
888
+ err.args = (f"{err.args[0]} (sheet: {asheetname})", *err.args[1:])
889
+ raise err
890
+
891
+ if last_sheetname is None:
892
+ raise ValueError("Sheet name is an empty list")
893
+
894
+ if ret_dict:
895
+ return output
896
+ else:
897
+ return output[last_sheetname]
898
+
899
+
900
+ @doc(storage_options=_shared_docs["storage_options"])
901
+ class ExcelWriter(metaclass=abc.ABCMeta):
902
+ """
903
+ Class for writing DataFrame objects into excel sheets.
904
+
905
+ Default is to use:
906
+
907
+ * `xlsxwriter <https://pypi.org/project/XlsxWriter/>`__ for xlsx files if xlsxwriter
908
+ is installed otherwise `openpyxl <https://pypi.org/project/openpyxl/>`__
909
+ * `odswriter <https://pypi.org/project/odswriter/>`__ for ods files
910
+
911
+ See ``DataFrame.to_excel`` for typical usage.
912
+
913
+ The writer should be used as a context manager. Otherwise, call `close()` to save
914
+ and close any opened file handles.
915
+
916
+ Parameters
917
+ ----------
918
+ path : str or typing.BinaryIO
919
+ Path to xls or xlsx or ods file.
920
+ engine : str (optional)
921
+ Engine to use for writing. If None, defaults to
922
+ ``io.excel.<extension>.writer``. NOTE: can only be passed as a keyword
923
+ argument.
924
+ date_format : str, default None
925
+ Format string for dates written into Excel files (e.g. 'YYYY-MM-DD').
926
+ datetime_format : str, default None
927
+ Format string for datetime objects written into Excel files.
928
+ (e.g. 'YYYY-MM-DD HH:MM:SS').
929
+ mode : {{'w', 'a'}}, default 'w'
930
+ File mode to use (write or append). Append does not work with fsspec URLs.
931
+ {storage_options}
932
+
933
+ .. versionadded:: 1.2.0
934
+
935
+ if_sheet_exists : {{'error', 'new', 'replace', 'overlay'}}, default 'error'
936
+ How to behave when trying to write to a sheet that already
937
+ exists (append mode only).
938
+
939
+ * error: raise a ValueError.
940
+ * new: Create a new sheet, with a name determined by the engine.
941
+ * replace: Delete the contents of the sheet before writing to it.
942
+ * overlay: Write contents to the existing sheet without removing the old
943
+ contents.
944
+
945
+ .. versionadded:: 1.3.0
946
+
947
+ .. versionchanged:: 1.4.0
948
+
949
+ Added ``overlay`` option
950
+
951
+ engine_kwargs : dict, optional
952
+ Keyword arguments to be passed into the engine. These will be passed to
953
+ the following functions of the respective engines:
954
+
955
+ * xlsxwriter: ``xlsxwriter.Workbook(file, **engine_kwargs)``
956
+ * openpyxl (write mode): ``openpyxl.Workbook(**engine_kwargs)``
957
+ * openpyxl (append mode): ``openpyxl.load_workbook(file, **engine_kwargs)``
958
+ * odswriter: ``odf.opendocument.OpenDocumentSpreadsheet(**engine_kwargs)``
959
+
960
+ .. versionadded:: 1.3.0
961
+
962
+ Notes
963
+ -----
964
+ For compatibility with CSV writers, ExcelWriter serializes lists
965
+ and dicts to strings before writing.
966
+
967
+ Examples
968
+ --------
969
+ Default usage:
970
+
971
+ >>> df = pd.DataFrame([["ABC", "XYZ"]], columns=["Foo", "Bar"]) # doctest: +SKIP
972
+ >>> with pd.ExcelWriter("path_to_file.xlsx") as writer:
973
+ ... df.to_excel(writer) # doctest: +SKIP
974
+
975
+ To write to separate sheets in a single file:
976
+
977
+ >>> df1 = pd.DataFrame([["AAA", "BBB"]], columns=["Spam", "Egg"]) # doctest: +SKIP
978
+ >>> df2 = pd.DataFrame([["ABC", "XYZ"]], columns=["Foo", "Bar"]) # doctest: +SKIP
979
+ >>> with pd.ExcelWriter("path_to_file.xlsx") as writer:
980
+ ... df1.to_excel(writer, sheet_name="Sheet1") # doctest: +SKIP
981
+ ... df2.to_excel(writer, sheet_name="Sheet2") # doctest: +SKIP
982
+
983
+ You can set the date format or datetime format:
984
+
985
+ >>> from datetime import date, datetime # doctest: +SKIP
986
+ >>> df = pd.DataFrame(
987
+ ... [
988
+ ... [date(2014, 1, 31), date(1999, 9, 24)],
989
+ ... [datetime(1998, 5, 26, 23, 33, 4), datetime(2014, 2, 28, 13, 5, 13)],
990
+ ... ],
991
+ ... index=["Date", "Datetime"],
992
+ ... columns=["X", "Y"],
993
+ ... ) # doctest: +SKIP
994
+ >>> with pd.ExcelWriter(
995
+ ... "path_to_file.xlsx",
996
+ ... date_format="YYYY-MM-DD",
997
+ ... datetime_format="YYYY-MM-DD HH:MM:SS"
998
+ ... ) as writer:
999
+ ... df.to_excel(writer) # doctest: +SKIP
1000
+
1001
+ You can also append to an existing Excel file:
1002
+
1003
+ >>> with pd.ExcelWriter("path_to_file.xlsx", mode="a", engine="openpyxl") as writer:
1004
+ ... df.to_excel(writer, sheet_name="Sheet3") # doctest: +SKIP
1005
+
1006
+ Here, the `if_sheet_exists` parameter can be set to replace a sheet if it
1007
+ already exists:
1008
+
1009
+ >>> with ExcelWriter(
1010
+ ... "path_to_file.xlsx",
1011
+ ... mode="a",
1012
+ ... engine="openpyxl",
1013
+ ... if_sheet_exists="replace",
1014
+ ... ) as writer:
1015
+ ... df.to_excel(writer, sheet_name="Sheet1") # doctest: +SKIP
1016
+
1017
+ You can also write multiple DataFrames to a single sheet. Note that the
1018
+ ``if_sheet_exists`` parameter needs to be set to ``overlay``:
1019
+
1020
+ >>> with ExcelWriter("path_to_file.xlsx",
1021
+ ... mode="a",
1022
+ ... engine="openpyxl",
1023
+ ... if_sheet_exists="overlay",
1024
+ ... ) as writer:
1025
+ ... df1.to_excel(writer, sheet_name="Sheet1")
1026
+ ... df2.to_excel(writer, sheet_name="Sheet1", startcol=3) # doctest: +SKIP
1027
+
1028
+ You can store Excel file in RAM:
1029
+
1030
+ >>> import io
1031
+ >>> df = pd.DataFrame([["ABC", "XYZ"]], columns=["Foo", "Bar"])
1032
+ >>> buffer = io.BytesIO()
1033
+ >>> with pd.ExcelWriter(buffer) as writer:
1034
+ ... df.to_excel(writer)
1035
+
1036
+ You can pack Excel file into zip archive:
1037
+
1038
+ >>> import zipfile # doctest: +SKIP
1039
+ >>> df = pd.DataFrame([["ABC", "XYZ"]], columns=["Foo", "Bar"]) # doctest: +SKIP
1040
+ >>> with zipfile.ZipFile("path_to_file.zip", "w") as zf:
1041
+ ... with zf.open("filename.xlsx", "w") as buffer:
1042
+ ... with pd.ExcelWriter(buffer) as writer:
1043
+ ... df.to_excel(writer) # doctest: +SKIP
1044
+
1045
+ You can specify additional arguments to the underlying engine:
1046
+
1047
+ >>> with pd.ExcelWriter(
1048
+ ... "path_to_file.xlsx",
1049
+ ... engine="xlsxwriter",
1050
+ ... engine_kwargs={{"options": {{"nan_inf_to_errors": True}}}}
1051
+ ... ) as writer:
1052
+ ... df.to_excel(writer) # doctest: +SKIP
1053
+
1054
+ In append mode, ``engine_kwargs`` are passed through to
1055
+ openpyxl's ``load_workbook``:
1056
+
1057
+ >>> with pd.ExcelWriter(
1058
+ ... "path_to_file.xlsx",
1059
+ ... engine="openpyxl",
1060
+ ... mode="a",
1061
+ ... engine_kwargs={{"keep_vba": True}}
1062
+ ... ) as writer:
1063
+ ... df.to_excel(writer, sheet_name="Sheet2") # doctest: +SKIP
1064
+ """
1065
+
1066
+ # Defining an ExcelWriter implementation (see abstract methods for more...)
1067
+
1068
+ # - Mandatory
1069
+ # - ``write_cells(self, cells, sheet_name=None, startrow=0, startcol=0)``
1070
+ # --> called to write additional DataFrames to disk
1071
+ # - ``_supported_extensions`` (tuple of supported extensions), used to
1072
+ # check that engine supports the given extension.
1073
+ # - ``_engine`` - string that gives the engine name. Necessary to
1074
+ # instantiate class directly and bypass ``ExcelWriterMeta`` engine
1075
+ # lookup.
1076
+ # - ``save(self)`` --> called to save file to disk
1077
+ # - Mostly mandatory (i.e. should at least exist)
1078
+ # - book, cur_sheet, path
1079
+
1080
+ # - Optional:
1081
+ # - ``__init__(self, path, engine=None, **kwargs)`` --> always called
1082
+ # with path as first argument.
1083
+
1084
+ # You also need to register the class with ``register_writer()``.
1085
+ # Technically, ExcelWriter implementations don't need to subclass
1086
+ # ExcelWriter.
1087
+
1088
+ _engine: str
1089
+ _supported_extensions: tuple[str, ...]
1090
+
1091
+ def __new__(
1092
+ cls: type[ExcelWriter],
1093
+ path: FilePath | WriteExcelBuffer | ExcelWriter,
1094
+ engine: str | None = None,
1095
+ date_format: str | None = None,
1096
+ datetime_format: str | None = None,
1097
+ mode: str = "w",
1098
+ storage_options: StorageOptions = None,
1099
+ if_sheet_exists: Literal["error", "new", "replace", "overlay"] | None = None,
1100
+ engine_kwargs: dict | None = None,
1101
+ ) -> ExcelWriter:
1102
+ # only switch class if generic(ExcelWriter)
1103
+ if cls is ExcelWriter:
1104
+ if engine is None or (isinstance(engine, str) and engine == "auto"):
1105
+ if isinstance(path, str):
1106
+ ext = os.path.splitext(path)[-1][1:]
1107
+ else:
1108
+ ext = "xlsx"
1109
+
1110
+ try:
1111
+ engine = config.get_option(f"io.excel.{ext}.writer", silent=True)
1112
+ if engine == "auto":
1113
+ engine = get_default_engine(ext, mode="writer")
1114
+ except KeyError as err:
1115
+ raise ValueError(f"No engine for filetype: '{ext}'") from err
1116
+
1117
+ # for mypy
1118
+ assert engine is not None
1119
+ cls = get_writer(engine)
1120
+
1121
+ return object.__new__(cls)
1122
+
1123
+ # declare external properties you can count on
1124
+ _path = None
1125
+
1126
+ @property
1127
+ def supported_extensions(self) -> tuple[str, ...]:
1128
+ """Extensions that writer engine supports."""
1129
+ return self._supported_extensions
1130
+
1131
+ @property
1132
+ def engine(self) -> str:
1133
+ """Name of engine."""
1134
+ return self._engine
1135
+
1136
+ @property
1137
+ @abc.abstractmethod
1138
+ def sheets(self) -> dict[str, Any]:
1139
+ """Mapping of sheet names to sheet objects."""
1140
+
1141
+ @property
1142
+ @abc.abstractmethod
1143
+ def book(self):
1144
+ """
1145
+ Book instance. Class type will depend on the engine used.
1146
+
1147
+ This attribute can be used to access engine-specific features.
1148
+ """
1149
+
1150
+ @abc.abstractmethod
1151
+ def _write_cells(
1152
+ self,
1153
+ cells,
1154
+ sheet_name: str | None = None,
1155
+ startrow: int = 0,
1156
+ startcol: int = 0,
1157
+ freeze_panes: tuple[int, int] | None = None,
1158
+ ) -> None:
1159
+ """
1160
+ Write given formatted cells into Excel an excel sheet
1161
+
1162
+ Parameters
1163
+ ----------
1164
+ cells : generator
1165
+ cell of formatted data to save to Excel sheet
1166
+ sheet_name : str, default None
1167
+ Name of Excel sheet, if None, then use self.cur_sheet
1168
+ startrow : upper left cell row to dump data frame
1169
+ startcol : upper left cell column to dump data frame
1170
+ freeze_panes: int tuple of length 2
1171
+ contains the bottom-most row and right-most column to freeze
1172
+ """
1173
+
1174
+ @abc.abstractmethod
1175
+ def _save(self) -> None:
1176
+ """
1177
+ Save workbook to disk.
1178
+ """
1179
+
1180
+ def __init__(
1181
+ self,
1182
+ path: FilePath | WriteExcelBuffer | ExcelWriter,
1183
+ engine: str | None = None,
1184
+ date_format: str | None = None,
1185
+ datetime_format: str | None = None,
1186
+ mode: str = "w",
1187
+ storage_options: StorageOptions = None,
1188
+ if_sheet_exists: str | None = None,
1189
+ engine_kwargs: dict[str, Any] | None = None,
1190
+ ) -> None:
1191
+ # validate that this engine can handle the extension
1192
+ if isinstance(path, str):
1193
+ ext = os.path.splitext(path)[-1]
1194
+ self.check_extension(ext)
1195
+
1196
+ # use mode to open the file
1197
+ if "b" not in mode:
1198
+ mode += "b"
1199
+ # use "a" for the user to append data to excel but internally use "r+" to let
1200
+ # the excel backend first read the existing file and then write any data to it
1201
+ mode = mode.replace("a", "r+")
1202
+
1203
+ if if_sheet_exists not in (None, "error", "new", "replace", "overlay"):
1204
+ raise ValueError(
1205
+ f"'{if_sheet_exists}' is not valid for if_sheet_exists. "
1206
+ "Valid options are 'error', 'new', 'replace' and 'overlay'."
1207
+ )
1208
+ if if_sheet_exists and "r+" not in mode:
1209
+ raise ValueError("if_sheet_exists is only valid in append mode (mode='a')")
1210
+ if if_sheet_exists is None:
1211
+ if_sheet_exists = "error"
1212
+ self._if_sheet_exists = if_sheet_exists
1213
+
1214
+ # cast ExcelWriter to avoid adding 'if self._handles is not None'
1215
+ self._handles = IOHandles(
1216
+ cast(IO[bytes], path), compression={"compression": None}
1217
+ )
1218
+ if not isinstance(path, ExcelWriter):
1219
+ self._handles = get_handle(
1220
+ path, mode, storage_options=storage_options, is_text=False
1221
+ )
1222
+ self._cur_sheet = None
1223
+
1224
+ if date_format is None:
1225
+ self._date_format = "YYYY-MM-DD"
1226
+ else:
1227
+ self._date_format = date_format
1228
+ if datetime_format is None:
1229
+ self._datetime_format = "YYYY-MM-DD HH:MM:SS"
1230
+ else:
1231
+ self._datetime_format = datetime_format
1232
+
1233
+ self._mode = mode
1234
+
1235
+ @property
1236
+ def date_format(self) -> str:
1237
+ """
1238
+ Format string for dates written into Excel files (e.g. ‘YYYY-MM-DD’).
1239
+ """
1240
+ return self._date_format
1241
+
1242
+ @property
1243
+ def datetime_format(self) -> str:
1244
+ """
1245
+ Format string for dates written into Excel files (e.g. ‘YYYY-MM-DD’).
1246
+ """
1247
+ return self._datetime_format
1248
+
1249
+ @property
1250
+ def if_sheet_exists(self) -> str:
1251
+ """
1252
+ How to behave when writing to a sheet that already exists in append mode.
1253
+ """
1254
+ return self._if_sheet_exists
1255
+
1256
+ def __fspath__(self) -> str:
1257
+ return getattr(self._handles.handle, "name", "")
1258
+
1259
+ def _get_sheet_name(self, sheet_name: str | None) -> str:
1260
+ if sheet_name is None:
1261
+ sheet_name = self._cur_sheet
1262
+ if sheet_name is None: # pragma: no cover
1263
+ raise ValueError("Must pass explicit sheet_name or set _cur_sheet property")
1264
+ return sheet_name
1265
+
1266
+ def _value_with_fmt(self, val) -> tuple[object, str | None]:
1267
+ """
1268
+ Convert numpy types to Python types for the Excel writers.
1269
+
1270
+ Parameters
1271
+ ----------
1272
+ val : object
1273
+ Value to be written into cells
1274
+
1275
+ Returns
1276
+ -------
1277
+ Tuple with the first element being the converted value and the second
1278
+ being an optional format
1279
+ """
1280
+ fmt = None
1281
+
1282
+ if is_integer(val):
1283
+ val = int(val)
1284
+ elif is_float(val):
1285
+ val = float(val)
1286
+ elif is_bool(val):
1287
+ val = bool(val)
1288
+ elif isinstance(val, datetime.datetime):
1289
+ fmt = self._datetime_format
1290
+ elif isinstance(val, datetime.date):
1291
+ fmt = self._date_format
1292
+ elif isinstance(val, datetime.timedelta):
1293
+ val = val.total_seconds() / 86400
1294
+ fmt = "0"
1295
+ else:
1296
+ val = str(val)
1297
+
1298
+ return val, fmt
1299
+
1300
+ @classmethod
1301
+ def check_extension(cls, ext: str) -> Literal[True]:
1302
+ """
1303
+ checks that path's extension against the Writer's supported
1304
+ extensions. If it isn't supported, raises UnsupportedFiletypeError.
1305
+ """
1306
+ if ext.startswith("."):
1307
+ ext = ext[1:]
1308
+ if not any(ext in extension for extension in cls._supported_extensions):
1309
+ raise ValueError(f"Invalid extension for engine '{cls.engine}': '{ext}'")
1310
+ return True
1311
+
1312
+ # Allow use as a contextmanager
1313
+ def __enter__(self) -> ExcelWriter:
1314
+ return self
1315
+
1316
+ def __exit__(
1317
+ self,
1318
+ exc_type: type[BaseException] | None,
1319
+ exc_value: BaseException | None,
1320
+ traceback: TracebackType | None,
1321
+ ) -> None:
1322
+ self.close()
1323
+
1324
+ def close(self) -> None:
1325
+ """synonym for save, to make it more file-like"""
1326
+ self._save()
1327
+ self._handles.close()
1328
+
1329
+
1330
+ XLS_SIGNATURES = (
1331
+ b"\x09\x00\x04\x00\x07\x00\x10\x00", # BIFF2
1332
+ b"\x09\x02\x06\x00\x00\x00\x10\x00", # BIFF3
1333
+ b"\x09\x04\x06\x00\x00\x00\x10\x00", # BIFF4
1334
+ b"\xD0\xCF\x11\xE0\xA1\xB1\x1A\xE1", # Compound File Binary
1335
+ )
1336
+ ZIP_SIGNATURE = b"PK\x03\x04"
1337
+ PEEK_SIZE = max(map(len, XLS_SIGNATURES + (ZIP_SIGNATURE,)))
1338
+
1339
+
1340
+ @doc(storage_options=_shared_docs["storage_options"])
1341
+ def inspect_excel_format(
1342
+ content_or_path: FilePath | ReadBuffer[bytes],
1343
+ storage_options: StorageOptions = None,
1344
+ ) -> str | None:
1345
+ """
1346
+ Inspect the path or content of an excel file and get its format.
1347
+
1348
+ Adopted from xlrd: https://github.com/python-excel/xlrd.
1349
+
1350
+ Parameters
1351
+ ----------
1352
+ content_or_path : str or file-like object
1353
+ Path to file or content of file to inspect. May be a URL.
1354
+ {storage_options}
1355
+
1356
+ Returns
1357
+ -------
1358
+ str or None
1359
+ Format of file if it can be determined.
1360
+
1361
+ Raises
1362
+ ------
1363
+ ValueError
1364
+ If resulting stream is empty.
1365
+ BadZipFile
1366
+ If resulting stream does not have an XLS signature and is not a valid zipfile.
1367
+ """
1368
+ if isinstance(content_or_path, bytes):
1369
+ content_or_path = BytesIO(content_or_path)
1370
+
1371
+ with get_handle(
1372
+ content_or_path, "rb", storage_options=storage_options, is_text=False
1373
+ ) as handle:
1374
+ stream = handle.handle
1375
+ stream.seek(0)
1376
+ buf = stream.read(PEEK_SIZE)
1377
+ if buf is None:
1378
+ raise ValueError("stream is empty")
1379
+ assert isinstance(buf, bytes)
1380
+ peek = buf
1381
+ stream.seek(0)
1382
+
1383
+ if any(peek.startswith(sig) for sig in XLS_SIGNATURES):
1384
+ return "xls"
1385
+ elif not peek.startswith(ZIP_SIGNATURE):
1386
+ return None
1387
+
1388
+ with zipfile.ZipFile(stream) as zf:
1389
+ # Workaround for some third party files that use forward slashes and
1390
+ # lower case names.
1391
+ component_names = [
1392
+ name.replace("\\", "/").lower() for name in zf.namelist()
1393
+ ]
1394
+
1395
+ if "xl/workbook.xml" in component_names:
1396
+ return "xlsx"
1397
+ if "xl/workbook.bin" in component_names:
1398
+ return "xlsb"
1399
+ if "content.xml" in component_names:
1400
+ return "ods"
1401
+ return "zip"
1402
+
1403
+
1404
+ class ExcelFile:
1405
+ """
1406
+ Class for parsing tabular Excel sheets into DataFrame objects.
1407
+
1408
+ See read_excel for more documentation.
1409
+
1410
+ Parameters
1411
+ ----------
1412
+ path_or_buffer : str, bytes, path object (pathlib.Path or py._path.local.LocalPath),
1413
+ A file-like object, xlrd workbook or openpyxl workbook.
1414
+ If a string or path object, expected to be a path to a
1415
+ .xls, .xlsx, .xlsb, .xlsm, .odf, .ods, or .odt file.
1416
+ engine : str, default None
1417
+ If io is not a buffer or path, this must be set to identify io.
1418
+ Supported engines: ``xlrd``, ``openpyxl``, ``odf``, ``pyxlsb``
1419
+ Engine compatibility :
1420
+
1421
+ - ``xlrd`` supports old-style Excel files (.xls).
1422
+ - ``openpyxl`` supports newer Excel file formats.
1423
+ - ``odf`` supports OpenDocument file formats (.odf, .ods, .odt).
1424
+ - ``pyxlsb`` supports Binary Excel files.
1425
+
1426
+ .. versionchanged:: 1.2.0
1427
+
1428
+ The engine `xlrd <https://xlrd.readthedocs.io/en/latest/>`_
1429
+ now only supports old-style ``.xls`` files.
1430
+ When ``engine=None``, the following logic will be
1431
+ used to determine the engine:
1432
+
1433
+ - If ``path_or_buffer`` is an OpenDocument format (.odf, .ods, .odt),
1434
+ then `odf <https://pypi.org/project/odfpy/>`_ will be used.
1435
+ - Otherwise if ``path_or_buffer`` is an xls format,
1436
+ ``xlrd`` will be used.
1437
+ - Otherwise if ``path_or_buffer`` is in xlsb format,
1438
+ `pyxlsb <https://pypi.org/project/pyxlsb/>`_ will be used.
1439
+
1440
+ .. versionadded:: 1.3.0
1441
+
1442
+ - Otherwise if `openpyxl <https://pypi.org/project/openpyxl/>`_ is installed,
1443
+ then ``openpyxl`` will be used.
1444
+ - Otherwise if ``xlrd >= 2.0`` is installed, a ``ValueError`` will be raised.
1445
+
1446
+ .. warning::
1447
+
1448
+ Please do not report issues when using ``xlrd`` to read ``.xlsx`` files.
1449
+ This is not supported, switch to using ``openpyxl`` instead.
1450
+ """
1451
+
1452
+ from pandas.io.excel._odfreader import ODFReader
1453
+ from pandas.io.excel._openpyxl import OpenpyxlReader
1454
+ from pandas.io.excel._pyxlsb import PyxlsbReader
1455
+ from pandas.io.excel._xlrd import XlrdReader
1456
+
1457
+ _engines: Mapping[str, Any] = {
1458
+ "xlrd": XlrdReader,
1459
+ "openpyxl": OpenpyxlReader,
1460
+ "odf": ODFReader,
1461
+ "pyxlsb": PyxlsbReader,
1462
+ }
1463
+
1464
+ def __init__(
1465
+ self,
1466
+ path_or_buffer,
1467
+ engine: str | None = None,
1468
+ storage_options: StorageOptions = None,
1469
+ ) -> None:
1470
+ if engine is not None and engine not in self._engines:
1471
+ raise ValueError(f"Unknown engine: {engine}")
1472
+
1473
+ # First argument can also be bytes, so create a buffer
1474
+ if isinstance(path_or_buffer, bytes):
1475
+ path_or_buffer = BytesIO(path_or_buffer)
1476
+
1477
+ # Could be a str, ExcelFile, Book, etc.
1478
+ self.io = path_or_buffer
1479
+ # Always a string
1480
+ self._io = stringify_path(path_or_buffer)
1481
+
1482
+ # Determine xlrd version if installed
1483
+ if import_optional_dependency("xlrd", errors="ignore") is None:
1484
+ xlrd_version = None
1485
+ else:
1486
+ import xlrd
1487
+
1488
+ xlrd_version = Version(get_version(xlrd))
1489
+
1490
+ if engine is None:
1491
+ # Only determine ext if it is needed
1492
+ ext: str | None
1493
+ if xlrd_version is not None and isinstance(path_or_buffer, xlrd.Book):
1494
+ ext = "xls"
1495
+ else:
1496
+ ext = inspect_excel_format(
1497
+ content_or_path=path_or_buffer, storage_options=storage_options
1498
+ )
1499
+ if ext is None:
1500
+ raise ValueError(
1501
+ "Excel file format cannot be determined, you must specify "
1502
+ "an engine manually."
1503
+ )
1504
+
1505
+ engine = config.get_option(f"io.excel.{ext}.reader", silent=True)
1506
+ if engine == "auto":
1507
+ engine = get_default_engine(ext, mode="reader")
1508
+
1509
+ assert engine is not None
1510
+ self.engine = engine
1511
+ self.storage_options = storage_options
1512
+
1513
+ self._reader = self._engines[engine](self._io, storage_options=storage_options)
1514
+
1515
+ def __fspath__(self):
1516
+ return self._io
1517
+
1518
+ def parse(
1519
+ self,
1520
+ sheet_name: str | int | list[int] | list[str] | None = 0,
1521
+ header: int | Sequence[int] | None = 0,
1522
+ names=None,
1523
+ index_col: int | Sequence[int] | None = None,
1524
+ usecols=None,
1525
+ converters=None,
1526
+ true_values: Iterable[Hashable] | None = None,
1527
+ false_values: Iterable[Hashable] | None = None,
1528
+ skiprows: Sequence[int] | int | Callable[[int], object] | None = None,
1529
+ nrows: int | None = None,
1530
+ na_values=None,
1531
+ parse_dates: list | dict | bool = False,
1532
+ date_parser: Callable | lib.NoDefault = lib.no_default,
1533
+ date_format: str | dict[Hashable, str] | None = None,
1534
+ thousands: str | None = None,
1535
+ comment: str | None = None,
1536
+ skipfooter: int = 0,
1537
+ dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,
1538
+ **kwds,
1539
+ ) -> DataFrame | dict[str, DataFrame] | dict[int, DataFrame]:
1540
+ """
1541
+ Parse specified sheet(s) into a DataFrame.
1542
+
1543
+ Equivalent to read_excel(ExcelFile, ...) See the read_excel
1544
+ docstring for more info on accepted parameters.
1545
+
1546
+ Returns
1547
+ -------
1548
+ DataFrame or dict of DataFrames
1549
+ DataFrame from the passed in Excel file.
1550
+ """
1551
+ return self._reader.parse(
1552
+ sheet_name=sheet_name,
1553
+ header=header,
1554
+ names=names,
1555
+ index_col=index_col,
1556
+ usecols=usecols,
1557
+ converters=converters,
1558
+ true_values=true_values,
1559
+ false_values=false_values,
1560
+ skiprows=skiprows,
1561
+ nrows=nrows,
1562
+ na_values=na_values,
1563
+ parse_dates=parse_dates,
1564
+ date_parser=date_parser,
1565
+ date_format=date_format,
1566
+ thousands=thousands,
1567
+ comment=comment,
1568
+ skipfooter=skipfooter,
1569
+ dtype_backend=dtype_backend,
1570
+ **kwds,
1571
+ )
1572
+
1573
+ @property
1574
+ def book(self):
1575
+ return self._reader.book
1576
+
1577
+ @property
1578
+ def sheet_names(self):
1579
+ return self._reader.sheet_names
1580
+
1581
+ def close(self) -> None:
1582
+ """close io if necessary"""
1583
+ self._reader.close()
1584
+
1585
+ def __enter__(self) -> ExcelFile:
1586
+ return self
1587
+
1588
+ def __exit__(
1589
+ self,
1590
+ exc_type: type[BaseException] | None,
1591
+ exc_value: BaseException | None,
1592
+ traceback: TracebackType | None,
1593
+ ) -> None:
1594
+ self.close()
videochat2/lib/python3.10/site-packages/pandas/io/excel/_odfreader.py ADDED
@@ -0,0 +1,249 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ from typing import (
4
+ TYPE_CHECKING,
5
+ cast,
6
+ )
7
+
8
+ import numpy as np
9
+
10
+ from pandas._typing import (
11
+ FilePath,
12
+ ReadBuffer,
13
+ Scalar,
14
+ StorageOptions,
15
+ )
16
+ from pandas.compat._optional import import_optional_dependency
17
+ from pandas.util._decorators import doc
18
+
19
+ import pandas as pd
20
+ from pandas.core.shared_docs import _shared_docs
21
+
22
+ from pandas.io.excel._base import BaseExcelReader
23
+
24
+ if TYPE_CHECKING:
25
+ from pandas._libs.tslibs.nattype import NaTType
26
+
27
+
28
+ @doc(storage_options=_shared_docs["storage_options"])
29
+ class ODFReader(BaseExcelReader):
30
+ def __init__(
31
+ self,
32
+ filepath_or_buffer: FilePath | ReadBuffer[bytes],
33
+ storage_options: StorageOptions = None,
34
+ ) -> None:
35
+ """
36
+ Read tables out of OpenDocument formatted files.
37
+
38
+ Parameters
39
+ ----------
40
+ filepath_or_buffer : str, path to be parsed or
41
+ an open readable stream.
42
+ {storage_options}
43
+ """
44
+ import_optional_dependency("odf")
45
+ super().__init__(filepath_or_buffer, storage_options=storage_options)
46
+
47
+ @property
48
+ def _workbook_class(self):
49
+ from odf.opendocument import OpenDocument
50
+
51
+ return OpenDocument
52
+
53
+ def load_workbook(self, filepath_or_buffer: FilePath | ReadBuffer[bytes]):
54
+ from odf.opendocument import load
55
+
56
+ return load(filepath_or_buffer)
57
+
58
+ @property
59
+ def empty_value(self) -> str:
60
+ """Property for compat with other readers."""
61
+ return ""
62
+
63
+ @property
64
+ def sheet_names(self) -> list[str]:
65
+ """Return a list of sheet names present in the document"""
66
+ from odf.table import Table
67
+
68
+ tables = self.book.getElementsByType(Table)
69
+ return [t.getAttribute("name") for t in tables]
70
+
71
+ def get_sheet_by_index(self, index: int):
72
+ from odf.table import Table
73
+
74
+ self.raise_if_bad_sheet_by_index(index)
75
+ tables = self.book.getElementsByType(Table)
76
+ return tables[index]
77
+
78
+ def get_sheet_by_name(self, name: str):
79
+ from odf.table import Table
80
+
81
+ self.raise_if_bad_sheet_by_name(name)
82
+ tables = self.book.getElementsByType(Table)
83
+
84
+ for table in tables:
85
+ if table.getAttribute("name") == name:
86
+ return table
87
+
88
+ self.close()
89
+ raise ValueError(f"sheet {name} not found")
90
+
91
+ def get_sheet_data(
92
+ self, sheet, file_rows_needed: int | None = None
93
+ ) -> list[list[Scalar | NaTType]]:
94
+ """
95
+ Parse an ODF Table into a list of lists
96
+ """
97
+ from odf.table import (
98
+ CoveredTableCell,
99
+ TableCell,
100
+ TableRow,
101
+ )
102
+
103
+ covered_cell_name = CoveredTableCell().qname
104
+ table_cell_name = TableCell().qname
105
+ cell_names = {covered_cell_name, table_cell_name}
106
+
107
+ sheet_rows = sheet.getElementsByType(TableRow)
108
+ empty_rows = 0
109
+ max_row_len = 0
110
+
111
+ table: list[list[Scalar | NaTType]] = []
112
+
113
+ for sheet_row in sheet_rows:
114
+ sheet_cells = [
115
+ x
116
+ for x in sheet_row.childNodes
117
+ if hasattr(x, "qname") and x.qname in cell_names
118
+ ]
119
+ empty_cells = 0
120
+ table_row: list[Scalar | NaTType] = []
121
+
122
+ for sheet_cell in sheet_cells:
123
+ if sheet_cell.qname == table_cell_name:
124
+ value = self._get_cell_value(sheet_cell)
125
+ else:
126
+ value = self.empty_value
127
+
128
+ column_repeat = self._get_column_repeat(sheet_cell)
129
+
130
+ # Queue up empty values, writing only if content succeeds them
131
+ if value == self.empty_value:
132
+ empty_cells += column_repeat
133
+ else:
134
+ table_row.extend([self.empty_value] * empty_cells)
135
+ empty_cells = 0
136
+ table_row.extend([value] * column_repeat)
137
+
138
+ if max_row_len < len(table_row):
139
+ max_row_len = len(table_row)
140
+
141
+ row_repeat = self._get_row_repeat(sheet_row)
142
+ if self._is_empty_row(sheet_row):
143
+ empty_rows += row_repeat
144
+ else:
145
+ # add blank rows to our table
146
+ table.extend([[self.empty_value]] * empty_rows)
147
+ empty_rows = 0
148
+ for _ in range(row_repeat):
149
+ table.append(table_row)
150
+ if file_rows_needed is not None and len(table) >= file_rows_needed:
151
+ break
152
+
153
+ # Make our table square
154
+ for row in table:
155
+ if len(row) < max_row_len:
156
+ row.extend([self.empty_value] * (max_row_len - len(row)))
157
+
158
+ return table
159
+
160
+ def _get_row_repeat(self, row) -> int:
161
+ """
162
+ Return number of times this row was repeated
163
+ Repeating an empty row appeared to be a common way
164
+ of representing sparse rows in the table.
165
+ """
166
+ from odf.namespaces import TABLENS
167
+
168
+ return int(row.attributes.get((TABLENS, "number-rows-repeated"), 1))
169
+
170
+ def _get_column_repeat(self, cell) -> int:
171
+ from odf.namespaces import TABLENS
172
+
173
+ return int(cell.attributes.get((TABLENS, "number-columns-repeated"), 1))
174
+
175
+ def _is_empty_row(self, row) -> bool:
176
+ """
177
+ Helper function to find empty rows
178
+ """
179
+ for column in row.childNodes:
180
+ if len(column.childNodes) > 0:
181
+ return False
182
+
183
+ return True
184
+
185
+ def _get_cell_value(self, cell) -> Scalar | NaTType:
186
+ from odf.namespaces import OFFICENS
187
+
188
+ if str(cell) == "#N/A":
189
+ return np.nan
190
+
191
+ cell_type = cell.attributes.get((OFFICENS, "value-type"))
192
+ if cell_type == "boolean":
193
+ if str(cell) == "TRUE":
194
+ return True
195
+ return False
196
+ if cell_type is None:
197
+ return self.empty_value
198
+ elif cell_type == "float":
199
+ # GH5394
200
+ cell_value = float(cell.attributes.get((OFFICENS, "value")))
201
+ val = int(cell_value)
202
+ if val == cell_value:
203
+ return val
204
+ return cell_value
205
+ elif cell_type == "percentage":
206
+ cell_value = cell.attributes.get((OFFICENS, "value"))
207
+ return float(cell_value)
208
+ elif cell_type == "string":
209
+ return self._get_cell_string_value(cell)
210
+ elif cell_type == "currency":
211
+ cell_value = cell.attributes.get((OFFICENS, "value"))
212
+ return float(cell_value)
213
+ elif cell_type == "date":
214
+ cell_value = cell.attributes.get((OFFICENS, "date-value"))
215
+ return pd.Timestamp(cell_value)
216
+ elif cell_type == "time":
217
+ stamp = pd.Timestamp(str(cell))
218
+ # cast needed here because Scalar doesn't include datetime.time
219
+ return cast(Scalar, stamp.time())
220
+ else:
221
+ self.close()
222
+ raise ValueError(f"Unrecognized type {cell_type}")
223
+
224
+ def _get_cell_string_value(self, cell) -> str:
225
+ """
226
+ Find and decode OpenDocument text:s tags that represent
227
+ a run length encoded sequence of space characters.
228
+ """
229
+ from odf.element import Element
230
+ from odf.namespaces import TEXTNS
231
+ from odf.text import S
232
+
233
+ text_s = S().qname
234
+
235
+ value = []
236
+
237
+ for fragment in cell.childNodes:
238
+ if isinstance(fragment, Element):
239
+ if fragment.qname == text_s:
240
+ spaces = int(fragment.attributes.get((TEXTNS, "c"), 1))
241
+ value.append(" " * spaces)
242
+ else:
243
+ # recursive impl needed in case of nested fragments
244
+ # with multiple spaces
245
+ # https://github.com/pandas-dev/pandas/pull/36175#discussion_r484639704
246
+ value.append(self._get_cell_string_value(fragment))
247
+ else:
248
+ value.append(str(fragment).strip("\n"))
249
+ return "".join(value)
videochat2/lib/python3.10/site-packages/pandas/io/excel/_odswriter.py ADDED
@@ -0,0 +1,337 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ from collections import defaultdict
4
+ import datetime
5
+ from typing import (
6
+ TYPE_CHECKING,
7
+ Any,
8
+ DefaultDict,
9
+ Tuple,
10
+ cast,
11
+ )
12
+
13
+ from pandas._libs import json
14
+ from pandas._typing import (
15
+ FilePath,
16
+ StorageOptions,
17
+ WriteExcelBuffer,
18
+ )
19
+
20
+ from pandas.io.excel._base import ExcelWriter
21
+ from pandas.io.excel._util import (
22
+ combine_kwargs,
23
+ validate_freeze_panes,
24
+ )
25
+
26
+ if TYPE_CHECKING:
27
+ from pandas.io.formats.excel import ExcelCell
28
+
29
+
30
+ class ODSWriter(ExcelWriter):
31
+ _engine = "odf"
32
+ _supported_extensions = (".ods",)
33
+
34
+ def __init__(
35
+ self,
36
+ path: FilePath | WriteExcelBuffer | ExcelWriter,
37
+ engine: str | None = None,
38
+ date_format: str | None = None,
39
+ datetime_format=None,
40
+ mode: str = "w",
41
+ storage_options: StorageOptions = None,
42
+ if_sheet_exists: str | None = None,
43
+ engine_kwargs: dict[str, Any] | None = None,
44
+ **kwargs,
45
+ ) -> None:
46
+ from odf.opendocument import OpenDocumentSpreadsheet
47
+
48
+ if mode == "a":
49
+ raise ValueError("Append mode is not supported with odf!")
50
+
51
+ engine_kwargs = combine_kwargs(engine_kwargs, kwargs)
52
+ self._book = OpenDocumentSpreadsheet(**engine_kwargs)
53
+
54
+ super().__init__(
55
+ path,
56
+ mode=mode,
57
+ storage_options=storage_options,
58
+ if_sheet_exists=if_sheet_exists,
59
+ engine_kwargs=engine_kwargs,
60
+ )
61
+
62
+ self._style_dict: dict[str, str] = {}
63
+
64
+ @property
65
+ def book(self):
66
+ """
67
+ Book instance of class odf.opendocument.OpenDocumentSpreadsheet.
68
+
69
+ This attribute can be used to access engine-specific features.
70
+ """
71
+ return self._book
72
+
73
+ @property
74
+ def sheets(self) -> dict[str, Any]:
75
+ """Mapping of sheet names to sheet objects."""
76
+ from odf.table import Table
77
+
78
+ result = {
79
+ sheet.getAttribute("name"): sheet
80
+ for sheet in self.book.getElementsByType(Table)
81
+ }
82
+ return result
83
+
84
+ def _save(self) -> None:
85
+ """
86
+ Save workbook to disk.
87
+ """
88
+ for sheet in self.sheets.values():
89
+ self.book.spreadsheet.addElement(sheet)
90
+ self.book.save(self._handles.handle)
91
+
92
+ def _write_cells(
93
+ self,
94
+ cells: list[ExcelCell],
95
+ sheet_name: str | None = None,
96
+ startrow: int = 0,
97
+ startcol: int = 0,
98
+ freeze_panes: tuple[int, int] | None = None,
99
+ ) -> None:
100
+ """
101
+ Write the frame cells using odf
102
+ """
103
+ from odf.table import (
104
+ Table,
105
+ TableCell,
106
+ TableRow,
107
+ )
108
+ from odf.text import P
109
+
110
+ sheet_name = self._get_sheet_name(sheet_name)
111
+ assert sheet_name is not None
112
+
113
+ if sheet_name in self.sheets:
114
+ wks = self.sheets[sheet_name]
115
+ else:
116
+ wks = Table(name=sheet_name)
117
+ self.book.spreadsheet.addElement(wks)
118
+
119
+ if validate_freeze_panes(freeze_panes):
120
+ freeze_panes = cast(Tuple[int, int], freeze_panes)
121
+ self._create_freeze_panes(sheet_name, freeze_panes)
122
+
123
+ for _ in range(startrow):
124
+ wks.addElement(TableRow())
125
+
126
+ rows: DefaultDict = defaultdict(TableRow)
127
+ col_count: DefaultDict = defaultdict(int)
128
+
129
+ for cell in sorted(cells, key=lambda cell: (cell.row, cell.col)):
130
+ # only add empty cells if the row is still empty
131
+ if not col_count[cell.row]:
132
+ for _ in range(startcol):
133
+ rows[cell.row].addElement(TableCell())
134
+
135
+ # fill with empty cells if needed
136
+ for _ in range(cell.col - col_count[cell.row]):
137
+ rows[cell.row].addElement(TableCell())
138
+ col_count[cell.row] += 1
139
+
140
+ pvalue, tc = self._make_table_cell(cell)
141
+ rows[cell.row].addElement(tc)
142
+ col_count[cell.row] += 1
143
+ p = P(text=pvalue)
144
+ tc.addElement(p)
145
+
146
+ # add all rows to the sheet
147
+ if len(rows) > 0:
148
+ for row_nr in range(max(rows.keys()) + 1):
149
+ wks.addElement(rows[row_nr])
150
+
151
+ def _make_table_cell_attributes(self, cell) -> dict[str, int | str]:
152
+ """Convert cell attributes to OpenDocument attributes
153
+
154
+ Parameters
155
+ ----------
156
+ cell : ExcelCell
157
+ Spreadsheet cell data
158
+
159
+ Returns
160
+ -------
161
+ attributes : Dict[str, Union[int, str]]
162
+ Dictionary with attributes and attribute values
163
+ """
164
+ attributes: dict[str, int | str] = {}
165
+ style_name = self._process_style(cell.style)
166
+ if style_name is not None:
167
+ attributes["stylename"] = style_name
168
+ if cell.mergestart is not None and cell.mergeend is not None:
169
+ attributes["numberrowsspanned"] = max(1, cell.mergestart)
170
+ attributes["numbercolumnsspanned"] = cell.mergeend
171
+ return attributes
172
+
173
+ def _make_table_cell(self, cell) -> tuple[object, Any]:
174
+ """Convert cell data to an OpenDocument spreadsheet cell
175
+
176
+ Parameters
177
+ ----------
178
+ cell : ExcelCell
179
+ Spreadsheet cell data
180
+
181
+ Returns
182
+ -------
183
+ pvalue, cell : Tuple[str, TableCell]
184
+ Display value, Cell value
185
+ """
186
+ from odf.table import TableCell
187
+
188
+ attributes = self._make_table_cell_attributes(cell)
189
+ val, fmt = self._value_with_fmt(cell.val)
190
+ pvalue = value = val
191
+ if isinstance(val, bool):
192
+ value = str(val).lower()
193
+ pvalue = str(val).upper()
194
+ if isinstance(val, datetime.datetime):
195
+ # Fast formatting
196
+ value = val.isoformat()
197
+ # Slow but locale-dependent
198
+ pvalue = val.strftime("%c")
199
+ return (
200
+ pvalue,
201
+ TableCell(valuetype="date", datevalue=value, attributes=attributes),
202
+ )
203
+ elif isinstance(val, datetime.date):
204
+ # Fast formatting
205
+ value = f"{val.year}-{val.month:02d}-{val.day:02d}"
206
+ # Slow but locale-dependent
207
+ pvalue = val.strftime("%x")
208
+ return (
209
+ pvalue,
210
+ TableCell(valuetype="date", datevalue=value, attributes=attributes),
211
+ )
212
+ else:
213
+ class_to_cell_type = {
214
+ str: "string",
215
+ int: "float",
216
+ float: "float",
217
+ bool: "boolean",
218
+ }
219
+ return (
220
+ pvalue,
221
+ TableCell(
222
+ valuetype=class_to_cell_type[type(val)],
223
+ value=value,
224
+ attributes=attributes,
225
+ ),
226
+ )
227
+
228
+ def _process_style(self, style: dict[str, Any]) -> str:
229
+ """Convert a style dictionary to a OpenDocument style sheet
230
+
231
+ Parameters
232
+ ----------
233
+ style : Dict
234
+ Style dictionary
235
+
236
+ Returns
237
+ -------
238
+ style_key : str
239
+ Unique style key for later reference in sheet
240
+ """
241
+ from odf.style import (
242
+ ParagraphProperties,
243
+ Style,
244
+ TableCellProperties,
245
+ TextProperties,
246
+ )
247
+
248
+ if style is None:
249
+ return None
250
+ style_key = json.dumps(style)
251
+ if style_key in self._style_dict:
252
+ return self._style_dict[style_key]
253
+ name = f"pd{len(self._style_dict)+1}"
254
+ self._style_dict[style_key] = name
255
+ odf_style = Style(name=name, family="table-cell")
256
+ if "font" in style:
257
+ font = style["font"]
258
+ if font.get("bold", False):
259
+ odf_style.addElement(TextProperties(fontweight="bold"))
260
+ if "borders" in style:
261
+ borders = style["borders"]
262
+ for side, thickness in borders.items():
263
+ thickness_translation = {"thin": "0.75pt solid #000000"}
264
+ odf_style.addElement(
265
+ TableCellProperties(
266
+ attributes={f"border{side}": thickness_translation[thickness]}
267
+ )
268
+ )
269
+ if "alignment" in style:
270
+ alignment = style["alignment"]
271
+ horizontal = alignment.get("horizontal")
272
+ if horizontal:
273
+ odf_style.addElement(ParagraphProperties(textalign=horizontal))
274
+ vertical = alignment.get("vertical")
275
+ if vertical:
276
+ odf_style.addElement(TableCellProperties(verticalalign=vertical))
277
+ self.book.styles.addElement(odf_style)
278
+ return name
279
+
280
+ def _create_freeze_panes(
281
+ self, sheet_name: str, freeze_panes: tuple[int, int]
282
+ ) -> None:
283
+ """
284
+ Create freeze panes in the sheet.
285
+
286
+ Parameters
287
+ ----------
288
+ sheet_name : str
289
+ Name of the spreadsheet
290
+ freeze_panes : tuple of (int, int)
291
+ Freeze pane location x and y
292
+ """
293
+ from odf.config import (
294
+ ConfigItem,
295
+ ConfigItemMapEntry,
296
+ ConfigItemMapIndexed,
297
+ ConfigItemMapNamed,
298
+ ConfigItemSet,
299
+ )
300
+
301
+ config_item_set = ConfigItemSet(name="ooo:view-settings")
302
+ self.book.settings.addElement(config_item_set)
303
+
304
+ config_item_map_indexed = ConfigItemMapIndexed(name="Views")
305
+ config_item_set.addElement(config_item_map_indexed)
306
+
307
+ config_item_map_entry = ConfigItemMapEntry()
308
+ config_item_map_indexed.addElement(config_item_map_entry)
309
+
310
+ config_item_map_named = ConfigItemMapNamed(name="Tables")
311
+ config_item_map_entry.addElement(config_item_map_named)
312
+
313
+ config_item_map_entry = ConfigItemMapEntry(name=sheet_name)
314
+ config_item_map_named.addElement(config_item_map_entry)
315
+
316
+ config_item_map_entry.addElement(
317
+ ConfigItem(name="HorizontalSplitMode", type="short", text="2")
318
+ )
319
+ config_item_map_entry.addElement(
320
+ ConfigItem(name="VerticalSplitMode", type="short", text="2")
321
+ )
322
+ config_item_map_entry.addElement(
323
+ ConfigItem(
324
+ name="HorizontalSplitPosition", type="int", text=str(freeze_panes[0])
325
+ )
326
+ )
327
+ config_item_map_entry.addElement(
328
+ ConfigItem(
329
+ name="VerticalSplitPosition", type="int", text=str(freeze_panes[1])
330
+ )
331
+ )
332
+ config_item_map_entry.addElement(
333
+ ConfigItem(name="PositionRight", type="int", text=str(freeze_panes[0]))
334
+ )
335
+ config_item_map_entry.addElement(
336
+ ConfigItem(name="PositionBottom", type="int", text=str(freeze_panes[1]))
337
+ )
videochat2/lib/python3.10/site-packages/pandas/io/excel/_openpyxl.py ADDED
@@ -0,0 +1,626 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import mmap
4
+ from typing import (
5
+ TYPE_CHECKING,
6
+ Any,
7
+ Tuple,
8
+ cast,
9
+ )
10
+
11
+ import numpy as np
12
+
13
+ from pandas._typing import (
14
+ FilePath,
15
+ ReadBuffer,
16
+ Scalar,
17
+ StorageOptions,
18
+ WriteExcelBuffer,
19
+ )
20
+ from pandas.compat._optional import import_optional_dependency
21
+ from pandas.util._decorators import doc
22
+
23
+ from pandas.core.shared_docs import _shared_docs
24
+
25
+ from pandas.io.excel._base import (
26
+ BaseExcelReader,
27
+ ExcelWriter,
28
+ )
29
+ from pandas.io.excel._util import (
30
+ combine_kwargs,
31
+ validate_freeze_panes,
32
+ )
33
+
34
+ if TYPE_CHECKING:
35
+ from openpyxl.descriptors.serialisable import Serialisable
36
+ from openpyxl.workbook import Workbook
37
+
38
+
39
+ class OpenpyxlWriter(ExcelWriter):
40
+ _engine = "openpyxl"
41
+ _supported_extensions = (".xlsx", ".xlsm")
42
+
43
+ def __init__(
44
+ self,
45
+ path: FilePath | WriteExcelBuffer | ExcelWriter,
46
+ engine: str | None = None,
47
+ date_format: str | None = None,
48
+ datetime_format: str | None = None,
49
+ mode: str = "w",
50
+ storage_options: StorageOptions = None,
51
+ if_sheet_exists: str | None = None,
52
+ engine_kwargs: dict[str, Any] | None = None,
53
+ **kwargs,
54
+ ) -> None:
55
+ # Use the openpyxl module as the Excel writer.
56
+ from openpyxl.workbook import Workbook
57
+
58
+ engine_kwargs = combine_kwargs(engine_kwargs, kwargs)
59
+
60
+ super().__init__(
61
+ path,
62
+ mode=mode,
63
+ storage_options=storage_options,
64
+ if_sheet_exists=if_sheet_exists,
65
+ engine_kwargs=engine_kwargs,
66
+ )
67
+
68
+ # ExcelWriter replaced "a" by "r+" to allow us to first read the excel file from
69
+ # the file and later write to it
70
+ if "r+" in self._mode: # Load from existing workbook
71
+ from openpyxl import load_workbook
72
+
73
+ try:
74
+ self._book = load_workbook(self._handles.handle, **engine_kwargs)
75
+ except TypeError:
76
+ self._handles.handle.close()
77
+ raise
78
+ self._handles.handle.seek(0)
79
+ else:
80
+ # Create workbook object with default optimized_write=True.
81
+ try:
82
+ self._book = Workbook(**engine_kwargs)
83
+ except TypeError:
84
+ self._handles.handle.close()
85
+ raise
86
+
87
+ if self.book.worksheets:
88
+ self.book.remove(self.book.worksheets[0])
89
+
90
+ @property
91
+ def book(self) -> Workbook:
92
+ """
93
+ Book instance of class openpyxl.workbook.Workbook.
94
+
95
+ This attribute can be used to access engine-specific features.
96
+ """
97
+ return self._book
98
+
99
+ @property
100
+ def sheets(self) -> dict[str, Any]:
101
+ """Mapping of sheet names to sheet objects."""
102
+ result = {name: self.book[name] for name in self.book.sheetnames}
103
+ return result
104
+
105
+ def _save(self) -> None:
106
+ """
107
+ Save workbook to disk.
108
+ """
109
+ self.book.save(self._handles.handle)
110
+ if "r+" in self._mode and not isinstance(self._handles.handle, mmap.mmap):
111
+ # truncate file to the written content
112
+ self._handles.handle.truncate()
113
+
114
+ @classmethod
115
+ def _convert_to_style_kwargs(cls, style_dict: dict) -> dict[str, Serialisable]:
116
+ """
117
+ Convert a style_dict to a set of kwargs suitable for initializing
118
+ or updating-on-copy an openpyxl v2 style object.
119
+
120
+ Parameters
121
+ ----------
122
+ style_dict : dict
123
+ A dict with zero or more of the following keys (or their synonyms).
124
+ 'font'
125
+ 'fill'
126
+ 'border' ('borders')
127
+ 'alignment'
128
+ 'number_format'
129
+ 'protection'
130
+
131
+ Returns
132
+ -------
133
+ style_kwargs : dict
134
+ A dict with the same, normalized keys as ``style_dict`` but each
135
+ value has been replaced with a native openpyxl style object of the
136
+ appropriate class.
137
+ """
138
+ _style_key_map = {"borders": "border"}
139
+
140
+ style_kwargs: dict[str, Serialisable] = {}
141
+ for k, v in style_dict.items():
142
+ k = _style_key_map.get(k, k)
143
+ _conv_to_x = getattr(cls, f"_convert_to_{k}", lambda x: None)
144
+ new_v = _conv_to_x(v)
145
+ if new_v:
146
+ style_kwargs[k] = new_v
147
+
148
+ return style_kwargs
149
+
150
+ @classmethod
151
+ def _convert_to_color(cls, color_spec):
152
+ """
153
+ Convert ``color_spec`` to an openpyxl v2 Color object.
154
+
155
+ Parameters
156
+ ----------
157
+ color_spec : str, dict
158
+ A 32-bit ARGB hex string, or a dict with zero or more of the
159
+ following keys.
160
+ 'rgb'
161
+ 'indexed'
162
+ 'auto'
163
+ 'theme'
164
+ 'tint'
165
+ 'index'
166
+ 'type'
167
+
168
+ Returns
169
+ -------
170
+ color : openpyxl.styles.Color
171
+ """
172
+ from openpyxl.styles import Color
173
+
174
+ if isinstance(color_spec, str):
175
+ return Color(color_spec)
176
+ else:
177
+ return Color(**color_spec)
178
+
179
+ @classmethod
180
+ def _convert_to_font(cls, font_dict):
181
+ """
182
+ Convert ``font_dict`` to an openpyxl v2 Font object.
183
+
184
+ Parameters
185
+ ----------
186
+ font_dict : dict
187
+ A dict with zero or more of the following keys (or their synonyms).
188
+ 'name'
189
+ 'size' ('sz')
190
+ 'bold' ('b')
191
+ 'italic' ('i')
192
+ 'underline' ('u')
193
+ 'strikethrough' ('strike')
194
+ 'color'
195
+ 'vertAlign' ('vertalign')
196
+ 'charset'
197
+ 'scheme'
198
+ 'family'
199
+ 'outline'
200
+ 'shadow'
201
+ 'condense'
202
+
203
+ Returns
204
+ -------
205
+ font : openpyxl.styles.Font
206
+ """
207
+ from openpyxl.styles import Font
208
+
209
+ _font_key_map = {
210
+ "sz": "size",
211
+ "b": "bold",
212
+ "i": "italic",
213
+ "u": "underline",
214
+ "strike": "strikethrough",
215
+ "vertalign": "vertAlign",
216
+ }
217
+
218
+ font_kwargs = {}
219
+ for k, v in font_dict.items():
220
+ k = _font_key_map.get(k, k)
221
+ if k == "color":
222
+ v = cls._convert_to_color(v)
223
+ font_kwargs[k] = v
224
+
225
+ return Font(**font_kwargs)
226
+
227
+ @classmethod
228
+ def _convert_to_stop(cls, stop_seq):
229
+ """
230
+ Convert ``stop_seq`` to a list of openpyxl v2 Color objects,
231
+ suitable for initializing the ``GradientFill`` ``stop`` parameter.
232
+
233
+ Parameters
234
+ ----------
235
+ stop_seq : iterable
236
+ An iterable that yields objects suitable for consumption by
237
+ ``_convert_to_color``.
238
+
239
+ Returns
240
+ -------
241
+ stop : list of openpyxl.styles.Color
242
+ """
243
+ return map(cls._convert_to_color, stop_seq)
244
+
245
+ @classmethod
246
+ def _convert_to_fill(cls, fill_dict: dict[str, Any]):
247
+ """
248
+ Convert ``fill_dict`` to an openpyxl v2 Fill object.
249
+
250
+ Parameters
251
+ ----------
252
+ fill_dict : dict
253
+ A dict with one or more of the following keys (or their synonyms),
254
+ 'fill_type' ('patternType', 'patterntype')
255
+ 'start_color' ('fgColor', 'fgcolor')
256
+ 'end_color' ('bgColor', 'bgcolor')
257
+ or one or more of the following keys (or their synonyms).
258
+ 'type' ('fill_type')
259
+ 'degree'
260
+ 'left'
261
+ 'right'
262
+ 'top'
263
+ 'bottom'
264
+ 'stop'
265
+
266
+ Returns
267
+ -------
268
+ fill : openpyxl.styles.Fill
269
+ """
270
+ from openpyxl.styles import (
271
+ GradientFill,
272
+ PatternFill,
273
+ )
274
+
275
+ _pattern_fill_key_map = {
276
+ "patternType": "fill_type",
277
+ "patterntype": "fill_type",
278
+ "fgColor": "start_color",
279
+ "fgcolor": "start_color",
280
+ "bgColor": "end_color",
281
+ "bgcolor": "end_color",
282
+ }
283
+
284
+ _gradient_fill_key_map = {"fill_type": "type"}
285
+
286
+ pfill_kwargs = {}
287
+ gfill_kwargs = {}
288
+ for k, v in fill_dict.items():
289
+ pk = _pattern_fill_key_map.get(k)
290
+ gk = _gradient_fill_key_map.get(k)
291
+ if pk in ["start_color", "end_color"]:
292
+ v = cls._convert_to_color(v)
293
+ if gk == "stop":
294
+ v = cls._convert_to_stop(v)
295
+ if pk:
296
+ pfill_kwargs[pk] = v
297
+ elif gk:
298
+ gfill_kwargs[gk] = v
299
+ else:
300
+ pfill_kwargs[k] = v
301
+ gfill_kwargs[k] = v
302
+
303
+ try:
304
+ return PatternFill(**pfill_kwargs)
305
+ except TypeError:
306
+ return GradientFill(**gfill_kwargs)
307
+
308
+ @classmethod
309
+ def _convert_to_side(cls, side_spec):
310
+ """
311
+ Convert ``side_spec`` to an openpyxl v2 Side object.
312
+
313
+ Parameters
314
+ ----------
315
+ side_spec : str, dict
316
+ A string specifying the border style, or a dict with zero or more
317
+ of the following keys (or their synonyms).
318
+ 'style' ('border_style')
319
+ 'color'
320
+
321
+ Returns
322
+ -------
323
+ side : openpyxl.styles.Side
324
+ """
325
+ from openpyxl.styles import Side
326
+
327
+ _side_key_map = {"border_style": "style"}
328
+
329
+ if isinstance(side_spec, str):
330
+ return Side(style=side_spec)
331
+
332
+ side_kwargs = {}
333
+ for k, v in side_spec.items():
334
+ k = _side_key_map.get(k, k)
335
+ if k == "color":
336
+ v = cls._convert_to_color(v)
337
+ side_kwargs[k] = v
338
+
339
+ return Side(**side_kwargs)
340
+
341
+ @classmethod
342
+ def _convert_to_border(cls, border_dict):
343
+ """
344
+ Convert ``border_dict`` to an openpyxl v2 Border object.
345
+
346
+ Parameters
347
+ ----------
348
+ border_dict : dict
349
+ A dict with zero or more of the following keys (or their synonyms).
350
+ 'left'
351
+ 'right'
352
+ 'top'
353
+ 'bottom'
354
+ 'diagonal'
355
+ 'diagonal_direction'
356
+ 'vertical'
357
+ 'horizontal'
358
+ 'diagonalUp' ('diagonalup')
359
+ 'diagonalDown' ('diagonaldown')
360
+ 'outline'
361
+
362
+ Returns
363
+ -------
364
+ border : openpyxl.styles.Border
365
+ """
366
+ from openpyxl.styles import Border
367
+
368
+ _border_key_map = {"diagonalup": "diagonalUp", "diagonaldown": "diagonalDown"}
369
+
370
+ border_kwargs = {}
371
+ for k, v in border_dict.items():
372
+ k = _border_key_map.get(k, k)
373
+ if k == "color":
374
+ v = cls._convert_to_color(v)
375
+ if k in ["left", "right", "top", "bottom", "diagonal"]:
376
+ v = cls._convert_to_side(v)
377
+ border_kwargs[k] = v
378
+
379
+ return Border(**border_kwargs)
380
+
381
+ @classmethod
382
+ def _convert_to_alignment(cls, alignment_dict):
383
+ """
384
+ Convert ``alignment_dict`` to an openpyxl v2 Alignment object.
385
+
386
+ Parameters
387
+ ----------
388
+ alignment_dict : dict
389
+ A dict with zero or more of the following keys (or their synonyms).
390
+ 'horizontal'
391
+ 'vertical'
392
+ 'text_rotation'
393
+ 'wrap_text'
394
+ 'shrink_to_fit'
395
+ 'indent'
396
+ Returns
397
+ -------
398
+ alignment : openpyxl.styles.Alignment
399
+ """
400
+ from openpyxl.styles import Alignment
401
+
402
+ return Alignment(**alignment_dict)
403
+
404
+ @classmethod
405
+ def _convert_to_number_format(cls, number_format_dict):
406
+ """
407
+ Convert ``number_format_dict`` to an openpyxl v2.1.0 number format
408
+ initializer.
409
+
410
+ Parameters
411
+ ----------
412
+ number_format_dict : dict
413
+ A dict with zero or more of the following keys.
414
+ 'format_code' : str
415
+
416
+ Returns
417
+ -------
418
+ number_format : str
419
+ """
420
+ return number_format_dict["format_code"]
421
+
422
+ @classmethod
423
+ def _convert_to_protection(cls, protection_dict):
424
+ """
425
+ Convert ``protection_dict`` to an openpyxl v2 Protection object.
426
+
427
+ Parameters
428
+ ----------
429
+ protection_dict : dict
430
+ A dict with zero or more of the following keys.
431
+ 'locked'
432
+ 'hidden'
433
+
434
+ Returns
435
+ -------
436
+ """
437
+ from openpyxl.styles import Protection
438
+
439
+ return Protection(**protection_dict)
440
+
441
+ def _write_cells(
442
+ self,
443
+ cells,
444
+ sheet_name: str | None = None,
445
+ startrow: int = 0,
446
+ startcol: int = 0,
447
+ freeze_panes: tuple[int, int] | None = None,
448
+ ) -> None:
449
+ # Write the frame cells using openpyxl.
450
+ sheet_name = self._get_sheet_name(sheet_name)
451
+
452
+ _style_cache: dict[str, dict[str, Serialisable]] = {}
453
+
454
+ if sheet_name in self.sheets and self._if_sheet_exists != "new":
455
+ if "r+" in self._mode:
456
+ if self._if_sheet_exists == "replace":
457
+ old_wks = self.sheets[sheet_name]
458
+ target_index = self.book.index(old_wks)
459
+ del self.book[sheet_name]
460
+ wks = self.book.create_sheet(sheet_name, target_index)
461
+ elif self._if_sheet_exists == "error":
462
+ raise ValueError(
463
+ f"Sheet '{sheet_name}' already exists and "
464
+ f"if_sheet_exists is set to 'error'."
465
+ )
466
+ elif self._if_sheet_exists == "overlay":
467
+ wks = self.sheets[sheet_name]
468
+ else:
469
+ raise ValueError(
470
+ f"'{self._if_sheet_exists}' is not valid for if_sheet_exists. "
471
+ "Valid options are 'error', 'new', 'replace' and 'overlay'."
472
+ )
473
+ else:
474
+ wks = self.sheets[sheet_name]
475
+ else:
476
+ wks = self.book.create_sheet()
477
+ wks.title = sheet_name
478
+
479
+ if validate_freeze_panes(freeze_panes):
480
+ freeze_panes = cast(Tuple[int, int], freeze_panes)
481
+ wks.freeze_panes = wks.cell(
482
+ row=freeze_panes[0] + 1, column=freeze_panes[1] + 1
483
+ )
484
+
485
+ for cell in cells:
486
+ xcell = wks.cell(
487
+ row=startrow + cell.row + 1, column=startcol + cell.col + 1
488
+ )
489
+ xcell.value, fmt = self._value_with_fmt(cell.val)
490
+ if fmt:
491
+ xcell.number_format = fmt
492
+
493
+ style_kwargs: dict[str, Serialisable] | None = {}
494
+ if cell.style:
495
+ key = str(cell.style)
496
+ style_kwargs = _style_cache.get(key)
497
+ if style_kwargs is None:
498
+ style_kwargs = self._convert_to_style_kwargs(cell.style)
499
+ _style_cache[key] = style_kwargs
500
+
501
+ if style_kwargs:
502
+ for k, v in style_kwargs.items():
503
+ setattr(xcell, k, v)
504
+
505
+ if cell.mergestart is not None and cell.mergeend is not None:
506
+ wks.merge_cells(
507
+ start_row=startrow + cell.row + 1,
508
+ start_column=startcol + cell.col + 1,
509
+ end_column=startcol + cell.mergeend + 1,
510
+ end_row=startrow + cell.mergestart + 1,
511
+ )
512
+
513
+ # When cells are merged only the top-left cell is preserved
514
+ # The behaviour of the other cells in a merged range is
515
+ # undefined
516
+ if style_kwargs:
517
+ first_row = startrow + cell.row + 1
518
+ last_row = startrow + cell.mergestart + 1
519
+ first_col = startcol + cell.col + 1
520
+ last_col = startcol + cell.mergeend + 1
521
+
522
+ for row in range(first_row, last_row + 1):
523
+ for col in range(first_col, last_col + 1):
524
+ if row == first_row and col == first_col:
525
+ # Ignore first cell. It is already handled.
526
+ continue
527
+ xcell = wks.cell(column=col, row=row)
528
+ for k, v in style_kwargs.items():
529
+ setattr(xcell, k, v)
530
+
531
+
532
+ class OpenpyxlReader(BaseExcelReader):
533
+ @doc(storage_options=_shared_docs["storage_options"])
534
+ def __init__(
535
+ self,
536
+ filepath_or_buffer: FilePath | ReadBuffer[bytes],
537
+ storage_options: StorageOptions = None,
538
+ ) -> None:
539
+ """
540
+ Reader using openpyxl engine.
541
+
542
+ Parameters
543
+ ----------
544
+ filepath_or_buffer : str, path object or Workbook
545
+ Object to be parsed.
546
+ {storage_options}
547
+ """
548
+ import_optional_dependency("openpyxl")
549
+ super().__init__(filepath_or_buffer, storage_options=storage_options)
550
+
551
+ @property
552
+ def _workbook_class(self):
553
+ from openpyxl import Workbook
554
+
555
+ return Workbook
556
+
557
+ def load_workbook(self, filepath_or_buffer: FilePath | ReadBuffer[bytes]):
558
+ from openpyxl import load_workbook
559
+
560
+ return load_workbook(
561
+ filepath_or_buffer, read_only=True, data_only=True, keep_links=False
562
+ )
563
+
564
+ @property
565
+ def sheet_names(self) -> list[str]:
566
+ return [sheet.title for sheet in self.book.worksheets]
567
+
568
+ def get_sheet_by_name(self, name: str):
569
+ self.raise_if_bad_sheet_by_name(name)
570
+ return self.book[name]
571
+
572
+ def get_sheet_by_index(self, index: int):
573
+ self.raise_if_bad_sheet_by_index(index)
574
+ return self.book.worksheets[index]
575
+
576
+ def _convert_cell(self, cell) -> Scalar:
577
+ from openpyxl.cell.cell import (
578
+ TYPE_ERROR,
579
+ TYPE_NUMERIC,
580
+ )
581
+
582
+ if cell.value is None:
583
+ return "" # compat with xlrd
584
+ elif cell.data_type == TYPE_ERROR:
585
+ return np.nan
586
+ elif cell.data_type == TYPE_NUMERIC:
587
+ val = int(cell.value)
588
+ if val == cell.value:
589
+ return val
590
+ return float(cell.value)
591
+
592
+ return cell.value
593
+
594
+ def get_sheet_data(
595
+ self, sheet, file_rows_needed: int | None = None
596
+ ) -> list[list[Scalar]]:
597
+ if self.book.read_only:
598
+ sheet.reset_dimensions()
599
+
600
+ data: list[list[Scalar]] = []
601
+ last_row_with_data = -1
602
+ for row_number, row in enumerate(sheet.rows):
603
+ converted_row = [self._convert_cell(cell) for cell in row]
604
+ while converted_row and converted_row[-1] == "":
605
+ # trim trailing empty elements
606
+ converted_row.pop()
607
+ if converted_row:
608
+ last_row_with_data = row_number
609
+ data.append(converted_row)
610
+ if file_rows_needed is not None and len(data) >= file_rows_needed:
611
+ break
612
+
613
+ # Trim trailing empty rows
614
+ data = data[: last_row_with_data + 1]
615
+
616
+ if len(data) > 0:
617
+ # extend rows to max width
618
+ max_width = max(len(data_row) for data_row in data)
619
+ if min(len(data_row) for data_row in data) < max_width:
620
+ empty_cell: list[Scalar] = [""]
621
+ data = [
622
+ data_row + (max_width - len(data_row)) * empty_cell
623
+ for data_row in data
624
+ ]
625
+
626
+ return data
videochat2/lib/python3.10/site-packages/pandas/io/excel/_pyxlsb.py ADDED
@@ -0,0 +1,112 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # pyright: reportMissingImports=false
2
+ from __future__ import annotations
3
+
4
+ from pandas._typing import (
5
+ FilePath,
6
+ ReadBuffer,
7
+ Scalar,
8
+ StorageOptions,
9
+ )
10
+ from pandas.compat._optional import import_optional_dependency
11
+ from pandas.util._decorators import doc
12
+
13
+ from pandas.core.shared_docs import _shared_docs
14
+
15
+ from pandas.io.excel._base import BaseExcelReader
16
+
17
+
18
+ class PyxlsbReader(BaseExcelReader):
19
+ @doc(storage_options=_shared_docs["storage_options"])
20
+ def __init__(
21
+ self,
22
+ filepath_or_buffer: FilePath | ReadBuffer[bytes],
23
+ storage_options: StorageOptions = None,
24
+ ) -> None:
25
+ """
26
+ Reader using pyxlsb engine.
27
+
28
+ Parameters
29
+ ----------
30
+ filepath_or_buffer : str, path object, or Workbook
31
+ Object to be parsed.
32
+ {storage_options}
33
+ """
34
+ import_optional_dependency("pyxlsb")
35
+ # This will call load_workbook on the filepath or buffer
36
+ # And set the result to the book-attribute
37
+ super().__init__(filepath_or_buffer, storage_options=storage_options)
38
+
39
+ @property
40
+ def _workbook_class(self):
41
+ from pyxlsb import Workbook
42
+
43
+ return Workbook
44
+
45
+ def load_workbook(self, filepath_or_buffer: FilePath | ReadBuffer[bytes]):
46
+ from pyxlsb import open_workbook
47
+
48
+ # TODO: hack in buffer capability
49
+ # This might need some modifications to the Pyxlsb library
50
+ # Actual work for opening it is in xlsbpackage.py, line 20-ish
51
+
52
+ return open_workbook(filepath_or_buffer)
53
+
54
+ @property
55
+ def sheet_names(self) -> list[str]:
56
+ return self.book.sheets
57
+
58
+ def get_sheet_by_name(self, name: str):
59
+ self.raise_if_bad_sheet_by_name(name)
60
+ return self.book.get_sheet(name)
61
+
62
+ def get_sheet_by_index(self, index: int):
63
+ self.raise_if_bad_sheet_by_index(index)
64
+ # pyxlsb sheets are indexed from 1 onwards
65
+ # There's a fix for this in the source, but the pypi package doesn't have it
66
+ return self.book.get_sheet(index + 1)
67
+
68
+ def _convert_cell(self, cell) -> Scalar:
69
+ # TODO: there is no way to distinguish between floats and datetimes in pyxlsb
70
+ # This means that there is no way to read datetime types from an xlsb file yet
71
+ if cell.v is None:
72
+ return "" # Prevents non-named columns from not showing up as Unnamed: i
73
+ if isinstance(cell.v, float):
74
+ val = int(cell.v)
75
+ if val == cell.v:
76
+ return val
77
+ else:
78
+ return float(cell.v)
79
+
80
+ return cell.v
81
+
82
+ def get_sheet_data(
83
+ self,
84
+ sheet,
85
+ file_rows_needed: int | None = None,
86
+ ) -> list[list[Scalar]]:
87
+ data: list[list[Scalar]] = []
88
+ prevous_row_number = -1
89
+ # When sparse=True the rows can have different lengths and empty rows are
90
+ # not returned. The cells are namedtuples of row, col, value (r, c, v).
91
+ for row in sheet.rows(sparse=True):
92
+ row_number = row[0].r
93
+ converted_row = [self._convert_cell(cell) for cell in row]
94
+ while converted_row and converted_row[-1] == "":
95
+ # trim trailing empty elements
96
+ converted_row.pop()
97
+ if converted_row:
98
+ data.extend([[]] * (row_number - prevous_row_number - 1))
99
+ data.append(converted_row)
100
+ prevous_row_number = row_number
101
+ if file_rows_needed is not None and len(data) >= file_rows_needed:
102
+ break
103
+ if data:
104
+ # extend rows to max_width
105
+ max_width = max(len(data_row) for data_row in data)
106
+ if min(len(data_row) for data_row in data) < max_width:
107
+ empty_cell: list[Scalar] = [""]
108
+ data = [
109
+ data_row + (max_width - len(data_row)) * empty_cell
110
+ for data_row in data
111
+ ]
112
+ return data
videochat2/lib/python3.10/site-packages/pandas/io/excel/_util.py ADDED
@@ -0,0 +1,332 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ from typing import (
4
+ TYPE_CHECKING,
5
+ Any,
6
+ Callable,
7
+ Hashable,
8
+ Iterable,
9
+ Literal,
10
+ MutableMapping,
11
+ Sequence,
12
+ TypeVar,
13
+ overload,
14
+ )
15
+
16
+ from pandas.compat._optional import import_optional_dependency
17
+
18
+ from pandas.core.dtypes.common import (
19
+ is_integer,
20
+ is_list_like,
21
+ )
22
+
23
+ if TYPE_CHECKING:
24
+ from pandas.io.excel._base import ExcelWriter
25
+
26
+ ExcelWriter_t = type[ExcelWriter]
27
+ usecols_func = TypeVar("usecols_func", bound=Callable[[Hashable], object])
28
+
29
+ _writers: MutableMapping[str, ExcelWriter_t] = {}
30
+
31
+
32
+ def register_writer(klass: ExcelWriter_t) -> None:
33
+ """
34
+ Add engine to the excel writer registry.io.excel.
35
+
36
+ You must use this method to integrate with ``to_excel``.
37
+
38
+ Parameters
39
+ ----------
40
+ klass : ExcelWriter
41
+ """
42
+ if not callable(klass):
43
+ raise ValueError("Can only register callables as engines")
44
+ engine_name = klass._engine
45
+ _writers[engine_name] = klass
46
+
47
+
48
+ def get_default_engine(ext: str, mode: Literal["reader", "writer"] = "reader") -> str:
49
+ """
50
+ Return the default reader/writer for the given extension.
51
+
52
+ Parameters
53
+ ----------
54
+ ext : str
55
+ The excel file extension for which to get the default engine.
56
+ mode : str {'reader', 'writer'}
57
+ Whether to get the default engine for reading or writing.
58
+ Either 'reader' or 'writer'
59
+
60
+ Returns
61
+ -------
62
+ str
63
+ The default engine for the extension.
64
+ """
65
+ _default_readers = {
66
+ "xlsx": "openpyxl",
67
+ "xlsm": "openpyxl",
68
+ "xlsb": "pyxlsb",
69
+ "xls": "xlrd",
70
+ "ods": "odf",
71
+ }
72
+ _default_writers = {
73
+ "xlsx": "openpyxl",
74
+ "xlsm": "openpyxl",
75
+ "xlsb": "pyxlsb",
76
+ "ods": "odf",
77
+ }
78
+ assert mode in ["reader", "writer"]
79
+ if mode == "writer":
80
+ # Prefer xlsxwriter over openpyxl if installed
81
+ xlsxwriter = import_optional_dependency("xlsxwriter", errors="warn")
82
+ if xlsxwriter:
83
+ _default_writers["xlsx"] = "xlsxwriter"
84
+ return _default_writers[ext]
85
+ else:
86
+ return _default_readers[ext]
87
+
88
+
89
+ def get_writer(engine_name: str) -> ExcelWriter_t:
90
+ try:
91
+ return _writers[engine_name]
92
+ except KeyError as err:
93
+ raise ValueError(f"No Excel writer '{engine_name}'") from err
94
+
95
+
96
+ def _excel2num(x: str) -> int:
97
+ """
98
+ Convert Excel column name like 'AB' to 0-based column index.
99
+
100
+ Parameters
101
+ ----------
102
+ x : str
103
+ The Excel column name to convert to a 0-based column index.
104
+
105
+ Returns
106
+ -------
107
+ num : int
108
+ The column index corresponding to the name.
109
+
110
+ Raises
111
+ ------
112
+ ValueError
113
+ Part of the Excel column name was invalid.
114
+ """
115
+ index = 0
116
+
117
+ for c in x.upper().strip():
118
+ cp = ord(c)
119
+
120
+ if cp < ord("A") or cp > ord("Z"):
121
+ raise ValueError(f"Invalid column name: {x}")
122
+
123
+ index = index * 26 + cp - ord("A") + 1
124
+
125
+ return index - 1
126
+
127
+
128
+ def _range2cols(areas: str) -> list[int]:
129
+ """
130
+ Convert comma separated list of column names and ranges to indices.
131
+
132
+ Parameters
133
+ ----------
134
+ areas : str
135
+ A string containing a sequence of column ranges (or areas).
136
+
137
+ Returns
138
+ -------
139
+ cols : list
140
+ A list of 0-based column indices.
141
+
142
+ Examples
143
+ --------
144
+ >>> _range2cols('A:E')
145
+ [0, 1, 2, 3, 4]
146
+ >>> _range2cols('A,C,Z:AB')
147
+ [0, 2, 25, 26, 27]
148
+ """
149
+ cols: list[int] = []
150
+
151
+ for rng in areas.split(","):
152
+ if ":" in rng:
153
+ rngs = rng.split(":")
154
+ cols.extend(range(_excel2num(rngs[0]), _excel2num(rngs[1]) + 1))
155
+ else:
156
+ cols.append(_excel2num(rng))
157
+
158
+ return cols
159
+
160
+
161
+ @overload
162
+ def maybe_convert_usecols(usecols: str | list[int]) -> list[int]:
163
+ ...
164
+
165
+
166
+ @overload
167
+ def maybe_convert_usecols(usecols: list[str]) -> list[str]:
168
+ ...
169
+
170
+
171
+ @overload
172
+ def maybe_convert_usecols(usecols: usecols_func) -> usecols_func:
173
+ ...
174
+
175
+
176
+ @overload
177
+ def maybe_convert_usecols(usecols: None) -> None:
178
+ ...
179
+
180
+
181
+ def maybe_convert_usecols(
182
+ usecols: str | list[int] | list[str] | usecols_func | None,
183
+ ) -> None | list[int] | list[str] | usecols_func:
184
+ """
185
+ Convert `usecols` into a compatible format for parsing in `parsers.py`.
186
+
187
+ Parameters
188
+ ----------
189
+ usecols : object
190
+ The use-columns object to potentially convert.
191
+
192
+ Returns
193
+ -------
194
+ converted : object
195
+ The compatible format of `usecols`.
196
+ """
197
+ if usecols is None:
198
+ return usecols
199
+
200
+ if is_integer(usecols):
201
+ raise ValueError(
202
+ "Passing an integer for `usecols` is no longer supported. "
203
+ "Please pass in a list of int from 0 to `usecols` inclusive instead."
204
+ )
205
+
206
+ if isinstance(usecols, str):
207
+ return _range2cols(usecols)
208
+
209
+ return usecols
210
+
211
+
212
+ @overload
213
+ def validate_freeze_panes(freeze_panes: tuple[int, int]) -> Literal[True]:
214
+ ...
215
+
216
+
217
+ @overload
218
+ def validate_freeze_panes(freeze_panes: None) -> Literal[False]:
219
+ ...
220
+
221
+
222
+ def validate_freeze_panes(freeze_panes: tuple[int, int] | None) -> bool:
223
+ if freeze_panes is not None:
224
+ if len(freeze_panes) == 2 and all(
225
+ isinstance(item, int) for item in freeze_panes
226
+ ):
227
+ return True
228
+
229
+ raise ValueError(
230
+ "freeze_panes must be of form (row, column) "
231
+ "where row and column are integers"
232
+ )
233
+
234
+ # freeze_panes wasn't specified, return False so it won't be applied
235
+ # to output sheet
236
+ return False
237
+
238
+
239
+ def fill_mi_header(
240
+ row: list[Hashable], control_row: list[bool]
241
+ ) -> tuple[list[Hashable], list[bool]]:
242
+ """
243
+ Forward fill blank entries in row but only inside the same parent index.
244
+
245
+ Used for creating headers in Multiindex.
246
+
247
+ Parameters
248
+ ----------
249
+ row : list
250
+ List of items in a single row.
251
+ control_row : list of bool
252
+ Helps to determine if particular column is in same parent index as the
253
+ previous value. Used to stop propagation of empty cells between
254
+ different indexes.
255
+
256
+ Returns
257
+ -------
258
+ Returns changed row and control_row
259
+ """
260
+ last = row[0]
261
+ for i in range(1, len(row)):
262
+ if not control_row[i]:
263
+ last = row[i]
264
+
265
+ if row[i] == "" or row[i] is None:
266
+ row[i] = last
267
+ else:
268
+ control_row[i] = False
269
+ last = row[i]
270
+
271
+ return row, control_row
272
+
273
+
274
+ def pop_header_name(
275
+ row: list[Hashable], index_col: int | Sequence[int]
276
+ ) -> tuple[Hashable | None, list[Hashable]]:
277
+ """
278
+ Pop the header name for MultiIndex parsing.
279
+
280
+ Parameters
281
+ ----------
282
+ row : list
283
+ The data row to parse for the header name.
284
+ index_col : int, list
285
+ The index columns for our data. Assumed to be non-null.
286
+
287
+ Returns
288
+ -------
289
+ header_name : str
290
+ The extracted header name.
291
+ trimmed_row : list
292
+ The original data row with the header name removed.
293
+ """
294
+ # Pop out header name and fill w/blank.
295
+ if is_list_like(index_col):
296
+ assert isinstance(index_col, Iterable)
297
+ i = max(index_col)
298
+ else:
299
+ assert not isinstance(index_col, Iterable)
300
+ i = index_col
301
+
302
+ header_name = row[i]
303
+ header_name = None if header_name == "" else header_name
304
+
305
+ return header_name, row[:i] + [""] + row[i + 1 :]
306
+
307
+
308
+ def combine_kwargs(engine_kwargs: dict[str, Any] | None, kwargs: dict) -> dict:
309
+ """
310
+ Used to combine two sources of kwargs for the backend engine.
311
+
312
+ Use of kwargs is deprecated, this function is solely for use in 1.3 and should
313
+ be removed in 1.4/2.0. Also _base.ExcelWriter.__new__ ensures either engine_kwargs
314
+ or kwargs must be None or empty respectively.
315
+
316
+ Parameters
317
+ ----------
318
+ engine_kwargs: dict
319
+ kwargs to be passed through to the engine.
320
+ kwargs: dict
321
+ kwargs to be psased through to the engine (deprecated)
322
+
323
+ Returns
324
+ -------
325
+ engine_kwargs combined with kwargs
326
+ """
327
+ if engine_kwargs is None:
328
+ result = {}
329
+ else:
330
+ result = engine_kwargs.copy()
331
+ result.update(kwargs)
332
+ return result
videochat2/lib/python3.10/site-packages/pandas/io/excel/_xlrd.py ADDED
@@ -0,0 +1,126 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ from datetime import time
4
+
5
+ import numpy as np
6
+
7
+ from pandas._typing import (
8
+ Scalar,
9
+ StorageOptions,
10
+ )
11
+ from pandas.compat._optional import import_optional_dependency
12
+ from pandas.util._decorators import doc
13
+
14
+ from pandas.core.shared_docs import _shared_docs
15
+
16
+ from pandas.io.excel._base import BaseExcelReader
17
+
18
+
19
+ class XlrdReader(BaseExcelReader):
20
+ @doc(storage_options=_shared_docs["storage_options"])
21
+ def __init__(
22
+ self, filepath_or_buffer, storage_options: StorageOptions = None
23
+ ) -> None:
24
+ """
25
+ Reader using xlrd engine.
26
+
27
+ Parameters
28
+ ----------
29
+ filepath_or_buffer : str, path object or Workbook
30
+ Object to be parsed.
31
+ {storage_options}
32
+ """
33
+ err_msg = "Install xlrd >= 2.0.1 for xls Excel support"
34
+ import_optional_dependency("xlrd", extra=err_msg)
35
+ super().__init__(filepath_or_buffer, storage_options=storage_options)
36
+
37
+ @property
38
+ def _workbook_class(self):
39
+ from xlrd import Book
40
+
41
+ return Book
42
+
43
+ def load_workbook(self, filepath_or_buffer):
44
+ from xlrd import open_workbook
45
+
46
+ if hasattr(filepath_or_buffer, "read"):
47
+ data = filepath_or_buffer.read()
48
+ return open_workbook(file_contents=data)
49
+ else:
50
+ return open_workbook(filepath_or_buffer)
51
+
52
+ @property
53
+ def sheet_names(self):
54
+ return self.book.sheet_names()
55
+
56
+ def get_sheet_by_name(self, name):
57
+ self.raise_if_bad_sheet_by_name(name)
58
+ return self.book.sheet_by_name(name)
59
+
60
+ def get_sheet_by_index(self, index):
61
+ self.raise_if_bad_sheet_by_index(index)
62
+ return self.book.sheet_by_index(index)
63
+
64
+ def get_sheet_data(
65
+ self, sheet, file_rows_needed: int | None = None
66
+ ) -> list[list[Scalar]]:
67
+ from xlrd import (
68
+ XL_CELL_BOOLEAN,
69
+ XL_CELL_DATE,
70
+ XL_CELL_ERROR,
71
+ XL_CELL_NUMBER,
72
+ xldate,
73
+ )
74
+
75
+ epoch1904 = self.book.datemode
76
+
77
+ def _parse_cell(cell_contents, cell_typ):
78
+ """
79
+ converts the contents of the cell into a pandas appropriate object
80
+ """
81
+ if cell_typ == XL_CELL_DATE:
82
+ # Use the newer xlrd datetime handling.
83
+ try:
84
+ cell_contents = xldate.xldate_as_datetime(cell_contents, epoch1904)
85
+ except OverflowError:
86
+ return cell_contents
87
+
88
+ # Excel doesn't distinguish between dates and time,
89
+ # so we treat dates on the epoch as times only.
90
+ # Also, Excel supports 1900 and 1904 epochs.
91
+ year = (cell_contents.timetuple())[0:3]
92
+ if (not epoch1904 and year == (1899, 12, 31)) or (
93
+ epoch1904 and year == (1904, 1, 1)
94
+ ):
95
+ cell_contents = time(
96
+ cell_contents.hour,
97
+ cell_contents.minute,
98
+ cell_contents.second,
99
+ cell_contents.microsecond,
100
+ )
101
+
102
+ elif cell_typ == XL_CELL_ERROR:
103
+ cell_contents = np.nan
104
+ elif cell_typ == XL_CELL_BOOLEAN:
105
+ cell_contents = bool(cell_contents)
106
+ elif cell_typ == XL_CELL_NUMBER:
107
+ # GH5394 - Excel 'numbers' are always floats
108
+ # it's a minimal perf hit and less surprising
109
+ val = int(cell_contents)
110
+ if val == cell_contents:
111
+ cell_contents = val
112
+ return cell_contents
113
+
114
+ data = []
115
+
116
+ nrows = sheet.nrows
117
+ if file_rows_needed is not None:
118
+ nrows = min(nrows, file_rows_needed)
119
+ for i in range(nrows):
120
+ row = [
121
+ _parse_cell(value, typ)
122
+ for value, typ in zip(sheet.row_values(i), sheet.row_types(i))
123
+ ]
124
+ data.append(row)
125
+
126
+ return data
videochat2/lib/python3.10/site-packages/pandas/io/excel/_xlsxwriter.py ADDED
@@ -0,0 +1,275 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ from typing import Any
4
+
5
+ from pandas._libs import json
6
+ from pandas._typing import (
7
+ FilePath,
8
+ StorageOptions,
9
+ WriteExcelBuffer,
10
+ )
11
+
12
+ from pandas.io.excel._base import ExcelWriter
13
+ from pandas.io.excel._util import (
14
+ combine_kwargs,
15
+ validate_freeze_panes,
16
+ )
17
+
18
+
19
+ class _XlsxStyler:
20
+ # Map from openpyxl-oriented styles to flatter xlsxwriter representation
21
+ # Ordering necessary for both determinism and because some are keyed by
22
+ # prefixes of others.
23
+ STYLE_MAPPING: dict[str, list[tuple[tuple[str, ...], str]]] = {
24
+ "font": [
25
+ (("name",), "font_name"),
26
+ (("sz",), "font_size"),
27
+ (("size",), "font_size"),
28
+ (("color", "rgb"), "font_color"),
29
+ (("color",), "font_color"),
30
+ (("b",), "bold"),
31
+ (("bold",), "bold"),
32
+ (("i",), "italic"),
33
+ (("italic",), "italic"),
34
+ (("u",), "underline"),
35
+ (("underline",), "underline"),
36
+ (("strike",), "font_strikeout"),
37
+ (("vertAlign",), "font_script"),
38
+ (("vertalign",), "font_script"),
39
+ ],
40
+ "number_format": [(("format_code",), "num_format"), ((), "num_format")],
41
+ "protection": [(("locked",), "locked"), (("hidden",), "hidden")],
42
+ "alignment": [
43
+ (("horizontal",), "align"),
44
+ (("vertical",), "valign"),
45
+ (("text_rotation",), "rotation"),
46
+ (("wrap_text",), "text_wrap"),
47
+ (("indent",), "indent"),
48
+ (("shrink_to_fit",), "shrink"),
49
+ ],
50
+ "fill": [
51
+ (("patternType",), "pattern"),
52
+ (("patterntype",), "pattern"),
53
+ (("fill_type",), "pattern"),
54
+ (("start_color", "rgb"), "fg_color"),
55
+ (("fgColor", "rgb"), "fg_color"),
56
+ (("fgcolor", "rgb"), "fg_color"),
57
+ (("start_color",), "fg_color"),
58
+ (("fgColor",), "fg_color"),
59
+ (("fgcolor",), "fg_color"),
60
+ (("end_color", "rgb"), "bg_color"),
61
+ (("bgColor", "rgb"), "bg_color"),
62
+ (("bgcolor", "rgb"), "bg_color"),
63
+ (("end_color",), "bg_color"),
64
+ (("bgColor",), "bg_color"),
65
+ (("bgcolor",), "bg_color"),
66
+ ],
67
+ "border": [
68
+ (("color", "rgb"), "border_color"),
69
+ (("color",), "border_color"),
70
+ (("style",), "border"),
71
+ (("top", "color", "rgb"), "top_color"),
72
+ (("top", "color"), "top_color"),
73
+ (("top", "style"), "top"),
74
+ (("top",), "top"),
75
+ (("right", "color", "rgb"), "right_color"),
76
+ (("right", "color"), "right_color"),
77
+ (("right", "style"), "right"),
78
+ (("right",), "right"),
79
+ (("bottom", "color", "rgb"), "bottom_color"),
80
+ (("bottom", "color"), "bottom_color"),
81
+ (("bottom", "style"), "bottom"),
82
+ (("bottom",), "bottom"),
83
+ (("left", "color", "rgb"), "left_color"),
84
+ (("left", "color"), "left_color"),
85
+ (("left", "style"), "left"),
86
+ (("left",), "left"),
87
+ ],
88
+ }
89
+
90
+ @classmethod
91
+ def convert(cls, style_dict, num_format_str=None):
92
+ """
93
+ converts a style_dict to an xlsxwriter format dict
94
+
95
+ Parameters
96
+ ----------
97
+ style_dict : style dictionary to convert
98
+ num_format_str : optional number format string
99
+ """
100
+ # Create a XlsxWriter format object.
101
+ props = {}
102
+
103
+ if num_format_str is not None:
104
+ props["num_format"] = num_format_str
105
+
106
+ if style_dict is None:
107
+ return props
108
+
109
+ if "borders" in style_dict:
110
+ style_dict = style_dict.copy()
111
+ style_dict["border"] = style_dict.pop("borders")
112
+
113
+ for style_group_key, style_group in style_dict.items():
114
+ for src, dst in cls.STYLE_MAPPING.get(style_group_key, []):
115
+ # src is a sequence of keys into a nested dict
116
+ # dst is a flat key
117
+ if dst in props:
118
+ continue
119
+ v = style_group
120
+ for k in src:
121
+ try:
122
+ v = v[k]
123
+ except (KeyError, TypeError):
124
+ break
125
+ else:
126
+ props[dst] = v
127
+
128
+ if isinstance(props.get("pattern"), str):
129
+ # TODO: support other fill patterns
130
+ props["pattern"] = 0 if props["pattern"] == "none" else 1
131
+
132
+ for k in ["border", "top", "right", "bottom", "left"]:
133
+ if isinstance(props.get(k), str):
134
+ try:
135
+ props[k] = [
136
+ "none",
137
+ "thin",
138
+ "medium",
139
+ "dashed",
140
+ "dotted",
141
+ "thick",
142
+ "double",
143
+ "hair",
144
+ "mediumDashed",
145
+ "dashDot",
146
+ "mediumDashDot",
147
+ "dashDotDot",
148
+ "mediumDashDotDot",
149
+ "slantDashDot",
150
+ ].index(props[k])
151
+ except ValueError:
152
+ props[k] = 2
153
+
154
+ if isinstance(props.get("font_script"), str):
155
+ props["font_script"] = ["baseline", "superscript", "subscript"].index(
156
+ props["font_script"]
157
+ )
158
+
159
+ if isinstance(props.get("underline"), str):
160
+ props["underline"] = {
161
+ "none": 0,
162
+ "single": 1,
163
+ "double": 2,
164
+ "singleAccounting": 33,
165
+ "doubleAccounting": 34,
166
+ }[props["underline"]]
167
+
168
+ # GH 30107 - xlsxwriter uses different name
169
+ if props.get("valign") == "center":
170
+ props["valign"] = "vcenter"
171
+
172
+ return props
173
+
174
+
175
+ class XlsxWriter(ExcelWriter):
176
+ _engine = "xlsxwriter"
177
+ _supported_extensions = (".xlsx",)
178
+
179
+ def __init__(
180
+ self,
181
+ path: FilePath | WriteExcelBuffer | ExcelWriter,
182
+ engine: str | None = None,
183
+ date_format: str | None = None,
184
+ datetime_format: str | None = None,
185
+ mode: str = "w",
186
+ storage_options: StorageOptions = None,
187
+ if_sheet_exists: str | None = None,
188
+ engine_kwargs: dict[str, Any] | None = None,
189
+ **kwargs,
190
+ ) -> None:
191
+ # Use the xlsxwriter module as the Excel writer.
192
+ from xlsxwriter import Workbook
193
+
194
+ engine_kwargs = combine_kwargs(engine_kwargs, kwargs)
195
+
196
+ if mode == "a":
197
+ raise ValueError("Append mode is not supported with xlsxwriter!")
198
+
199
+ super().__init__(
200
+ path,
201
+ engine=engine,
202
+ date_format=date_format,
203
+ datetime_format=datetime_format,
204
+ mode=mode,
205
+ storage_options=storage_options,
206
+ if_sheet_exists=if_sheet_exists,
207
+ engine_kwargs=engine_kwargs,
208
+ )
209
+
210
+ self._book = Workbook(self._handles.handle, **engine_kwargs)
211
+
212
+ @property
213
+ def book(self):
214
+ """
215
+ Book instance of class xlsxwriter.Workbook.
216
+
217
+ This attribute can be used to access engine-specific features.
218
+ """
219
+ return self._book
220
+
221
+ @property
222
+ def sheets(self) -> dict[str, Any]:
223
+ result = self.book.sheetnames
224
+ return result
225
+
226
+ def _save(self) -> None:
227
+ """
228
+ Save workbook to disk.
229
+ """
230
+ self.book.close()
231
+
232
+ def _write_cells(
233
+ self,
234
+ cells,
235
+ sheet_name: str | None = None,
236
+ startrow: int = 0,
237
+ startcol: int = 0,
238
+ freeze_panes: tuple[int, int] | None = None,
239
+ ) -> None:
240
+ # Write the frame cells using xlsxwriter.
241
+ sheet_name = self._get_sheet_name(sheet_name)
242
+
243
+ wks = self.book.get_worksheet_by_name(sheet_name)
244
+ if wks is None:
245
+ wks = self.book.add_worksheet(sheet_name)
246
+
247
+ style_dict = {"null": None}
248
+
249
+ if validate_freeze_panes(freeze_panes):
250
+ wks.freeze_panes(*(freeze_panes))
251
+
252
+ for cell in cells:
253
+ val, fmt = self._value_with_fmt(cell.val)
254
+
255
+ stylekey = json.dumps(cell.style)
256
+ if fmt:
257
+ stylekey += fmt
258
+
259
+ if stylekey in style_dict:
260
+ style = style_dict[stylekey]
261
+ else:
262
+ style = self.book.add_format(_XlsxStyler.convert(cell.style, fmt))
263
+ style_dict[stylekey] = style
264
+
265
+ if cell.mergestart is not None and cell.mergeend is not None:
266
+ wks.merge_range(
267
+ startrow + cell.row,
268
+ startcol + cell.col,
269
+ startrow + cell.mergestart,
270
+ startcol + cell.mergeend,
271
+ val,
272
+ style,
273
+ )
274
+ else:
275
+ wks.write(startrow + cell.row, startcol + cell.col, val, style)
videochat2/lib/python3.10/site-packages/pandas/io/formats/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (298 Bytes). View file
 
videochat2/lib/python3.10/site-packages/pandas/io/formats/__pycache__/printing.cpython-310.pyc ADDED
Binary file (13.4 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/io/parsers/__init__.py ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ from pandas.io.parsers.readers import (
2
+ TextFileReader,
3
+ TextParser,
4
+ read_csv,
5
+ read_fwf,
6
+ read_table,
7
+ )
8
+
9
+ __all__ = ["TextFileReader", "TextParser", "read_csv", "read_fwf", "read_table"]
videochat2/lib/python3.10/site-packages/pandas/io/parsers/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (336 Bytes). View file
 
videochat2/lib/python3.10/site-packages/pandas/io/parsers/__pycache__/arrow_parser_wrapper.cpython-310.pyc ADDED
Binary file (4.65 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/io/parsers/__pycache__/base_parser.cpython-310.pyc ADDED
Binary file (32.7 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/io/parsers/__pycache__/c_parser_wrapper.cpython-310.pyc ADDED
Binary file (9.78 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/io/parsers/__pycache__/python_parser.cpython-310.pyc ADDED
Binary file (29.5 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/io/parsers/__pycache__/readers.cpython-310.pyc ADDED
Binary file (52.9 kB). View file
 
videochat2/lib/python3.10/site-packages/pandas/io/parsers/arrow_parser_wrapper.py ADDED
@@ -0,0 +1,164 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ from pandas._typing import ReadBuffer
4
+ from pandas.compat._optional import import_optional_dependency
5
+
6
+ from pandas.core.dtypes.inference import is_integer
7
+
8
+ import pandas as pd
9
+ from pandas import DataFrame
10
+
11
+ from pandas.io._util import _arrow_dtype_mapping
12
+ from pandas.io.parsers.base_parser import ParserBase
13
+
14
+
15
+ class ArrowParserWrapper(ParserBase):
16
+ """
17
+ Wrapper for the pyarrow engine for read_csv()
18
+ """
19
+
20
+ def __init__(self, src: ReadBuffer[bytes], **kwds) -> None:
21
+ super().__init__(kwds)
22
+ self.kwds = kwds
23
+ self.src = src
24
+
25
+ self._parse_kwds()
26
+
27
+ def _parse_kwds(self):
28
+ """
29
+ Validates keywords before passing to pyarrow.
30
+ """
31
+ encoding: str | None = self.kwds.get("encoding")
32
+ self.encoding = "utf-8" if encoding is None else encoding
33
+
34
+ self.usecols, self.usecols_dtype = self._validate_usecols_arg(
35
+ self.kwds["usecols"]
36
+ )
37
+ na_values = self.kwds["na_values"]
38
+ if isinstance(na_values, dict):
39
+ raise ValueError(
40
+ "The pyarrow engine doesn't support passing a dict for na_values"
41
+ )
42
+ self.na_values = list(self.kwds["na_values"])
43
+
44
+ def _get_pyarrow_options(self) -> None:
45
+ """
46
+ Rename some arguments to pass to pyarrow
47
+ """
48
+ mapping = {
49
+ "usecols": "include_columns",
50
+ "na_values": "null_values",
51
+ "escapechar": "escape_char",
52
+ "skip_blank_lines": "ignore_empty_lines",
53
+ "decimal": "decimal_point",
54
+ }
55
+ for pandas_name, pyarrow_name in mapping.items():
56
+ if pandas_name in self.kwds and self.kwds.get(pandas_name) is not None:
57
+ self.kwds[pyarrow_name] = self.kwds.pop(pandas_name)
58
+
59
+ self.parse_options = {
60
+ option_name: option_value
61
+ for option_name, option_value in self.kwds.items()
62
+ if option_value is not None
63
+ and option_name
64
+ in ("delimiter", "quote_char", "escape_char", "ignore_empty_lines")
65
+ }
66
+ self.convert_options = {
67
+ option_name: option_value
68
+ for option_name, option_value in self.kwds.items()
69
+ if option_value is not None
70
+ and option_name
71
+ in (
72
+ "include_columns",
73
+ "null_values",
74
+ "true_values",
75
+ "false_values",
76
+ "decimal_point",
77
+ )
78
+ }
79
+ self.read_options = {
80
+ "autogenerate_column_names": self.header is None,
81
+ "skip_rows": self.header
82
+ if self.header is not None
83
+ else self.kwds["skiprows"],
84
+ "encoding": self.encoding,
85
+ }
86
+
87
+ def _finalize_pandas_output(self, frame: DataFrame) -> DataFrame:
88
+ """
89
+ Processes data read in based on kwargs.
90
+
91
+ Parameters
92
+ ----------
93
+ frame: DataFrame
94
+ The DataFrame to process.
95
+
96
+ Returns
97
+ -------
98
+ DataFrame
99
+ The processed DataFrame.
100
+ """
101
+ num_cols = len(frame.columns)
102
+ multi_index_named = True
103
+ if self.header is None:
104
+ if self.names is None:
105
+ if self.header is None:
106
+ self.names = range(num_cols)
107
+ if len(self.names) != num_cols:
108
+ # usecols is passed through to pyarrow, we only handle index col here
109
+ # The only way self.names is not the same length as number of cols is
110
+ # if we have int index_col. We should just pad the names(they will get
111
+ # removed anyways) to expected length then.
112
+ self.names = list(range(num_cols - len(self.names))) + self.names
113
+ multi_index_named = False
114
+ frame.columns = self.names
115
+ # we only need the frame not the names
116
+ frame.columns, frame = self._do_date_conversions(frame.columns, frame)
117
+ if self.index_col is not None:
118
+ for i, item in enumerate(self.index_col):
119
+ if is_integer(item):
120
+ self.index_col[i] = frame.columns[item]
121
+ else:
122
+ # String case
123
+ if item not in frame.columns:
124
+ raise ValueError(f"Index {item} invalid")
125
+ frame.set_index(self.index_col, drop=True, inplace=True)
126
+ # Clear names if headerless and no name given
127
+ if self.header is None and not multi_index_named:
128
+ frame.index.names = [None] * len(frame.index.names)
129
+
130
+ if self.kwds.get("dtype") is not None:
131
+ try:
132
+ frame = frame.astype(self.kwds.get("dtype"))
133
+ except TypeError as e:
134
+ # GH#44901 reraise to keep api consistent
135
+ raise ValueError(e)
136
+ return frame
137
+
138
+ def read(self) -> DataFrame:
139
+ """
140
+ Reads the contents of a CSV file into a DataFrame and
141
+ processes it according to the kwargs passed in the
142
+ constructor.
143
+
144
+ Returns
145
+ -------
146
+ DataFrame
147
+ The DataFrame created from the CSV file.
148
+ """
149
+ pyarrow_csv = import_optional_dependency("pyarrow.csv")
150
+ self._get_pyarrow_options()
151
+
152
+ table = pyarrow_csv.read_csv(
153
+ self.src,
154
+ read_options=pyarrow_csv.ReadOptions(**self.read_options),
155
+ parse_options=pyarrow_csv.ParseOptions(**self.parse_options),
156
+ convert_options=pyarrow_csv.ConvertOptions(**self.convert_options),
157
+ )
158
+ if self.kwds["dtype_backend"] == "pyarrow":
159
+ frame = table.to_pandas(types_mapper=pd.ArrowDtype)
160
+ elif self.kwds["dtype_backend"] == "numpy_nullable":
161
+ frame = table.to_pandas(types_mapper=_arrow_dtype_mapping().get)
162
+ else:
163
+ frame = table.to_pandas()
164
+ return self._finalize_pandas_output(frame)
videochat2/lib/python3.10/site-packages/pandas/io/parsers/base_parser.py ADDED
@@ -0,0 +1,1388 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ from collections import defaultdict
4
+ from copy import copy
5
+ import csv
6
+ import datetime
7
+ from enum import Enum
8
+ import itertools
9
+ from typing import (
10
+ TYPE_CHECKING,
11
+ Any,
12
+ Callable,
13
+ Hashable,
14
+ Iterable,
15
+ List,
16
+ Mapping,
17
+ Sequence,
18
+ Tuple,
19
+ cast,
20
+ final,
21
+ overload,
22
+ )
23
+ import warnings
24
+
25
+ import numpy as np
26
+
27
+ from pandas._libs import (
28
+ lib,
29
+ parsers,
30
+ )
31
+ import pandas._libs.ops as libops
32
+ from pandas._libs.parsers import STR_NA_VALUES
33
+ from pandas._libs.tslibs import parsing
34
+ from pandas._typing import (
35
+ ArrayLike,
36
+ DtypeArg,
37
+ DtypeObj,
38
+ Scalar,
39
+ )
40
+ from pandas.compat._optional import import_optional_dependency
41
+ from pandas.errors import (
42
+ ParserError,
43
+ ParserWarning,
44
+ )
45
+ from pandas.util._exceptions import find_stack_level
46
+
47
+ from pandas.core.dtypes.astype import astype_array
48
+ from pandas.core.dtypes.common import (
49
+ ensure_object,
50
+ is_bool_dtype,
51
+ is_dict_like,
52
+ is_dtype_equal,
53
+ is_extension_array_dtype,
54
+ is_float_dtype,
55
+ is_integer,
56
+ is_integer_dtype,
57
+ is_list_like,
58
+ is_object_dtype,
59
+ is_scalar,
60
+ is_string_dtype,
61
+ pandas_dtype,
62
+ )
63
+ from pandas.core.dtypes.dtypes import (
64
+ CategoricalDtype,
65
+ ExtensionDtype,
66
+ )
67
+ from pandas.core.dtypes.missing import isna
68
+
69
+ from pandas import (
70
+ ArrowDtype,
71
+ DatetimeIndex,
72
+ StringDtype,
73
+ )
74
+ from pandas.core import algorithms
75
+ from pandas.core.arrays import (
76
+ ArrowExtensionArray,
77
+ BooleanArray,
78
+ Categorical,
79
+ ExtensionArray,
80
+ FloatingArray,
81
+ IntegerArray,
82
+ )
83
+ from pandas.core.arrays.boolean import BooleanDtype
84
+ from pandas.core.indexes.api import (
85
+ Index,
86
+ MultiIndex,
87
+ default_index,
88
+ ensure_index_from_sequences,
89
+ )
90
+ from pandas.core.series import Series
91
+ from pandas.core.tools import datetimes as tools
92
+
93
+ from pandas.io.common import is_potential_multi_index
94
+
95
+ if TYPE_CHECKING:
96
+ from pandas import DataFrame
97
+
98
+
99
+ class ParserBase:
100
class BadLineHandleMethod(Enum):
    # Strategy for handling a malformed line (``on_bad_lines`` option):
    # ERROR raises, WARN warns and skips the line, SKIP skips silently.
    ERROR = 0
    WARN = 1
    SKIP = 2

# NOTE(review): appears to mark an index created implicitly (columns are
# not popped for it in _get_simple_index) -- confirm against subclasses.
_implicit_index: bool = False
# Declared for typing only; set to True in __init__.
_first_chunk: bool
107
+
108
def __init__(self, kwds) -> None:
    """
    Initialize parser state from the keyword dict assembled by the
    read_csv/read_table front-end.

    Parameters
    ----------
    kwds : dict
        Pre-processed reader options.  Note that some keys are read with
        ``pop`` (consumed) and others with ``get`` (left in place).
    """
    self.names = kwds.get("names")
    self.orig_names: Sequence[Hashable] | None = None

    self.index_col = kwds.get("index_col", None)
    self.unnamed_cols: set = set()
    self.index_names: Sequence[Hashable] | None = None
    self.col_names: Sequence[Hashable] | None = None

    # parse_dates is normalized/validated once, up front
    self.parse_dates = _validate_parse_dates_arg(kwds.pop("parse_dates", False))
    self._parse_date_cols: Iterable = []
    self.date_parser = kwds.pop("date_parser", lib.no_default)
    self.date_format = kwds.pop("date_format", None)
    self.dayfirst = kwds.pop("dayfirst", False)
    self.keep_date_col = kwds.pop("keep_date_col", False)

    self.na_values = kwds.get("na_values")
    self.na_fvalues = kwds.get("na_fvalues")
    self.na_filter = kwds.get("na_filter", False)
    self.keep_default_na = kwds.get("keep_default_na", True)

    # copy so we never mutate the caller's dtype mapping
    self.dtype = copy(kwds.get("dtype", None))
    self.converters = kwds.get("converters")
    self.dtype_backend = kwds.get("dtype_backend")

    self.true_values = kwds.get("true_values")
    self.false_values = kwds.get("false_values")
    self.cache_dates = kwds.pop("cache_dates", True)

    # single converter closure reused for every date column
    self._date_conv = _make_date_converter(
        date_parser=self.date_parser,
        date_format=self.date_format,
        dayfirst=self.dayfirst,
        cache_dates=self.cache_dates,
    )

    # validate header options for mi
    self.header = kwds.get("header")
    if is_list_like(self.header, allow_sets=False):
        # list-like header => MultiIndex columns; usecols/names are
        # incompatible with that
        if kwds.get("usecols"):
            raise ValueError(
                "cannot specify usecols when specifying a multi-index header"
            )
        if kwds.get("names"):
            raise ValueError(
                "cannot specify names when specifying a multi-index header"
            )

        # validate index_col that only contains integers
        if self.index_col is not None:
            if not (
                is_list_like(self.index_col, allow_sets=False)
                and all(map(is_integer, self.index_col))
                or is_integer(self.index_col)
            ):
                raise ValueError(
                    "index_col must only contain row numbers "
                    "when specifying a multi-index header"
                )

    self._name_processed = False

    self._first_chunk = True

    self.usecols, self.usecols_dtype = self._validate_usecols_arg(kwds["usecols"])

    # Fallback to error to pass a sketchy test(test_override_set_noconvert_columns)
    # Normally, this arg would get pre-processed earlier on
    self.on_bad_lines = kwds.get("on_bad_lines", self.BadLineHandleMethod.ERROR)
177
+
178
def _validate_parse_dates_presence(self, columns: Sequence[Hashable]) -> Iterable:
    """
    Check if parse_dates are in columns.

    If user has provided names for parse_dates, check if those columns
    are available.

    Parameters
    ----------
    columns : list
        List of names of the dataframe.

    Returns
    -------
    The names of the columns which will get parsed later if a dict or list
    is given as specification.

    Raises
    ------
    ValueError
        If column to parse_date is not in dataframe.

    """
    cols_needed: Iterable
    if is_dict_like(self.parse_dates):
        # dict spec: {new_name: [cols to combine]}
        cols_needed = itertools.chain(*self.parse_dates.values())
    elif is_list_like(self.parse_dates):
        # a column in parse_dates could be represented
        # ColReference = Union[int, str]
        # DateGroups = List[ColReference]
        # ParseDates = Union[DateGroups, List[DateGroups],
        #     Dict[ColReference, DateGroups]]
        cols_needed = itertools.chain.from_iterable(
            col if is_list_like(col) and not isinstance(col, tuple) else [col]
            for col in self.parse_dates
        )
    else:
        cols_needed = []

    cols_needed = list(cols_needed)

    # get only columns that are references using names (str), not by index
    missing_cols = ", ".join(
        sorted(
            {
                col
                for col in cols_needed
                if isinstance(col, str) and col not in columns
            }
        )
    )
    if missing_cols:
        raise ValueError(
            f"Missing column provided to 'parse_dates': '{missing_cols}'"
        )
    # Convert positions to actual column names
    return [
        col if (isinstance(col, str) or col in columns) else columns[col]
        for col in cols_needed
    ]
238
+
239
def close(self) -> None:
    """Release parser resources; a no-op for the base class."""
    return None
241
+
242
+ @final
243
+ @property
244
+ def _has_complex_date_col(self) -> bool:
245
+ return isinstance(self.parse_dates, dict) or (
246
+ isinstance(self.parse_dates, list)
247
+ and len(self.parse_dates) > 0
248
+ and isinstance(self.parse_dates[0], list)
249
+ )
250
+
251
+ @final
252
+ def _should_parse_dates(self, i: int) -> bool:
253
+ if isinstance(self.parse_dates, bool):
254
+ return self.parse_dates
255
+ else:
256
+ if self.index_names is not None:
257
+ name = self.index_names[i]
258
+ else:
259
+ name = None
260
+ j = i if self.index_col is None else self.index_col[i]
261
+
262
+ if is_scalar(self.parse_dates):
263
+ return (j == self.parse_dates) or (
264
+ name is not None and name == self.parse_dates
265
+ )
266
+ else:
267
+ return (j in self.parse_dates) or (
268
+ name is not None and name in self.parse_dates
269
+ )
270
+
271
@final
def _extract_multi_indexer_columns(
    self,
    header,
    index_names: Sequence[Hashable] | None,
    passed_names: bool = False,
) -> tuple[
    Sequence[Hashable], Sequence[Hashable] | None, Sequence[Hashable] | None, bool
]:
    """
    Extract and return the names, index_names, col_names if the column
    names are a MultiIndex.

    Parameters
    ----------
    header: list of lists
        The header rows
    index_names: list, optional
        The names of the future index
    passed_names: bool, default False
        A flag specifying if names were passed

    """
    # single header row -> nothing multi-index about the columns
    if len(header) < 2:
        return header[0], index_names, None, passed_names

    # the names are the tuples of the header that are not the index cols
    # 0 is the name of the index, assuming index_col is a list of column
    # numbers
    ic = self.index_col
    if ic is None:
        ic = []

    if not isinstance(ic, (list, tuple, np.ndarray)):
        ic = [ic]
    sic = set(ic)

    # clean the index_names
    index_names = header.pop(-1)
    index_names, _, _ = self._clean_index_names(index_names, self.index_col)

    # extract the columns
    field_count = len(header[0])

    # check if header lengths are equal
    if not all(len(header_iter) == field_count for header_iter in header[1:]):
        raise ParserError("Header rows must have an equal number of columns.")

    def extract(r):
        # drop the index-column positions from a header row
        return tuple(r[i] for i in range(field_count) if i not in sic)

    # zip the remaining header rows into per-column tuples
    columns = list(zip(*(extract(r) for r in header)))
    names = columns.copy()
    for single_ic in sorted(ic):
        names.insert(single_ic, single_ic)

    # Clean the column names (if we have an index_col).
    if len(ic):
        col_names = [
            r[ic[0]]
            if ((r[ic[0]] is not None) and r[ic[0]] not in self.unnamed_cols)
            else None
            for r in header
        ]
    else:
        col_names = [None] * len(header)

    passed_names = True

    return names, index_names, col_names, passed_names
341
+
342
+ @final
343
+ def _maybe_make_multi_index_columns(
344
+ self,
345
+ columns: Sequence[Hashable],
346
+ col_names: Sequence[Hashable] | None = None,
347
+ ) -> Sequence[Hashable] | MultiIndex:
348
+ # possibly create a column mi here
349
+ if is_potential_multi_index(columns):
350
+ list_columns = cast(List[Tuple], columns)
351
+ return MultiIndex.from_tuples(list_columns, names=col_names)
352
+ return columns
353
+
354
@final
def _make_index(
    self, data, alldata, columns, indexnamerow: list[Scalar] | None = None
) -> tuple[Index | None, Sequence[Hashable] | MultiIndex]:
    """
    Build the row Index for parsed data (None when no index_col), and
    possibly turn ``columns`` into a MultiIndex.

    Returns
    -------
    (index, columns) where ``index`` is None when no index columns were
    requested.
    """
    index: Index | None
    if not is_index_col(self.index_col) or not self.index_col:
        index = None

    elif not self._has_complex_date_col:
        # simple positional index columns
        simple_index = self._get_simple_index(alldata, columns)
        index = self._agg_index(simple_index)
    elif self._has_complex_date_col:
        # multi-column date index: resolve names first, then combine,
        # skipping the per-level date parsing (already done)
        if not self._name_processed:
            (self.index_names, _, self.index_col) = self._clean_index_names(
                list(columns), self.index_col
            )
            self._name_processed = True
        date_index = self._get_complex_date_index(data, columns)
        index = self._agg_index(date_index, try_parse_dates=False)

    # add names for the index
    if indexnamerow:
        coffset = len(indexnamerow) - len(columns)
        assert index is not None
        index = index.set_names(indexnamerow[:coffset])

    # maybe create a mi on the columns
    columns = self._maybe_make_multi_index_columns(columns, self.col_names)

    return index, columns
384
+
385
+ @final
386
+ def _get_simple_index(self, data, columns):
387
+ def ix(col):
388
+ if not isinstance(col, str):
389
+ return col
390
+ raise ValueError(f"Index {col} invalid")
391
+
392
+ to_remove = []
393
+ index = []
394
+ for idx in self.index_col:
395
+ i = ix(idx)
396
+ to_remove.append(i)
397
+ index.append(data[i])
398
+
399
+ # remove index items from content and columns, don't pop in
400
+ # loop
401
+ for i in sorted(to_remove, reverse=True):
402
+ data.pop(i)
403
+ if not self._implicit_index:
404
+ columns.pop(i)
405
+
406
+ return index
407
+
408
@final
def _get_complex_date_index(self, data, col_names):
    """
    Extract the index_col entries from ``data`` by column name.

    Mutates ``data`` and ``col_names`` by removing the index entries and
    returns the extracted arrays.
    """

    def _get_name(icol):
        # a string entry already is a name
        if isinstance(icol, str):
            return icol

        if col_names is None:
            raise ValueError(f"Must supply column order to use {icol!s} as index")

        # positional entry: translate to the corresponding name
        for i, c in enumerate(col_names):
            if i == icol:
                return c

    to_remove = []
    index = []
    for idx in self.index_col:
        name = _get_name(idx)
        to_remove.append(name)
        index.append(data[name])

    # remove index items from content and columns, don't pop in
    # loop
    for c in sorted(to_remove, reverse=True):
        data.pop(c)
        col_names.remove(c)

    return index
435
+
436
+ def _clean_mapping(self, mapping):
437
+ """converts col numbers to names"""
438
+ if not isinstance(mapping, dict):
439
+ return mapping
440
+ clean = {}
441
+ # for mypy
442
+ assert self.orig_names is not None
443
+
444
+ for col, v in mapping.items():
445
+ if isinstance(col, int) and col not in self.orig_names:
446
+ col = self.orig_names[col]
447
+ clean[col] = v
448
+ if isinstance(mapping, defaultdict):
449
+ remaining_cols = set(self.orig_names) - set(clean.keys())
450
+ clean.update({col: mapping[col] for col in remaining_cols})
451
+ return clean
452
+
453
@final
def _agg_index(self, index, try_parse_dates: bool = True) -> Index:
    """
    Convert raw index arrays into an Index.

    For each level: optionally parse dates, resolve NA values (global or
    per-column), infer/cast the dtype, then combine the levels via
    ensure_index_from_sequences.
    """
    arrays = []
    converters = self._clean_mapping(self.converters)

    for i, arr in enumerate(index):
        if try_parse_dates and self._should_parse_dates(i):
            arr = self._date_conv(
                arr,
                col=self.index_names[i] if self.index_names is not None else None,
            )

        if self.na_filter:
            col_na_values = self.na_values
            col_na_fvalues = self.na_fvalues
        else:
            col_na_values = set()
            col_na_fvalues = set()

        if isinstance(self.na_values, dict):
            # per-column NA values override the global ones
            assert self.index_names is not None
            col_name = self.index_names[i]
            if col_name is not None:
                col_na_values, col_na_fvalues = _get_na_values(
                    col_name, self.na_values, self.na_fvalues, self.keep_default_na
                )

        clean_dtypes = self._clean_mapping(self.dtype)

        cast_type = None
        index_converter = False
        if self.index_names is not None:
            if isinstance(clean_dtypes, dict):
                cast_type = clean_dtypes.get(self.index_names[i], None)

            if isinstance(converters, dict):
                index_converter = converters.get(self.index_names[i]) is not None

        # skip numeric/bool inference when the level has a string dtype
        # or a user converter
        try_num_bool = not (
            cast_type and is_string_dtype(cast_type) or index_converter
        )

        arr, _ = self._infer_types(
            arr, col_na_values | col_na_fvalues, cast_type is None, try_num_bool
        )
        arrays.append(arr)

    names = self.index_names
    index = ensure_index_from_sequences(arrays, names)

    return index
504
+
505
@final
def _convert_to_ndarrays(
    self,
    dct: Mapping,
    na_values,
    na_fvalues,
    verbose: bool = False,
    converters=None,
    dtypes=None,
):
    """
    Convert each raw column in ``dct`` to an ndarray/ExtensionArray.

    Applies per-column converters, NA masking, type inference and any
    user-requested dtype casts; returns a dict of converted columns
    keyed like ``dct``.
    """
    result = {}
    for c, values in dct.items():
        conv_f = None if converters is None else converters.get(c, None)
        if isinstance(dtypes, dict):
            cast_type = dtypes.get(c, None)
        else:
            # single dtype or None
            cast_type = dtypes

        if self.na_filter:
            col_na_values, col_na_fvalues = _get_na_values(
                c, na_values, na_fvalues, self.keep_default_na
            )
        else:
            col_na_values, col_na_fvalues = set(), set()

        if c in self._parse_date_cols:
            # GH#26203 Do not convert columns which get converted to dates
            # but replace nans to ensure to_datetime works
            mask = algorithms.isin(values, set(col_na_values) | col_na_fvalues)
            np.putmask(values, mask, np.nan)
            result[c] = values
            continue

        if conv_f is not None:
            # conv_f applied to data before inference
            if cast_type is not None:
                warnings.warn(
                    (
                        "Both a converter and dtype were specified "
                        f"for column {c} - only the converter will be used."
                    ),
                    ParserWarning,
                    stacklevel=find_stack_level(),
                )

            try:
                values = lib.map_infer(values, conv_f)
            except ValueError:
                # error: Argument 2 to "isin" has incompatible type "List[Any]";
                # expected "Union[Union[ExtensionArray, ndarray], Index, Series]"
                mask = algorithms.isin(
                    values, list(na_values)  # type: ignore[arg-type]
                ).view(np.uint8)
                values = lib.map_infer_mask(values, conv_f, mask)

            cvals, na_count = self._infer_types(
                values,
                set(col_na_values) | col_na_fvalues,
                cast_type is None,
                try_num_bool=False,
            )
        else:
            is_ea = is_extension_array_dtype(cast_type)
            is_str_or_ea_dtype = is_ea or is_string_dtype(cast_type)
            # skip inference if specified dtype is object
            # or casting to an EA
            try_num_bool = not (cast_type and is_str_or_ea_dtype)

            # general type inference and conversion
            cvals, na_count = self._infer_types(
                values,
                set(col_na_values) | col_na_fvalues,
                cast_type is None,
                try_num_bool,
            )

            # type specified in dtype param or cast_type is an EA
            if cast_type and (not is_dtype_equal(cvals, cast_type) or is_ea):
                if not is_ea and na_count > 0:
                    # numpy bool cannot represent NA
                    if is_bool_dtype(cast_type):
                        raise ValueError(f"Bool column has NA values in column {c}")
                cast_type = pandas_dtype(cast_type)
                cvals = self._cast_types(cvals, cast_type, c)

        result[c] = cvals
        if verbose and na_count:
            print(f"Filled {na_count} NA values in column {c!s}")
    return result
594
+
595
@final
def _set_noconvert_dtype_columns(
    self, col_indices: list[int], names: Sequence[Hashable]
) -> set[int]:
    """
    Set the columns that should not undergo dtype conversions.

    Currently, any column that is involved with date parsing will not
    undergo such conversions. If usecols is specified, the positions of the columns
    not to cast is relative to the usecols not to all columns.

    Parameters
    ----------
    col_indices: The indices specifying order and positions of the columns
    names: The column names which order is corresponding with the order
        of col_indices

    Returns
    -------
    A set of integers containing the positions of the columns not to convert.
    """
    usecols: list[int] | list[str] | None
    noconvert_columns = set()
    if self.usecols_dtype == "integer":
        # A set of integers will be converted to a list in
        # the correct order every single time.
        usecols = sorted(self.usecols)
    elif callable(self.usecols) or self.usecols_dtype not in ("empty", None):
        # The names attribute should have the correct columns
        # in the proper order for indexing with parse_dates.
        usecols = col_indices
    else:
        # Usecols is empty.
        usecols = None

    def _set(x) -> int:
        # translate a usecols-relative position or a name to an
        # absolute column position
        if usecols is not None and is_integer(x):
            x = usecols[x]

        if not is_integer(x):
            x = col_indices[names.index(x)]

        return x

    if isinstance(self.parse_dates, list):
        for val in self.parse_dates:
            if isinstance(val, list):
                for k in val:
                    noconvert_columns.add(_set(k))
            else:
                noconvert_columns.add(_set(val))

    elif isinstance(self.parse_dates, dict):
        for val in self.parse_dates.values():
            if isinstance(val, list):
                for k in val:
                    noconvert_columns.add(_set(k))
            else:
                noconvert_columns.add(_set(val))

    elif self.parse_dates:
        # parse_dates=True: the index column(s) get date-parsed
        if isinstance(self.index_col, list):
            for k in self.index_col:
                noconvert_columns.add(_set(k))
        elif self.index_col is not None:
            noconvert_columns.add(_set(self.index_col))

    return noconvert_columns
663
+
664
def _infer_types(
    self, values, na_values, no_dtype_specified, try_num_bool: bool = True
) -> tuple[ArrayLike, int]:
    """
    Infer types of values, possibly casting

    Parameters
    ----------
    values : ndarray
    na_values : set
    no_dtype_specified: Specifies if we want to cast explicitly
    try_num_bool : bool, default try
        try to cast values to numeric (first preference) or boolean

    Returns
    -------
    converted : ndarray or ExtensionArray
    na_count : int
    """
    na_count = 0
    if issubclass(values.dtype.type, (np.number, np.bool_)):
        # If our array has numeric dtype, we don't have to check for strings in isin
        na_values = np.array([val for val in na_values if not isinstance(val, str)])
        mask = algorithms.isin(values, na_values)
        na_count = mask.astype("uint8", copy=False).sum()
        if na_count > 0:
            if is_integer_dtype(values):
                # integer arrays cannot hold NaN; upcast before masking
                values = values.astype(np.float64)
            np.putmask(values, mask, np.nan)
        return values, na_count

    dtype_backend = self.dtype_backend
    non_default_dtype_backend = (
        no_dtype_specified and dtype_backend is not lib.no_default
    )
    result: ArrayLike

    if try_num_bool and is_object_dtype(values.dtype):
        # exclude e.g DatetimeIndex here
        try:
            result, result_mask = lib.maybe_convert_numeric(
                values,
                na_values,
                False,
                convert_to_masked_nullable=non_default_dtype_backend,  # type: ignore[arg-type] # noqa
            )
        except (ValueError, TypeError):
            # e.g. encountering datetime string gets ValueError
            # TypeError can be raised in floatify
            na_count = parsers.sanitize_objects(values, na_values)
            result = values
        else:
            if non_default_dtype_backend:
                if result_mask is None:
                    result_mask = np.zeros(result.shape, dtype=np.bool_)

                if result_mask.all():
                    # entirely NA: represent with an all-masked IntegerArray
                    result = IntegerArray(
                        np.ones(result_mask.shape, dtype=np.int64), result_mask
                    )
                elif is_integer_dtype(result):
                    result = IntegerArray(result, result_mask)
                elif is_bool_dtype(result):
                    result = BooleanArray(result, result_mask)
                elif is_float_dtype(result):
                    result = FloatingArray(result, result_mask)

                na_count = result_mask.sum()
            else:
                na_count = isna(result).sum()
    else:
        result = values
        if values.dtype == np.object_:
            na_count = parsers.sanitize_objects(values, na_values)

    if result.dtype == np.object_ and try_num_bool:
        # numeric conversion failed; try booleans (with user-supplied
        # true/false tokens)
        result, bool_mask = libops.maybe_convert_bool(
            np.asarray(values),
            true_values=self.true_values,
            false_values=self.false_values,
            convert_to_masked_nullable=non_default_dtype_backend,  # type: ignore[arg-type] # noqa
        )
        if result.dtype == np.bool_ and non_default_dtype_backend:
            if bool_mask is None:
                bool_mask = np.zeros(result.shape, dtype=np.bool_)
            result = BooleanArray(result, bool_mask)
        elif result.dtype == np.object_ and non_default_dtype_backend:
            # read_excel sends array of datetime objects
            inferred_type = lib.infer_dtype(result)
            if inferred_type != "datetime":
                result = StringDtype().construct_array_type()._from_sequence(values)

    if dtype_backend == "pyarrow":
        pa = import_optional_dependency("pyarrow")
        if isinstance(result, np.ndarray):
            result = ArrowExtensionArray(pa.array(result, from_pandas=True))
        else:
            # ExtensionArray
            result = ArrowExtensionArray(
                pa.array(result.to_numpy(), from_pandas=True)
            )

    return result, na_count
767
+
768
def _cast_types(self, values: ArrayLike, cast_type: DtypeObj, column) -> ArrayLike:
    """
    Cast values to specified type

    Parameters
    ----------
    values : ndarray or ExtensionArray
    cast_type : np.dtype or ExtensionDtype
        dtype to cast values to
    column : string
        column name - used only for error reporting

    Returns
    -------
    converted : ndarray or ExtensionArray
    """
    if isinstance(cast_type, CategoricalDtype):
        known_cats = cast_type.categories is not None

        if not is_object_dtype(values.dtype) and not known_cats:
            # TODO: this is for consistency with
            # c-parser which parses all categories
            # as strings
            values = lib.ensure_string_array(
                values, skipna=False, convert_na_value=False
            )

        cats = Index(values).unique().dropna()
        values = Categorical._from_inferred_categories(
            cats, cats.get_indexer(values), cast_type, true_values=self.true_values
        )

    # use the EA's implementation of casting
    elif isinstance(cast_type, ExtensionDtype):
        array_type = cast_type.construct_array_type()
        try:
            if isinstance(cast_type, BooleanDtype):
                # error: Unexpected keyword argument "true_values" for
                # "_from_sequence_of_strings" of "ExtensionArray"
                return array_type._from_sequence_of_strings(  # type: ignore[call-arg] # noqa:E501
                    values,
                    dtype=cast_type,
                    true_values=self.true_values,
                    false_values=self.false_values,
                )
            else:
                return array_type._from_sequence_of_strings(values, dtype=cast_type)
        except NotImplementedError as err:
            raise NotImplementedError(
                f"Extension Array: {array_type} must implement "
                "_from_sequence_of_strings in order to be used in parser methods"
            ) from err

    elif isinstance(values, ExtensionArray):
        values = values.astype(cast_type, copy=False)
    elif issubclass(cast_type.type, str):
        # TODO: why skipna=True here and False above? some tests depend
        # on it here, but nothing fails if we change it above
        # (as no tests get there as of 2022-12-06)
        values = lib.ensure_string_array(
            values, skipna=True, convert_na_value=False
        )
    else:
        try:
            values = astype_array(values, cast_type, copy=True)
        except ValueError as err:
            raise ValueError(
                f"Unable to convert column {column} to type {cast_type}"
            ) from err
    return values
838
+
839
@overload
def _do_date_conversions(
    self,
    names: Index,
    data: DataFrame,
) -> tuple[Sequence[Hashable] | Index, DataFrame]:
    ...

@overload
def _do_date_conversions(
    self,
    names: Sequence[Hashable],
    data: Mapping[Hashable, ArrayLike],
) -> tuple[Sequence[Hashable], Mapping[Hashable, ArrayLike]]:
    ...

def _do_date_conversions(
    self,
    names: Sequence[Hashable] | Index,
    data: Mapping[Hashable, ArrayLike] | DataFrame,
) -> tuple[Sequence[Hashable] | Index, Mapping[Hashable, ArrayLike] | DataFrame]:
    """
    Run the configured parse_dates conversion over ``data``.

    Returns the (possibly updated) column names and data; both may
    change when parse_dates combines multiple columns into one.
    """
    if self.parse_dates is not None:
        data, names = _process_date_conversion(
            data,
            self._date_conv,
            self.parse_dates,
            self.index_col,
            self.index_names,
            names,
            keep_date_col=self.keep_date_col,
            dtype_backend=self.dtype_backend,
        )

    return names, data
875
+
876
+ def _check_data_length(
877
+ self,
878
+ columns: Sequence[Hashable],
879
+ data: Sequence[ArrayLike],
880
+ ) -> None:
881
+ """Checks if length of data is equal to length of column names.
882
+
883
+ One set of trailing commas is allowed. self.index_col not False
884
+ results in a ParserError previously when lengths do not match.
885
+
886
+ Parameters
887
+ ----------
888
+ columns: list of column names
889
+ data: list of array-likes containing the data column-wise.
890
+ """
891
+ if not self.index_col and len(columns) != len(data) and columns:
892
+ empty_str = is_object_dtype(data[-1]) and data[-1] == ""
893
+ # error: No overload variant of "__ror__" of "ndarray" matches
894
+ # argument type "ExtensionArray"
895
+ empty_str_or_na = empty_str | isna(data[-1]) # type: ignore[operator]
896
+ if len(columns) == len(data) - 1 and np.all(empty_str_or_na):
897
+ return
898
+ warnings.warn(
899
+ "Length of header or names does not match length of data. This leads "
900
+ "to a loss of data with index_col=False.",
901
+ ParserWarning,
902
+ stacklevel=find_stack_level(),
903
+ )
904
+
905
+ @overload
906
+ def _evaluate_usecols(
907
+ self,
908
+ usecols: set[int] | Callable[[Hashable], object],
909
+ names: Sequence[Hashable],
910
+ ) -> set[int]:
911
+ ...
912
+
913
+ @overload
914
+ def _evaluate_usecols(
915
+ self, usecols: set[str], names: Sequence[Hashable]
916
+ ) -> set[str]:
917
+ ...
918
+
919
+ def _evaluate_usecols(
920
+ self,
921
+ usecols: Callable[[Hashable], object] | set[str] | set[int],
922
+ names: Sequence[Hashable],
923
+ ) -> set[str] | set[int]:
924
+ """
925
+ Check whether or not the 'usecols' parameter
926
+ is a callable. If so, enumerates the 'names'
927
+ parameter and returns a set of indices for
928
+ each entry in 'names' that evaluates to True.
929
+ If not a callable, returns 'usecols'.
930
+ """
931
+ if callable(usecols):
932
+ return {i for i, name in enumerate(names) if usecols(name)}
933
+ return usecols
934
+
935
+ def _validate_usecols_names(self, usecols, names):
936
+ """
937
+ Validates that all usecols are present in a given
938
+ list of names. If not, raise a ValueError that
939
+ shows what usecols are missing.
940
+
941
+ Parameters
942
+ ----------
943
+ usecols : iterable of usecols
944
+ The columns to validate are present in names.
945
+ names : iterable of names
946
+ The column names to check against.
947
+
948
+ Returns
949
+ -------
950
+ usecols : iterable of usecols
951
+ The `usecols` parameter if the validation succeeds.
952
+
953
+ Raises
954
+ ------
955
+ ValueError : Columns were missing. Error message will list them.
956
+ """
957
+ missing = [c for c in usecols if c not in names]
958
+ if len(missing) > 0:
959
+ raise ValueError(
960
+ f"Usecols do not match columns, columns expected but not found: "
961
+ f"{missing}"
962
+ )
963
+
964
+ return usecols
965
+
966
+ def _validate_usecols_arg(self, usecols):
967
+ """
968
+ Validate the 'usecols' parameter.
969
+
970
+ Checks whether or not the 'usecols' parameter contains all integers
971
+ (column selection by index), strings (column by name) or is a callable.
972
+ Raises a ValueError if that is not the case.
973
+
974
+ Parameters
975
+ ----------
976
+ usecols : list-like, callable, or None
977
+ List of columns to use when parsing or a callable that can be used
978
+ to filter a list of table columns.
979
+
980
+ Returns
981
+ -------
982
+ usecols_tuple : tuple
983
+ A tuple of (verified_usecols, usecols_dtype).
984
+
985
+ 'verified_usecols' is either a set if an array-like is passed in or
986
+ 'usecols' if a callable or None is passed in.
987
+
988
+ 'usecols_dtype` is the inferred dtype of 'usecols' if an array-like
989
+ is passed in or None if a callable or None is passed in.
990
+ """
991
+ msg = (
992
+ "'usecols' must either be list-like of all strings, all unicode, "
993
+ "all integers or a callable."
994
+ )
995
+ if usecols is not None:
996
+ if callable(usecols):
997
+ return usecols, None
998
+
999
+ if not is_list_like(usecols):
1000
+ # see gh-20529
1001
+ #
1002
+ # Ensure it is iterable container but not string.
1003
+ raise ValueError(msg)
1004
+
1005
+ usecols_dtype = lib.infer_dtype(usecols, skipna=False)
1006
+
1007
+ if usecols_dtype not in ("empty", "integer", "string"):
1008
+ raise ValueError(msg)
1009
+
1010
+ usecols = set(usecols)
1011
+
1012
+ return usecols, usecols_dtype
1013
+ return usecols, None
1014
+
1015
def _clean_index_names(self, columns, index_col) -> tuple[list | None, list, list]:
    """
    Resolve index_col entries (names or positions) against ``columns``.

    Returns (index_names, remaining columns, positional index_col).
    Columns used as index levels are removed from the returned columns;
    names that were unnamed placeholders are replaced with None.
    """
    if not is_index_col(index_col):
        return None, columns, index_col

    columns = list(columns)

    # In case of no rows and multiindex columns we have to set index_names to
    # list of Nones GH#38292
    if not columns:
        return [None] * len(index_col), columns, index_col

    cp_cols = list(columns)
    index_names: list[str | int | None] = []

    # don't mutate
    index_col = list(index_col)

    for i, c in enumerate(index_col):
        if isinstance(c, str):
            # name entry: record it and translate to a position
            index_names.append(c)
            for j, name in enumerate(cp_cols):
                if name == c:
                    index_col[i] = j
                    columns.remove(name)
                    break
        else:
            # positional entry: look the name up
            name = cp_cols[c]
            columns.remove(name)
            index_names.append(name)

    # Only clean index names that were placeholders.
    for i, name in enumerate(index_names):
        if isinstance(name, str) and name in self.unnamed_cols:
            index_names[i] = None

    return index_names, columns, index_col
1051
+
1052
+ def _get_empty_meta(
1053
+ self, columns, index_col, index_names, dtype: DtypeArg | None = None
1054
+ ):
1055
+ columns = list(columns)
1056
+
1057
+ # Convert `dtype` to a defaultdict of some kind.
1058
+ # This will enable us to write `dtype[col_name]`
1059
+ # without worrying about KeyError issues later on.
1060
+ dtype_dict: defaultdict[Hashable, Any]
1061
+ if not is_dict_like(dtype):
1062
+ # if dtype == None, default will be object.
1063
+ default_dtype = dtype or object
1064
+ dtype_dict = defaultdict(lambda: default_dtype)
1065
+ else:
1066
+ dtype = cast(dict, dtype)
1067
+ dtype_dict = defaultdict(
1068
+ lambda: object,
1069
+ {columns[k] if is_integer(k) else k: v for k, v in dtype.items()},
1070
+ )
1071
+
1072
+ # Even though we have no data, the "index" of the empty DataFrame
1073
+ # could for example still be an empty MultiIndex. Thus, we need to
1074
+ # check whether we have any index columns specified, via either:
1075
+ #
1076
+ # 1) index_col (column indices)
1077
+ # 2) index_names (column names)
1078
+ #
1079
+ # Both must be non-null to ensure a successful construction. Otherwise,
1080
+ # we have to create a generic empty Index.
1081
+ index: Index
1082
+ if (index_col is None or index_col is False) or index_names is None:
1083
+ index = default_index(0)
1084
+ else:
1085
+ data = [Series([], dtype=dtype_dict[name]) for name in index_names]
1086
+ index = ensure_index_from_sequences(data, names=index_names)
1087
+ index_col.sort()
1088
+
1089
+ for i, n in enumerate(index_col):
1090
+ columns.pop(n - i)
1091
+
1092
+ col_dict = {
1093
+ col_name: Series([], dtype=dtype_dict[col_name]) for col_name in columns
1094
+ }
1095
+
1096
+ return index, columns, col_dict
1097
+
1098
+
1099
def _make_date_converter(
    date_parser=lib.no_default,
    dayfirst: bool = False,
    cache_dates: bool = True,
    date_format: dict[Hashable, str] | str | None = None,
):
    """
    Build the callable used to turn raw column arrays into datetimes.

    ``date_parser`` is the deprecated user-supplied parser; when it is not
    given, ``to_datetime`` with ``date_format`` is used instead. The
    returned converter accepts one or more column arrays plus the column
    label (keyword-only ``col``).
    """
    if date_parser is not lib.no_default:
        warnings.warn(
            "The argument 'date_parser' is deprecated and will "
            "be removed in a future version. "
            "Please use 'date_format' instead, or read your data in as 'object' dtype "
            "and then call 'to_datetime'.",
            FutureWarning,
            stacklevel=find_stack_level(),
        )
    if date_parser is not lib.no_default and date_format is not None:
        raise TypeError("Cannot use both 'date_parser' and 'date_format'")

    def unpack_if_single_element(arg):
        # NumPy 1.25 deprecation: https://github.com/numpy/numpy/pull/10615
        if isinstance(arg, np.ndarray) and arg.ndim == 1 and len(arg) == 1:
            return arg[0]
        return arg

    def converter(*date_cols, col: Hashable):
        # Already datetime64/timedelta64: nothing to convert.
        if len(date_cols) == 1 and date_cols[0].dtype.kind in "Mm":
            return date_cols[0]

        if date_parser is lib.no_default:
            strs = parsing.concat_date_cols(date_cols)
            # date_format may be a per-column mapping or a single format.
            date_fmt = (
                date_format.get(col) if isinstance(date_format, dict) else date_format
            )

            result = tools.to_datetime(
                ensure_object(strs),
                format=date_fmt,
                utc=False,
                dayfirst=dayfirst,
                errors="ignore",
                cache=cache_dates,
            )
            if isinstance(result, DatetimeIndex):
                arr = result.to_numpy()
                arr.flags.writeable = True
                return arr
            return result._values
        else:
            try:
                result = tools.to_datetime(
                    date_parser(*(unpack_if_single_element(arg) for arg in date_cols)),
                    errors="ignore",
                    cache=cache_dates,
                )
                if isinstance(result, datetime.datetime):
                    # The parser handled scalars; force the row-wise
                    # fallback in the except branch below.
                    raise Exception("scalar parser")
                return result
            except Exception:
                return tools.to_datetime(
                    parsing.try_parse_dates(
                        parsing.concat_date_cols(date_cols),
                        parser=date_parser,
                    ),
                    errors="ignore",
                )

    return converter
1166
+
1167
+
1168
# Default values for the keyword arguments shared by the parser engines;
# user-supplied read_csv/read_table keywords are merged over this mapping.
parser_defaults = {
    "delimiter": None,
    "escapechar": None,
    "quotechar": '"',
    "quoting": csv.QUOTE_MINIMAL,
    "doublequote": True,
    "skipinitialspace": False,
    "lineterminator": None,
    "header": "infer",
    "index_col": None,
    "names": None,
    "skiprows": None,
    "skipfooter": 0,
    "nrows": None,
    "na_values": None,
    "keep_default_na": True,
    "true_values": None,
    "false_values": None,
    "converters": None,
    "dtype": None,
    "cache_dates": True,
    "thousands": None,
    "comment": None,
    "decimal": ".",
    # 'engine': 'c',
    "parse_dates": False,
    "keep_date_col": False,
    "dayfirst": False,
    "date_parser": lib.no_default,
    "date_format": None,
    "usecols": None,
    # 'iterator': False,
    "chunksize": None,
    "verbose": False,
    "encoding": None,
    "compression": None,
    "skip_blank_lines": True,
    "encoding_errors": "strict",
    "on_bad_lines": ParserBase.BadLineHandleMethod.ERROR,
    "dtype_backend": lib.no_default,
}
1209
+
1210
+
1211
def _process_date_conversion(
    data_dict,
    converter: Callable,
    parse_spec,
    index_col,
    index_names,
    columns,
    keep_date_col: bool = False,
    dtype_backend=lib.no_default,
):
    """
    Apply ``converter`` to the columns selected by ``parse_spec``.

    ``parse_spec`` mirrors read_csv's ``parse_dates``: a list of columns /
    column groups, or a dict mapping a new column name to a group of
    source columns. ``data_dict`` is updated in place; the function
    returns the (possibly same) data dict and the new column ordering.
    """
    def _isindex(colspec):
        # Index columns are converted elsewhere; skip them here.
        return (isinstance(index_col, list) and colspec in index_col) or (
            isinstance(index_names, list) and colspec in index_names
        )

    new_cols = []
    new_data = {}

    orig_names = columns
    columns = list(columns)

    date_cols = set()

    if parse_spec is None or isinstance(parse_spec, bool):
        return data_dict, columns

    if isinstance(parse_spec, list):
        # list of column lists
        for colspec in parse_spec:
            if is_scalar(colspec) or isinstance(colspec, tuple):
                if isinstance(colspec, int) and colspec not in data_dict:
                    colspec = orig_names[colspec]
                if _isindex(colspec):
                    continue
                elif dtype_backend == "pyarrow":
                    import pyarrow as pa

                    # Skip columns pyarrow already parsed as dates.
                    dtype = data_dict[colspec].dtype
                    if isinstance(dtype, ArrowDtype) and (
                        pa.types.is_timestamp(dtype.pyarrow_dtype)
                        or pa.types.is_date(dtype.pyarrow_dtype)
                    ):
                        continue

                # Pyarrow engine returns Series which we need to convert to
                # numpy array before converter, its a no-op for other parsers
                data_dict[colspec] = converter(
                    np.asarray(data_dict[colspec]), col=colspec
                )
            else:
                new_name, col, old_names = _try_convert_dates(
                    converter, colspec, data_dict, orig_names
                )
                if new_name in data_dict:
                    raise ValueError(f"New date column already in dict {new_name}")
                new_data[new_name] = col
                new_cols.append(new_name)
                date_cols.update(old_names)

    elif isinstance(parse_spec, dict):
        # dict of new name to column list
        for new_name, colspec in parse_spec.items():
            if new_name in data_dict:
                raise ValueError(f"Date column {new_name} already in dict")

            _, col, old_names = _try_convert_dates(
                converter,
                colspec,
                data_dict,
                orig_names,
                target_name=new_name,
            )

            new_data[new_name] = col

            # If original column can be converted to date we keep the converted values
            # This can only happen if values are from single column
            if len(colspec) == 1:
                new_data[colspec[0]] = col

            new_cols.append(new_name)
            date_cols.update(old_names)

    data_dict.update(new_data)
    new_cols.extend(columns)

    if not keep_date_col:
        # Drop the source columns that were merged into new date columns.
        for c in list(date_cols):
            data_dict.pop(c)
            new_cols.remove(c)

    return data_dict, new_cols
1303
+
1304
+
1305
+ def _try_convert_dates(
1306
+ parser: Callable, colspec, data_dict, columns, target_name: str | None = None
1307
+ ):
1308
+ colset = set(columns)
1309
+ colnames = []
1310
+
1311
+ for c in colspec:
1312
+ if c in colset:
1313
+ colnames.append(c)
1314
+ elif isinstance(c, int) and c not in columns:
1315
+ colnames.append(columns[c])
1316
+ else:
1317
+ colnames.append(c)
1318
+
1319
+ new_name: tuple | str
1320
+ if all(isinstance(x, tuple) for x in colnames):
1321
+ new_name = tuple(map("_".join, zip(*colnames)))
1322
+ else:
1323
+ new_name = "_".join([str(x) for x in colnames])
1324
+ to_parse = [np.asarray(data_dict[c]) for c in colnames if c in data_dict]
1325
+
1326
+ new_col = parser(*to_parse, col=new_name if target_name is None else target_name)
1327
+ return new_name, new_col, colnames
1328
+
1329
+
1330
+ def _get_na_values(col, na_values, na_fvalues, keep_default_na):
1331
+ """
1332
+ Get the NaN values for a given column.
1333
+
1334
+ Parameters
1335
+ ----------
1336
+ col : str
1337
+ The name of the column.
1338
+ na_values : array-like, dict
1339
+ The object listing the NaN values as strings.
1340
+ na_fvalues : array-like, dict
1341
+ The object listing the NaN values as floats.
1342
+ keep_default_na : bool
1343
+ If `na_values` is a dict, and the column is not mapped in the
1344
+ dictionary, whether to return the default NaN values or the empty set.
1345
+
1346
+ Returns
1347
+ -------
1348
+ nan_tuple : A length-two tuple composed of
1349
+
1350
+ 1) na_values : the string NaN values for that column.
1351
+ 2) na_fvalues : the float NaN values for that column.
1352
+ """
1353
+ if isinstance(na_values, dict):
1354
+ if col in na_values:
1355
+ return na_values[col], na_fvalues[col]
1356
+ else:
1357
+ if keep_default_na:
1358
+ return STR_NA_VALUES, set()
1359
+
1360
+ return set(), set()
1361
+ else:
1362
+ return na_values, na_fvalues
1363
+
1364
+
1365
+ def _validate_parse_dates_arg(parse_dates):
1366
+ """
1367
+ Check whether or not the 'parse_dates' parameter
1368
+ is a non-boolean scalar. Raises a ValueError if
1369
+ that is the case.
1370
+ """
1371
+ msg = (
1372
+ "Only booleans, lists, and dictionaries are accepted "
1373
+ "for the 'parse_dates' parameter"
1374
+ )
1375
+
1376
+ if parse_dates is not None:
1377
+ if is_scalar(parse_dates):
1378
+ if not lib.is_bool(parse_dates):
1379
+ raise TypeError(msg)
1380
+
1381
+ elif not isinstance(parse_dates, (list, dict)):
1382
+ raise TypeError(msg)
1383
+
1384
+ return parse_dates
1385
+
1386
+
1387
def is_index_col(col) -> bool:
    """Return True when *col* is a usable index-column specification."""
    return not (col is None or col is False)
videochat2/lib/python3.10/site-packages/pandas/io/parsers/c_parser_wrapper.py ADDED
@@ -0,0 +1,423 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ from collections import defaultdict
4
+ from typing import (
5
+ TYPE_CHECKING,
6
+ Hashable,
7
+ Mapping,
8
+ Sequence,
9
+ )
10
+ import warnings
11
+
12
+ import numpy as np
13
+
14
+ from pandas._libs import (
15
+ lib,
16
+ parsers,
17
+ )
18
+ from pandas._typing import (
19
+ ArrayLike,
20
+ DtypeArg,
21
+ DtypeObj,
22
+ ReadCsvBuffer,
23
+ )
24
+ from pandas.compat._optional import import_optional_dependency
25
+ from pandas.errors import DtypeWarning
26
+ from pandas.util._exceptions import find_stack_level
27
+
28
+ from pandas.core.dtypes.common import (
29
+ is_categorical_dtype,
30
+ pandas_dtype,
31
+ )
32
+ from pandas.core.dtypes.concat import (
33
+ concat_compat,
34
+ union_categoricals,
35
+ )
36
+
37
+ from pandas.core.indexes.api import ensure_index_from_sequences
38
+
39
+ from pandas.io.common import (
40
+ dedup_names,
41
+ is_potential_multi_index,
42
+ )
43
+ from pandas.io.parsers.base_parser import (
44
+ ParserBase,
45
+ ParserError,
46
+ is_index_col,
47
+ )
48
+
49
+ if TYPE_CHECKING:
50
+ from pandas import (
51
+ Index,
52
+ MultiIndex,
53
+ )
54
+
55
+
56
class CParserWrapper(ParserBase):
    # Whether the C reader consumes the file in internal chunks
    # (read_csv's `low_memory` option).
    low_memory: bool
    _reader: parsers.TextReader

    def __init__(self, src: ReadCsvBuffer[str], **kwds) -> None:
        """Wrap the C ``TextReader`` engine, wiring read_csv keywords into it."""
        super().__init__(kwds)
        self.kwds = kwds
        kwds = kwds.copy()

        self.low_memory = kwds.pop("low_memory", False)

        # #2442
        # error: Cannot determine type of 'index_col'
        kwds["allow_leading_cols"] = (
            self.index_col is not False  # type: ignore[has-type]
        )

        # GH20529, validate usecol arg before TextReader
        kwds["usecols"] = self.usecols

        # Have to pass int, would break tests using TextReader directly otherwise :(
        kwds["on_bad_lines"] = self.on_bad_lines.value

        # These keywords are handled by the Python-level IO layer, not by
        # the C reader.
        for key in (
            "storage_options",
            "encoding",
            "memory_map",
            "compression",
        ):
            kwds.pop(key, None)

        kwds["dtype"] = ensure_dtype_objs(kwds.get("dtype", None))
        if "dtype_backend" not in kwds or kwds["dtype_backend"] is lib.no_default:
            kwds["dtype_backend"] = "numpy"
        if kwds["dtype_backend"] == "pyarrow":
            # Fail here loudly instead of in cython after reading
            import_optional_dependency("pyarrow")
        self._reader = parsers.TextReader(src, **kwds)

        self.unnamed_cols = self._reader.unnamed_cols

        # error: Cannot determine type of 'names'
        passed_names = self.names is None  # type: ignore[has-type]

        if self._reader.header is None:
            self.names = None
        else:
            # error: Cannot determine type of 'names'
            # error: Cannot determine type of 'index_names'
            (
                self.names,  # type: ignore[has-type]
                self.index_names,
                self.col_names,
                passed_names,
            ) = self._extract_multi_indexer_columns(
                self._reader.header,
                self.index_names,  # type: ignore[has-type]
                passed_names,
            )

        # error: Cannot determine type of 'names'
        if self.names is None:  # type: ignore[has-type]
            self.names = list(range(self._reader.table_width))

        # gh-9755
        #
        # need to set orig_names here first
        # so that proper indexing can be done
        # with _set_noconvert_columns
        #
        # once names has been filtered, we will
        # then set orig_names again to names
        # error: Cannot determine type of 'names'
        self.orig_names = self.names[:]  # type: ignore[has-type]

        if self.usecols:
            usecols = self._evaluate_usecols(self.usecols, self.orig_names)

            # GH 14671
            # assert for mypy, orig_names is List or None, None would error in issubset
            assert self.orig_names is not None
            if self.usecols_dtype == "string" and not set(usecols).issubset(
                self.orig_names
            ):
                self._validate_usecols_names(usecols, self.orig_names)

            # error: Cannot determine type of 'names'
            if len(self.names) > len(usecols):  # type: ignore[has-type]
                # error: Cannot determine type of 'names'
                self.names = [  # type: ignore[has-type]
                    n
                    # error: Cannot determine type of 'names'
                    for i, n in enumerate(self.names)  # type: ignore[has-type]
                    if (i in usecols or n in usecols)
                ]

            # error: Cannot determine type of 'names'
            if len(self.names) < len(usecols):  # type: ignore[has-type]
                # error: Cannot determine type of 'names'
                self._validate_usecols_names(
                    usecols,
                    self.names,  # type: ignore[has-type]
                )

        # error: Cannot determine type of 'names'
        self._validate_parse_dates_presence(self.names)  # type: ignore[has-type]
        self._set_noconvert_columns()

        # error: Cannot determine type of 'names'
        self.orig_names = self.names  # type: ignore[has-type]

        if not self._has_complex_date_col:
            # error: Cannot determine type of 'index_col'
            if self._reader.leading_cols == 0 and is_index_col(
                self.index_col  # type: ignore[has-type]
            ):
                self._name_processed = True
                (
                    index_names,
                    # error: Cannot determine type of 'names'
                    self.names,  # type: ignore[has-type]
                    self.index_col,
                ) = self._clean_index_names(
                    # error: Cannot determine type of 'names'
                    self.names,  # type: ignore[has-type]
                    # error: Cannot determine type of 'index_col'
                    self.index_col,  # type: ignore[has-type]
                )

                if self.index_names is None:
                    self.index_names = index_names

            if self._reader.header is None and not passed_names:
                assert self.index_names is not None
                self.index_names = [None] * len(self.index_names)

        # Leading columns in the parsed data become an implicit index.
        self._implicit_index = self._reader.leading_cols > 0
193
+
194
+ def close(self) -> None:
195
+ # close handles opened by C parser
196
+ try:
197
+ self._reader.close()
198
+ except ValueError:
199
+ pass
200
+
201
    def _set_noconvert_columns(self) -> None:
        """
        Set the columns that should not undergo dtype conversions.

        Currently, any column that is involved with date parsing will not
        undergo such conversions.
        """
        assert self.orig_names is not None
        # error: Cannot determine type of 'names'

        # much faster than using orig_names.index(x) xref GH#44106
        names_dict = {x: i for i, x in enumerate(self.orig_names)}
        col_indices = [names_dict[x] for x in self.names]  # type: ignore[has-type]
        # error: Cannot determine type of 'names'
        noconvert_columns = self._set_noconvert_dtype_columns(
            col_indices,
            self.names,  # type: ignore[has-type]
        )
        # Tell the C reader to leave these columns untouched.
        for col in noconvert_columns:
            self._reader.set_noconvert(col)
221
+
222
    def read(
        self,
        nrows: int | None = None,
    ) -> tuple[
        Index | MultiIndex | None,
        Sequence[Hashable] | MultiIndex,
        Mapping[Hashable, ArrayLike],
    ]:
        """
        Read up to ``nrows`` rows via the C reader.

        Returns a (index, column_names, column-dict) triple; an exhausted
        reader on the first call yields an empty result instead of raising.
        """
        index: Index | MultiIndex | None
        column_names: Sequence[Hashable] | MultiIndex
        try:
            if self.low_memory:
                chunks = self._reader.read_low_memory(nrows)
                # destructive to chunks
                data = _concatenate_chunks(chunks)

            else:
                data = self._reader.read(nrows)
        except StopIteration:
            if self._first_chunk:
                # Empty input: synthesize a zero-row result.
                self._first_chunk = False
                names = dedup_names(
                    self.orig_names,
                    is_potential_multi_index(self.orig_names, self.index_col),
                )
                index, columns, col_dict = self._get_empty_meta(
                    names,
                    self.index_col,
                    self.index_names,
                    dtype=self.kwds.get("dtype"),
                )
                columns = self._maybe_make_multi_index_columns(columns, self.col_names)

                if self.usecols is not None:
                    columns = self._filter_usecols(columns)

                col_dict = {k: v for k, v in col_dict.items() if k in columns}

                return index, columns, col_dict

            else:
                self.close()
                raise

        # Done with first read, next time raise StopIteration
        self._first_chunk = False

        # error: Cannot determine type of 'names'
        names = self.names  # type: ignore[has-type]

        if self._reader.leading_cols:
            if self._has_complex_date_col:
                raise NotImplementedError("file structure not yet supported")

            # implicit index, no index names
            arrays = []

            if self.index_col and self._reader.leading_cols != len(self.index_col):
                raise ParserError(
                    "Could not construct index. Requested to use "
                    f"{len(self.index_col)} number of columns, but "
                    f"{self._reader.leading_cols} left to parse."
                )

            # Pop each leading column out of the data dict and build the
            # index from them (optionally date-parsed).
            for i in range(self._reader.leading_cols):
                if self.index_col is None:
                    values = data.pop(i)
                else:
                    values = data.pop(self.index_col[i])

                values = self._maybe_parse_dates(values, i, try_parse_dates=True)
                arrays.append(values)

            index = ensure_index_from_sequences(arrays)

            if self.usecols is not None:
                names = self._filter_usecols(names)

            names = dedup_names(names, is_potential_multi_index(names, self.index_col))

            # rename dict keys
            data_tups = sorted(data.items())
            data = {k: v for k, (i, v) in zip(names, data_tups)}

            column_names, date_data = self._do_date_conversions(names, data)

            # maybe create a mi on the columns
            column_names = self._maybe_make_multi_index_columns(
                column_names, self.col_names
            )

        else:
            # rename dict keys
            data_tups = sorted(data.items())

            # ugh, mutation

            # assert for mypy, orig_names is List or None, None would error in list(...)
            assert self.orig_names is not None
            names = list(self.orig_names)
            names = dedup_names(names, is_potential_multi_index(names, self.index_col))

            if self.usecols is not None:
                names = self._filter_usecols(names)

            # columns as list
            alldata = [x[1] for x in data_tups]
            if self.usecols is None:
                self._check_data_length(names, alldata)

            data = {k: v for k, (i, v) in zip(names, data_tups)}

            names, date_data = self._do_date_conversions(names, data)
            index, column_names = self._make_index(date_data, alldata, names)

        return index, column_names, date_data
338
+
339
+ def _filter_usecols(self, names: Sequence[Hashable]) -> Sequence[Hashable]:
340
+ # hackish
341
+ usecols = self._evaluate_usecols(self.usecols, names)
342
+ if usecols is not None and len(names) != len(usecols):
343
+ names = [
344
+ name for i, name in enumerate(names) if i in usecols or name in usecols
345
+ ]
346
+ return names
347
+
348
+ def _get_index_names(self):
349
+ names = list(self._reader.header[0])
350
+ idx_names = None
351
+
352
+ if self._reader.leading_cols == 0 and self.index_col is not None:
353
+ (idx_names, names, self.index_col) = self._clean_index_names(
354
+ names, self.index_col
355
+ )
356
+
357
+ return names, idx_names
358
+
359
    def _maybe_parse_dates(self, values, index: int, try_parse_dates: bool = True):
        """
        Run date conversion on an implicit index column when that column
        was requested via ``parse_dates``; otherwise return it unchanged.
        """
        if try_parse_dates and self._should_parse_dates(index):
            values = self._date_conv(
                values,
                col=self.index_names[index] if self.index_names is not None else None,
            )
        return values
366
+
367
+
368
+ def _concatenate_chunks(chunks: list[dict[int, ArrayLike]]) -> dict:
369
+ """
370
+ Concatenate chunks of data read with low_memory=True.
371
+
372
+ The tricky part is handling Categoricals, where different chunks
373
+ may have different inferred categories.
374
+ """
375
+ names = list(chunks[0].keys())
376
+ warning_columns = []
377
+
378
+ result: dict = {}
379
+ for name in names:
380
+ arrs = [chunk.pop(name) for chunk in chunks]
381
+ # Check each arr for consistent types.
382
+ dtypes = {a.dtype for a in arrs}
383
+ non_cat_dtypes = {x for x in dtypes if not is_categorical_dtype(x)}
384
+
385
+ dtype = dtypes.pop()
386
+ if is_categorical_dtype(dtype):
387
+ result[name] = union_categoricals(arrs, sort_categories=False)
388
+ else:
389
+ result[name] = concat_compat(arrs)
390
+ if len(non_cat_dtypes) > 1 and result[name].dtype == np.dtype(object):
391
+ warning_columns.append(str(name))
392
+
393
+ if warning_columns:
394
+ warning_names = ",".join(warning_columns)
395
+ warning_message = " ".join(
396
+ [
397
+ f"Columns ({warning_names}) have mixed types. "
398
+ f"Specify dtype option on import or set low_memory=False."
399
+ ]
400
+ )
401
+ warnings.warn(warning_message, DtypeWarning, stacklevel=find_stack_level())
402
+ return result
403
+
404
+
405
+ def ensure_dtype_objs(
406
+ dtype: DtypeArg | dict[Hashable, DtypeArg] | None
407
+ ) -> DtypeObj | dict[Hashable, DtypeObj] | None:
408
+ """
409
+ Ensure we have either None, a dtype object, or a dictionary mapping to
410
+ dtype objects.
411
+ """
412
+ if isinstance(dtype, defaultdict):
413
+ # "None" not callable [misc]
414
+ default_dtype = pandas_dtype(dtype.default_factory()) # type: ignore[misc]
415
+ dtype_converted: defaultdict = defaultdict(lambda: default_dtype)
416
+ for key in dtype.keys():
417
+ dtype_converted[key] = pandas_dtype(dtype[key])
418
+ return dtype_converted
419
+ elif isinstance(dtype, dict):
420
+ return {k: pandas_dtype(dtype[k]) for k in dtype}
421
+ elif dtype is not None:
422
+ return pandas_dtype(dtype)
423
+ return dtype
videochat2/lib/python3.10/site-packages/pandas/io/parsers/python_parser.py ADDED
@@ -0,0 +1,1351 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ from collections import (
4
+ abc,
5
+ defaultdict,
6
+ )
7
+ import csv
8
+ from io import StringIO
9
+ import re
10
+ import sys
11
+ from typing import (
12
+ IO,
13
+ TYPE_CHECKING,
14
+ DefaultDict,
15
+ Hashable,
16
+ Iterator,
17
+ List,
18
+ Literal,
19
+ Mapping,
20
+ Sequence,
21
+ cast,
22
+ )
23
+
24
+ import numpy as np
25
+
26
+ from pandas._libs import lib
27
+ from pandas._typing import (
28
+ ArrayLike,
29
+ ReadCsvBuffer,
30
+ Scalar,
31
+ )
32
+ from pandas.errors import (
33
+ EmptyDataError,
34
+ ParserError,
35
+ )
36
+
37
+ from pandas.core.dtypes.common import is_integer
38
+ from pandas.core.dtypes.inference import is_dict_like
39
+
40
+ from pandas.io.common import (
41
+ dedup_names,
42
+ is_potential_multi_index,
43
+ )
44
+ from pandas.io.parsers.base_parser import (
45
+ ParserBase,
46
+ parser_defaults,
47
+ )
48
+
49
+ if TYPE_CHECKING:
50
+ from pandas import (
51
+ Index,
52
+ MultiIndex,
53
+ )
54
+
55
+ # BOM character (byte order mark)
56
+ # This exists at the beginning of a file to indicate endianness
57
+ # of a file (stream). Unfortunately, this marker screws up parsing,
58
+ # so we need to remove it if we see it.
59
+ _BOM = "\ufeff"
60
+
61
+
62
class PythonParser(ParserBase):
    def __init__(self, f: ReadCsvBuffer[str] | list, **kwds) -> None:
        """
        Workhorse function for processing nested list into DataFrame
        """
        super().__init__(kwds)

        self.data: Iterator[str] | None = None
        self.buf: list = []
        # `pos` tracks the physical line in the source; `line_pos` the
        # logical (post skip/blank-filtering) line.
        self.pos = 0
        self.line_pos = 0

        self.skiprows = kwds["skiprows"]

        # Normalize skiprows into a predicate over row numbers.
        if callable(self.skiprows):
            self.skipfunc = self.skiprows
        else:
            self.skipfunc = lambda x: x in self.skiprows

        self.skipfooter = _validate_skipfooter_arg(kwds["skipfooter"])
        self.delimiter = kwds["delimiter"]

        self.quotechar = kwds["quotechar"]
        if isinstance(self.quotechar, str):
            self.quotechar = str(self.quotechar)

        self.escapechar = kwds["escapechar"]
        self.doublequote = kwds["doublequote"]
        self.skipinitialspace = kwds["skipinitialspace"]
        self.lineterminator = kwds["lineterminator"]
        self.quoting = kwds["quoting"]
        self.skip_blank_lines = kwds["skip_blank_lines"]

        self.names_passed = kwds["names"] or None

        self.has_index_names = False
        if "has_index_names" in kwds:
            self.has_index_names = kwds["has_index_names"]

        self.verbose = kwds["verbose"]

        self.thousands = kwds["thousands"]
        self.decimal = kwds["decimal"]

        self.comment = kwds["comment"]

        # Set self.data to something that can read lines.
        if isinstance(f, list):
            # read_excel: f is a list
            self.data = cast(Iterator[str], f)
        else:
            assert hasattr(f, "readline")
            self._make_reader(f)

        # Get columns in two steps: infer from data, then
        # infer column indices from self.usecols if it is specified.
        self._col_indices: list[int] | None = None
        columns: list[list[Scalar | None]]
        (
            columns,
            self.num_original_columns,
            self.unnamed_cols,
        ) = self._infer_columns()

        # Now self.columns has the set of columns that we will process.
        # The original set is stored in self.original_columns.
        # error: Cannot determine type of 'index_names'
        (
            self.columns,
            self.index_names,
            self.col_names,
            _,
        ) = self._extract_multi_indexer_columns(
            columns,
            self.index_names,  # type: ignore[has-type]
        )

        # get popped off for index
        self.orig_names: list[Hashable] = list(self.columns)

        # needs to be cleaned/refactored
        # multiple date column thing turning into a real spaghetti factory

        if not self._has_complex_date_col:
            (index_names, self.orig_names, self.columns) = self._get_index_name(
                self.columns
            )
            self._name_processed = True
            if self.index_names is None:
                self.index_names = index_names

        if self._col_indices is None:
            self._col_indices = list(range(len(self.columns)))

        self._parse_date_cols = self._validate_parse_dates_presence(self.columns)
        no_thousands_columns: set[int] | None = None
        if self.parse_dates:
            no_thousands_columns = self._set_noconvert_dtype_columns(
                self._col_indices, self.columns
            )
        self._no_thousands_columns = no_thousands_columns

        if len(self.decimal) != 1:
            raise ValueError("Only length-1 decimal markers supported")

        # Precompile the numeric-literal matcher used during type
        # inference, honoring the configured decimal/thousands markers.
        decimal = re.escape(self.decimal)
        if self.thousands is None:
            regex = rf"^[\-\+]?[0-9]*({decimal}[0-9]*)?([0-9]?(E|e)\-?[0-9]+)?$"
        else:
            thousands = re.escape(self.thousands)
            regex = (
                rf"^[\-\+]?([0-9]+{thousands}|[0-9])*({decimal}[0-9]*)?"
                rf"([0-9]?(E|e)\-?[0-9]+)?$"
            )
        self.num = re.compile(regex)
177
+
178
+ def _make_reader(self, f: IO[str] | ReadCsvBuffer[str]) -> None:
179
+ sep = self.delimiter
180
+
181
+ if sep is None or len(sep) == 1:
182
+ if self.lineterminator:
183
+ raise ValueError(
184
+ "Custom line terminators not supported in python parser (yet)"
185
+ )
186
+
187
+ class MyDialect(csv.Dialect):
188
+ delimiter = self.delimiter
189
+ quotechar = self.quotechar
190
+ escapechar = self.escapechar
191
+ doublequote = self.doublequote
192
+ skipinitialspace = self.skipinitialspace
193
+ quoting = self.quoting
194
+ lineterminator = "\n"
195
+
196
+ dia = MyDialect
197
+
198
+ if sep is not None:
199
+ dia.delimiter = sep
200
+ else:
201
+ # attempt to sniff the delimiter from the first valid line,
202
+ # i.e. no comment line and not in skiprows
203
+ line = f.readline()
204
+ lines = self._check_comments([[line]])[0]
205
+ while self.skipfunc(self.pos) or not lines:
206
+ self.pos += 1
207
+ line = f.readline()
208
+ lines = self._check_comments([[line]])[0]
209
+ lines_str = cast(List[str], lines)
210
+
211
+ # since `line` was a string, lines will be a list containing
212
+ # only a single string
213
+ line = lines_str[0]
214
+
215
+ self.pos += 1
216
+ self.line_pos += 1
217
+ sniffed = csv.Sniffer().sniff(line)
218
+ dia.delimiter = sniffed.delimiter
219
+
220
+ # Note: encoding is irrelevant here
221
+ line_rdr = csv.reader(StringIO(line), dialect=dia)
222
+ self.buf.extend(list(line_rdr))
223
+
224
+ # Note: encoding is irrelevant here
225
+ reader = csv.reader(f, dialect=dia, strict=True)
226
+
227
+ else:
228
+
229
+ def _read():
230
+ line = f.readline()
231
+ pat = re.compile(sep)
232
+
233
+ yield pat.split(line.strip())
234
+
235
+ for line in f:
236
+ yield pat.split(line.strip())
237
+
238
+ reader = _read()
239
+
240
+ # error: Incompatible types in assignment (expression has type "_reader",
241
+ # variable has type "Union[IO[Any], RawIOBase, BufferedIOBase, TextIOBase,
242
+ # TextIOWrapper, mmap, None]")
243
+ self.data = reader # type: ignore[assignment]
244
+
245
+ def read(
246
+ self, rows: int | None = None
247
+ ) -> tuple[
248
+ Index | None, Sequence[Hashable] | MultiIndex, Mapping[Hashable, ArrayLike]
249
+ ]:
250
+ try:
251
+ content = self._get_lines(rows)
252
+ except StopIteration:
253
+ if self._first_chunk:
254
+ content = []
255
+ else:
256
+ self.close()
257
+ raise
258
+
259
+ # done with first read, next time raise StopIteration
260
+ self._first_chunk = False
261
+
262
+ columns: Sequence[Hashable] = list(self.orig_names)
263
+ if not len(content): # pragma: no cover
264
+ # DataFrame with the right metadata, even though it's length 0
265
+ # error: Cannot determine type of 'index_col'
266
+ names = dedup_names(
267
+ self.orig_names,
268
+ is_potential_multi_index(
269
+ self.orig_names,
270
+ self.index_col, # type: ignore[has-type]
271
+ ),
272
+ )
273
+ # error: Cannot determine type of 'index_col'
274
+ index, columns, col_dict = self._get_empty_meta(
275
+ names,
276
+ self.index_col, # type: ignore[has-type]
277
+ self.index_names,
278
+ self.dtype,
279
+ )
280
+ conv_columns = self._maybe_make_multi_index_columns(columns, self.col_names)
281
+ return index, conv_columns, col_dict
282
+
283
+ # handle new style for names in index
284
+ count_empty_content_vals = count_empty_vals(content[0])
285
+ indexnamerow = None
286
+ if self.has_index_names and count_empty_content_vals == len(columns):
287
+ indexnamerow = content[0]
288
+ content = content[1:]
289
+
290
+ alldata = self._rows_to_cols(content)
291
+ data, columns = self._exclude_implicit_index(alldata)
292
+
293
+ conv_data = self._convert_data(data)
294
+ columns, conv_data = self._do_date_conversions(columns, conv_data)
295
+
296
+ index, result_columns = self._make_index(
297
+ conv_data, alldata, columns, indexnamerow
298
+ )
299
+
300
+ return index, result_columns, conv_data
301
+
302
+ def _exclude_implicit_index(
303
+ self,
304
+ alldata: list[np.ndarray],
305
+ ) -> tuple[Mapping[Hashable, np.ndarray], Sequence[Hashable]]:
306
+ # error: Cannot determine type of 'index_col'
307
+ names = dedup_names(
308
+ self.orig_names,
309
+ is_potential_multi_index(
310
+ self.orig_names,
311
+ self.index_col, # type: ignore[has-type]
312
+ ),
313
+ )
314
+
315
+ offset = 0
316
+ if self._implicit_index:
317
+ # error: Cannot determine type of 'index_col'
318
+ offset = len(self.index_col) # type: ignore[has-type]
319
+
320
+ len_alldata = len(alldata)
321
+ self._check_data_length(names, alldata)
322
+
323
+ return {
324
+ name: alldata[i + offset] for i, name in enumerate(names) if i < len_alldata
325
+ }, names
326
+
327
+ # legacy
328
+ def get_chunk(
329
+ self, size: int | None = None
330
+ ) -> tuple[
331
+ Index | None, Sequence[Hashable] | MultiIndex, Mapping[Hashable, ArrayLike]
332
+ ]:
333
+ if size is None:
334
+ # error: "PythonParser" has no attribute "chunksize"
335
+ size = self.chunksize # type: ignore[attr-defined]
336
+ return self.read(rows=size)
337
+
338
+ def _convert_data(
339
+ self,
340
+ data: Mapping[Hashable, np.ndarray],
341
+ ) -> Mapping[Hashable, ArrayLike]:
342
+ # apply converters
343
+ clean_conv = self._clean_mapping(self.converters)
344
+ clean_dtypes = self._clean_mapping(self.dtype)
345
+
346
+ # Apply NA values.
347
+ clean_na_values = {}
348
+ clean_na_fvalues = {}
349
+
350
+ if isinstance(self.na_values, dict):
351
+ for col in self.na_values:
352
+ na_value = self.na_values[col]
353
+ na_fvalue = self.na_fvalues[col]
354
+
355
+ if isinstance(col, int) and col not in self.orig_names:
356
+ col = self.orig_names[col]
357
+
358
+ clean_na_values[col] = na_value
359
+ clean_na_fvalues[col] = na_fvalue
360
+ else:
361
+ clean_na_values = self.na_values
362
+ clean_na_fvalues = self.na_fvalues
363
+
364
+ return self._convert_to_ndarrays(
365
+ data,
366
+ clean_na_values,
367
+ clean_na_fvalues,
368
+ self.verbose,
369
+ clean_conv,
370
+ clean_dtypes,
371
+ )
372
+
373
+ def _infer_columns(
374
+ self,
375
+ ) -> tuple[list[list[Scalar | None]], int, set[Scalar | None]]:
376
+ names = self.names
377
+ num_original_columns = 0
378
+ clear_buffer = True
379
+ unnamed_cols: set[Scalar | None] = set()
380
+ self._header_line = None
381
+
382
+ if self.header is not None:
383
+ header = self.header
384
+
385
+ if isinstance(header, (list, tuple, np.ndarray)):
386
+ have_mi_columns = len(header) > 1
387
+ # we have a mi columns, so read an extra line
388
+ if have_mi_columns:
389
+ header = list(header) + [header[-1] + 1]
390
+ else:
391
+ have_mi_columns = False
392
+ header = [header]
393
+
394
+ columns: list[list[Scalar | None]] = []
395
+ for level, hr in enumerate(header):
396
+ try:
397
+ line = self._buffered_line()
398
+
399
+ while self.line_pos <= hr:
400
+ line = self._next_line()
401
+
402
+ except StopIteration as err:
403
+ if 0 < self.line_pos <= hr and (
404
+ not have_mi_columns or hr != header[-1]
405
+ ):
406
+ # If no rows we want to raise a different message and if
407
+ # we have mi columns, the last line is not part of the header
408
+ joi = list(map(str, header[:-1] if have_mi_columns else header))
409
+ msg = f"[{','.join(joi)}], len of {len(joi)}, "
410
+ raise ValueError(
411
+ f"Passed header={msg}"
412
+ f"but only {self.line_pos} lines in file"
413
+ ) from err
414
+
415
+ # We have an empty file, so check
416
+ # if columns are provided. That will
417
+ # serve as the 'line' for parsing
418
+ if have_mi_columns and hr > 0:
419
+ if clear_buffer:
420
+ self._clear_buffer()
421
+ columns.append([None] * len(columns[-1]))
422
+ return columns, num_original_columns, unnamed_cols
423
+
424
+ if not self.names:
425
+ raise EmptyDataError("No columns to parse from file") from err
426
+
427
+ line = self.names[:]
428
+
429
+ this_columns: list[Scalar | None] = []
430
+ this_unnamed_cols = []
431
+
432
+ for i, c in enumerate(line):
433
+ if c == "":
434
+ if have_mi_columns:
435
+ col_name = f"Unnamed: {i}_level_{level}"
436
+ else:
437
+ col_name = f"Unnamed: {i}"
438
+
439
+ this_unnamed_cols.append(i)
440
+ this_columns.append(col_name)
441
+ else:
442
+ this_columns.append(c)
443
+
444
+ if not have_mi_columns:
445
+ counts: DefaultDict = defaultdict(int)
446
+ # Ensure that regular columns are used before unnamed ones
447
+ # to keep given names and mangle unnamed columns
448
+ col_loop_order = [
449
+ i
450
+ for i in range(len(this_columns))
451
+ if i not in this_unnamed_cols
452
+ ] + this_unnamed_cols
453
+
454
+ # TODO: Use pandas.io.common.dedup_names instead (see #50371)
455
+ for i in col_loop_order:
456
+ col = this_columns[i]
457
+ old_col = col
458
+ cur_count = counts[col]
459
+
460
+ if cur_count > 0:
461
+ while cur_count > 0:
462
+ counts[old_col] = cur_count + 1
463
+ col = f"{old_col}.{cur_count}"
464
+ if col in this_columns:
465
+ cur_count += 1
466
+ else:
467
+ cur_count = counts[col]
468
+
469
+ if (
470
+ self.dtype is not None
471
+ and is_dict_like(self.dtype)
472
+ and self.dtype.get(old_col) is not None
473
+ and self.dtype.get(col) is None
474
+ ):
475
+ self.dtype.update({col: self.dtype.get(old_col)})
476
+ this_columns[i] = col
477
+ counts[col] = cur_count + 1
478
+ elif have_mi_columns:
479
+ # if we have grabbed an extra line, but its not in our
480
+ # format so save in the buffer, and create an blank extra
481
+ # line for the rest of the parsing code
482
+ if hr == header[-1]:
483
+ lc = len(this_columns)
484
+ # error: Cannot determine type of 'index_col'
485
+ sic = self.index_col # type: ignore[has-type]
486
+ ic = len(sic) if sic is not None else 0
487
+ unnamed_count = len(this_unnamed_cols)
488
+
489
+ # if wrong number of blanks or no index, not our format
490
+ if (lc != unnamed_count and lc - ic > unnamed_count) or ic == 0:
491
+ clear_buffer = False
492
+ this_columns = [None] * lc
493
+ self.buf = [self.buf[-1]]
494
+
495
+ columns.append(this_columns)
496
+ unnamed_cols.update({this_columns[i] for i in this_unnamed_cols})
497
+
498
+ if len(columns) == 1:
499
+ num_original_columns = len(this_columns)
500
+
501
+ if clear_buffer:
502
+ self._clear_buffer()
503
+
504
+ first_line: list[Scalar] | None
505
+ if names is not None:
506
+ # Read first row after header to check if data are longer
507
+ try:
508
+ first_line = self._next_line()
509
+ except StopIteration:
510
+ first_line = None
511
+
512
+ len_first_data_row = 0 if first_line is None else len(first_line)
513
+
514
+ if len(names) > len(columns[0]) and len(names) > len_first_data_row:
515
+ raise ValueError(
516
+ "Number of passed names did not match "
517
+ "number of header fields in the file"
518
+ )
519
+ if len(columns) > 1:
520
+ raise TypeError("Cannot pass names with multi-index columns")
521
+
522
+ if self.usecols is not None:
523
+ # Set _use_cols. We don't store columns because they are
524
+ # overwritten.
525
+ self._handle_usecols(columns, names, num_original_columns)
526
+ else:
527
+ num_original_columns = len(names)
528
+ if self._col_indices is not None and len(names) != len(
529
+ self._col_indices
530
+ ):
531
+ columns = [[names[i] for i in sorted(self._col_indices)]]
532
+ else:
533
+ columns = [names]
534
+ else:
535
+ columns = self._handle_usecols(
536
+ columns, columns[0], num_original_columns
537
+ )
538
+ else:
539
+ try:
540
+ line = self._buffered_line()
541
+
542
+ except StopIteration as err:
543
+ if not names:
544
+ raise EmptyDataError("No columns to parse from file") from err
545
+
546
+ line = names[:]
547
+
548
+ # Store line, otherwise it is lost for guessing the index
549
+ self._header_line = line
550
+ ncols = len(line)
551
+ num_original_columns = ncols
552
+
553
+ if not names:
554
+ columns = [list(range(ncols))]
555
+ columns = self._handle_usecols(
556
+ columns, columns[0], num_original_columns
557
+ )
558
+ else:
559
+ if self.usecols is None or len(names) >= num_original_columns:
560
+ columns = self._handle_usecols([names], names, num_original_columns)
561
+ num_original_columns = len(names)
562
+ else:
563
+ if not callable(self.usecols) and len(names) != len(self.usecols):
564
+ raise ValueError(
565
+ "Number of passed names did not match number of "
566
+ "header fields in the file"
567
+ )
568
+ # Ignore output but set used columns.
569
+ self._handle_usecols([names], names, ncols)
570
+ columns = [names]
571
+ num_original_columns = ncols
572
+
573
+ return columns, num_original_columns, unnamed_cols
574
+
575
+ def _handle_usecols(
576
+ self,
577
+ columns: list[list[Scalar | None]],
578
+ usecols_key: list[Scalar | None],
579
+ num_original_columns: int,
580
+ ) -> list[list[Scalar | None]]:
581
+ """
582
+ Sets self._col_indices
583
+
584
+ usecols_key is used if there are string usecols.
585
+ """
586
+ col_indices: set[int] | list[int]
587
+ if self.usecols is not None:
588
+ if callable(self.usecols):
589
+ col_indices = self._evaluate_usecols(self.usecols, usecols_key)
590
+ elif any(isinstance(u, str) for u in self.usecols):
591
+ if len(columns) > 1:
592
+ raise ValueError(
593
+ "If using multiple headers, usecols must be integers."
594
+ )
595
+ col_indices = []
596
+
597
+ for col in self.usecols:
598
+ if isinstance(col, str):
599
+ try:
600
+ col_indices.append(usecols_key.index(col))
601
+ except ValueError:
602
+ self._validate_usecols_names(self.usecols, usecols_key)
603
+ else:
604
+ col_indices.append(col)
605
+ else:
606
+ missing_usecols = [
607
+ col for col in self.usecols if col >= num_original_columns
608
+ ]
609
+ if missing_usecols:
610
+ raise ParserError(
611
+ "Defining usecols without of bounds indices is not allowed. "
612
+ f"{missing_usecols} are out of bounds.",
613
+ )
614
+ col_indices = self.usecols
615
+
616
+ columns = [
617
+ [n for i, n in enumerate(column) if i in col_indices]
618
+ for column in columns
619
+ ]
620
+ self._col_indices = sorted(col_indices)
621
+ return columns
622
+
623
+ def _buffered_line(self) -> list[Scalar]:
624
+ """
625
+ Return a line from buffer, filling buffer if required.
626
+ """
627
+ if len(self.buf) > 0:
628
+ return self.buf[0]
629
+ else:
630
+ return self._next_line()
631
+
632
+ def _check_for_bom(self, first_row: list[Scalar]) -> list[Scalar]:
633
+ """
634
+ Checks whether the file begins with the BOM character.
635
+ If it does, remove it. In addition, if there is quoting
636
+ in the field subsequent to the BOM, remove it as well
637
+ because it technically takes place at the beginning of
638
+ the name, not the middle of it.
639
+ """
640
+ # first_row will be a list, so we need to check
641
+ # that that list is not empty before proceeding.
642
+ if not first_row:
643
+ return first_row
644
+
645
+ # The first element of this row is the one that could have the
646
+ # BOM that we want to remove. Check that the first element is a
647
+ # string before proceeding.
648
+ if not isinstance(first_row[0], str):
649
+ return first_row
650
+
651
+ # Check that the string is not empty, as that would
652
+ # obviously not have a BOM at the start of it.
653
+ if not first_row[0]:
654
+ return first_row
655
+
656
+ # Since the string is non-empty, check that it does
657
+ # in fact begin with a BOM.
658
+ first_elt = first_row[0][0]
659
+ if first_elt != _BOM:
660
+ return first_row
661
+
662
+ first_row_bom = first_row[0]
663
+ new_row: str
664
+
665
+ if len(first_row_bom) > 1 and first_row_bom[1] == self.quotechar:
666
+ start = 2
667
+ quote = first_row_bom[1]
668
+ end = first_row_bom[2:].index(quote) + 2
669
+
670
+ # Extract the data between the quotation marks
671
+ new_row = first_row_bom[start:end]
672
+
673
+ # Extract any remaining data after the second
674
+ # quotation mark.
675
+ if len(first_row_bom) > end + 1:
676
+ new_row += first_row_bom[end + 1 :]
677
+
678
+ else:
679
+ # No quotation so just remove BOM from first element
680
+ new_row = first_row_bom[1:]
681
+
682
+ new_row_list: list[Scalar] = [new_row]
683
+ return new_row_list + first_row[1:]
684
+
685
+ def _is_line_empty(self, line: list[Scalar]) -> bool:
686
+ """
687
+ Check if a line is empty or not.
688
+
689
+ Parameters
690
+ ----------
691
+ line : str, array-like
692
+ The line of data to check.
693
+
694
+ Returns
695
+ -------
696
+ boolean : Whether or not the line is empty.
697
+ """
698
+ return not line or all(not x for x in line)
699
+
700
+ def _next_line(self) -> list[Scalar]:
701
+ if isinstance(self.data, list):
702
+ while self.skipfunc(self.pos):
703
+ if self.pos >= len(self.data):
704
+ break
705
+ self.pos += 1
706
+
707
+ while True:
708
+ try:
709
+ line = self._check_comments([self.data[self.pos]])[0]
710
+ self.pos += 1
711
+ # either uncommented or blank to begin with
712
+ if not self.skip_blank_lines and (
713
+ self._is_line_empty(self.data[self.pos - 1]) or line
714
+ ):
715
+ break
716
+ if self.skip_blank_lines:
717
+ ret = self._remove_empty_lines([line])
718
+ if ret:
719
+ line = ret[0]
720
+ break
721
+ except IndexError:
722
+ raise StopIteration
723
+ else:
724
+ while self.skipfunc(self.pos):
725
+ self.pos += 1
726
+ # assert for mypy, data is Iterator[str] or None, would error in next
727
+ assert self.data is not None
728
+ next(self.data)
729
+
730
+ while True:
731
+ orig_line = self._next_iter_line(row_num=self.pos + 1)
732
+ self.pos += 1
733
+
734
+ if orig_line is not None:
735
+ line = self._check_comments([orig_line])[0]
736
+
737
+ if self.skip_blank_lines:
738
+ ret = self._remove_empty_lines([line])
739
+
740
+ if ret:
741
+ line = ret[0]
742
+ break
743
+ elif self._is_line_empty(orig_line) or line:
744
+ break
745
+
746
+ # This was the first line of the file,
747
+ # which could contain the BOM at the
748
+ # beginning of it.
749
+ if self.pos == 1:
750
+ line = self._check_for_bom(line)
751
+
752
+ self.line_pos += 1
753
+ self.buf.append(line)
754
+ return line
755
+
756
+ def _alert_malformed(self, msg: str, row_num: int) -> None:
757
+ """
758
+ Alert a user about a malformed row, depending on value of
759
+ `self.on_bad_lines` enum.
760
+
761
+ If `self.on_bad_lines` is ERROR, the alert will be `ParserError`.
762
+ If `self.on_bad_lines` is WARN, the alert will be printed out.
763
+
764
+ Parameters
765
+ ----------
766
+ msg: str
767
+ The error message to display.
768
+ row_num: int
769
+ The row number where the parsing error occurred.
770
+ Because this row number is displayed, we 1-index,
771
+ even though we 0-index internally.
772
+ """
773
+ if self.on_bad_lines == self.BadLineHandleMethod.ERROR:
774
+ raise ParserError(msg)
775
+ if self.on_bad_lines == self.BadLineHandleMethod.WARN:
776
+ base = f"Skipping line {row_num}: "
777
+ sys.stderr.write(base + msg + "\n")
778
+
779
+ def _next_iter_line(self, row_num: int) -> list[Scalar] | None:
780
+ """
781
+ Wrapper around iterating through `self.data` (CSV source).
782
+
783
+ When a CSV error is raised, we check for specific
784
+ error messages that allow us to customize the
785
+ error message displayed to the user.
786
+
787
+ Parameters
788
+ ----------
789
+ row_num: int
790
+ The row number of the line being parsed.
791
+ """
792
+ try:
793
+ # assert for mypy, data is Iterator[str] or None, would error in next
794
+ assert self.data is not None
795
+ line = next(self.data)
796
+ # for mypy
797
+ assert isinstance(line, list)
798
+ return line
799
+ except csv.Error as e:
800
+ if self.on_bad_lines in (
801
+ self.BadLineHandleMethod.ERROR,
802
+ self.BadLineHandleMethod.WARN,
803
+ ):
804
+ msg = str(e)
805
+
806
+ if "NULL byte" in msg or "line contains NUL" in msg:
807
+ msg = (
808
+ "NULL byte detected. This byte "
809
+ "cannot be processed in Python's "
810
+ "native csv library at the moment, "
811
+ "so please pass in engine='c' instead"
812
+ )
813
+
814
+ if self.skipfooter > 0:
815
+ reason = (
816
+ "Error could possibly be due to "
817
+ "parsing errors in the skipped footer rows "
818
+ "(the skipfooter keyword is only applied "
819
+ "after Python's csv library has parsed "
820
+ "all rows)."
821
+ )
822
+ msg += ". " + reason
823
+
824
+ self._alert_malformed(msg, row_num)
825
+ return None
826
+
827
+ def _check_comments(self, lines: list[list[Scalar]]) -> list[list[Scalar]]:
828
+ if self.comment is None:
829
+ return lines
830
+ ret = []
831
+ for line in lines:
832
+ rl = []
833
+ for x in line:
834
+ if (
835
+ not isinstance(x, str)
836
+ or self.comment not in x
837
+ or x in self.na_values
838
+ ):
839
+ rl.append(x)
840
+ else:
841
+ x = x[: x.find(self.comment)]
842
+ if len(x) > 0:
843
+ rl.append(x)
844
+ break
845
+ ret.append(rl)
846
+ return ret
847
+
848
+ def _remove_empty_lines(self, lines: list[list[Scalar]]) -> list[list[Scalar]]:
849
+ """
850
+ Iterate through the lines and remove any that are
851
+ either empty or contain only one whitespace value
852
+
853
+ Parameters
854
+ ----------
855
+ lines : list of list of Scalars
856
+ The array of lines that we are to filter.
857
+
858
+ Returns
859
+ -------
860
+ filtered_lines : list of list of Scalars
861
+ The same array of lines with the "empty" ones removed.
862
+ """
863
+ ret = []
864
+ for line in lines:
865
+ # Remove empty lines and lines with only one whitespace value
866
+ if (
867
+ len(line) > 1
868
+ or len(line) == 1
869
+ and (not isinstance(line[0], str) or line[0].strip())
870
+ ):
871
+ ret.append(line)
872
+ return ret
873
+
874
+ def _check_thousands(self, lines: list[list[Scalar]]) -> list[list[Scalar]]:
875
+ if self.thousands is None:
876
+ return lines
877
+
878
+ return self._search_replace_num_columns(
879
+ lines=lines, search=self.thousands, replace=""
880
+ )
881
+
882
+ def _search_replace_num_columns(
883
+ self, lines: list[list[Scalar]], search: str, replace: str
884
+ ) -> list[list[Scalar]]:
885
+ ret = []
886
+ for line in lines:
887
+ rl = []
888
+ for i, x in enumerate(line):
889
+ if (
890
+ not isinstance(x, str)
891
+ or search not in x
892
+ or (self._no_thousands_columns and i in self._no_thousands_columns)
893
+ or not self.num.search(x.strip())
894
+ ):
895
+ rl.append(x)
896
+ else:
897
+ rl.append(x.replace(search, replace))
898
+ ret.append(rl)
899
+ return ret
900
+
901
+ def _check_decimal(self, lines: list[list[Scalar]]) -> list[list[Scalar]]:
902
+ if self.decimal == parser_defaults["decimal"]:
903
+ return lines
904
+
905
+ return self._search_replace_num_columns(
906
+ lines=lines, search=self.decimal, replace="."
907
+ )
908
+
909
+ def _clear_buffer(self) -> None:
910
+ self.buf = []
911
+
912
+ _implicit_index = False
913
+
914
+ def _get_index_name(
915
+ self, columns: Sequence[Hashable]
916
+ ) -> tuple[Sequence[Hashable] | None, list[Hashable], list[Hashable]]:
917
+ """
918
+ Try several cases to get lines:
919
+
920
+ 0) There are headers on row 0 and row 1 and their
921
+ total summed lengths equals the length of the next line.
922
+ Treat row 0 as columns and row 1 as indices
923
+ 1) Look for implicit index: there are more columns
924
+ on row 1 than row 0. If this is true, assume that row
925
+ 1 lists index columns and row 0 lists normal columns.
926
+ 2) Get index from the columns if it was listed.
927
+ """
928
+ orig_names = list(columns)
929
+ columns = list(columns)
930
+
931
+ line: list[Scalar] | None
932
+ if self._header_line is not None:
933
+ line = self._header_line
934
+ else:
935
+ try:
936
+ line = self._next_line()
937
+ except StopIteration:
938
+ line = None
939
+
940
+ next_line: list[Scalar] | None
941
+ try:
942
+ next_line = self._next_line()
943
+ except StopIteration:
944
+ next_line = None
945
+
946
+ # implicitly index_col=0 b/c 1 fewer column names
947
+ implicit_first_cols = 0
948
+ if line is not None:
949
+ # leave it 0, #2442
950
+ # Case 1
951
+ # error: Cannot determine type of 'index_col'
952
+ index_col = self.index_col # type: ignore[has-type]
953
+ if index_col is not False:
954
+ implicit_first_cols = len(line) - self.num_original_columns
955
+
956
+ # Case 0
957
+ if (
958
+ next_line is not None
959
+ and self.header is not None
960
+ and index_col is not False
961
+ ):
962
+ if len(next_line) == len(line) + self.num_original_columns:
963
+ # column and index names on diff rows
964
+ self.index_col = list(range(len(line)))
965
+ self.buf = self.buf[1:]
966
+
967
+ for c in reversed(line):
968
+ columns.insert(0, c)
969
+
970
+ # Update list of original names to include all indices.
971
+ orig_names = list(columns)
972
+ self.num_original_columns = len(columns)
973
+ return line, orig_names, columns
974
+
975
+ if implicit_first_cols > 0:
976
+ # Case 1
977
+ self._implicit_index = True
978
+ if self.index_col is None:
979
+ self.index_col = list(range(implicit_first_cols))
980
+
981
+ index_name = None
982
+
983
+ else:
984
+ # Case 2
985
+ (index_name, _, self.index_col) = self._clean_index_names(
986
+ columns, self.index_col
987
+ )
988
+
989
+ return index_name, orig_names, columns
990
+
991
+ def _rows_to_cols(self, content: list[list[Scalar]]) -> list[np.ndarray]:
992
+ col_len = self.num_original_columns
993
+
994
+ if self._implicit_index:
995
+ col_len += len(self.index_col)
996
+
997
+ max_len = max(len(row) for row in content)
998
+
999
+ # Check that there are no rows with too many
1000
+ # elements in their row (rows with too few
1001
+ # elements are padded with NaN).
1002
+ # error: Non-overlapping identity check (left operand type: "List[int]",
1003
+ # right operand type: "Literal[False]")
1004
+ if (
1005
+ max_len > col_len
1006
+ and self.index_col is not False # type: ignore[comparison-overlap]
1007
+ and self.usecols is None
1008
+ ):
1009
+ footers = self.skipfooter if self.skipfooter else 0
1010
+ bad_lines = []
1011
+
1012
+ iter_content = enumerate(content)
1013
+ content_len = len(content)
1014
+ content = []
1015
+
1016
+ for i, _content in iter_content:
1017
+ actual_len = len(_content)
1018
+
1019
+ if actual_len > col_len:
1020
+ if callable(self.on_bad_lines):
1021
+ new_l = self.on_bad_lines(_content)
1022
+ if new_l is not None:
1023
+ content.append(new_l)
1024
+ elif self.on_bad_lines in (
1025
+ self.BadLineHandleMethod.ERROR,
1026
+ self.BadLineHandleMethod.WARN,
1027
+ ):
1028
+ row_num = self.pos - (content_len - i + footers)
1029
+ bad_lines.append((row_num, actual_len))
1030
+
1031
+ if self.on_bad_lines == self.BadLineHandleMethod.ERROR:
1032
+ break
1033
+ else:
1034
+ content.append(_content)
1035
+
1036
+ for row_num, actual_len in bad_lines:
1037
+ msg = (
1038
+ f"Expected {col_len} fields in line {row_num + 1}, saw "
1039
+ f"{actual_len}"
1040
+ )
1041
+ if (
1042
+ self.delimiter
1043
+ and len(self.delimiter) > 1
1044
+ and self.quoting != csv.QUOTE_NONE
1045
+ ):
1046
+ # see gh-13374
1047
+ reason = (
1048
+ "Error could possibly be due to quotes being "
1049
+ "ignored when a multi-char delimiter is used."
1050
+ )
1051
+ msg += ". " + reason
1052
+
1053
+ self._alert_malformed(msg, row_num + 1)
1054
+
1055
+ # see gh-13320
1056
+ zipped_content = list(lib.to_object_array(content, min_width=col_len).T)
1057
+
1058
+ if self.usecols:
1059
+ assert self._col_indices is not None
1060
+ col_indices = self._col_indices
1061
+
1062
+ if self._implicit_index:
1063
+ zipped_content = [
1064
+ a
1065
+ for i, a in enumerate(zipped_content)
1066
+ if (
1067
+ i < len(self.index_col)
1068
+ or i - len(self.index_col) in col_indices
1069
+ )
1070
+ ]
1071
+ else:
1072
+ zipped_content = [
1073
+ a for i, a in enumerate(zipped_content) if i in col_indices
1074
+ ]
1075
+ return zipped_content
1076
+
1077
+ def _get_lines(self, rows: int | None = None) -> list[list[Scalar]]:
1078
+ lines = self.buf
1079
+ new_rows = None
1080
+
1081
+ # already fetched some number
1082
+ if rows is not None:
1083
+ # we already have the lines in the buffer
1084
+ if len(self.buf) >= rows:
1085
+ new_rows, self.buf = self.buf[:rows], self.buf[rows:]
1086
+
1087
+ # need some lines
1088
+ else:
1089
+ rows -= len(self.buf)
1090
+
1091
+ if new_rows is None:
1092
+ if isinstance(self.data, list):
1093
+ if self.pos > len(self.data):
1094
+ raise StopIteration
1095
+ if rows is None:
1096
+ new_rows = self.data[self.pos :]
1097
+ new_pos = len(self.data)
1098
+ else:
1099
+ new_rows = self.data[self.pos : self.pos + rows]
1100
+ new_pos = self.pos + rows
1101
+
1102
+ new_rows = self._remove_skipped_rows(new_rows)
1103
+ lines.extend(new_rows)
1104
+ self.pos = new_pos
1105
+
1106
+ else:
1107
+ new_rows = []
1108
+ try:
1109
+ if rows is not None:
1110
+ rows_to_skip = 0
1111
+ if self.skiprows is not None and self.pos is not None:
1112
+ # Only read additional rows if pos is in skiprows
1113
+ rows_to_skip = len(
1114
+ set(self.skiprows) - set(range(self.pos))
1115
+ )
1116
+
1117
+ for _ in range(rows + rows_to_skip):
1118
+ # assert for mypy, data is Iterator[str] or None, would
1119
+ # error in next
1120
+ assert self.data is not None
1121
+ new_rows.append(next(self.data))
1122
+
1123
+ len_new_rows = len(new_rows)
1124
+ new_rows = self._remove_skipped_rows(new_rows)
1125
+ lines.extend(new_rows)
1126
+ else:
1127
+ rows = 0
1128
+
1129
+ while True:
1130
+ new_row = self._next_iter_line(row_num=self.pos + rows + 1)
1131
+ rows += 1
1132
+
1133
+ if new_row is not None:
1134
+ new_rows.append(new_row)
1135
+ len_new_rows = len(new_rows)
1136
+
1137
+ except StopIteration:
1138
+ len_new_rows = len(new_rows)
1139
+ new_rows = self._remove_skipped_rows(new_rows)
1140
+ lines.extend(new_rows)
1141
+ if len(lines) == 0:
1142
+ raise
1143
+ self.pos += len_new_rows
1144
+
1145
+ self.buf = []
1146
+ else:
1147
+ lines = new_rows
1148
+
1149
+ if self.skipfooter:
1150
+ lines = lines[: -self.skipfooter]
1151
+
1152
+ lines = self._check_comments(lines)
1153
+ if self.skip_blank_lines:
1154
+ lines = self._remove_empty_lines(lines)
1155
+ lines = self._check_thousands(lines)
1156
+ return self._check_decimal(lines)
1157
+
1158
+ def _remove_skipped_rows(self, new_rows: list[list[Scalar]]) -> list[list[Scalar]]:
1159
+ if self.skiprows:
1160
+ return [
1161
+ row for i, row in enumerate(new_rows) if not self.skipfunc(i + self.pos)
1162
+ ]
1163
+ return new_rows
1164
+
1165
+
1166
class FixedWidthReader(abc.Iterator):
    """
    A reader of fixed-width lines.

    Splits each line of ``f`` into fields according to ``colspecs`` — a
    sequence of half-open ``(start, end)`` intervals — optionally inferring
    the intervals from the first ``infer_nrows`` sample lines.
    """

    def __init__(
        self,
        f: IO[str] | ReadCsvBuffer[str],
        colspecs: list[tuple[int, int]] | Literal["infer"],
        delimiter: str | None,
        comment: str | None,
        skiprows: set[int] | None = None,
        infer_nrows: int = 100,
    ) -> None:
        self.f = f
        # Lines consumed during colspec inference are replayed from this
        # buffer before reading from ``f`` again (see get_rows / __next__).
        self.buffer: Iterator | None = None
        # Characters stripped from field edges; line breaks are always
        # included so they never leak into field values.
        self.delimiter = "\r\n" + delimiter if delimiter else "\n\r\t "
        self.comment = comment
        if colspecs == "infer":
            self.colspecs = self.detect_colspecs(
                infer_nrows=infer_nrows, skiprows=skiprows
            )
        else:
            self.colspecs = colspecs

        if not isinstance(self.colspecs, (tuple, list)):
            raise TypeError(
                "column specifications must be a list or tuple, "
                f"input was a {type(colspecs).__name__}"
            )

        # Each colspec must be a 2-element tuple/list of ints; None
        # endpoints are accepted (they act like open slice bounds in
        # __next__'s ``line[from_:to]``).
        for colspec in self.colspecs:
            if not (
                isinstance(colspec, (tuple, list))
                and len(colspec) == 2
                and isinstance(colspec[0], (int, np.integer, type(None)))
                and isinstance(colspec[1], (int, np.integer, type(None)))
            ):
                raise TypeError(
                    "Each column specification must be "
                    "2 element tuple or list of integers"
                )

    def get_rows(self, infer_nrows: int, skiprows: set[int] | None = None) -> list[str]:
        """
        Read rows from self.f, skipping as specified.

        We distinguish buffer_rows (the first <= infer_nrows
        lines) from the rows returned to detect_colspecs
        because it's simpler to leave the other locations
        with skiprows logic alone than to modify them to
        deal with the fact we skipped some rows here as
        well.

        Parameters
        ----------
        infer_nrows : int
            Number of rows to read from self.f, not counting
            rows that are skipped.
        skiprows: set, optional
            Indices of rows to skip.

        Returns
        -------
        detect_rows : list of str
            A list containing the rows to read.

        """
        if skiprows is None:
            skiprows = set()
        buffer_rows = []
        detect_rows = []
        for i, row in enumerate(self.f):
            if i not in skiprows:
                detect_rows.append(row)
            # Keep every line (skipped or not) so __next__ can replay them
            # and downstream skiprows handling stays untouched.
            buffer_rows.append(row)
            if len(detect_rows) >= infer_nrows:
                break
        self.buffer = iter(buffer_rows)
        return detect_rows

    def detect_colspecs(
        self, infer_nrows: int = 100, skiprows: set[int] | None = None
    ) -> list[tuple[int, int]]:
        # Regex escape the delimiters
        delimiters = "".join([rf"\{x}" for x in self.delimiter])
        pattern = re.compile(f"([^{delimiters}]+)")
        rows = self.get_rows(infer_nrows, skiprows)
        if not rows:
            raise EmptyDataError("No rows from which to infer column width")
        max_len = max(map(len, rows))
        # mask[i] == 1 iff character column i contains non-delimiter text
        # in at least one sample row.
        mask = np.zeros(max_len + 1, dtype=int)
        if self.comment is not None:
            # Discard everything from the comment character onward.
            rows = [row.partition(self.comment)[0] for row in rows]
        for row in rows:
            for m in pattern.finditer(row):
                mask[m.start() : m.end()] = 1
        # XOR with the mask shifted right by one marks positions where a run
        # of text begins or ends; consecutive edges pair up into colspecs.
        shifted = np.roll(mask, 1)
        shifted[0] = 0
        edges = np.where((mask ^ shifted) == 1)[0]
        edge_pairs = list(zip(edges[::2], edges[1::2]))
        return edge_pairs

    def __next__(self) -> list[str]:
        # Argument 1 to "next" has incompatible type "Union[IO[str],
        # ReadCsvBuffer[str]]"; expected "SupportsNext[str]"
        # Drain the replay buffer filled by get_rows before reading ``f``.
        if self.buffer is not None:
            try:
                line = next(self.buffer)
            except StopIteration:
                self.buffer = None
                line = next(self.f)  # type: ignore[arg-type]
        else:
            line = next(self.f)  # type: ignore[arg-type]
        # Note: 'colspecs' is a sequence of half-open intervals.
        return [line[from_:to].strip(self.delimiter) for (from_, to) in self.colspecs]
1282
+
1283
+
1284
class FixedWidthFieldParser(PythonParser):
    """
    Specialization of PythonParser that converts fixed-width fields
    into DataFrames.  See PythonParser for details.
    """

    def __init__(self, f: ReadCsvBuffer[str], **kwds) -> None:
        # Extract the fwf-specific options before delegating the rest of
        # the setup to PythonParser.
        self.colspecs = kwds.pop("colspecs")
        self.infer_nrows = kwds.pop("infer_nrows")
        super().__init__(f, **kwds)

    def _make_reader(self, f: IO[str] | ReadCsvBuffer[str]) -> None:
        # Swap in a reader that splits lines on column specifications
        # instead of a delimiter.
        self.data = FixedWidthReader(
            f,
            self.colspecs,
            self.delimiter,
            self.comment,
            self.skiprows,
            self.infer_nrows,
        )

    def _remove_empty_lines(self, lines: list[list[Scalar]]) -> list[list[Scalar]]:
        """
        Return ``lines`` with the empty ones dropped.  With fixed-width
        fields an empty line parses as a row of empty strings, so a row is
        kept only if some field is non-blank (or not a string at all).

        See PythonParser._remove_empty_lines.
        """
        kept = []
        for line in lines:
            if any(not isinstance(field, str) or field.strip() for field in line):
                kept.append(line)
        return kept
1318
+
1319
+
1320
def count_empty_vals(vals) -> int:
    """Count how many entries of ``vals`` are the empty string or None."""
    total = 0
    for v in vals:
        if v == "" or v is None:
            total += 1
    return total
1322
+
1323
+
1324
+ def _validate_skipfooter_arg(skipfooter: int) -> int:
1325
+ """
1326
+ Validate the 'skipfooter' parameter.
1327
+
1328
+ Checks whether 'skipfooter' is a non-negative integer.
1329
+ Raises a ValueError if that is not the case.
1330
+
1331
+ Parameters
1332
+ ----------
1333
+ skipfooter : non-negative integer
1334
+ The number of rows to skip at the end of the file.
1335
+
1336
+ Returns
1337
+ -------
1338
+ validated_skipfooter : non-negative integer
1339
+ The original input if the validation succeeds.
1340
+
1341
+ Raises
1342
+ ------
1343
+ ValueError : 'skipfooter' was not a non-negative integer.
1344
+ """
1345
+ if not is_integer(skipfooter):
1346
+ raise ValueError("skipfooter must be an integer")
1347
+
1348
+ if skipfooter < 0:
1349
+ raise ValueError("skipfooter cannot be negative")
1350
+
1351
+ return skipfooter
videochat2/lib/python3.10/site-packages/pandas/io/parsers/readers.py ADDED
@@ -0,0 +1,2127 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Module contains tools for processing files into DataFrames or other objects
3
+
4
+ GH#48849 provides a convenient way of deprecating keyword arguments
5
+ """
6
+ from __future__ import annotations
7
+
8
+ from collections import abc
9
+ import csv
10
+ import sys
11
+ from textwrap import fill
12
+ from types import TracebackType
13
+ from typing import (
14
+ IO,
15
+ Any,
16
+ Callable,
17
+ Hashable,
18
+ Literal,
19
+ NamedTuple,
20
+ Sequence,
21
+ overload,
22
+ )
23
+ import warnings
24
+
25
+ import numpy as np
26
+
27
+ from pandas._libs import lib
28
+ from pandas._libs.parsers import STR_NA_VALUES
29
+ from pandas._typing import (
30
+ CompressionOptions,
31
+ CSVEngine,
32
+ DtypeArg,
33
+ DtypeBackend,
34
+ FilePath,
35
+ IndexLabel,
36
+ ReadCsvBuffer,
37
+ StorageOptions,
38
+ )
39
+ from pandas.errors import (
40
+ AbstractMethodError,
41
+ ParserWarning,
42
+ )
43
+ from pandas.util._decorators import Appender
44
+ from pandas.util._exceptions import find_stack_level
45
+ from pandas.util._validators import check_dtype_backend
46
+
47
+ from pandas.core.dtypes.common import (
48
+ is_file_like,
49
+ is_float,
50
+ is_integer,
51
+ is_list_like,
52
+ )
53
+
54
+ from pandas.core.frame import DataFrame
55
+ from pandas.core.indexes.api import RangeIndex
56
+ from pandas.core.shared_docs import _shared_docs
57
+
58
+ from pandas.io.common import (
59
+ IOHandles,
60
+ get_handle,
61
+ stringify_path,
62
+ validate_header_arg,
63
+ )
64
+ from pandas.io.parsers.arrow_parser_wrapper import ArrowParserWrapper
65
+ from pandas.io.parsers.base_parser import (
66
+ ParserBase,
67
+ is_index_col,
68
+ parser_defaults,
69
+ )
70
+ from pandas.io.parsers.c_parser_wrapper import CParserWrapper
71
+ from pandas.io.parsers.python_parser import (
72
+ FixedWidthFieldParser,
73
+ PythonParser,
74
+ )
75
+
76
+ _doc_read_csv_and_table = (
77
+ r"""
78
+ {summary}
79
+
80
+ Also supports optionally iterating or breaking of the file
81
+ into chunks.
82
+
83
+ Additional help can be found in the online docs for
84
+ `IO Tools <https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html>`_.
85
+
86
+ Parameters
87
+ ----------
88
+ filepath_or_buffer : str, path object or file-like object
89
+ Any valid string path is acceptable. The string could be a URL. Valid
90
+ URL schemes include http, ftp, s3, gs, and file. For file URLs, a host is
91
+ expected. A local file could be: file://localhost/path/to/table.csv.
92
+
93
+ If you want to pass in a path object, pandas accepts any ``os.PathLike``.
94
+
95
+ By file-like object, we refer to objects with a ``read()`` method, such as
96
+ a file handle (e.g. via builtin ``open`` function) or ``StringIO``.
97
+ sep : str, default {_default_sep}
98
+ Delimiter to use. If sep is None, the C engine cannot automatically detect
99
+ the separator, but the Python parsing engine can, meaning the latter will
100
+ be used and automatically detect the separator by Python's builtin sniffer
101
+ tool, ``csv.Sniffer``. In addition, separators longer than 1 character and
102
+ different from ``'\s+'`` will be interpreted as regular expressions and
103
+ will also force the use of the Python parsing engine. Note that regex
104
+ delimiters are prone to ignoring quoted data. Regex example: ``'\r\t'``.
105
+ delimiter : str, default ``None``
106
+ Alias for sep.
107
+ header : int, list of int, None, default 'infer'
108
+ Row number(s) to use as the column names, and the start of the
109
+ data. Default behavior is to infer the column names: if no names
110
+ are passed the behavior is identical to ``header=0`` and column
111
+ names are inferred from the first line of the file, if column
112
+ names are passed explicitly then the behavior is identical to
113
+ ``header=None``. Explicitly pass ``header=0`` to be able to
114
+ replace existing names. The header can be a list of integers that
115
+ specify row locations for a multi-index on the columns
116
+ e.g. [0,1,3]. Intervening rows that are not specified will be
117
+ skipped (e.g. 2 in this example is skipped). Note that this
118
+ parameter ignores commented lines and empty lines if
119
+ ``skip_blank_lines=True``, so ``header=0`` denotes the first line of
120
+ data rather than the first line of the file.
121
+ names : array-like, optional
122
+ List of column names to use. If the file contains a header row,
123
+ then you should explicitly pass ``header=0`` to override the column names.
124
+ Duplicates in this list are not allowed.
125
+ index_col : int, str, sequence of int / str, or False, optional, default ``None``
126
+ Column(s) to use as the row labels of the ``DataFrame``, either given as
127
+ string name or column index. If a sequence of int / str is given, a
128
+ MultiIndex is used.
129
+
130
+ Note: ``index_col=False`` can be used to force pandas to *not* use the first
131
+ column as the index, e.g. when you have a malformed file with delimiters at
132
+ the end of each line.
133
+ usecols : list-like or callable, optional
134
+ Return a subset of the columns. If list-like, all elements must either
135
+ be positional (i.e. integer indices into the document columns) or strings
136
+ that correspond to column names provided either by the user in `names` or
137
+ inferred from the document header row(s). If ``names`` are given, the document
138
+ header row(s) are not taken into account. For example, a valid list-like
139
+ `usecols` parameter would be ``[0, 1, 2]`` or ``['foo', 'bar', 'baz']``.
140
+ Element order is ignored, so ``usecols=[0, 1]`` is the same as ``[1, 0]``.
141
+ To instantiate a DataFrame from ``data`` with element order preserved use
142
+ ``pd.read_csv(data, usecols=['foo', 'bar'])[['foo', 'bar']]`` for columns
143
+ in ``['foo', 'bar']`` order or
144
+ ``pd.read_csv(data, usecols=['foo', 'bar'])[['bar', 'foo']]``
145
+ for ``['bar', 'foo']`` order.
146
+
147
+ If callable, the callable function will be evaluated against the column
148
+ names, returning names where the callable function evaluates to True. An
149
+ example of a valid callable argument would be ``lambda x: x.upper() in
150
+ ['AAA', 'BBB', 'DDD']``. Using this parameter results in much faster
151
+ parsing time and lower memory usage.
152
+ dtype : Type name or dict of column -> type, optional
153
+ Data type for data or columns. E.g. {{'a': np.float64, 'b': np.int32,
154
+ 'c': 'Int64'}}
155
+ Use `str` or `object` together with suitable `na_values` settings
156
+ to preserve and not interpret dtype.
157
+ If converters are specified, they will be applied INSTEAD
158
+ of dtype conversion.
159
+
160
+ .. versionadded:: 1.5.0
161
+
162
+ Support for defaultdict was added. Specify a defaultdict as input where
163
+ the default determines the dtype of the columns which are not explicitly
164
+ listed.
165
+ engine : {{'c', 'python', 'pyarrow'}}, optional
166
+ Parser engine to use. The C and pyarrow engines are faster, while the python engine
167
+ is currently more feature-complete. Multithreading is currently only supported by
168
+ the pyarrow engine.
169
+
170
+ .. versionadded:: 1.4.0
171
+
172
+ The "pyarrow" engine was added as an *experimental* engine, and some features
173
+ are unsupported, or may not work correctly, with this engine.
174
+ converters : dict, optional
175
+ Dict of functions for converting values in certain columns. Keys can either
176
+ be integers or column labels.
177
+ true_values : list, optional
178
+ Values to consider as True in addition to case-insensitive variants of "True".
179
+ false_values : list, optional
180
+ Values to consider as False in addition to case-insensitive variants of "False".
181
+ skipinitialspace : bool, default False
182
+ Skip spaces after delimiter.
183
+ skiprows : list-like, int or callable, optional
184
+ Line numbers to skip (0-indexed) or number of lines to skip (int)
185
+ at the start of the file.
186
+
187
+ If callable, the callable function will be evaluated against the row
188
+ indices, returning True if the row should be skipped and False otherwise.
189
+ An example of a valid callable argument would be ``lambda x: x in [0, 2]``.
190
+ skipfooter : int, default 0
191
+ Number of lines at bottom of file to skip (Unsupported with engine='c').
192
+ nrows : int, optional
193
+ Number of rows of file to read. Useful for reading pieces of large files.
194
+ na_values : scalar, str, list-like, or dict, optional
195
+ Additional strings to recognize as NA/NaN. If dict passed, specific
196
+ per-column NA values. By default the following values are interpreted as
197
+ NaN: '"""
198
+ + fill("', '".join(sorted(STR_NA_VALUES)), 70, subsequent_indent=" ")
199
+ + """'.
200
+ keep_default_na : bool, default True
201
+ Whether or not to include the default NaN values when parsing the data.
202
+ Depending on whether `na_values` is passed in, the behavior is as follows:
203
+
204
+ * If `keep_default_na` is True, and `na_values` are specified, `na_values`
205
+ is appended to the default NaN values used for parsing.
206
+ * If `keep_default_na` is True, and `na_values` are not specified, only
207
+ the default NaN values are used for parsing.
208
+ * If `keep_default_na` is False, and `na_values` are specified, only
209
+ the NaN values specified `na_values` are used for parsing.
210
+ * If `keep_default_na` is False, and `na_values` are not specified, no
211
+ strings will be parsed as NaN.
212
+
213
+ Note that if `na_filter` is passed in as False, the `keep_default_na` and
214
+ `na_values` parameters will be ignored.
215
+ na_filter : bool, default True
216
+ Detect missing value markers (empty strings and the value of na_values). In
217
+ data without any NAs, passing na_filter=False can improve the performance
218
+ of reading a large file.
219
+ verbose : bool, default False
220
+ Indicate number of NA values placed in non-numeric columns.
221
+ skip_blank_lines : bool, default True
222
+ If True, skip over blank lines rather than interpreting as NaN values.
223
+ parse_dates : bool or list of int or names or list of lists or dict, \
224
+ default False
225
+ The behavior is as follows:
226
+
227
+ * boolean. If True -> try parsing the index.
228
+ * list of int or names. e.g. If [1, 2, 3] -> try parsing columns 1, 2, 3
229
+ each as a separate date column.
230
+ * list of lists. e.g. If [[1, 3]] -> combine columns 1 and 3 and parse as
231
+ a single date column.
232
+ * dict, e.g. {{'foo' : [1, 3]}} -> parse columns 1, 3 as date and call
233
+ result 'foo'
234
+
235
+ If a column or index cannot be represented as an array of datetimes,
236
+ say because of an unparsable value or a mixture of timezones, the column
237
+ or index will be returned unaltered as an object data type. For
238
+ non-standard datetime parsing, use ``pd.to_datetime`` after
239
+ ``pd.read_csv``.
240
+
241
+ Note: A fast-path exists for iso8601-formatted dates.
242
+ infer_datetime_format : bool, default False
243
+ If True and `parse_dates` is enabled, pandas will attempt to infer the
244
+ format of the datetime strings in the columns, and if it can be inferred,
245
+ switch to a faster method of parsing them. In some cases this can increase
246
+ the parsing speed by 5-10x.
247
+
248
+ .. deprecated:: 2.0.0
249
+ A strict version of this argument is now the default, passing it has no effect.
250
+
251
+ keep_date_col : bool, default False
252
+ If True and `parse_dates` specifies combining multiple columns then
253
+ keep the original columns.
254
+ date_parser : function, optional
255
+ Function to use for converting a sequence of string columns to an array of
256
+ datetime instances. The default uses ``dateutil.parser.parser`` to do the
257
+ conversion. Pandas will try to call `date_parser` in three different ways,
258
+ advancing to the next if an exception occurs: 1) Pass one or more arrays
259
+ (as defined by `parse_dates`) as arguments; 2) concatenate (row-wise) the
260
+ string values from the columns defined by `parse_dates` into a single array
261
+ and pass that; and 3) call `date_parser` once for each row using one or
262
+ more strings (corresponding to the columns defined by `parse_dates`) as
263
+ arguments.
264
+
265
+ .. deprecated:: 2.0.0
266
+ Use ``date_format`` instead, or read in as ``object`` and then apply
267
+ :func:`to_datetime` as-needed.
268
+ date_format : str or dict of column -> format, default ``None``
269
+ If used in conjunction with ``parse_dates``, will parse dates according to this
270
+ format. For anything more complex,
271
+ please read in as ``object`` and then apply :func:`to_datetime` as-needed.
272
+
273
+ .. versionadded:: 2.0.0
274
+ dayfirst : bool, default False
275
+ DD/MM format dates, international and European format.
276
+ cache_dates : bool, default True
277
+ If True, use a cache of unique, converted dates to apply the datetime
278
+ conversion. May produce significant speed-up when parsing duplicate
279
+ date strings, especially ones with timezone offsets.
280
+
281
+ iterator : bool, default False
282
+ Return TextFileReader object for iteration or getting chunks with
283
+ ``get_chunk()``.
284
+
285
+ .. versionchanged:: 1.2
286
+
287
+ ``TextFileReader`` is a context manager.
288
+ chunksize : int, optional
289
+ Return TextFileReader object for iteration.
290
+ See the `IO Tools docs
291
+ <https://pandas.pydata.org/pandas-docs/stable/io.html#io-chunking>`_
292
+ for more information on ``iterator`` and ``chunksize``.
293
+
294
+ .. versionchanged:: 1.2
295
+
296
+ ``TextFileReader`` is a context manager.
297
+ {decompression_options}
298
+
299
+ .. versionchanged:: 1.4.0 Zstandard support.
300
+
301
+ thousands : str, optional
302
+ Thousands separator.
303
+ decimal : str, default '.'
304
+ Character to recognize as decimal point (e.g. use ',' for European data).
305
+ lineterminator : str (length 1), optional
306
+ Character to break file into lines. Only valid with C parser.
307
+ quotechar : str (length 1), optional
308
+ The character used to denote the start and end of a quoted item. Quoted
309
+ items can include the delimiter and it will be ignored.
310
+ quoting : int or csv.QUOTE_* instance, default 0
311
+ Control field quoting behavior per ``csv.QUOTE_*`` constants. Use one of
312
+ QUOTE_MINIMAL (0), QUOTE_ALL (1), QUOTE_NONNUMERIC (2) or QUOTE_NONE (3).
313
+ doublequote : bool, default ``True``
314
+ When quotechar is specified and quoting is not ``QUOTE_NONE``, indicate
315
+ whether or not to interpret two consecutive quotechar elements INSIDE a
316
+ field as a single ``quotechar`` element.
317
+ escapechar : str (length 1), optional
318
+ One-character string used to escape other characters.
319
+ comment : str, optional
320
+ Indicates remainder of line should not be parsed. If found at the beginning
321
+ of a line, the line will be ignored altogether. This parameter must be a
322
+ single character. Like empty lines (as long as ``skip_blank_lines=True``),
323
+ fully commented lines are ignored by the parameter `header` but not by
324
+ `skiprows`. For example, if ``comment='#'``, parsing
325
+ ``#empty\\na,b,c\\n1,2,3`` with ``header=0`` will result in 'a,b,c' being
326
+ treated as the header.
327
+ encoding : str, optional, default "utf-8"
328
+ Encoding to use for UTF when reading/writing (ex. 'utf-8'). `List of Python
329
+ standard encodings
330
+ <https://docs.python.org/3/library/codecs.html#standard-encodings>`_ .
331
+
332
+ .. versionchanged:: 1.2
333
+
334
+ When ``encoding`` is ``None``, ``errors="replace"`` is passed to
335
+ ``open()``. Otherwise, ``errors="strict"`` is passed to ``open()``.
336
+ This behavior was previously only the case for ``engine="python"``.
337
+
338
+ .. versionchanged:: 1.3.0
339
+
340
+ ``encoding_errors`` is a new argument. ``encoding`` has no longer an
341
+ influence on how encoding errors are handled.
342
+
343
+ encoding_errors : str, optional, default "strict"
344
+ How encoding errors are treated. `List of possible values
345
+ <https://docs.python.org/3/library/codecs.html#error-handlers>`_ .
346
+
347
+ .. versionadded:: 1.3.0
348
+
349
+ dialect : str or csv.Dialect, optional
350
+ If provided, this parameter will override values (default or not) for the
351
+ following parameters: `delimiter`, `doublequote`, `escapechar`,
352
+ `skipinitialspace`, `quotechar`, and `quoting`. If it is necessary to
353
+ override values, a ParserWarning will be issued. See csv.Dialect
354
+ documentation for more details.
355
+ on_bad_lines : {{'error', 'warn', 'skip'}} or callable, default 'error'
356
+ Specifies what to do upon encountering a bad line (a line with too many fields).
357
+ Allowed values are :
358
+
359
+ - 'error', raise an Exception when a bad line is encountered.
360
+ - 'warn', raise a warning when a bad line is encountered and skip that line.
361
+ - 'skip', skip bad lines without raising or warning when they are encountered.
362
+
363
+ .. versionadded:: 1.3.0
364
+
365
+ .. versionadded:: 1.4.0
366
+
367
+ - callable, function with signature
368
+ ``(bad_line: list[str]) -> list[str] | None`` that will process a single
369
+ bad line. ``bad_line`` is a list of strings split by the ``sep``.
370
+ If the function returns ``None``, the bad line will be ignored.
371
+ If the function returns a new list of strings with more elements than
372
+ expected, a ``ParserWarning`` will be emitted while dropping extra elements.
373
+ Only supported when ``engine="python"``
374
+
375
+ delim_whitespace : bool, default False
376
+ Specifies whether or not whitespace (e.g. ``' '`` or ``'\t'``) will be
377
+ used as the sep. Equivalent to setting ``sep='\\s+'``. If this option
378
+ is set to True, nothing should be passed in for the ``delimiter``
379
+ parameter.
380
+ low_memory : bool, default True
381
+ Internally process the file in chunks, resulting in lower memory use
382
+ while parsing, but possibly mixed type inference. To ensure no mixed
383
+ types either set False, or specify the type with the `dtype` parameter.
384
+ Note that the entire file is read into a single DataFrame regardless,
385
+ use the `chunksize` or `iterator` parameter to return the data in chunks.
386
+ (Only valid with C parser).
387
+ memory_map : bool, default False
388
+ If a filepath is provided for `filepath_or_buffer`, map the file object
389
+ directly onto memory and access the data directly from there. Using this
390
+ option can improve performance because there is no longer any I/O overhead.
391
+ float_precision : str, optional
392
+ Specifies which converter the C engine should use for floating-point
393
+ values. The options are ``None`` or 'high' for the ordinary converter,
394
+ 'legacy' for the original lower precision pandas converter, and
395
+ 'round_trip' for the round-trip converter.
396
+
397
+ .. versionchanged:: 1.2
398
+
399
+ {storage_options}
400
+
401
+ .. versionadded:: 1.2
402
+
403
+ dtype_backend : {{"numpy_nullable", "pyarrow"}}, defaults to NumPy backed DataFrames
404
+ Which dtype_backend to use, e.g. whether a DataFrame should have NumPy
405
+ arrays, nullable dtypes are used for all dtypes that have a nullable
406
+ implementation when "numpy_nullable" is set, pyarrow is used for all
407
+ dtypes if "pyarrow" is set.
408
+
409
 + The dtype_backends are still experimental.
410
+
411
+ .. versionadded:: 2.0
412
+
413
+ Returns
414
+ -------
415
+ DataFrame or TextFileReader
416
+ A comma-separated values (csv) file is returned as two-dimensional
417
+ data structure with labeled axes.
418
+
419
+ See Also
420
+ --------
421
+ DataFrame.to_csv : Write DataFrame to a comma-separated values (csv) file.
422
+ read_csv : Read a comma-separated values (csv) file into DataFrame.
423
+ read_fwf : Read a table of fixed-width formatted lines into DataFrame.
424
+
425
+ Examples
426
+ --------
427
+ >>> pd.{func_name}('data.csv') # doctest: +SKIP
428
+ """
429
+ )
430
+
431
+
432
# Default values for options that only the C engine honors.
_c_parser_defaults = {
    "delim_whitespace": False,
    "na_filter": True,
    "low_memory": True,
    "memory_map": False,
    "float_precision": None,
}

# Defaults specific to fixed-width-file parsing (read_fwf).
_fwf_defaults = {"colspecs": "infer", "infer_nrows": 100, "widths": None}

# Keyword sets each engine cannot handle; presumably consulted elsewhere to
# reject options or fall back to another engine (usage not visible here).
_c_unsupported = {"skipfooter"}
_python_unsupported = {"low_memory", "float_precision"}
_pyarrow_unsupported = {
    "skipfooter",
    "float_precision",
    "chunksize",
    "comment",
    "nrows",
    "thousands",
    "memory_map",
    "dialect",
    "on_bad_lines",
    "delim_whitespace",
    "quoting",
    "lineterminator",
    "converters",
    "iterator",
    "dayfirst",
    "verbose",
    "skipinitialspace",
    "low_memory",
}
464
+
465
+
466
class _DeprecationConfig(NamedTuple):
    """Pair of (default value, warning message) for a deprecated keyword."""

    # Value used when the caller does not pass the keyword.
    default_value: Any
    # Deprecation message to emit; NOTE(review): None presumably means no
    # warning is issued — confirm against the consumers of this tuple.
    msg: str | None
469
+
470
+
471
@overload
def validate_integer(name, val: None, min_val: int = ...) -> None:
    ...


@overload
def validate_integer(name, val: float, min_val: int = ...) -> int:
    ...


@overload
def validate_integer(name, val: int | None, min_val: int = ...) -> int | None:
    ...


def validate_integer(name, val: int | float | None, min_val: int = 0) -> int | None:
    """
    Checks whether the 'name' parameter for parsing is either
    an integer OR float that can SAFELY be cast to an integer
    without losing accuracy. Raises a ValueError if that is
    not the case.

    Parameters
    ----------
    name : str
        Parameter name (used for error reporting)
    val : int or float
        The value to check
    min_val : int
        Minimum allowed value (val < min_val will result in a ValueError)

    Returns
    -------
    int or None
        ``val`` cast to int, or None if ``val`` was None.

    Raises
    ------
    ValueError
        If ``val`` is a non-integral float, not an integer, or below
        ``min_val``.
    """
    if val is None:
        return val

    msg = f"'{name:s}' must be an integer >={min_val:d}"
    if is_float(val):
        if int(val) != val:
            raise ValueError(msg)
        val = int(val)
        # BUG FIX: integral floats previously bypassed the min_val check,
        # so e.g. validate_integer("chunksize", 0.0, 1) silently returned 0
        # despite the documented contract that val < min_val raises.
        if val < min_val:
            raise ValueError(msg)
    elif not (is_integer(val) and val >= min_val):
        raise ValueError(msg)

    return int(val)
514
+
515
+
516
+ def _validate_names(names: Sequence[Hashable] | None) -> None:
517
+ """
518
+ Raise ValueError if the `names` parameter contains duplicates or has an
519
+ invalid data type.
520
+
521
+ Parameters
522
+ ----------
523
+ names : array-like or None
524
+ An array containing a list of the names used for the output DataFrame.
525
+
526
+ Raises
527
+ ------
528
+ ValueError
529
+ If names are not unique or are not ordered (e.g. set).
530
+ """
531
+ if names is not None:
532
+ if len(names) != len(set(names)):
533
+ raise ValueError("Duplicate names are not allowed.")
534
+ if not (
535
+ is_list_like(names, allow_sets=False) or isinstance(names, abc.KeysView)
536
+ ):
537
+ raise ValueError("Names should be an ordered collection.")
538
+
539
+
540
def _read(
    filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str], kwds
) -> DataFrame | TextFileReader:
    """Generic reader of line files.

    Shared backend for the ``read_*`` entry points: resolves the
    date-parsing default, validates iteration options, and either returns
    a lazy ``TextFileReader`` (when iterating/chunking) or eagerly parses
    and returns a ``DataFrame``.
    """
    # if we pass a date_parser and parse_dates=False, we should not parse the
    # dates GH#44366
    if kwds.get("parse_dates", None) is None:
        if (
            kwds.get("date_parser", lib.no_default) is lib.no_default
            and kwds.get("date_format", None) is None
        ):
            kwds["parse_dates"] = False
        else:
            # A date parser/format was supplied, so date parsing is implied.
            kwds["parse_dates"] = True

    # Extract some of the arguments (pass chunksize on).
    iterator = kwds.get("iterator", False)
    chunksize = kwds.get("chunksize", None)
    if kwds.get("engine") == "pyarrow":
        # The pyarrow engine only supports one-shot reads, so any request
        # for incremental access is rejected up front.
        if iterator:
            raise ValueError(
                "The 'iterator' option is not supported with the 'pyarrow' engine"
            )

        if chunksize is not None:
            raise ValueError(
                "The 'chunksize' option is not supported with the 'pyarrow' engine"
            )
    else:
        chunksize = validate_integer("chunksize", chunksize, 1)

    nrows = kwds.get("nrows", None)

    # Check for duplicates in names.
    _validate_names(kwds.get("names", None))

    # Create the parser.
    parser = TextFileReader(filepath_or_buffer, **kwds)

    if chunksize or iterator:
        # Caller drives iteration; the reader keeps its handle open.
        return parser

    # Eager path: parse everything requested, then close the handle.
    with parser:
        return parser.read(nrows)
584
+
585
+
586
# The four overloads below encode how (iterator, chunksize) determine the
# return type: any request for incremental reading yields a TextFileReader,
# otherwise a DataFrame; the final overload is the catch-all union.

# iterator=True -> TextFileReader
@overload
def read_csv(
    filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str],
    *,
    sep: str | None | lib.NoDefault = ...,
    delimiter: str | None | lib.NoDefault = ...,
    header: int | Sequence[int] | None | Literal["infer"] = ...,
    names: Sequence[Hashable] | None | lib.NoDefault = ...,
    index_col: IndexLabel | Literal[False] | None = ...,
    usecols=...,
    dtype: DtypeArg | None = ...,
    engine: CSVEngine | None = ...,
    converters=...,
    true_values=...,
    false_values=...,
    skipinitialspace: bool = ...,
    skiprows=...,
    skipfooter: int = ...,
    nrows: int | None = ...,
    na_values=...,
    keep_default_na: bool = ...,
    na_filter: bool = ...,
    verbose: bool = ...,
    skip_blank_lines: bool = ...,
    parse_dates: bool | Sequence[Hashable] | None = ...,
    infer_datetime_format: bool | lib.NoDefault = ...,
    keep_date_col: bool = ...,
    date_parser=...,
    date_format: str | None = ...,
    dayfirst: bool = ...,
    cache_dates: bool = ...,
    iterator: Literal[True],
    chunksize: int | None = ...,
    compression: CompressionOptions = ...,
    thousands: str | None = ...,
    decimal: str = ...,
    lineterminator: str | None = ...,
    quotechar: str = ...,
    quoting: int = ...,
    doublequote: bool = ...,
    escapechar: str | None = ...,
    comment: str | None = ...,
    encoding: str | None = ...,
    encoding_errors: str | None = ...,
    dialect: str | csv.Dialect | None = ...,
    on_bad_lines=...,
    delim_whitespace: bool = ...,
    low_memory=...,
    memory_map: bool = ...,
    float_precision: Literal["high", "legacy"] | None = ...,
    storage_options: StorageOptions = ...,
    dtype_backend: DtypeBackend | lib.NoDefault = ...,
) -> TextFileReader:
    ...


# chunksize=int -> TextFileReader
@overload
def read_csv(
    filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str],
    *,
    sep: str | None | lib.NoDefault = ...,
    delimiter: str | None | lib.NoDefault = ...,
    header: int | Sequence[int] | None | Literal["infer"] = ...,
    names: Sequence[Hashable] | None | lib.NoDefault = ...,
    index_col: IndexLabel | Literal[False] | None = ...,
    usecols=...,
    dtype: DtypeArg | None = ...,
    engine: CSVEngine | None = ...,
    converters=...,
    true_values=...,
    false_values=...,
    skipinitialspace: bool = ...,
    skiprows=...,
    skipfooter: int = ...,
    nrows: int | None = ...,
    na_values=...,
    keep_default_na: bool = ...,
    na_filter: bool = ...,
    verbose: bool = ...,
    skip_blank_lines: bool = ...,
    parse_dates: bool | Sequence[Hashable] | None = ...,
    infer_datetime_format: bool | lib.NoDefault = ...,
    keep_date_col: bool = ...,
    date_parser=...,
    date_format: str | None = ...,
    dayfirst: bool = ...,
    cache_dates: bool = ...,
    iterator: bool = ...,
    chunksize: int,
    compression: CompressionOptions = ...,
    thousands: str | None = ...,
    decimal: str = ...,
    lineterminator: str | None = ...,
    quotechar: str = ...,
    quoting: int = ...,
    doublequote: bool = ...,
    escapechar: str | None = ...,
    comment: str | None = ...,
    encoding: str | None = ...,
    encoding_errors: str | None = ...,
    dialect: str | csv.Dialect | None = ...,
    on_bad_lines=...,
    delim_whitespace: bool = ...,
    low_memory=...,
    memory_map: bool = ...,
    float_precision: Literal["high", "legacy"] | None = ...,
    storage_options: StorageOptions = ...,
    dtype_backend: DtypeBackend | lib.NoDefault = ...,
) -> TextFileReader:
    ...


# default case -> DataFrame
@overload
def read_csv(
    filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str],
    *,
    sep: str | None | lib.NoDefault = ...,
    delimiter: str | None | lib.NoDefault = ...,
    header: int | Sequence[int] | None | Literal["infer"] = ...,
    names: Sequence[Hashable] | None | lib.NoDefault = ...,
    index_col: IndexLabel | Literal[False] | None = ...,
    usecols=...,
    dtype: DtypeArg | None = ...,
    engine: CSVEngine | None = ...,
    converters=...,
    true_values=...,
    false_values=...,
    skipinitialspace: bool = ...,
    skiprows=...,
    skipfooter: int = ...,
    nrows: int | None = ...,
    na_values=...,
    keep_default_na: bool = ...,
    na_filter: bool = ...,
    verbose: bool = ...,
    skip_blank_lines: bool = ...,
    parse_dates: bool | Sequence[Hashable] | None = ...,
    infer_datetime_format: bool | lib.NoDefault = ...,
    keep_date_col: bool = ...,
    date_parser=...,
    date_format: str | None = ...,
    dayfirst: bool = ...,
    cache_dates: bool = ...,
    iterator: Literal[False] = ...,
    chunksize: None = ...,
    compression: CompressionOptions = ...,
    thousands: str | None = ...,
    decimal: str = ...,
    lineterminator: str | None = ...,
    quotechar: str = ...,
    quoting: int = ...,
    doublequote: bool = ...,
    escapechar: str | None = ...,
    comment: str | None = ...,
    encoding: str | None = ...,
    encoding_errors: str | None = ...,
    dialect: str | csv.Dialect | None = ...,
    on_bad_lines=...,
    delim_whitespace: bool = ...,
    low_memory=...,
    memory_map: bool = ...,
    float_precision: Literal["high", "legacy"] | None = ...,
    storage_options: StorageOptions = ...,
    dtype_backend: DtypeBackend | lib.NoDefault = ...,
) -> DataFrame:
    ...


# Unions -> DataFrame | TextFileReader
@overload
def read_csv(
    filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str],
    *,
    sep: str | None | lib.NoDefault = ...,
    delimiter: str | None | lib.NoDefault = ...,
    header: int | Sequence[int] | None | Literal["infer"] = ...,
    names: Sequence[Hashable] | None | lib.NoDefault = ...,
    index_col: IndexLabel | Literal[False] | None = ...,
    usecols=...,
    dtype: DtypeArg | None = ...,
    engine: CSVEngine | None = ...,
    converters=...,
    true_values=...,
    false_values=...,
    skipinitialspace: bool = ...,
    skiprows=...,
    skipfooter: int = ...,
    nrows: int | None = ...,
    na_values=...,
    keep_default_na: bool = ...,
    na_filter: bool = ...,
    verbose: bool = ...,
    skip_blank_lines: bool = ...,
    parse_dates: bool | Sequence[Hashable] | None = ...,
    infer_datetime_format: bool | lib.NoDefault = ...,
    keep_date_col: bool = ...,
    date_parser=...,
    date_format: str | None = ...,
    dayfirst: bool = ...,
    cache_dates: bool = ...,
    iterator: bool = ...,
    chunksize: int | None = ...,
    compression: CompressionOptions = ...,
    thousands: str | None = ...,
    decimal: str = ...,
    lineterminator: str | None = ...,
    quotechar: str = ...,
    quoting: int = ...,
    doublequote: bool = ...,
    escapechar: str | None = ...,
    comment: str | None = ...,
    encoding: str | None = ...,
    encoding_errors: str | None = ...,
    dialect: str | csv.Dialect | None = ...,
    on_bad_lines=...,
    delim_whitespace: bool = ...,
    low_memory=...,
    memory_map: bool = ...,
    float_precision: Literal["high", "legacy"] | None = ...,
    storage_options: StorageOptions = ...,
    dtype_backend: DtypeBackend | lib.NoDefault = ...,
) -> DataFrame | TextFileReader:
    ...
812
+
813
+
814
# NOTE: the public docstring is assembled by @Appender from the shared
# template, so no inline docstring may be added here.
@Appender(
    _doc_read_csv_and_table.format(
        func_name="read_csv",
        summary="Read a comma-separated values (csv) file into DataFrame.",
        _default_sep="','",
        storage_options=_shared_docs["storage_options"],
        decompression_options=_shared_docs["decompression_options"]
        % "filepath_or_buffer",
    )
)
def read_csv(
    filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str],
    *,
    sep: str | None | lib.NoDefault = lib.no_default,
    delimiter: str | None | lib.NoDefault = None,
    # Column and Index Locations and Names
    header: int | Sequence[int] | None | Literal["infer"] = "infer",
    names: Sequence[Hashable] | None | lib.NoDefault = lib.no_default,
    index_col: IndexLabel | Literal[False] | None = None,
    usecols=None,
    # General Parsing Configuration
    dtype: DtypeArg | None = None,
    engine: CSVEngine | None = None,
    converters=None,
    true_values=None,
    false_values=None,
    skipinitialspace: bool = False,
    skiprows=None,
    skipfooter: int = 0,
    nrows: int | None = None,
    # NA and Missing Data Handling
    na_values=None,
    keep_default_na: bool = True,
    na_filter: bool = True,
    verbose: bool = False,
    skip_blank_lines: bool = True,
    # Datetime Handling
    parse_dates: bool | Sequence[Hashable] | None = None,
    infer_datetime_format: bool | lib.NoDefault = lib.no_default,
    keep_date_col: bool = False,
    date_parser=lib.no_default,
    date_format: str | None = None,
    dayfirst: bool = False,
    cache_dates: bool = True,
    # Iteration
    iterator: bool = False,
    chunksize: int | None = None,
    # Quoting, Compression, and File Format
    compression: CompressionOptions = "infer",
    thousands: str | None = None,
    decimal: str = ".",
    lineterminator: str | None = None,
    quotechar: str = '"',
    quoting: int = csv.QUOTE_MINIMAL,
    doublequote: bool = True,
    escapechar: str | None = None,
    comment: str | None = None,
    encoding: str | None = None,
    encoding_errors: str | None = "strict",
    dialect: str | csv.Dialect | None = None,
    # Error Handling
    on_bad_lines: str = "error",
    # Internal
    delim_whitespace: bool = False,
    low_memory=_c_parser_defaults["low_memory"],
    memory_map: bool = False,
    float_precision: Literal["high", "legacy"] | None = None,
    storage_options: StorageOptions = None,
    dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,
) -> DataFrame | TextFileReader:
    # 'infer_datetime_format' is deprecated: strict datetime-format
    # inference is now always on (see warning text below).
    if infer_datetime_format is not lib.no_default:
        warnings.warn(
            "The argument 'infer_datetime_format' is deprecated and will "
            "be removed in a future version. "
            "A strict version of it is now the default, see "
            "https://pandas.pydata.org/pdeps/0004-consistent-to-datetime-parsing.html. "
            "You can safely remove this argument.",
            FutureWarning,
            stacklevel=find_stack_level(),
        )
    # locals() should never be modified
    # WARNING: parameter names double as parser option keys via this
    # locals() snapshot — renaming any parameter would silently change
    # the kwargs handed to the parser.
    kwds = locals().copy()
    del kwds["filepath_or_buffer"]
    del kwds["sep"]

    # Reconcile sep/delimiter/dialect/engine interactions and fill in
    # engine-dependent defaults before handing off to the parser.
    kwds_defaults = _refine_defaults_read(
        dialect,
        delimiter,
        delim_whitespace,
        engine,
        sep,
        on_bad_lines,
        names,
        defaults={"delimiter": ","},
        dtype_backend=dtype_backend,
    )
    kwds.update(kwds_defaults)

    return _read(filepath_or_buffer, kwds)
913
+
914
+
915
# Overload set for read_table, mirroring read_csv: incremental reading
# (iterator=True or an int chunksize) returns a TextFileReader, the plain
# call returns a DataFrame, and the last overload is the catch-all union.

# iterator=True -> TextFileReader
@overload
def read_table(
    filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str],
    *,
    sep: str | None | lib.NoDefault = ...,
    delimiter: str | None | lib.NoDefault = ...,
    header: int | Sequence[int] | None | Literal["infer"] = ...,
    names: Sequence[Hashable] | None | lib.NoDefault = ...,
    index_col: IndexLabel | Literal[False] | None = ...,
    usecols=...,
    dtype: DtypeArg | None = ...,
    engine: CSVEngine | None = ...,
    converters=...,
    true_values=...,
    false_values=...,
    skipinitialspace: bool = ...,
    skiprows=...,
    skipfooter: int = ...,
    nrows: int | None = ...,
    na_values=...,
    keep_default_na: bool = ...,
    na_filter: bool = ...,
    verbose: bool = ...,
    skip_blank_lines: bool = ...,
    parse_dates: bool | Sequence[Hashable] = ...,
    infer_datetime_format: bool | lib.NoDefault = ...,
    keep_date_col: bool = ...,
    date_parser=...,
    date_format: str | None = ...,
    dayfirst: bool = ...,
    cache_dates: bool = ...,
    iterator: Literal[True],
    chunksize: int | None = ...,
    compression: CompressionOptions = ...,
    thousands: str | None = ...,
    decimal: str = ...,
    lineterminator: str | None = ...,
    quotechar: str = ...,
    quoting: int = ...,
    doublequote: bool = ...,
    escapechar: str | None = ...,
    comment: str | None = ...,
    encoding: str | None = ...,
    encoding_errors: str | None = ...,
    dialect: str | csv.Dialect | None = ...,
    on_bad_lines=...,
    delim_whitespace: bool = ...,
    low_memory=...,
    memory_map: bool = ...,
    float_precision: str | None = ...,
    storage_options: StorageOptions = ...,
    dtype_backend: DtypeBackend | lib.NoDefault = ...,
) -> TextFileReader:
    ...


# chunksize=int -> TextFileReader
@overload
def read_table(
    filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str],
    *,
    sep: str | None | lib.NoDefault = ...,
    delimiter: str | None | lib.NoDefault = ...,
    header: int | Sequence[int] | None | Literal["infer"] = ...,
    names: Sequence[Hashable] | None | lib.NoDefault = ...,
    index_col: IndexLabel | Literal[False] | None = ...,
    usecols=...,
    dtype: DtypeArg | None = ...,
    engine: CSVEngine | None = ...,
    converters=...,
    true_values=...,
    false_values=...,
    skipinitialspace: bool = ...,
    skiprows=...,
    skipfooter: int = ...,
    nrows: int | None = ...,
    na_values=...,
    keep_default_na: bool = ...,
    na_filter: bool = ...,
    verbose: bool = ...,
    skip_blank_lines: bool = ...,
    parse_dates: bool | Sequence[Hashable] = ...,
    infer_datetime_format: bool | lib.NoDefault = ...,
    keep_date_col: bool = ...,
    date_parser=...,
    date_format: str | None = ...,
    dayfirst: bool = ...,
    cache_dates: bool = ...,
    iterator: bool = ...,
    chunksize: int,
    compression: CompressionOptions = ...,
    thousands: str | None = ...,
    decimal: str = ...,
    lineterminator: str | None = ...,
    quotechar: str = ...,
    quoting: int = ...,
    doublequote: bool = ...,
    escapechar: str | None = ...,
    comment: str | None = ...,
    encoding: str | None = ...,
    encoding_errors: str | None = ...,
    dialect: str | csv.Dialect | None = ...,
    on_bad_lines=...,
    delim_whitespace: bool = ...,
    low_memory=...,
    memory_map: bool = ...,
    float_precision: str | None = ...,
    storage_options: StorageOptions = ...,
    dtype_backend: DtypeBackend | lib.NoDefault = ...,
) -> TextFileReader:
    ...


# default -> DataFrame
@overload
def read_table(
    filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str],
    *,
    sep: str | None | lib.NoDefault = ...,
    delimiter: str | None | lib.NoDefault = ...,
    header: int | Sequence[int] | None | Literal["infer"] = ...,
    names: Sequence[Hashable] | None | lib.NoDefault = ...,
    index_col: IndexLabel | Literal[False] | None = ...,
    usecols=...,
    dtype: DtypeArg | None = ...,
    engine: CSVEngine | None = ...,
    converters=...,
    true_values=...,
    false_values=...,
    skipinitialspace: bool = ...,
    skiprows=...,
    skipfooter: int = ...,
    nrows: int | None = ...,
    na_values=...,
    keep_default_na: bool = ...,
    na_filter: bool = ...,
    verbose: bool = ...,
    skip_blank_lines: bool = ...,
    parse_dates: bool | Sequence[Hashable] = ...,
    infer_datetime_format: bool | lib.NoDefault = ...,
    keep_date_col: bool = ...,
    date_parser=...,
    date_format: str | None = ...,
    dayfirst: bool = ...,
    cache_dates: bool = ...,
    iterator: Literal[False] = ...,
    chunksize: None = ...,
    compression: CompressionOptions = ...,
    thousands: str | None = ...,
    decimal: str = ...,
    lineterminator: str | None = ...,
    quotechar: str = ...,
    quoting: int = ...,
    doublequote: bool = ...,
    escapechar: str | None = ...,
    comment: str | None = ...,
    encoding: str | None = ...,
    encoding_errors: str | None = ...,
    dialect: str | csv.Dialect | None = ...,
    on_bad_lines=...,
    delim_whitespace: bool = ...,
    low_memory=...,
    memory_map: bool = ...,
    float_precision: str | None = ...,
    storage_options: StorageOptions = ...,
    dtype_backend: DtypeBackend | lib.NoDefault = ...,
) -> DataFrame:
    ...


# Unions -> DataFrame | TextFileReader
@overload
def read_table(
    filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str],
    *,
    sep: str | None | lib.NoDefault = ...,
    delimiter: str | None | lib.NoDefault = ...,
    header: int | Sequence[int] | None | Literal["infer"] = ...,
    names: Sequence[Hashable] | None | lib.NoDefault = ...,
    index_col: IndexLabel | Literal[False] | None = ...,
    usecols=...,
    dtype: DtypeArg | None = ...,
    engine: CSVEngine | None = ...,
    converters=...,
    true_values=...,
    false_values=...,
    skipinitialspace: bool = ...,
    skiprows=...,
    skipfooter: int = ...,
    nrows: int | None = ...,
    na_values=...,
    keep_default_na: bool = ...,
    na_filter: bool = ...,
    verbose: bool = ...,
    skip_blank_lines: bool = ...,
    parse_dates: bool | Sequence[Hashable] = ...,
    infer_datetime_format: bool | lib.NoDefault = ...,
    keep_date_col: bool = ...,
    date_parser=...,
    date_format: str | None = ...,
    dayfirst: bool = ...,
    cache_dates: bool = ...,
    iterator: bool = ...,
    chunksize: int | None = ...,
    compression: CompressionOptions = ...,
    thousands: str | None = ...,
    decimal: str = ...,
    lineterminator: str | None = ...,
    quotechar: str = ...,
    quoting: int = ...,
    doublequote: bool = ...,
    escapechar: str | None = ...,
    comment: str | None = ...,
    encoding: str | None = ...,
    encoding_errors: str | None = ...,
    dialect: str | csv.Dialect | None = ...,
    on_bad_lines=...,
    delim_whitespace: bool = ...,
    low_memory=...,
    memory_map: bool = ...,
    float_precision: str | None = ...,
    storage_options: StorageOptions = ...,
    dtype_backend: DtypeBackend | lib.NoDefault = ...,
) -> DataFrame | TextFileReader:
    ...
1141
+
1142
+
1143
# NOTE: the public docstring is assembled by @Appender from the shared
# template; read_table differs from read_csv only in its defaults
# (tab delimiter, parse_dates=False).
@Appender(
    _doc_read_csv_and_table.format(
        func_name="read_table",
        summary="Read general delimited file into DataFrame.",
        _default_sep=r"'\\t' (tab-stop)",
        storage_options=_shared_docs["storage_options"],
        decompression_options=_shared_docs["decompression_options"]
        % "filepath_or_buffer",
    )
)
def read_table(
    filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str],
    *,
    sep: str | None | lib.NoDefault = lib.no_default,
    delimiter: str | None | lib.NoDefault = None,
    # Column and Index Locations and Names
    header: int | Sequence[int] | None | Literal["infer"] = "infer",
    names: Sequence[Hashable] | None | lib.NoDefault = lib.no_default,
    index_col: IndexLabel | Literal[False] | None = None,
    usecols=None,
    # General Parsing Configuration
    dtype: DtypeArg | None = None,
    engine: CSVEngine | None = None,
    converters=None,
    true_values=None,
    false_values=None,
    skipinitialspace: bool = False,
    skiprows=None,
    skipfooter: int = 0,
    nrows: int | None = None,
    # NA and Missing Data Handling
    na_values=None,
    keep_default_na: bool = True,
    na_filter: bool = True,
    verbose: bool = False,
    skip_blank_lines: bool = True,
    # Datetime Handling
    parse_dates: bool | Sequence[Hashable] = False,
    infer_datetime_format: bool | lib.NoDefault = lib.no_default,
    keep_date_col: bool = False,
    date_parser=lib.no_default,
    date_format: str | None = None,
    dayfirst: bool = False,
    cache_dates: bool = True,
    # Iteration
    iterator: bool = False,
    chunksize: int | None = None,
    # Quoting, Compression, and File Format
    compression: CompressionOptions = "infer",
    thousands: str | None = None,
    decimal: str = ".",
    lineterminator: str | None = None,
    quotechar: str = '"',
    quoting: int = csv.QUOTE_MINIMAL,
    doublequote: bool = True,
    escapechar: str | None = None,
    comment: str | None = None,
    encoding: str | None = None,
    encoding_errors: str | None = "strict",
    dialect: str | csv.Dialect | None = None,
    # Error Handling
    on_bad_lines: str = "error",
    # Internal
    delim_whitespace: bool = False,
    low_memory=_c_parser_defaults["low_memory"],
    memory_map: bool = False,
    float_precision: str | None = None,
    storage_options: StorageOptions = None,
    dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,
) -> DataFrame | TextFileReader:
    # 'infer_datetime_format' is deprecated: strict datetime-format
    # inference is now always on (see warning text below).
    if infer_datetime_format is not lib.no_default:
        warnings.warn(
            "The argument 'infer_datetime_format' is deprecated and will "
            "be removed in a future version. "
            "A strict version of it is now the default, see "
            "https://pandas.pydata.org/pdeps/0004-consistent-to-datetime-parsing.html. "
            "You can safely remove this argument.",
            FutureWarning,
            stacklevel=find_stack_level(),
        )

    # locals() should never be modified
    # WARNING: parameter names double as parser option keys via this
    # locals() snapshot — renaming any parameter would silently change
    # the kwargs handed to the parser.
    kwds = locals().copy()
    del kwds["filepath_or_buffer"]
    del kwds["sep"]

    # Reconcile sep/delimiter/dialect/engine interactions and fill in
    # engine-dependent defaults before handing off to the parser.
    kwds_defaults = _refine_defaults_read(
        dialect,
        delimiter,
        delim_whitespace,
        engine,
        sep,
        on_bad_lines,
        names,
        defaults={"delimiter": "\t"},
        dtype_backend=dtype_backend,
    )
    kwds.update(kwds_defaults)

    return _read(filepath_or_buffer, kwds)
1243
+
1244
+
1245
def read_fwf(
    filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str],
    *,
    colspecs: Sequence[tuple[int, int]] | str | None = "infer",
    widths: Sequence[int] | None = None,
    infer_nrows: int = 100,
    dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,
    **kwds,
) -> DataFrame | TextFileReader:
    r"""
    Read a table of fixed-width formatted lines into DataFrame.

    Also supports optionally iterating or breaking of the file
    into chunks.

    Additional help can be found in the `online docs for IO Tools
    <https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html>`_.

    Parameters
    ----------
    filepath_or_buffer : str, path object, or file-like object
        String, path object (implementing ``os.PathLike[str]``), or file-like
        object implementing a text ``read()`` function. The string could be a URL.
        Valid URL schemes include http, ftp, s3, and file. For file URLs, a host is
        expected. A local file could be:
        ``file://localhost/path/to/table.csv``.
    colspecs : list of tuple (int, int) or 'infer'. optional
        A list of tuples giving the extents of the fixed-width
        fields of each line as half-open intervals (i.e., [from, to[ ).
        String value 'infer' can be used to instruct the parser to try
        detecting the column specifications from the first 100 rows of
        the data which are not being skipped via skiprows (default='infer').
    widths : list of int, optional
        A list of field widths which can be used instead of 'colspecs' if
        the intervals are contiguous.
    infer_nrows : int, default 100
        The number of rows to consider when letting the parser determine the
        `colspecs`.
    dtype_backend : {"numpy_nullable", "pyarrow"}, defaults to NumPy backed DataFrames
        Which dtype_backend to use, e.g. whether a DataFrame should have NumPy
        arrays, nullable dtypes are used for all dtypes that have a nullable
        implementation when "numpy_nullable" is set, pyarrow is used for all
        dtypes if "pyarrow" is set.

        The dtype_backends are still experimental.

        .. versionadded:: 2.0

    **kwds : optional
        Optional keyword arguments can be passed to ``TextFileReader``.

    Returns
    -------
    DataFrame or TextFileReader
        A comma-separated values (csv) file is returned as two-dimensional
        data structure with labeled axes.

    See Also
    --------
    DataFrame.to_csv : Write DataFrame to a comma-separated values (csv) file.
    read_csv : Read a comma-separated values (csv) file into DataFrame.

    Examples
    --------
    >>> pd.read_fwf('data.csv')  # doctest: +SKIP
    """
    # Validate the colspecs/widths combination up front.
    if colspecs is None and widths is None:
        raise ValueError("Must specify either colspecs or widths")
    if widths is not None and colspecs not in (None, "infer"):
        raise ValueError("You must specify only one of 'widths' and 'colspecs'")

    # Derive half-open column intervals from contiguous field widths.
    if widths is not None:
        edges: list[tuple[int, int]] = []
        start = 0
        for width in widths:
            edges.append((start, start + width))
            start += width
        colspecs = edges

    # for mypy
    assert colspecs is not None

    # GH#40830: when explicit names are given, their count (plus any
    # index columns) has to line up with the number of column specs.
    names = kwds.get("names")
    if names is not None and colspecs != "infer" and len(names) != len(colspecs):
        # index_col may reference unnamed index columns, whose names are
        # not required to appear in `names`.
        index_col: Any = kwds.get("index_col")
        n_index_cols = 0
        if index_col is not None and index_col is not False:
            n_index_cols = len(index_col) if is_list_like(index_col) else 1
        if kwds.get("usecols") is None and len(names) + n_index_cols != len(colspecs):
            # If usecols is used colspec may be longer than names
            raise ValueError("Length of colspecs must match length of names")

    kwds["colspecs"] = colspecs
    kwds["infer_nrows"] = infer_nrows
    kwds["engine"] = "python-fwf"

    check_dtype_backend(dtype_backend)
    kwds["dtype_backend"] = dtype_backend
    return _read(filepath_or_buffer, kwds)
1353
+
1354
+
1355
+ class TextFileReader(abc.Iterator):
1356
+ """
1357
+
1358
+ Passed dialect overrides any of the related parser options
1359
+
1360
+ """
1361
+
1362
+ def __init__(
1363
+ self,
1364
+ f: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str] | list,
1365
+ engine: CSVEngine | None = None,
1366
+ **kwds,
1367
+ ) -> None:
1368
+ if engine is not None:
1369
+ engine_specified = True
1370
+ else:
1371
+ engine = "python"
1372
+ engine_specified = False
1373
+ self.engine = engine
1374
+ self._engine_specified = kwds.get("engine_specified", engine_specified)
1375
+
1376
+ _validate_skipfooter(kwds)
1377
+
1378
+ dialect = _extract_dialect(kwds)
1379
+ if dialect is not None:
1380
+ if engine == "pyarrow":
1381
+ raise ValueError(
1382
+ "The 'dialect' option is not supported with the 'pyarrow' engine"
1383
+ )
1384
+ kwds = _merge_with_dialect_properties(dialect, kwds)
1385
+
1386
+ if kwds.get("header", "infer") == "infer":
1387
+ kwds["header"] = 0 if kwds.get("names") is None else None
1388
+
1389
+ self.orig_options = kwds
1390
+
1391
+ # miscellanea
1392
+ self._currow = 0
1393
+
1394
+ options = self._get_options_with_defaults(engine)
1395
+ options["storage_options"] = kwds.get("storage_options", None)
1396
+
1397
+ self.chunksize = options.pop("chunksize", None)
1398
+ self.nrows = options.pop("nrows", None)
1399
+
1400
+ self._check_file_or_buffer(f, engine)
1401
+ self.options, self.engine = self._clean_options(options, engine)
1402
+
1403
+ if "has_index_names" in kwds:
1404
+ self.options["has_index_names"] = kwds["has_index_names"]
1405
+
1406
+ self.handles: IOHandles | None = None
1407
+ self._engine = self._make_engine(f, self.engine)
1408
+
1409
+ def close(self) -> None:
1410
+ if self.handles is not None:
1411
+ self.handles.close()
1412
+ self._engine.close()
1413
+
1414
+ def _get_options_with_defaults(self, engine: CSVEngine) -> dict[str, Any]:
1415
+ kwds = self.orig_options
1416
+
1417
+ options = {}
1418
+ default: object | None
1419
+
1420
+ for argname, default in parser_defaults.items():
1421
+ value = kwds.get(argname, default)
1422
+
1423
+ # see gh-12935
1424
+ if (
1425
+ engine == "pyarrow"
1426
+ and argname in _pyarrow_unsupported
1427
+ and value != default
1428
+ and value != getattr(value, "value", default)
1429
+ ):
1430
+ raise ValueError(
1431
+ f"The {repr(argname)} option is not supported with the "
1432
+ f"'pyarrow' engine"
1433
+ )
1434
+ options[argname] = value
1435
+
1436
+ for argname, default in _c_parser_defaults.items():
1437
+ if argname in kwds:
1438
+ value = kwds[argname]
1439
+
1440
+ if engine != "c" and value != default:
1441
+ if "python" in engine and argname not in _python_unsupported:
1442
+ pass
1443
+ else:
1444
+ raise ValueError(
1445
+ f"The {repr(argname)} option is not supported with the "
1446
+ f"{repr(engine)} engine"
1447
+ )
1448
+ else:
1449
+ value = default
1450
+ options[argname] = value
1451
+
1452
+ if engine == "python-fwf":
1453
+ for argname, default in _fwf_defaults.items():
1454
+ options[argname] = kwds.get(argname, default)
1455
+
1456
+ return options
1457
+
1458
+ def _check_file_or_buffer(self, f, engine: CSVEngine) -> None:
1459
+ # see gh-16530
1460
+ if is_file_like(f) and engine != "c" and not hasattr(f, "__iter__"):
1461
+ # The C engine doesn't need the file-like to have the "__iter__"
1462
+ # attribute. However, the Python engine needs "__iter__(...)"
1463
+ # when iterating through such an object, meaning it
1464
+ # needs to have that attribute
1465
+ raise ValueError(
1466
+ "The 'python' engine cannot iterate through this file buffer."
1467
+ )
1468
+
1469
+ def _clean_options(
1470
+ self, options: dict[str, Any], engine: CSVEngine
1471
+ ) -> tuple[dict[str, Any], CSVEngine]:
1472
+ result = options.copy()
1473
+
1474
+ fallback_reason = None
1475
+
1476
+ # C engine not supported yet
1477
+ if engine == "c":
1478
+ if options["skipfooter"] > 0:
1479
+ fallback_reason = "the 'c' engine does not support skipfooter"
1480
+ engine = "python"
1481
+
1482
+ sep = options["delimiter"]
1483
+ delim_whitespace = options["delim_whitespace"]
1484
+
1485
+ if sep is None and not delim_whitespace:
1486
+ if engine in ("c", "pyarrow"):
1487
+ fallback_reason = (
1488
+ f"the '{engine}' engine does not support "
1489
+ "sep=None with delim_whitespace=False"
1490
+ )
1491
+ engine = "python"
1492
+ elif sep is not None and len(sep) > 1:
1493
+ if engine == "c" and sep == r"\s+":
1494
+ result["delim_whitespace"] = True
1495
+ del result["delimiter"]
1496
+ elif engine not in ("python", "python-fwf"):
1497
+ # wait until regex engine integrated
1498
+ fallback_reason = (
1499
+ f"the '{engine}' engine does not support "
1500
+ "regex separators (separators > 1 char and "
1501
+ r"different from '\s+' are interpreted as regex)"
1502
+ )
1503
+ engine = "python"
1504
+ elif delim_whitespace:
1505
+ if "python" in engine:
1506
+ result["delimiter"] = r"\s+"
1507
+ elif sep is not None:
1508
+ encodeable = True
1509
+ encoding = sys.getfilesystemencoding() or "utf-8"
1510
+ try:
1511
+ if len(sep.encode(encoding)) > 1:
1512
+ encodeable = False
1513
+ except UnicodeDecodeError:
1514
+ encodeable = False
1515
+ if not encodeable and engine not in ("python", "python-fwf"):
1516
+ fallback_reason = (
1517
+ f"the separator encoded in {encoding} "
1518
+ f"is > 1 char long, and the '{engine}' engine "
1519
+ "does not support such separators"
1520
+ )
1521
+ engine = "python"
1522
+
1523
+ quotechar = options["quotechar"]
1524
+ if quotechar is not None and isinstance(quotechar, (str, bytes)):
1525
+ if (
1526
+ len(quotechar) == 1
1527
+ and ord(quotechar) > 127
1528
+ and engine not in ("python", "python-fwf")
1529
+ ):
1530
+ fallback_reason = (
1531
+ "ord(quotechar) > 127, meaning the "
1532
+ "quotechar is larger than one byte, "
1533
+ f"and the '{engine}' engine does not support such quotechars"
1534
+ )
1535
+ engine = "python"
1536
+
1537
+ if fallback_reason and self._engine_specified:
1538
+ raise ValueError(fallback_reason)
1539
+
1540
+ if engine == "c":
1541
+ for arg in _c_unsupported:
1542
+ del result[arg]
1543
+
1544
+ if "python" in engine:
1545
+ for arg in _python_unsupported:
1546
+ if fallback_reason and result[arg] != _c_parser_defaults[arg]:
1547
+ raise ValueError(
1548
+ "Falling back to the 'python' engine because "
1549
+ f"{fallback_reason}, but this causes {repr(arg)} to be "
1550
+ "ignored as it is not supported by the 'python' engine."
1551
+ )
1552
+ del result[arg]
1553
+
1554
+ if fallback_reason:
1555
+ warnings.warn(
1556
+ (
1557
+ "Falling back to the 'python' engine because "
1558
+ f"{fallback_reason}; you can avoid this warning by specifying "
1559
+ "engine='python'."
1560
+ ),
1561
+ ParserWarning,
1562
+ stacklevel=find_stack_level(),
1563
+ )
1564
+
1565
+ index_col = options["index_col"]
1566
+ names = options["names"]
1567
+ converters = options["converters"]
1568
+ na_values = options["na_values"]
1569
+ skiprows = options["skiprows"]
1570
+
1571
+ validate_header_arg(options["header"])
1572
+
1573
+ if index_col is True:
1574
+ raise ValueError("The value of index_col couldn't be 'True'")
1575
+ if is_index_col(index_col):
1576
+ if not isinstance(index_col, (list, tuple, np.ndarray)):
1577
+ index_col = [index_col]
1578
+ result["index_col"] = index_col
1579
+
1580
+ names = list(names) if names is not None else names
1581
+
1582
+ # type conversion-related
1583
+ if converters is not None:
1584
+ if not isinstance(converters, dict):
1585
+ raise TypeError(
1586
+ "Type converters must be a dict or subclass, "
1587
+ f"input was a {type(converters).__name__}"
1588
+ )
1589
+ else:
1590
+ converters = {}
1591
+
1592
+ # Converting values to NA
1593
+ keep_default_na = options["keep_default_na"]
1594
+ na_values, na_fvalues = _clean_na_values(na_values, keep_default_na)
1595
+
1596
+ # handle skiprows; this is internally handled by the
1597
+ # c-engine, so only need for python and pyarrow parsers
1598
+ if engine == "pyarrow":
1599
+ if not is_integer(skiprows) and skiprows is not None:
1600
+ # pyarrow expects skiprows to be passed as an integer
1601
+ raise ValueError(
1602
+ "skiprows argument must be an integer when using "
1603
+ "engine='pyarrow'"
1604
+ )
1605
+ else:
1606
+ if is_integer(skiprows):
1607
+ skiprows = list(range(skiprows))
1608
+ if skiprows is None:
1609
+ skiprows = set()
1610
+ elif not callable(skiprows):
1611
+ skiprows = set(skiprows)
1612
+
1613
+ # put stuff back
1614
+ result["names"] = names
1615
+ result["converters"] = converters
1616
+ result["na_values"] = na_values
1617
+ result["na_fvalues"] = na_fvalues
1618
+ result["skiprows"] = skiprows
1619
+
1620
+ return result, engine
1621
+
1622
+ def __next__(self) -> DataFrame:
1623
+ try:
1624
+ return self.get_chunk()
1625
+ except StopIteration:
1626
+ self.close()
1627
+ raise
1628
+
1629
+ def _make_engine(
1630
+ self,
1631
+ f: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str] | list | IO,
1632
+ engine: CSVEngine = "c",
1633
+ ) -> ParserBase:
1634
+ mapping: dict[str, type[ParserBase]] = {
1635
+ "c": CParserWrapper,
1636
+ "python": PythonParser,
1637
+ "pyarrow": ArrowParserWrapper,
1638
+ "python-fwf": FixedWidthFieldParser,
1639
+ }
1640
+ if engine not in mapping:
1641
+ raise ValueError(
1642
+ f"Unknown engine: {engine} (valid options are {mapping.keys()})"
1643
+ )
1644
+ if not isinstance(f, list):
1645
+ # open file here
1646
+ is_text = True
1647
+ mode = "r"
1648
+ if engine == "pyarrow":
1649
+ is_text = False
1650
+ mode = "rb"
1651
+ elif (
1652
+ engine == "c"
1653
+ and self.options.get("encoding", "utf-8") == "utf-8"
1654
+ and isinstance(stringify_path(f), str)
1655
+ ):
1656
+ # c engine can decode utf-8 bytes, adding TextIOWrapper makes
1657
+ # the c-engine especially for memory_map=True far slower
1658
+ is_text = False
1659
+ if "b" not in mode:
1660
+ mode += "b"
1661
+ self.handles = get_handle(
1662
+ f,
1663
+ mode,
1664
+ encoding=self.options.get("encoding", None),
1665
+ compression=self.options.get("compression", None),
1666
+ memory_map=self.options.get("memory_map", False),
1667
+ is_text=is_text,
1668
+ errors=self.options.get("encoding_errors", "strict"),
1669
+ storage_options=self.options.get("storage_options", None),
1670
+ )
1671
+ assert self.handles is not None
1672
+ f = self.handles.handle
1673
+
1674
+ elif engine != "python":
1675
+ msg = f"Invalid file path or buffer object type: {type(f)}"
1676
+ raise ValueError(msg)
1677
+
1678
+ try:
1679
+ return mapping[engine](f, **self.options)
1680
+ except Exception:
1681
+ if self.handles is not None:
1682
+ self.handles.close()
1683
+ raise
1684
+
1685
+ def _failover_to_python(self) -> None:
1686
+ raise AbstractMethodError(self)
1687
+
1688
+ def read(self, nrows: int | None = None) -> DataFrame:
1689
+ if self.engine == "pyarrow":
1690
+ try:
1691
+ # error: "ParserBase" has no attribute "read"
1692
+ df = self._engine.read() # type: ignore[attr-defined]
1693
+ except Exception:
1694
+ self.close()
1695
+ raise
1696
+ else:
1697
+ nrows = validate_integer("nrows", nrows)
1698
+ try:
1699
+ # error: "ParserBase" has no attribute "read"
1700
+ (
1701
+ index,
1702
+ columns,
1703
+ col_dict,
1704
+ ) = self._engine.read( # type: ignore[attr-defined]
1705
+ nrows
1706
+ )
1707
+ except Exception:
1708
+ self.close()
1709
+ raise
1710
+
1711
+ if index is None:
1712
+ if col_dict:
1713
+ # Any column is actually fine:
1714
+ new_rows = len(next(iter(col_dict.values())))
1715
+ index = RangeIndex(self._currow, self._currow + new_rows)
1716
+ else:
1717
+ new_rows = 0
1718
+ else:
1719
+ new_rows = len(index)
1720
+
1721
+ df = DataFrame(col_dict, columns=columns, index=index)
1722
+
1723
+ self._currow += new_rows
1724
+ return df
1725
+
1726
+ def get_chunk(self, size: int | None = None) -> DataFrame:
1727
+ if size is None:
1728
+ size = self.chunksize
1729
+ if self.nrows is not None:
1730
+ if self._currow >= self.nrows:
1731
+ raise StopIteration
1732
+ size = min(size, self.nrows - self._currow)
1733
+ return self.read(nrows=size)
1734
+
1735
+ def __enter__(self) -> TextFileReader:
1736
+ return self
1737
+
1738
+ def __exit__(
1739
+ self,
1740
+ exc_type: type[BaseException] | None,
1741
+ exc_value: BaseException | None,
1742
+ traceback: TracebackType | None,
1743
+ ) -> None:
1744
+ self.close()
1745
+
1746
+
1747
+ def TextParser(*args, **kwds) -> TextFileReader:
1748
+ """
1749
+ Converts lists of lists/tuples into DataFrames with proper type inference
1750
+ and optional (e.g. string to datetime) conversion. Also enables iterating
1751
+ lazily over chunks of large files
1752
+
1753
+ Parameters
1754
+ ----------
1755
+ data : file-like object or list
1756
+ delimiter : separator character to use
1757
+ dialect : str or csv.Dialect instance, optional
1758
+ Ignored if delimiter is longer than 1 character
1759
+ names : sequence, default
1760
+ header : int, default 0
1761
+ Row to use to parse column labels. Defaults to the first row. Prior
1762
+ rows will be discarded
1763
+ index_col : int or list, optional
1764
+ Column or columns to use as the (possibly hierarchical) index
1765
+ has_index_names: bool, default False
1766
+ True if the cols defined in index_col have an index name and are
1767
+ not in the header.
1768
+ na_values : scalar, str, list-like, or dict, optional
1769
+ Additional strings to recognize as NA/NaN.
1770
+ keep_default_na : bool, default True
1771
+ thousands : str, optional
1772
+ Thousands separator
1773
+ comment : str, optional
1774
+ Comment out remainder of line
1775
+ parse_dates : bool, default False
1776
+ keep_date_col : bool, default False
1777
+ date_parser : function, optional
1778
+
1779
+ .. deprecated:: 2.0.0
1780
+ date_format : str or dict of column -> format, default ``None``
1781
+
1782
+ .. versionadded:: 2.0.0
1783
+ skiprows : list of integers
1784
+ Row numbers to skip
1785
+ skipfooter : int
1786
+ Number of line at bottom of file to skip
1787
+ converters : dict, optional
1788
+ Dict of functions for converting values in certain columns. Keys can
1789
+ either be integers or column labels, values are functions that take one
1790
+ input argument, the cell (not column) content, and return the
1791
+ transformed content.
1792
+ encoding : str, optional
1793
+ Encoding to use for UTF when reading/writing (ex. 'utf-8')
1794
+ float_precision : str, optional
1795
+ Specifies which converter the C engine should use for floating-point
1796
+ values. The options are `None` or `high` for the ordinary converter,
1797
+ `legacy` for the original lower precision pandas converter, and
1798
+ `round_trip` for the round-trip converter.
1799
+
1800
+ .. versionchanged:: 1.2
1801
+ """
1802
+ kwds["engine"] = "python"
1803
+ return TextFileReader(*args, **kwds)
1804
+
1805
+
1806
+ def _clean_na_values(na_values, keep_default_na: bool = True):
1807
+ na_fvalues: set | dict
1808
+ if na_values is None:
1809
+ if keep_default_na:
1810
+ na_values = STR_NA_VALUES
1811
+ else:
1812
+ na_values = set()
1813
+ na_fvalues = set()
1814
+ elif isinstance(na_values, dict):
1815
+ old_na_values = na_values.copy()
1816
+ na_values = {} # Prevent aliasing.
1817
+
1818
+ # Convert the values in the na_values dictionary
1819
+ # into array-likes for further use. This is also
1820
+ # where we append the default NaN values, provided
1821
+ # that `keep_default_na=True`.
1822
+ for k, v in old_na_values.items():
1823
+ if not is_list_like(v):
1824
+ v = [v]
1825
+
1826
+ if keep_default_na:
1827
+ v = set(v) | STR_NA_VALUES
1828
+
1829
+ na_values[k] = v
1830
+ na_fvalues = {k: _floatify_na_values(v) for k, v in na_values.items()}
1831
+ else:
1832
+ if not is_list_like(na_values):
1833
+ na_values = [na_values]
1834
+ na_values = _stringify_na_values(na_values)
1835
+ if keep_default_na:
1836
+ na_values = na_values | STR_NA_VALUES
1837
+
1838
+ na_fvalues = _floatify_na_values(na_values)
1839
+
1840
+ return na_values, na_fvalues
1841
+
1842
+
1843
+ def _floatify_na_values(na_values):
1844
+ # create float versions of the na_values
1845
+ result = set()
1846
+ for v in na_values:
1847
+ try:
1848
+ v = float(v)
1849
+ if not np.isnan(v):
1850
+ result.add(v)
1851
+ except (TypeError, ValueError, OverflowError):
1852
+ pass
1853
+ return result
1854
+
1855
+
1856
+ def _stringify_na_values(na_values):
1857
+ """return a stringified and numeric for these values"""
1858
+ result: list[str | float] = []
1859
+ for x in na_values:
1860
+ result.append(str(x))
1861
+ result.append(x)
1862
+ try:
1863
+ v = float(x)
1864
+
1865
+ # we are like 999 here
1866
+ if v == int(v):
1867
+ v = int(v)
1868
+ result.append(f"{v}.0")
1869
+ result.append(str(v))
1870
+
1871
+ result.append(v)
1872
+ except (TypeError, ValueError, OverflowError):
1873
+ pass
1874
+ try:
1875
+ result.append(int(x))
1876
+ except (TypeError, ValueError, OverflowError):
1877
+ pass
1878
+ return set(result)
1879
+
1880
+
1881
+ def _refine_defaults_read(
1882
+ dialect: str | csv.Dialect | None,
1883
+ delimiter: str | None | lib.NoDefault,
1884
+ delim_whitespace: bool,
1885
+ engine: CSVEngine | None,
1886
+ sep: str | None | lib.NoDefault,
1887
+ on_bad_lines: str | Callable,
1888
+ names: Sequence[Hashable] | None | lib.NoDefault,
1889
+ defaults: dict[str, Any],
1890
+ dtype_backend: DtypeBackend | lib.NoDefault,
1891
+ ):
1892
+ """Validate/refine default values of input parameters of read_csv, read_table.
1893
+
1894
+ Parameters
1895
+ ----------
1896
+ dialect : str or csv.Dialect
1897
+ If provided, this parameter will override values (default or not) for the
1898
+ following parameters: `delimiter`, `doublequote`, `escapechar`,
1899
+ `skipinitialspace`, `quotechar`, and `quoting`. If it is necessary to
1900
+ override values, a ParserWarning will be issued. See csv.Dialect
1901
+ documentation for more details.
1902
+ delimiter : str or object
1903
+ Alias for sep.
1904
+ delim_whitespace : bool
1905
+ Specifies whether or not whitespace (e.g. ``' '`` or ``'\t'``) will be
1906
+ used as the sep. Equivalent to setting ``sep='\\s+'``. If this option
1907
+ is set to True, nothing should be passed in for the ``delimiter``
1908
+ parameter.
1909
+ engine : {{'c', 'python'}}
1910
+ Parser engine to use. The C engine is faster while the python engine is
1911
+ currently more feature-complete.
1912
+ sep : str or object
1913
+ A delimiter provided by the user (str) or a sentinel value, i.e.
1914
+ pandas._libs.lib.no_default.
1915
+ on_bad_lines : str, callable
1916
+ An option for handling bad lines or a sentinel value(None).
1917
+ names : array-like, optional
1918
+ List of column names to use. If the file contains a header row,
1919
+ then you should explicitly pass ``header=0`` to override the column names.
1920
+ Duplicates in this list are not allowed.
1921
+ defaults: dict
1922
+ Default values of input parameters.
1923
+
1924
+ Returns
1925
+ -------
1926
+ kwds : dict
1927
+ Input parameters with correct values.
1928
+
1929
+ Raises
1930
+ ------
1931
+ ValueError :
1932
+ If a delimiter was specified with ``sep`` (or ``delimiter``) and
1933
+ ``delim_whitespace=True``.
1934
+ """
1935
+ # fix types for sep, delimiter to Union(str, Any)
1936
+ delim_default = defaults["delimiter"]
1937
+ kwds: dict[str, Any] = {}
1938
+ # gh-23761
1939
+ #
1940
+ # When a dialect is passed, it overrides any of the overlapping
1941
+ # parameters passed in directly. We don't want to warn if the
1942
+ # default parameters were passed in (since it probably means
1943
+ # that the user didn't pass them in explicitly in the first place).
1944
+ #
1945
+ # "delimiter" is the annoying corner case because we alias it to
1946
+ # "sep" before doing comparison to the dialect values later on.
1947
+ # Thus, we need a flag to indicate that we need to "override"
1948
+ # the comparison to dialect values by checking if default values
1949
+ # for BOTH "delimiter" and "sep" were provided.
1950
+ if dialect is not None:
1951
+ kwds["sep_override"] = delimiter is None and (
1952
+ sep is lib.no_default or sep == delim_default
1953
+ )
1954
+
1955
+ if delimiter and (sep is not lib.no_default):
1956
+ raise ValueError("Specified a sep and a delimiter; you can only specify one.")
1957
+
1958
+ kwds["names"] = None if names is lib.no_default else names
1959
+
1960
+ # Alias sep -> delimiter.
1961
+ if delimiter is None:
1962
+ delimiter = sep
1963
+
1964
+ if delim_whitespace and (delimiter is not lib.no_default):
1965
+ raise ValueError(
1966
+ "Specified a delimiter with both sep and "
1967
+ "delim_whitespace=True; you can only specify one."
1968
+ )
1969
+
1970
+ if delimiter == "\n":
1971
+ raise ValueError(
1972
+ r"Specified \n as separator or delimiter. This forces the python engine "
1973
+ "which does not accept a line terminator. Hence it is not allowed to use "
1974
+ "the line terminator as separator.",
1975
+ )
1976
+
1977
+ if delimiter is lib.no_default:
1978
+ # assign default separator value
1979
+ kwds["delimiter"] = delim_default
1980
+ else:
1981
+ kwds["delimiter"] = delimiter
1982
+
1983
+ if engine is not None:
1984
+ kwds["engine_specified"] = True
1985
+ else:
1986
+ kwds["engine"] = "c"
1987
+ kwds["engine_specified"] = False
1988
+
1989
+ if on_bad_lines == "error":
1990
+ kwds["on_bad_lines"] = ParserBase.BadLineHandleMethod.ERROR
1991
+ elif on_bad_lines == "warn":
1992
+ kwds["on_bad_lines"] = ParserBase.BadLineHandleMethod.WARN
1993
+ elif on_bad_lines == "skip":
1994
+ kwds["on_bad_lines"] = ParserBase.BadLineHandleMethod.SKIP
1995
+ elif callable(on_bad_lines):
1996
+ if engine != "python":
1997
+ raise ValueError(
1998
+ "on_bad_line can only be a callable function if engine='python'"
1999
+ )
2000
+ kwds["on_bad_lines"] = on_bad_lines
2001
+ else:
2002
+ raise ValueError(f"Argument {on_bad_lines} is invalid for on_bad_lines")
2003
+
2004
+ check_dtype_backend(dtype_backend)
2005
+
2006
+ kwds["dtype_backend"] = dtype_backend
2007
+
2008
+ return kwds
2009
+
2010
+
2011
+ def _extract_dialect(kwds: dict[str, Any]) -> csv.Dialect | None:
2012
+ """
2013
+ Extract concrete csv dialect instance.
2014
+
2015
+ Returns
2016
+ -------
2017
+ csv.Dialect or None
2018
+ """
2019
+ if kwds.get("dialect") is None:
2020
+ return None
2021
+
2022
+ dialect = kwds["dialect"]
2023
+ if dialect in csv.list_dialects():
2024
+ dialect = csv.get_dialect(dialect)
2025
+
2026
+ _validate_dialect(dialect)
2027
+
2028
+ return dialect
2029
+
2030
+
2031
+ MANDATORY_DIALECT_ATTRS = (
2032
+ "delimiter",
2033
+ "doublequote",
2034
+ "escapechar",
2035
+ "skipinitialspace",
2036
+ "quotechar",
2037
+ "quoting",
2038
+ )
2039
+
2040
+
2041
+ def _validate_dialect(dialect: csv.Dialect) -> None:
2042
+ """
2043
+ Validate csv dialect instance.
2044
+
2045
+ Raises
2046
+ ------
2047
+ ValueError
2048
+ If incorrect dialect is provided.
2049
+ """
2050
+ for param in MANDATORY_DIALECT_ATTRS:
2051
+ if not hasattr(dialect, param):
2052
+ raise ValueError(f"Invalid dialect {dialect} provided")
2053
+
2054
+
2055
+ def _merge_with_dialect_properties(
2056
+ dialect: csv.Dialect,
2057
+ defaults: dict[str, Any],
2058
+ ) -> dict[str, Any]:
2059
+ """
2060
+ Merge default kwargs in TextFileReader with dialect parameters.
2061
+
2062
+ Parameters
2063
+ ----------
2064
+ dialect : csv.Dialect
2065
+ Concrete csv dialect. See csv.Dialect documentation for more details.
2066
+ defaults : dict
2067
+ Keyword arguments passed to TextFileReader.
2068
+
2069
+ Returns
2070
+ -------
2071
+ kwds : dict
2072
+ Updated keyword arguments, merged with dialect parameters.
2073
+ """
2074
+ kwds = defaults.copy()
2075
+
2076
+ for param in MANDATORY_DIALECT_ATTRS:
2077
+ dialect_val = getattr(dialect, param)
2078
+
2079
+ parser_default = parser_defaults[param]
2080
+ provided = kwds.get(param, parser_default)
2081
+
2082
+ # Messages for conflicting values between the dialect
2083
+ # instance and the actual parameters provided.
2084
+ conflict_msgs = []
2085
+
2086
+ # Don't warn if the default parameter was passed in,
2087
+ # even if it conflicts with the dialect (gh-23761).
2088
+ if provided not in (parser_default, dialect_val):
2089
+ msg = (
2090
+ f"Conflicting values for '{param}': '{provided}' was "
2091
+ f"provided, but the dialect specifies '{dialect_val}'. "
2092
+ "Using the dialect-specified value."
2093
+ )
2094
+
2095
+ # Annoying corner case for not warning about
2096
+ # conflicts between dialect and delimiter parameter.
2097
+ # Refer to the outer "_read_" function for more info.
2098
+ if not (param == "delimiter" and kwds.pop("sep_override", False)):
2099
+ conflict_msgs.append(msg)
2100
+
2101
+ if conflict_msgs:
2102
+ warnings.warn(
2103
+ "\n\n".join(conflict_msgs), ParserWarning, stacklevel=find_stack_level()
2104
+ )
2105
+ kwds[param] = dialect_val
2106
+ return kwds
2107
+
2108
+
2109
+ def _validate_skipfooter(kwds: dict[str, Any]) -> None:
2110
+ """
2111
+ Check whether skipfooter is compatible with other kwargs in TextFileReader.
2112
+
2113
+ Parameters
2114
+ ----------
2115
+ kwds : dict
2116
+ Keyword arguments passed to TextFileReader.
2117
+
2118
+ Raises
2119
+ ------
2120
+ ValueError
2121
+ If skipfooter is not compatible with other parameters.
2122
+ """
2123
+ if kwds.get("skipfooter"):
2124
+ if kwds.get("iterator") or kwds.get("chunksize"):
2125
+ raise ValueError("'skipfooter' not supported for iteration")
2126
+ if kwds.get("nrows"):
2127
+ raise ValueError("'skipfooter' not supported with 'nrows'")
videochat2/lib/python3.10/site-packages/pandas/io/sas/__init__.py ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ from pandas.io.sas.sasreader import read_sas
2
+
3
+ __all__ = ["read_sas"]
videochat2/lib/python3.10/site-packages/pandas/io/sas/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (247 Bytes). View file