diff --git a/.gitattributes b/.gitattributes
index 5d16b3ad05b80349d0f5eabd0bdbced676b130f8..b0d1618dfccae93d5702e487c64508507f30d42e 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -793,3 +793,5 @@ infer_4_33_0/lib/python3.10/site-packages/torchvision.libs/libcudart.782fcab0.so
emu3/lib/python3.10/site-packages/transformers/models/seamless_m4t_v2/__pycache__/modeling_seamless_m4t_v2.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
emu3/lib/python3.10/site-packages/transformers/models/speecht5/__pycache__/modeling_speecht5.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
evalkit_eagle/lib/python3.10/site-packages/altair/vegalite/v5/schema/__pycache__/core.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
+evalkit_eagle/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/multi.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
+evalkit_eagle/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/groupby.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
diff --git a/evalkit_eagle/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/groupby.cpython-310.pyc b/evalkit_eagle/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/groupby.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..745aa41758b5d49a35395aa499231eb80e7d0437
--- /dev/null
+++ b/evalkit_eagle/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/groupby.cpython-310.pyc
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:081532ea448213b2aeb08ea3489b54471367bdcb7d67046294f8b715eee9e367
+size 158562
diff --git a/evalkit_eagle/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/multi.cpython-310.pyc b/evalkit_eagle/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/multi.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..671f4420bcd92fd509f3168715763b3f1b8b680a
--- /dev/null
+++ b/evalkit_eagle/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/multi.cpython-310.pyc
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:175c0bfc8e92043260d60b2509174cdd08ac2a8efdfc0c28aa8e9fcddf9c8dfa
+size 108432
diff --git a/evalkit_eagle/lib/python3.10/site-packages/pandas/io/__pycache__/__init__.cpython-310.pyc b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8ebaad67733213d9c075121a973d1173b98a49cd
Binary files /dev/null and b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/__pycache__/__init__.cpython-310.pyc differ
diff --git a/evalkit_eagle/lib/python3.10/site-packages/pandas/io/__pycache__/_util.cpython-310.pyc b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/__pycache__/_util.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..20b4e37e85e2982515f2ff496341688d264d7e12
Binary files /dev/null and b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/__pycache__/_util.cpython-310.pyc differ
diff --git a/evalkit_eagle/lib/python3.10/site-packages/pandas/io/__pycache__/api.cpython-310.pyc b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/__pycache__/api.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c749086b135ea570b0e52556165b52e015eef2ce
Binary files /dev/null and b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/__pycache__/api.cpython-310.pyc differ
diff --git a/evalkit_eagle/lib/python3.10/site-packages/pandas/io/__pycache__/clipboards.cpython-310.pyc b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/__pycache__/clipboards.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3f8e8884ce4797966cb1c9c872d3a7b9668d5ca4
Binary files /dev/null and b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/__pycache__/clipboards.cpython-310.pyc differ
diff --git a/evalkit_eagle/lib/python3.10/site-packages/pandas/io/__pycache__/common.cpython-310.pyc b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/__pycache__/common.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a15406876605efa8207616834cb3d84fc2a1b60f
Binary files /dev/null and b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/__pycache__/common.cpython-310.pyc differ
diff --git a/evalkit_eagle/lib/python3.10/site-packages/pandas/io/__pycache__/feather_format.cpython-310.pyc b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/__pycache__/feather_format.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d9c510b9227bde749ab59ff6352e1fcd38d19366
Binary files /dev/null and b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/__pycache__/feather_format.cpython-310.pyc differ
diff --git a/evalkit_eagle/lib/python3.10/site-packages/pandas/io/__pycache__/gbq.cpython-310.pyc b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/__pycache__/gbq.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..80cf9c60aa4ab17dead70cb55d47f89fa9a66359
Binary files /dev/null and b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/__pycache__/gbq.cpython-310.pyc differ
diff --git a/evalkit_eagle/lib/python3.10/site-packages/pandas/io/__pycache__/html.cpython-310.pyc b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/__pycache__/html.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..916643b6a5f17afae536a7ceed33a21b9a301ac0
Binary files /dev/null and b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/__pycache__/html.cpython-310.pyc differ
diff --git a/evalkit_eagle/lib/python3.10/site-packages/pandas/io/__pycache__/orc.cpython-310.pyc b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/__pycache__/orc.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6ebbc745214a132837bfeada03e102de13d4e551
Binary files /dev/null and b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/__pycache__/orc.cpython-310.pyc differ
diff --git a/evalkit_eagle/lib/python3.10/site-packages/pandas/io/__pycache__/parquet.cpython-310.pyc b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/__pycache__/parquet.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f4ce206137d6ece63dd9ff77625ff8aea36104f2
Binary files /dev/null and b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/__pycache__/parquet.cpython-310.pyc differ
diff --git a/evalkit_eagle/lib/python3.10/site-packages/pandas/io/__pycache__/pickle.cpython-310.pyc b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/__pycache__/pickle.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3955816b787ef2666e2f7c2a7e03aa03b510e6fb
Binary files /dev/null and b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/__pycache__/pickle.cpython-310.pyc differ
diff --git a/evalkit_eagle/lib/python3.10/site-packages/pandas/io/__pycache__/spss.cpython-310.pyc b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/__pycache__/spss.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2ef4f03db0fb139e6531dda779e8a7ee93fd1100
Binary files /dev/null and b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/__pycache__/spss.cpython-310.pyc differ
diff --git a/evalkit_eagle/lib/python3.10/site-packages/pandas/io/__pycache__/xml.cpython-310.pyc b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/__pycache__/xml.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a784212492a0f6168a23d92cd645761122a3aa46
Binary files /dev/null and b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/__pycache__/xml.cpython-310.pyc differ
diff --git a/evalkit_eagle/lib/python3.10/site-packages/pandas/io/clipboard/__init__.py b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/clipboard/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..6491849925e863c35a98390a31729cb13e28ca19
--- /dev/null
+++ b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/clipboard/__init__.py
@@ -0,0 +1,747 @@
+"""
+Pyperclip
+
+A cross-platform clipboard module for Python,
+with copy & paste functions for plain text.
+By Al Sweigart al@inventwithpython.com
+Licence at LICENSES/PYPERCLIP_LICENSE
+
+Usage:
+ import pyperclip
+ pyperclip.copy('The text to be copied to the clipboard.')
+ spam = pyperclip.paste()
+
+ if not pyperclip.is_available():
+ print("Copy functionality unavailable!")
+
+On Windows, no additional modules are needed.
+On Mac, the pyobjc module is used, falling back to the pbcopy and pbpaste CLI
+ commands. (These commands should come with OS X.)
+On Linux, install xclip, xsel, or wl-clipboard (for "wayland" sessions) via
+package manager.
+For example, in Debian:
+ sudo apt-get install xclip
+ sudo apt-get install xsel
+ sudo apt-get install wl-clipboard
+
+Otherwise on Linux, you will need the PyQt5 modules installed.
+
+This module does not work with PyGObject yet.
+
+Cygwin is currently not supported.
+
+Security Note: This module runs programs with these names:
+ - pbcopy
+ - pbpaste
+ - xclip
+ - xsel
+ - wl-copy/wl-paste
+ - klipper
+ - qdbus
+A malicious user could rename or add programs with these names, tricking
+Pyperclip into running them with whatever permissions the Python process has.
+
+"""
+
+__version__ = "1.8.2"
+
+
+import contextlib
+import ctypes
+from ctypes import (
+ c_size_t,
+ c_wchar,
+ c_wchar_p,
+ get_errno,
+ sizeof,
+)
+import os
+import platform
+from shutil import which as _executable_exists
+import subprocess
+import time
+import warnings
+
+from pandas.errors import (
+ PyperclipException,
+ PyperclipWindowsException,
+)
+from pandas.util._exceptions import find_stack_level
+
+# `import PyQt4` sys.exit()s if DISPLAY is not in the environment.
+# Thus, we need to detect the presence of $DISPLAY manually
+# and not load PyQt4 if it is absent.
+HAS_DISPLAY = os.getenv("DISPLAY")
+
+EXCEPT_MSG = """
+ Pyperclip could not find a copy/paste mechanism for your system.
+ For more information, please visit
+ https://pyperclip.readthedocs.io/en/latest/index.html#not-implemented-error
+ """
+
+ENCODING = "utf-8"
+
+
+class PyperclipTimeoutException(PyperclipException):
+ pass
+
+
+def _stringifyText(text) -> str:
+ acceptedTypes = (str, int, float, bool)
+ if not isinstance(text, acceptedTypes):
+ raise PyperclipException(
+ f"only str, int, float, and bool values "
+ f"can be copied to the clipboard, not {type(text).__name__}"
+ )
+ return str(text)
+
+
+def init_osx_pbcopy_clipboard():
+ def copy_osx_pbcopy(text):
+ text = _stringifyText(text) # Converts non-str values to str.
+ with subprocess.Popen(
+ ["pbcopy", "w"], stdin=subprocess.PIPE, close_fds=True
+ ) as p:
+ p.communicate(input=text.encode(ENCODING))
+
+ def paste_osx_pbcopy():
+ with subprocess.Popen(
+ ["pbpaste", "r"], stdout=subprocess.PIPE, close_fds=True
+ ) as p:
+ stdout = p.communicate()[0]
+ return stdout.decode(ENCODING)
+
+ return copy_osx_pbcopy, paste_osx_pbcopy
+
+
+def init_osx_pyobjc_clipboard():
+ def copy_osx_pyobjc(text):
+ """Copy string argument to clipboard"""
+ text = _stringifyText(text) # Converts non-str values to str.
+ newStr = Foundation.NSString.stringWithString_(text).nsstring()
+ newData = newStr.dataUsingEncoding_(Foundation.NSUTF8StringEncoding)
+ board = AppKit.NSPasteboard.generalPasteboard()
+ board.declareTypes_owner_([AppKit.NSStringPboardType], None)
+ board.setData_forType_(newData, AppKit.NSStringPboardType)
+
+ def paste_osx_pyobjc():
+ """Returns contents of clipboard"""
+ board = AppKit.NSPasteboard.generalPasteboard()
+ content = board.stringForType_(AppKit.NSStringPboardType)
+ return content
+
+ return copy_osx_pyobjc, paste_osx_pyobjc
+
+
+def init_qt_clipboard():
+ global QApplication
+ # $DISPLAY should exist
+
+ # Try to import from qtpy, but if that fails try PyQt5 then PyQt4
+ try:
+ from qtpy.QtWidgets import QApplication
+ except ImportError:
+ try:
+ from PyQt5.QtWidgets import QApplication
+ except ImportError:
+ from PyQt4.QtGui import QApplication
+
+ app = QApplication.instance()
+ if app is None:
+ app = QApplication([])
+
+ def copy_qt(text):
+ text = _stringifyText(text) # Converts non-str values to str.
+ cb = app.clipboard()
+ cb.setText(text)
+
+ def paste_qt() -> str:
+ cb = app.clipboard()
+ return str(cb.text())
+
+ return copy_qt, paste_qt
+
+
+def init_xclip_clipboard():
+ DEFAULT_SELECTION = "c"
+ PRIMARY_SELECTION = "p"
+
+ def copy_xclip(text, primary=False):
+ text = _stringifyText(text) # Converts non-str values to str.
+ selection = DEFAULT_SELECTION
+ if primary:
+ selection = PRIMARY_SELECTION
+ with subprocess.Popen(
+ ["xclip", "-selection", selection], stdin=subprocess.PIPE, close_fds=True
+ ) as p:
+ p.communicate(input=text.encode(ENCODING))
+
+ def paste_xclip(primary=False):
+ selection = DEFAULT_SELECTION
+ if primary:
+ selection = PRIMARY_SELECTION
+ with subprocess.Popen(
+ ["xclip", "-selection", selection, "-o"],
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ close_fds=True,
+ ) as p:
+ stdout = p.communicate()[0]
+ # Intentionally ignore extraneous output on stderr when clipboard is empty
+ return stdout.decode(ENCODING)
+
+ return copy_xclip, paste_xclip
+
+
+def init_xsel_clipboard():
+ DEFAULT_SELECTION = "-b"
+ PRIMARY_SELECTION = "-p"
+
+ def copy_xsel(text, primary=False):
+ text = _stringifyText(text) # Converts non-str values to str.
+ selection_flag = DEFAULT_SELECTION
+ if primary:
+ selection_flag = PRIMARY_SELECTION
+ with subprocess.Popen(
+ ["xsel", selection_flag, "-i"], stdin=subprocess.PIPE, close_fds=True
+ ) as p:
+ p.communicate(input=text.encode(ENCODING))
+
+ def paste_xsel(primary=False):
+ selection_flag = DEFAULT_SELECTION
+ if primary:
+ selection_flag = PRIMARY_SELECTION
+ with subprocess.Popen(
+ ["xsel", selection_flag, "-o"], stdout=subprocess.PIPE, close_fds=True
+ ) as p:
+ stdout = p.communicate()[0]
+ return stdout.decode(ENCODING)
+
+ return copy_xsel, paste_xsel
+
+
+def init_wl_clipboard():
+ PRIMARY_SELECTION = "-p"
+
+ def copy_wl(text, primary=False):
+ text = _stringifyText(text) # Converts non-str values to str.
+ args = ["wl-copy"]
+ if primary:
+ args.append(PRIMARY_SELECTION)
+ if not text:
+ args.append("--clear")
+ subprocess.check_call(args, close_fds=True)
+ else:
+ p = subprocess.Popen(args, stdin=subprocess.PIPE, close_fds=True)
+ p.communicate(input=text.encode(ENCODING))
+
+ def paste_wl(primary=False):
+ args = ["wl-paste", "-n"]
+ if primary:
+ args.append(PRIMARY_SELECTION)
+ p = subprocess.Popen(args, stdout=subprocess.PIPE, close_fds=True)
+ stdout, _stderr = p.communicate()
+ return stdout.decode(ENCODING)
+
+ return copy_wl, paste_wl
+
+
+def init_klipper_clipboard():
+ def copy_klipper(text):
+ text = _stringifyText(text) # Converts non-str values to str.
+ with subprocess.Popen(
+ [
+ "qdbus",
+ "org.kde.klipper",
+ "/klipper",
+ "setClipboardContents",
+ text.encode(ENCODING),
+ ],
+ stdin=subprocess.PIPE,
+ close_fds=True,
+ ) as p:
+ p.communicate(input=None)
+
+ def paste_klipper():
+ with subprocess.Popen(
+ ["qdbus", "org.kde.klipper", "/klipper", "getClipboardContents"],
+ stdout=subprocess.PIPE,
+ close_fds=True,
+ ) as p:
+ stdout = p.communicate()[0]
+
+ # Workaround for https://bugs.kde.org/show_bug.cgi?id=342874
+ # TODO: https://github.com/asweigart/pyperclip/issues/43
+ clipboardContents = stdout.decode(ENCODING)
+ # even if blank, Klipper will append a newline at the end
+ assert len(clipboardContents) > 0
+ # make sure that newline is there
+ assert clipboardContents.endswith("\n")
+ if clipboardContents.endswith("\n"):
+ clipboardContents = clipboardContents[:-1]
+ return clipboardContents
+
+ return copy_klipper, paste_klipper
+
+
+def init_dev_clipboard_clipboard():
+ def copy_dev_clipboard(text):
+ text = _stringifyText(text) # Converts non-str values to str.
+ if text == "":
+ warnings.warn(
+ "Pyperclip cannot copy a blank string to the clipboard on Cygwin. "
+ "This is effectively a no-op.",
+ stacklevel=find_stack_level(),
+ )
+ if "\r" in text:
+ warnings.warn(
+ "Pyperclip cannot handle \\r characters on Cygwin.",
+ stacklevel=find_stack_level(),
+ )
+
+ with open("/dev/clipboard", "w", encoding="utf-8") as fd:
+ fd.write(text)
+
+ def paste_dev_clipboard() -> str:
+ with open("/dev/clipboard", encoding="utf-8") as fd:
+ content = fd.read()
+ return content
+
+ return copy_dev_clipboard, paste_dev_clipboard
+
+
+def init_no_clipboard():
+ class ClipboardUnavailable:
+ def __call__(self, *args, **kwargs):
+ raise PyperclipException(EXCEPT_MSG)
+
+ def __bool__(self) -> bool:
+ return False
+
+ return ClipboardUnavailable(), ClipboardUnavailable()
+
+
+# Windows-related clipboard functions:
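+# CheckedCall wraps a ctypes function: calls pass straight through, but a
+# falsy return value combined with a nonzero ctypes errno raises
+# PyperclipWindowsException. Attribute assignments (argtypes, restype) are
+# forwarded to the wrapped function.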
+class CheckedCall:
+ def __init__(self, f) -> None:
+ super().__setattr__("f", f)
+
+ def __call__(self, *args):
+ ret = self.f(*args)
+ if not ret and get_errno():
+ raise PyperclipWindowsException("Error calling " + self.f.__name__)
+ return ret
+
+ def __setattr__(self, key, value):
+ setattr(self.f, key, value)
+
+
+def init_windows_clipboard():
+ global HGLOBAL, LPVOID, DWORD, LPCSTR, INT
+ global HWND, HINSTANCE, HMENU, BOOL, UINT, HANDLE
+ from ctypes.wintypes import (
+ BOOL,
+ DWORD,
+ HANDLE,
+ HGLOBAL,
+ HINSTANCE,
+ HMENU,
+ HWND,
+ INT,
+ LPCSTR,
+ LPVOID,
+ UINT,
+ )
+
+ windll = ctypes.windll
+ msvcrt = ctypes.CDLL("msvcrt")
+
+ safeCreateWindowExA = CheckedCall(windll.user32.CreateWindowExA)
+ safeCreateWindowExA.argtypes = [
+ DWORD,
+ LPCSTR,
+ LPCSTR,
+ DWORD,
+ INT,
+ INT,
+ INT,
+ INT,
+ HWND,
+ HMENU,
+ HINSTANCE,
+ LPVOID,
+ ]
+ safeCreateWindowExA.restype = HWND
+
+ safeDestroyWindow = CheckedCall(windll.user32.DestroyWindow)
+ safeDestroyWindow.argtypes = [HWND]
+ safeDestroyWindow.restype = BOOL
+
+ OpenClipboard = windll.user32.OpenClipboard
+ OpenClipboard.argtypes = [HWND]
+ OpenClipboard.restype = BOOL
+
+ safeCloseClipboard = CheckedCall(windll.user32.CloseClipboard)
+ safeCloseClipboard.argtypes = []
+ safeCloseClipboard.restype = BOOL
+
+ safeEmptyClipboard = CheckedCall(windll.user32.EmptyClipboard)
+ safeEmptyClipboard.argtypes = []
+ safeEmptyClipboard.restype = BOOL
+
+ safeGetClipboardData = CheckedCall(windll.user32.GetClipboardData)
+ safeGetClipboardData.argtypes = [UINT]
+ safeGetClipboardData.restype = HANDLE
+
+ safeSetClipboardData = CheckedCall(windll.user32.SetClipboardData)
+ safeSetClipboardData.argtypes = [UINT, HANDLE]
+ safeSetClipboardData.restype = HANDLE
+
+ safeGlobalAlloc = CheckedCall(windll.kernel32.GlobalAlloc)
+ safeGlobalAlloc.argtypes = [UINT, c_size_t]
+ safeGlobalAlloc.restype = HGLOBAL
+
+ safeGlobalLock = CheckedCall(windll.kernel32.GlobalLock)
+ safeGlobalLock.argtypes = [HGLOBAL]
+ safeGlobalLock.restype = LPVOID
+
+ safeGlobalUnlock = CheckedCall(windll.kernel32.GlobalUnlock)
+ safeGlobalUnlock.argtypes = [HGLOBAL]
+ safeGlobalUnlock.restype = BOOL
+
+ wcslen = CheckedCall(msvcrt.wcslen)
+ wcslen.argtypes = [c_wchar_p]
+ wcslen.restype = UINT
+
+ GMEM_MOVEABLE = 0x0002
+ CF_UNICODETEXT = 13
+
+ @contextlib.contextmanager
+ def window():
+ """
+ Context that provides a valid Windows hwnd.
+ """
+ # we really just need the hwnd, so setting "STATIC"
+ # as predefined lpClass is just fine.
+ hwnd = safeCreateWindowExA(
+ 0, b"STATIC", None, 0, 0, 0, 0, 0, None, None, None, None
+ )
+ try:
+ yield hwnd
+ finally:
+ safeDestroyWindow(hwnd)
+
+ @contextlib.contextmanager
+ def clipboard(hwnd):
+ """
+ Context manager that opens the clipboard and prevents
+ other applications from modifying the clipboard content.
+ """
+ # We may not get the clipboard handle immediately because
+ # some other application is accessing it (?)
+ # We try for at least 500ms to get the clipboard.
+ t = time.time() + 0.5
+ success = False
+ while time.time() < t:
+ success = OpenClipboard(hwnd)
+ if success:
+ break
+ time.sleep(0.01)
+ if not success:
+ raise PyperclipWindowsException("Error calling OpenClipboard")
+
+ try:
+ yield
+ finally:
+ safeCloseClipboard()
+
+ def copy_windows(text):
+ # This function is heavily based on
+ # http://msdn.com/ms649016#_win32_Copying_Information_to_the_Clipboard
+
+ text = _stringifyText(text) # Converts non-str values to str.
+
+ with window() as hwnd:
+ # http://msdn.com/ms649048
+ # If an application calls OpenClipboard with hwnd set to NULL,
+ # EmptyClipboard sets the clipboard owner to NULL;
+ # this causes SetClipboardData to fail.
+ # => We need a valid hwnd to copy something.
+ with clipboard(hwnd):
+ safeEmptyClipboard()
+
+ if text:
+ # http://msdn.com/ms649051
+ # If the hMem parameter identifies a memory object,
+ # the object must have been allocated using the
+ # function with the GMEM_MOVEABLE flag.
+ count = wcslen(text) + 1
+ handle = safeGlobalAlloc(GMEM_MOVEABLE, count * sizeof(c_wchar))
+ locked_handle = safeGlobalLock(handle)
+
+ ctypes.memmove(
+ c_wchar_p(locked_handle),
+ c_wchar_p(text),
+ count * sizeof(c_wchar),
+ )
+
+ safeGlobalUnlock(handle)
+ safeSetClipboardData(CF_UNICODETEXT, handle)
+
+ def paste_windows():
+ with clipboard(None):
+ handle = safeGetClipboardData(CF_UNICODETEXT)
+ if not handle:
+ # GetClipboardData may return NULL with errno == NO_ERROR
+ # if the clipboard is empty.
+ # (Also, it may return a handle to an empty buffer,
+ # but technically that's not empty)
+ return ""
+ return c_wchar_p(handle).value
+
+ return copy_windows, paste_windows
+
+
+def init_wsl_clipboard():
+ def copy_wsl(text):
+ text = _stringifyText(text) # Converts non-str values to str.
+ with subprocess.Popen(["clip.exe"], stdin=subprocess.PIPE, close_fds=True) as p:
+ p.communicate(input=text.encode(ENCODING))
+
+ def paste_wsl():
+ with subprocess.Popen(
+ ["powershell.exe", "-command", "Get-Clipboard"],
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ close_fds=True,
+ ) as p:
+ stdout = p.communicate()[0]
+ # WSL appends "\r\n" to the contents.
+ return stdout[:-2].decode(ENCODING)
+
+ return copy_wsl, paste_wsl
+
+
+# Automatic detection of clipboard mechanisms
+# and importing is done in determine_clipboard():
+def determine_clipboard():
+ """
+ Determine the OS/platform and set the copy() and paste() functions
+ accordingly.
+ """
+ global Foundation, AppKit, qtpy, PyQt4, PyQt5
+
+ # Setup for the CYGWIN platform:
+ if (
+ "cygwin" in platform.system().lower()
+ ): # Cygwin has a variety of values returned by platform.system(),
+ # such as 'CYGWIN_NT-6.1'
+ # FIXME(pyperclip#55): pyperclip currently does not support Cygwin,
+ # see https://github.com/asweigart/pyperclip/issues/55
+ if os.path.exists("/dev/clipboard"):
+ warnings.warn(
+ "Pyperclip's support for Cygwin is not perfect, "
+ "see https://github.com/asweigart/pyperclip/issues/55",
+ stacklevel=find_stack_level(),
+ )
+ return init_dev_clipboard_clipboard()
+
+ # Setup for the WINDOWS platform:
+ elif os.name == "nt" or platform.system() == "Windows":
+ return init_windows_clipboard()
+
+ if platform.system() == "Linux":
+ if _executable_exists("wslconfig.exe"):
+ return init_wsl_clipboard()
+
+ # Setup for the macOS platform:
+ if os.name == "mac" or platform.system() == "Darwin":
+ try:
+ import AppKit
+ import Foundation # check if pyobjc is installed
+ except ImportError:
+ return init_osx_pbcopy_clipboard()
+ else:
+ return init_osx_pyobjc_clipboard()
+
+ # Setup for the LINUX platform:
+ if HAS_DISPLAY:
+ if os.environ.get("WAYLAND_DISPLAY") and _executable_exists("wl-copy"):
+ return init_wl_clipboard()
+ if _executable_exists("xsel"):
+ return init_xsel_clipboard()
+ if _executable_exists("xclip"):
+ return init_xclip_clipboard()
+ if _executable_exists("klipper") and _executable_exists("qdbus"):
+ return init_klipper_clipboard()
+
+ try:
+ # qtpy is a small abstraction layer that lets you write applications
+ # using a single api call to either PyQt or PySide.
+ # https://pypi.python.org/project/QtPy
+ import qtpy # check if qtpy is installed
+ except ImportError:
+        # If qtpy isn't installed, fall back on importing PyQt5, then PyQt4.
+ try:
+ import PyQt5 # check if PyQt5 is installed
+ except ImportError:
+ try:
+ import PyQt4 # check if PyQt4 is installed
+ except ImportError:
+ pass # We want to fail fast for all non-ImportError exceptions.
+ else:
+ return init_qt_clipboard()
+ else:
+ return init_qt_clipboard()
+ else:
+ return init_qt_clipboard()
+
+ return init_no_clipboard()
+
+
+def set_clipboard(clipboard):
+ """
+ Explicitly sets the clipboard mechanism. The "clipboard mechanism" is how
+ the copy() and paste() functions interact with the operating system to
+ implement the copy/paste feature. The clipboard parameter must be one of:
+ - pbcopy
+ - pyobjc (default on macOS)
+ - qt
+ - xclip
+    - xsel
+    - wl-clipboard
+ - klipper
+ - windows (default on Windows)
+ - no (this is what is set when no clipboard mechanism can be found)
+ """
+ global copy, paste
+
+ clipboard_types = {
+ "pbcopy": init_osx_pbcopy_clipboard,
+ "pyobjc": init_osx_pyobjc_clipboard,
+ "qt": init_qt_clipboard, # TODO - split this into 'qtpy', 'pyqt4', and 'pyqt5'
+ "xclip": init_xclip_clipboard,
+ "xsel": init_xsel_clipboard,
+ "wl-clipboard": init_wl_clipboard,
+ "klipper": init_klipper_clipboard,
+ "windows": init_windows_clipboard,
+ "no": init_no_clipboard,
+ }
+
+ if clipboard not in clipboard_types:
+ allowed_clipboard_types = [repr(_) for _ in clipboard_types]
+ raise ValueError(
+ f"Argument must be one of {', '.join(allowed_clipboard_types)}"
+ )
+
+ # Sets pyperclip's copy() and paste() functions:
+ copy, paste = clipboard_types[clipboard]()
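+    # Illustrative usage: set_clipboard("xsel") forces the xsel backend, so
+    # later copy("spam") / paste() calls shell out to the xsel executable.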
+
+
+def lazy_load_stub_copy(text):
+ """
+ A stub function for copy(), which will load the real copy() function when
+ called so that the real copy() function is used for later calls.
+
+ This allows users to import pyperclip without having determine_clipboard()
+ automatically run, which will automatically select a clipboard mechanism.
+ This could be a problem if it selects, say, the memory-heavy PyQt4 module
+ but the user was just going to immediately call set_clipboard() to use a
+ different clipboard mechanism.
+
+ The lazy loading this stub function implements gives the user a chance to
+ call set_clipboard() to pick another clipboard mechanism. Or, if the user
+ simply calls copy() or paste() without calling set_clipboard() first,
+ will fall back on whatever clipboard mechanism that determine_clipboard()
+ automatically chooses.
+ """
+ global copy, paste
+ copy, paste = determine_clipboard()
+ return copy(text)
+
+
+def lazy_load_stub_paste():
+ """
+ A stub function for paste(), which will load the real paste() function when
+ called so that the real paste() function is used for later calls.
+
+ This allows users to import pyperclip without having determine_clipboard()
+ automatically run, which will automatically select a clipboard mechanism.
+ This could be a problem if it selects, say, the memory-heavy PyQt4 module
+ but the user was just going to immediately call set_clipboard() to use a
+ different clipboard mechanism.
+
+ The lazy loading this stub function implements gives the user a chance to
+ call set_clipboard() to pick another clipboard mechanism. Or, if the user
+ simply calls copy() or paste() without calling set_clipboard() first,
+ will fall back on whatever clipboard mechanism that determine_clipboard()
+ automatically chooses.
+ """
+ global copy, paste
+ copy, paste = determine_clipboard()
+ return paste()
+
+
+def is_available() -> bool:
+ return copy != lazy_load_stub_copy and paste != lazy_load_stub_paste
+
+
+# Initially, copy() and paste() are set to lazy loading wrappers which will
+# set `copy` and `paste` to real functions the first time they're used, unless
+# set_clipboard() or determine_clipboard() is called first.
+copy, paste = lazy_load_stub_copy, lazy_load_stub_paste
+
+
+def waitForPaste(timeout=None):
+ """This function call blocks until a non-empty text string exists on the
+ clipboard. It returns this text.
+
+ This function raises PyperclipTimeoutException if timeout was set to
+ a number of seconds that has elapsed without non-empty text being put on
+ the clipboard."""
+ startTime = time.time()
+ while True:
+ clipboardText = paste()
+ if clipboardText != "":
+ return clipboardText
+ time.sleep(0.01)
+
+ if timeout is not None and time.time() > startTime + timeout:
+ raise PyperclipTimeoutException(
+ "waitForPaste() timed out after " + str(timeout) + " seconds."
+ )
+
+
+def waitForNewPaste(timeout=None):
+ """This function call blocks until a new text string exists on the
+ clipboard that is different from the text that was there when the function
+ was first called. It returns this text.
+
+ This function raises PyperclipTimeoutException if timeout was set to
+    a number of seconds that has elapsed without new text being put on
+ the clipboard."""
+ startTime = time.time()
+ originalText = paste()
+ while True:
+ currentText = paste()
+ if currentText != originalText:
+ return currentText
+ time.sleep(0.01)
+
+ if timeout is not None and time.time() > startTime + timeout:
+ raise PyperclipTimeoutException(
+ "waitForNewPaste() timed out after " + str(timeout) + " seconds."
+ )
+
+
+__all__ = [
+ "copy",
+ "paste",
+ "waitForPaste",
+ "waitForNewPaste",
+ "set_clipboard",
+ "determine_clipboard",
+]
+
+# pandas aliases
+clipboard_get = paste
+clipboard_set = copy
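+# pandas.io.clipboards (read_clipboard / to_clipboard) imports these aliases.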
diff --git a/evalkit_eagle/lib/python3.10/site-packages/pandas/io/clipboard/__pycache__/__init__.cpython-310.pyc b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/clipboard/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..96ee9b9c062a11bf15fb076580fce6f2a4adba4a
Binary files /dev/null and b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/clipboard/__pycache__/__init__.cpython-310.pyc differ
diff --git a/evalkit_eagle/lib/python3.10/site-packages/pandas/io/excel/__init__.py b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/excel/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..275cbf0148f944eb04ca6c40c624cc5df77aa626
--- /dev/null
+++ b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/excel/__init__.py
@@ -0,0 +1,19 @@
+from pandas.io.excel._base import (
+ ExcelFile,
+ ExcelWriter,
+ read_excel,
+)
+from pandas.io.excel._odswriter import ODSWriter as _ODSWriter
+from pandas.io.excel._openpyxl import OpenpyxlWriter as _OpenpyxlWriter
+from pandas.io.excel._util import register_writer
+from pandas.io.excel._xlsxwriter import XlsxWriter as _XlsxWriter
+
+__all__ = ["read_excel", "ExcelWriter", "ExcelFile"]
+
+
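+# register_writer() records each writer class under its engine name so that
+# ExcelWriter can resolve a default engine from the target file extension.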
+register_writer(_OpenpyxlWriter)
+
+register_writer(_XlsxWriter)
+
+
+register_writer(_ODSWriter)
diff --git a/evalkit_eagle/lib/python3.10/site-packages/pandas/io/excel/__pycache__/_openpyxl.cpython-310.pyc b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/excel/__pycache__/_openpyxl.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1ee94e53720637417a034f00b3b9a128e5099014
Binary files /dev/null and b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/excel/__pycache__/_openpyxl.cpython-310.pyc differ
diff --git a/evalkit_eagle/lib/python3.10/site-packages/pandas/io/excel/_base.py b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/excel/_base.py
new file mode 100644
index 0000000000000000000000000000000000000000..786f719337b84a29e5b6ea7577edd412b596920f
--- /dev/null
+++ b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/excel/_base.py
@@ -0,0 +1,1659 @@
+from __future__ import annotations
+
+from collections.abc import (
+ Hashable,
+ Iterable,
+ Mapping,
+ Sequence,
+)
+import datetime
+from functools import partial
+from io import BytesIO
+import os
+from textwrap import fill
+from typing import (
+ IO,
+ TYPE_CHECKING,
+ Any,
+ Callable,
+ Generic,
+ Literal,
+ TypeVar,
+ Union,
+ cast,
+ overload,
+)
+import warnings
+import zipfile
+
+from pandas._config import config
+
+from pandas._libs import lib
+from pandas._libs.parsers import STR_NA_VALUES
+from pandas.compat._optional import (
+ get_version,
+ import_optional_dependency,
+)
+from pandas.errors import EmptyDataError
+from pandas.util._decorators import (
+ Appender,
+ doc,
+)
+from pandas.util._exceptions import find_stack_level
+from pandas.util._validators import check_dtype_backend
+
+from pandas.core.dtypes.common import (
+ is_bool,
+ is_float,
+ is_integer,
+ is_list_like,
+)
+
+from pandas.core.frame import DataFrame
+from pandas.core.shared_docs import _shared_docs
+from pandas.util.version import Version
+
+from pandas.io.common import (
+ IOHandles,
+ get_handle,
+ stringify_path,
+ validate_header_arg,
+)
+from pandas.io.excel._util import (
+ fill_mi_header,
+ get_default_engine,
+ get_writer,
+ maybe_convert_usecols,
+ pop_header_name,
+)
+from pandas.io.parsers import TextParser
+from pandas.io.parsers.readers import validate_integer
+
+if TYPE_CHECKING:
+ from types import TracebackType
+
+ from pandas._typing import (
+ DtypeArg,
+ DtypeBackend,
+ ExcelWriterIfSheetExists,
+ FilePath,
+ IntStrT,
+ ReadBuffer,
+ Self,
+ SequenceNotStr,
+ StorageOptions,
+ WriteExcelBuffer,
+ )
+_read_excel_doc = (
+ """
+Read an Excel file into a ``pandas`` ``DataFrame``.
+
+Supports `xls`, `xlsx`, `xlsm`, `xlsb`, `odf`, `ods` and `odt` file extensions
+read from a local filesystem or URL. Supports an option to read
+a single sheet or a list of sheets.
+
+Parameters
+----------
+io : str, bytes, ExcelFile, xlrd.Book, path object, or file-like object
+ Any valid string path is acceptable. The string could be a URL. Valid
+ URL schemes include http, ftp, s3, and file. For file URLs, a host is
+ expected. A local file could be: ``file://localhost/path/to/table.xlsx``.
+
+ If you want to pass in a path object, pandas accepts any ``os.PathLike``.
+
+ By file-like object, we refer to objects with a ``read()`` method,
+ such as a file handle (e.g. via builtin ``open`` function)
+ or ``StringIO``.
+
+ .. deprecated:: 2.1.0
+ Passing byte strings is deprecated. To read from a
+ byte string, wrap it in a ``BytesIO`` object.
+sheet_name : str, int, list, or None, default 0
+ Strings are used for sheet names. Integers are used in zero-indexed
+ sheet positions (chart sheets do not count as a sheet position).
+ Lists of strings/integers are used to request multiple sheets.
+ Specify ``None`` to get all worksheets.
+
+ Available cases:
+
+ * Defaults to ``0``: 1st sheet as a `DataFrame`
+ * ``1``: 2nd sheet as a `DataFrame`
+ * ``"Sheet1"``: Load sheet with name "Sheet1"
+ * ``[0, 1, "Sheet5"]``: Load first, second and sheet named "Sheet5"
+ as a dict of `DataFrame`
+ * ``None``: All worksheets.
+
+header : int, list of int, default 0
+ Row (0-indexed) to use for the column labels of the parsed
+ DataFrame. If a list of integers is passed those row positions will
+ be combined into a ``MultiIndex``. Use None if there is no header.
+names : array-like, default None
+ List of column names to use. If file contains no header row,
+ then you should explicitly pass header=None.
+index_col : int, str, list of int, default None
+ Column (0-indexed) to use as the row labels of the DataFrame.
+ Pass None if there is no such column. If a list is passed,
+ those columns will be combined into a ``MultiIndex``. If a
+ subset of data is selected with ``usecols``, index_col
+ is based on the subset.
+
+ Missing values will be forward filled to allow roundtripping with
+ ``to_excel`` for ``merged_cells=True``. To avoid forward filling the
+ missing values use ``set_index`` after reading the data instead of
+ ``index_col``.
+usecols : str, list-like, or callable, default None
+ * If None, then parse all columns.
+ * If str, then indicates comma separated list of Excel column letters
+ and column ranges (e.g. "A:E" or "A,C,E:F"). Ranges are inclusive of
+ both sides.
+ * If list of int, then indicates list of column numbers to be parsed
+ (0-indexed).
+ * If list of string, then indicates list of column names to be parsed.
+ * If callable, then evaluate each column name against it and parse the
+ column if the callable returns ``True``.
+
+ Returns a subset of the columns according to behavior above.
+dtype : Type name or dict of column -> type, default None
+ Data type for data or columns. E.g. {{'a': np.float64, 'b': np.int32}}
+ Use ``object`` to preserve data as stored in Excel and not interpret dtype,
+ which will necessarily result in ``object`` dtype.
+ If converters are specified, they will be applied INSTEAD
+ of dtype conversion.
+ If you use ``None``, it will infer the dtype of each column based on the data.
+engine : {{'openpyxl', 'calamine', 'odf', 'pyxlsb', 'xlrd'}}, default None
+ If io is not a buffer or path, this must be set to identify io.
+ Engine compatibility :
+
+ - ``openpyxl`` supports newer Excel file formats.
+ - ``calamine`` supports Excel (.xls, .xlsx, .xlsm, .xlsb)
+ and OpenDocument (.ods) file formats.
+ - ``odf`` supports OpenDocument file formats (.odf, .ods, .odt).
+ - ``pyxlsb`` supports Binary Excel files.
+ - ``xlrd`` supports old-style Excel files (.xls).
+
+ When ``engine=None``, the following logic will be used to determine the engine:
+
+ - If ``path_or_buffer`` is an OpenDocument format (.odf, .ods, .odt),
+      then `odf <https://pypi.org/project/odfpy/>`_ will be used.
+ - Otherwise if ``path_or_buffer`` is an xls format, ``xlrd`` will be used.
+ - Otherwise if ``path_or_buffer`` is in xlsb format, ``pyxlsb`` will be used.
+ - Otherwise ``openpyxl`` will be used.
+converters : dict, default None
+ Dict of functions for converting values in certain columns. Keys can
+ either be integers or column labels, values are functions that take one
+ input argument, the Excel cell content, and return the transformed
+ content.
+true_values : list, default None
+ Values to consider as True.
+false_values : list, default None
+ Values to consider as False.
+skiprows : list-like, int, or callable, optional
+ Line numbers to skip (0-indexed) or number of lines to skip (int) at the
+ start of the file. If callable, the callable function will be evaluated
+ against the row indices, returning True if the row should be skipped and
+ False otherwise. An example of a valid callable argument would be ``lambda
+ x: x in [0, 2]``.
+nrows : int, default None
+ Number of rows to parse.
+na_values : scalar, str, list-like, or dict, default None
+ Additional strings to recognize as NA/NaN. If dict passed, specific
+ per-column NA values. By default the following values are interpreted
+ as NaN: '"""
+ + fill("', '".join(sorted(STR_NA_VALUES)), 70, subsequent_indent=" ")
+ + """'.
+keep_default_na : bool, default True
+ Whether or not to include the default NaN values when parsing the data.
+ Depending on whether ``na_values`` is passed in, the behavior is as follows:
+
+ * If ``keep_default_na`` is True, and ``na_values`` are specified,
+ ``na_values`` is appended to the default NaN values used for parsing.
+ * If ``keep_default_na`` is True, and ``na_values`` are not specified, only
+ the default NaN values are used for parsing.
+ * If ``keep_default_na`` is False, and ``na_values`` are specified, only
+ the NaN values specified ``na_values`` are used for parsing.
+ * If ``keep_default_na`` is False, and ``na_values`` are not specified, no
+ strings will be parsed as NaN.
+
+ Note that if `na_filter` is passed in as False, the ``keep_default_na`` and
+ ``na_values`` parameters will be ignored.
+na_filter : bool, default True
+ Detect missing value markers (empty strings and the value of na_values). In
+ data without any NAs, passing ``na_filter=False`` can improve the
+ performance of reading a large file.
+verbose : bool, default False
+ Indicate number of NA values placed in non-numeric columns.
+parse_dates : bool, list-like, or dict, default False
+ The behavior is as follows:
+
+ * ``bool``. If True -> try parsing the index.
+ * ``list`` of int or names. e.g. If [1, 2, 3] -> try parsing columns 1, 2, 3
+ each as a separate date column.
+ * ``list`` of lists. e.g. If [[1, 3]] -> combine columns 1 and 3 and parse as
+ a single date column.
+ * ``dict``, e.g. {{'foo' : [1, 3]}} -> parse columns 1, 3 as date and call
+ result 'foo'
+
+ If a column or index contains an unparsable date, the entire column or
+    index will be returned unaltered as an object data type. If you don't want to
+ parse some cells as date just change their type in Excel to "Text".
+ For non-standard datetime parsing, use ``pd.to_datetime`` after ``pd.read_excel``.
+
+ Note: A fast-path exists for iso8601-formatted dates.
+date_parser : function, optional
+ Function to use for converting a sequence of string columns to an array of
+ datetime instances. The default uses ``dateutil.parser.parser`` to do the
+ conversion. Pandas will try to call `date_parser` in three different ways,
+ advancing to the next if an exception occurs: 1) Pass one or more arrays
+ (as defined by `parse_dates`) as arguments; 2) concatenate (row-wise) the
+ string values from the columns defined by `parse_dates` into a single array
+ and pass that; and 3) call `date_parser` once for each row using one or
+ more strings (corresponding to the columns defined by `parse_dates`) as
+ arguments.
+
+ .. deprecated:: 2.0.0
+ Use ``date_format`` instead, or read in as ``object`` and then apply
+ :func:`to_datetime` as-needed.
+date_format : str or dict of column -> format, default ``None``
+ If used in conjunction with ``parse_dates``, will parse dates according to this
+ format. For anything more complex,
+ please read in as ``object`` and then apply :func:`to_datetime` as-needed.
+
+ .. versionadded:: 2.0.0
+thousands : str, default None
+ Thousands separator for parsing string columns to numeric. Note that
+ this parameter is only necessary for columns stored as TEXT in Excel,
+ any numeric columns will automatically be parsed, regardless of display
+ format.
+decimal : str, default '.'
+ Character to recognize as decimal point for parsing string columns to numeric.
+ Note that this parameter is only necessary for columns stored as TEXT in Excel,
+ any numeric columns will automatically be parsed, regardless of display
+    format (e.g. use ',' for European data).
+
+ .. versionadded:: 1.4.0
+
+comment : str, default None
+ Comments out remainder of line. Pass a character or characters to this
+ argument to indicate comments in the input file. Any data between the
+ comment string and the end of the current line is ignored.
+skipfooter : int, default 0
+ Rows at the end to skip (0-indexed).
+{storage_options}
+
+dtype_backend : {{'numpy_nullable', 'pyarrow'}}, default 'numpy_nullable'
+ Back-end data type applied to the resultant :class:`DataFrame`
+ (still experimental). Behaviour is as follows:
+
+ * ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame`
+ (default).
+ * ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype`
+ DataFrame.
+
+ .. versionadded:: 2.0
+
+engine_kwargs : dict, optional
+ Arbitrary keyword arguments passed to excel engine.
+
+Returns
+-------
+DataFrame or dict of DataFrames
+ DataFrame from the passed in Excel file. See notes in sheet_name
+ argument for more information on when a dict of DataFrames is returned.
+
+See Also
+--------
+DataFrame.to_excel : Write DataFrame to an Excel file.
+DataFrame.to_csv : Write DataFrame to a comma-separated values (csv) file.
+read_csv : Read a comma-separated values (csv) file into DataFrame.
+read_fwf : Read a table of fixed-width formatted lines into DataFrame.
+
+Notes
+-----
+For specific information on the methods used for each Excel engine, refer to the pandas
+:ref:`user guide <io.excel_reader>`
+
+Examples
+--------
+The file can be read using the file name as string or an open file object:
+
+>>> pd.read_excel('tmp.xlsx', index_col=0) # doctest: +SKIP
+ Name Value
+0 string1 1
+1 string2 2
+2 #Comment 3
+
+>>> pd.read_excel(open('tmp.xlsx', 'rb'),
+... sheet_name='Sheet3') # doctest: +SKIP
+ Unnamed: 0 Name Value
+0 0 string1 1
+1 1 string2 2
+2 2 #Comment 3
+
+Index and header can be specified via the `index_col` and `header` arguments
+
+>>> pd.read_excel('tmp.xlsx', index_col=None, header=None) # doctest: +SKIP
+ 0 1 2
+0 NaN Name Value
+1 0.0 string1 1
+2 1.0 string2 2
+3 2.0 #Comment 3
+
+Column types are inferred but can be explicitly specified
+
+>>> pd.read_excel('tmp.xlsx', index_col=0,
+... dtype={{'Name': str, 'Value': float}}) # doctest: +SKIP
+ Name Value
+0 string1 1.0
+1 string2 2.0
+2 #Comment 3.0
+
+True, False, and NA values, and thousands separators have defaults,
+but can be explicitly specified, too. Supply the values you would like
+as strings or lists of strings!
+
+>>> pd.read_excel('tmp.xlsx', index_col=0,
+... na_values=['string1', 'string2']) # doctest: +SKIP
+ Name Value
+0 NaN 1
+1 NaN 2
+2 #Comment 3
+
+Comment lines in the excel input file can be skipped using the
+``comment`` kwarg.
+
+>>> pd.read_excel('tmp.xlsx', index_col=0, comment='#') # doctest: +SKIP
+ Name Value
+0 string1 1.0
+1 string2 2.0
+2 None NaN
+"""
+)
+
+
+@overload
+def read_excel(
+ io,
+ # sheet name is str or int -> DataFrame
+ sheet_name: str | int = ...,
+ *,
+ header: int | Sequence[int] | None = ...,
+ names: SequenceNotStr[Hashable] | range | None = ...,
+ index_col: int | str | Sequence[int] | None = ...,
+ usecols: int
+ | str
+ | Sequence[int]
+ | Sequence[str]
+ | Callable[[str], bool]
+ | None = ...,
+ dtype: DtypeArg | None = ...,
+ engine: Literal["xlrd", "openpyxl", "odf", "pyxlsb", "calamine"] | None = ...,
+ converters: dict[str, Callable] | dict[int, Callable] | None = ...,
+ true_values: Iterable[Hashable] | None = ...,
+ false_values: Iterable[Hashable] | None = ...,
+ skiprows: Sequence[int] | int | Callable[[int], object] | None = ...,
+ nrows: int | None = ...,
+ na_values=...,
+ keep_default_na: bool = ...,
+ na_filter: bool = ...,
+ verbose: bool = ...,
+ parse_dates: list | dict | bool = ...,
+ date_parser: Callable | lib.NoDefault = ...,
+ date_format: dict[Hashable, str] | str | None = ...,
+ thousands: str | None = ...,
+ decimal: str = ...,
+ comment: str | None = ...,
+ skipfooter: int = ...,
+ storage_options: StorageOptions = ...,
+ dtype_backend: DtypeBackend | lib.NoDefault = ...,
+) -> DataFrame:
+ ...
+
+
+@overload
+def read_excel(
+ io,
+ # sheet name is list or None -> dict[IntStrT, DataFrame]
+ sheet_name: list[IntStrT] | None,
+ *,
+ header: int | Sequence[int] | None = ...,
+ names: SequenceNotStr[Hashable] | range | None = ...,
+ index_col: int | str | Sequence[int] | None = ...,
+ usecols: int
+ | str
+ | Sequence[int]
+ | Sequence[str]
+ | Callable[[str], bool]
+ | None = ...,
+ dtype: DtypeArg | None = ...,
+ engine: Literal["xlrd", "openpyxl", "odf", "pyxlsb", "calamine"] | None = ...,
+ converters: dict[str, Callable] | dict[int, Callable] | None = ...,
+ true_values: Iterable[Hashable] | None = ...,
+ false_values: Iterable[Hashable] | None = ...,
+ skiprows: Sequence[int] | int | Callable[[int], object] | None = ...,
+ nrows: int | None = ...,
+ na_values=...,
+ keep_default_na: bool = ...,
+ na_filter: bool = ...,
+ verbose: bool = ...,
+ parse_dates: list | dict | bool = ...,
+ date_parser: Callable | lib.NoDefault = ...,
+ date_format: dict[Hashable, str] | str | None = ...,
+ thousands: str | None = ...,
+ decimal: str = ...,
+ comment: str | None = ...,
+ skipfooter: int = ...,
+ storage_options: StorageOptions = ...,
+ dtype_backend: DtypeBackend | lib.NoDefault = ...,
+) -> dict[IntStrT, DataFrame]:
+ ...
+
+
+@doc(storage_options=_shared_docs["storage_options"])
+@Appender(_read_excel_doc)
+def read_excel(
+ io,
+ sheet_name: str | int | list[IntStrT] | None = 0,
+ *,
+ header: int | Sequence[int] | None = 0,
+ names: SequenceNotStr[Hashable] | range | None = None,
+ index_col: int | str | Sequence[int] | None = None,
+ usecols: int
+ | str
+ | Sequence[int]
+ | Sequence[str]
+ | Callable[[str], bool]
+ | None = None,
+ dtype: DtypeArg | None = None,
+ engine: Literal["xlrd", "openpyxl", "odf", "pyxlsb", "calamine"] | None = None,
+ converters: dict[str, Callable] | dict[int, Callable] | None = None,
+ true_values: Iterable[Hashable] | None = None,
+ false_values: Iterable[Hashable] | None = None,
+ skiprows: Sequence[int] | int | Callable[[int], object] | None = None,
+ nrows: int | None = None,
+ na_values=None,
+ keep_default_na: bool = True,
+ na_filter: bool = True,
+ verbose: bool = False,
+ parse_dates: list | dict | bool = False,
+ date_parser: Callable | lib.NoDefault = lib.no_default,
+ date_format: dict[Hashable, str] | str | None = None,
+ thousands: str | None = None,
+ decimal: str = ".",
+ comment: str | None = None,
+ skipfooter: int = 0,
+ storage_options: StorageOptions | None = None,
+ dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,
+ engine_kwargs: dict | None = None,
+) -> DataFrame | dict[IntStrT, DataFrame]:
+ check_dtype_backend(dtype_backend)
+ should_close = False
+ if engine_kwargs is None:
+ engine_kwargs = {}
+
+ if not isinstance(io, ExcelFile):
+ should_close = True
+ io = ExcelFile(
+ io,
+ storage_options=storage_options,
+ engine=engine,
+ engine_kwargs=engine_kwargs,
+ )
+ elif engine and engine != io.engine:
+ raise ValueError(
+ "Engine should not be specified when passing "
+ "an ExcelFile - ExcelFile already has the engine set"
+ )
+
+ try:
+ data = io.parse(
+ sheet_name=sheet_name,
+ header=header,
+ names=names,
+ index_col=index_col,
+ usecols=usecols,
+ dtype=dtype,
+ converters=converters,
+ true_values=true_values,
+ false_values=false_values,
+ skiprows=skiprows,
+ nrows=nrows,
+ na_values=na_values,
+ keep_default_na=keep_default_na,
+ na_filter=na_filter,
+ verbose=verbose,
+ parse_dates=parse_dates,
+ date_parser=date_parser,
+ date_format=date_format,
+ thousands=thousands,
+ decimal=decimal,
+ comment=comment,
+ skipfooter=skipfooter,
+ dtype_backend=dtype_backend,
+ )
+ finally:
+ # make sure to close opened file handles
+ if should_close:
+ io.close()
+ return data
+
+
+_WorkbookT = TypeVar("_WorkbookT")
+
+
+class BaseExcelReader(Generic[_WorkbookT]):
+ book: _WorkbookT
+
+ def __init__(
+ self,
+ filepath_or_buffer,
+ storage_options: StorageOptions | None = None,
+ engine_kwargs: dict | None = None,
+ ) -> None:
+ if engine_kwargs is None:
+ engine_kwargs = {}
+
+ # First argument can also be bytes, so create a buffer
+ if isinstance(filepath_or_buffer, bytes):
+ filepath_or_buffer = BytesIO(filepath_or_buffer)
+
+ self.handles = IOHandles(
+ handle=filepath_or_buffer, compression={"method": None}
+ )
+ if not isinstance(filepath_or_buffer, (ExcelFile, self._workbook_class)):
+ self.handles = get_handle(
+ filepath_or_buffer, "rb", storage_options=storage_options, is_text=False
+ )
+
+ if isinstance(self.handles.handle, self._workbook_class):
+ self.book = self.handles.handle
+ elif hasattr(self.handles.handle, "read"):
+ # N.B. xlrd.Book has a read attribute too
+ self.handles.handle.seek(0)
+ try:
+ self.book = self.load_workbook(self.handles.handle, engine_kwargs)
+ except Exception:
+ self.close()
+ raise
+ else:
+ raise ValueError(
+ "Must explicitly set engine if not passing in buffer or path for io."
+ )
+
+ @property
+ def _workbook_class(self) -> type[_WorkbookT]:
+ raise NotImplementedError
+
+ def load_workbook(self, filepath_or_buffer, engine_kwargs) -> _WorkbookT:
+ raise NotImplementedError
+
+ def close(self) -> None:
+ if hasattr(self, "book"):
+ if hasattr(self.book, "close"):
+ # pyxlsb: opens a TemporaryFile
+ # openpyxl: https://stackoverflow.com/questions/31416842/
+ # openpyxl-does-not-close-excel-workbook-in-read-only-mode
+ self.book.close()
+ elif hasattr(self.book, "release_resources"):
+ # xlrd
+ # https://github.com/python-excel/xlrd/blob/2.0.1/xlrd/book.py#L548
+ self.book.release_resources()
+ self.handles.close()
+
+ @property
+ def sheet_names(self) -> list[str]:
+ raise NotImplementedError
+
+ def get_sheet_by_name(self, name: str):
+ raise NotImplementedError
+
+ def get_sheet_by_index(self, index: int):
+ raise NotImplementedError
+
+ def get_sheet_data(self, sheet, rows: int | None = None):
+ raise NotImplementedError
+
+ def raise_if_bad_sheet_by_index(self, index: int) -> None:
+ n_sheets = len(self.sheet_names)
+ if index >= n_sheets:
+ raise ValueError(
+ f"Worksheet index {index} is invalid, {n_sheets} worksheets found"
+ )
+
+ def raise_if_bad_sheet_by_name(self, name: str) -> None:
+ if name not in self.sheet_names:
+ raise ValueError(f"Worksheet named '{name}' not found")
+
+ def _check_skiprows_func(
+ self,
+ skiprows: Callable,
+ rows_to_use: int,
+ ) -> int:
+ """
+ Determine how many file rows are required to obtain `nrows` data
+ rows when `skiprows` is a function.
+
+ Parameters
+ ----------
+ skiprows : function
+ The function passed to read_excel by the user.
+ rows_to_use : int
+ The number of rows that will be needed for the header and
+ the data.
+
+ Returns
+ -------
+ int
+ """
+ i = 0
+ rows_used_so_far = 0
+ while rows_used_so_far < rows_to_use:
+ if not skiprows(i):
+ rows_used_so_far += 1
+ i += 1
+ return i
+
+ def _calc_rows(
+ self,
+ header: int | Sequence[int] | None,
+ index_col: int | Sequence[int] | None,
+ skiprows: Sequence[int] | int | Callable[[int], object] | None,
+ nrows: int | None,
+ ) -> int | None:
+ """
+ If nrows specified, find the number of rows needed from the
+ file, otherwise return None.
+
+ Parameters
+ ----------
+ header : int, list of int, or None
+ See read_excel docstring.
+ index_col : int, str, list of int, or None
+ See read_excel docstring.
+ skiprows : list-like, int, callable, or None
+ See read_excel docstring.
+ nrows : int or None
+ See read_excel docstring.
+
+ Returns
+ -------
+ int or None
+ """
+ if nrows is None:
+ return None
+ if header is None:
+ header_rows = 1
+ elif is_integer(header):
+ header = cast(int, header)
+ header_rows = 1 + header
+ else:
+ header = cast(Sequence, header)
+ header_rows = 1 + header[-1]
+ # If there is a MultiIndex header and an index then there is also
+ # a row containing just the index name(s)
+ if is_list_like(header) and index_col is not None:
+ header = cast(Sequence, header)
+ if len(header) > 1:
+ header_rows += 1
+ if skiprows is None:
+ return header_rows + nrows
+ if is_integer(skiprows):
+ skiprows = cast(int, skiprows)
+ return header_rows + nrows + skiprows
+ if is_list_like(skiprows):
+
+ def f(skiprows: Sequence, x: int) -> bool:
+ return x in skiprows
+
+ skiprows = cast(Sequence, skiprows)
+ return self._check_skiprows_func(partial(f, skiprows), header_rows + nrows)
+ if callable(skiprows):
+ return self._check_skiprows_func(
+ skiprows,
+ header_rows + nrows,
+ )
+ # else unexpected skiprows type: read_excel will not optimize
+ # the number of rows read from file
+ return None
+
+ def parse(
+ self,
+ sheet_name: str | int | list[int] | list[str] | None = 0,
+ header: int | Sequence[int] | None = 0,
+ names: SequenceNotStr[Hashable] | range | None = None,
+ index_col: int | Sequence[int] | None = None,
+ usecols=None,
+ dtype: DtypeArg | None = None,
+ true_values: Iterable[Hashable] | None = None,
+ false_values: Iterable[Hashable] | None = None,
+ skiprows: Sequence[int] | int | Callable[[int], object] | None = None,
+ nrows: int | None = None,
+ na_values=None,
+ verbose: bool = False,
+ parse_dates: list | dict | bool = False,
+ date_parser: Callable | lib.NoDefault = lib.no_default,
+ date_format: dict[Hashable, str] | str | None = None,
+ thousands: str | None = None,
+ decimal: str = ".",
+ comment: str | None = None,
+ skipfooter: int = 0,
+ dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,
+ **kwds,
+ ):
+ validate_header_arg(header)
+ validate_integer("nrows", nrows)
+
+ ret_dict = False
+
+ # Keep sheetname to maintain backwards compatibility.
+ sheets: list[int] | list[str]
+ if isinstance(sheet_name, list):
+ sheets = sheet_name
+ ret_dict = True
+ elif sheet_name is None:
+ sheets = self.sheet_names
+ ret_dict = True
+ elif isinstance(sheet_name, str):
+ sheets = [sheet_name]
+ else:
+ sheets = [sheet_name]
+
+ # handle same-type duplicates.
+ sheets = cast(Union[list[int], list[str]], list(dict.fromkeys(sheets).keys()))
+
+ output = {}
+
+ last_sheetname = None
+ for asheetname in sheets:
+ last_sheetname = asheetname
+ if verbose:
+ print(f"Reading sheet {asheetname}")
+
+ if isinstance(asheetname, str):
+ sheet = self.get_sheet_by_name(asheetname)
+ else: # assume an integer if not a string
+ sheet = self.get_sheet_by_index(asheetname)
+
+ file_rows_needed = self._calc_rows(header, index_col, skiprows, nrows)
+ data = self.get_sheet_data(sheet, file_rows_needed)
+ if hasattr(sheet, "close"):
+ # pyxlsb opens two TemporaryFiles
+ sheet.close()
+ usecols = maybe_convert_usecols(usecols)
+
+ if not data:
+ output[asheetname] = DataFrame()
+ continue
+
+ is_list_header = False
+ is_len_one_list_header = False
+ if is_list_like(header):
+ assert isinstance(header, Sequence)
+ is_list_header = True
+ if len(header) == 1:
+ is_len_one_list_header = True
+
+ if is_len_one_list_header:
+ header = cast(Sequence[int], header)[0]
+
+ # forward fill and pull out names for MultiIndex column
+ header_names = None
+ if header is not None and is_list_like(header):
+ assert isinstance(header, Sequence)
+
+ header_names = []
+ control_row = [True] * len(data[0])
+
+ for row in header:
+ if is_integer(skiprows):
+ assert isinstance(skiprows, int)
+ row += skiprows
+
+ if row > len(data) - 1:
+ raise ValueError(
+ f"header index {row} exceeds maximum index "
+ f"{len(data) - 1} of data.",
+ )
+
+ data[row], control_row = fill_mi_header(data[row], control_row)
+
+ if index_col is not None:
+ header_name, _ = pop_header_name(data[row], index_col)
+ header_names.append(header_name)
+
+ # If there is a MultiIndex header and an index then there is also
+ # a row containing just the index name(s)
+ has_index_names = False
+ if is_list_header and not is_len_one_list_header and index_col is not None:
+ index_col_list: Sequence[int]
+ if isinstance(index_col, int):
+ index_col_list = [index_col]
+ else:
+ assert isinstance(index_col, Sequence)
+ index_col_list = index_col
+
+ # We have to handle mi without names. If any of the entries in the data
+ # columns are not empty, this is a regular row
+ assert isinstance(header, Sequence)
+ if len(header) < len(data):
+ potential_index_names = data[len(header)]
+ potential_data = [
+ x
+ for i, x in enumerate(potential_index_names)
+ if not control_row[i] and i not in index_col_list
+ ]
+ has_index_names = all(x == "" or x is None for x in potential_data)
+
+ if is_list_like(index_col):
+ # Forward fill values for MultiIndex index.
+ if header is None:
+ offset = 0
+ elif isinstance(header, int):
+ offset = 1 + header
+ else:
+ offset = 1 + max(header)
+
+ # GH34673: if MultiIndex names present and not defined in the header,
+ # offset needs to be incremented so that forward filling starts
+ # from the first MI value instead of the name
+ if has_index_names:
+ offset += 1
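+                # e.g. header=[0, 1] gives offset = 2, or 3 when an extra
+                # index-name row follows the header rows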
+
+ # Check if we have an empty dataset
+ # before trying to collect data.
+ if offset < len(data):
+ assert isinstance(index_col, Sequence)
+
+ for col in index_col:
+ last = data[offset][col]
+
+ for row in range(offset + 1, len(data)):
+ if data[row][col] == "" or data[row][col] is None:
+ data[row][col] = last
+ else:
+ last = data[row][col]
+
+ # GH 12292 : error when read one empty column from excel file
+ try:
+ parser = TextParser(
+ data,
+ names=names,
+ header=header,
+ index_col=index_col,
+ has_index_names=has_index_names,
+ dtype=dtype,
+ true_values=true_values,
+ false_values=false_values,
+ skiprows=skiprows,
+ nrows=nrows,
+ na_values=na_values,
+ skip_blank_lines=False, # GH 39808
+ parse_dates=parse_dates,
+ date_parser=date_parser,
+ date_format=date_format,
+ thousands=thousands,
+ decimal=decimal,
+ comment=comment,
+ skipfooter=skipfooter,
+ usecols=usecols,
+ dtype_backend=dtype_backend,
+ **kwds,
+ )
+
+ output[asheetname] = parser.read(nrows=nrows)
+
+ if header_names:
+ output[asheetname].columns = output[asheetname].columns.set_names(
+ header_names
+ )
+
+ except EmptyDataError:
+ # No Data, return an empty DataFrame
+ output[asheetname] = DataFrame()
+
+ except Exception as err:
+ err.args = (f"{err.args[0]} (sheet: {asheetname})", *err.args[1:])
+ raise err
+
+ if last_sheetname is None:
+ raise ValueError("Sheet name is an empty list")
+
+ if ret_dict:
+ return output
+ else:
+ return output[last_sheetname]
+
+
+@doc(storage_options=_shared_docs["storage_options"])
+class ExcelWriter(Generic[_WorkbookT]):
+ """
+    Class for writing DataFrame objects into Excel sheets.
+
+ Default is to use:
+
+    * `xlsxwriter <https://pypi.org/project/XlsxWriter/>`__ for xlsx files if xlsxwriter
+      is installed otherwise `openpyxl <https://pypi.org/project/openpyxl/>`__
+    * `odswriter <https://pypi.org/project/odswriter/>`__ for ods files
+
+ See ``DataFrame.to_excel`` for typical usage.
+
+ The writer should be used as a context manager. Otherwise, call `close()` to save
+ and close any opened file handles.
+
+ Parameters
+ ----------
+ path : str or typing.BinaryIO
+ Path to xls or xlsx or ods file.
+ engine : str (optional)
+ Engine to use for writing. If None, defaults to
+        ``io.excel.<extension>.writer``. NOTE: can only be passed as a keyword
+ argument.
+ date_format : str, default None
+ Format string for dates written into Excel files (e.g. 'YYYY-MM-DD').
+ datetime_format : str, default None
+ Format string for datetime objects written into Excel files.
+ (e.g. 'YYYY-MM-DD HH:MM:SS').
+ mode : {{'w', 'a'}}, default 'w'
+ File mode to use (write or append). Append does not work with fsspec URLs.
+ {storage_options}
+
+ if_sheet_exists : {{'error', 'new', 'replace', 'overlay'}}, default 'error'
+ How to behave when trying to write to a sheet that already
+ exists (append mode only).
+
+ * error: raise a ValueError.
+ * new: Create a new sheet, with a name determined by the engine.
+ * replace: Delete the contents of the sheet before writing to it.
+ * overlay: Write contents to the existing sheet without first removing,
+ but possibly over top of, the existing contents.
+
+ .. versionadded:: 1.3.0
+
+ .. versionchanged:: 1.4.0
+
+ Added ``overlay`` option
+
+ engine_kwargs : dict, optional
+ Keyword arguments to be passed into the engine. These will be passed to
+ the following functions of the respective engines:
+
+ * xlsxwriter: ``xlsxwriter.Workbook(file, **engine_kwargs)``
+ * openpyxl (write mode): ``openpyxl.Workbook(**engine_kwargs)``
+ * openpyxl (append mode): ``openpyxl.load_workbook(file, **engine_kwargs)``
+ * odswriter: ``odf.opendocument.OpenDocumentSpreadsheet(**engine_kwargs)``
+
+ .. versionadded:: 1.3.0
+
+ Notes
+ -----
+ For compatibility with CSV writers, ExcelWriter serializes lists
+ and dicts to strings before writing.
+
+ Examples
+ --------
+ Default usage:
+
+ >>> df = pd.DataFrame([["ABC", "XYZ"]], columns=["Foo", "Bar"]) # doctest: +SKIP
+ >>> with pd.ExcelWriter("path_to_file.xlsx") as writer:
+ ... df.to_excel(writer) # doctest: +SKIP
+
+ To write to separate sheets in a single file:
+
+ >>> df1 = pd.DataFrame([["AAA", "BBB"]], columns=["Spam", "Egg"]) # doctest: +SKIP
+ >>> df2 = pd.DataFrame([["ABC", "XYZ"]], columns=["Foo", "Bar"]) # doctest: +SKIP
+ >>> with pd.ExcelWriter("path_to_file.xlsx") as writer:
+ ... df1.to_excel(writer, sheet_name="Sheet1") # doctest: +SKIP
+ ... df2.to_excel(writer, sheet_name="Sheet2") # doctest: +SKIP
+
+ You can set the date format or datetime format:
+
+ >>> from datetime import date, datetime # doctest: +SKIP
+ >>> df = pd.DataFrame(
+ ... [
+ ... [date(2014, 1, 31), date(1999, 9, 24)],
+ ... [datetime(1998, 5, 26, 23, 33, 4), datetime(2014, 2, 28, 13, 5, 13)],
+ ... ],
+ ... index=["Date", "Datetime"],
+ ... columns=["X", "Y"],
+ ... ) # doctest: +SKIP
+ >>> with pd.ExcelWriter(
+ ... "path_to_file.xlsx",
+ ... date_format="YYYY-MM-DD",
+ ... datetime_format="YYYY-MM-DD HH:MM:SS"
+ ... ) as writer:
+ ... df.to_excel(writer) # doctest: +SKIP
+
+ You can also append to an existing Excel file:
+
+ >>> with pd.ExcelWriter("path_to_file.xlsx", mode="a", engine="openpyxl") as writer:
+ ... df.to_excel(writer, sheet_name="Sheet3") # doctest: +SKIP
+
+ Here, the `if_sheet_exists` parameter can be set to replace a sheet if it
+ already exists:
+
+ >>> with ExcelWriter(
+ ... "path_to_file.xlsx",
+ ... mode="a",
+ ... engine="openpyxl",
+ ... if_sheet_exists="replace",
+ ... ) as writer:
+ ... df.to_excel(writer, sheet_name="Sheet1") # doctest: +SKIP
+
+ You can also write multiple DataFrames to a single sheet. Note that the
+ ``if_sheet_exists`` parameter needs to be set to ``overlay``:
+
+ >>> with ExcelWriter("path_to_file.xlsx",
+ ... mode="a",
+ ... engine="openpyxl",
+ ... if_sheet_exists="overlay",
+ ... ) as writer:
+ ... df1.to_excel(writer, sheet_name="Sheet1")
+ ... df2.to_excel(writer, sheet_name="Sheet1", startcol=3) # doctest: +SKIP
+
+    You can store the Excel file in RAM:
+
+ >>> import io
+ >>> df = pd.DataFrame([["ABC", "XYZ"]], columns=["Foo", "Bar"])
+ >>> buffer = io.BytesIO()
+ >>> with pd.ExcelWriter(buffer) as writer:
+ ... df.to_excel(writer)
+
+    You can pack the Excel file into a zip archive:
+
+ >>> import zipfile # doctest: +SKIP
+ >>> df = pd.DataFrame([["ABC", "XYZ"]], columns=["Foo", "Bar"]) # doctest: +SKIP
+ >>> with zipfile.ZipFile("path_to_file.zip", "w") as zf:
+ ... with zf.open("filename.xlsx", "w") as buffer:
+ ... with pd.ExcelWriter(buffer) as writer:
+ ... df.to_excel(writer) # doctest: +SKIP
+
+ You can specify additional arguments to the underlying engine:
+
+ >>> with pd.ExcelWriter(
+ ... "path_to_file.xlsx",
+ ... engine="xlsxwriter",
+ ... engine_kwargs={{"options": {{"nan_inf_to_errors": True}}}}
+ ... ) as writer:
+ ... df.to_excel(writer) # doctest: +SKIP
+
+ In append mode, ``engine_kwargs`` are passed through to
+ openpyxl's ``load_workbook``:
+
+ >>> with pd.ExcelWriter(
+ ... "path_to_file.xlsx",
+ ... engine="openpyxl",
+ ... mode="a",
+ ... engine_kwargs={{"keep_vba": True}}
+ ... ) as writer:
+ ... df.to_excel(writer, sheet_name="Sheet2") # doctest: +SKIP
+ """
+
+ # Defining an ExcelWriter implementation (see abstract methods for more...)
+
+ # - Mandatory
+    #   - ``_write_cells(self, cells, sheet_name=None, startrow=0, startcol=0,
+    #     freeze_panes=None)`` --> called to write additional DataFrames to disk
+ # - ``_supported_extensions`` (tuple of supported extensions), used to
+ # check that engine supports the given extension.
+ # - ``_engine`` - string that gives the engine name. Necessary to
+ # instantiate class directly and bypass ``ExcelWriterMeta`` engine
+ # lookup.
+    #   - ``_save(self)`` --> called to save file to disk
+ # - Mostly mandatory (i.e. should at least exist)
+    #     - ``book``, ``_cur_sheet``, ``_path``
+
+ # - Optional:
+ # - ``__init__(self, path, engine=None, **kwargs)`` --> always called
+ # with path as first argument.
+
+ # You also need to register the class with ``register_writer()``.
+ # Technically, ExcelWriter implementations don't need to subclass
+ # ExcelWriter.
+
+ _engine: str
+ _supported_extensions: tuple[str, ...]
+
+ def __new__(
+ cls,
+ path: FilePath | WriteExcelBuffer | ExcelWriter,
+ engine: str | None = None,
+ date_format: str | None = None,
+ datetime_format: str | None = None,
+ mode: str = "w",
+ storage_options: StorageOptions | None = None,
+ if_sheet_exists: ExcelWriterIfSheetExists | None = None,
+ engine_kwargs: dict | None = None,
+ ) -> Self:
+ # only switch class if generic(ExcelWriter)
+ if cls is ExcelWriter:
+ if engine is None or (isinstance(engine, str) and engine == "auto"):
+ if isinstance(path, str):
+ ext = os.path.splitext(path)[-1][1:]
+ else:
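+                    # a buffer carries no filename extension to sniff, so
+                    # fall back to the default xlsx writer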
+ ext = "xlsx"
+
+ try:
+ engine = config.get_option(f"io.excel.{ext}.writer", silent=True)
+ if engine == "auto":
+ engine = get_default_engine(ext, mode="writer")
+ except KeyError as err:
+ raise ValueError(f"No engine for filetype: '{ext}'") from err
+
+ # for mypy
+ assert engine is not None
+ # error: Incompatible types in assignment (expression has type
+ # "type[ExcelWriter[Any]]", variable has type "type[Self]")
+ cls = get_writer(engine) # type: ignore[assignment]
+
+ return object.__new__(cls)
+
+ # declare external properties you can count on
+ _path = None
+
+ @property
+ def supported_extensions(self) -> tuple[str, ...]:
+        """Extensions that the writer engine supports."""
+ return self._supported_extensions
+
+ @property
+ def engine(self) -> str:
+ """Name of engine."""
+ return self._engine
+
+ @property
+ def sheets(self) -> dict[str, Any]:
+ """Mapping of sheet names to sheet objects."""
+ raise NotImplementedError
+
+ @property
+ def book(self) -> _WorkbookT:
+ """
+ Book instance. Class type will depend on the engine used.
+
+ This attribute can be used to access engine-specific features.
+ """
+ raise NotImplementedError
+
+ def _write_cells(
+ self,
+ cells,
+ sheet_name: str | None = None,
+ startrow: int = 0,
+ startcol: int = 0,
+ freeze_panes: tuple[int, int] | None = None,
+ ) -> None:
+ """
+        Write given formatted cells into an Excel sheet.
+
+        Parameters
+        ----------
+        cells : generator
+            Cells of formatted data to save to the Excel sheet.
+        sheet_name : str, default None
+            Name of the Excel sheet; if None, use ``self._cur_sheet``.
+        startrow : int, default 0
+            Upper-left cell row at which to dump the data frame.
+        startcol : int, default 0
+            Upper-left cell column at which to dump the data frame.
+        freeze_panes : tuple of (int, int), optional
+            The bottom-most row and right-most column to freeze.
+ """
+ raise NotImplementedError
+
+ def _save(self) -> None:
+ """
+ Save workbook to disk.
+ """
+ raise NotImplementedError
+
+ def __init__(
+ self,
+ path: FilePath | WriteExcelBuffer | ExcelWriter,
+ engine: str | None = None,
+ date_format: str | None = None,
+ datetime_format: str | None = None,
+ mode: str = "w",
+ storage_options: StorageOptions | None = None,
+ if_sheet_exists: ExcelWriterIfSheetExists | None = None,
+ engine_kwargs: dict[str, Any] | None = None,
+ ) -> None:
+ # validate that this engine can handle the extension
+ if isinstance(path, str):
+ ext = os.path.splitext(path)[-1]
+ self.check_extension(ext)
+
+ # use mode to open the file
+ if "b" not in mode:
+ mode += "b"
+ # use "a" for the user to append data to excel but internally use "r+" to let
+ # the excel backend first read the existing file and then write any data to it
+ mode = mode.replace("a", "r+")
+
+ if if_sheet_exists not in (None, "error", "new", "replace", "overlay"):
+ raise ValueError(
+ f"'{if_sheet_exists}' is not valid for if_sheet_exists. "
+ "Valid options are 'error', 'new', 'replace' and 'overlay'."
+ )
+ if if_sheet_exists and "r+" not in mode:
+ raise ValueError("if_sheet_exists is only valid in append mode (mode='a')")
+ if if_sheet_exists is None:
+ if_sheet_exists = "error"
+ self._if_sheet_exists = if_sheet_exists
+
+ # cast ExcelWriter to avoid adding 'if self._handles is not None'
+ self._handles = IOHandles(
+ cast(IO[bytes], path), compression={"compression": None}
+ )
+ if not isinstance(path, ExcelWriter):
+ self._handles = get_handle(
+ path, mode, storage_options=storage_options, is_text=False
+ )
+ self._cur_sheet = None
+
+ if date_format is None:
+ self._date_format = "YYYY-MM-DD"
+ else:
+ self._date_format = date_format
+ if datetime_format is None:
+ self._datetime_format = "YYYY-MM-DD HH:MM:SS"
+ else:
+ self._datetime_format = datetime_format
+
+ self._mode = mode
+
+ @property
+ def date_format(self) -> str:
+ """
+ Format string for dates written into Excel files (e.g. 'YYYY-MM-DD').
+ """
+ return self._date_format
+
+ @property
+ def datetime_format(self) -> str:
+ """
+        Format string for datetimes written into Excel files
+        (e.g. 'YYYY-MM-DD HH:MM:SS').
+ """
+ return self._datetime_format
+
+ @property
+ def if_sheet_exists(self) -> str:
+ """
+ How to behave when writing to a sheet that already exists in append mode.
+ """
+ return self._if_sheet_exists
+
+ def __fspath__(self) -> str:
+ return getattr(self._handles.handle, "name", "")
+
+ def _get_sheet_name(self, sheet_name: str | None) -> str:
+ if sheet_name is None:
+ sheet_name = self._cur_sheet
+ if sheet_name is None: # pragma: no cover
+ raise ValueError("Must pass explicit sheet_name or set _cur_sheet property")
+ return sheet_name
+
+ def _value_with_fmt(
+ self, val
+ ) -> tuple[
+ int | float | bool | str | datetime.datetime | datetime.date, str | None
+ ]:
+ """
+ Convert numpy types to Python types for the Excel writers.
+
+ Parameters
+ ----------
+ val : object
+ Value to be written into cells
+
+ Returns
+ -------
+ Tuple with the first element being the converted value and the second
+ being an optional format
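+
+        Examples
+        --------
+        A hedged sketch, assuming ``writer`` is an open ExcelWriter with the
+        default date and datetime formats and ``datetime`` is imported:
+
+        >>> writer._value_with_fmt(datetime.date(2014, 1, 31))  # doctest: +SKIP
+        (datetime.date(2014, 1, 31), 'YYYY-MM-DD')
+        >>> writer._value_with_fmt(datetime.timedelta(hours=12))  # doctest: +SKIP
+        (0.5, '0')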
+ """
+ fmt = None
+
+ if is_integer(val):
+ val = int(val)
+ elif is_float(val):
+ val = float(val)
+ elif is_bool(val):
+ val = bool(val)
+ elif isinstance(val, datetime.datetime):
+ fmt = self._datetime_format
+ elif isinstance(val, datetime.date):
+ fmt = self._date_format
+ elif isinstance(val, datetime.timedelta):
+ val = val.total_seconds() / 86400
+ fmt = "0"
+ else:
+ val = str(val)
+
+ return val, fmt
+
+ @classmethod
+ def check_extension(cls, ext: str) -> Literal[True]:
+ """
+        Check the path's extension against the writer's supported
+        extensions; raise ValueError if it isn't supported.
+ """
+ if ext.startswith("."):
+ ext = ext[1:]
+ if not any(ext in extension for extension in cls._supported_extensions):
+ raise ValueError(f"Invalid extension for engine '{cls.engine}': '{ext}'")
+ return True
+
+ # Allow use as a contextmanager
+ def __enter__(self) -> Self:
+ return self
+
+ def __exit__(
+ self,
+ exc_type: type[BaseException] | None,
+ exc_value: BaseException | None,
+ traceback: TracebackType | None,
+ ) -> None:
+ self.close()
+
+ def close(self) -> None:
+ """synonym for save, to make it more file-like"""
+ self._save()
+ self._handles.close()
+
+
+XLS_SIGNATURES = (
+ b"\x09\x00\x04\x00\x07\x00\x10\x00", # BIFF2
+ b"\x09\x02\x06\x00\x00\x00\x10\x00", # BIFF3
+ b"\x09\x04\x06\x00\x00\x00\x10\x00", # BIFF4
+ b"\xD0\xCF\x11\xE0\xA1\xB1\x1A\xE1", # Compound File Binary
+)
+ZIP_SIGNATURE = b"PK\x03\x04"
+PEEK_SIZE = max(map(len, XLS_SIGNATURES + (ZIP_SIGNATURE,)))
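+# PEEK_SIZE is the length of the longest signature above (the 8-byte XLS
+# signatures), so reading that many bytes from the head of a stream is enough
+# to test every signature with ``startswith``.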
+
+
+@doc(storage_options=_shared_docs["storage_options"])
+def inspect_excel_format(
+ content_or_path: FilePath | ReadBuffer[bytes],
+ storage_options: StorageOptions | None = None,
+) -> str | None:
+ """
+    Inspect the path or content of an Excel file and get its format.
+
+    Adapted from xlrd: https://github.com/python-excel/xlrd.
+
+ Parameters
+ ----------
+ content_or_path : str or file-like object
+ Path to file or content of file to inspect. May be a URL.
+ {storage_options}
+
+ Returns
+ -------
+ str or None
+ Format of file if it can be determined.
+
+ Raises
+ ------
+ ValueError
+ If resulting stream is empty.
+ BadZipFile
+ If resulting stream does not have an XLS signature and is not a valid zipfile.
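+
+    Examples
+    --------
+    A hedged usage sketch, assuming a local ``myfile.xlsx`` exists:
+
+    >>> from pandas.io.excel._base import inspect_excel_format  # doctest: +SKIP
+    >>> inspect_excel_format("myfile.xlsx")  # doctest: +SKIP
+    'xlsx'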
+ """
+ if isinstance(content_or_path, bytes):
+ content_or_path = BytesIO(content_or_path)
+
+ with get_handle(
+ content_or_path, "rb", storage_options=storage_options, is_text=False
+ ) as handle:
+ stream = handle.handle
+ stream.seek(0)
+ buf = stream.read(PEEK_SIZE)
+ if buf is None:
+ raise ValueError("stream is empty")
+ assert isinstance(buf, bytes)
+ peek = buf
+ stream.seek(0)
+
+ if any(peek.startswith(sig) for sig in XLS_SIGNATURES):
+ return "xls"
+ elif not peek.startswith(ZIP_SIGNATURE):
+ return None
+
+ with zipfile.ZipFile(stream) as zf:
+ # Workaround for some third party files that use forward slashes and
+ # lower case names.
+ component_names = [
+ name.replace("\\", "/").lower() for name in zf.namelist()
+ ]
+
+ if "xl/workbook.xml" in component_names:
+ return "xlsx"
+ if "xl/workbook.bin" in component_names:
+ return "xlsb"
+ if "content.xml" in component_names:
+ return "ods"
+ return "zip"
+
+
+class ExcelFile:
+ """
+ Class for parsing tabular Excel sheets into DataFrame objects.
+
+ See read_excel for more documentation.
+
+ Parameters
+ ----------
+ path_or_buffer : str, bytes, path object (pathlib.Path or py._path.local.LocalPath),
+        a file-like object, xlrd workbook, or openpyxl workbook.
+ If a string or path object, expected to be a path to a
+ .xls, .xlsx, .xlsb, .xlsm, .odf, .ods, or .odt file.
+ engine : str, default None
+ If io is not a buffer or path, this must be set to identify io.
+ Supported engines: ``xlrd``, ``openpyxl``, ``odf``, ``pyxlsb``, ``calamine``
+        Engine compatibility:
+
+ - ``xlrd`` supports old-style Excel files (.xls).
+ - ``openpyxl`` supports newer Excel file formats.
+ - ``odf`` supports OpenDocument file formats (.odf, .ods, .odt).
+ - ``pyxlsb`` supports Binary Excel files.
+ - ``calamine`` supports Excel (.xls, .xlsx, .xlsm, .xlsb)
+ and OpenDocument (.ods) file formats.
+
+ .. versionchanged:: 1.2.0
+
+       The engine `xlrd <https://xlrd.readthedocs.io/en/latest/>`_
+ now only supports old-style ``.xls`` files.
+ When ``engine=None``, the following logic will be
+ used to determine the engine:
+
+ - If ``path_or_buffer`` is an OpenDocument format (.odf, .ods, .odt),
+         then `odf <https://pypi.org/project/odfpy/>`_ will be used.
+ - Otherwise if ``path_or_buffer`` is an xls format,
+ ``xlrd`` will be used.
+ - Otherwise if ``path_or_buffer`` is in xlsb format,
+         `pyxlsb <https://pypi.org/project/pyxlsb/>`_ will be used.
+
+ .. versionadded:: 1.3.0
+
+       - Otherwise if `openpyxl <https://pypi.org/project/openpyxl/>`_ is installed,
+ then ``openpyxl`` will be used.
+ - Otherwise if ``xlrd >= 2.0`` is installed, a ``ValueError`` will be raised.
+
+ .. warning::
+
+ Please do not report issues when using ``xlrd`` to read ``.xlsx`` files.
+ This is not supported, switch to using ``openpyxl`` instead.
+ engine_kwargs : dict, optional
+ Arbitrary keyword arguments passed to excel engine.
+
+ Examples
+ --------
+ >>> file = pd.ExcelFile('myfile.xlsx') # doctest: +SKIP
+ >>> with pd.ExcelFile("myfile.xls") as xls: # doctest: +SKIP
+ ... df1 = pd.read_excel(xls, "Sheet1") # doctest: +SKIP
+ """
+
+ from pandas.io.excel._calamine import CalamineReader
+ from pandas.io.excel._odfreader import ODFReader
+ from pandas.io.excel._openpyxl import OpenpyxlReader
+ from pandas.io.excel._pyxlsb import PyxlsbReader
+ from pandas.io.excel._xlrd import XlrdReader
+
+ _engines: Mapping[str, Any] = {
+ "xlrd": XlrdReader,
+ "openpyxl": OpenpyxlReader,
+ "odf": ODFReader,
+ "pyxlsb": PyxlsbReader,
+ "calamine": CalamineReader,
+ }
+
+ def __init__(
+ self,
+ path_or_buffer,
+ engine: str | None = None,
+ storage_options: StorageOptions | None = None,
+ engine_kwargs: dict | None = None,
+ ) -> None:
+ if engine_kwargs is None:
+ engine_kwargs = {}
+
+ if engine is not None and engine not in self._engines:
+ raise ValueError(f"Unknown engine: {engine}")
+
+ # First argument can also be bytes, so create a buffer
+ if isinstance(path_or_buffer, bytes):
+ path_or_buffer = BytesIO(path_or_buffer)
+ warnings.warn(
+ "Passing bytes to 'read_excel' is deprecated and "
+ "will be removed in a future version. To read from a "
+ "byte string, wrap it in a `BytesIO` object.",
+ FutureWarning,
+ stacklevel=find_stack_level(),
+ )
+
+ # Could be a str, ExcelFile, Book, etc.
+ self.io = path_or_buffer
+ # Always a string
+ self._io = stringify_path(path_or_buffer)
+
+ # Determine xlrd version if installed
+ if import_optional_dependency("xlrd", errors="ignore") is None:
+ xlrd_version = None
+ else:
+ import xlrd
+
+ xlrd_version = Version(get_version(xlrd))
+
+ if engine is None:
+ # Only determine ext if it is needed
+ ext: str | None
+ if xlrd_version is not None and isinstance(path_or_buffer, xlrd.Book):
+ ext = "xls"
+ else:
+ ext = inspect_excel_format(
+ content_or_path=path_or_buffer, storage_options=storage_options
+ )
+ if ext is None:
+ raise ValueError(
+ "Excel file format cannot be determined, you must specify "
+ "an engine manually."
+ )
+
+ engine = config.get_option(f"io.excel.{ext}.reader", silent=True)
+ if engine == "auto":
+ engine = get_default_engine(ext, mode="reader")
+
+ assert engine is not None
+ self.engine = engine
+ self.storage_options = storage_options
+
+ self._reader = self._engines[engine](
+ self._io,
+ storage_options=storage_options,
+ engine_kwargs=engine_kwargs,
+ )
+
+ def __fspath__(self):
+ return self._io
+
+ def parse(
+ self,
+ sheet_name: str | int | list[int] | list[str] | None = 0,
+ header: int | Sequence[int] | None = 0,
+ names: SequenceNotStr[Hashable] | range | None = None,
+ index_col: int | Sequence[int] | None = None,
+ usecols=None,
+ converters=None,
+ true_values: Iterable[Hashable] | None = None,
+ false_values: Iterable[Hashable] | None = None,
+ skiprows: Sequence[int] | int | Callable[[int], object] | None = None,
+ nrows: int | None = None,
+ na_values=None,
+ parse_dates: list | dict | bool = False,
+ date_parser: Callable | lib.NoDefault = lib.no_default,
+ date_format: str | dict[Hashable, str] | None = None,
+ thousands: str | None = None,
+ comment: str | None = None,
+ skipfooter: int = 0,
+ dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,
+ **kwds,
+ ) -> DataFrame | dict[str, DataFrame] | dict[int, DataFrame]:
+ """
+ Parse specified sheet(s) into a DataFrame.
+
+        Equivalent to read_excel(ExcelFile, ...). See the read_excel
+ docstring for more info on accepted parameters.
+
+ Returns
+ -------
+ DataFrame or dict of DataFrames
+ DataFrame from the passed in Excel file.
+
+ Examples
+ --------
+ >>> df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['A', 'B', 'C'])
+ >>> df.to_excel('myfile.xlsx') # doctest: +SKIP
+ >>> file = pd.ExcelFile('myfile.xlsx') # doctest: +SKIP
+ >>> file.parse() # doctest: +SKIP
+ """
+ return self._reader.parse(
+ sheet_name=sheet_name,
+ header=header,
+ names=names,
+ index_col=index_col,
+ usecols=usecols,
+ converters=converters,
+ true_values=true_values,
+ false_values=false_values,
+ skiprows=skiprows,
+ nrows=nrows,
+ na_values=na_values,
+ parse_dates=parse_dates,
+ date_parser=date_parser,
+ date_format=date_format,
+ thousands=thousands,
+ comment=comment,
+ skipfooter=skipfooter,
+ dtype_backend=dtype_backend,
+ **kwds,
+ )
+
+ @property
+ def book(self):
+ return self._reader.book
+
+ @property
+ def sheet_names(self):
+ return self._reader.sheet_names
+
+ def close(self) -> None:
+ """close io if necessary"""
+ self._reader.close()
+
+ def __enter__(self) -> Self:
+ return self
+
+ def __exit__(
+ self,
+ exc_type: type[BaseException] | None,
+ exc_value: BaseException | None,
+ traceback: TracebackType | None,
+ ) -> None:
+ self.close()
diff --git a/evalkit_eagle/lib/python3.10/site-packages/pandas/io/excel/_calamine.py b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/excel/_calamine.py
new file mode 100644
index 0000000000000000000000000000000000000000..5259469f7a569a1913aa49635b3c14e89a18d157
--- /dev/null
+++ b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/excel/_calamine.py
@@ -0,0 +1,121 @@
+from __future__ import annotations
+
+from datetime import (
+ date,
+ datetime,
+ time,
+ timedelta,
+)
+from typing import (
+ TYPE_CHECKING,
+ Any,
+ Union,
+)
+
+from pandas.compat._optional import import_optional_dependency
+from pandas.util._decorators import doc
+
+import pandas as pd
+from pandas.core.shared_docs import _shared_docs
+
+from pandas.io.excel._base import BaseExcelReader
+
+if TYPE_CHECKING:
+ from python_calamine import (
+ CalamineSheet,
+ CalamineWorkbook,
+ )
+
+ from pandas._typing import (
+ FilePath,
+ NaTType,
+ ReadBuffer,
+ Scalar,
+ StorageOptions,
+ )
+
+_CellValue = Union[int, float, str, bool, time, date, datetime, timedelta]
+
+
+class CalamineReader(BaseExcelReader["CalamineWorkbook"]):
+ @doc(storage_options=_shared_docs["storage_options"])
+ def __init__(
+ self,
+ filepath_or_buffer: FilePath | ReadBuffer[bytes],
+ storage_options: StorageOptions | None = None,
+ engine_kwargs: dict | None = None,
+ ) -> None:
+ """
+ Reader using calamine engine (xlsx/xls/xlsb/ods).
+
+ Parameters
+ ----------
+        filepath_or_buffer : str or file-like
+            Path to be parsed or an open readable stream.
+ {storage_options}
+ engine_kwargs : dict, optional
+ Arbitrary keyword arguments passed to excel engine.
+ """
+ import_optional_dependency("python_calamine")
+ super().__init__(
+ filepath_or_buffer,
+ storage_options=storage_options,
+ engine_kwargs=engine_kwargs,
+ )
+
+ @property
+ def _workbook_class(self) -> type[CalamineWorkbook]:
+ from python_calamine import CalamineWorkbook
+
+ return CalamineWorkbook
+
+ def load_workbook(
+ self, filepath_or_buffer: FilePath | ReadBuffer[bytes], engine_kwargs: Any
+ ) -> CalamineWorkbook:
+ from python_calamine import load_workbook
+
+ return load_workbook(filepath_or_buffer, **engine_kwargs)
+
+ @property
+ def sheet_names(self) -> list[str]:
+ from python_calamine import SheetTypeEnum
+
+ return [
+ sheet.name
+ for sheet in self.book.sheets_metadata
+ if sheet.typ == SheetTypeEnum.WorkSheet
+ ]
+
+ def get_sheet_by_name(self, name: str) -> CalamineSheet:
+ self.raise_if_bad_sheet_by_name(name)
+ return self.book.get_sheet_by_name(name)
+
+ def get_sheet_by_index(self, index: int) -> CalamineSheet:
+ self.raise_if_bad_sheet_by_index(index)
+ return self.book.get_sheet_by_index(index)
+
+ def get_sheet_data(
+ self, sheet: CalamineSheet, file_rows_needed: int | None = None
+ ) -> list[list[Scalar | NaTType | time]]:
+ def _convert_cell(value: _CellValue) -> Scalar | NaTType | time:
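+            # e.g. 3.0 -> 3 (floats that round-trip become ints), 3.5 -> 3.5,
+            # date/datetime -> pd.Timestamp, timedelta -> pd.Timedelta, and
+            # time values pass through unchanged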
+ if isinstance(value, float):
+ val = int(value)
+ if val == value:
+ return val
+ else:
+ return value
+ elif isinstance(value, date):
+ return pd.Timestamp(value)
+ elif isinstance(value, timedelta):
+ return pd.Timedelta(value)
+ elif isinstance(value, time):
+ return value
+
+ return value
+
+ rows: list[list[_CellValue]] = sheet.to_python(
+ skip_empty_area=False, nrows=file_rows_needed
+ )
+ data = [[_convert_cell(cell) for cell in row] for row in rows]
+
+ return data
diff --git a/evalkit_eagle/lib/python3.10/site-packages/pandas/io/excel/_odfreader.py b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/excel/_odfreader.py
new file mode 100644
index 0000000000000000000000000000000000000000..69b514da32857119f048a25f647d1002315a9889
--- /dev/null
+++ b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/excel/_odfreader.py
@@ -0,0 +1,253 @@
+from __future__ import annotations
+
+from typing import (
+ TYPE_CHECKING,
+ cast,
+)
+
+import numpy as np
+
+from pandas._typing import (
+ FilePath,
+ ReadBuffer,
+ Scalar,
+ StorageOptions,
+)
+from pandas.compat._optional import import_optional_dependency
+from pandas.util._decorators import doc
+
+import pandas as pd
+from pandas.core.shared_docs import _shared_docs
+
+from pandas.io.excel._base import BaseExcelReader
+
+if TYPE_CHECKING:
+ from odf.opendocument import OpenDocument
+
+ from pandas._libs.tslibs.nattype import NaTType
+
+
+@doc(storage_options=_shared_docs["storage_options"])
+class ODFReader(BaseExcelReader["OpenDocument"]):
+ def __init__(
+ self,
+ filepath_or_buffer: FilePath | ReadBuffer[bytes],
+ storage_options: StorageOptions | None = None,
+ engine_kwargs: dict | None = None,
+ ) -> None:
+ """
+ Read tables out of OpenDocument formatted files.
+
+ Parameters
+ ----------
+        filepath_or_buffer : str or file-like
+            Path to be parsed or an open readable stream.
+ {storage_options}
+ engine_kwargs : dict, optional
+ Arbitrary keyword arguments passed to excel engine.
+ """
+ import_optional_dependency("odf")
+ super().__init__(
+ filepath_or_buffer,
+ storage_options=storage_options,
+ engine_kwargs=engine_kwargs,
+ )
+
+ @property
+ def _workbook_class(self) -> type[OpenDocument]:
+ from odf.opendocument import OpenDocument
+
+ return OpenDocument
+
+ def load_workbook(
+ self, filepath_or_buffer: FilePath | ReadBuffer[bytes], engine_kwargs
+ ) -> OpenDocument:
+ from odf.opendocument import load
+
+ return load(filepath_or_buffer, **engine_kwargs)
+
+ @property
+ def empty_value(self) -> str:
+ """Property for compat with other readers."""
+ return ""
+
+ @property
+ def sheet_names(self) -> list[str]:
+ """Return a list of sheet names present in the document"""
+ from odf.table import Table
+
+ tables = self.book.getElementsByType(Table)
+ return [t.getAttribute("name") for t in tables]
+
+ def get_sheet_by_index(self, index: int):
+ from odf.table import Table
+
+ self.raise_if_bad_sheet_by_index(index)
+ tables = self.book.getElementsByType(Table)
+ return tables[index]
+
+ def get_sheet_by_name(self, name: str):
+ from odf.table import Table
+
+ self.raise_if_bad_sheet_by_name(name)
+ tables = self.book.getElementsByType(Table)
+
+ for table in tables:
+ if table.getAttribute("name") == name:
+ return table
+
+ self.close()
+ raise ValueError(f"sheet {name} not found")
+
+ def get_sheet_data(
+ self, sheet, file_rows_needed: int | None = None
+ ) -> list[list[Scalar | NaTType]]:
+ """
+ Parse an ODF Table into a list of lists
+ """
+ from odf.table import (
+ CoveredTableCell,
+ TableCell,
+ TableRow,
+ )
+
+ covered_cell_name = CoveredTableCell().qname
+ table_cell_name = TableCell().qname
+ cell_names = {covered_cell_name, table_cell_name}
+
+ sheet_rows = sheet.getElementsByType(TableRow)
+ empty_rows = 0
+ max_row_len = 0
+
+ table: list[list[Scalar | NaTType]] = []
+
+ for sheet_row in sheet_rows:
+ sheet_cells = [
+ x
+ for x in sheet_row.childNodes
+ if hasattr(x, "qname") and x.qname in cell_names
+ ]
+ empty_cells = 0
+ table_row: list[Scalar | NaTType] = []
+
+ for sheet_cell in sheet_cells:
+ if sheet_cell.qname == table_cell_name:
+ value = self._get_cell_value(sheet_cell)
+ else:
+ value = self.empty_value
+
+ column_repeat = self._get_column_repeat(sheet_cell)
+
+ # Queue up empty values, writing only if content succeeds them
+ if value == self.empty_value:
+ empty_cells += column_repeat
+ else:
+ table_row.extend([self.empty_value] * empty_cells)
+ empty_cells = 0
+ table_row.extend([value] * column_repeat)
+
+ if max_row_len < len(table_row):
+ max_row_len = len(table_row)
+
+ row_repeat = self._get_row_repeat(sheet_row)
+ if len(table_row) == 0:
+ empty_rows += row_repeat
+ else:
+ # add blank rows to our table
+ table.extend([[self.empty_value]] * empty_rows)
+ empty_rows = 0
+ table.extend(table_row for _ in range(row_repeat))
+ if file_rows_needed is not None and len(table) >= file_rows_needed:
+ break
+
+ # Make our table square
+ for row in table:
+ if len(row) < max_row_len:
+ row.extend([self.empty_value] * (max_row_len - len(row)))
+
+ return table
+
+ def _get_row_repeat(self, row) -> int:
+ """
+        Return the number of times this row was repeated.
+
+        Repeating an empty row appears to be a common way
+        of representing sparse rows in the table.
+ """
+ from odf.namespaces import TABLENS
+
+ return int(row.attributes.get((TABLENS, "number-rows-repeated"), 1))
+
+ def _get_column_repeat(self, cell) -> int:
+ from odf.namespaces import TABLENS
+
+ return int(cell.attributes.get((TABLENS, "number-columns-repeated"), 1))
+
+ def _get_cell_value(self, cell) -> Scalar | NaTType:
+ from odf.namespaces import OFFICENS
+
+ if str(cell) == "#N/A":
+ return np.nan
+
+ cell_type = cell.attributes.get((OFFICENS, "value-type"))
+ if cell_type == "boolean":
+ if str(cell) == "TRUE":
+ return True
+ return False
+ if cell_type is None:
+ return self.empty_value
+ elif cell_type == "float":
+ # GH5394
+ cell_value = float(cell.attributes.get((OFFICENS, "value")))
+ val = int(cell_value)
+ if val == cell_value:
+ return val
+ return cell_value
+ elif cell_type == "percentage":
+ cell_value = cell.attributes.get((OFFICENS, "value"))
+ return float(cell_value)
+ elif cell_type == "string":
+ return self._get_cell_string_value(cell)
+ elif cell_type == "currency":
+ cell_value = cell.attributes.get((OFFICENS, "value"))
+ return float(cell_value)
+ elif cell_type == "date":
+ cell_value = cell.attributes.get((OFFICENS, "date-value"))
+ return pd.Timestamp(cell_value)
+ elif cell_type == "time":
+ stamp = pd.Timestamp(str(cell))
+ # cast needed here because Scalar doesn't include datetime.time
+ return cast(Scalar, stamp.time())
+ else:
+ self.close()
+ raise ValueError(f"Unrecognized type {cell_type}")
+
+ def _get_cell_string_value(self, cell) -> str:
+ """
+ Find and decode OpenDocument text:s tags that represent
+ a run length encoded sequence of space characters.
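+
+        For example, a ``<text:s text:c="3"/>`` tag decodes to three
+        literal spaces.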
+ """
+ from odf.element import Element
+ from odf.namespaces import TEXTNS
+ from odf.office import Annotation
+ from odf.text import S
+
+ office_annotation = Annotation().qname
+ text_s = S().qname
+
+ value = []
+
+ for fragment in cell.childNodes:
+ if isinstance(fragment, Element):
+ if fragment.qname == text_s:
+ spaces = int(fragment.attributes.get((TEXTNS, "c"), 1))
+ value.append(" " * spaces)
+ elif fragment.qname == office_annotation:
+ continue
+ else:
+ # recursive impl needed in case of nested fragments
+ # with multiple spaces
+ # https://github.com/pandas-dev/pandas/pull/36175#discussion_r484639704
+ value.append(self._get_cell_string_value(fragment))
+ else:
+ value.append(str(fragment).strip("\n"))
+ return "".join(value)
diff --git a/evalkit_eagle/lib/python3.10/site-packages/pandas/io/excel/_odswriter.py b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/excel/_odswriter.py
new file mode 100644
index 0000000000000000000000000000000000000000..bc7dca2d95b6b434279f8290fdf929e737f75459
--- /dev/null
+++ b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/excel/_odswriter.py
@@ -0,0 +1,357 @@
+from __future__ import annotations
+
+from collections import defaultdict
+import datetime
+import json
+from typing import (
+ TYPE_CHECKING,
+ Any,
+ DefaultDict,
+ cast,
+ overload,
+)
+
+from pandas.io.excel._base import ExcelWriter
+from pandas.io.excel._util import (
+ combine_kwargs,
+ validate_freeze_panes,
+)
+
+if TYPE_CHECKING:
+ from pandas._typing import (
+ ExcelWriterIfSheetExists,
+ FilePath,
+ StorageOptions,
+ WriteExcelBuffer,
+ )
+
+ from pandas.io.formats.excel import ExcelCell
+
+
+class ODSWriter(ExcelWriter):
+ _engine = "odf"
+ _supported_extensions = (".ods",)
+
+ def __init__(
+ self,
+ path: FilePath | WriteExcelBuffer | ExcelWriter,
+ engine: str | None = None,
+ date_format: str | None = None,
+ datetime_format=None,
+ mode: str = "w",
+ storage_options: StorageOptions | None = None,
+ if_sheet_exists: ExcelWriterIfSheetExists | None = None,
+ engine_kwargs: dict[str, Any] | None = None,
+ **kwargs,
+ ) -> None:
+ from odf.opendocument import OpenDocumentSpreadsheet
+
+ if mode == "a":
+ raise ValueError("Append mode is not supported with odf!")
+
+ engine_kwargs = combine_kwargs(engine_kwargs, kwargs)
+ self._book = OpenDocumentSpreadsheet(**engine_kwargs)
+
+ super().__init__(
+ path,
+ mode=mode,
+ storage_options=storage_options,
+ if_sheet_exists=if_sheet_exists,
+ engine_kwargs=engine_kwargs,
+ )
+
+ self._style_dict: dict[str, str] = {}
+
+ @property
+ def book(self):
+ """
+ Book instance of class odf.opendocument.OpenDocumentSpreadsheet.
+
+ This attribute can be used to access engine-specific features.
+ """
+ return self._book
+
+ @property
+ def sheets(self) -> dict[str, Any]:
+ """Mapping of sheet names to sheet objects."""
+ from odf.table import Table
+
+ result = {
+ sheet.getAttribute("name"): sheet
+ for sheet in self.book.getElementsByType(Table)
+ }
+ return result
+
+ def _save(self) -> None:
+ """
+ Save workbook to disk.
+ """
+ for sheet in self.sheets.values():
+ self.book.spreadsheet.addElement(sheet)
+ self.book.save(self._handles.handle)
+
+ def _write_cells(
+ self,
+ cells: list[ExcelCell],
+ sheet_name: str | None = None,
+ startrow: int = 0,
+ startcol: int = 0,
+ freeze_panes: tuple[int, int] | None = None,
+ ) -> None:
+ """
+ Write the frame cells using odf
+ """
+ from odf.table import (
+ Table,
+ TableCell,
+ TableRow,
+ )
+ from odf.text import P
+
+ sheet_name = self._get_sheet_name(sheet_name)
+ assert sheet_name is not None
+
+ if sheet_name in self.sheets:
+ wks = self.sheets[sheet_name]
+ else:
+ wks = Table(name=sheet_name)
+ self.book.spreadsheet.addElement(wks)
+
+ if validate_freeze_panes(freeze_panes):
+ freeze_panes = cast(tuple[int, int], freeze_panes)
+ self._create_freeze_panes(sheet_name, freeze_panes)
+
+ for _ in range(startrow):
+ wks.addElement(TableRow())
+
+ rows: DefaultDict = defaultdict(TableRow)
+ col_count: DefaultDict = defaultdict(int)
+
+ for cell in sorted(cells, key=lambda cell: (cell.row, cell.col)):
+ # only add empty cells if the row is still empty
+ if not col_count[cell.row]:
+ for _ in range(startcol):
+ rows[cell.row].addElement(TableCell())
+
+ # fill with empty cells if needed
+ for _ in range(cell.col - col_count[cell.row]):
+ rows[cell.row].addElement(TableCell())
+ col_count[cell.row] += 1
+
+ pvalue, tc = self._make_table_cell(cell)
+ rows[cell.row].addElement(tc)
+ col_count[cell.row] += 1
+ p = P(text=pvalue)
+ tc.addElement(p)
+
+ # add all rows to the sheet
+ if len(rows) > 0:
+ for row_nr in range(max(rows.keys()) + 1):
+ wks.addElement(rows[row_nr])
+
+ def _make_table_cell_attributes(self, cell) -> dict[str, int | str]:
+ """Convert cell attributes to OpenDocument attributes
+
+ Parameters
+ ----------
+ cell : ExcelCell
+ Spreadsheet cell data
+
+ Returns
+ -------
+ attributes : Dict[str, Union[int, str]]
+ Dictionary with attributes and attribute values
+ """
+ attributes: dict[str, int | str] = {}
+ style_name = self._process_style(cell.style)
+ if style_name is not None:
+ attributes["stylename"] = style_name
+ if cell.mergestart is not None and cell.mergeend is not None:
+ attributes["numberrowsspanned"] = max(1, cell.mergestart)
+ attributes["numbercolumnsspanned"] = cell.mergeend
+ return attributes
+
+ def _make_table_cell(self, cell) -> tuple[object, Any]:
+ """Convert cell data to an OpenDocument spreadsheet cell
+
+ Parameters
+ ----------
+ cell : ExcelCell
+ Spreadsheet cell data
+
+ Returns
+ -------
+ pvalue, cell : Tuple[str, TableCell]
+ Display value, Cell value
+ """
+ from odf.table import TableCell
+
+ attributes = self._make_table_cell_attributes(cell)
+ val, fmt = self._value_with_fmt(cell.val)
+ pvalue = value = val
+ if isinstance(val, bool):
+ value = str(val).lower()
+ pvalue = str(val).upper()
+ return (
+ pvalue,
+ TableCell(
+ valuetype="boolean",
+ booleanvalue=value,
+ attributes=attributes,
+ ),
+ )
+ elif isinstance(val, datetime.datetime):
+ # Fast formatting
+ value = val.isoformat()
+ # Slow but locale-dependent
+ pvalue = val.strftime("%c")
+ return (
+ pvalue,
+ TableCell(valuetype="date", datevalue=value, attributes=attributes),
+ )
+ elif isinstance(val, datetime.date):
+ # Fast formatting
+ value = f"{val.year}-{val.month:02d}-{val.day:02d}"
+ # Slow but locale-dependent
+ pvalue = val.strftime("%x")
+ return (
+ pvalue,
+ TableCell(valuetype="date", datevalue=value, attributes=attributes),
+ )
+ elif isinstance(val, str):
+ return (
+ pvalue,
+ TableCell(
+ valuetype="string",
+ stringvalue=value,
+ attributes=attributes,
+ ),
+ )
+ else:
+ return (
+ pvalue,
+ TableCell(
+ valuetype="float",
+ value=value,
+ attributes=attributes,
+ ),
+ )
+
+ @overload
+ def _process_style(self, style: dict[str, Any]) -> str:
+ ...
+
+ @overload
+ def _process_style(self, style: None) -> None:
+ ...
+
+ def _process_style(self, style: dict[str, Any] | None) -> str | None:
+        """Convert a style dictionary to an OpenDocument style sheet
+
+ Parameters
+ ----------
+ style : Dict
+ Style dictionary
+
+ Returns
+ -------
+ style_key : str
+ Unique style key for later reference in sheet
+ """
+ from odf.style import (
+ ParagraphProperties,
+ Style,
+ TableCellProperties,
+ TextProperties,
+ )
+
+ if style is None:
+ return None
+ style_key = json.dumps(style)
+ if style_key in self._style_dict:
+ return self._style_dict[style_key]
+ name = f"pd{len(self._style_dict)+1}"
+ self._style_dict[style_key] = name
+ odf_style = Style(name=name, family="table-cell")
+ if "font" in style:
+ font = style["font"]
+ if font.get("bold", False):
+ odf_style.addElement(TextProperties(fontweight="bold"))
+ if "borders" in style:
+ borders = style["borders"]
+ for side, thickness in borders.items():
+ thickness_translation = {"thin": "0.75pt solid #000000"}
+ odf_style.addElement(
+ TableCellProperties(
+ attributes={f"border{side}": thickness_translation[thickness]}
+ )
+ )
+ if "alignment" in style:
+ alignment = style["alignment"]
+ horizontal = alignment.get("horizontal")
+ if horizontal:
+ odf_style.addElement(ParagraphProperties(textalign=horizontal))
+ vertical = alignment.get("vertical")
+ if vertical:
+ odf_style.addElement(TableCellProperties(verticalalign=vertical))
+ self.book.styles.addElement(odf_style)
+ return name
+
+ def _create_freeze_panes(
+ self, sheet_name: str, freeze_panes: tuple[int, int]
+ ) -> None:
+ """
+ Create freeze panes in the sheet.
+
+ Parameters
+ ----------
+ sheet_name : str
+ Name of the spreadsheet
+ freeze_panes : tuple of (int, int)
+ Freeze pane location x and y
+ """
+ from odf.config import (
+ ConfigItem,
+ ConfigItemMapEntry,
+ ConfigItemMapIndexed,
+ ConfigItemMapNamed,
+ ConfigItemSet,
+ )
+
+ config_item_set = ConfigItemSet(name="ooo:view-settings")
+ self.book.settings.addElement(config_item_set)
+
+ config_item_map_indexed = ConfigItemMapIndexed(name="Views")
+ config_item_set.addElement(config_item_map_indexed)
+
+ config_item_map_entry = ConfigItemMapEntry()
+ config_item_map_indexed.addElement(config_item_map_entry)
+
+ config_item_map_named = ConfigItemMapNamed(name="Tables")
+ config_item_map_entry.addElement(config_item_map_named)
+
+ config_item_map_entry = ConfigItemMapEntry(name=sheet_name)
+ config_item_map_named.addElement(config_item_map_entry)
+
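+        # Split mode 2 in the OpenDocument view settings requests frozen
+        # panes (mode 1 would be a movable split); the split positions below
+        # are column/row counts measured from the top-left corner.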
+ config_item_map_entry.addElement(
+ ConfigItem(name="HorizontalSplitMode", type="short", text="2")
+ )
+ config_item_map_entry.addElement(
+ ConfigItem(name="VerticalSplitMode", type="short", text="2")
+ )
+ config_item_map_entry.addElement(
+ ConfigItem(
+ name="HorizontalSplitPosition", type="int", text=str(freeze_panes[0])
+ )
+ )
+ config_item_map_entry.addElement(
+ ConfigItem(
+ name="VerticalSplitPosition", type="int", text=str(freeze_panes[1])
+ )
+ )
+ config_item_map_entry.addElement(
+ ConfigItem(name="PositionRight", type="int", text=str(freeze_panes[0]))
+ )
+ config_item_map_entry.addElement(
+ ConfigItem(name="PositionBottom", type="int", text=str(freeze_panes[1]))
+ )
diff --git a/evalkit_eagle/lib/python3.10/site-packages/pandas/io/excel/_openpyxl.py b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/excel/_openpyxl.py
new file mode 100644
index 0000000000000000000000000000000000000000..c546443868a62aed062bf3fd41d80933e4fbc59e
--- /dev/null
+++ b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/excel/_openpyxl.py
@@ -0,0 +1,639 @@
+from __future__ import annotations
+
+import mmap
+from typing import (
+ TYPE_CHECKING,
+ Any,
+ cast,
+)
+
+import numpy as np
+
+from pandas.compat._optional import import_optional_dependency
+from pandas.util._decorators import doc
+
+from pandas.core.shared_docs import _shared_docs
+
+from pandas.io.excel._base import (
+ BaseExcelReader,
+ ExcelWriter,
+)
+from pandas.io.excel._util import (
+ combine_kwargs,
+ validate_freeze_panes,
+)
+
+if TYPE_CHECKING:
+ from openpyxl import Workbook
+ from openpyxl.descriptors.serialisable import Serialisable
+
+ from pandas._typing import (
+ ExcelWriterIfSheetExists,
+ FilePath,
+ ReadBuffer,
+ Scalar,
+ StorageOptions,
+ WriteExcelBuffer,
+ )
+
+
+class OpenpyxlWriter(ExcelWriter):
+ _engine = "openpyxl"
+ _supported_extensions = (".xlsx", ".xlsm")
+
+ def __init__(
+ self,
+ path: FilePath | WriteExcelBuffer | ExcelWriter,
+ engine: str | None = None,
+ date_format: str | None = None,
+ datetime_format: str | None = None,
+ mode: str = "w",
+ storage_options: StorageOptions | None = None,
+ if_sheet_exists: ExcelWriterIfSheetExists | None = None,
+ engine_kwargs: dict[str, Any] | None = None,
+ **kwargs,
+ ) -> None:
+ # Use the openpyxl module as the Excel writer.
+ from openpyxl.workbook import Workbook
+
+ engine_kwargs = combine_kwargs(engine_kwargs, kwargs)
+
+ super().__init__(
+ path,
+ mode=mode,
+ storage_options=storage_options,
+ if_sheet_exists=if_sheet_exists,
+ engine_kwargs=engine_kwargs,
+ )
+
+        # ExcelWriter replaced "a" with "r+" so we can first read the existing
+        # Excel file and then write to it
+ if "r+" in self._mode: # Load from existing workbook
+ from openpyxl import load_workbook
+
+ try:
+ self._book = load_workbook(self._handles.handle, **engine_kwargs)
+ except TypeError:
+ self._handles.handle.close()
+ raise
+ self._handles.handle.seek(0)
+ else:
+ # Create workbook object with default optimized_write=True.
+ try:
+ self._book = Workbook(**engine_kwargs)
+ except TypeError:
+ self._handles.handle.close()
+ raise
+
+ if self.book.worksheets:
+ self.book.remove(self.book.worksheets[0])
+
+ @property
+ def book(self) -> Workbook:
+ """
+ Book instance of class openpyxl.workbook.Workbook.
+
+ This attribute can be used to access engine-specific features.
+ """
+ return self._book
+
+ @property
+ def sheets(self) -> dict[str, Any]:
+ """Mapping of sheet names to sheet objects."""
+ result = {name: self.book[name] for name in self.book.sheetnames}
+ return result
+
+ def _save(self) -> None:
+ """
+ Save workbook to disk.
+ """
+ self.book.save(self._handles.handle)
+ if "r+" in self._mode and not isinstance(self._handles.handle, mmap.mmap):
+ # truncate file to the written content
+ self._handles.handle.truncate()
+
+ @classmethod
+ def _convert_to_style_kwargs(cls, style_dict: dict) -> dict[str, Serialisable]:
+ """
+ Convert a style_dict to a set of kwargs suitable for initializing
+ or updating-on-copy an openpyxl v2 style object.
+
+ Parameters
+ ----------
+ style_dict : dict
+ A dict with zero or more of the following keys (or their synonyms).
+ 'font'
+ 'fill'
+ 'border' ('borders')
+ 'alignment'
+ 'number_format'
+ 'protection'
+
+ Returns
+ -------
+ style_kwargs : dict
+ A dict with the same, normalized keys as ``style_dict`` but each
+ value has been replaced with a native openpyxl style object of the
+ appropriate class.
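+
+        Examples
+        --------
+        A hedged sketch (assumes openpyxl is importable; reprs abbreviated):
+
+        >>> OpenpyxlWriter._convert_to_style_kwargs(
+        ...     {"font": {"bold": True}, "borders": {"top": "thin"}}
+        ... )  # doctest: +SKIP
+        {'font': Font(...), 'border': Border(...)}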
+ """
+ _style_key_map = {"borders": "border"}
+
+ style_kwargs: dict[str, Serialisable] = {}
+ for k, v in style_dict.items():
+ k = _style_key_map.get(k, k)
+ _conv_to_x = getattr(cls, f"_convert_to_{k}", lambda x: None)
+ new_v = _conv_to_x(v)
+ if new_v:
+ style_kwargs[k] = new_v
+
+ return style_kwargs
+
+ @classmethod
+ def _convert_to_color(cls, color_spec):
+ """
+ Convert ``color_spec`` to an openpyxl v2 Color object.
+
+ Parameters
+ ----------
+ color_spec : str, dict
+ A 32-bit ARGB hex string, or a dict with zero or more of the
+ following keys.
+ 'rgb'
+ 'indexed'
+ 'auto'
+ 'theme'
+ 'tint'
+ 'index'
+ 'type'
+
+ Returns
+ -------
+ color : openpyxl.styles.Color
+ """
+ from openpyxl.styles import Color
+
+ if isinstance(color_spec, str):
+ return Color(color_spec)
+ else:
+ return Color(**color_spec)
+
+ @classmethod
+ def _convert_to_font(cls, font_dict):
+ """
+ Convert ``font_dict`` to an openpyxl v2 Font object.
+
+ Parameters
+ ----------
+ font_dict : dict
+ A dict with zero or more of the following keys (or their synonyms).
+ 'name'
+ 'size' ('sz')
+ 'bold' ('b')
+ 'italic' ('i')
+ 'underline' ('u')
+ 'strikethrough' ('strike')
+ 'color'
+ 'vertAlign' ('vertalign')
+ 'charset'
+ 'scheme'
+ 'family'
+ 'outline'
+ 'shadow'
+ 'condense'
+
+ Returns
+ -------
+ font : openpyxl.styles.Font
+ """
+ from openpyxl.styles import Font
+
+ _font_key_map = {
+ "sz": "size",
+ "b": "bold",
+ "i": "italic",
+ "u": "underline",
+ "strike": "strikethrough",
+ "vertalign": "vertAlign",
+ }
+
+ font_kwargs = {}
+ for k, v in font_dict.items():
+ k = _font_key_map.get(k, k)
+ if k == "color":
+ v = cls._convert_to_color(v)
+ font_kwargs[k] = v
+
+ return Font(**font_kwargs)
+
+ @classmethod
+ def _convert_to_stop(cls, stop_seq):
+ """
+ Convert ``stop_seq`` to a list of openpyxl v2 Color objects,
+ suitable for initializing the ``GradientFill`` ``stop`` parameter.
+
+ Parameters
+ ----------
+ stop_seq : iterable
+ An iterable that yields objects suitable for consumption by
+ ``_convert_to_color``.
+
+ Returns
+ -------
+ stop : list of openpyxl.styles.Color
+ """
+ return map(cls._convert_to_color, stop_seq)
+
+ @classmethod
+ def _convert_to_fill(cls, fill_dict: dict[str, Any]):
+ """
+ Convert ``fill_dict`` to an openpyxl v2 Fill object.
+
+ Parameters
+ ----------
+ fill_dict : dict
+ A dict with one or more of the following keys (or their synonyms),
+ 'fill_type' ('patternType', 'patterntype')
+ 'start_color' ('fgColor', 'fgcolor')
+ 'end_color' ('bgColor', 'bgcolor')
+ or one or more of the following keys (or their synonyms).
+ 'type' ('fill_type')
+ 'degree'
+ 'left'
+ 'right'
+ 'top'
+ 'bottom'
+ 'stop'
+
+ Returns
+ -------
+ fill : openpyxl.styles.Fill
+ """
+ from openpyxl.styles import (
+ GradientFill,
+ PatternFill,
+ )
+
+ _pattern_fill_key_map = {
+ "patternType": "fill_type",
+ "patterntype": "fill_type",
+ "fgColor": "start_color",
+ "fgcolor": "start_color",
+ "bgColor": "end_color",
+ "bgcolor": "end_color",
+ }
+
+ _gradient_fill_key_map = {"fill_type": "type"}
+
+ pfill_kwargs = {}
+ gfill_kwargs = {}
+ for k, v in fill_dict.items():
+ pk = _pattern_fill_key_map.get(k)
+ gk = _gradient_fill_key_map.get(k)
+ if pk in ["start_color", "end_color"]:
+ v = cls._convert_to_color(v)
+ if gk == "stop":
+ v = cls._convert_to_stop(v)
+ if pk:
+ pfill_kwargs[pk] = v
+ elif gk:
+ gfill_kwargs[gk] = v
+ else:
+ pfill_kwargs[k] = v
+ gfill_kwargs[k] = v
+
+ try:
+ return PatternFill(**pfill_kwargs)
+ except TypeError:
+ return GradientFill(**gfill_kwargs)
+
+ @classmethod
+ def _convert_to_side(cls, side_spec):
+ """
+ Convert ``side_spec`` to an openpyxl v2 Side object.
+
+ Parameters
+ ----------
+ side_spec : str, dict
+ A string specifying the border style, or a dict with zero or more
+ of the following keys (or their synonyms).
+ 'style' ('border_style')
+ 'color'
+
+ Returns
+ -------
+ side : openpyxl.styles.Side
+ """
+ from openpyxl.styles import Side
+
+ _side_key_map = {"border_style": "style"}
+
+ if isinstance(side_spec, str):
+ return Side(style=side_spec)
+
+ side_kwargs = {}
+ for k, v in side_spec.items():
+ k = _side_key_map.get(k, k)
+ if k == "color":
+ v = cls._convert_to_color(v)
+ side_kwargs[k] = v
+
+ return Side(**side_kwargs)
+
+ @classmethod
+ def _convert_to_border(cls, border_dict):
+ """
+ Convert ``border_dict`` to an openpyxl v2 Border object.
+
+ Parameters
+ ----------
+ border_dict : dict
+ A dict with zero or more of the following keys (or their synonyms).
+ 'left'
+ 'right'
+ 'top'
+ 'bottom'
+ 'diagonal'
+ 'diagonal_direction'
+ 'vertical'
+ 'horizontal'
+ 'diagonalUp' ('diagonalup')
+ 'diagonalDown' ('diagonaldown')
+ 'outline'
+
+ Returns
+ -------
+ border : openpyxl.styles.Border
+ """
+ from openpyxl.styles import Border
+
+ _border_key_map = {"diagonalup": "diagonalUp", "diagonaldown": "diagonalDown"}
+
+ border_kwargs = {}
+ for k, v in border_dict.items():
+ k = _border_key_map.get(k, k)
+ if k == "color":
+ v = cls._convert_to_color(v)
+ if k in ["left", "right", "top", "bottom", "diagonal"]:
+ v = cls._convert_to_side(v)
+ border_kwargs[k] = v
+
+ return Border(**border_kwargs)
+
+ @classmethod
+ def _convert_to_alignment(cls, alignment_dict):
+ """
+ Convert ``alignment_dict`` to an openpyxl v2 Alignment object.
+
+ Parameters
+ ----------
+ alignment_dict : dict
+ A dict with zero or more of the following keys (or their synonyms).
+ 'horizontal'
+ 'vertical'
+ 'text_rotation'
+ 'wrap_text'
+ 'shrink_to_fit'
+            'indent'
+
+        Returns
+ -------
+ alignment : openpyxl.styles.Alignment
+ """
+ from openpyxl.styles import Alignment
+
+ return Alignment(**alignment_dict)
+
+ @classmethod
+ def _convert_to_number_format(cls, number_format_dict):
+ """
+ Convert ``number_format_dict`` to an openpyxl v2.1.0 number format
+ initializer.
+
+ Parameters
+ ----------
+ number_format_dict : dict
+ A dict with zero or more of the following keys.
+ 'format_code' : str
+
+ Returns
+ -------
+ number_format : str
+ """
+ return number_format_dict["format_code"]
+
+ @classmethod
+ def _convert_to_protection(cls, protection_dict):
+ """
+ Convert ``protection_dict`` to an openpyxl v2 Protection object.
+
+ Parameters
+ ----------
+ protection_dict : dict
+ A dict with zero or more of the following keys.
+ 'locked'
+ 'hidden'
+
+        Returns
+        -------
+        protection : openpyxl.styles.Protection
+        """
+ from openpyxl.styles import Protection
+
+ return Protection(**protection_dict)
+
+ def _write_cells(
+ self,
+ cells,
+ sheet_name: str | None = None,
+ startrow: int = 0,
+ startcol: int = 0,
+ freeze_panes: tuple[int, int] | None = None,
+ ) -> None:
+ # Write the frame cells using openpyxl.
+ sheet_name = self._get_sheet_name(sheet_name)
+
+ _style_cache: dict[str, dict[str, Serialisable]] = {}
+
+ if sheet_name in self.sheets and self._if_sheet_exists != "new":
+ if "r+" in self._mode:
+ if self._if_sheet_exists == "replace":
+ old_wks = self.sheets[sheet_name]
+ target_index = self.book.index(old_wks)
+ del self.book[sheet_name]
+ wks = self.book.create_sheet(sheet_name, target_index)
+ elif self._if_sheet_exists == "error":
+ raise ValueError(
+ f"Sheet '{sheet_name}' already exists and "
+ f"if_sheet_exists is set to 'error'."
+ )
+ elif self._if_sheet_exists == "overlay":
+ wks = self.sheets[sheet_name]
+ else:
+ raise ValueError(
+ f"'{self._if_sheet_exists}' is not valid for if_sheet_exists. "
+ "Valid options are 'error', 'new', 'replace' and 'overlay'."
+ )
+ else:
+ wks = self.sheets[sheet_name]
+ else:
+ wks = self.book.create_sheet()
+ wks.title = sheet_name
+
+ if validate_freeze_panes(freeze_panes):
+ freeze_panes = cast(tuple[int, int], freeze_panes)
+ wks.freeze_panes = wks.cell(
+ row=freeze_panes[0] + 1, column=freeze_panes[1] + 1
+ )
+
+ for cell in cells:
+ xcell = wks.cell(
+ row=startrow + cell.row + 1, column=startcol + cell.col + 1
+ )
+ xcell.value, fmt = self._value_with_fmt(cell.val)
+ if fmt:
+ xcell.number_format = fmt
+
+ style_kwargs: dict[str, Serialisable] | None = {}
+ if cell.style:
+ key = str(cell.style)
+ style_kwargs = _style_cache.get(key)
+ if style_kwargs is None:
+ style_kwargs = self._convert_to_style_kwargs(cell.style)
+ _style_cache[key] = style_kwargs
+
+ if style_kwargs:
+ for k, v in style_kwargs.items():
+ setattr(xcell, k, v)
+
+ if cell.mergestart is not None and cell.mergeend is not None:
+ wks.merge_cells(
+ start_row=startrow + cell.row + 1,
+ start_column=startcol + cell.col + 1,
+ end_column=startcol + cell.mergeend + 1,
+ end_row=startrow + cell.mergestart + 1,
+ )
+
+ # When cells are merged, only the top-left cell is preserved;
+ # the behaviour of the other cells in a merged range is undefined.
+ if style_kwargs:
+ first_row = startrow + cell.row + 1
+ last_row = startrow + cell.mergestart + 1
+ first_col = startcol + cell.col + 1
+ last_col = startcol + cell.mergeend + 1
+
+ for row in range(first_row, last_row + 1):
+ for col in range(first_col, last_col + 1):
+ if row == first_row and col == first_col:
+ # Ignore first cell. It is already handled.
+ continue
+ xcell = wks.cell(column=col, row=row)
+ for k, v in style_kwargs.items():
+ setattr(xcell, k, v)
+
+
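+# Hedged usage sketch (comments only): the writer above is normally reached
+# through the public pandas API, e.g.
+#     with pd.ExcelWriter("out.xlsx", engine="openpyxl", mode="a",
+#                         if_sheet_exists="overlay") as writer:
+#         df.to_excel(writer, sheet_name="Sheet1")
+# where "out.xlsx", "Sheet1" and df are illustrative names, not part of
+# this module.
+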
+class OpenpyxlReader(BaseExcelReader["Workbook"]):
+ @doc(storage_options=_shared_docs["storage_options"])
+ def __init__(
+ self,
+ filepath_or_buffer: FilePath | ReadBuffer[bytes],
+ storage_options: StorageOptions | None = None,
+ engine_kwargs: dict | None = None,
+ ) -> None:
+ """
+ Reader using openpyxl engine.
+
+ Parameters
+ ----------
+ filepath_or_buffer : str, path object or Workbook
+ Object to be parsed.
+ {storage_options}
+ engine_kwargs : dict, optional
+ Arbitrary keyword arguments passed to excel engine.
+ """
+ import_optional_dependency("openpyxl")
+ super().__init__(
+ filepath_or_buffer,
+ storage_options=storage_options,
+ engine_kwargs=engine_kwargs,
+ )
+
+ @property
+ def _workbook_class(self) -> type[Workbook]:
+ from openpyxl import Workbook
+
+ return Workbook
+
+ def load_workbook(
+ self, filepath_or_buffer: FilePath | ReadBuffer[bytes], engine_kwargs
+ ) -> Workbook:
+ from openpyxl import load_workbook
+
+ default_kwargs = {"read_only": True, "data_only": True, "keep_links": False}
+
+ return load_workbook(
+ filepath_or_buffer,
+ **(default_kwargs | engine_kwargs),
+ )
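+ # Illustrative note (comments only): the dict union lets engine_kwargs
+ # override the defaults, e.g. engine_kwargs={"data_only": False} results
+ # in {"read_only": True, "data_only": False, "keep_links": False}.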
+
+ @property
+ def sheet_names(self) -> list[str]:
+ return [sheet.title for sheet in self.book.worksheets]
+
+ def get_sheet_by_name(self, name: str):
+ self.raise_if_bad_sheet_by_name(name)
+ return self.book[name]
+
+ def get_sheet_by_index(self, index: int):
+ self.raise_if_bad_sheet_by_index(index)
+ return self.book.worksheets[index]
+
+ def _convert_cell(self, cell) -> Scalar:
+ from openpyxl.cell.cell import (
+ TYPE_ERROR,
+ TYPE_NUMERIC,
+ )
+
+ if cell.value is None:
+ return "" # compat with xlrd
+ elif cell.data_type == TYPE_ERROR:
+ return np.nan
+ elif cell.data_type == TYPE_NUMERIC:
+ val = int(cell.value)
+ if val == cell.value:
+ return val
+ return float(cell.value)
+
+ return cell.value
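+ # Illustrative (comments only): a numeric cell holding 2.0 is returned as
+ # the int 2, 2.5 stays a float, and empty cells become "" for xlrd compat.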
+
+ def get_sheet_data(
+ self, sheet, file_rows_needed: int | None = None
+ ) -> list[list[Scalar]]:
+ if self.book.read_only:
+ sheet.reset_dimensions()
+
+ data: list[list[Scalar]] = []
+ last_row_with_data = -1
+ for row_number, row in enumerate(sheet.rows):
+ converted_row = [self._convert_cell(cell) for cell in row]
+ while converted_row and converted_row[-1] == "":
+ # trim trailing empty elements
+ converted_row.pop()
+ if converted_row:
+ last_row_with_data = row_number
+ data.append(converted_row)
+ if file_rows_needed is not None and len(data) >= file_rows_needed:
+ break
+
+ # Trim trailing empty rows
+ data = data[: last_row_with_data + 1]
+
+ if len(data) > 0:
+ # extend rows to max width
+ max_width = max(len(data_row) for data_row in data)
+ if min(len(data_row) for data_row in data) < max_width:
+ empty_cell: list[Scalar] = [""]
+ data = [
+ data_row + (max_width - len(data_row)) * empty_cell
+ for data_row in data
+ ]
+
+ return data
diff --git a/evalkit_eagle/lib/python3.10/site-packages/pandas/io/excel/_pyxlsb.py b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/excel/_pyxlsb.py
new file mode 100644
index 0000000000000000000000000000000000000000..a6e42616c20438fa4cab16e94b5d16a01c9c61df
--- /dev/null
+++ b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/excel/_pyxlsb.py
@@ -0,0 +1,127 @@
+# pyright: reportMissingImports=false
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+
+from pandas.compat._optional import import_optional_dependency
+from pandas.util._decorators import doc
+
+from pandas.core.shared_docs import _shared_docs
+
+from pandas.io.excel._base import BaseExcelReader
+
+if TYPE_CHECKING:
+ from pyxlsb import Workbook
+
+ from pandas._typing import (
+ FilePath,
+ ReadBuffer,
+ Scalar,
+ StorageOptions,
+ )
+
+
+class PyxlsbReader(BaseExcelReader["Workbook"]):
+ @doc(storage_options=_shared_docs["storage_options"])
+ def __init__(
+ self,
+ filepath_or_buffer: FilePath | ReadBuffer[bytes],
+ storage_options: StorageOptions | None = None,
+ engine_kwargs: dict | None = None,
+ ) -> None:
+ """
+ Reader using pyxlsb engine.
+
+ Parameters
+ ----------
+ filepath_or_buffer : str, path object, or Workbook
+ Object to be parsed.
+ {storage_options}
+ engine_kwargs : dict, optional
+ Arbitrary keyword arguments passed to excel engine.
+ """
+ import_optional_dependency("pyxlsb")
+ # This will call load_workbook on the filepath or buffer
+ # And set the result to the book-attribute
+ super().__init__(
+ filepath_or_buffer,
+ storage_options=storage_options,
+ engine_kwargs=engine_kwargs,
+ )
+
+ @property
+ def _workbook_class(self) -> type[Workbook]:
+ from pyxlsb import Workbook
+
+ return Workbook
+
+ def load_workbook(
+ self, filepath_or_buffer: FilePath | ReadBuffer[bytes], engine_kwargs
+ ) -> Workbook:
+ from pyxlsb import open_workbook
+
+ # TODO: hack in buffer capability
+ # This might need some modifications to the Pyxlsb library
+ # Actual work for opening it is in xlsbpackage.py, line 20-ish
+
+ return open_workbook(filepath_or_buffer, **engine_kwargs)
+
+ @property
+ def sheet_names(self) -> list[str]:
+ return self.book.sheets
+
+ def get_sheet_by_name(self, name: str):
+ self.raise_if_bad_sheet_by_name(name)
+ return self.book.get_sheet(name)
+
+ def get_sheet_by_index(self, index: int):
+ self.raise_if_bad_sheet_by_index(index)
+ # pyxlsb sheets are indexed from 1 onwards
+ # There's a fix for this in the source, but the pypi package doesn't have it
+ return self.book.get_sheet(index + 1)
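+ # Illustrative (comment only): get_sheet_by_index(0) asks pyxlsb for sheet 1.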
+
+ def _convert_cell(self, cell) -> Scalar:
+ # TODO: there is no way to distinguish between floats and datetimes in pyxlsb
+ # This means that there is no way to read datetime types from an xlsb file yet
+ if cell.v is None:
+ return "" # empty cells become "", so header-less columns still appear as "Unnamed: i"
+ if isinstance(cell.v, float):
+ val = int(cell.v)
+ if val == cell.v:
+ return val
+ else:
+ return float(cell.v)
+
+ return cell.v
+
+ def get_sheet_data(
+ self,
+ sheet,
+ file_rows_needed: int | None = None,
+ ) -> list[list[Scalar]]:
+ data: list[list[Scalar]] = []
+ previous_row_number = -1
+ # When sparse=True the rows can have different lengths and empty rows are
+ # not returned. The cells are namedtuples of row, col, value (r, c, v).
+ for row in sheet.rows(sparse=True):
+ row_number = row[0].r
+ converted_row = [self._convert_cell(cell) for cell in row]
+ while converted_row and converted_row[-1] == "":
+ # trim trailing empty elements
+ converted_row.pop()
+ if converted_row:
+ data.extend([[]] * (row_number - previous_row_number - 1))
+ data.append(converted_row)
+ previous_row_number = row_number
+ if file_rows_needed is not None and len(data) >= file_rows_needed:
+ break
+ if data:
+ # extend rows to max_width
+ max_width = max(len(data_row) for data_row in data)
+ if min(len(data_row) for data_row in data) < max_width:
+ empty_cell: list[Scalar] = [""]
+ data = [
+ data_row + (max_width - len(data_row)) * empty_cell
+ for data_row in data
+ ]
+ return data
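+# Illustrative sketch (comments only): with sparse=True pyxlsb skips rows that
+# contain no data, so if cells exist only at row indices 0 and 3, two empty
+# lists are inserted for rows 1 and 2 before row 3 is appended, keeping the
+# positional row numbers intact.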
diff --git a/evalkit_eagle/lib/python3.10/site-packages/pandas/io/excel/_util.py b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/excel/_util.py
new file mode 100644
index 0000000000000000000000000000000000000000..f7a1fcb8052e391d0853be64866663f4e6de9d08
--- /dev/null
+++ b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/excel/_util.py
@@ -0,0 +1,334 @@
+from __future__ import annotations
+
+from collections.abc import (
+ Hashable,
+ Iterable,
+ MutableMapping,
+ Sequence,
+)
+from typing import (
+ TYPE_CHECKING,
+ Any,
+ Callable,
+ Literal,
+ TypeVar,
+ overload,
+)
+
+from pandas.compat._optional import import_optional_dependency
+
+from pandas.core.dtypes.common import (
+ is_integer,
+ is_list_like,
+)
+
+if TYPE_CHECKING:
+ from pandas.io.excel._base import ExcelWriter
+
+ ExcelWriter_t = type[ExcelWriter]
+ usecols_func = TypeVar("usecols_func", bound=Callable[[Hashable], object])
+
+_writers: MutableMapping[str, ExcelWriter_t] = {}
+
+
+def register_writer(klass: ExcelWriter_t) -> None:
+ """
+ Add engine to the ``io.excel`` writer registry.
+
+ You must use this method to integrate with ``to_excel``.
+
+ Parameters
+ ----------
+ klass : ExcelWriter
+ """
+ if not callable(klass):
+ raise ValueError("Can only register callables as engines")
+ engine_name = klass._engine
+ _writers[engine_name] = klass
+
+
+def get_default_engine(ext: str, mode: Literal["reader", "writer"] = "reader") -> str:
+ """
+ Return the default reader/writer for the given extension.
+
+ Parameters
+ ----------
+ ext : str
+ The excel file extension for which to get the default engine.
+ mode : str {'reader', 'writer'}
+ Whether to get the default engine for reading or writing.
+ Either 'reader' or 'writer'
+
+ Returns
+ -------
+ str
+ The default engine for the extension.
+ """
+ _default_readers = {
+ "xlsx": "openpyxl",
+ "xlsm": "openpyxl",
+ "xlsb": "pyxlsb",
+ "xls": "xlrd",
+ "ods": "odf",
+ }
+ _default_writers = {
+ "xlsx": "openpyxl",
+ "xlsm": "openpyxl",
+ "xlsb": "pyxlsb",
+ "ods": "odf",
+ }
+ assert mode in ["reader", "writer"]
+ if mode == "writer":
+ # Prefer xlsxwriter over openpyxl if installed
+ xlsxwriter = import_optional_dependency("xlsxwriter", errors="warn")
+ if xlsxwriter:
+ _default_writers["xlsx"] = "xlsxwriter"
+ return _default_writers[ext]
+ else:
+ return _default_readers[ext]
+
+
+def get_writer(engine_name: str) -> ExcelWriter_t:
+ try:
+ return _writers[engine_name]
+ except KeyError as err:
+ raise ValueError(f"No Excel writer '{engine_name}'") from err
+
+
+def _excel2num(x: str) -> int:
+ """
+ Convert Excel column name like 'AB' to 0-based column index.
+
+ Parameters
+ ----------
+ x : str
+ The Excel column name to convert to a 0-based column index.
+
+ Returns
+ -------
+ num : int
+ The column index corresponding to the name.
+
+ Raises
+ ------
+ ValueError
+ Part of the Excel column name was invalid.
+ """
+ index = 0
+
+ for c in x.upper().strip():
+ cp = ord(c)
+
+ if cp < ord("A") or cp > ord("Z"):
+ raise ValueError(f"Invalid column name: {x}")
+
+ index = index * 26 + cp - ord("A") + 1
+
+ return index - 1
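+# Illustrative (comments only): "A" -> 0, "Z" -> 25 and "AB" ->
+# (1 * 26 + 2) - 1 = 27, mirroring the doctest of _range2cols below.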
+
+
+def _range2cols(areas: str) -> list[int]:
+ """
+ Convert comma separated list of column names and ranges to indices.
+
+ Parameters
+ ----------
+ areas : str
+ A string containing a sequence of column ranges (or areas).
+
+ Returns
+ -------
+ cols : list
+ A list of 0-based column indices.
+
+ Examples
+ --------
+ >>> _range2cols('A:E')
+ [0, 1, 2, 3, 4]
+ >>> _range2cols('A,C,Z:AB')
+ [0, 2, 25, 26, 27]
+ """
+ cols: list[int] = []
+
+ for rng in areas.split(","):
+ if ":" in rng:
+ rngs = rng.split(":")
+ cols.extend(range(_excel2num(rngs[0]), _excel2num(rngs[1]) + 1))
+ else:
+ cols.append(_excel2num(rng))
+
+ return cols
+
+
+@overload
+def maybe_convert_usecols(usecols: str | list[int]) -> list[int]:
+ ...
+
+
+@overload
+def maybe_convert_usecols(usecols: list[str]) -> list[str]:
+ ...
+
+
+@overload
+def maybe_convert_usecols(usecols: usecols_func) -> usecols_func:
+ ...
+
+
+@overload
+def maybe_convert_usecols(usecols: None) -> None:
+ ...
+
+
+def maybe_convert_usecols(
+ usecols: str | list[int] | list[str] | usecols_func | None,
+) -> None | list[int] | list[str] | usecols_func:
+ """
+ Convert `usecols` into a compatible format for parsing in `parsers.py`.
+
+ Parameters
+ ----------
+ usecols : object
+ The use-columns object to potentially convert.
+
+ Returns
+ -------
+ converted : object
+ The compatible format of `usecols`.
+ """
+ if usecols is None:
+ return usecols
+
+ if is_integer(usecols):
+ raise ValueError(
+ "Passing an integer for `usecols` is no longer supported. "
+ "Please pass in a list of int from 0 to `usecols` inclusive instead."
+ )
+
+ if isinstance(usecols, str):
+ return _range2cols(usecols)
+
+ return usecols
+
+
+@overload
+def validate_freeze_panes(freeze_panes: tuple[int, int]) -> Literal[True]:
+ ...
+
+
+@overload
+def validate_freeze_panes(freeze_panes: None) -> Literal[False]:
+ ...
+
+
+def validate_freeze_panes(freeze_panes: tuple[int, int] | None) -> bool:
+ if freeze_panes is not None:
+ if len(freeze_panes) == 2 and all(
+ isinstance(item, int) for item in freeze_panes
+ ):
+ return True
+
+ raise ValueError(
+ "freeze_panes must be of form (row, column) "
+ "where row and column are integers"
+ )
+
+ # freeze_panes wasn't specified, return False so it won't be applied
+ # to output sheet
+ return False
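+# Illustrative (comments only): validate_freeze_panes((1, 0)) returns True,
+# validate_freeze_panes(None) returns False, and validate_freeze_panes((1,))
+# raises ValueError.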
+
+
+def fill_mi_header(
+ row: list[Hashable], control_row: list[bool]
+) -> tuple[list[Hashable], list[bool]]:
+ """
+ Forward fill blank entries in row but only inside the same parent index.
+
+ Used for creating headers in Multiindex.
+
+ Parameters
+ ----------
+ row : list
+ List of items in a single row.
+ control_row : list of bool
+ Helps to determine if particular column is in same parent index as the
+ previous value. Used to stop propagation of empty cells between
+ different indexes.
+
+ Returns
+ -------
+ Returns changed row and control_row
+ """
+ last = row[0]
+ for i in range(1, len(row)):
+ if not control_row[i]:
+ last = row[i]
+
+ if row[i] == "" or row[i] is None:
+ row[i] = last
+ else:
+ control_row[i] = False
+ last = row[i]
+
+ return row, control_row
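+# Illustrative (comments only): fill_mi_header(["a", "", "b", ""],
+# [True, True, True, True]) returns (["a", "a", "b", "b"],
+# [True, True, False, True]); the blank after "b" is filled because it is
+# still within the same parent index.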
+
+
+def pop_header_name(
+ row: list[Hashable], index_col: int | Sequence[int]
+) -> tuple[Hashable | None, list[Hashable]]:
+ """
+ Pop the header name for MultiIndex parsing.
+
+ Parameters
+ ----------
+ row : list
+ The data row to parse for the header name.
+ index_col : int, list
+ The index columns for our data. Assumed to be non-null.
+
+ Returns
+ -------
+ header_name : str or None
+ The extracted header name.
+ trimmed_row : list
+ The original data row with the header name removed.
+ """
+ # Pop out header name and fill w/blank.
+ if is_list_like(index_col):
+ assert isinstance(index_col, Iterable)
+ i = max(index_col)
+ else:
+ assert not isinstance(index_col, Iterable)
+ i = index_col
+
+ header_name = row[i]
+ header_name = None if header_name == "" else header_name
+
+ return header_name, row[:i] + [""] + row[i + 1 :]
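+# Illustrative (comments only): pop_header_name(["idx", "A", "B"], index_col=0)
+# returns ("idx", ["", "A", "B"]); a blank header cell yields None instead.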
+
+
+def combine_kwargs(engine_kwargs: dict[str, Any] | None, kwargs: dict) -> dict:
+ """
+ Used to combine two sources of kwargs for the backend engine.
+
+ Use of kwargs is deprecated; this function is solely for use in 1.3 and should
+ be removed in 1.4/2.0. Also, _base.ExcelWriter.__new__ ensures that engine_kwargs
+ and kwargs are never both provided.
+
+ Parameters
+ ----------
+ engine_kwargs: dict
+ kwargs to be passed through to the engine.
+ kwargs: dict
+ kwargs to be passed through to the engine (deprecated)
+
+ Returns
+ -------
+ engine_kwargs combined with kwargs
+ """
+ if engine_kwargs is None:
+ result = {}
+ else:
+ result = engine_kwargs.copy()
+ result.update(kwargs)
+ return result
diff --git a/evalkit_eagle/lib/python3.10/site-packages/pandas/io/excel/_xlrd.py b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/excel/_xlrd.py
new file mode 100644
index 0000000000000000000000000000000000000000..a444970792e6e65faf3d8947b721fff59487d994
--- /dev/null
+++ b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/excel/_xlrd.py
@@ -0,0 +1,143 @@
+from __future__ import annotations
+
+from datetime import time
+import math
+from typing import TYPE_CHECKING
+
+import numpy as np
+
+from pandas.compat._optional import import_optional_dependency
+from pandas.util._decorators import doc
+
+from pandas.core.shared_docs import _shared_docs
+
+from pandas.io.excel._base import BaseExcelReader
+
+if TYPE_CHECKING:
+ from xlrd import Book
+
+ from pandas._typing import (
+ Scalar,
+ StorageOptions,
+ )
+
+
+class XlrdReader(BaseExcelReader["Book"]):
+ @doc(storage_options=_shared_docs["storage_options"])
+ def __init__(
+ self,
+ filepath_or_buffer,
+ storage_options: StorageOptions | None = None,
+ engine_kwargs: dict | None = None,
+ ) -> None:
+ """
+ Reader using xlrd engine.
+
+ Parameters
+ ----------
+ filepath_or_buffer : str, path object or Workbook
+ Object to be parsed.
+ {storage_options}
+ engine_kwargs : dict, optional
+ Arbitrary keyword arguments passed to excel engine.
+ """
+ err_msg = "Install xlrd >= 2.0.1 for xls Excel support"
+ import_optional_dependency("xlrd", extra=err_msg)
+ super().__init__(
+ filepath_or_buffer,
+ storage_options=storage_options,
+ engine_kwargs=engine_kwargs,
+ )
+
+ @property
+ def _workbook_class(self) -> type[Book]:
+ from xlrd import Book
+
+ return Book
+
+ def load_workbook(self, filepath_or_buffer, engine_kwargs) -> Book:
+ from xlrd import open_workbook
+
+ if hasattr(filepath_or_buffer, "read"):
+ data = filepath_or_buffer.read()
+ return open_workbook(file_contents=data, **engine_kwargs)
+ else:
+ return open_workbook(filepath_or_buffer, **engine_kwargs)
+
+ @property
+ def sheet_names(self):
+ return self.book.sheet_names()
+
+ def get_sheet_by_name(self, name):
+ self.raise_if_bad_sheet_by_name(name)
+ return self.book.sheet_by_name(name)
+
+ def get_sheet_by_index(self, index):
+ self.raise_if_bad_sheet_by_index(index)
+ return self.book.sheet_by_index(index)
+
+ def get_sheet_data(
+ self, sheet, file_rows_needed: int | None = None
+ ) -> list[list[Scalar]]:
+ from xlrd import (
+ XL_CELL_BOOLEAN,
+ XL_CELL_DATE,
+ XL_CELL_ERROR,
+ XL_CELL_NUMBER,
+ xldate,
+ )
+
+ epoch1904 = self.book.datemode
+
+ def _parse_cell(cell_contents, cell_typ):
+ """
+ converts the contents of the cell into a pandas appropriate object
+ """
+ if cell_typ == XL_CELL_DATE:
+ # Use the newer xlrd datetime handling.
+ try:
+ cell_contents = xldate.xldate_as_datetime(cell_contents, epoch1904)
+ except OverflowError:
+ return cell_contents
+
+ # Excel doesn't distinguish between dates and times,
+ # so we treat dates on the epoch as times only.
+ # Also, Excel supports 1900 and 1904 epochs.
+ year = (cell_contents.timetuple())[0:3]
+ if (not epoch1904 and year == (1899, 12, 31)) or (
+ epoch1904 and year == (1904, 1, 1)
+ ):
+ cell_contents = time(
+ cell_contents.hour,
+ cell_contents.minute,
+ cell_contents.second,
+ cell_contents.microsecond,
+ )
+
+ elif cell_typ == XL_CELL_ERROR:
+ cell_contents = np.nan
+ elif cell_typ == XL_CELL_BOOLEAN:
+ cell_contents = bool(cell_contents)
+ elif cell_typ == XL_CELL_NUMBER:
+ # GH5394 - Excel 'numbers' are always floats
+ # it's a minimal perf hit and less surprising
+ if math.isfinite(cell_contents):
+ # GH54564 - don't attempt to convert NaN/Inf
+ val = int(cell_contents)
+ if val == cell_contents:
+ cell_contents = val
+ return cell_contents
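+ # Illustrative (comments only): in the 1900 date system a date cell stored
+ # as 0.5 parses to 1899-12-31 12:00:00, which lies on the epoch and is
+ # therefore returned as time(12, 0); a plain number cell holding 3.0 comes
+ # back as the int 3.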
+
+ data = []
+
+ nrows = sheet.nrows
+ if file_rows_needed is not None:
+ nrows = min(nrows, file_rows_needed)
+ for i in range(nrows):
+ row = [
+ _parse_cell(value, typ)
+ for value, typ in zip(sheet.row_values(i), sheet.row_types(i))
+ ]
+ data.append(row)
+
+ return data
diff --git a/evalkit_eagle/lib/python3.10/site-packages/pandas/io/excel/_xlsxwriter.py b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/excel/_xlsxwriter.py
new file mode 100644
index 0000000000000000000000000000000000000000..6eacac8c064fb1f297cd46b8ab0361ceb22067b4
--- /dev/null
+++ b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/excel/_xlsxwriter.py
@@ -0,0 +1,284 @@
+from __future__ import annotations
+
+import json
+from typing import (
+ TYPE_CHECKING,
+ Any,
+)
+
+from pandas.io.excel._base import ExcelWriter
+from pandas.io.excel._util import (
+ combine_kwargs,
+ validate_freeze_panes,
+)
+
+if TYPE_CHECKING:
+ from pandas._typing import (
+ ExcelWriterIfSheetExists,
+ FilePath,
+ StorageOptions,
+ WriteExcelBuffer,
+ )
+
+
+class _XlsxStyler:
+ # Map from openpyxl-oriented styles to flatter xlsxwriter representation
+ # Ordering necessary for both determinism and because some are keyed by
+ # prefixes of others.
+ STYLE_MAPPING: dict[str, list[tuple[tuple[str, ...], str]]] = {
+ "font": [
+ (("name",), "font_name"),
+ (("sz",), "font_size"),
+ (("size",), "font_size"),
+ (("color", "rgb"), "font_color"),
+ (("color",), "font_color"),
+ (("b",), "bold"),
+ (("bold",), "bold"),
+ (("i",), "italic"),
+ (("italic",), "italic"),
+ (("u",), "underline"),
+ (("underline",), "underline"),
+ (("strike",), "font_strikeout"),
+ (("vertAlign",), "font_script"),
+ (("vertalign",), "font_script"),
+ ],
+ "number_format": [(("format_code",), "num_format"), ((), "num_format")],
+ "protection": [(("locked",), "locked"), (("hidden",), "hidden")],
+ "alignment": [
+ (("horizontal",), "align"),
+ (("vertical",), "valign"),
+ (("text_rotation",), "rotation"),
+ (("wrap_text",), "text_wrap"),
+ (("indent",), "indent"),
+ (("shrink_to_fit",), "shrink"),
+ ],
+ "fill": [
+ (("patternType",), "pattern"),
+ (("patterntype",), "pattern"),
+ (("fill_type",), "pattern"),
+ (("start_color", "rgb"), "fg_color"),
+ (("fgColor", "rgb"), "fg_color"),
+ (("fgcolor", "rgb"), "fg_color"),
+ (("start_color",), "fg_color"),
+ (("fgColor",), "fg_color"),
+ (("fgcolor",), "fg_color"),
+ (("end_color", "rgb"), "bg_color"),
+ (("bgColor", "rgb"), "bg_color"),
+ (("bgcolor", "rgb"), "bg_color"),
+ (("end_color",), "bg_color"),
+ (("bgColor",), "bg_color"),
+ (("bgcolor",), "bg_color"),
+ ],
+ "border": [
+ (("color", "rgb"), "border_color"),
+ (("color",), "border_color"),
+ (("style",), "border"),
+ (("top", "color", "rgb"), "top_color"),
+ (("top", "color"), "top_color"),
+ (("top", "style"), "top"),
+ (("top",), "top"),
+ (("right", "color", "rgb"), "right_color"),
+ (("right", "color"), "right_color"),
+ (("right", "style"), "right"),
+ (("right",), "right"),
+ (("bottom", "color", "rgb"), "bottom_color"),
+ (("bottom", "color"), "bottom_color"),
+ (("bottom", "style"), "bottom"),
+ (("bottom",), "bottom"),
+ (("left", "color", "rgb"), "left_color"),
+ (("left", "color"), "left_color"),
+ (("left", "style"), "left"),
+ (("left",), "left"),
+ ],
+ }
+
+ @classmethod
+ def convert(cls, style_dict, num_format_str=None):
+ """
+ converts a style_dict to an xlsxwriter format dict
+
+ Parameters
+ ----------
+ style_dict : style dictionary to convert
+ num_format_str : optional number format string
+ """
+ # Create a XlsxWriter format object.
+ props = {}
+
+ if num_format_str is not None:
+ props["num_format"] = num_format_str
+
+ if style_dict is None:
+ return props
+
+ if "borders" in style_dict:
+ style_dict = style_dict.copy()
+ style_dict["border"] = style_dict.pop("borders")
+
+ for style_group_key, style_group in style_dict.items():
+ for src, dst in cls.STYLE_MAPPING.get(style_group_key, []):
+ # src is a sequence of keys into a nested dict
+ # dst is a flat key
+ if dst in props:
+ continue
+ v = style_group
+ for k in src:
+ try:
+ v = v[k]
+ except (KeyError, TypeError):
+ break
+ else:
+ props[dst] = v
+
+ if isinstance(props.get("pattern"), str):
+ # TODO: support other fill patterns
+ props["pattern"] = 0 if props["pattern"] == "none" else 1
+
+ for k in ["border", "top", "right", "bottom", "left"]:
+ if isinstance(props.get(k), str):
+ try:
+ props[k] = [
+ "none",
+ "thin",
+ "medium",
+ "dashed",
+ "dotted",
+ "thick",
+ "double",
+ "hair",
+ "mediumDashed",
+ "dashDot",
+ "mediumDashDot",
+ "dashDotDot",
+ "mediumDashDotDot",
+ "slantDashDot",
+ ].index(props[k])
+ except ValueError:
+ props[k] = 2
+
+ if isinstance(props.get("font_script"), str):
+ props["font_script"] = ["baseline", "superscript", "subscript"].index(
+ props["font_script"]
+ )
+
+ if isinstance(props.get("underline"), str):
+ props["underline"] = {
+ "none": 0,
+ "single": 1,
+ "double": 2,
+ "singleAccounting": 33,
+ "doubleAccounting": 34,
+ }[props["underline"]]
+
+ # GH 30107 - xlsxwriter uses different name
+ if props.get("valign") == "center":
+ props["valign"] = "vcenter"
+
+ return props
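+ # Illustrative sketch (comments only): a style dict such as
+ #     {"font": {"bold": True, "color": {"rgb": "FF0000"}}}
+ # converts to roughly {"bold": True, "font_color": "FF0000"}; passing
+ # num_format_str="0.00%" additionally sets {"num_format": "0.00%"}.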
+
+
+class XlsxWriter(ExcelWriter):
+ _engine = "xlsxwriter"
+ _supported_extensions = (".xlsx",)
+
+ def __init__(
+ self,
+ path: FilePath | WriteExcelBuffer | ExcelWriter,
+ engine: str | None = None,
+ date_format: str | None = None,
+ datetime_format: str | None = None,
+ mode: str = "w",
+ storage_options: StorageOptions | None = None,
+ if_sheet_exists: ExcelWriterIfSheetExists | None = None,
+ engine_kwargs: dict[str, Any] | None = None,
+ **kwargs,
+ ) -> None:
+ # Use the xlsxwriter module as the Excel writer.
+ from xlsxwriter import Workbook
+
+ engine_kwargs = combine_kwargs(engine_kwargs, kwargs)
+
+ if mode == "a":
+ raise ValueError("Append mode is not supported with xlsxwriter!")
+
+ super().__init__(
+ path,
+ engine=engine,
+ date_format=date_format,
+ datetime_format=datetime_format,
+ mode=mode,
+ storage_options=storage_options,
+ if_sheet_exists=if_sheet_exists,
+ engine_kwargs=engine_kwargs,
+ )
+
+ try:
+ self._book = Workbook(self._handles.handle, **engine_kwargs)
+ except TypeError:
+ self._handles.handle.close()
+ raise
+
+ @property
+ def book(self):
+ """
+ Book instance of class xlsxwriter.Workbook.
+
+ This attribute can be used to access engine-specific features.
+ """
+ return self._book
+
+ @property
+ def sheets(self) -> dict[str, Any]:
+ result = self.book.sheetnames
+ return result
+
+ def _save(self) -> None:
+ """
+ Save workbook to disk.
+ """
+ self.book.close()
+
+ def _write_cells(
+ self,
+ cells,
+ sheet_name: str | None = None,
+ startrow: int = 0,
+ startcol: int = 0,
+ freeze_panes: tuple[int, int] | None = None,
+ ) -> None:
+ # Write the frame cells using xlsxwriter.
+ sheet_name = self._get_sheet_name(sheet_name)
+
+ wks = self.book.get_worksheet_by_name(sheet_name)
+ if wks is None:
+ wks = self.book.add_worksheet(sheet_name)
+
+ style_dict = {"null": None}
+
+ if validate_freeze_panes(freeze_panes):
+ wks.freeze_panes(*(freeze_panes))
+
+ for cell in cells:
+ val, fmt = self._value_with_fmt(cell.val)
+
+ stylekey = json.dumps(cell.style)
+ if fmt:
+ stylekey += fmt
+
+ if stylekey in style_dict:
+ style = style_dict[stylekey]
+ else:
+ style = self.book.add_format(_XlsxStyler.convert(cell.style, fmt))
+ style_dict[stylekey] = style
+
+ if cell.mergestart is not None and cell.mergeend is not None:
+ wks.merge_range(
+ startrow + cell.row,
+ startcol + cell.col,
+ startrow + cell.mergestart,
+ startcol + cell.mergeend,
+ val,
+ style,
+ )
+ else:
+ wks.write(startrow + cell.row, startcol + cell.col, val, style)
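+# Hedged usage sketch (comments only): this engine is normally selected via
+#     df.to_excel("out.xlsx", engine="xlsxwriter")
+# or picked implicitly when xlsxwriter is installed, since get_default_engine()
+# prefers it over openpyxl for writing .xlsx; "out.xlsx" and df are
+# illustrative names.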
diff --git a/evalkit_eagle/lib/python3.10/site-packages/pandas/io/formats/__pycache__/__init__.cpython-310.pyc b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/formats/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1106969d0e72c9f74e27eae17d5f16b3631ff934
Binary files /dev/null and b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/formats/__pycache__/__init__.cpython-310.pyc differ
diff --git a/evalkit_eagle/lib/python3.10/site-packages/pandas/io/formats/__pycache__/_color_data.cpython-310.pyc b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/formats/__pycache__/_color_data.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9706120724070dfaec19e85adf3cdca22206dbfc
Binary files /dev/null and b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/formats/__pycache__/_color_data.cpython-310.pyc differ
diff --git a/evalkit_eagle/lib/python3.10/site-packages/pandas/io/formats/__pycache__/console.cpython-310.pyc b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/formats/__pycache__/console.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3147f67711daa74de199f82fb67aed04ae8f3365
Binary files /dev/null and b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/formats/__pycache__/console.cpython-310.pyc differ
diff --git a/evalkit_eagle/lib/python3.10/site-packages/pandas/io/formats/__pycache__/css.cpython-310.pyc b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/formats/__pycache__/css.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2b10829b3ce0aa039b769e470b89f260b1eec994
Binary files /dev/null and b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/formats/__pycache__/css.cpython-310.pyc differ
diff --git a/evalkit_eagle/lib/python3.10/site-packages/pandas/io/formats/__pycache__/csvs.cpython-310.pyc b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/formats/__pycache__/csvs.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5600ac1589cfa3d4bc3538287aa4b440c84fa998
Binary files /dev/null and b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/formats/__pycache__/csvs.cpython-310.pyc differ
diff --git a/evalkit_eagle/lib/python3.10/site-packages/pandas/io/formats/__pycache__/excel.cpython-310.pyc b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/formats/__pycache__/excel.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a632a171a14cd97fc92250b256b3b550a63a6ca3
Binary files /dev/null and b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/formats/__pycache__/excel.cpython-310.pyc differ
diff --git a/evalkit_eagle/lib/python3.10/site-packages/pandas/io/formats/__pycache__/format.cpython-310.pyc b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/formats/__pycache__/format.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..08e7b5abbfd35535a35e73e52e6fe69f5470f992
Binary files /dev/null and b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/formats/__pycache__/format.cpython-310.pyc differ
diff --git a/evalkit_eagle/lib/python3.10/site-packages/pandas/io/formats/__pycache__/html.cpython-310.pyc b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/formats/__pycache__/html.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..81024503b460478e8ca0823d2419103122ead976
Binary files /dev/null and b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/formats/__pycache__/html.cpython-310.pyc differ
diff --git a/evalkit_eagle/lib/python3.10/site-packages/pandas/io/formats/__pycache__/info.cpython-310.pyc b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/formats/__pycache__/info.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0662208ade34fdfa2b97e78baf92c5be887eee72
Binary files /dev/null and b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/formats/__pycache__/info.cpython-310.pyc differ
diff --git a/evalkit_eagle/lib/python3.10/site-packages/pandas/io/formats/__pycache__/printing.cpython-310.pyc b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/formats/__pycache__/printing.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a77a59397318f4274a3690490e1c34b7de3e686f
Binary files /dev/null and b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/formats/__pycache__/printing.cpython-310.pyc differ
diff --git a/evalkit_eagle/lib/python3.10/site-packages/pandas/io/formats/__pycache__/string.cpython-310.pyc b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/formats/__pycache__/string.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..994bb7460249374af29390039223bb0ffa714a19
Binary files /dev/null and b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/formats/__pycache__/string.cpython-310.pyc differ
diff --git a/evalkit_eagle/lib/python3.10/site-packages/pandas/io/formats/__pycache__/style_render.cpython-310.pyc b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/formats/__pycache__/style_render.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..eed3fd3605013c58e27bbad6f0321bcc7bac4cd5
Binary files /dev/null and b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/formats/__pycache__/style_render.cpython-310.pyc differ
diff --git a/evalkit_eagle/lib/python3.10/site-packages/pandas/io/formats/__pycache__/xml.cpython-310.pyc b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/formats/__pycache__/xml.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7257a6ed026fe38f7a34eb0c2556ca74106ed3cc
Binary files /dev/null and b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/formats/__pycache__/xml.cpython-310.pyc differ
diff --git a/evalkit_eagle/lib/python3.10/site-packages/pandas/io/formats/_color_data.py b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/formats/_color_data.py
new file mode 100644
index 0000000000000000000000000000000000000000..2e7cb7f29646eb11c0ec83d8a909a8cfd7953182
--- /dev/null
+++ b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/formats/_color_data.py
@@ -0,0 +1,157 @@
+# GH37967: Enable the use of CSS named colors, as defined in
+# matplotlib.colors.CSS4_COLORS, when exporting to Excel.
+# This data has been copied here, instead of being imported from matplotlib,
+# not to have ``to_excel`` methods require matplotlib.
+# source: matplotlib._color_data (3.3.3)
+from __future__ import annotations
+
+CSS4_COLORS = {
+ "aliceblue": "F0F8FF",
+ "antiquewhite": "FAEBD7",
+ "aqua": "00FFFF",
+ "aquamarine": "7FFFD4",
+ "azure": "F0FFFF",
+ "beige": "F5F5DC",
+ "bisque": "FFE4C4",
+ "black": "000000",
+ "blanchedalmond": "FFEBCD",
+ "blue": "0000FF",
+ "blueviolet": "8A2BE2",
+ "brown": "A52A2A",
+ "burlywood": "DEB887",
+ "cadetblue": "5F9EA0",
+ "chartreuse": "7FFF00",
+ "chocolate": "D2691E",
+ "coral": "FF7F50",
+ "cornflowerblue": "6495ED",
+ "cornsilk": "FFF8DC",
+ "crimson": "DC143C",
+ "cyan": "00FFFF",
+ "darkblue": "00008B",
+ "darkcyan": "008B8B",
+ "darkgoldenrod": "B8860B",
+ "darkgray": "A9A9A9",
+ "darkgreen": "006400",
+ "darkgrey": "A9A9A9",
+ "darkkhaki": "BDB76B",
+ "darkmagenta": "8B008B",
+ "darkolivegreen": "556B2F",
+ "darkorange": "FF8C00",
+ "darkorchid": "9932CC",
+ "darkred": "8B0000",
+ "darksalmon": "E9967A",
+ "darkseagreen": "8FBC8F",
+ "darkslateblue": "483D8B",
+ "darkslategray": "2F4F4F",
+ "darkslategrey": "2F4F4F",
+ "darkturquoise": "00CED1",
+ "darkviolet": "9400D3",
+ "deeppink": "FF1493",
+ "deepskyblue": "00BFFF",
+ "dimgray": "696969",
+ "dimgrey": "696969",
+ "dodgerblue": "1E90FF",
+ "firebrick": "B22222",
+ "floralwhite": "FFFAF0",
+ "forestgreen": "228B22",
+ "fuchsia": "FF00FF",
+ "gainsboro": "DCDCDC",
+ "ghostwhite": "F8F8FF",
+ "gold": "FFD700",
+ "goldenrod": "DAA520",
+ "gray": "808080",
+ "green": "008000",
+ "greenyellow": "ADFF2F",
+ "grey": "808080",
+ "honeydew": "F0FFF0",
+ "hotpink": "FF69B4",
+ "indianred": "CD5C5C",
+ "indigo": "4B0082",
+ "ivory": "FFFFF0",
+ "khaki": "F0E68C",
+ "lavender": "E6E6FA",
+ "lavenderblush": "FFF0F5",
+ "lawngreen": "7CFC00",
+ "lemonchiffon": "FFFACD",
+ "lightblue": "ADD8E6",
+ "lightcoral": "F08080",
+ "lightcyan": "E0FFFF",
+ "lightgoldenrodyellow": "FAFAD2",
+ "lightgray": "D3D3D3",
+ "lightgreen": "90EE90",
+ "lightgrey": "D3D3D3",
+ "lightpink": "FFB6C1",
+ "lightsalmon": "FFA07A",
+ "lightseagreen": "20B2AA",
+ "lightskyblue": "87CEFA",
+ "lightslategray": "778899",
+ "lightslategrey": "778899",
+ "lightsteelblue": "B0C4DE",
+ "lightyellow": "FFFFE0",
+ "lime": "00FF00",
+ "limegreen": "32CD32",
+ "linen": "FAF0E6",
+ "magenta": "FF00FF",
+ "maroon": "800000",
+ "mediumaquamarine": "66CDAA",
+ "mediumblue": "0000CD",
+ "mediumorchid": "BA55D3",
+ "mediumpurple": "9370DB",
+ "mediumseagreen": "3CB371",
+ "mediumslateblue": "7B68EE",
+ "mediumspringgreen": "00FA9A",
+ "mediumturquoise": "48D1CC",
+ "mediumvioletred": "C71585",
+ "midnightblue": "191970",
+ "mintcream": "F5FFFA",
+ "mistyrose": "FFE4E1",
+ "moccasin": "FFE4B5",
+ "navajowhite": "FFDEAD",
+ "navy": "000080",
+ "oldlace": "FDF5E6",
+ "olive": "808000",
+ "olivedrab": "6B8E23",
+ "orange": "FFA500",
+ "orangered": "FF4500",
+ "orchid": "DA70D6",
+ "palegoldenrod": "EEE8AA",
+ "palegreen": "98FB98",
+ "paleturquoise": "AFEEEE",
+ "palevioletred": "DB7093",
+ "papayawhip": "FFEFD5",
+ "peachpuff": "FFDAB9",
+ "peru": "CD853F",
+ "pink": "FFC0CB",
+ "plum": "DDA0DD",
+ "powderblue": "B0E0E6",
+ "purple": "800080",
+ "rebeccapurple": "663399",
+ "red": "FF0000",
+ "rosybrown": "BC8F8F",
+ "royalblue": "4169E1",
+ "saddlebrown": "8B4513",
+ "salmon": "FA8072",
+ "sandybrown": "F4A460",
+ "seagreen": "2E8B57",
+ "seashell": "FFF5EE",
+ "sienna": "A0522D",
+ "silver": "C0C0C0",
+ "skyblue": "87CEEB",
+ "slateblue": "6A5ACD",
+ "slategray": "708090",
+ "slategrey": "708090",
+ "snow": "FFFAFA",
+ "springgreen": "00FF7F",
+ "steelblue": "4682B4",
+ "tan": "D2B48C",
+ "teal": "008080",
+ "thistle": "D8BFD8",
+ "tomato": "FF6347",
+ "turquoise": "40E0D0",
+ "violet": "EE82EE",
+ "wheat": "F5DEB3",
+ "white": "FFFFFF",
+ "whitesmoke": "F5F5F5",
+ "yellow": "FFFF00",
+ "yellowgreen": "9ACD32",
+}
diff --git a/evalkit_eagle/lib/python3.10/site-packages/pandas/io/formats/templates/html.tpl b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/formats/templates/html.tpl
new file mode 100644
index 0000000000000000000000000000000000000000..8c63be3ad788a8abddf3588b2b9dd6d6126f5df3
--- /dev/null
+++ b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/formats/templates/html.tpl
@@ -0,0 +1,16 @@
+{# Update the html_style/table_structure.html documentation too #}
+{% if doctype_html %}
+<!DOCTYPE html>
+<html>
+<head>
+<meta charset="{{encoding}}">
+{% if not exclude_styles %}{% include html_style_tpl %}{% endif %}
+</head>
+<body>
+{% include html_table_tpl %}
+</body>
+</html>
+{% elif not doctype_html %}
+{% if not exclude_styles %}{% include html_style_tpl %}{% endif %}
+{% include html_table_tpl %}
+{% endif %}
diff --git a/evalkit_eagle/lib/python3.10/site-packages/pandas/io/formats/templates/html_style.tpl b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/formats/templates/html_style.tpl
new file mode 100644
index 0000000000000000000000000000000000000000..5c3fcd97f51bbec263399922579420dfa9ceef9c
--- /dev/null
+++ b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/formats/templates/html_style.tpl
@@ -0,0 +1,26 @@
+{%- block before_style -%}{%- endblock before_style -%}
+{% block style %}
+
+{% endblock style %}
diff --git a/evalkit_eagle/lib/python3.10/site-packages/pandas/io/formats/templates/html_table.tpl b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/formats/templates/html_table.tpl
new file mode 100644
index 0000000000000000000000000000000000000000..17118d2bb21ccd185780d44c83a5242b12bd2a0d
--- /dev/null
+++ b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/formats/templates/html_table.tpl
@@ -0,0 +1,63 @@
+{% block before_table %}{% endblock before_table %}
+{% block table %}
+{% if exclude_styles %}
+<table>
+{% else %}
+<table id="T_{{uuid}}"{% if table_attributes %} {{table_attributes}}{% endif %}>
+{% endif %}
+{% block caption %}
+{% if caption and caption is string %}
+ <caption>{{caption}}</caption>
+{% elif caption and caption is sequence %}
+ <caption>{{caption[0]}}</caption>
+{% endif %}
+{% endblock caption %}
+{% block thead %}
+<thead>
+{% block before_head_rows %}{% endblock %}
+{% for r in head %}
+{% block head_tr scoped %}
+<tr>
+{% if exclude_styles %}
+{% for c in r %}
+{% if c.is_visible != False %}
+ <{{c.type}} {{c.attributes}}>{{c.display_value}}</{{c.type}}>
+{% endif %}
+{% endfor %}
+{% else %}
+{% for c in r %}
+{% if c.is_visible != False %}
+ <{{c.type}} {%- if c.id is defined %} id="T_{{uuid}}_{{c.id}}" {%- endif %} class="{{c.class}}" {{c.attributes}}>{{c.display_value}}</{{c.type}}>
+{% endif %}
+{% endfor %}
+{% endif %}
+</tr>
+{% endblock head_tr %}
+{% endfor %}
+{% block after_head_rows %}{% endblock %}
+</thead>
+{% endblock thead %}
+{% block tbody %}
+<tbody>
+{% block before_rows %}{% endblock before_rows %}
+{% for r in body %}
+{% block tr scoped %}
+<tr>
+{% if exclude_styles %}
+{% for c in r %}{% if c.is_visible != False %}
+ <{{c.type}} {{c.attributes}}>{{c.display_value}}</{{c.type}}>
+{% endif %}{% endfor %}
+{% else %}
+{% for c in r %}{% if c.is_visible != False %}
+ <{{c.type}} {%- if c.id is defined %} id="T_{{uuid}}_{{c.id}}" {%- endif %} class="{{c.class}}" {{c.attributes}}>{{c.display_value}}</{{c.type}}>
+{% endif %}{% endfor %}
+{% endif %}
+</tr>
+{% endblock tr %}
+{% endfor %}
+{% block after_rows %}{% endblock after_rows %}
+</tbody>
+{% endblock tbody %}
+</table>
+{% endblock table %}
+{% block after_table %}{% endblock after_table %}
diff --git a/evalkit_eagle/lib/python3.10/site-packages/pandas/io/formats/templates/latex.tpl b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/formats/templates/latex.tpl
new file mode 100644
index 0000000000000000000000000000000000000000..ae341bbc29823489d9d15e354fae0ce2e10a046d
--- /dev/null
+++ b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/formats/templates/latex.tpl
@@ -0,0 +1,5 @@
+{% if environment == "longtable" %}
+{% include "latex_longtable.tpl" %}
+{% else %}
+{% include "latex_table.tpl" %}
+{% endif %}
diff --git a/evalkit_eagle/lib/python3.10/site-packages/pandas/io/formats/templates/latex_longtable.tpl b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/formats/templates/latex_longtable.tpl
new file mode 100644
index 0000000000000000000000000000000000000000..b97843eeb918da1b12f6f2edd585c8e42d6b7bb5
--- /dev/null
+++ b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/formats/templates/latex_longtable.tpl
@@ -0,0 +1,82 @@
+\begin{longtable}
+{%- set position = parse_table(table_styles, 'position') %}
+{%- if position is not none %}
+[{{position}}]
+{%- endif %}
+{%- set column_format = parse_table(table_styles, 'column_format') %}
+{% raw %}{{% endraw %}{{column_format}}{% raw %}}{% endraw %}
+
+{% for style in table_styles %}
+{% if style['selector'] not in ['position', 'position_float', 'caption', 'toprule', 'midrule', 'bottomrule', 'column_format', 'label'] %}
+\{{style['selector']}}{{parse_table(table_styles, style['selector'])}}
+{% endif %}
+{% endfor %}
+{% if caption and caption is string %}
+\caption{% raw %}{{% endraw %}{{caption}}{% raw %}}{% endraw %}
+{%- set label = parse_table(table_styles, 'label') %}
+{%- if label is not none %}
+ \label{{label}}
+{%- endif %} \\
+{% elif caption and caption is sequence %}
+\caption[{{caption[1]}}]{% raw %}{{% endraw %}{{caption[0]}}{% raw %}}{% endraw %}
+{%- set label = parse_table(table_styles, 'label') %}
+{%- if label is not none %}
+ \label{{label}}
+{%- endif %} \\
+{% else %}
+{%- set label = parse_table(table_styles, 'label') %}
+{%- if label is not none %}
+\label{{label}} \\
+{% endif %}
+{% endif %}
+{% set toprule = parse_table(table_styles, 'toprule') %}
+{% if toprule is not none %}
+\{{toprule}}
+{% endif %}
+{% for row in head %}
+{% for c in row %}{%- if not loop.first %} & {% endif %}{{parse_header(c, multirow_align, multicol_align, siunitx)}}{% endfor %} \\
+{% endfor %}
+{% set midrule = parse_table(table_styles, 'midrule') %}
+{% if midrule is not none %}
+\{{midrule}}
+{% endif %}
+\endfirsthead
+{% if caption and caption is string %}
+\caption[]{% raw %}{{% endraw %}{{caption}}{% raw %}}{% endraw %} \\
+{% elif caption and caption is sequence %}
+\caption[]{% raw %}{{% endraw %}{{caption[0]}}{% raw %}}{% endraw %} \\
+{% endif %}
+{% if toprule is not none %}
+\{{toprule}}
+{% endif %}
+{% for row in head %}
+{% for c in row %}{%- if not loop.first %} & {% endif %}{{parse_header(c, multirow_align, multicol_align, siunitx)}}{% endfor %} \\
+{% endfor %}
+{% if midrule is not none %}
+\{{midrule}}
+{% endif %}
+\endhead
+{% if midrule is not none %}
+\{{midrule}}
+{% endif %}
+\multicolumn{% raw %}{{% endraw %}{{body[0]|length}}{% raw %}}{% endraw %}{r}{Continued on next page} \\
+{% if midrule is not none %}
+\{{midrule}}
+{% endif %}
+\endfoot
+{% set bottomrule = parse_table(table_styles, 'bottomrule') %}
+{% if bottomrule is not none %}
+\{{bottomrule}}
+{% endif %}
+\endlastfoot
+{% for row in body %}
+{% for c in row %}{% if not loop.first %} & {% endif %}
+ {%- if c.type == 'th' %}{{parse_header(c, multirow_align, multicol_align)}}{% else %}{{parse_cell(c.cellstyle, c.display_value, convert_css)}}{% endif %}
+{%- endfor %} \\
+{% if clines and clines[loop.index] | length > 0 %}
+ {%- for cline in clines[loop.index] %}{% if not loop.first %} {% endif %}{{ cline }}{% endfor %}
+
+{% endif %}
+{% endfor %}
+\end{longtable}
+{% raw %}{% endraw %}
diff --git a/evalkit_eagle/lib/python3.10/site-packages/pandas/io/formats/templates/latex_table.tpl b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/formats/templates/latex_table.tpl
new file mode 100644
index 0000000000000000000000000000000000000000..7858cb4c945534a4d21cd4474460fd1abcf01f82
--- /dev/null
+++ b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/formats/templates/latex_table.tpl
@@ -0,0 +1,57 @@
+{% if environment or parse_wrap(table_styles, caption) %}
+\begin{% raw %}{{% endraw %}{{environment if environment else "table"}}{% raw %}}{% endraw %}
+{%- set position = parse_table(table_styles, 'position') %}
+{%- if position is not none %}
+[{{position}}]
+{%- endif %}
+
+{% set position_float = parse_table(table_styles, 'position_float') %}
+{% if position_float is not none%}
+\{{position_float}}
+{% endif %}
+{% if caption and caption is string %}
+\caption{% raw %}{{% endraw %}{{caption}}{% raw %}}{% endraw %}
+
+{% elif caption and caption is sequence %}
+\caption[{{caption[1]}}]{% raw %}{{% endraw %}{{caption[0]}}{% raw %}}{% endraw %}
+
+{% endif %}
+{% for style in table_styles %}
+{% if style['selector'] not in ['position', 'position_float', 'caption', 'toprule', 'midrule', 'bottomrule', 'column_format'] %}
+\{{style['selector']}}{{parse_table(table_styles, style['selector'])}}
+{% endif %}
+{% endfor %}
+{% endif %}
+\begin{tabular}
+{%- set column_format = parse_table(table_styles, 'column_format') %}
+{% raw %}{{% endraw %}{{column_format}}{% raw %}}{% endraw %}
+
+{% set toprule = parse_table(table_styles, 'toprule') %}
+{% if toprule is not none %}
+\{{toprule}}
+{% endif %}
+{% for row in head %}
+{% for c in row %}{%- if not loop.first %} & {% endif %}{{parse_header(c, multirow_align, multicol_align, siunitx, convert_css)}}{% endfor %} \\
+{% endfor %}
+{% set midrule = parse_table(table_styles, 'midrule') %}
+{% if midrule is not none %}
+\{{midrule}}
+{% endif %}
+{% for row in body %}
+{% for c in row %}{% if not loop.first %} & {% endif %}
+ {%- if c.type == 'th' %}{{parse_header(c, multirow_align, multicol_align, False, convert_css)}}{% else %}{{parse_cell(c.cellstyle, c.display_value, convert_css)}}{% endif %}
+{%- endfor %} \\
+{% if clines and clines[loop.index] | length > 0 %}
+ {%- for cline in clines[loop.index] %}{% if not loop.first %} {% endif %}{{ cline }}{% endfor %}
+
+{% endif %}
+{% endfor %}
+{% set bottomrule = parse_table(table_styles, 'bottomrule') %}
+{% if bottomrule is not none %}
+\{{bottomrule}}
+{% endif %}
+\end{tabular}
+{% if environment or parse_wrap(table_styles, caption) %}
+\end{% raw %}{{% endraw %}{{environment if environment else "table"}}{% raw %}}{% endraw %}
+
+{% endif %}
diff --git a/evalkit_eagle/lib/python3.10/site-packages/pandas/io/formats/templates/string.tpl b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/formats/templates/string.tpl
new file mode 100644
index 0000000000000000000000000000000000000000..06aeb2b4e413c61a912b535056c19c794d4b9c85
--- /dev/null
+++ b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/formats/templates/string.tpl
@@ -0,0 +1,12 @@
+{% for r in head %}
+{% for c in r %}{% if c["is_visible"] %}
+{{ c["display_value"] }}{% if not loop.last %}{{ delimiter }}{% endif %}
+{% endif %}{% endfor %}
+
+{% endfor %}
+{% for r in body %}
+{% for c in r %}{% if c["is_visible"] %}
+{{ c["display_value"] }}{% if not loop.last %}{{ delimiter }}{% endif %}
+{% endif %}{% endfor %}
+
+{% endfor %}
diff --git a/evalkit_eagle/lib/python3.10/site-packages/pandas/io/parsers/__init__.py b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/parsers/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..ff11968db15f0f7c6057a46c252a91daee7b9cd9
--- /dev/null
+++ b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/parsers/__init__.py
@@ -0,0 +1,9 @@
+from pandas.io.parsers.readers import (
+ TextFileReader,
+ TextParser,
+ read_csv,
+ read_fwf,
+ read_table,
+)
+
+__all__ = ["TextFileReader", "TextParser", "read_csv", "read_fwf", "read_table"]
diff --git a/evalkit_eagle/lib/python3.10/site-packages/pandas/io/parsers/__pycache__/__init__.cpython-310.pyc b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/parsers/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..aca0d6a323506bcfb9d5bf8ced076298310f2afd
Binary files /dev/null and b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/parsers/__pycache__/__init__.cpython-310.pyc differ
diff --git a/evalkit_eagle/lib/python3.10/site-packages/pandas/io/parsers/__pycache__/arrow_parser_wrapper.cpython-310.pyc b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/parsers/__pycache__/arrow_parser_wrapper.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9f0544229f3501fe37a6292570d1fa1975fd30d5
Binary files /dev/null and b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/parsers/__pycache__/arrow_parser_wrapper.cpython-310.pyc differ
diff --git a/evalkit_eagle/lib/python3.10/site-packages/pandas/io/parsers/__pycache__/base_parser.cpython-310.pyc b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/parsers/__pycache__/base_parser.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c429916a6264a481dcfbb9f63aef5285b103c3a6
Binary files /dev/null and b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/parsers/__pycache__/base_parser.cpython-310.pyc differ
diff --git a/evalkit_eagle/lib/python3.10/site-packages/pandas/io/parsers/__pycache__/c_parser_wrapper.cpython-310.pyc b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/parsers/__pycache__/c_parser_wrapper.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..60ac6af10fd8ba5195dd3fd73140ba2f0c6f354b
Binary files /dev/null and b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/parsers/__pycache__/c_parser_wrapper.cpython-310.pyc differ
diff --git a/evalkit_eagle/lib/python3.10/site-packages/pandas/io/parsers/__pycache__/python_parser.cpython-310.pyc b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/parsers/__pycache__/python_parser.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..90790fa2d848c994a61690426554b7d9fc9660e9
Binary files /dev/null and b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/parsers/__pycache__/python_parser.cpython-310.pyc differ
diff --git a/evalkit_eagle/lib/python3.10/site-packages/pandas/io/parsers/__pycache__/readers.cpython-310.pyc b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/parsers/__pycache__/readers.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3e7b4c8a40f4be0077f65a9335ccccc143f54931
Binary files /dev/null and b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/parsers/__pycache__/readers.cpython-310.pyc differ
diff --git a/evalkit_eagle/lib/python3.10/site-packages/pandas/io/parsers/arrow_parser_wrapper.py b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/parsers/arrow_parser_wrapper.py
new file mode 100644
index 0000000000000000000000000000000000000000..890b22154648e6b12d636c5df3595d105ff02ac9
--- /dev/null
+++ b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/parsers/arrow_parser_wrapper.py
@@ -0,0 +1,303 @@
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+import warnings
+
+from pandas._config import using_pyarrow_string_dtype
+
+from pandas._libs import lib
+from pandas.compat._optional import import_optional_dependency
+from pandas.errors import (
+ ParserError,
+ ParserWarning,
+)
+from pandas.util._exceptions import find_stack_level
+
+from pandas.core.dtypes.common import pandas_dtype
+from pandas.core.dtypes.inference import is_integer
+
+import pandas as pd
+from pandas import DataFrame
+
+from pandas.io._util import (
+ _arrow_dtype_mapping,
+ arrow_string_types_mapper,
+)
+from pandas.io.parsers.base_parser import ParserBase
+
+if TYPE_CHECKING:
+ from pandas._typing import ReadBuffer
+
+
+class ArrowParserWrapper(ParserBase):
+ """
+ Wrapper for the pyarrow engine for read_csv()
+ """
+
+ def __init__(self, src: ReadBuffer[bytes], **kwds) -> None:
+ super().__init__(kwds)
+ self.kwds = kwds
+ self.src = src
+
+ self._parse_kwds()
+
+ def _parse_kwds(self) -> None:
+ """
+ Validates keywords before passing to pyarrow.
+ """
+ encoding: str | None = self.kwds.get("encoding")
+ self.encoding = "utf-8" if encoding is None else encoding
+
+ na_values = self.kwds["na_values"]
+ if isinstance(na_values, dict):
+ raise ValueError(
+ "The pyarrow engine doesn't support passing a dict for na_values"
+ )
+ self.na_values = list(self.kwds["na_values"])
+
+ def _get_pyarrow_options(self) -> None:
+ """
+ Rename some arguments to pass to pyarrow
+ """
+ mapping = {
+ "usecols": "include_columns",
+ "na_values": "null_values",
+ "escapechar": "escape_char",
+ "skip_blank_lines": "ignore_empty_lines",
+ "decimal": "decimal_point",
+ "quotechar": "quote_char",
+ }
+ for pandas_name, pyarrow_name in mapping.items():
+ if pandas_name in self.kwds and self.kwds.get(pandas_name) is not None:
+ self.kwds[pyarrow_name] = self.kwds.pop(pandas_name)
+
+ # Date format handling
+ # If we get a string, we need to convert it into a list for pyarrow
+ # If we get a dict, we want to parse those separately
+ date_format = self.date_format
+ if isinstance(date_format, str):
+ date_format = [date_format]
+ else:
+ # In case of dict, we don't want to propagate through, so
+ # just set to pyarrow default of None
+
+ # Ideally, in the future we would disable pyarrow dtype inference (reading
+ # everything in as strings) to prevent misreads.
+ date_format = None
+ self.kwds["timestamp_parsers"] = date_format
+
+ self.parse_options = {
+ option_name: option_value
+ for option_name, option_value in self.kwds.items()
+ if option_value is not None
+ and option_name
+ in ("delimiter", "quote_char", "escape_char", "ignore_empty_lines")
+ }
+
+ on_bad_lines = self.kwds.get("on_bad_lines")
+ if on_bad_lines is not None:
+ if callable(on_bad_lines):
+ self.parse_options["invalid_row_handler"] = on_bad_lines
+ elif on_bad_lines == ParserBase.BadLineHandleMethod.ERROR:
+ self.parse_options[
+ "invalid_row_handler"
+ ] = None # PyArrow raises an exception by default
+ elif on_bad_lines == ParserBase.BadLineHandleMethod.WARN:
+
+ def handle_warning(invalid_row) -> str:
+ warnings.warn(
+ f"Expected {invalid_row.expected_columns} columns, but found "
+ f"{invalid_row.actual_columns}: {invalid_row.text}",
+ ParserWarning,
+ stacklevel=find_stack_level(),
+ )
+ return "skip"
+
+ self.parse_options["invalid_row_handler"] = handle_warning
+ elif on_bad_lines == ParserBase.BadLineHandleMethod.SKIP:
+ self.parse_options["invalid_row_handler"] = lambda _: "skip"
+
+ self.convert_options = {
+ option_name: option_value
+ for option_name, option_value in self.kwds.items()
+ if option_value is not None
+ and option_name
+ in (
+ "include_columns",
+ "null_values",
+ "true_values",
+ "false_values",
+ "decimal_point",
+ "timestamp_parsers",
+ )
+ }
+ self.convert_options["strings_can_be_null"] = "" in self.kwds["null_values"]
+ # autogenerated column names are prefixed with 'f' in pyarrow.csv
+ if self.header is None and "include_columns" in self.convert_options:
+ self.convert_options["include_columns"] = [
+ f"f{n}" for n in self.convert_options["include_columns"]
+ ]
+
+ self.read_options = {
+ "autogenerate_column_names": self.header is None,
+ "skip_rows": self.header
+ if self.header is not None
+ else self.kwds["skiprows"],
+ "encoding": self.encoding,
+ }
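+ # Illustrative (comments only): pandas keywords are renamed above, e.g.
+ # na_values=["NA"] becomes null_values=["NA"] (routed into ConvertOptions)
+ # and quotechar="'" becomes quote_char="'" (routed into ParseOptions).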
+
+ def _finalize_pandas_output(self, frame: DataFrame) -> DataFrame:
+ """
+ Processes data read in based on kwargs.
+
+ Parameters
+ ----------
+ frame: DataFrame
+ The DataFrame to process.
+
+ Returns
+ -------
+ DataFrame
+ The processed DataFrame.
+ """
+ num_cols = len(frame.columns)
+ multi_index_named = True
+ if self.header is None:
+ if self.names is None:
+ if self.header is None:
+ self.names = range(num_cols)
+ if len(self.names) != num_cols:
+ # usecols is passed through to pyarrow; we only handle index_col here.
+ # The only way self.names can differ in length from the number of columns
+ # is if we have an integer index_col, so pad the names (they will be
+ # removed anyway) to the expected length.
+ self.names = list(range(num_cols - len(self.names))) + self.names
+ multi_index_named = False
+ frame.columns = self.names
+ # we only need the frame not the names
+ _, frame = self._do_date_conversions(frame.columns, frame)
+ if self.index_col is not None:
+ index_to_set = self.index_col.copy()
+ for i, item in enumerate(self.index_col):
+ if is_integer(item):
+ index_to_set[i] = frame.columns[item]
+ # String case
+ elif item not in frame.columns:
+ raise ValueError(f"Index {item} invalid")
+
+ # Process dtype for index_col and drop from dtypes
+ if self.dtype is not None:
+ key, new_dtype = (
+ (item, self.dtype.get(item))
+ if self.dtype.get(item) is not None
+ else (frame.columns[item], self.dtype.get(frame.columns[item]))
+ )
+ if new_dtype is not None:
+ frame[key] = frame[key].astype(new_dtype)
+ del self.dtype[key]
+
+ frame.set_index(index_to_set, drop=True, inplace=True)
+ # Clear names if headerless and no name given
+ if self.header is None and not multi_index_named:
+ frame.index.names = [None] * len(frame.index.names)
+
+ if self.dtype is not None:
+ # Ignore non-existent columns from dtype mapping
+ # like other parsers do
+ if isinstance(self.dtype, dict):
+ self.dtype = {
+ k: pandas_dtype(v)
+ for k, v in self.dtype.items()
+ if k in frame.columns
+ }
+ else:
+ self.dtype = pandas_dtype(self.dtype)
+ try:
+ frame = frame.astype(self.dtype)
+ except TypeError as e:
+ # GH#44901 reraise to keep api consistent
+ raise ValueError(e)
+ return frame
+
+ def _validate_usecols(self, usecols) -> None:
+ if lib.is_list_like(usecols) and not all(isinstance(x, str) for x in usecols):
+ raise ValueError(
+ "The pyarrow engine does not allow 'usecols' to be integer "
+ "column positions. Pass a list of string column names instead."
+ )
+ elif callable(usecols):
+ raise ValueError(
+ "The pyarrow engine does not allow 'usecols' to be a callable."
+ )
+
+ def read(self) -> DataFrame:
+ """
+ Reads the contents of a CSV file into a DataFrame and
+ processes it according to the kwargs passed in the
+ constructor.
+
+ Returns
+ -------
+ DataFrame
+ The DataFrame created from the CSV file.
+ """
+ pa = import_optional_dependency("pyarrow")
+ pyarrow_csv = import_optional_dependency("pyarrow.csv")
+ self._get_pyarrow_options()
+
+ try:
+ convert_options = pyarrow_csv.ConvertOptions(**self.convert_options)
+ except TypeError:
+ include = self.convert_options.get("include_columns", None)
+ if include is not None:
+ self._validate_usecols(include)
+
+ nulls = self.convert_options.get("null_values", set())
+ if not lib.is_list_like(nulls) or not all(
+ isinstance(x, str) for x in nulls
+ ):
+ raise TypeError(
+ "The 'pyarrow' engine requires all na_values to be strings"
+ )
+
+ raise
+
+ try:
+ table = pyarrow_csv.read_csv(
+ self.src,
+ read_options=pyarrow_csv.ReadOptions(**self.read_options),
+ parse_options=pyarrow_csv.ParseOptions(**self.parse_options),
+ convert_options=convert_options,
+ )
+ except pa.ArrowInvalid as e:
+ raise ParserError(e) from e
+
+ dtype_backend = self.kwds["dtype_backend"]
+
+ # Convert all pa.null() cols -> float64 (non nullable)
+ # else Int64 (nullable case, see below)
+ if dtype_backend is lib.no_default:
+ new_schema = table.schema
+ new_type = pa.float64()
+ for i, arrow_type in enumerate(table.schema.types):
+ if pa.types.is_null(arrow_type):
+ new_schema = new_schema.set(
+ i, new_schema.field(i).with_type(new_type)
+ )
+
+ table = table.cast(new_schema)
+
+ if dtype_backend == "pyarrow":
+ frame = table.to_pandas(types_mapper=pd.ArrowDtype)
+ elif dtype_backend == "numpy_nullable":
+ # Modify the default mapping to also
+ # map null to Int64 (to match other engines)
+ dtype_mapping = _arrow_dtype_mapping()
+ dtype_mapping[pa.null()] = pd.Int64Dtype()
+ frame = table.to_pandas(types_mapper=dtype_mapping.get)
+ elif using_pyarrow_string_dtype():
+ frame = table.to_pandas(types_mapper=arrow_string_types_mapper())
+
+ else:
+ frame = table.to_pandas()
+ return self._finalize_pandas_output(frame)
diff --git a/evalkit_eagle/lib/python3.10/site-packages/pandas/io/parsers/base_parser.py b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/parsers/base_parser.py
new file mode 100644
index 0000000000000000000000000000000000000000..09f0f2af8e5c6b55bff173ff74cd290fdf61cbae
--- /dev/null
+++ b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/parsers/base_parser.py
@@ -0,0 +1,1448 @@
+from __future__ import annotations
+
+from collections import defaultdict
+from copy import copy
+import csv
+import datetime
+from enum import Enum
+import itertools
+from typing import (
+ TYPE_CHECKING,
+ Any,
+ Callable,
+ cast,
+ final,
+ overload,
+)
+import warnings
+
+import numpy as np
+
+from pandas._libs import (
+ lib,
+ parsers,
+)
+import pandas._libs.ops as libops
+from pandas._libs.parsers import STR_NA_VALUES
+from pandas._libs.tslibs import parsing
+from pandas.compat._optional import import_optional_dependency
+from pandas.errors import (
+ ParserError,
+ ParserWarning,
+)
+from pandas.util._exceptions import find_stack_level
+
+from pandas.core.dtypes.astype import astype_array
+from pandas.core.dtypes.common import (
+ ensure_object,
+ is_bool_dtype,
+ is_dict_like,
+ is_extension_array_dtype,
+ is_float_dtype,
+ is_integer,
+ is_integer_dtype,
+ is_list_like,
+ is_object_dtype,
+ is_scalar,
+ is_string_dtype,
+ pandas_dtype,
+)
+from pandas.core.dtypes.dtypes import (
+ CategoricalDtype,
+ ExtensionDtype,
+)
+from pandas.core.dtypes.missing import isna
+
+from pandas import (
+ ArrowDtype,
+ DataFrame,
+ DatetimeIndex,
+ StringDtype,
+ concat,
+)
+from pandas.core import algorithms
+from pandas.core.arrays import (
+ ArrowExtensionArray,
+ BaseMaskedArray,
+ BooleanArray,
+ Categorical,
+ ExtensionArray,
+ FloatingArray,
+ IntegerArray,
+)
+from pandas.core.arrays.boolean import BooleanDtype
+from pandas.core.indexes.api import (
+ Index,
+ MultiIndex,
+ default_index,
+ ensure_index_from_sequences,
+)
+from pandas.core.series import Series
+from pandas.core.tools import datetimes as tools
+
+from pandas.io.common import is_potential_multi_index
+
+if TYPE_CHECKING:
+ from collections.abc import (
+ Hashable,
+ Iterable,
+ Mapping,
+ Sequence,
+ )
+
+ from pandas._typing import (
+ ArrayLike,
+ DtypeArg,
+ DtypeObj,
+ Scalar,
+ )
+
+
+class ParserBase:
+ class BadLineHandleMethod(Enum):
+ ERROR = 0
+ WARN = 1
+ SKIP = 2
+
+ _implicit_index: bool
+ _first_chunk: bool
+ keep_default_na: bool
+ dayfirst: bool
+ cache_dates: bool
+ keep_date_col: bool
+ usecols_dtype: str | None
+
+ def __init__(self, kwds) -> None:
+ self._implicit_index = False
+
+ self.names = kwds.get("names")
+ self.orig_names: Sequence[Hashable] | None = None
+
+ self.index_col = kwds.get("index_col", None)
+ self.unnamed_cols: set = set()
+ self.index_names: Sequence[Hashable] | None = None
+ self.col_names: Sequence[Hashable] | None = None
+
+ self.parse_dates = _validate_parse_dates_arg(kwds.pop("parse_dates", False))
+ self._parse_date_cols: Iterable = []
+ self.date_parser = kwds.pop("date_parser", lib.no_default)
+ self.date_format = kwds.pop("date_format", None)
+ self.dayfirst = kwds.pop("dayfirst", False)
+ self.keep_date_col = kwds.pop("keep_date_col", False)
+
+ self.na_values = kwds.get("na_values")
+ self.na_fvalues = kwds.get("na_fvalues")
+ self.na_filter = kwds.get("na_filter", False)
+ self.keep_default_na = kwds.get("keep_default_na", True)
+
+ self.dtype = copy(kwds.get("dtype", None))
+ self.converters = kwds.get("converters")
+ self.dtype_backend = kwds.get("dtype_backend")
+
+ self.true_values = kwds.get("true_values")
+ self.false_values = kwds.get("false_values")
+ self.cache_dates = kwds.pop("cache_dates", True)
+
+ self._date_conv = _make_date_converter(
+ date_parser=self.date_parser,
+ date_format=self.date_format,
+ dayfirst=self.dayfirst,
+ cache_dates=self.cache_dates,
+ )
+
+ # validate header options for mi
+ self.header = kwds.get("header")
+ if is_list_like(self.header, allow_sets=False):
+ if kwds.get("usecols"):
+ raise ValueError(
+ "cannot specify usecols when specifying a multi-index header"
+ )
+ if kwds.get("names"):
+ raise ValueError(
+ "cannot specify names when specifying a multi-index header"
+ )
+
+ # validate index_col that only contains integers
+ if self.index_col is not None:
+ # In this case we can pin down index_col as list[int]
+ if is_integer(self.index_col):
+ self.index_col = [self.index_col]
+ elif not (
+ is_list_like(self.index_col, allow_sets=False)
+ and all(map(is_integer, self.index_col))
+ ):
+ raise ValueError(
+ "index_col must only contain row numbers "
+ "when specifying a multi-index header"
+ )
+ else:
+ self.index_col = list(self.index_col)
+
+ self._name_processed = False
+
+ self._first_chunk = True
+
+ self.usecols, self.usecols_dtype = self._validate_usecols_arg(kwds["usecols"])
+
+ # Fall back to error to pass a sketchy test (test_override_set_noconvert_columns)
+ # Normally, this arg would get pre-processed earlier on
+ self.on_bad_lines = kwds.get("on_bad_lines", self.BadLineHandleMethod.ERROR)
+
+ def _validate_parse_dates_presence(self, columns: Sequence[Hashable]) -> Iterable:
+ """
+ Check if parse_dates are in columns.
+
+ If the user has provided names for parse_dates, check if those columns
+ are available.
+
+ Parameters
+ ----------
+ columns : list
+ List of names of the dataframe.
+
+ Returns
+ -------
+ The names of the columns that will be parsed later if a dict or list
+ is given as the specification.
+
+ Raises
+ ------
+ ValueError
+ If a column given in 'parse_dates' is not in the dataframe.
+
+ """
+ cols_needed: Iterable
+ if is_dict_like(self.parse_dates):
+ cols_needed = itertools.chain(*self.parse_dates.values())
+ elif is_list_like(self.parse_dates):
+ # a column in parse_dates could be represented
+ # ColReference = Union[int, str]
+ # DateGroups = List[ColReference]
+ # ParseDates = Union[DateGroups, List[DateGroups],
+ # Dict[ColReference, DateGroups]]
+ cols_needed = itertools.chain.from_iterable(
+ col if is_list_like(col) and not isinstance(col, tuple) else [col]
+ for col in self.parse_dates
+ )
+ else:
+ cols_needed = []
+
+ cols_needed = list(cols_needed)
+
+ # get only the columns that are referenced by name (str), not by index
+ missing_cols = ", ".join(
+ sorted(
+ {
+ col
+ for col in cols_needed
+ if isinstance(col, str) and col not in columns
+ }
+ )
+ )
+ if missing_cols:
+ raise ValueError(
+ f"Missing column provided to 'parse_dates': '{missing_cols}'"
+ )
+ # Convert positions to actual column names
+ return [
+ col if (isinstance(col, str) or col in columns) else columns[col]
+ for col in cols_needed
+ ]
+
+ def close(self) -> None:
+ pass
+
+ @final
+ @property
+ def _has_complex_date_col(self) -> bool:
+ return isinstance(self.parse_dates, dict) or (
+ isinstance(self.parse_dates, list)
+ and len(self.parse_dates) > 0
+ and isinstance(self.parse_dates[0], list)
+ )
+
+ @final
+ def _should_parse_dates(self, i: int) -> bool:
+ if lib.is_bool(self.parse_dates):
+ return bool(self.parse_dates)
+ else:
+ if self.index_names is not None:
+ name = self.index_names[i]
+ else:
+ name = None
+ j = i if self.index_col is None else self.index_col[i]
+
+ return (j in self.parse_dates) or (
+ name is not None and name in self.parse_dates
+ )
+
+ @final
+ def _extract_multi_indexer_columns(
+ self,
+ header,
+ index_names: Sequence[Hashable] | None,
+ passed_names: bool = False,
+ ) -> tuple[
+ Sequence[Hashable], Sequence[Hashable] | None, Sequence[Hashable] | None, bool
+ ]:
+ """
+ Extract and return the names, index_names, col_names if the column
+ names are a MultiIndex.
+
+ Parameters
+ ----------
+ header: list of lists
+ The header rows
+ index_names: list, optional
+ The names of the future index
+ passed_names: bool, default False
+ A flag specifying if names were passed
+
+ """
+ if len(header) < 2:
+ return header[0], index_names, None, passed_names
+
+ # the names are the tuples of the header that are not the index cols
+ # 0 is the name of the index, assuming index_col is a list of column
+ # numbers
+ ic = self.index_col
+ if ic is None:
+ ic = []
+
+ if not isinstance(ic, (list, tuple, np.ndarray)):
+ ic = [ic]
+ sic = set(ic)
+
+ # clean the index_names
+ index_names = header.pop(-1)
+ index_names, _, _ = self._clean_index_names(index_names, self.index_col)
+
+ # extract the columns
+ field_count = len(header[0])
+
+ # check if header lengths are equal
+ if not all(len(header_iter) == field_count for header_iter in header[1:]):
+ raise ParserError("Header rows must have an equal number of columns.")
+
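+ # For each header row, keep only the fields that are not index columns.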
+ def extract(r):
+ return tuple(r[i] for i in range(field_count) if i not in sic)
+
+ columns = list(zip(*(extract(r) for r in header)))
+ names = columns.copy()
+ for single_ic in sorted(ic):
+ names.insert(single_ic, single_ic)
+
+ # Clean the column names (if we have an index_col).
+ if len(ic):
+ col_names = [
+ r[ic[0]]
+ if ((r[ic[0]] is not None) and r[ic[0]] not in self.unnamed_cols)
+ else None
+ for r in header
+ ]
+ else:
+ col_names = [None] * len(header)
+
+ passed_names = True
+
+ return names, index_names, col_names, passed_names
+
+ @final
+ def _maybe_make_multi_index_columns(
+ self,
+ columns: Sequence[Hashable],
+ col_names: Sequence[Hashable] | None = None,
+ ) -> Sequence[Hashable] | MultiIndex:
+ # possibly create a column mi here
+ if is_potential_multi_index(columns):
+ list_columns = cast(list[tuple], columns)
+ return MultiIndex.from_tuples(list_columns, names=col_names)
+ return columns
+
+ @final
+ def _make_index(
+ self, data, alldata, columns, indexnamerow: list[Scalar] | None = None
+ ) -> tuple[Index | None, Sequence[Hashable] | MultiIndex]:
+ index: Index | None
+ if not is_index_col(self.index_col) or not self.index_col:
+ index = None
+
+ elif not self._has_complex_date_col:
+ simple_index = self._get_simple_index(alldata, columns)
+ index = self._agg_index(simple_index)
+ elif self._has_complex_date_col:
+ if not self._name_processed:
+ (self.index_names, _, self.index_col) = self._clean_index_names(
+ list(columns), self.index_col
+ )
+ self._name_processed = True
+ date_index = self._get_complex_date_index(data, columns)
+ index = self._agg_index(date_index, try_parse_dates=False)
+
+ # add names for the index
+ if indexnamerow:
+ coffset = len(indexnamerow) - len(columns)
+ assert index is not None
+ index = index.set_names(indexnamerow[:coffset])
+
+ # maybe create a mi on the columns
+ columns = self._maybe_make_multi_index_columns(columns, self.col_names)
+
+ return index, columns
+
+ @final
+ def _get_simple_index(self, data, columns):
+ def ix(col):
+ if not isinstance(col, str):
+ return col
+ raise ValueError(f"Index {col} invalid")
+
+ to_remove = []
+ index = []
+ for idx in self.index_col:
+ i = ix(idx)
+ to_remove.append(i)
+ index.append(data[i])
+
+ # remove index items from content and columns, don't pop in
+ # loop
+ for i in sorted(to_remove, reverse=True):
+ data.pop(i)
+ if not self._implicit_index:
+ columns.pop(i)
+
+ return index
+
+ @final
+ def _get_complex_date_index(self, data, col_names):
+ def _get_name(icol):
+ if isinstance(icol, str):
+ return icol
+
+ if col_names is None:
+ raise ValueError(f"Must supply column order to use {icol!s} as index")
+
+ for i, c in enumerate(col_names):
+ if i == icol:
+ return c
+
+ to_remove = []
+ index = []
+ for idx in self.index_col:
+ name = _get_name(idx)
+ to_remove.append(name)
+ index.append(data[name])
+
+ # remove index items from content and columns, don't pop in
+ # loop
+ for c in sorted(to_remove, reverse=True):
+ data.pop(c)
+ col_names.remove(c)
+
+ return index
+
+ @final
+ def _clean_mapping(self, mapping):
+ """converts col numbers to names"""
+ if not isinstance(mapping, dict):
+ return mapping
+ clean = {}
+ # for mypy
+ assert self.orig_names is not None
+
+ for col, v in mapping.items():
+ if isinstance(col, int) and col not in self.orig_names:
+ col = self.orig_names[col]
+ clean[col] = v
+ if isinstance(mapping, defaultdict):
+ remaining_cols = set(self.orig_names) - set(clean.keys())
+ clean.update({col: mapping[col] for col in remaining_cols})
+ return clean
+
+ @final
+ def _agg_index(self, index, try_parse_dates: bool = True) -> Index:
+ arrays = []
+ converters = self._clean_mapping(self.converters)
+
+ for i, arr in enumerate(index):
+ if try_parse_dates and self._should_parse_dates(i):
+ arr = self._date_conv(
+ arr,
+ col=self.index_names[i] if self.index_names is not None else None,
+ )
+
+ if self.na_filter:
+ col_na_values = self.na_values
+ col_na_fvalues = self.na_fvalues
+ else:
+ col_na_values = set()
+ col_na_fvalues = set()
+
+ if isinstance(self.na_values, dict):
+ assert self.index_names is not None
+ col_name = self.index_names[i]
+ if col_name is not None:
+ col_na_values, col_na_fvalues = _get_na_values(
+ col_name, self.na_values, self.na_fvalues, self.keep_default_na
+ )
+
+ clean_dtypes = self._clean_mapping(self.dtype)
+
+ cast_type = None
+ index_converter = False
+ if self.index_names is not None:
+ if isinstance(clean_dtypes, dict):
+ cast_type = clean_dtypes.get(self.index_names[i], None)
+
+ if isinstance(converters, dict):
+ index_converter = converters.get(self.index_names[i]) is not None
+
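+ # Skip numeric/bool inference when this index level is cast to a string dtype
+ # or has a user-supplied converter.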
+ try_num_bool = not (
+ cast_type and is_string_dtype(cast_type) or index_converter
+ )
+
+ arr, _ = self._infer_types(
+ arr, col_na_values | col_na_fvalues, cast_type is None, try_num_bool
+ )
+ arrays.append(arr)
+
+ names = self.index_names
+ index = ensure_index_from_sequences(arrays, names)
+
+ return index
+
+ @final
+ def _convert_to_ndarrays(
+ self,
+ dct: Mapping,
+ na_values,
+ na_fvalues,
+ verbose: bool = False,
+ converters=None,
+ dtypes=None,
+ ):
+ result = {}
+ for c, values in dct.items():
+ conv_f = None if converters is None else converters.get(c, None)
+ if isinstance(dtypes, dict):
+ cast_type = dtypes.get(c, None)
+ else:
+ # single dtype or None
+ cast_type = dtypes
+
+ if self.na_filter:
+ col_na_values, col_na_fvalues = _get_na_values(
+ c, na_values, na_fvalues, self.keep_default_na
+ )
+ else:
+ col_na_values, col_na_fvalues = set(), set()
+
+ if c in self._parse_date_cols:
+ # GH#26203 Do not convert columns which get converted to dates
+ # but replace nans to ensure to_datetime works
+ mask = algorithms.isin(values, set(col_na_values) | col_na_fvalues)
+ np.putmask(values, mask, np.nan)
+ result[c] = values
+ continue
+
+ if conv_f is not None:
+ # conv_f applied to data before inference
+ if cast_type is not None:
+ warnings.warn(
+ (
+ "Both a converter and dtype were specified "
+ f"for column {c} - only the converter will be used."
+ ),
+ ParserWarning,
+ stacklevel=find_stack_level(),
+ )
+
+ try:
+ values = lib.map_infer(values, conv_f)
+ except ValueError:
+ mask = algorithms.isin(values, list(na_values)).view(np.uint8)
+ values = lib.map_infer_mask(values, conv_f, mask)
+
+ cvals, na_count = self._infer_types(
+ values,
+ set(col_na_values) | col_na_fvalues,
+ cast_type is None,
+ try_num_bool=False,
+ )
+ else:
+ is_ea = is_extension_array_dtype(cast_type)
+ is_str_or_ea_dtype = is_ea or is_string_dtype(cast_type)
+ # skip inference if specified dtype is object
+ # or casting to an EA
+ try_num_bool = not (cast_type and is_str_or_ea_dtype)
+
+ # general type inference and conversion
+ cvals, na_count = self._infer_types(
+ values,
+ set(col_na_values) | col_na_fvalues,
+ cast_type is None,
+ try_num_bool,
+ )
+
+ # type specified in dtype param or cast_type is an EA
+ if cast_type is not None:
+ cast_type = pandas_dtype(cast_type)
+ if cast_type and (cvals.dtype != cast_type or is_ea):
+ if not is_ea and na_count > 0:
+ if is_bool_dtype(cast_type):
+ raise ValueError(f"Bool column has NA values in column {c}")
+ cvals = self._cast_types(cvals, cast_type, c)
+
+ result[c] = cvals
+ if verbose and na_count:
+ print(f"Filled {na_count} NA values in column {c!s}")
+ return result
+
+ @final
+ def _set_noconvert_dtype_columns(
+ self, col_indices: list[int], names: Sequence[Hashable]
+ ) -> set[int]:
+ """
+ Set the columns that should not undergo dtype conversions.
+
+ Currently, any column that is involved with date parsing will not
+ undergo such conversions. If usecols is specified, the positions of the columns
+ not to cast are relative to usecols, not to all columns.
+
+ Parameters
+ ----------
+ col_indices: The indices specifying the order and positions of the columns
+ names: The column names whose order corresponds to the order
+ of col_indices
+
+ Returns
+ -------
+ A set of integers containing the positions of the columns not to convert.
+ """
+ usecols: list[int] | list[str] | None
+ noconvert_columns = set()
+ if self.usecols_dtype == "integer":
+ # A set of integers will be converted to a list in
+ # the correct order every single time.
+ usecols = sorted(self.usecols)
+ elif callable(self.usecols) or self.usecols_dtype not in ("empty", None):
+ # The names attribute should have the correct columns
+ # in the proper order for indexing with parse_dates.
+ usecols = col_indices
+ else:
+ # Usecols is empty.
+ usecols = None
+
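+ # Translate an entry that may be a usecols-relative position or a column name
+ # into an absolute column index.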
+ def _set(x) -> int:
+ if usecols is not None and is_integer(x):
+ x = usecols[x]
+
+ if not is_integer(x):
+ x = col_indices[names.index(x)]
+
+ return x
+
+ if isinstance(self.parse_dates, list):
+ for val in self.parse_dates:
+ if isinstance(val, list):
+ for k in val:
+ noconvert_columns.add(_set(k))
+ else:
+ noconvert_columns.add(_set(val))
+
+ elif isinstance(self.parse_dates, dict):
+ for val in self.parse_dates.values():
+ if isinstance(val, list):
+ for k in val:
+ noconvert_columns.add(_set(k))
+ else:
+ noconvert_columns.add(_set(val))
+
+ elif self.parse_dates:
+ if isinstance(self.index_col, list):
+ for k in self.index_col:
+ noconvert_columns.add(_set(k))
+ elif self.index_col is not None:
+ noconvert_columns.add(_set(self.index_col))
+
+ return noconvert_columns
+
+ @final
+ def _infer_types(
+ self, values, na_values, no_dtype_specified, try_num_bool: bool = True
+ ) -> tuple[ArrayLike, int]:
+ """
+ Infer types of values, possibly casting
+
+ Parameters
+ ----------
+ values : ndarray
+ na_values : set
+ no_dtype_specified : bool
+ Whether no dtype was explicitly specified, in which case values may be inferred
+ try_num_bool : bool, default True
+ try to cast values to numeric (first preference) or boolean
+
+ Returns
+ -------
+ converted : ndarray or ExtensionArray
+ na_count : int
+ """
+ na_count = 0
+ if issubclass(values.dtype.type, (np.number, np.bool_)):
+ # If our array has numeric dtype, we don't have to check for strings in isin
+ na_values = np.array([val for val in na_values if not isinstance(val, str)])
+ mask = algorithms.isin(values, na_values)
+ na_count = mask.astype("uint8", copy=False).sum()
+ if na_count > 0:
+ if is_integer_dtype(values):
+ values = values.astype(np.float64)
+ np.putmask(values, mask, np.nan)
+ return values, na_count
+
+ dtype_backend = self.dtype_backend
+ non_default_dtype_backend = (
+ no_dtype_specified and dtype_backend is not lib.no_default
+ )
+ result: ArrayLike
+
+ if try_num_bool and is_object_dtype(values.dtype):
+ # exclude e.g. DatetimeIndex here
+ try:
+ result, result_mask = lib.maybe_convert_numeric(
+ values,
+ na_values,
+ False,
+ convert_to_masked_nullable=non_default_dtype_backend, # type: ignore[arg-type]
+ )
+ except (ValueError, TypeError):
+ # e.g. encountering datetime string gets ValueError
+ # TypeError can be raised in floatify
+ na_count = parsers.sanitize_objects(values, na_values)
+ result = values
+ else:
+ if non_default_dtype_backend:
+ if result_mask is None:
+ result_mask = np.zeros(result.shape, dtype=np.bool_)
+
+ if result_mask.all():
+ result = IntegerArray(
+ np.ones(result_mask.shape, dtype=np.int64), result_mask
+ )
+ elif is_integer_dtype(result):
+ result = IntegerArray(result, result_mask)
+ elif is_bool_dtype(result):
+ result = BooleanArray(result, result_mask)
+ elif is_float_dtype(result):
+ result = FloatingArray(result, result_mask)
+
+ na_count = result_mask.sum()
+ else:
+ na_count = isna(result).sum()
+ else:
+ result = values
+ if values.dtype == np.object_:
+ na_count = parsers.sanitize_objects(values, na_values)
+
+ if result.dtype == np.object_ and try_num_bool:
+ result, bool_mask = libops.maybe_convert_bool(
+ np.asarray(values),
+ true_values=self.true_values,
+ false_values=self.false_values,
+ convert_to_masked_nullable=non_default_dtype_backend, # type: ignore[arg-type]
+ )
+ if result.dtype == np.bool_ and non_default_dtype_backend:
+ if bool_mask is None:
+ bool_mask = np.zeros(result.shape, dtype=np.bool_)
+ result = BooleanArray(result, bool_mask)
+ elif result.dtype == np.object_ and non_default_dtype_backend:
+ # read_excel sends array of datetime objects
+ if not lib.is_datetime_array(result, skipna=True):
+ dtype = StringDtype()
+ cls = dtype.construct_array_type()
+ result = cls._from_sequence(values, dtype=dtype)
+
+ if dtype_backend == "pyarrow":
+ pa = import_optional_dependency("pyarrow")
+ if isinstance(result, np.ndarray):
+ result = ArrowExtensionArray(pa.array(result, from_pandas=True))
+ elif isinstance(result, BaseMaskedArray):
+ if result._mask.all():
+ # We want an arrow null array here
+ result = ArrowExtensionArray(pa.array([None] * len(result)))
+ else:
+ result = ArrowExtensionArray(
+ pa.array(result._data, mask=result._mask)
+ )
+ else:
+ result = ArrowExtensionArray(
+ pa.array(result.to_numpy(), from_pandas=True)
+ )
+
+ return result, na_count
+
+ @final
+ def _cast_types(self, values: ArrayLike, cast_type: DtypeObj, column) -> ArrayLike:
+ """
+ Cast values to specified type
+
+ Parameters
+ ----------
+ values : ndarray or ExtensionArray
+ cast_type : np.dtype or ExtensionDtype
+ dtype to cast values to
+ column : string
+ column name - used only for error reporting
+
+ Returns
+ -------
+ converted : ndarray or ExtensionArray
+ """
+ if isinstance(cast_type, CategoricalDtype):
+ known_cats = cast_type.categories is not None
+
+ if not is_object_dtype(values.dtype) and not known_cats:
+ # TODO: this is for consistency with
+ # c-parser which parses all categories
+ # as strings
+ values = lib.ensure_string_array(
+ values, skipna=False, convert_na_value=False
+ )
+
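+ # Infer the categories from the values and build the Categorical for the
+ # requested CategoricalDtype.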
+ cats = Index(values).unique().dropna()
+ values = Categorical._from_inferred_categories(
+ cats, cats.get_indexer(values), cast_type, true_values=self.true_values
+ )
+
+ # use the EA's implementation of casting
+ elif isinstance(cast_type, ExtensionDtype):
+ array_type = cast_type.construct_array_type()
+ try:
+ if isinstance(cast_type, BooleanDtype):
+ # error: Unexpected keyword argument "true_values" for
+ # "_from_sequence_of_strings" of "ExtensionArray"
+ return array_type._from_sequence_of_strings( # type: ignore[call-arg]
+ values,
+ dtype=cast_type,
+ true_values=self.true_values,
+ false_values=self.false_values,
+ )
+ else:
+ return array_type._from_sequence_of_strings(values, dtype=cast_type)
+ except NotImplementedError as err:
+ raise NotImplementedError(
+ f"Extension Array: {array_type} must implement "
+ "_from_sequence_of_strings in order to be used in parser methods"
+ ) from err
+
+ elif isinstance(values, ExtensionArray):
+ values = values.astype(cast_type, copy=False)
+ elif issubclass(cast_type.type, str):
+ # TODO: why skipna=True here and False above? some tests depend
+ # on it here, but nothing fails if we change it above
+ # (as no tests get there as of 2022-12-06)
+ values = lib.ensure_string_array(
+ values, skipna=True, convert_na_value=False
+ )
+ else:
+ try:
+ values = astype_array(values, cast_type, copy=True)
+ except ValueError as err:
+ raise ValueError(
+ f"Unable to convert column {column} to type {cast_type}"
+ ) from err
+ return values
+
+ @overload
+ def _do_date_conversions(
+ self,
+ names: Index,
+ data: DataFrame,
+ ) -> tuple[Sequence[Hashable] | Index, DataFrame]:
+ ...
+
+ @overload
+ def _do_date_conversions(
+ self,
+ names: Sequence[Hashable],
+ data: Mapping[Hashable, ArrayLike],
+ ) -> tuple[Sequence[Hashable], Mapping[Hashable, ArrayLike]]:
+ ...
+
+ @final
+ def _do_date_conversions(
+ self,
+ names: Sequence[Hashable] | Index,
+ data: Mapping[Hashable, ArrayLike] | DataFrame,
+ ) -> tuple[Sequence[Hashable] | Index, Mapping[Hashable, ArrayLike] | DataFrame]:
+ # returns data, columns
+
+ if self.parse_dates is not None:
+ data, names = _process_date_conversion(
+ data,
+ self._date_conv,
+ self.parse_dates,
+ self.index_col,
+ self.index_names,
+ names,
+ keep_date_col=self.keep_date_col,
+ dtype_backend=self.dtype_backend,
+ )
+
+ return names, data
+
+ @final
+ def _check_data_length(
+ self,
+ columns: Sequence[Hashable],
+ data: Sequence[ArrayLike],
+ ) -> None:
+ """Checks if length of data is equal to length of column names.
+
+ One set of trailing commas is allowed. If self.index_col is not False,
+ a ParserError is raised earlier when the lengths do not match.
+
+ Parameters
+ ----------
+ columns: list of column names
+ data: list of array-likes containing the data column-wise.
+ """
+ if not self.index_col and len(columns) != len(data) and columns:
+ empty_str = is_object_dtype(data[-1]) and data[-1] == ""
+ # error: No overload variant of "__ror__" of "ndarray" matches
+ # argument type "ExtensionArray"
+ empty_str_or_na = empty_str | isna(data[-1]) # type: ignore[operator]
+ if len(columns) == len(data) - 1 and np.all(empty_str_or_na):
+ return
+ warnings.warn(
+ "Length of header or names does not match length of data. This leads "
+ "to a loss of data with index_col=False.",
+ ParserWarning,
+ stacklevel=find_stack_level(),
+ )
+
+ @overload
+ def _evaluate_usecols(
+ self,
+ usecols: set[int] | Callable[[Hashable], object],
+ names: Sequence[Hashable],
+ ) -> set[int]:
+ ...
+
+ @overload
+ def _evaluate_usecols(
+ self, usecols: set[str], names: Sequence[Hashable]
+ ) -> set[str]:
+ ...
+
+ @final
+ def _evaluate_usecols(
+ self,
+ usecols: Callable[[Hashable], object] | set[str] | set[int],
+ names: Sequence[Hashable],
+ ) -> set[str] | set[int]:
+ """
+ Check whether or not the 'usecols' parameter
+ is a callable. If so, enumerates the 'names'
+ parameter and returns a set of indices for
+ each entry in 'names' that evaluates to True.
+ If not a callable, returns 'usecols'.
+ """
+ if callable(usecols):
+ return {i for i, name in enumerate(names) if usecols(name)}
+ return usecols
+
+ @final
+ def _validate_usecols_names(self, usecols, names: Sequence):
+ """
+ Validates that all usecols are present in a given
+ list of names. If not, raise a ValueError that
+ shows what usecols are missing.
+
+ Parameters
+ ----------
+ usecols : iterable of usecols
+ The columns to validate are present in names.
+ names : iterable of names
+ The column names to check against.
+
+ Returns
+ -------
+ usecols : iterable of usecols
+ The `usecols` parameter if the validation succeeds.
+
+ Raises
+ ------
+ ValueError : Columns were missing. Error message will list them.
+ """
+ missing = [c for c in usecols if c not in names]
+ if len(missing) > 0:
+ raise ValueError(
+ f"Usecols do not match columns, columns expected but not found: "
+ f"{missing}"
+ )
+
+ return usecols
+
+ @final
+ def _validate_usecols_arg(self, usecols):
+ """
+ Validate the 'usecols' parameter.
+
+ Checks whether or not the 'usecols' parameter contains all integers
+ (column selection by index), strings (column by name) or is a callable.
+ Raises a ValueError if that is not the case.
+
+ Parameters
+ ----------
+ usecols : list-like, callable, or None
+ List of columns to use when parsing or a callable that can be used
+ to filter a list of table columns.
+
+ Returns
+ -------
+ usecols_tuple : tuple
+ A tuple of (verified_usecols, usecols_dtype).
+
+ 'verified_usecols' is either a set if an array-like is passed in or
+ 'usecols' if a callable or None is passed in.
+
+ 'usecols_dtype' is the inferred dtype of 'usecols' if an array-like
+ is passed in or None if a callable or None is passed in.
+ """
+ msg = (
+ "'usecols' must either be list-like of all strings, all unicode, "
+ "all integers or a callable."
+ )
+ if usecols is not None:
+ if callable(usecols):
+ return usecols, None
+
+ if not is_list_like(usecols):
+ # see gh-20529
+ #
+ # Ensure it is an iterable container but not a string.
+ raise ValueError(msg)
+
+ usecols_dtype = lib.infer_dtype(usecols, skipna=False)
+
+ if usecols_dtype not in ("empty", "integer", "string"):
+ raise ValueError(msg)
+
+ usecols = set(usecols)
+
+ return usecols, usecols_dtype
+ return usecols, None
+
+ @final
+ def _clean_index_names(self, columns, index_col) -> tuple[list | None, list, list]:
+ if not is_index_col(index_col):
+ return None, columns, index_col
+
+ columns = list(columns)
+
+ # In case of no rows and multiindex columns we have to set index_names to
+ # list of Nones GH#38292
+ if not columns:
+ return [None] * len(index_col), columns, index_col
+
+ cp_cols = list(columns)
+ index_names: list[str | int | None] = []
+
+ # don't mutate
+ index_col = list(index_col)
+
+ for i, c in enumerate(index_col):
+ if isinstance(c, str):
+ index_names.append(c)
+ for j, name in enumerate(cp_cols):
+ if name == c:
+ index_col[i] = j
+ columns.remove(name)
+ break
+ else:
+ name = cp_cols[c]
+ columns.remove(name)
+ index_names.append(name)
+
+ # Only clean index names that were placeholders.
+ for i, name in enumerate(index_names):
+ if isinstance(name, str) and name in self.unnamed_cols:
+ index_names[i] = None
+
+ return index_names, columns, index_col
+
+ @final
+ def _get_empty_meta(self, columns, dtype: DtypeArg | None = None):
+ columns = list(columns)
+
+ index_col = self.index_col
+ index_names = self.index_names
+
+ # Convert `dtype` to a defaultdict of some kind.
+ # This will enable us to write `dtype[col_name]`
+ # without worrying about KeyError issues later on.
+ dtype_dict: defaultdict[Hashable, Any]
+ if not is_dict_like(dtype):
+ # if dtype is None, the default will be object.
+ default_dtype = dtype or object
+ dtype_dict = defaultdict(lambda: default_dtype)
+ else:
+ dtype = cast(dict, dtype)
+ dtype_dict = defaultdict(
+ lambda: object,
+ {columns[k] if is_integer(k) else k: v for k, v in dtype.items()},
+ )
+
+ # Even though we have no data, the "index" of the empty DataFrame
+ # could for example still be an empty MultiIndex. Thus, we need to
+ # check whether we have any index columns specified, via either:
+ #
+ # 1) index_col (column indices)
+ # 2) index_names (column names)
+ #
+ # Both must be non-null to ensure a successful construction. Otherwise,
+ # we have to create a generic empty Index.
+ index: Index
+ if (index_col is None or index_col is False) or index_names is None:
+ index = default_index(0)
+ else:
+ data = [Series([], dtype=dtype_dict[name]) for name in index_names]
+ index = ensure_index_from_sequences(data, names=index_names)
+ index_col.sort()
+
+ for i, n in enumerate(index_col):
+ columns.pop(n - i)
+
+ col_dict = {
+ col_name: Series([], dtype=dtype_dict[col_name]) for col_name in columns
+ }
+
+ return index, columns, col_dict
+
+
+def _make_date_converter(
+ date_parser=lib.no_default,
+ dayfirst: bool = False,
+ cache_dates: bool = True,
+ date_format: dict[Hashable, str] | str | None = None,
+):
+ if date_parser is not lib.no_default:
+ warnings.warn(
+ "The argument 'date_parser' is deprecated and will "
+ "be removed in a future version. "
+ "Please use 'date_format' instead, or read your data in as 'object' dtype "
+ "and then call 'to_datetime'.",
+ FutureWarning,
+ stacklevel=find_stack_level(),
+ )
+ if date_parser is not lib.no_default and date_format is not None:
+ raise TypeError("Cannot use both 'date_parser' and 'date_format'")
+
+ def unpack_if_single_element(arg):
+ # NumPy 1.25 deprecation: https://github.com/numpy/numpy/pull/10615
+ if isinstance(arg, np.ndarray) and arg.ndim == 1 and len(arg) == 1:
+ return arg[0]
+ return arg
+
+ def converter(*date_cols, col: Hashable):
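+ # Fast path: a single column already of datetime64/timedelta64 dtype needs no parsing.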
+ if len(date_cols) == 1 and date_cols[0].dtype.kind in "Mm":
+ return date_cols[0]
+
+ if date_parser is lib.no_default:
+ strs = parsing.concat_date_cols(date_cols)
+ date_fmt = (
+ date_format.get(col) if isinstance(date_format, dict) else date_format
+ )
+
+ with warnings.catch_warnings():
+ warnings.filterwarnings(
+ "ignore",
+ ".*parsing datetimes with mixed time zones will raise an error",
+ category=FutureWarning,
+ )
+ str_objs = ensure_object(strs)
+ try:
+ result = tools.to_datetime(
+ str_objs,
+ format=date_fmt,
+ utc=False,
+ dayfirst=dayfirst,
+ cache=cache_dates,
+ )
+ except (ValueError, TypeError):
+ # test_usecols_with_parse_dates4
+ return str_objs
+
+ if isinstance(result, DatetimeIndex):
+ arr = result.to_numpy()
+ arr.flags.writeable = True
+ return arr
+ return result._values
+ else:
+ try:
+ with warnings.catch_warnings():
+ warnings.filterwarnings(
+ "ignore",
+ ".*parsing datetimes with mixed time zones "
+ "will raise an error",
+ category=FutureWarning,
+ )
+ pre_parsed = date_parser(
+ *(unpack_if_single_element(arg) for arg in date_cols)
+ )
+ try:
+ result = tools.to_datetime(
+ pre_parsed,
+ cache=cache_dates,
+ )
+ except (ValueError, TypeError):
+ # test_read_csv_with_custom_date_parser
+ result = pre_parsed
+ if isinstance(result, datetime.datetime):
+ raise Exception("scalar parser")
+ return result
+ except Exception:
+ # e.g. test_datetime_fractional_seconds
+ with warnings.catch_warnings():
+ warnings.filterwarnings(
+ "ignore",
+ ".*parsing datetimes with mixed time zones "
+ "will raise an error",
+ category=FutureWarning,
+ )
+ pre_parsed = parsing.try_parse_dates(
+ parsing.concat_date_cols(date_cols),
+ parser=date_parser,
+ )
+ try:
+ return tools.to_datetime(pre_parsed)
+ except (ValueError, TypeError):
+ # TODO: not reached in tests 2023-10-27; needed?
+ return pre_parsed
+
+ return converter
+
+
+parser_defaults = {
+ "delimiter": None,
+ "escapechar": None,
+ "quotechar": '"',
+ "quoting": csv.QUOTE_MINIMAL,
+ "doublequote": True,
+ "skipinitialspace": False,
+ "lineterminator": None,
+ "header": "infer",
+ "index_col": None,
+ "names": None,
+ "skiprows": None,
+ "skipfooter": 0,
+ "nrows": None,
+ "na_values": None,
+ "keep_default_na": True,
+ "true_values": None,
+ "false_values": None,
+ "converters": None,
+ "dtype": None,
+ "cache_dates": True,
+ "thousands": None,
+ "comment": None,
+ "decimal": ".",
+ # 'engine': 'c',
+ "parse_dates": False,
+ "keep_date_col": False,
+ "dayfirst": False,
+ "date_parser": lib.no_default,
+ "date_format": None,
+ "usecols": None,
+ # 'iterator': False,
+ "chunksize": None,
+ "verbose": False,
+ "encoding": None,
+ "compression": None,
+ "skip_blank_lines": True,
+ "encoding_errors": "strict",
+ "on_bad_lines": ParserBase.BadLineHandleMethod.ERROR,
+ "dtype_backend": lib.no_default,
+}
+
+
+def _process_date_conversion(
+ data_dict,
+ converter: Callable,
+ parse_spec,
+ index_col,
+ index_names,
+ columns,
+ keep_date_col: bool = False,
+ dtype_backend=lib.no_default,
+):
+ def _isindex(colspec):
+ return (isinstance(index_col, list) and colspec in index_col) or (
+ isinstance(index_names, list) and colspec in index_names
+ )
+
+ new_cols = []
+ new_data = {}
+
+ orig_names = columns
+ columns = list(columns)
+
+ date_cols = set()
+
+ if parse_spec is None or isinstance(parse_spec, bool):
+ return data_dict, columns
+
+ if isinstance(parse_spec, list):
+ # list of column lists
+ for colspec in parse_spec:
+ if is_scalar(colspec) or isinstance(colspec, tuple):
+ if isinstance(colspec, int) and colspec not in data_dict:
+ colspec = orig_names[colspec]
+ if _isindex(colspec):
+ continue
+ elif dtype_backend == "pyarrow":
+ import pyarrow as pa
+
+ dtype = data_dict[colspec].dtype
+ if isinstance(dtype, ArrowDtype) and (
+ pa.types.is_timestamp(dtype.pyarrow_dtype)
+ or pa.types.is_date(dtype.pyarrow_dtype)
+ ):
+ continue
+
+ # The pyarrow engine returns a Series which we need to convert to a
+ # numpy array before the converter; it's a no-op for other parsers
+ data_dict[colspec] = converter(
+ np.asarray(data_dict[colspec]), col=colspec
+ )
+ else:
+ new_name, col, old_names = _try_convert_dates(
+ converter, colspec, data_dict, orig_names
+ )
+ if new_name in data_dict:
+ raise ValueError(f"New date column already in dict {new_name}")
+ new_data[new_name] = col
+ new_cols.append(new_name)
+ date_cols.update(old_names)
+
+ elif isinstance(parse_spec, dict):
+ # dict of new name to column list
+ for new_name, colspec in parse_spec.items():
+ if new_name in data_dict:
+ raise ValueError(f"Date column {new_name} already in dict")
+
+ _, col, old_names = _try_convert_dates(
+ converter,
+ colspec,
+ data_dict,
+ orig_names,
+ target_name=new_name,
+ )
+
+ new_data[new_name] = col
+
+ # If the original column can be converted to a date we keep the converted values
+ # This can only happen if the values are from a single column
+ if len(colspec) == 1:
+ new_data[colspec[0]] = col
+
+ new_cols.append(new_name)
+ date_cols.update(old_names)
+
+ if isinstance(data_dict, DataFrame):
+ data_dict = concat([DataFrame(new_data), data_dict], axis=1, copy=False)
+ else:
+ data_dict.update(new_data)
+ new_cols.extend(columns)
+
+ if not keep_date_col:
+ for c in list(date_cols):
+ data_dict.pop(c)
+ new_cols.remove(c)
+
+ return data_dict, new_cols
+
+
+def _try_convert_dates(
+ parser: Callable, colspec, data_dict, columns, target_name: str | None = None
+):
+ colset = set(columns)
+ colnames = []
+
+ for c in colspec:
+ if c in colset:
+ colnames.append(c)
+ elif isinstance(c, int) and c not in columns:
+ colnames.append(columns[c])
+ else:
+ colnames.append(c)
+
+ new_name: tuple | str
+ if all(isinstance(x, tuple) for x in colnames):
+ new_name = tuple(map("_".join, zip(*colnames)))
+ else:
+ new_name = "_".join([str(x) for x in colnames])
+ to_parse = [np.asarray(data_dict[c]) for c in colnames if c in data_dict]
+
+ new_col = parser(*to_parse, col=new_name if target_name is None else target_name)
+ return new_name, new_col, colnames
+
+
+def _get_na_values(col, na_values, na_fvalues, keep_default_na: bool):
+ """
+ Get the NaN values for a given column.
+
+ Parameters
+ ----------
+ col : str
+ The name of the column.
+ na_values : array-like, dict
+ The object listing the NaN values as strings.
+ na_fvalues : array-like, dict
+ The object listing the NaN values as floats.
+ keep_default_na : bool
+ If `na_values` is a dict, and the column is not mapped in the
+ dictionary, whether to return the default NaN values or the empty set.
+
+ Returns
+ -------
+ nan_tuple : A length-two tuple composed of
+
+ 1) na_values : the string NaN values for that column.
+ 2) na_fvalues : the float NaN values for that column.
+ """
+ if isinstance(na_values, dict):
+ if col in na_values:
+ return na_values[col], na_fvalues[col]
+ else:
+ if keep_default_na:
+ return STR_NA_VALUES, set()
+
+ return set(), set()
+ else:
+ return na_values, na_fvalues
+
+
+def _validate_parse_dates_arg(parse_dates):
+ """
+ Check whether or not the 'parse_dates' parameter
+ is a non-boolean scalar. Raises a ValueError if
+ that is the case.
+ """
+ msg = (
+ "Only booleans, lists, and dictionaries are accepted "
+ "for the 'parse_dates' parameter"
+ )
+
+ if not (
+ parse_dates is None
+ or lib.is_bool(parse_dates)
+ or isinstance(parse_dates, (list, dict))
+ ):
+ raise TypeError(msg)
+
+ return parse_dates
+
+
+def is_index_col(col) -> bool:
+ return col is not None and col is not False
diff --git a/evalkit_eagle/lib/python3.10/site-packages/pandas/io/parsers/c_parser_wrapper.py b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/parsers/c_parser_wrapper.py
new file mode 100644
index 0000000000000000000000000000000000000000..0cd788c5e57399597e3fe4ee1b1bf2af4bffd74b
--- /dev/null
+++ b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/parsers/c_parser_wrapper.py
@@ -0,0 +1,410 @@
+from __future__ import annotations
+
+from collections import defaultdict
+from typing import TYPE_CHECKING
+import warnings
+
+import numpy as np
+
+from pandas._libs import (
+ lib,
+ parsers,
+)
+from pandas.compat._optional import import_optional_dependency
+from pandas.errors import DtypeWarning
+from pandas.util._exceptions import find_stack_level
+
+from pandas.core.dtypes.common import pandas_dtype
+from pandas.core.dtypes.concat import (
+ concat_compat,
+ union_categoricals,
+)
+from pandas.core.dtypes.dtypes import CategoricalDtype
+
+from pandas.core.indexes.api import ensure_index_from_sequences
+
+from pandas.io.common import (
+ dedup_names,
+ is_potential_multi_index,
+)
+from pandas.io.parsers.base_parser import (
+ ParserBase,
+ ParserError,
+ is_index_col,
+)
+
+if TYPE_CHECKING:
+ from collections.abc import (
+ Hashable,
+ Mapping,
+ Sequence,
+ )
+
+ from pandas._typing import (
+ ArrayLike,
+ DtypeArg,
+ DtypeObj,
+ ReadCsvBuffer,
+ )
+
+ from pandas import (
+ Index,
+ MultiIndex,
+ )
+
+
+class CParserWrapper(ParserBase):
+ low_memory: bool
+ _reader: parsers.TextReader
+
+ def __init__(self, src: ReadCsvBuffer[str], **kwds) -> None:
+ super().__init__(kwds)
+ self.kwds = kwds
+ kwds = kwds.copy()
+
+ self.low_memory = kwds.pop("low_memory", False)
+
+ # #2442
+ # error: Cannot determine type of 'index_col'
+ kwds["allow_leading_cols"] = (
+ self.index_col is not False # type: ignore[has-type]
+ )
+
+ # GH20529, validate usecol arg before TextReader
+ kwds["usecols"] = self.usecols
+
+ # Have to pass int, would break tests using TextReader directly otherwise :(
+ kwds["on_bad_lines"] = self.on_bad_lines.value
+
+ for key in (
+ "storage_options",
+ "encoding",
+ "memory_map",
+ "compression",
+ ):
+ kwds.pop(key, None)
+
+ kwds["dtype"] = ensure_dtype_objs(kwds.get("dtype", None))
+ if "dtype_backend" not in kwds or kwds["dtype_backend"] is lib.no_default:
+ kwds["dtype_backend"] = "numpy"
+ if kwds["dtype_backend"] == "pyarrow":
+ # Fail here loudly instead of in cython after reading
+ import_optional_dependency("pyarrow")
+ self._reader = parsers.TextReader(src, **kwds)
+
+ self.unnamed_cols = self._reader.unnamed_cols
+
+ # error: Cannot determine type of 'names'
+ passed_names = self.names is None # type: ignore[has-type]
+
+ if self._reader.header is None:
+ self.names = None
+ else:
+ # error: Cannot determine type of 'names'
+ # error: Cannot determine type of 'index_names'
+ (
+ self.names, # type: ignore[has-type]
+ self.index_names,
+ self.col_names,
+ passed_names,
+ ) = self._extract_multi_indexer_columns(
+ self._reader.header,
+ self.index_names, # type: ignore[has-type]
+ passed_names,
+ )
+
+ # error: Cannot determine type of 'names'
+ if self.names is None: # type: ignore[has-type]
+ self.names = list(range(self._reader.table_width))
+
+ # gh-9755
+ #
+ # need to set orig_names here first
+ # so that proper indexing can be done
+ # with _set_noconvert_columns
+ #
+ # once names has been filtered, we will
+ # then set orig_names again to names
+ # error: Cannot determine type of 'names'
+ self.orig_names = self.names[:] # type: ignore[has-type]
+
+ if self.usecols:
+ usecols = self._evaluate_usecols(self.usecols, self.orig_names)
+
+ # GH 14671
+ # assert for mypy, orig_names is List or None, None would error in issubset
+ assert self.orig_names is not None
+ if self.usecols_dtype == "string" and not set(usecols).issubset(
+ self.orig_names
+ ):
+ self._validate_usecols_names(usecols, self.orig_names)
+
+ # error: Cannot determine type of 'names'
+ if len(self.names) > len(usecols): # type: ignore[has-type]
+ # error: Cannot determine type of 'names'
+ self.names = [ # type: ignore[has-type]
+ n
+ # error: Cannot determine type of 'names'
+ for i, n in enumerate(self.names) # type: ignore[has-type]
+ if (i in usecols or n in usecols)
+ ]
+
+ # error: Cannot determine type of 'names'
+ if len(self.names) < len(usecols): # type: ignore[has-type]
+ # error: Cannot determine type of 'names'
+ self._validate_usecols_names(
+ usecols,
+ self.names, # type: ignore[has-type]
+ )
+
+ # error: Cannot determine type of 'names'
+ self._validate_parse_dates_presence(self.names) # type: ignore[has-type]
+ self._set_noconvert_columns()
+
+ # error: Cannot determine type of 'names'
+ self.orig_names = self.names # type: ignore[has-type]
+
+ if not self._has_complex_date_col:
+ # error: Cannot determine type of 'index_col'
+ if self._reader.leading_cols == 0 and is_index_col(
+ self.index_col # type: ignore[has-type]
+ ):
+ self._name_processed = True
+ (
+ index_names,
+ # error: Cannot determine type of 'names'
+ self.names, # type: ignore[has-type]
+ self.index_col,
+ ) = self._clean_index_names(
+ # error: Cannot determine type of 'names'
+ self.names, # type: ignore[has-type]
+ # error: Cannot determine type of 'index_col'
+ self.index_col, # type: ignore[has-type]
+ )
+
+ if self.index_names is None:
+ self.index_names = index_names
+
+ if self._reader.header is None and not passed_names:
+ assert self.index_names is not None
+ self.index_names = [None] * len(self.index_names)
+
+ self._implicit_index = self._reader.leading_cols > 0
+
+ def close(self) -> None:
+ # close handles opened by C parser
+ try:
+ self._reader.close()
+ except ValueError:
+ pass
+
+ def _set_noconvert_columns(self) -> None:
+ """
+ Set the columns that should not undergo dtype conversions.
+
+ Currently, any column that is involved with date parsing will not
+ undergo such conversions.
+ """
+ assert self.orig_names is not None
+ # error: Cannot determine type of 'names'
+
+ # much faster than using orig_names.index(x) xref GH#44106
+ names_dict = {x: i for i, x in enumerate(self.orig_names)}
+ col_indices = [names_dict[x] for x in self.names] # type: ignore[has-type]
+ # error: Cannot determine type of 'names'
+ noconvert_columns = self._set_noconvert_dtype_columns(
+ col_indices,
+ self.names, # type: ignore[has-type]
+ )
+ for col in noconvert_columns:
+ self._reader.set_noconvert(col)
+
+ def read(
+ self,
+ nrows: int | None = None,
+ ) -> tuple[
+ Index | MultiIndex | None,
+ Sequence[Hashable] | MultiIndex,
+ Mapping[Hashable, ArrayLike],
+ ]:
+ index: Index | MultiIndex | None
+ column_names: Sequence[Hashable] | MultiIndex
+ try:
+ if self.low_memory:
+ chunks = self._reader.read_low_memory(nrows)
+ # destructive to chunks
+ data = _concatenate_chunks(chunks)
+
+ else:
+ data = self._reader.read(nrows)
+ except StopIteration:
+ if self._first_chunk:
+ self._first_chunk = False
+ names = dedup_names(
+ self.orig_names,
+ is_potential_multi_index(self.orig_names, self.index_col),
+ )
+ index, columns, col_dict = self._get_empty_meta(
+ names,
+ dtype=self.dtype,
+ )
+ columns = self._maybe_make_multi_index_columns(columns, self.col_names)
+
+ if self.usecols is not None:
+ columns = self._filter_usecols(columns)
+
+ col_dict = {k: v for k, v in col_dict.items() if k in columns}
+
+ return index, columns, col_dict
+
+ else:
+ self.close()
+ raise
+
+ # Done with first read, next time raise StopIteration
+ self._first_chunk = False
+
+ # error: Cannot determine type of 'names'
+ names = self.names # type: ignore[has-type]
+
+ if self._reader.leading_cols:
+ if self._has_complex_date_col:
+ raise NotImplementedError("file structure not yet supported")
+
+ # implicit index, no index names
+ arrays = []
+
+ if self.index_col and self._reader.leading_cols != len(self.index_col):
+ raise ParserError(
+ "Could not construct index. Requested to use "
+ f"{len(self.index_col)} number of columns, but "
+ f"{self._reader.leading_cols} left to parse."
+ )
+
+ for i in range(self._reader.leading_cols):
+ if self.index_col is None:
+ values = data.pop(i)
+ else:
+ values = data.pop(self.index_col[i])
+
+ values = self._maybe_parse_dates(values, i, try_parse_dates=True)
+ arrays.append(values)
+
+ index = ensure_index_from_sequences(arrays)
+
+ if self.usecols is not None:
+ names = self._filter_usecols(names)
+
+ names = dedup_names(names, is_potential_multi_index(names, self.index_col))
+
+ # rename dict keys
+ data_tups = sorted(data.items())
+ data = {k: v for k, (i, v) in zip(names, data_tups)}
+
+ column_names, date_data = self._do_date_conversions(names, data)
+
+ # maybe create a mi on the columns
+ column_names = self._maybe_make_multi_index_columns(
+ column_names, self.col_names
+ )
+
+ else:
+ # rename dict keys
+ data_tups = sorted(data.items())
+
+ # ugh, mutation
+
+ # assert for mypy, orig_names is List or None, None would error in list(...)
+ assert self.orig_names is not None
+ names = list(self.orig_names)
+ names = dedup_names(names, is_potential_multi_index(names, self.index_col))
+
+ if self.usecols is not None:
+ names = self._filter_usecols(names)
+
+ # columns as list
+ alldata = [x[1] for x in data_tups]
+ if self.usecols is None:
+ self._check_data_length(names, alldata)
+
+ data = {k: v for k, (i, v) in zip(names, data_tups)}
+
+ names, date_data = self._do_date_conversions(names, data)
+ index, column_names = self._make_index(date_data, alldata, names)
+
+ return index, column_names, date_data
+
+ def _filter_usecols(self, names: Sequence[Hashable]) -> Sequence[Hashable]:
+ # hackish
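+ # Keep only the names selected by usecols, matching by position or by label.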
+ usecols = self._evaluate_usecols(self.usecols, names)
+ if usecols is not None and len(names) != len(usecols):
+ names = [
+ name for i, name in enumerate(names) if i in usecols or name in usecols
+ ]
+ return names
+
+ def _maybe_parse_dates(self, values, index: int, try_parse_dates: bool = True):
+ if try_parse_dates and self._should_parse_dates(index):
+ values = self._date_conv(
+ values,
+ col=self.index_names[index] if self.index_names is not None else None,
+ )
+ return values
+
+
+def _concatenate_chunks(chunks: list[dict[int, ArrayLike]]) -> dict:
+ """
+ Concatenate chunks of data read with low_memory=True.
+
+ The tricky part is handling Categoricals, where different chunks
+ may have different inferred categories.
+ """
+ names = list(chunks[0].keys())
+ warning_columns = []
+
+ result: dict = {}
+ for name in names:
+ arrs = [chunk.pop(name) for chunk in chunks]
+ # Check each arr for consistent types.
+ dtypes = {a.dtype for a in arrs}
+ non_cat_dtypes = {x for x in dtypes if not isinstance(x, CategoricalDtype)}
+
+ dtype = dtypes.pop()
+ if isinstance(dtype, CategoricalDtype):
+ result[name] = union_categoricals(arrs, sort_categories=False)
+ else:
+ result[name] = concat_compat(arrs)
+ if len(non_cat_dtypes) > 1 and result[name].dtype == np.dtype(object):
+ warning_columns.append(str(name))
+
+ if warning_columns:
+ warning_names = ",".join(warning_columns)
+ warning_message = " ".join(
+ [
+ f"Columns ({warning_names}) have mixed types. "
+ f"Specify dtype option on import or set low_memory=False."
+ ]
+ )
+ warnings.warn(warning_message, DtypeWarning, stacklevel=find_stack_level())
+ return result
+
+
+def ensure_dtype_objs(
+ dtype: DtypeArg | dict[Hashable, DtypeArg] | None
+) -> DtypeObj | dict[Hashable, DtypeObj] | None:
+ """
+ Ensure we have either None, a dtype object, or a dictionary mapping to
+ dtype objects.
+ """
+ if isinstance(dtype, defaultdict):
+ # "None" not callable [misc]
+ default_dtype = pandas_dtype(dtype.default_factory()) # type: ignore[misc]
+ dtype_converted: defaultdict = defaultdict(lambda: default_dtype)
+ for key in dtype.keys():
+ dtype_converted[key] = pandas_dtype(dtype[key])
+ return dtype_converted
+ elif isinstance(dtype, dict):
+ return {k: pandas_dtype(dtype[k]) for k in dtype}
+ elif dtype is not None:
+ return pandas_dtype(dtype)
+ return dtype
diff --git a/evalkit_eagle/lib/python3.10/site-packages/pandas/io/parsers/python_parser.py b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/parsers/python_parser.py
new file mode 100644
index 0000000000000000000000000000000000000000..79e7554a5744cf439a65e9fd1e18782a0fa71548
--- /dev/null
+++ b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/parsers/python_parser.py
@@ -0,0 +1,1387 @@
+from __future__ import annotations
+
+from collections import (
+ abc,
+ defaultdict,
+)
+from collections.abc import (
+ Hashable,
+ Iterator,
+ Mapping,
+ Sequence,
+)
+import csv
+from io import StringIO
+import re
+from typing import (
+ IO,
+ TYPE_CHECKING,
+ DefaultDict,
+ Literal,
+ cast,
+)
+import warnings
+
+import numpy as np
+
+from pandas._libs import lib
+from pandas.errors import (
+ EmptyDataError,
+ ParserError,
+ ParserWarning,
+)
+from pandas.util._decorators import cache_readonly
+from pandas.util._exceptions import find_stack_level
+
+from pandas.core.dtypes.common import (
+ is_bool_dtype,
+ is_integer,
+ is_numeric_dtype,
+)
+from pandas.core.dtypes.inference import is_dict_like
+
+from pandas.io.common import (
+ dedup_names,
+ is_potential_multi_index,
+)
+from pandas.io.parsers.base_parser import (
+ ParserBase,
+ parser_defaults,
+)
+
+if TYPE_CHECKING:
+ from pandas._typing import (
+ ArrayLike,
+ ReadCsvBuffer,
+ Scalar,
+ )
+
+ from pandas import (
+ Index,
+ MultiIndex,
+ )
+
+# BOM character (byte order mark)
+# This exists at the beginning of a file to indicate endianness
+# of a file (stream). Unfortunately, this marker screws up parsing,
+# so we need to remove it if we see it.
+_BOM = "\ufeff"
+
+
+class PythonParser(ParserBase):
+ _no_thousands_columns: set[int]
+
+ def __init__(self, f: ReadCsvBuffer[str] | list, **kwds) -> None:
+ """
+ Workhorse function for processing a nested list into a DataFrame
+ """
+ super().__init__(kwds)
+
+ self.data: Iterator[str] | None = None
+ self.buf: list = []
+ self.pos = 0
+ self.line_pos = 0
+
+ self.skiprows = kwds["skiprows"]
+
+ if callable(self.skiprows):
+ self.skipfunc = self.skiprows
+ else:
+ self.skipfunc = lambda x: x in self.skiprows
+
+ self.skipfooter = _validate_skipfooter_arg(kwds["skipfooter"])
+ self.delimiter = kwds["delimiter"]
+
+ self.quotechar = kwds["quotechar"]
+ if isinstance(self.quotechar, str):
+ self.quotechar = str(self.quotechar)
+
+ self.escapechar = kwds["escapechar"]
+ self.doublequote = kwds["doublequote"]
+ self.skipinitialspace = kwds["skipinitialspace"]
+ self.lineterminator = kwds["lineterminator"]
+ self.quoting = kwds["quoting"]
+ self.skip_blank_lines = kwds["skip_blank_lines"]
+
+ self.has_index_names = False
+ if "has_index_names" in kwds:
+ self.has_index_names = kwds["has_index_names"]
+
+ self.verbose = kwds["verbose"]
+
+ self.thousands = kwds["thousands"]
+ self.decimal = kwds["decimal"]
+
+ self.comment = kwds["comment"]
+
+ # Set self.data to something that can read lines.
+ if isinstance(f, list):
+ # read_excel: f is a list
+ self.data = cast(Iterator[str], f)
+ else:
+ assert hasattr(f, "readline")
+ self.data = self._make_reader(f)
+
+ # Get columns in two steps: infer from data, then
+ # infer column indices from self.usecols if it is specified.
+ self._col_indices: list[int] | None = None
+ columns: list[list[Scalar | None]]
+ (
+ columns,
+ self.num_original_columns,
+ self.unnamed_cols,
+ ) = self._infer_columns()
+
+ # Now self.columns has the set of columns that we will process.
+ # The original set is stored in self.original_columns.
+ # error: Cannot determine type of 'index_names'
+ (
+ self.columns,
+ self.index_names,
+ self.col_names,
+ _,
+ ) = self._extract_multi_indexer_columns(
+ columns,
+ self.index_names, # type: ignore[has-type]
+ )
+
+ # get popped off for index
+ self.orig_names: list[Hashable] = list(self.columns)
+
+ # needs to be cleaned/refactored
+ # multiple date column thing turning into a real spaghetti factory
+
+ if not self._has_complex_date_col:
+ (index_names, self.orig_names, self.columns) = self._get_index_name()
+ self._name_processed = True
+ if self.index_names is None:
+ self.index_names = index_names
+
+ if self._col_indices is None:
+ self._col_indices = list(range(len(self.columns)))
+
+ self._parse_date_cols = self._validate_parse_dates_presence(self.columns)
+ self._no_thousands_columns = self._set_no_thousand_columns()
+
+ if len(self.decimal) != 1:
+ raise ValueError("Only length-1 decimal markers supported")
+
+ @cache_readonly
+ def num(self) -> re.Pattern:
+ decimal = re.escape(self.decimal)
+ if self.thousands is None:
+ regex = rf"^[\-\+]?[0-9]*({decimal}[0-9]*)?([0-9]?(E|e)\-?[0-9]+)?$"
+ else:
+ thousands = re.escape(self.thousands)
+ regex = (
+ rf"^[\-\+]?([0-9]+{thousands}|[0-9])*({decimal}[0-9]*)?"
+ rf"([0-9]?(E|e)\-?[0-9]+)?$"
+ )
+ return re.compile(regex)
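+
+ # A hedged illustration of the pattern above: with thousands="," and
+ # decimal=".", strings such as "1,234.56", "-3.5e-2" and "+7" match,
+ # while text like "1,234 kg" does not; only matching cells later have
+ # their separators stripped.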
+
+ def _make_reader(self, f: IO[str] | ReadCsvBuffer[str]):
+ sep = self.delimiter
+
+ if sep is None or len(sep) == 1:
+ if self.lineterminator:
+ raise ValueError(
+ "Custom line terminators not supported in python parser (yet)"
+ )
+
+ class MyDialect(csv.Dialect):
+ delimiter = self.delimiter
+ quotechar = self.quotechar
+ escapechar = self.escapechar
+ doublequote = self.doublequote
+ skipinitialspace = self.skipinitialspace
+ quoting = self.quoting
+ lineterminator = "\n"
+
+ dia = MyDialect
+
+ if sep is not None:
+ dia.delimiter = sep
+ else:
+ # attempt to sniff the delimiter from the first valid line,
+ # i.e. no comment line and not in skiprows
+ line = f.readline()
+ lines = self._check_comments([[line]])[0]
+ while self.skipfunc(self.pos) or not lines:
+ self.pos += 1
+ line = f.readline()
+ lines = self._check_comments([[line]])[0]
+ lines_str = cast(list[str], lines)
+
+ # since `line` was a string, lines will be a list containing
+ # only a single string
+ line = lines_str[0]
+
+ self.pos += 1
+ self.line_pos += 1
+ sniffed = csv.Sniffer().sniff(line)
+ dia.delimiter = sniffed.delimiter
+
+ # Note: encoding is irrelevant here
+ line_rdr = csv.reader(StringIO(line), dialect=dia)
+ self.buf.extend(list(line_rdr))
+
+ # Note: encoding is irrelevant here
+ reader = csv.reader(f, dialect=dia, strict=True)
+
+ else:
+
+ def _read():
+ line = f.readline()
+ pat = re.compile(sep)
+
+ yield pat.split(line.strip())
+
+ for line in f:
+ yield pat.split(line.strip())
+
+ reader = _read()
+
+ return reader
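+
+ # For multi-character / regex separators, the generator path above splits
+ # each stripped line with re.split; a small sketch using assumed input:
+ #
+ #   re.compile(r"\s+").split("a  b\tc")  # -> ["a", "b", "c"]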
+
+ def read(
+ self, rows: int | None = None
+ ) -> tuple[
+ Index | None, Sequence[Hashable] | MultiIndex, Mapping[Hashable, ArrayLike]
+ ]:
+ try:
+ content = self._get_lines(rows)
+ except StopIteration:
+ if self._first_chunk:
+ content = []
+ else:
+ self.close()
+ raise
+
+ # done with first read, next time raise StopIteration
+ self._first_chunk = False
+
+ columns: Sequence[Hashable] = list(self.orig_names)
+ if not len(content): # pragma: no cover
+ # DataFrame with the right metadata, even though it's length 0
+ # error: Cannot determine type of 'index_col'
+ names = dedup_names(
+ self.orig_names,
+ is_potential_multi_index(
+ self.orig_names,
+ self.index_col, # type: ignore[has-type]
+ ),
+ )
+ index, columns, col_dict = self._get_empty_meta(
+ names,
+ self.dtype,
+ )
+ conv_columns = self._maybe_make_multi_index_columns(columns, self.col_names)
+ return index, conv_columns, col_dict
+
+ # handle new style for names in index
+ count_empty_content_vals = count_empty_vals(content[0])
+ indexnamerow = None
+ if self.has_index_names and count_empty_content_vals == len(columns):
+ indexnamerow = content[0]
+ content = content[1:]
+
+ alldata = self._rows_to_cols(content)
+ data, columns = self._exclude_implicit_index(alldata)
+
+ conv_data = self._convert_data(data)
+ columns, conv_data = self._do_date_conversions(columns, conv_data)
+
+ index, result_columns = self._make_index(
+ conv_data, alldata, columns, indexnamerow
+ )
+
+ return index, result_columns, conv_data
+
+ def _exclude_implicit_index(
+ self,
+ alldata: list[np.ndarray],
+ ) -> tuple[Mapping[Hashable, np.ndarray], Sequence[Hashable]]:
+ # error: Cannot determine type of 'index_col'
+ names = dedup_names(
+ self.orig_names,
+ is_potential_multi_index(
+ self.orig_names,
+ self.index_col, # type: ignore[has-type]
+ ),
+ )
+
+ offset = 0
+ if self._implicit_index:
+ # error: Cannot determine type of 'index_col'
+ offset = len(self.index_col) # type: ignore[has-type]
+
+ len_alldata = len(alldata)
+ self._check_data_length(names, alldata)
+
+ return {
+ name: alldata[i + offset] for i, name in enumerate(names) if i < len_alldata
+ }, names
+
+ # legacy
+ def get_chunk(
+ self, size: int | None = None
+ ) -> tuple[
+ Index | None, Sequence[Hashable] | MultiIndex, Mapping[Hashable, ArrayLike]
+ ]:
+ if size is None:
+ # error: "PythonParser" has no attribute "chunksize"
+ size = self.chunksize # type: ignore[attr-defined]
+ return self.read(rows=size)
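+
+ # A usage sketch for the chunked path (public API shown for illustration;
+ # the chunksize attribute is attached by the surrounding reader machinery):
+ #
+ #   from io import StringIO
+ #   import pandas as pd
+ #   with pd.read_csv(StringIO("a,b\n1,2\n3,4\n5,6\n"),
+ #                    engine="python", chunksize=2) as reader:
+ #       for chunk in reader:  # DataFrames of at most 2 rows each
+ #           ...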
+
+ def _convert_data(
+ self,
+ data: Mapping[Hashable, np.ndarray],
+ ) -> Mapping[Hashable, ArrayLike]:
+ # apply converters
+ clean_conv = self._clean_mapping(self.converters)
+ clean_dtypes = self._clean_mapping(self.dtype)
+
+ # Apply NA values.
+ clean_na_values = {}
+ clean_na_fvalues = {}
+
+ if isinstance(self.na_values, dict):
+ for col in self.na_values:
+ na_value = self.na_values[col]
+ na_fvalue = self.na_fvalues[col]
+
+ if isinstance(col, int) and col not in self.orig_names:
+ col = self.orig_names[col]
+
+ clean_na_values[col] = na_value
+ clean_na_fvalues[col] = na_fvalue
+ else:
+ clean_na_values = self.na_values
+ clean_na_fvalues = self.na_fvalues
+
+ return self._convert_to_ndarrays(
+ data,
+ clean_na_values,
+ clean_na_fvalues,
+ self.verbose,
+ clean_conv,
+ clean_dtypes,
+ )
+
+ @cache_readonly
+ def _have_mi_columns(self) -> bool:
+ if self.header is None:
+ return False
+
+ header = self.header
+ if isinstance(header, (list, tuple, np.ndarray)):
+ return len(header) > 1
+ else:
+ return False
+
+ def _infer_columns(
+ self,
+ ) -> tuple[list[list[Scalar | None]], int, set[Scalar | None]]:
+ names = self.names
+ num_original_columns = 0
+ clear_buffer = True
+ unnamed_cols: set[Scalar | None] = set()
+
+ if self.header is not None:
+ header = self.header
+ have_mi_columns = self._have_mi_columns
+
+ if isinstance(header, (list, tuple, np.ndarray)):
+ # we have a mi columns, so read an extra line
+ if have_mi_columns:
+ header = list(header) + [header[-1] + 1]
+ else:
+ header = [header]
+
+ columns: list[list[Scalar | None]] = []
+ for level, hr in enumerate(header):
+ try:
+ line = self._buffered_line()
+
+ while self.line_pos <= hr:
+ line = self._next_line()
+
+ except StopIteration as err:
+ if 0 < self.line_pos <= hr and (
+ not have_mi_columns or hr != header[-1]
+ ):
+ # If no rows we want to raise a different message and if
+ # we have mi columns, the last line is not part of the header
+ joi = list(map(str, header[:-1] if have_mi_columns else header))
+ msg = f"[{','.join(joi)}], len of {len(joi)}, "
+ raise ValueError(
+ f"Passed header={msg}"
+ f"but only {self.line_pos} lines in file"
+ ) from err
+
+ # We have an empty file, so check
+ # if columns are provided. That will
+ # serve as the 'line' for parsing
+ if have_mi_columns and hr > 0:
+ if clear_buffer:
+ self._clear_buffer()
+ columns.append([None] * len(columns[-1]))
+ return columns, num_original_columns, unnamed_cols
+
+ if not self.names:
+ raise EmptyDataError("No columns to parse from file") from err
+
+ line = self.names[:]
+
+ this_columns: list[Scalar | None] = []
+ this_unnamed_cols = []
+
+ for i, c in enumerate(line):
+ if c == "":
+ if have_mi_columns:
+ col_name = f"Unnamed: {i}_level_{level}"
+ else:
+ col_name = f"Unnamed: {i}"
+
+ this_unnamed_cols.append(i)
+ this_columns.append(col_name)
+ else:
+ this_columns.append(c)
+
+ if not have_mi_columns:
+ counts: DefaultDict = defaultdict(int)
+ # Ensure that regular columns are used before unnamed ones
+ # to keep given names and mangle unnamed columns
+ col_loop_order = [
+ i
+ for i in range(len(this_columns))
+ if i not in this_unnamed_cols
+ ] + this_unnamed_cols
+
+ # TODO: Use pandas.io.common.dedup_names instead (see #50371)
+ for i in col_loop_order:
+ col = this_columns[i]
+ old_col = col
+ cur_count = counts[col]
+
+ if cur_count > 0:
+ while cur_count > 0:
+ counts[old_col] = cur_count + 1
+ col = f"{old_col}.{cur_count}"
+ if col in this_columns:
+ cur_count += 1
+ else:
+ cur_count = counts[col]
+
+ if (
+ self.dtype is not None
+ and is_dict_like(self.dtype)
+ and self.dtype.get(old_col) is not None
+ and self.dtype.get(col) is None
+ ):
+ self.dtype.update({col: self.dtype.get(old_col)})
+ this_columns[i] = col
+ counts[col] = cur_count + 1
+ elif have_mi_columns:
+ # if we have grabbed an extra line, but it's not in our
+ # format, so save it in the buffer and create a blank extra
+ # line for the rest of the parsing code
+ if hr == header[-1]:
+ lc = len(this_columns)
+ # error: Cannot determine type of 'index_col'
+ sic = self.index_col # type: ignore[has-type]
+ ic = len(sic) if sic is not None else 0
+ unnamed_count = len(this_unnamed_cols)
+
+ # if wrong number of blanks or no index, not our format
+ if (lc != unnamed_count and lc - ic > unnamed_count) or ic == 0:
+ clear_buffer = False
+ this_columns = [None] * lc
+ self.buf = [self.buf[-1]]
+
+ columns.append(this_columns)
+ unnamed_cols.update({this_columns[i] for i in this_unnamed_cols})
+
+ if len(columns) == 1:
+ num_original_columns = len(this_columns)
+
+ if clear_buffer:
+ self._clear_buffer()
+
+ first_line: list[Scalar] | None
+ if names is not None:
+ # Read first row after header to check if data are longer
+ try:
+ first_line = self._next_line()
+ except StopIteration:
+ first_line = None
+
+ len_first_data_row = 0 if first_line is None else len(first_line)
+
+ if len(names) > len(columns[0]) and len(names) > len_first_data_row:
+ raise ValueError(
+ "Number of passed names did not match "
+ "number of header fields in the file"
+ )
+ if len(columns) > 1:
+ raise TypeError("Cannot pass names with multi-index columns")
+
+ if self.usecols is not None:
+ # Set _use_cols. We don't store columns because they are
+ # overwritten.
+ self._handle_usecols(columns, names, num_original_columns)
+ else:
+ num_original_columns = len(names)
+ if self._col_indices is not None and len(names) != len(
+ self._col_indices
+ ):
+ columns = [[names[i] for i in sorted(self._col_indices)]]
+ else:
+ columns = [names]
+ else:
+ columns = self._handle_usecols(
+ columns, columns[0], num_original_columns
+ )
+ else:
+ ncols = len(self._header_line)
+ num_original_columns = ncols
+
+ if not names:
+ columns = [list(range(ncols))]
+ columns = self._handle_usecols(columns, columns[0], ncols)
+ elif self.usecols is None or len(names) >= ncols:
+ columns = self._handle_usecols([names], names, ncols)
+ num_original_columns = len(names)
+ elif not callable(self.usecols) and len(names) != len(self.usecols):
+ raise ValueError(
+ "Number of passed names did not match number of "
+ "header fields in the file"
+ )
+ else:
+ # Ignore output but set used columns.
+ columns = [names]
+ self._handle_usecols(columns, columns[0], ncols)
+
+ return columns, num_original_columns, unnamed_cols
+
+ @cache_readonly
+ def _header_line(self):
+ # Store line for reuse in _get_index_name
+ if self.header is not None:
+ return None
+
+ try:
+ line = self._buffered_line()
+ except StopIteration as err:
+ if not self.names:
+ raise EmptyDataError("No columns to parse from file") from err
+
+ line = self.names[:]
+ return line
+
+ def _handle_usecols(
+ self,
+ columns: list[list[Scalar | None]],
+ usecols_key: list[Scalar | None],
+ num_original_columns: int,
+ ) -> list[list[Scalar | None]]:
+ """
+ Sets self._col_indices
+
+ usecols_key is used if there are string usecols.
+ """
+ col_indices: set[int] | list[int]
+ if self.usecols is not None:
+ if callable(self.usecols):
+ col_indices = self._evaluate_usecols(self.usecols, usecols_key)
+ elif any(isinstance(u, str) for u in self.usecols):
+ if len(columns) > 1:
+ raise ValueError(
+ "If using multiple headers, usecols must be integers."
+ )
+ col_indices = []
+
+ for col in self.usecols:
+ if isinstance(col, str):
+ try:
+ col_indices.append(usecols_key.index(col))
+ except ValueError:
+ self._validate_usecols_names(self.usecols, usecols_key)
+ else:
+ col_indices.append(col)
+ else:
+ missing_usecols = [
+ col for col in self.usecols if col >= num_original_columns
+ ]
+ if missing_usecols:
+ raise ParserError(
+ "Defining usecols with out-of-bounds indices is not allowed. "
+ f"{missing_usecols} are out-of-bounds.",
+ )
+ col_indices = self.usecols
+
+ columns = [
+ [n for i, n in enumerate(column) if i in col_indices]
+ for column in columns
+ ]
+ self._col_indices = sorted(col_indices)
+ return columns
+
+ def _buffered_line(self) -> list[Scalar]:
+ """
+ Return a line from buffer, filling buffer if required.
+ """
+ if len(self.buf) > 0:
+ return self.buf[0]
+ else:
+ return self._next_line()
+
+ def _check_for_bom(self, first_row: list[Scalar]) -> list[Scalar]:
+ """
+ Checks whether the file begins with the BOM character.
+ If it does, remove it. In addition, if there is quoting
+ in the field subsequent to the BOM, remove it as well
+ because it technically takes place at the beginning of
+ the name, not the middle of it.
+ """
+ # first_row will be a list, so we need to check
+ # that that list is not empty before proceeding.
+ if not first_row:
+ return first_row
+
+ # The first element of this row is the one that could have the
+ # BOM that we want to remove. Check that the first element is a
+ # string before proceeding.
+ if not isinstance(first_row[0], str):
+ return first_row
+
+ # Check that the string is not empty, as that would
+ # obviously not have a BOM at the start of it.
+ if not first_row[0]:
+ return first_row
+
+ # Since the string is non-empty, check that it does
+ # in fact begin with a BOM.
+ first_elt = first_row[0][0]
+ if first_elt != _BOM:
+ return first_row
+
+ first_row_bom = first_row[0]
+ new_row: str
+
+ if len(first_row_bom) > 1 and first_row_bom[1] == self.quotechar:
+ start = 2
+ quote = first_row_bom[1]
+ end = first_row_bom[2:].index(quote) + 2
+
+ # Extract the data between the quotation marks
+ new_row = first_row_bom[start:end]
+
+ # Extract any remaining data after the second
+ # quotation mark.
+ if len(first_row_bom) > end + 1:
+ new_row += first_row_bom[end + 1 :]
+
+ else:
+ # No quotation so just remove BOM from first element
+ new_row = first_row_bom[1:]
+
+ new_row_list: list[Scalar] = [new_row]
+ return new_row_list + first_row[1:]
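+
+ # Concretely (assumed rows): with quotechar '"', a first row
+ # ['\ufeff"a"', 'b'] becomes ['a', 'b'], as does ['\ufeffa', 'b'];
+ # rows that do not start with the BOM are returned unchanged.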
+
+ def _is_line_empty(self, line: list[Scalar]) -> bool:
+ """
+ Check if a line is empty or not.
+
+ Parameters
+ ----------
+ line : str, array-like
+ The line of data to check.
+
+ Returns
+ -------
+ boolean : Whether or not the line is empty.
+ """
+ return not line or all(not x for x in line)
+
+ def _next_line(self) -> list[Scalar]:
+ if isinstance(self.data, list):
+ while self.skipfunc(self.pos):
+ if self.pos >= len(self.data):
+ break
+ self.pos += 1
+
+ while True:
+ try:
+ line = self._check_comments([self.data[self.pos]])[0]
+ self.pos += 1
+ # either uncommented or blank to begin with
+ if not self.skip_blank_lines and (
+ self._is_line_empty(self.data[self.pos - 1]) or line
+ ):
+ break
+ if self.skip_blank_lines:
+ ret = self._remove_empty_lines([line])
+ if ret:
+ line = ret[0]
+ break
+ except IndexError:
+ raise StopIteration
+ else:
+ while self.skipfunc(self.pos):
+ self.pos += 1
+ # assert for mypy, data is Iterator[str] or None, would error in next
+ assert self.data is not None
+ next(self.data)
+
+ while True:
+ orig_line = self._next_iter_line(row_num=self.pos + 1)
+ self.pos += 1
+
+ if orig_line is not None:
+ line = self._check_comments([orig_line])[0]
+
+ if self.skip_blank_lines:
+ ret = self._remove_empty_lines([line])
+
+ if ret:
+ line = ret[0]
+ break
+ elif self._is_line_empty(orig_line) or line:
+ break
+
+ # This was the first line of the file,
+ # which could contain the BOM at the
+ # beginning of it.
+ if self.pos == 1:
+ line = self._check_for_bom(line)
+
+ self.line_pos += 1
+ self.buf.append(line)
+ return line
+
+ def _alert_malformed(self, msg: str, row_num: int) -> None:
+ """
+ Alert a user about a malformed row, depending on value of
+ `self.on_bad_lines` enum.
+
+ If `self.on_bad_lines` is ERROR, the alert will be `ParserError`.
+ If `self.on_bad_lines` is WARN, the alert will be printed out.
+
+ Parameters
+ ----------
+ msg: str
+ The error message to display.
+ row_num: int
+ The row number where the parsing error occurred.
+ Because this row number is displayed, we 1-index,
+ even though we 0-index internally.
+ """
+ if self.on_bad_lines == self.BadLineHandleMethod.ERROR:
+ raise ParserError(msg)
+ if self.on_bad_lines == self.BadLineHandleMethod.WARN:
+ warnings.warn(
+ f"Skipping line {row_num}: {msg}\n",
+ ParserWarning,
+ stacklevel=find_stack_level(),
+ )
+
+ def _next_iter_line(self, row_num: int) -> list[Scalar] | None:
+ """
+ Wrapper around iterating through `self.data` (CSV source).
+
+ When a CSV error is raised, we check for specific
+ error messages that allow us to customize the
+ error message displayed to the user.
+
+ Parameters
+ ----------
+ row_num: int
+ The row number of the line being parsed.
+ """
+ try:
+ # assert for mypy, data is Iterator[str] or None, would error in next
+ assert self.data is not None
+ line = next(self.data)
+ # for mypy
+ assert isinstance(line, list)
+ return line
+ except csv.Error as e:
+ if self.on_bad_lines in (
+ self.BadLineHandleMethod.ERROR,
+ self.BadLineHandleMethod.WARN,
+ ):
+ msg = str(e)
+
+ if "NULL byte" in msg or "line contains NUL" in msg:
+ msg = (
+ "NULL byte detected. This byte "
+ "cannot be processed in Python's "
+ "native csv library at the moment, "
+ "so please pass in engine='c' instead"
+ )
+
+ if self.skipfooter > 0:
+ reason = (
+ "Error could possibly be due to "
+ "parsing errors in the skipped footer rows "
+ "(the skipfooter keyword is only applied "
+ "after Python's csv library has parsed "
+ "all rows)."
+ )
+ msg += ". " + reason
+
+ self._alert_malformed(msg, row_num)
+ return None
+
+ def _check_comments(self, lines: list[list[Scalar]]) -> list[list[Scalar]]:
+ if self.comment is None:
+ return lines
+ ret = []
+ for line in lines:
+ rl = []
+ for x in line:
+ if (
+ not isinstance(x, str)
+ or self.comment not in x
+ or x in self.na_values
+ ):
+ rl.append(x)
+ else:
+ x = x[: x.find(self.comment)]
+ if len(x) > 0:
+ rl.append(x)
+ break
+ ret.append(rl)
+ return ret
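+
+ # A small sketch of the truncation above (assumed inputs): with
+ # comment="#", the row ["1", "2#trailing", "3"] becomes ["1", "2"];
+ # everything from the comment character onward, including the rest
+ # of the row, is dropped via the break.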
+
+ def _remove_empty_lines(self, lines: list[list[Scalar]]) -> list[list[Scalar]]:
+ """
+ Iterate through the lines and remove any that are
+ either empty or contain only one whitespace value
+
+ Parameters
+ ----------
+ lines : list of list of Scalars
+ The array of lines that we are to filter.
+
+ Returns
+ -------
+ filtered_lines : list of list of Scalars
+ The same array of lines with the "empty" ones removed.
+ """
+ # Remove empty lines and lines with only one whitespace value
+ ret = [
+ line
+ for line in lines
+ if (
+ len(line) > 1
+ or len(line) == 1
+ and (not isinstance(line[0], str) or line[0].strip())
+ )
+ ]
+ return ret
+
+ def _check_thousands(self, lines: list[list[Scalar]]) -> list[list[Scalar]]:
+ if self.thousands is None:
+ return lines
+
+ return self._search_replace_num_columns(
+ lines=lines, search=self.thousands, replace=""
+ )
+
+ def _search_replace_num_columns(
+ self, lines: list[list[Scalar]], search: str, replace: str
+ ) -> list[list[Scalar]]:
+ ret = []
+ for line in lines:
+ rl = []
+ for i, x in enumerate(line):
+ if (
+ not isinstance(x, str)
+ or search not in x
+ or i in self._no_thousands_columns
+ or not self.num.search(x.strip())
+ ):
+ rl.append(x)
+ else:
+ rl.append(x.replace(search, replace))
+ ret.append(rl)
+ return ret
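+
+ # Illustration (assumed values): with search="," and replace="", a cell
+ # "1,234.5" in an eligible column becomes "1234.5", while "a,b" fails
+ # self.num.search and passes through untouched.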
+
+ def _check_decimal(self, lines: list[list[Scalar]]) -> list[list[Scalar]]:
+ if self.decimal == parser_defaults["decimal"]:
+ return lines
+
+ return self._search_replace_num_columns(
+ lines=lines, search=self.decimal, replace="."
+ )
+
+ def _clear_buffer(self) -> None:
+ self.buf = []
+
+ def _get_index_name(
+ self,
+ ) -> tuple[Sequence[Hashable] | None, list[Hashable], list[Hashable]]:
+ """
+ Try several cases to get lines:
+
+ 0) There are headers on row 0 and row 1 and their
+ total summed lengths equals the length of the next line.
+ Treat row 0 as columns and row 1 as indices
+ 1) Look for implicit index: there are more columns
+ on row 1 than row 0. If this is true, assume that row
+ 1 lists index columns and row 0 lists normal columns.
+ 2) Get index from the columns if it was listed.
+ """
+ columns: Sequence[Hashable] = self.orig_names
+ orig_names = list(columns)
+ columns = list(columns)
+
+ line: list[Scalar] | None
+ if self._header_line is not None:
+ line = self._header_line
+ else:
+ try:
+ line = self._next_line()
+ except StopIteration:
+ line = None
+
+ next_line: list[Scalar] | None
+ try:
+ next_line = self._next_line()
+ except StopIteration:
+ next_line = None
+
+ # implicitly index_col=0 b/c 1 fewer column names
+ implicit_first_cols = 0
+ if line is not None:
+ # leave it 0, #2442
+ # Case 1
+ # error: Cannot determine type of 'index_col'
+ index_col = self.index_col # type: ignore[has-type]
+ if index_col is not False:
+ implicit_first_cols = len(line) - self.num_original_columns
+
+ # Case 0
+ if (
+ next_line is not None
+ and self.header is not None
+ and index_col is not False
+ ):
+ if len(next_line) == len(line) + self.num_original_columns:
+ # column and index names on diff rows
+ self.index_col = list(range(len(line)))
+ self.buf = self.buf[1:]
+
+ for c in reversed(line):
+ columns.insert(0, c)
+
+ # Update list of original names to include all indices.
+ orig_names = list(columns)
+ self.num_original_columns = len(columns)
+ return line, orig_names, columns
+
+ if implicit_first_cols > 0:
+ # Case 1
+ self._implicit_index = True
+ if self.index_col is None:
+ self.index_col = list(range(implicit_first_cols))
+
+ index_name = None
+
+ else:
+ # Case 2
+ (index_name, _, self.index_col) = self._clean_index_names(
+ columns, self.index_col
+ )
+
+ return index_name, orig_names, columns
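+
+ # Case 1 in practice (a hedged example): a file whose data rows carry one
+ # more field than the header,
+ #
+ #   a,b
+ #   x,1,2
+ #
+ # gives implicit_first_cols == 1, so index_col becomes [0] and the "x"
+ # values are treated as the row index.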
+
+ def _rows_to_cols(self, content: list[list[Scalar]]) -> list[np.ndarray]:
+ col_len = self.num_original_columns
+
+ if self._implicit_index:
+ col_len += len(self.index_col)
+
+ max_len = max(len(row) for row in content)
+
+ # Check that there are no rows with too many
+ # elements in their row (rows with too few
+ # elements are padded with NaN).
+ # error: Non-overlapping identity check (left operand type: "List[int]",
+ # right operand type: "Literal[False]")
+ if (
+ max_len > col_len
+ and self.index_col is not False # type: ignore[comparison-overlap]
+ and self.usecols is None
+ ):
+ footers = self.skipfooter if self.skipfooter else 0
+ bad_lines = []
+
+ iter_content = enumerate(content)
+ content_len = len(content)
+ content = []
+
+ for i, _content in iter_content:
+ actual_len = len(_content)
+
+ if actual_len > col_len:
+ if callable(self.on_bad_lines):
+ new_l = self.on_bad_lines(_content)
+ if new_l is not None:
+ content.append(new_l)
+ elif self.on_bad_lines in (
+ self.BadLineHandleMethod.ERROR,
+ self.BadLineHandleMethod.WARN,
+ ):
+ row_num = self.pos - (content_len - i + footers)
+ bad_lines.append((row_num, actual_len))
+
+ if self.on_bad_lines == self.BadLineHandleMethod.ERROR:
+ break
+ else:
+ content.append(_content)
+
+ for row_num, actual_len in bad_lines:
+ msg = (
+ f"Expected {col_len} fields in line {row_num + 1}, saw "
+ f"{actual_len}"
+ )
+ if (
+ self.delimiter
+ and len(self.delimiter) > 1
+ and self.quoting != csv.QUOTE_NONE
+ ):
+ # see gh-13374
+ reason = (
+ "Error could possibly be due to quotes being "
+ "ignored when a multi-char delimiter is used."
+ )
+ msg += ". " + reason
+
+ self._alert_malformed(msg, row_num + 1)
+
+ # see gh-13320
+ zipped_content = list(lib.to_object_array(content, min_width=col_len).T)
+
+ if self.usecols:
+ assert self._col_indices is not None
+ col_indices = self._col_indices
+
+ if self._implicit_index:
+ zipped_content = [
+ a
+ for i, a in enumerate(zipped_content)
+ if (
+ i < len(self.index_col)
+ or i - len(self.index_col) in col_indices
+ )
+ ]
+ else:
+ zipped_content = [
+ a for i, a in enumerate(zipped_content) if i in col_indices
+ ]
+ return zipped_content
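+
+ # Sketch of the callable bad-line hook used above (assumed input): with
+ # col_len == 2, on_bad_lines=lambda row: row[:2] keeps the first two
+ # fields of ["1", "2", "3"], whereas returning None drops the row.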
+
+ def _get_lines(self, rows: int | None = None) -> list[list[Scalar]]:
+ lines = self.buf
+ new_rows = None
+
+ # already fetched some number
+ if rows is not None:
+ # we already have the lines in the buffer
+ if len(self.buf) >= rows:
+ new_rows, self.buf = self.buf[:rows], self.buf[rows:]
+
+ # need some lines
+ else:
+ rows -= len(self.buf)
+
+ if new_rows is None:
+ if isinstance(self.data, list):
+ if self.pos > len(self.data):
+ raise StopIteration
+ if rows is None:
+ new_rows = self.data[self.pos :]
+ new_pos = len(self.data)
+ else:
+ new_rows = self.data[self.pos : self.pos + rows]
+ new_pos = self.pos + rows
+
+ new_rows = self._remove_skipped_rows(new_rows)
+ lines.extend(new_rows)
+ self.pos = new_pos
+
+ else:
+ new_rows = []
+ try:
+ if rows is not None:
+ row_index = 0
+ row_ct = 0
+ offset = self.pos if self.pos is not None else 0
+ while row_ct < rows:
+ # assert for mypy, data is Iterator[str] or None, would
+ # error in next
+ assert self.data is not None
+ new_row = next(self.data)
+ if not self.skipfunc(offset + row_index):
+ row_ct += 1
+ row_index += 1
+ new_rows.append(new_row)
+
+ len_new_rows = len(new_rows)
+ new_rows = self._remove_skipped_rows(new_rows)
+ lines.extend(new_rows)
+ else:
+ rows = 0
+
+ while True:
+ next_row = self._next_iter_line(row_num=self.pos + rows + 1)
+ rows += 1
+
+ if next_row is not None:
+ new_rows.append(next_row)
+ len_new_rows = len(new_rows)
+
+ except StopIteration:
+ len_new_rows = len(new_rows)
+ new_rows = self._remove_skipped_rows(new_rows)
+ lines.extend(new_rows)
+ if len(lines) == 0:
+ raise
+ self.pos += len_new_rows
+
+ self.buf = []
+ else:
+ lines = new_rows
+
+ if self.skipfooter:
+ lines = lines[: -self.skipfooter]
+
+ lines = self._check_comments(lines)
+ if self.skip_blank_lines:
+ lines = self._remove_empty_lines(lines)
+ lines = self._check_thousands(lines)
+ return self._check_decimal(lines)
+
+ def _remove_skipped_rows(self, new_rows: list[list[Scalar]]) -> list[list[Scalar]]:
+ if self.skiprows:
+ return [
+ row for i, row in enumerate(new_rows) if not self.skipfunc(i + self.pos)
+ ]
+ return new_rows
+
+ def _set_no_thousand_columns(self) -> set[int]:
+ no_thousands_columns: set[int] = set()
+ if self.columns and self.parse_dates:
+ assert self._col_indices is not None
+ no_thousands_columns = self._set_noconvert_dtype_columns(
+ self._col_indices, self.columns
+ )
+ if self.columns and self.dtype:
+ assert self._col_indices is not None
+ for i, col in zip(self._col_indices, self.columns):
+ if not isinstance(self.dtype, dict) and not is_numeric_dtype(
+ self.dtype
+ ):
+ no_thousands_columns.add(i)
+ if (
+ isinstance(self.dtype, dict)
+ and col in self.dtype
+ and (
+ not is_numeric_dtype(self.dtype[col])
+ or is_bool_dtype(self.dtype[col])
+ )
+ ):
+ no_thousands_columns.add(i)
+ return no_thousands_columns
+
+
+class FixedWidthReader(abc.Iterator):
+ """
+ A reader of fixed-width lines.
+ """
+
+ def __init__(
+ self,
+ f: IO[str] | ReadCsvBuffer[str],
+ colspecs: list[tuple[int, int]] | Literal["infer"],
+ delimiter: str | None,
+ comment: str | None,
+ skiprows: set[int] | None = None,
+ infer_nrows: int = 100,
+ ) -> None:
+ self.f = f
+ self.buffer: Iterator | None = None
+ self.delimiter = "\r\n" + delimiter if delimiter else "\n\r\t "
+ self.comment = comment
+ if colspecs == "infer":
+ self.colspecs = self.detect_colspecs(
+ infer_nrows=infer_nrows, skiprows=skiprows
+ )
+ else:
+ self.colspecs = colspecs
+
+ if not isinstance(self.colspecs, (tuple, list)):
+ raise TypeError(
+ "column specifications must be a list or tuple, "
+ f"input was a {type(colspecs).__name__}"
+ )
+
+ for colspec in self.colspecs:
+ if not (
+ isinstance(colspec, (tuple, list))
+ and len(colspec) == 2
+ and isinstance(colspec[0], (int, np.integer, type(None)))
+ and isinstance(colspec[1], (int, np.integer, type(None)))
+ ):
+ raise TypeError(
+ "Each column specification must be a "
+ "2-element tuple or list of integers"
+ )
+
+ def get_rows(self, infer_nrows: int, skiprows: set[int] | None = None) -> list[str]:
+ """
+ Read rows from self.f, skipping as specified.
+
+ We distinguish buffer_rows (the first <= infer_nrows
+ lines) from the rows returned to detect_colspecs
+ because it's simpler to leave the other locations
+ with skiprows logic alone than to modify them to
+ deal with the fact we skipped some rows here as
+ well.
+
+ Parameters
+ ----------
+ infer_nrows : int
+ Number of rows to read from self.f, not counting
+ rows that are skipped.
+ skiprows: set, optional
+ Indices of rows to skip.
+
+ Returns
+ -------
+ detect_rows : list of str
+ A list containing the rows to read.
+
+ """
+ if skiprows is None:
+ skiprows = set()
+ buffer_rows = []
+ detect_rows = []
+ for i, row in enumerate(self.f):
+ if i not in skiprows:
+ detect_rows.append(row)
+ buffer_rows.append(row)
+ if len(detect_rows) >= infer_nrows:
+ break
+ self.buffer = iter(buffer_rows)
+ return detect_rows
+
+ def detect_colspecs(
+ self, infer_nrows: int = 100, skiprows: set[int] | None = None
+ ) -> list[tuple[int, int]]:
+ # Regex escape the delimiters
+ delimiters = "".join([rf"\{x}" for x in self.delimiter])
+ pattern = re.compile(f"([^{delimiters}]+)")
+ rows = self.get_rows(infer_nrows, skiprows)
+ if not rows:
+ raise EmptyDataError("No rows from which to infer column width")
+ max_len = max(map(len, rows))
+ mask = np.zeros(max_len + 1, dtype=int)
+ if self.comment is not None:
+ rows = [row.partition(self.comment)[0] for row in rows]
+ for row in rows:
+ for m in pattern.finditer(row):
+ mask[m.start() : m.end()] = 1
+ shifted = np.roll(mask, 1)
+ shifted[0] = 0
+ edges = np.where((mask ^ shifted) == 1)[0]
+ edge_pairs = list(zip(edges[::2], edges[1::2]))
+ return edge_pairs
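+
+ # Worked example (assumed rows): for ["aa  bb", "cc  dd"] the mask over
+ # character positions is 1100110, the mask/shifted XOR places edges at
+ # 0, 2, 4 and 6, and the inferred colspecs are [(0, 2), (4, 6)].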
+
+ def __next__(self) -> list[str]:
+ # Argument 1 to "next" has incompatible type "Union[IO[str],
+ # ReadCsvBuffer[str]]"; expected "SupportsNext[str]"
+ if self.buffer is not None:
+ try:
+ line = next(self.buffer)
+ except StopIteration:
+ self.buffer = None
+ line = next(self.f) # type: ignore[arg-type]
+ else:
+ line = next(self.f) # type: ignore[arg-type]
+ # Note: 'colspecs' is a sequence of half-open intervals.
+ return [line[from_:to].strip(self.delimiter) for (from_, to) in self.colspecs]
+
+
+class FixedWidthFieldParser(PythonParser):
+ """
+ Specialization that converts fixed-width fields into DataFrames.
+ See PythonParser for details.
+ """
+
+ def __init__(self, f: ReadCsvBuffer[str], **kwds) -> None:
+ # Support iterators, convert to a list.
+ self.colspecs = kwds.pop("colspecs")
+ self.infer_nrows = kwds.pop("infer_nrows")
+ PythonParser.__init__(self, f, **kwds)
+
+ def _make_reader(self, f: IO[str] | ReadCsvBuffer[str]) -> FixedWidthReader:
+ return FixedWidthReader(
+ f,
+ self.colspecs,
+ self.delimiter,
+ self.comment,
+ self.skiprows,
+ self.infer_nrows,
+ )
+
+ def _remove_empty_lines(self, lines: list[list[Scalar]]) -> list[list[Scalar]]:
+ """
+ Returns the list of lines without the empty ones. With fixed-width
+ fields, empty lines become arrays of empty strings.
+
+ See PythonParser._remove_empty_lines.
+ """
+ return [
+ line
+ for line in lines
+ if any(not isinstance(e, str) or e.strip() for e in line)
+ ]
+
+
+def count_empty_vals(vals) -> int:
+ return sum(1 for v in vals if v == "" or v is None)
+
+
+def _validate_skipfooter_arg(skipfooter: int) -> int:
+ """
+ Validate the 'skipfooter' parameter.
+
+ Checks whether 'skipfooter' is a non-negative integer.
+ Raises a ValueError if that is not the case.
+
+ Parameters
+ ----------
+ skipfooter : non-negative integer
+ The number of rows to skip at the end of the file.
+
+ Returns
+ -------
+ validated_skipfooter : non-negative integer
+ The original input if the validation succeeds.
+
+ Raises
+ ------
+ ValueError : 'skipfooter' was not a non-negative integer.
+ """
+ if not is_integer(skipfooter):
+ raise ValueError("skipfooter must be an integer")
+
+ if skipfooter < 0:
+ raise ValueError("skipfooter cannot be negative")
+
+ # Incompatible return value type (got "Union[int, integer[Any]]", expected "int")
+ return skipfooter # type: ignore[return-value]
diff --git a/evalkit_eagle/lib/python3.10/site-packages/pandas/io/parsers/readers.py b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/parsers/readers.py
new file mode 100644
index 0000000000000000000000000000000000000000..e04f27b56061030d19081d87439f0461fa53cc76
--- /dev/null
+++ b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/parsers/readers.py
@@ -0,0 +1,2383 @@
+"""
+Module contains tools for processing files into DataFrames or other objects
+
+GH#48849 provides a convenient way of deprecating keyword arguments
+"""
+from __future__ import annotations
+
+from collections import (
+ abc,
+ defaultdict,
+)
+import csv
+import sys
+from textwrap import fill
+from typing import (
+ IO,
+ TYPE_CHECKING,
+ Any,
+ Callable,
+ Literal,
+ NamedTuple,
+ TypedDict,
+ overload,
+)
+import warnings
+
+import numpy as np
+
+from pandas._config import using_copy_on_write
+
+from pandas._libs import lib
+from pandas._libs.parsers import STR_NA_VALUES
+from pandas.errors import (
+ AbstractMethodError,
+ ParserWarning,
+)
+from pandas.util._decorators import Appender
+from pandas.util._exceptions import find_stack_level
+from pandas.util._validators import check_dtype_backend
+
+from pandas.core.dtypes.common import (
+ is_file_like,
+ is_float,
+ is_hashable,
+ is_integer,
+ is_list_like,
+ pandas_dtype,
+)
+
+from pandas import Series
+from pandas.core.frame import DataFrame
+from pandas.core.indexes.api import RangeIndex
+from pandas.core.shared_docs import _shared_docs
+
+from pandas.io.common import (
+ IOHandles,
+ get_handle,
+ stringify_path,
+ validate_header_arg,
+)
+from pandas.io.parsers.arrow_parser_wrapper import ArrowParserWrapper
+from pandas.io.parsers.base_parser import (
+ ParserBase,
+ is_index_col,
+ parser_defaults,
+)
+from pandas.io.parsers.c_parser_wrapper import CParserWrapper
+from pandas.io.parsers.python_parser import (
+ FixedWidthFieldParser,
+ PythonParser,
+)
+
+if TYPE_CHECKING:
+ from collections.abc import (
+ Hashable,
+ Iterable,
+ Mapping,
+ Sequence,
+ )
+ from types import TracebackType
+
+ from pandas._typing import (
+ CompressionOptions,
+ CSVEngine,
+ DtypeArg,
+ DtypeBackend,
+ FilePath,
+ IndexLabel,
+ ReadCsvBuffer,
+ Self,
+ StorageOptions,
+ UsecolsArgType,
+ )
+
+
+_doc_read_csv_and_table = (
+ r"""
+{summary}
+
+Also supports optionally iterating or breaking the file
+into chunks.
+
+Additional help can be found in the online docs for
+`IO Tools <https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html>`_.
+
+Parameters
+----------
+filepath_or_buffer : str, path object or file-like object
+ Any valid string path is acceptable. The string could be a URL. Valid
+ URL schemes include http, ftp, s3, gs, and file. For file URLs, a host is
+ expected. A local file could be: file://localhost/path/to/table.csv.
+
+ If you want to pass in a path object, pandas accepts any ``os.PathLike``.
+
+ By file-like object, we refer to objects with a ``read()`` method, such as
+ a file handle (e.g. via builtin ``open`` function) or ``StringIO``.
+sep : str, default {_default_sep}
+ Character or regex pattern to treat as the delimiter. If ``sep=None``, the
+ C engine cannot automatically detect
+ the separator, but the Python parsing engine can, meaning the latter will
+ be used and automatically detect the separator from only the first valid
+ row of the file by Python's builtin sniffer tool, ``csv.Sniffer``.
+ In addition, separators longer than 1 character and different from
+ ``'\s+'`` will be interpreted as regular expressions and will also force
+ the use of the Python parsing engine. Note that regex delimiters are prone
+ to ignoring quoted data. Regex example: ``'\r\t'``.
+delimiter : str, optional
+ Alias for ``sep``.
+header : int, Sequence of int, 'infer' or None, default 'infer'
+ Row number(s) containing column labels and marking the start of the
+ data (zero-indexed). Default behavior is to infer the column names: if no ``names``
+ are passed the behavior is identical to ``header=0`` and column
+ names are inferred from the first line of the file, if column
+ names are passed explicitly to ``names`` then the behavior is identical to
+ ``header=None``. Explicitly pass ``header=0`` to be able to
+ replace existing names. The header can be a list of integers that
+ specify row locations for a :class:`~pandas.MultiIndex` on the columns
+ e.g. ``[0, 1, 3]``. Intervening rows that are not specified will be
+ skipped (e.g. 2 in this example is skipped). Note that this
+ parameter ignores commented lines and empty lines if
+ ``skip_blank_lines=True``, so ``header=0`` denotes the first line of
+ data rather than the first line of the file.
+names : Sequence of Hashable, optional
+ Sequence of column labels to apply. If the file contains a header row,
+ then you should explicitly pass ``header=0`` to override the column names.
+ Duplicates in this list are not allowed.
+index_col : Hashable, Sequence of Hashable or False, optional
+ Column(s) to use as row label(s), denoted either by column labels or column
+ indices. If a sequence of labels or indices is given, :class:`~pandas.MultiIndex`
+ will be formed for the row labels.
+
+ Note: ``index_col=False`` can be used to force pandas to *not* use the first
+ column as the index, e.g., when you have a malformed file with delimiters at
+ the end of each line.
+usecols : Sequence of Hashable or Callable, optional
+ Subset of columns to select, denoted either by column labels or column indices.
+ If list-like, all elements must either
+ be positional (i.e. integer indices into the document columns) or strings
+ that correspond to column names provided either by the user in ``names`` or
+ inferred from the document header row(s). If ``names`` are given, the document
+ header row(s) are not taken into account. For example, a valid list-like
+ ``usecols`` parameter would be ``[0, 1, 2]`` or ``['foo', 'bar', 'baz']``.
+ Element order is ignored, so ``usecols=[0, 1]`` is the same as ``[1, 0]``.
+ To instantiate a :class:`~pandas.DataFrame` from ``data`` with element order
+ preserved use ``pd.read_csv(data, usecols=['foo', 'bar'])[['foo', 'bar']]``
+ for columns in ``['foo', 'bar']`` order or
+ ``pd.read_csv(data, usecols=['foo', 'bar'])[['bar', 'foo']]``
+ for ``['bar', 'foo']`` order.
+
+ If callable, the callable function will be evaluated against the column
+ names, returning names where the callable function evaluates to ``True``. An
+ example of a valid callable argument would be ``lambda x: x.upper() in
+ ['AAA', 'BBB', 'DDD']``. Using this parameter results in much faster
+ parsing time and lower memory usage.
+dtype : dtype or dict of {{Hashable : dtype}}, optional
+ Data type(s) to apply to either the whole dataset or individual columns.
+ E.g., ``{{'a': np.float64, 'b': np.int32, 'c': 'Int64'}}``
+ Use ``str`` or ``object`` together with suitable ``na_values`` settings
+ to preserve and not interpret ``dtype``.
+ If ``converters`` are specified, they will be applied INSTEAD
+ of ``dtype`` conversion.
+
+ .. versionadded:: 1.5.0
+
+ Support for ``defaultdict`` was added. Specify a ``defaultdict`` as input where
+ the default determines the ``dtype`` of the columns which are not explicitly
+ listed.
+engine : {{'c', 'python', 'pyarrow'}}, optional
+ Parser engine to use. The C and pyarrow engines are faster, while the python engine
+ is currently more feature-complete. Multithreading is currently only supported by
+ the pyarrow engine.
+
+ .. versionadded:: 1.4.0
+
+ The 'pyarrow' engine was added as an *experimental* engine, and some features
+ are unsupported, or may not work correctly, with this engine.
+converters : dict of {{Hashable : Callable}}, optional
+ Functions for converting values in specified columns. Keys can either
+ be column labels or column indices.
+true_values : list, optional
+ Values to consider as ``True`` in addition to case-insensitive variants of 'True'.
+false_values : list, optional
+ Values to consider as ``False`` in addition to case-insensitive variants of 'False'.
+skipinitialspace : bool, default False
+ Skip spaces after delimiter.
+skiprows : int, list of int or Callable, optional
+ Line numbers to skip (0-indexed) or number of lines to skip (``int``)
+ at the start of the file.
+
+ If callable, the callable function will be evaluated against the row
+ indices, returning ``True`` if the row should be skipped and ``False`` otherwise.
+ An example of a valid callable argument would be ``lambda x: x in [0, 2]``.
+skipfooter : int, default 0
+ Number of lines at bottom of file to skip (Unsupported with ``engine='c'``).
+nrows : int, optional
+ Number of rows of file to read. Useful for reading pieces of large files.
+na_values : Hashable, Iterable of Hashable or dict of {{Hashable : Iterable}}, optional
+ Additional strings to recognize as ``NA``/``NaN``. If ``dict`` passed, specific
+ per-column ``NA`` values. By default the following values are interpreted as
+ ``NaN``: " """
+ + fill('", "'.join(sorted(STR_NA_VALUES)), 70, subsequent_indent=" ")
+ + """ ".
+
+keep_default_na : bool, default True
+ Whether or not to include the default ``NaN`` values when parsing the data.
+ Depending on whether ``na_values`` is passed in, the behavior is as follows:
+
+ * If ``keep_default_na`` is ``True``, and ``na_values`` are specified, ``na_values``
+ is appended to the default ``NaN`` values used for parsing.
+ * If ``keep_default_na`` is ``True``, and ``na_values`` are not specified, only
+ the default ``NaN`` values are used for parsing.
+ * If ``keep_default_na`` is ``False``, and ``na_values`` are specified, only
+ the ``NaN`` values specified ``na_values`` are used for parsing.
+ * If ``keep_default_na`` is ``False``, and ``na_values`` are not specified, no
+ strings will be parsed as ``NaN``.
+
+ Note that if ``na_filter`` is passed in as ``False``, the ``keep_default_na`` and
+ ``na_values`` parameters will be ignored.
+na_filter : bool, default True
+ Detect missing value markers (empty strings and the value of ``na_values``). In
+ data without any ``NA`` values, passing ``na_filter=False`` can improve the
+ performance of reading a large file.
+verbose : bool, default False
+ Indicate number of ``NA`` values placed in non-numeric columns.
+
+ .. deprecated:: 2.2.0
+skip_blank_lines : bool, default True
+ If ``True``, skip over blank lines rather than interpreting as ``NaN`` values.
+parse_dates : bool, list of Hashable, list of lists or dict of {{Hashable : list}}, \
+default False
+ The behavior is as follows:
+
+ * ``bool``. If ``True`` -> try parsing the index. Note: Automatically set to
+ ``True`` if ``date_format`` or ``date_parser`` arguments have been passed.
+ * ``list`` of ``int`` or names. e.g. If ``[1, 2, 3]`` -> try parsing columns 1, 2, 3
+ each as a separate date column.
+ * ``list`` of ``list``. e.g. If ``[[1, 3]]`` -> combine columns 1 and 3 and parse
+ as a single date column. Values are joined with a space before parsing.
+ * ``dict``, e.g. ``{{'foo' : [1, 3]}}`` -> parse columns 1, 3 as date and call
+ result 'foo'. Values are joined with a space before parsing.
+
+ If a column or index cannot be represented as an array of ``datetime``,
+ say because of an unparsable value or a mixture of timezones, the column
+ or index will be returned unaltered as an ``object`` data type. For
+ non-standard ``datetime`` parsing, use :func:`~pandas.to_datetime` after
+ :func:`~pandas.read_csv`.
+
+ Note: A fast-path exists for iso8601-formatted dates.
+infer_datetime_format : bool, default False
+ If ``True`` and ``parse_dates`` is enabled, pandas will attempt to infer the
+ format of the ``datetime`` strings in the columns, and if it can be inferred,
+ switch to a faster method of parsing them. In some cases this can increase
+ the parsing speed by 5-10x.
+
+ .. deprecated:: 2.0.0
+ A strict version of this argument is now the default, passing it has no effect.
+
+keep_date_col : bool, default False
+ If ``True`` and ``parse_dates`` specifies combining multiple columns then
+ keep the original columns.
+date_parser : Callable, optional
+ Function to use for converting a sequence of string columns to an array of
+ ``datetime`` instances. The default uses ``dateutil.parser.parser`` to do the
+ conversion. pandas will try to call ``date_parser`` in three different ways,
+ advancing to the next if an exception occurs: 1) Pass one or more arrays
+ (as defined by ``parse_dates``) as arguments; 2) concatenate (row-wise) the
+ string values from the columns defined by ``parse_dates`` into a single array
+ and pass that; and 3) call ``date_parser`` once for each row using one or
+ more strings (corresponding to the columns defined by ``parse_dates``) as
+ arguments.
+
+ .. deprecated:: 2.0.0
+ Use ``date_format`` instead, or read in as ``object`` and then apply
+ :func:`~pandas.to_datetime` as-needed.
+date_format : str or dict of column -> format, optional
+ Format to use for parsing dates when used in conjunction with ``parse_dates``.
+ The strftime to parse time, e.g. :const:`"%d/%m/%Y"`. See
+ `strftime documentation
+ <https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior>`_ for more information on choices, though
+ note that :const:`"%f"` will parse all the way up to nanoseconds.
+ You can also pass:
+
+ - "ISO8601", to parse any `ISO8601 <https://en.wikipedia.org/wiki/ISO_8601>`_
+ time string (not necessarily in exactly the same format);
+ - "mixed", to infer the format for each element individually. This is risky,
+ and you should probably use it along with `dayfirst`.
+
+ .. versionadded:: 2.0.0
+dayfirst : bool, default False
+ DD/MM format dates, international and European format.
+cache_dates : bool, default True
+ If ``True``, use a cache of unique, converted dates to apply the ``datetime``
+ conversion. May produce significant speed-up when parsing duplicate
+ date strings, especially ones with timezone offsets.
+
+iterator : bool, default False
+ Return ``TextFileReader`` object for iteration or getting chunks with
+ ``get_chunk()``.
+chunksize : int, optional
+ Number of lines to read from the file per chunk. Passing a value will cause the
+ function to return a ``TextFileReader`` object for iteration.
+ See the `IO Tools docs
+ <https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html#io-chunking>`_
+ for more information on ``iterator`` and ``chunksize``.
+
+{decompression_options}
+
+ .. versionchanged:: 1.4.0 Zstandard support.
+
+thousands : str (length 1), optional
+ Character acting as the thousands separator in numerical values.
+decimal : str (length 1), default '.'
+ Character to recognize as decimal point (e.g., use ',' for European data).
+lineterminator : str (length 1), optional
+ Character used to denote a line break. Only valid with C parser.
+quotechar : str (length 1), optional
+ Character used to denote the start and end of a quoted item. Quoted
+ items can include the ``delimiter`` and it will be ignored.
+quoting : {{0 or csv.QUOTE_MINIMAL, 1 or csv.QUOTE_ALL, 2 or csv.QUOTE_NONNUMERIC, \
+3 or csv.QUOTE_NONE}}, default csv.QUOTE_MINIMAL
+ Control field quoting behavior per ``csv.QUOTE_*`` constants. Default is
+ ``csv.QUOTE_MINIMAL`` (i.e., 0) which implies that only fields containing special
+ characters are quoted (e.g., characters defined in ``quotechar``, ``delimiter``,
+ or ``lineterminator``).
+doublequote : bool, default True
+ When ``quotechar`` is specified and ``quoting`` is not ``QUOTE_NONE``, indicate
+ whether or not to interpret two consecutive ``quotechar`` elements INSIDE a
+ field as a single ``quotechar`` element.
+escapechar : str (length 1), optional
+ Character used to escape other characters.
+comment : str (length 1), optional
+ Character indicating that the remainder of line should not be parsed.
+ If found at the beginning
+ of a line, the line will be ignored altogether. This parameter must be a
+ single character. Like empty lines (as long as ``skip_blank_lines=True``),
+ fully commented lines are ignored by the parameter ``header`` but not by
+ ``skiprows``. For example, if ``comment='#'``, parsing
+ ``#empty\\na,b,c\\n1,2,3`` with ``header=0`` will result in ``'a,b,c'`` being
+ treated as the header.
+encoding : str, optional, default 'utf-8'
+ Encoding to use for UTF when reading/writing (ex. ``'utf-8'``). `List of Python
+ standard encodings
+ <https://docs.python.org/3/library/codecs.html#standard-encodings>`_ .
+
+encoding_errors : str, optional, default 'strict'
+ How encoding errors are treated. `List of possible values
+ <https://docs.python.org/3/library/codecs.html#error-handlers>`_ .
+
+ .. versionadded:: 1.3.0
+
+dialect : str or csv.Dialect, optional
+ If provided, this parameter will override values (default or not) for the
+ following parameters: ``delimiter``, ``doublequote``, ``escapechar``,
+ ``skipinitialspace``, ``quotechar``, and ``quoting``. If it is necessary to
+ override values, a ``ParserWarning`` will be issued. See ``csv.Dialect``
+ documentation for more details.
+on_bad_lines : {{'error', 'warn', 'skip'}} or Callable, default 'error'
+ Specifies what to do upon encountering a bad line (a line with too many fields).
+ Allowed values are :
+
+ - ``'error'``, raise an Exception when a bad line is encountered.
+ - ``'warn'``, raise a warning when a bad line is encountered and skip that line.
+ - ``'skip'``, skip bad lines without raising or warning when they are encountered.
+
+ .. versionadded:: 1.3.0
+
+ .. versionadded:: 1.4.0
+
+ - Callable, function with signature
+ ``(bad_line: list[str]) -> list[str] | None`` that will process a single
+ bad line. ``bad_line`` is a list of strings split by the ``sep``.
+ If the function returns ``None``, the bad line will be ignored.
+ If the function returns a new ``list`` of strings with more elements than
+ expected, a ``ParserWarning`` will be emitted while dropping extra elements.
+ Only supported when ``engine='python'``
+
+ .. versionchanged:: 2.2.0
+
+ - Callable, function with signature
+ as described in `pyarrow documentation
+ <https://arrow.apache.org/docs/python/generated/pyarrow.csv.ParseOptions.html#pyarrow.csv.ParseOptions.invalid_row_handler>`_ when ``engine='pyarrow'``
+
+delim_whitespace : bool, default False
+ Specifies whether or not whitespace (e.g. ``' '`` or ``'\\t'``) will be
+ used as the ``sep`` delimiter. Equivalent to setting ``sep='\\s+'``. If this option
+ is set to ``True``, nothing should be passed in for the ``delimiter``
+ parameter.
+
+ .. deprecated:: 2.2.0
+ Use ``sep="\\s+"`` instead.
+low_memory : bool, default True
+ Internally process the file in chunks, resulting in lower memory use
+ while parsing, but possibly mixed type inference. To ensure no mixed
+ types either set ``False``, or specify the type with the ``dtype`` parameter.
+ Note that the entire file is read into a single :class:`~pandas.DataFrame`
+ regardless, use the ``chunksize`` or ``iterator`` parameter to return the data in
+ chunks. (Only valid with C parser).
+memory_map : bool, default False
+ If a filepath is provided for ``filepath_or_buffer``, map the file object
+ directly onto memory and access the data directly from there. Using this
+ option can improve performance because there is no longer any I/O overhead.
+float_precision : {{'high', 'legacy', 'round_trip'}}, optional
+ Specifies which converter the C engine should use for floating-point
+ values. The options are ``None`` or ``'high'`` for the ordinary converter,
+ ``'legacy'`` for the original lower precision pandas converter, and
+ ``'round_trip'`` for the round-trip converter.
+
+{storage_options}
+
+dtype_backend : {{'numpy_nullable', 'pyarrow'}}, default 'numpy_nullable'
+ Back-end data type applied to the resultant :class:`DataFrame`
+ (still experimental). Behaviour is as follows:
+
+ * ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame`
+ (default).
+ * ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype`
+ DataFrame.
+
+ .. versionadded:: 2.0
+
+Returns
+-------
+DataFrame or TextFileReader
+ A comma-separated values (csv) file is returned as a two-dimensional
+ data structure with labeled axes.
+
+See Also
+--------
+DataFrame.to_csv : Write DataFrame to a comma-separated values (csv) file.
+{see_also_func_name} : {see_also_func_summary}
+read_fwf : Read a table of fixed-width formatted lines into DataFrame.
+
+Examples
+--------
+>>> pd.{func_name}('data.csv') # doctest: +SKIP
+"""
+)
+
+
+class _C_Parser_Defaults(TypedDict):
+ delim_whitespace: Literal[False]
+ na_filter: Literal[True]
+ low_memory: Literal[True]
+ memory_map: Literal[False]
+ float_precision: None
+
+
+_c_parser_defaults: _C_Parser_Defaults = {
+ "delim_whitespace": False,
+ "na_filter": True,
+ "low_memory": True,
+ "memory_map": False,
+ "float_precision": None,
+}
+
+
+class _Fwf_Defaults(TypedDict):
+ colspecs: Literal["infer"]
+ infer_nrows: Literal[100]
+ widths: None
+
+
+_fwf_defaults: _Fwf_Defaults = {"colspecs": "infer", "infer_nrows": 100, "widths": None}
+_c_unsupported = {"skipfooter"}
+_python_unsupported = {"low_memory", "float_precision"}
+_pyarrow_unsupported = {
+ "skipfooter",
+ "float_precision",
+ "chunksize",
+ "comment",
+ "nrows",
+ "thousands",
+ "memory_map",
+ "dialect",
+ "delim_whitespace",
+ "quoting",
+ "lineterminator",
+ "converters",
+ "iterator",
+ "dayfirst",
+ "verbose",
+ "skipinitialspace",
+ "low_memory",
+}
+
+
+class _DeprecationConfig(NamedTuple):
+ default_value: Any
+ msg: str | None
+
+
+@overload
+def validate_integer(name: str, val: None, min_val: int = ...) -> None:
+ ...
+
+
+@overload
+def validate_integer(name: str, val: float, min_val: int = ...) -> int:
+ ...
+
+
+@overload
+def validate_integer(name: str, val: int | None, min_val: int = ...) -> int | None:
+ ...
+
+
+def validate_integer(
+ name: str, val: int | float | None, min_val: int = 0
+) -> int | None:
+ """
+ Checks whether the 'name' parameter for parsing is either
+ an integer OR float that can SAFELY be cast to an integer
+ without losing accuracy. Raises a ValueError if that is
+ not the case.
+
+ Parameters
+ ----------
+ name : str
+ Parameter name (used for error reporting)
+ val : int or float
+ The value to check
+ min_val : int
+ Minimum allowed value (val < min_val will result in a ValueError)
+ """
+ if val is None:
+ return val
+
+ msg = f"'{name:s}' must be an integer >={min_val:d}"
+ if is_float(val):
+ if int(val) != val:
+ raise ValueError(msg)
+ val = int(val)
+ elif not (is_integer(val) and val >= min_val):
+ raise ValueError(msg)
+
+ return int(val)
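+
+# A hedged sketch of the accepted inputs:
+#
+#   validate_integer("chunksize", 3, 1)    # -> 3
+#   validate_integer("chunksize", 3.0, 1)  # -> 3 (float safely cast)
+#   validate_integer("chunksize", None)    # -> None
+#   validate_integer("chunksize", 2.5, 1)  # raises ValueError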
+
+
+def _validate_names(names: Sequence[Hashable] | None) -> None:
+ """
+ Raise ValueError if the `names` parameter contains duplicates or has an
+ invalid data type.
+
+ Parameters
+ ----------
+ names : array-like or None
+ An array containing a list of the names used for the output DataFrame.
+
+ Raises
+ ------
+ ValueError
+ If names are not unique or are not ordered (e.g. set).
+ """
+ if names is not None:
+ if len(names) != len(set(names)):
+ raise ValueError("Duplicate names are not allowed.")
+ if not (
+ is_list_like(names, allow_sets=False) or isinstance(names, abc.KeysView)
+ ):
+ raise ValueError("Names should be an ordered collection.")
+
+
+def _read(
+ filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str], kwds
+) -> DataFrame | TextFileReader:
+ """Generic reader of line files."""
+ # if we pass a date_parser and parse_dates=False, we should not parse the
+ # dates GH#44366
+ if kwds.get("parse_dates", None) is None:
+ if (
+ kwds.get("date_parser", lib.no_default) is lib.no_default
+ and kwds.get("date_format", None) is None
+ ):
+ kwds["parse_dates"] = False
+ else:
+ kwds["parse_dates"] = True
+
+ # Extract some of the arguments (pass chunksize on).
+ iterator = kwds.get("iterator", False)
+ chunksize = kwds.get("chunksize", None)
+ if kwds.get("engine") == "pyarrow":
+ if iterator:
+ raise ValueError(
+ "The 'iterator' option is not supported with the 'pyarrow' engine"
+ )
+
+ if chunksize is not None:
+ raise ValueError(
+ "The 'chunksize' option is not supported with the 'pyarrow' engine"
+ )
+ else:
+ chunksize = validate_integer("chunksize", chunksize, 1)
+
+ nrows = kwds.get("nrows", None)
+
+ # Check for duplicates in names.
+ _validate_names(kwds.get("names", None))
+
+ # Create the parser.
+ parser = TextFileReader(filepath_or_buffer, **kwds)
+
+ if chunksize or iterator:
+ return parser
+
+ with parser:
+ return parser.read(nrows)
+
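+# Dispatch sketch for ``_read`` (illustrative): with ``iterator=True`` or a
+# ``chunksize`` the open TextFileReader itself is returned; otherwise the whole
+# file is materialized. "data.csv" and ``process`` are placeholders:
+#
+#     reader = pd.read_csv("data.csv", chunksize=1000)  # TextFileReader
+#     for chunk in reader:  # each chunk is a DataFrame of up to 1000 rows
+#         process(chunk)
+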
+
+# iterator=True -> TextFileReader
+@overload
+def read_csv(
+ filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str],
+ *,
+ sep: str | None | lib.NoDefault = ...,
+ delimiter: str | None | lib.NoDefault = ...,
+ header: int | Sequence[int] | None | Literal["infer"] = ...,
+ names: Sequence[Hashable] | None | lib.NoDefault = ...,
+ index_col: IndexLabel | Literal[False] | None = ...,
+ usecols: UsecolsArgType = ...,
+ dtype: DtypeArg | None = ...,
+ engine: CSVEngine | None = ...,
+ converters: Mapping[Hashable, Callable] | None = ...,
+ true_values: list | None = ...,
+ false_values: list | None = ...,
+ skipinitialspace: bool = ...,
+ skiprows: list[int] | int | Callable[[Hashable], bool] | None = ...,
+ skipfooter: int = ...,
+ nrows: int | None = ...,
+ na_values: Hashable
+ | Iterable[Hashable]
+ | Mapping[Hashable, Iterable[Hashable]]
+ | None = ...,
+ na_filter: bool = ...,
+ verbose: bool | lib.NoDefault = ...,
+ skip_blank_lines: bool = ...,
+ parse_dates: bool | Sequence[Hashable] | None = ...,
+ infer_datetime_format: bool | lib.NoDefault = ...,
+ keep_date_col: bool | lib.NoDefault = ...,
+ date_parser: Callable | lib.NoDefault = ...,
+ date_format: str | dict[Hashable, str] | None = ...,
+ dayfirst: bool = ...,
+ cache_dates: bool = ...,
+ iterator: Literal[True],
+ chunksize: int | None = ...,
+ compression: CompressionOptions = ...,
+ thousands: str | None = ...,
+ decimal: str = ...,
+ lineterminator: str | None = ...,
+ quotechar: str = ...,
+ quoting: int = ...,
+ doublequote: bool = ...,
+ escapechar: str | None = ...,
+ comment: str | None = ...,
+ encoding: str | None = ...,
+ encoding_errors: str | None = ...,
+ dialect: str | csv.Dialect | None = ...,
+ on_bad_lines=...,
+ delim_whitespace: bool | lib.NoDefault = ...,
+ low_memory: bool = ...,
+ memory_map: bool = ...,
+ float_precision: Literal["high", "legacy"] | None = ...,
+ storage_options: StorageOptions = ...,
+ dtype_backend: DtypeBackend | lib.NoDefault = ...,
+) -> TextFileReader:
+ ...
+
+
+# chunksize=int -> TextFileReader
+@overload
+def read_csv(
+ filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str],
+ *,
+ sep: str | None | lib.NoDefault = ...,
+ delimiter: str | None | lib.NoDefault = ...,
+ header: int | Sequence[int] | None | Literal["infer"] = ...,
+ names: Sequence[Hashable] | None | lib.NoDefault = ...,
+ index_col: IndexLabel | Literal[False] | None = ...,
+ usecols: UsecolsArgType = ...,
+ dtype: DtypeArg | None = ...,
+ engine: CSVEngine | None = ...,
+ converters: Mapping[Hashable, Callable] | None = ...,
+ true_values: list | None = ...,
+ false_values: list | None = ...,
+ skipinitialspace: bool = ...,
+ skiprows: list[int] | int | Callable[[Hashable], bool] | None = ...,
+ skipfooter: int = ...,
+ nrows: int | None = ...,
+ na_values: Hashable
+ | Iterable[Hashable]
+ | Mapping[Hashable, Iterable[Hashable]]
+ | None = ...,
+ keep_default_na: bool = ...,
+ na_filter: bool = ...,
+ verbose: bool | lib.NoDefault = ...,
+ skip_blank_lines: bool = ...,
+ parse_dates: bool | Sequence[Hashable] | None = ...,
+ infer_datetime_format: bool | lib.NoDefault = ...,
+ keep_date_col: bool | lib.NoDefault = ...,
+ date_parser: Callable | lib.NoDefault = ...,
+ date_format: str | dict[Hashable, str] | None = ...,
+ dayfirst: bool = ...,
+ cache_dates: bool = ...,
+ iterator: bool = ...,
+ chunksize: int,
+ compression: CompressionOptions = ...,
+ thousands: str | None = ...,
+ decimal: str = ...,
+ lineterminator: str | None = ...,
+ quotechar: str = ...,
+ quoting: int = ...,
+ doublequote: bool = ...,
+ escapechar: str | None = ...,
+ comment: str | None = ...,
+ encoding: str | None = ...,
+ encoding_errors: str | None = ...,
+ dialect: str | csv.Dialect | None = ...,
+ on_bad_lines=...,
+ delim_whitespace: bool | lib.NoDefault = ...,
+ low_memory: bool = ...,
+ memory_map: bool = ...,
+ float_precision: Literal["high", "legacy"] | None = ...,
+ storage_options: StorageOptions = ...,
+ dtype_backend: DtypeBackend | lib.NoDefault = ...,
+) -> TextFileReader:
+ ...
+
+
+# default case -> DataFrame
+@overload
+def read_csv(
+ filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str],
+ *,
+ sep: str | None | lib.NoDefault = ...,
+ delimiter: str | None | lib.NoDefault = ...,
+ header: int | Sequence[int] | None | Literal["infer"] = ...,
+ names: Sequence[Hashable] | None | lib.NoDefault = ...,
+ index_col: IndexLabel | Literal[False] | None = ...,
+ usecols: UsecolsArgType = ...,
+ dtype: DtypeArg | None = ...,
+ engine: CSVEngine | None = ...,
+ converters: Mapping[Hashable, Callable] | None = ...,
+ true_values: list | None = ...,
+ false_values: list | None = ...,
+ skipinitialspace: bool = ...,
+ skiprows: list[int] | int | Callable[[Hashable], bool] | None = ...,
+ skipfooter: int = ...,
+ nrows: int | None = ...,
+ na_values: Hashable
+ | Iterable[Hashable]
+ | Mapping[Hashable, Iterable[Hashable]]
+ | None = ...,
+ keep_default_na: bool = ...,
+ na_filter: bool = ...,
+ verbose: bool | lib.NoDefault = ...,
+ skip_blank_lines: bool = ...,
+ parse_dates: bool | Sequence[Hashable] | None = ...,
+ infer_datetime_format: bool | lib.NoDefault = ...,
+ keep_date_col: bool | lib.NoDefault = ...,
+ date_parser: Callable | lib.NoDefault = ...,
+ date_format: str | dict[Hashable, str] | None = ...,
+ dayfirst: bool = ...,
+ cache_dates: bool = ...,
+ iterator: Literal[False] = ...,
+ chunksize: None = ...,
+ compression: CompressionOptions = ...,
+ thousands: str | None = ...,
+ decimal: str = ...,
+ lineterminator: str | None = ...,
+ quotechar: str = ...,
+ quoting: int = ...,
+ doublequote: bool = ...,
+ escapechar: str | None = ...,
+ comment: str | None = ...,
+ encoding: str | None = ...,
+ encoding_errors: str | None = ...,
+ dialect: str | csv.Dialect | None = ...,
+ on_bad_lines=...,
+ delim_whitespace: bool | lib.NoDefault = ...,
+ low_memory: bool = ...,
+ memory_map: bool = ...,
+ float_precision: Literal["high", "legacy"] | None = ...,
+ storage_options: StorageOptions = ...,
+ dtype_backend: DtypeBackend | lib.NoDefault = ...,
+) -> DataFrame:
+ ...
+
+
+# Unions -> DataFrame | TextFileReader
+@overload
+def read_csv(
+ filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str],
+ *,
+ sep: str | None | lib.NoDefault = ...,
+ delimiter: str | None | lib.NoDefault = ...,
+ header: int | Sequence[int] | None | Literal["infer"] = ...,
+ names: Sequence[Hashable] | None | lib.NoDefault = ...,
+ index_col: IndexLabel | Literal[False] | None = ...,
+ usecols: UsecolsArgType = ...,
+ dtype: DtypeArg | None = ...,
+ engine: CSVEngine | None = ...,
+ converters: Mapping[Hashable, Callable] | None = ...,
+ true_values: list | None = ...,
+ false_values: list | None = ...,
+ skipinitialspace: bool = ...,
+ skiprows: list[int] | int | Callable[[Hashable], bool] | None = ...,
+ skipfooter: int = ...,
+ nrows: int | None = ...,
+ na_values: Hashable
+ | Iterable[Hashable]
+ | Mapping[Hashable, Iterable[Hashable]]
+ | None = ...,
+ keep_default_na: bool = ...,
+ na_filter: bool = ...,
+ verbose: bool | lib.NoDefault = ...,
+ skip_blank_lines: bool = ...,
+ parse_dates: bool | Sequence[Hashable] | None = ...,
+ infer_datetime_format: bool | lib.NoDefault = ...,
+ keep_date_col: bool | lib.NoDefault = ...,
+ date_parser: Callable | lib.NoDefault = ...,
+ date_format: str | dict[Hashable, str] | None = ...,
+ dayfirst: bool = ...,
+ cache_dates: bool = ...,
+ iterator: bool = ...,
+ chunksize: int | None = ...,
+ compression: CompressionOptions = ...,
+ thousands: str | None = ...,
+ decimal: str = ...,
+ lineterminator: str | None = ...,
+ quotechar: str = ...,
+ quoting: int = ...,
+ doublequote: bool = ...,
+ escapechar: str | None = ...,
+ comment: str | None = ...,
+ encoding: str | None = ...,
+ encoding_errors: str | None = ...,
+ dialect: str | csv.Dialect | None = ...,
+ on_bad_lines=...,
+ delim_whitespace: bool | lib.NoDefault = ...,
+ low_memory: bool = ...,
+ memory_map: bool = ...,
+ float_precision: Literal["high", "legacy"] | None = ...,
+ storage_options: StorageOptions = ...,
+ dtype_backend: DtypeBackend | lib.NoDefault = ...,
+) -> DataFrame | TextFileReader:
+ ...
+
+
+@Appender(
+ _doc_read_csv_and_table.format(
+ func_name="read_csv",
+ summary="Read a comma-separated values (csv) file into DataFrame.",
+ see_also_func_name="read_table",
+ see_also_func_summary="Read general delimited file into DataFrame.",
+ _default_sep="','",
+ storage_options=_shared_docs["storage_options"],
+ decompression_options=_shared_docs["decompression_options"]
+ % "filepath_or_buffer",
+ )
+)
+def read_csv(
+ filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str],
+ *,
+ sep: str | None | lib.NoDefault = lib.no_default,
+ delimiter: str | None | lib.NoDefault = None,
+ # Column and Index Locations and Names
+ header: int | Sequence[int] | None | Literal["infer"] = "infer",
+ names: Sequence[Hashable] | None | lib.NoDefault = lib.no_default,
+ index_col: IndexLabel | Literal[False] | None = None,
+ usecols: UsecolsArgType = None,
+ # General Parsing Configuration
+ dtype: DtypeArg | None = None,
+ engine: CSVEngine | None = None,
+ converters: Mapping[Hashable, Callable] | None = None,
+ true_values: list | None = None,
+ false_values: list | None = None,
+ skipinitialspace: bool = False,
+ skiprows: list[int] | int | Callable[[Hashable], bool] | None = None,
+ skipfooter: int = 0,
+ nrows: int | None = None,
+ # NA and Missing Data Handling
+ na_values: Hashable
+ | Iterable[Hashable]
+ | Mapping[Hashable, Iterable[Hashable]]
+ | None = None,
+ keep_default_na: bool = True,
+ na_filter: bool = True,
+ verbose: bool | lib.NoDefault = lib.no_default,
+ skip_blank_lines: bool = True,
+ # Datetime Handling
+ parse_dates: bool | Sequence[Hashable] | None = None,
+ infer_datetime_format: bool | lib.NoDefault = lib.no_default,
+ keep_date_col: bool | lib.NoDefault = lib.no_default,
+ date_parser: Callable | lib.NoDefault = lib.no_default,
+ date_format: str | dict[Hashable, str] | None = None,
+ dayfirst: bool = False,
+ cache_dates: bool = True,
+ # Iteration
+ iterator: bool = False,
+ chunksize: int | None = None,
+ # Quoting, Compression, and File Format
+ compression: CompressionOptions = "infer",
+ thousands: str | None = None,
+ decimal: str = ".",
+ lineterminator: str | None = None,
+ quotechar: str = '"',
+ quoting: int = csv.QUOTE_MINIMAL,
+ doublequote: bool = True,
+ escapechar: str | None = None,
+ comment: str | None = None,
+ encoding: str | None = None,
+ encoding_errors: str | None = "strict",
+ dialect: str | csv.Dialect | None = None,
+ # Error Handling
+ on_bad_lines: str = "error",
+ # Internal
+ delim_whitespace: bool | lib.NoDefault = lib.no_default,
+ low_memory: bool = _c_parser_defaults["low_memory"],
+ memory_map: bool = False,
+ float_precision: Literal["high", "legacy"] | None = None,
+ storage_options: StorageOptions | None = None,
+ dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,
+) -> DataFrame | TextFileReader:
+ if keep_date_col is not lib.no_default:
+ # GH#55569
+ warnings.warn(
+ "The 'keep_date_col' keyword in pd.read_csv is deprecated and "
+ "will be removed in a future version. Explicitly remove unwanted "
+ "columns after parsing instead.",
+ FutureWarning,
+ stacklevel=find_stack_level(),
+ )
+ else:
+ keep_date_col = False
+
+ if lib.is_list_like(parse_dates):
+ # GH#55569
+ depr = False
+ # error: Item "bool" of "bool | Sequence[Hashable] | None" has no
+ # attribute "__iter__" (not iterable)
+ if not all(is_hashable(x) for x in parse_dates): # type: ignore[union-attr]
+ depr = True
+ elif isinstance(parse_dates, dict) and any(
+ lib.is_list_like(x) for x in parse_dates.values()
+ ):
+ depr = True
+ if depr:
+ warnings.warn(
+ "Support for nested sequences for 'parse_dates' in pd.read_csv "
+ "is deprecated. Combine the desired columns with pd.to_datetime "
+ "after parsing instead.",
+ FutureWarning,
+ stacklevel=find_stack_level(),
+ )
+
+ if infer_datetime_format is not lib.no_default:
+ warnings.warn(
+ "The argument 'infer_datetime_format' is deprecated and will "
+ "be removed in a future version. "
+ "A strict version of it is now the default, see "
+ "https://pandas.pydata.org/pdeps/0004-consistent-to-datetime-parsing.html. "
+ "You can safely remove this argument.",
+ FutureWarning,
+ stacklevel=find_stack_level(),
+ )
+
+ if delim_whitespace is not lib.no_default:
+ # GH#55569
+ warnings.warn(
+ "The 'delim_whitespace' keyword in pd.read_csv is deprecated and "
+ "will be removed in a future version. Use ``sep='\\s+'`` instead",
+ FutureWarning,
+ stacklevel=find_stack_level(),
+ )
+ else:
+ delim_whitespace = False
+
+ if verbose is not lib.no_default:
+ # GH#55569
+ warnings.warn(
+ "The 'verbose' keyword in pd.read_csv is deprecated and "
+ "will be removed in a future version.",
+ FutureWarning,
+ stacklevel=find_stack_level(),
+ )
+ else:
+ verbose = False
+
+ # locals() should never be modified
+ kwds = locals().copy()
+ del kwds["filepath_or_buffer"]
+ del kwds["sep"]
+
+ kwds_defaults = _refine_defaults_read(
+ dialect,
+ delimiter,
+ delim_whitespace,
+ engine,
+ sep,
+ on_bad_lines,
+ names,
+ defaults={"delimiter": ","},
+ dtype_backend=dtype_backend,
+ )
+ kwds.update(kwds_defaults)
+
+ return _read(filepath_or_buffer, kwds)
+
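+# Migration sketch for the deprecations warned about above (illustrative;
+# "data.csv", "date" and "time" are placeholder names):
+#
+#     pd.read_csv("data.csv", sep=r"\s+")  # instead of delim_whitespace=True
+#     df = pd.read_csv("data.csv")         # instead of parse_dates=[["date", "time"]]
+#     ts = pd.to_datetime(df["date"] + " " + df["time"])  # combine afterwards
+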
+
+# iterator=True -> TextFileReader
+@overload
+def read_table(
+ filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str],
+ *,
+ sep: str | None | lib.NoDefault = ...,
+ delimiter: str | None | lib.NoDefault = ...,
+ header: int | Sequence[int] | None | Literal["infer"] = ...,
+ names: Sequence[Hashable] | None | lib.NoDefault = ...,
+ index_col: IndexLabel | Literal[False] | None = ...,
+ usecols: UsecolsArgType = ...,
+ dtype: DtypeArg | None = ...,
+ engine: CSVEngine | None = ...,
+ converters: Mapping[Hashable, Callable] | None = ...,
+ true_values: list | None = ...,
+ false_values: list | None = ...,
+ skipinitialspace: bool = ...,
+ skiprows: list[int] | int | Callable[[Hashable], bool] | None = ...,
+ skipfooter: int = ...,
+ nrows: int | None = ...,
+ na_values: Sequence[str] | Mapping[str, Sequence[str]] | None = ...,
+ keep_default_na: bool = ...,
+ na_filter: bool = ...,
+ verbose: bool | lib.NoDefault = ...,
+ skip_blank_lines: bool = ...,
+ parse_dates: bool | Sequence[Hashable] = ...,
+ infer_datetime_format: bool | lib.NoDefault = ...,
+ keep_date_col: bool | lib.NoDefault = ...,
+ date_parser: Callable | lib.NoDefault = ...,
+ date_format: str | dict[Hashable, str] | None = ...,
+ dayfirst: bool = ...,
+ cache_dates: bool = ...,
+ iterator: Literal[True],
+ chunksize: int | None = ...,
+ compression: CompressionOptions = ...,
+ thousands: str | None = ...,
+ decimal: str = ...,
+ lineterminator: str | None = ...,
+ quotechar: str = ...,
+ quoting: int = ...,
+ doublequote: bool = ...,
+ escapechar: str | None = ...,
+ comment: str | None = ...,
+ encoding: str | None = ...,
+ encoding_errors: str | None = ...,
+ dialect: str | csv.Dialect | None = ...,
+ on_bad_lines=...,
+ delim_whitespace: bool = ...,
+ low_memory: bool = ...,
+ memory_map: bool = ...,
+ float_precision: str | None = ...,
+ storage_options: StorageOptions = ...,
+ dtype_backend: DtypeBackend | lib.NoDefault = ...,
+) -> TextFileReader:
+ ...
+
+
+# chunksize=int -> TextFileReader
+@overload
+def read_table(
+ filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str],
+ *,
+ sep: str | None | lib.NoDefault = ...,
+ delimiter: str | None | lib.NoDefault = ...,
+ header: int | Sequence[int] | None | Literal["infer"] = ...,
+ names: Sequence[Hashable] | None | lib.NoDefault = ...,
+ index_col: IndexLabel | Literal[False] | None = ...,
+ usecols: UsecolsArgType = ...,
+ dtype: DtypeArg | None = ...,
+ engine: CSVEngine | None = ...,
+ converters: Mapping[Hashable, Callable] | None = ...,
+ true_values: list | None = ...,
+ false_values: list | None = ...,
+ skipinitialspace: bool = ...,
+ skiprows: list[int] | int | Callable[[Hashable], bool] | None = ...,
+ skipfooter: int = ...,
+ nrows: int | None = ...,
+ na_values: Sequence[str] | Mapping[str, Sequence[str]] | None = ...,
+ keep_default_na: bool = ...,
+ na_filter: bool = ...,
+ verbose: bool | lib.NoDefault = ...,
+ skip_blank_lines: bool = ...,
+ parse_dates: bool | Sequence[Hashable] = ...,
+ infer_datetime_format: bool | lib.NoDefault = ...,
+ keep_date_col: bool | lib.NoDefault = ...,
+ date_parser: Callable | lib.NoDefault = ...,
+ date_format: str | dict[Hashable, str] | None = ...,
+ dayfirst: bool = ...,
+ cache_dates: bool = ...,
+ iterator: bool = ...,
+ chunksize: int,
+ compression: CompressionOptions = ...,
+ thousands: str | None = ...,
+ decimal: str = ...,
+ lineterminator: str | None = ...,
+ quotechar: str = ...,
+ quoting: int = ...,
+ doublequote: bool = ...,
+ escapechar: str | None = ...,
+ comment: str | None = ...,
+ encoding: str | None = ...,
+ encoding_errors: str | None = ...,
+ dialect: str | csv.Dialect | None = ...,
+ on_bad_lines=...,
+ delim_whitespace: bool = ...,
+ low_memory: bool = ...,
+ memory_map: bool = ...,
+ float_precision: str | None = ...,
+ storage_options: StorageOptions = ...,
+ dtype_backend: DtypeBackend | lib.NoDefault = ...,
+) -> TextFileReader:
+ ...
+
+
+# default -> DataFrame
+@overload
+def read_table(
+ filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str],
+ *,
+ sep: str | None | lib.NoDefault = ...,
+ delimiter: str | None | lib.NoDefault = ...,
+ header: int | Sequence[int] | None | Literal["infer"] = ...,
+ names: Sequence[Hashable] | None | lib.NoDefault = ...,
+ index_col: IndexLabel | Literal[False] | None = ...,
+ usecols: UsecolsArgType = ...,
+ dtype: DtypeArg | None = ...,
+ engine: CSVEngine | None = ...,
+ converters: Mapping[Hashable, Callable] | None = ...,
+ true_values: list | None = ...,
+ false_values: list | None = ...,
+ skipinitialspace: bool = ...,
+ skiprows: list[int] | int | Callable[[Hashable], bool] | None = ...,
+ skipfooter: int = ...,
+ nrows: int | None = ...,
+ na_values: Sequence[str] | Mapping[str, Sequence[str]] | None = ...,
+ keep_default_na: bool = ...,
+ na_filter: bool = ...,
+ verbose: bool | lib.NoDefault = ...,
+ skip_blank_lines: bool = ...,
+ parse_dates: bool | Sequence[Hashable] = ...,
+ infer_datetime_format: bool | lib.NoDefault = ...,
+ keep_date_col: bool | lib.NoDefault = ...,
+ date_parser: Callable | lib.NoDefault = ...,
+ date_format: str | dict[Hashable, str] | None = ...,
+ dayfirst: bool = ...,
+ cache_dates: bool = ...,
+ iterator: Literal[False] = ...,
+ chunksize: None = ...,
+ compression: CompressionOptions = ...,
+ thousands: str | None = ...,
+ decimal: str = ...,
+ lineterminator: str | None = ...,
+ quotechar: str = ...,
+ quoting: int = ...,
+ doublequote: bool = ...,
+ escapechar: str | None = ...,
+ comment: str | None = ...,
+ encoding: str | None = ...,
+ encoding_errors: str | None = ...,
+ dialect: str | csv.Dialect | None = ...,
+ on_bad_lines=...,
+ delim_whitespace: bool = ...,
+ low_memory: bool = ...,
+ memory_map: bool = ...,
+ float_precision: str | None = ...,
+ storage_options: StorageOptions = ...,
+ dtype_backend: DtypeBackend | lib.NoDefault = ...,
+) -> DataFrame:
+ ...
+
+
+# Unions -> DataFrame | TextFileReader
+@overload
+def read_table(
+ filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str],
+ *,
+ sep: str | None | lib.NoDefault = ...,
+ delimiter: str | None | lib.NoDefault = ...,
+ header: int | Sequence[int] | None | Literal["infer"] = ...,
+ names: Sequence[Hashable] | None | lib.NoDefault = ...,
+ index_col: IndexLabel | Literal[False] | None = ...,
+ usecols: UsecolsArgType = ...,
+ dtype: DtypeArg | None = ...,
+ engine: CSVEngine | None = ...,
+ converters: Mapping[Hashable, Callable] | None = ...,
+ true_values: list | None = ...,
+ false_values: list | None = ...,
+ skipinitialspace: bool = ...,
+ skiprows: list[int] | int | Callable[[Hashable], bool] | None = ...,
+ skipfooter: int = ...,
+ nrows: int | None = ...,
+ na_values: Sequence[str] | Mapping[str, Sequence[str]] | None = ...,
+ keep_default_na: bool = ...,
+ na_filter: bool = ...,
+ verbose: bool | lib.NoDefault = ...,
+ skip_blank_lines: bool = ...,
+ parse_dates: bool | Sequence[Hashable] = ...,
+ infer_datetime_format: bool | lib.NoDefault = ...,
+ keep_date_col: bool | lib.NoDefault = ...,
+ date_parser: Callable | lib.NoDefault = ...,
+ date_format: str | dict[Hashable, str] | None = ...,
+ dayfirst: bool = ...,
+ cache_dates: bool = ...,
+ iterator: bool = ...,
+ chunksize: int | None = ...,
+ compression: CompressionOptions = ...,
+ thousands: str | None = ...,
+ decimal: str = ...,
+ lineterminator: str | None = ...,
+ quotechar: str = ...,
+ quoting: int = ...,
+ doublequote: bool = ...,
+ escapechar: str | None = ...,
+ comment: str | None = ...,
+ encoding: str | None = ...,
+ encoding_errors: str | None = ...,
+ dialect: str | csv.Dialect | None = ...,
+ on_bad_lines=...,
+ delim_whitespace: bool = ...,
+ low_memory: bool = ...,
+ memory_map: bool = ...,
+ float_precision: str | None = ...,
+ storage_options: StorageOptions = ...,
+ dtype_backend: DtypeBackend | lib.NoDefault = ...,
+) -> DataFrame | TextFileReader:
+ ...
+
+
+@Appender(
+ _doc_read_csv_and_table.format(
+ func_name="read_table",
+ summary="Read general delimited file into DataFrame.",
+ see_also_func_name="read_csv",
+ see_also_func_summary=(
+ "Read a comma-separated values (csv) file into DataFrame."
+ ),
+ _default_sep=r"'\\t' (tab-stop)",
+ storage_options=_shared_docs["storage_options"],
+ decompression_options=_shared_docs["decompression_options"]
+ % "filepath_or_buffer",
+ )
+)
+def read_table(
+ filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str],
+ *,
+ sep: str | None | lib.NoDefault = lib.no_default,
+ delimiter: str | None | lib.NoDefault = None,
+ # Column and Index Locations and Names
+ header: int | Sequence[int] | None | Literal["infer"] = "infer",
+ names: Sequence[Hashable] | None | lib.NoDefault = lib.no_default,
+ index_col: IndexLabel | Literal[False] | None = None,
+ usecols: UsecolsArgType = None,
+ # General Parsing Configuration
+ dtype: DtypeArg | None = None,
+ engine: CSVEngine | None = None,
+ converters: Mapping[Hashable, Callable] | None = None,
+ true_values: list | None = None,
+ false_values: list | None = None,
+ skipinitialspace: bool = False,
+ skiprows: list[int] | int | Callable[[Hashable], bool] | None = None,
+ skipfooter: int = 0,
+ nrows: int | None = None,
+ # NA and Missing Data Handling
+ na_values: Sequence[str] | Mapping[str, Sequence[str]] | None = None,
+ keep_default_na: bool = True,
+ na_filter: bool = True,
+ verbose: bool | lib.NoDefault = lib.no_default,
+ skip_blank_lines: bool = True,
+ # Datetime Handling
+ parse_dates: bool | Sequence[Hashable] = False,
+ infer_datetime_format: bool | lib.NoDefault = lib.no_default,
+ keep_date_col: bool | lib.NoDefault = lib.no_default,
+ date_parser: Callable | lib.NoDefault = lib.no_default,
+ date_format: str | dict[Hashable, str] | None = None,
+ dayfirst: bool = False,
+ cache_dates: bool = True,
+ # Iteration
+ iterator: bool = False,
+ chunksize: int | None = None,
+ # Quoting, Compression, and File Format
+ compression: CompressionOptions = "infer",
+ thousands: str | None = None,
+ decimal: str = ".",
+ lineterminator: str | None = None,
+ quotechar: str = '"',
+ quoting: int = csv.QUOTE_MINIMAL,
+ doublequote: bool = True,
+ escapechar: str | None = None,
+ comment: str | None = None,
+ encoding: str | None = None,
+ encoding_errors: str | None = "strict",
+ dialect: str | csv.Dialect | None = None,
+ # Error Handling
+ on_bad_lines: str = "error",
+ # Internal
+ delim_whitespace: bool | lib.NoDefault = lib.no_default,
+ low_memory: bool = _c_parser_defaults["low_memory"],
+ memory_map: bool = False,
+ float_precision: str | None = None,
+ storage_options: StorageOptions | None = None,
+ dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,
+) -> DataFrame | TextFileReader:
+ if keep_date_col is not lib.no_default:
+ # GH#55569
+ warnings.warn(
+ "The 'keep_date_col' keyword in pd.read_table is deprecated and "
+ "will be removed in a future version. Explicitly remove unwanted "
+ "columns after parsing instead.",
+ FutureWarning,
+ stacklevel=find_stack_level(),
+ )
+ else:
+ keep_date_col = False
+
+ # error: Item "bool" of "bool | Sequence[Hashable]" has no attribute "__iter__"
+ if lib.is_list_like(parse_dates) and not all(is_hashable(x) for x in parse_dates): # type: ignore[union-attr]
+ # GH#55569
+ warnings.warn(
+ "Support for nested sequences for 'parse_dates' in pd.read_table "
+ "is deprecated. Combine the desired columns with pd.to_datetime "
+ "after parsing instead.",
+ FutureWarning,
+ stacklevel=find_stack_level(),
+ )
+
+ if infer_datetime_format is not lib.no_default:
+ warnings.warn(
+ "The argument 'infer_datetime_format' is deprecated and will "
+ "be removed in a future version. "
+ "A strict version of it is now the default, see "
+ "https://pandas.pydata.org/pdeps/0004-consistent-to-datetime-parsing.html. "
+ "You can safely remove this argument.",
+ FutureWarning,
+ stacklevel=find_stack_level(),
+ )
+
+ if delim_whitespace is not lib.no_default:
+ # GH#55569
+ warnings.warn(
+ "The 'delim_whitespace' keyword in pd.read_table is deprecated and "
+ "will be removed in a future version. Use ``sep='\\s+'`` instead",
+ FutureWarning,
+ stacklevel=find_stack_level(),
+ )
+ else:
+ delim_whitespace = False
+
+ if verbose is not lib.no_default:
+ # GH#55569
+ warnings.warn(
+ "The 'verbose' keyword in pd.read_table is deprecated and "
+ "will be removed in a future version.",
+ FutureWarning,
+ stacklevel=find_stack_level(),
+ )
+ else:
+ verbose = False
+
+ # locals() should never be modified
+ kwds = locals().copy()
+ del kwds["filepath_or_buffer"]
+ del kwds["sep"]
+
+ kwds_defaults = _refine_defaults_read(
+ dialect,
+ delimiter,
+ delim_whitespace,
+ engine,
+ sep,
+ on_bad_lines,
+ names,
+ defaults={"delimiter": "\t"},
+ dtype_backend=dtype_backend,
+ )
+ kwds.update(kwds_defaults)
+
+ return _read(filepath_or_buffer, kwds)
+
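+# Equivalence sketch (illustrative): ``read_table`` is ``read_csv`` with a tab
+# as the default separator, so for a hypothetical "data.tsv" these agree:
+#
+#     pd.read_table("data.tsv")
+#     pd.read_csv("data.tsv", sep="\t")
+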
+
+@overload
+def read_fwf(
+ filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str],
+ *,
+ colspecs: Sequence[tuple[int, int]] | str | None = ...,
+ widths: Sequence[int] | None = ...,
+ infer_nrows: int = ...,
+ dtype_backend: DtypeBackend | lib.NoDefault = ...,
+ iterator: Literal[True],
+ chunksize: int | None = ...,
+ **kwds,
+) -> TextFileReader:
+ ...
+
+
+@overload
+def read_fwf(
+ filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str],
+ *,
+ colspecs: Sequence[tuple[int, int]] | str | None = ...,
+ widths: Sequence[int] | None = ...,
+ infer_nrows: int = ...,
+ dtype_backend: DtypeBackend | lib.NoDefault = ...,
+ iterator: bool = ...,
+ chunksize: int,
+ **kwds,
+) -> TextFileReader:
+ ...
+
+
+@overload
+def read_fwf(
+ filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str],
+ *,
+ colspecs: Sequence[tuple[int, int]] | str | None = ...,
+ widths: Sequence[int] | None = ...,
+ infer_nrows: int = ...,
+ dtype_backend: DtypeBackend | lib.NoDefault = ...,
+ iterator: Literal[False] = ...,
+ chunksize: None = ...,
+ **kwds,
+) -> DataFrame:
+ ...
+
+
+def read_fwf(
+ filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str],
+ *,
+ colspecs: Sequence[tuple[int, int]] | str | None = "infer",
+ widths: Sequence[int] | None = None,
+ infer_nrows: int = 100,
+ dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,
+ iterator: bool = False,
+ chunksize: int | None = None,
+ **kwds,
+) -> DataFrame | TextFileReader:
+ r"""
+ Read a table of fixed-width formatted lines into DataFrame.
+
+ Also supports optionally iterating or breaking of the file
+ into chunks.
+
+ Additional help can be found in the `online docs for IO Tools
+ <https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html>`_.
+
+ Parameters
+ ----------
+ filepath_or_buffer : str, path object, or file-like object
+ String, path object (implementing ``os.PathLike[str]``), or file-like
+ object implementing a text ``read()`` function. The string could be a URL.
+ Valid URL schemes include http, ftp, s3, and file. For file URLs, a host is
+ expected. A local file could be:
+ ``file://localhost/path/to/table.csv``.
+ colspecs : list of tuple (int, int) or 'infer', optional
+ A list of tuples giving the extents of the fixed-width
+ fields of each line as half-open intervals (i.e., ``[from, to)``).
+ The string value 'infer' can be used to instruct the parser to try
+ detecting the column specifications from the first 100 rows of
+ the data which are not being skipped via skiprows (default='infer').
+ widths : list of int, optional
+ A list of field widths which can be used instead of 'colspecs' if
+ the intervals are contiguous.
+ infer_nrows : int, default 100
+ The number of rows to consider when letting the parser determine the
+ `colspecs`.
+ dtype_backend : {'numpy_nullable', 'pyarrow'}, default 'numpy_nullable'
+ Back-end data type applied to the resultant :class:`DataFrame`
+ (still experimental). Behaviour is as follows:
+
+ * ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame`
+ (default).
+ * ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype`
+ DataFrame.
+
+ .. versionadded:: 2.0
+
+ **kwds : optional
+ Optional keyword arguments can be passed to ``TextFileReader``.
+
+ Returns
+ -------
+ DataFrame or TextFileReader
+ A fixed-width formatted file is returned as a two-dimensional
+ data structure with labeled axes.
+
+ See Also
+ --------
+ DataFrame.to_csv : Write DataFrame to a comma-separated values (csv) file.
+ read_csv : Read a comma-separated values (csv) file into DataFrame.
+
+ Examples
+ --------
+ >>> pd.read_fwf('data.csv') # doctest: +SKIP
+ """
+ # Check input arguments.
+ if colspecs is None and widths is None:
+ raise ValueError("Must specify either colspecs or widths")
+ if colspecs not in (None, "infer") and widths is not None:
+ raise ValueError("You must specify only one of 'widths' and 'colspecs'")
+
+ # Compute 'colspecs' from 'widths', if specified.
+ if widths is not None:
+ colspecs, col = [], 0
+ for w in widths:
+ colspecs.append((col, col + w))
+ col += w
+
+ # for mypy
+ assert colspecs is not None
+
+ # GH#40830
+ # Ensure length of `colspecs` matches length of `names`
+ names = kwds.get("names")
+ if names is not None:
+ if len(names) != len(colspecs) and colspecs != "infer":
+ # need to check len(index_col) as it might contain
+ # unnamed indices, in which case its name is not required
+ len_index = 0
+ if kwds.get("index_col") is not None:
+ index_col: Any = kwds.get("index_col")
+ if index_col is not False:
+ if not is_list_like(index_col):
+ len_index = 1
+ else:
+ len_index = len(index_col)
+ if kwds.get("usecols") is None and len(names) + len_index != len(colspecs):
+ # If usecols is used colspec may be longer than names
+ raise ValueError("Length of colspecs must match length of names")
+
+ kwds["colspecs"] = colspecs
+ kwds["infer_nrows"] = infer_nrows
+ kwds["engine"] = "python-fwf"
+ kwds["iterator"] = iterator
+ kwds["chunksize"] = chunksize
+
+ check_dtype_backend(dtype_backend)
+ kwds["dtype_backend"] = dtype_backend
+ return _read(filepath_or_buffer, kwds)
+
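+# Column-specification sketch for ``read_fwf`` (illustrative): ``widths`` is
+# expanded into half-open ``colspecs`` exactly as in the loop above, so for a
+# hypothetical "fixed.txt" with 8-, 4- and 6-character fields:
+#
+#     pd.read_fwf("fixed.txt", widths=[8, 4, 6])
+#     pd.read_fwf("fixed.txt", colspecs=[(0, 8), (8, 12), (12, 18)])  # same
+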
+
+class TextFileReader(abc.Iterator):
+ """
+
+ Passed dialect overrides any of the related parser options
+
+ """
+
+ def __init__(
+ self,
+ f: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str] | list,
+ engine: CSVEngine | None = None,
+ **kwds,
+ ) -> None:
+ if engine is not None:
+ engine_specified = True
+ else:
+ engine = "python"
+ engine_specified = False
+ self.engine = engine
+ self._engine_specified = kwds.get("engine_specified", engine_specified)
+
+ _validate_skipfooter(kwds)
+
+ dialect = _extract_dialect(kwds)
+ if dialect is not None:
+ if engine == "pyarrow":
+ raise ValueError(
+ "The 'dialect' option is not supported with the 'pyarrow' engine"
+ )
+ kwds = _merge_with_dialect_properties(dialect, kwds)
+
+ if kwds.get("header", "infer") == "infer":
+ kwds["header"] = 0 if kwds.get("names") is None else None
+
+ self.orig_options = kwds
+
+ # miscellanea
+ self._currow = 0
+
+ options = self._get_options_with_defaults(engine)
+ options["storage_options"] = kwds.get("storage_options", None)
+
+ self.chunksize = options.pop("chunksize", None)
+ self.nrows = options.pop("nrows", None)
+
+ self._check_file_or_buffer(f, engine)
+ self.options, self.engine = self._clean_options(options, engine)
+
+ if "has_index_names" in kwds:
+ self.options["has_index_names"] = kwds["has_index_names"]
+
+ self.handles: IOHandles | None = None
+ self._engine = self._make_engine(f, self.engine)
+
+ def close(self) -> None:
+ if self.handles is not None:
+ self.handles.close()
+ self._engine.close()
+
+ def _get_options_with_defaults(self, engine: CSVEngine) -> dict[str, Any]:
+ kwds = self.orig_options
+
+ options = {}
+ default: object | None
+
+ for argname, default in parser_defaults.items():
+ value = kwds.get(argname, default)
+
+ # see gh-12935
+ if (
+ engine == "pyarrow"
+ and argname in _pyarrow_unsupported
+ and value != default
+ and value != getattr(value, "value", default)
+ ):
+ raise ValueError(
+ f"The {repr(argname)} option is not supported with the "
+ f"'pyarrow' engine"
+ )
+ options[argname] = value
+
+ for argname, default in _c_parser_defaults.items():
+ if argname in kwds:
+ value = kwds[argname]
+
+ if engine != "c" and value != default:
+ # TODO: Refactor this logic, it's pretty convoluted
+ if "python" in engine and argname not in _python_unsupported:
+ pass
+ elif "pyarrow" in engine and argname not in _pyarrow_unsupported:
+ pass
+ else:
+ raise ValueError(
+ f"The {repr(argname)} option is not supported with the "
+ f"{repr(engine)} engine"
+ )
+ else:
+ value = default
+ options[argname] = value
+
+ if engine == "python-fwf":
+ for argname, default in _fwf_defaults.items():
+ options[argname] = kwds.get(argname, default)
+
+ return options
+
+ def _check_file_or_buffer(self, f, engine: CSVEngine) -> None:
+ # see gh-16530
+ if is_file_like(f) and engine != "c" and not hasattr(f, "__iter__"):
+ # The C engine doesn't need the file-like to have the "__iter__"
+ # attribute. However, the Python engine needs "__iter__(...)"
+ # when iterating through such an object, meaning it
+ # needs to have that attribute
+ raise ValueError(
+ "The 'python' engine cannot iterate through this file buffer."
+ )
+
+ def _clean_options(
+ self, options: dict[str, Any], engine: CSVEngine
+ ) -> tuple[dict[str, Any], CSVEngine]:
+ result = options.copy()
+
+ fallback_reason = None
+
+ # C engine not supported yet
+ if engine == "c":
+ if options["skipfooter"] > 0:
+ fallback_reason = "the 'c' engine does not support skipfooter"
+ engine = "python"
+
+ sep = options["delimiter"]
+ delim_whitespace = options["delim_whitespace"]
+
+ if sep is None and not delim_whitespace:
+ if engine in ("c", "pyarrow"):
+ fallback_reason = (
+ f"the '{engine}' engine does not support "
+ "sep=None with delim_whitespace=False"
+ )
+ engine = "python"
+ elif sep is not None and len(sep) > 1:
+ if engine == "c" and sep == r"\s+":
+ result["delim_whitespace"] = True
+ del result["delimiter"]
+ elif engine not in ("python", "python-fwf"):
+ # wait until regex engine integrated
+ fallback_reason = (
+ f"the '{engine}' engine does not support "
+ "regex separators (separators > 1 char and "
+ r"different from '\s+' are interpreted as regex)"
+ )
+ engine = "python"
+ elif delim_whitespace:
+ if "python" in engine:
+ result["delimiter"] = r"\s+"
+ elif sep is not None:
+ encodeable = True
+ encoding = sys.getfilesystemencoding() or "utf-8"
+ try:
+ if len(sep.encode(encoding)) > 1:
+ encodeable = False
+ except UnicodeDecodeError:
+ encodeable = False
+ if not encodeable and engine not in ("python", "python-fwf"):
+ fallback_reason = (
+ f"the separator encoded in {encoding} "
+ f"is > 1 char long, and the '{engine}' engine "
+ "does not support such separators"
+ )
+ engine = "python"
+
+ quotechar = options["quotechar"]
+ if quotechar is not None and isinstance(quotechar, (str, bytes)):
+ if (
+ len(quotechar) == 1
+ and ord(quotechar) > 127
+ and engine not in ("python", "python-fwf")
+ ):
+ fallback_reason = (
+ "ord(quotechar) > 127, meaning the "
+ "quotechar is larger than one byte, "
+ f"and the '{engine}' engine does not support such quotechars"
+ )
+ engine = "python"
+
+ if fallback_reason and self._engine_specified:
+ raise ValueError(fallback_reason)
+
+ if engine == "c":
+ for arg in _c_unsupported:
+ del result[arg]
+
+ if "python" in engine:
+ for arg in _python_unsupported:
+ if fallback_reason and result[arg] != _c_parser_defaults.get(arg):
+ raise ValueError(
+ "Falling back to the 'python' engine because "
+ f"{fallback_reason}, but this causes {repr(arg)} to be "
+ "ignored as it is not supported by the 'python' engine."
+ )
+ del result[arg]
+
+ if fallback_reason:
+ warnings.warn(
+ (
+ "Falling back to the 'python' engine because "
+ f"{fallback_reason}; you can avoid this warning by specifying "
+ "engine='python'."
+ ),
+ ParserWarning,
+ stacklevel=find_stack_level(),
+ )
+
+ index_col = options["index_col"]
+ names = options["names"]
+ converters = options["converters"]
+ na_values = options["na_values"]
+ skiprows = options["skiprows"]
+
+ validate_header_arg(options["header"])
+
+ if index_col is True:
+ raise ValueError("The value of index_col couldn't be 'True'")
+ if is_index_col(index_col):
+ if not isinstance(index_col, (list, tuple, np.ndarray)):
+ index_col = [index_col]
+ result["index_col"] = index_col
+
+ names = list(names) if names is not None else names
+
+ # type conversion-related
+ if converters is not None:
+ if not isinstance(converters, dict):
+ raise TypeError(
+ "Type converters must be a dict or subclass, "
+ f"input was a {type(converters).__name__}"
+ )
+ else:
+ converters = {}
+
+ # Converting values to NA
+ keep_default_na = options["keep_default_na"]
+ floatify = engine != "pyarrow"
+ na_values, na_fvalues = _clean_na_values(
+ na_values, keep_default_na, floatify=floatify
+ )
+
+ # handle skiprows; this is internally handled by the
+ # c-engine, so only need for python and pyarrow parsers
+ if engine == "pyarrow":
+ if not is_integer(skiprows) and skiprows is not None:
+ # pyarrow expects skiprows to be passed as an integer
+ raise ValueError(
+ "skiprows argument must be an integer when using "
+ "engine='pyarrow'"
+ )
+ else:
+ if is_integer(skiprows):
+ skiprows = list(range(skiprows))
+ if skiprows is None:
+ skiprows = set()
+ elif not callable(skiprows):
+ skiprows = set(skiprows)
+
+ # put stuff back
+ result["names"] = names
+ result["converters"] = converters
+ result["na_values"] = na_values
+ result["na_fvalues"] = na_fvalues
+ result["skiprows"] = skiprows
+
+ return result, engine
+
+ def __next__(self) -> DataFrame:
+ try:
+ return self.get_chunk()
+ except StopIteration:
+ self.close()
+ raise
+
+ def _make_engine(
+ self,
+ f: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str] | list | IO,
+ engine: CSVEngine = "c",
+ ) -> ParserBase:
+ mapping: dict[str, type[ParserBase]] = {
+ "c": CParserWrapper,
+ "python": PythonParser,
+ "pyarrow": ArrowParserWrapper,
+ "python-fwf": FixedWidthFieldParser,
+ }
+ if engine not in mapping:
+ raise ValueError(
+ f"Unknown engine: {engine} (valid options are {mapping.keys()})"
+ )
+ if not isinstance(f, list):
+ # open file here
+ is_text = True
+ mode = "r"
+ if engine == "pyarrow":
+ is_text = False
+ mode = "rb"
+ elif (
+ engine == "c"
+ and self.options.get("encoding", "utf-8") == "utf-8"
+ and isinstance(stringify_path(f), str)
+ ):
+ # c engine can decode utf-8 bytes, adding TextIOWrapper makes
+ # the c-engine especially for memory_map=True far slower
+ is_text = False
+ if "b" not in mode:
+ mode += "b"
+ self.handles = get_handle(
+ f,
+ mode,
+ encoding=self.options.get("encoding", None),
+ compression=self.options.get("compression", None),
+ memory_map=self.options.get("memory_map", False),
+ is_text=is_text,
+ errors=self.options.get("encoding_errors", "strict"),
+ storage_options=self.options.get("storage_options", None),
+ )
+ assert self.handles is not None
+ f = self.handles.handle
+
+ elif engine != "python":
+ msg = f"Invalid file path or buffer object type: {type(f)}"
+ raise ValueError(msg)
+
+ try:
+ return mapping[engine](f, **self.options)
+ except Exception:
+ if self.handles is not None:
+ self.handles.close()
+ raise
+
+ def _failover_to_python(self) -> None:
+ raise AbstractMethodError(self)
+
+ def read(self, nrows: int | None = None) -> DataFrame:
+ if self.engine == "pyarrow":
+ try:
+ # error: "ParserBase" has no attribute "read"
+ df = self._engine.read() # type: ignore[attr-defined]
+ except Exception:
+ self.close()
+ raise
+ else:
+ nrows = validate_integer("nrows", nrows)
+ try:
+ # error: "ParserBase" has no attribute "read"
+ (
+ index,
+ columns,
+ col_dict,
+ ) = self._engine.read( # type: ignore[attr-defined]
+ nrows
+ )
+ except Exception:
+ self.close()
+ raise
+
+ if index is None:
+ if col_dict:
+ # Any column is actually fine:
+ new_rows = len(next(iter(col_dict.values())))
+ index = RangeIndex(self._currow, self._currow + new_rows)
+ else:
+ new_rows = 0
+ else:
+ new_rows = len(index)
+
+ if hasattr(self, "orig_options"):
+ dtype_arg = self.orig_options.get("dtype", None)
+ else:
+ dtype_arg = None
+
+ if isinstance(dtype_arg, dict):
+ dtype = defaultdict(lambda: None) # type: ignore[var-annotated]
+ dtype.update(dtype_arg)
+ elif dtype_arg is not None and pandas_dtype(dtype_arg) in (
+ np.str_,
+ np.object_,
+ ):
+ dtype = defaultdict(lambda: dtype_arg)
+ else:
+ dtype = None
+
+ if dtype is not None:
+ new_col_dict = {}
+ for k, v in col_dict.items():
+ d = (
+ dtype[k]
+ if pandas_dtype(dtype[k]) in (np.str_, np.object_)
+ else None
+ )
+ new_col_dict[k] = Series(v, index=index, dtype=d, copy=False)
+ else:
+ new_col_dict = col_dict
+
+ df = DataFrame(
+ new_col_dict,
+ columns=columns,
+ index=index,
+ copy=not using_copy_on_write(),
+ )
+
+ self._currow += new_rows
+ return df
+
+ def get_chunk(self, size: int | None = None) -> DataFrame:
+ if size is None:
+ size = self.chunksize
+ if self.nrows is not None:
+ if self._currow >= self.nrows:
+ raise StopIteration
+ size = min(size, self.nrows - self._currow)
+ return self.read(nrows=size)
+
+ def __enter__(self) -> Self:
+ return self
+
+ def __exit__(
+ self,
+ exc_type: type[BaseException] | None,
+ exc_value: BaseException | None,
+ traceback: TracebackType | None,
+ ) -> None:
+ self.close()
+
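+# Context-manager sketch (illustrative): TextFileReader supports ``with`` and
+# iteration, closing its handles on exit; "big.csv" is a placeholder:
+#
+#     with pd.read_csv("big.csv", chunksize=10_000) as reader:
+#         for chunk in reader:
+#             ...  # work on each DataFrame chunk
+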
+
+def TextParser(*args, **kwds) -> TextFileReader:
+ """
+ Converts lists of lists/tuples into DataFrames with proper type inference
+ and optional (e.g. string to datetime) conversion. Also enables iterating
+ lazily over chunks of large files.
+
+ Parameters
+ ----------
+ data : file-like object or list
+ delimiter : separator character to use
+ dialect : str or csv.Dialect instance, optional
+ Ignored if delimiter is longer than 1 character
+ names : sequence, optional
+ header : int, default 0
+ Row to use to parse column labels. Defaults to the first row. Prior
+ rows will be discarded
+ index_col : int or list, optional
+ Column or columns to use as the (possibly hierarchical) index
+ has_index_names: bool, default False
+ True if the cols defined in index_col have an index name and are
+ not in the header.
+ na_values : scalar, str, list-like, or dict, optional
+ Additional strings to recognize as NA/NaN.
+ keep_default_na : bool, default True
+ thousands : str, optional
+ Thousands separator
+ comment : str, optional
+ Comment out remainder of line
+ parse_dates : bool, default False
+ keep_date_col : bool, default False
+ date_parser : function, optional
+
+ .. deprecated:: 2.0.0
+ date_format : str or dict of column -> format, default ``None``
+
+ .. versionadded:: 2.0.0
+ skiprows : list of integers
+ Row numbers to skip
+ skipfooter : int
+ Number of lines at bottom of file to skip
+ converters : dict, optional
+ Dict of functions for converting values in certain columns. Keys can
+ either be integers or column labels, values are functions that take one
+ input argument, the cell (not column) content, and return the
+ transformed content.
+ encoding : str, optional
+ Encoding to use for UTF when reading/writing (ex. 'utf-8')
+ float_precision : str, optional
+ Specifies which converter the C engine should use for floating-point
+ values. The options are ``None`` or ``'high'`` for the ordinary converter,
+ ``'legacy'`` for the original lower precision pandas converter, and
+ ``'round_trip'`` for the round-trip converter.
+ """
+ kwds["engine"] = "python"
+ return TextFileReader(*args, **kwds)
+
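+# Minimal ``TextParser`` sketch (illustrative): parse in-memory rows, treating
+# the first row as the header:
+#
+#     rows = [["a", "b"], ["1", "2"], ["3", "4"]]
+#     df = TextParser(rows, header=0).read()
+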
+
+def _clean_na_values(na_values, keep_default_na: bool = True, floatify: bool = True):
+ na_fvalues: set | dict
+ if na_values is None:
+ if keep_default_na:
+ na_values = STR_NA_VALUES
+ else:
+ na_values = set()
+ na_fvalues = set()
+ elif isinstance(na_values, dict):
+ old_na_values = na_values.copy()
+ na_values = {} # Prevent aliasing.
+
+ # Convert the values in the na_values dictionary
+ # into array-likes for further use. This is also
+ # where we append the default NaN values, provided
+ # that `keep_default_na=True`.
+ for k, v in old_na_values.items():
+ if not is_list_like(v):
+ v = [v]
+
+ if keep_default_na:
+ v = set(v) | STR_NA_VALUES
+
+ na_values[k] = v
+ na_fvalues = {k: _floatify_na_values(v) for k, v in na_values.items()}
+ else:
+ if not is_list_like(na_values):
+ na_values = [na_values]
+ na_values = _stringify_na_values(na_values, floatify)
+ if keep_default_na:
+ na_values = na_values | STR_NA_VALUES
+
+ na_fvalues = _floatify_na_values(na_values)
+
+ return na_values, na_fvalues
+
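+# Behaviour sketch for ``_clean_na_values`` (illustrative): user-supplied
+# values are unioned with the default NA strings unless keep_default_na=False:
+#
+#     na, na_f = _clean_na_values(["missing"])
+#     # na contains "missing" plus every entry of STR_NA_VALUES;
+#     # na_f holds float versions of any numeric entries.
+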
+
+def _floatify_na_values(na_values):
+ # create float versions of the na_values
+ result = set()
+ for v in na_values:
+ try:
+ v = float(v)
+ if not np.isnan(v):
+ result.add(v)
+ except (TypeError, ValueError, OverflowError):
+ pass
+ return result
+
+
+def _stringify_na_values(na_values, floatify: bool):
+ """return a stringified and numeric for these values"""
+ result: list[str | float] = []
+ for x in na_values:
+ result.append(str(x))
+ result.append(x)
+ try:
+ v = float(x)
+
+ # for integral floats like 999.0, also register "999.0" and "999"
+ if v == int(v):
+ v = int(v)
+ result.append(f"{v}.0")
+ result.append(str(v))
+
+ if floatify:
+ result.append(v)
+ except (TypeError, ValueError, OverflowError):
+ pass
+ if floatify:
+ try:
+ result.append(int(x))
+ except (TypeError, ValueError, OverflowError):
+ pass
+ return set(result)
+
+
+def _refine_defaults_read(
+ dialect: str | csv.Dialect | None,
+ delimiter: str | None | lib.NoDefault,
+ delim_whitespace: bool,
+ engine: CSVEngine | None,
+ sep: str | None | lib.NoDefault,
+ on_bad_lines: str | Callable,
+ names: Sequence[Hashable] | None | lib.NoDefault,
+ defaults: dict[str, Any],
+ dtype_backend: DtypeBackend | lib.NoDefault,
+):
+ """Validate/refine default values of input parameters of read_csv, read_table.
+
+ Parameters
+ ----------
+ dialect : str or csv.Dialect
+ If provided, this parameter will override values (default or not) for the
+ following parameters: `delimiter`, `doublequote`, `escapechar`,
+ `skipinitialspace`, `quotechar`, and `quoting`. If it is necessary to
+ override values, a ParserWarning will be issued. See csv.Dialect
+ documentation for more details.
+ delimiter : str or object
+ Alias for sep.
+ delim_whitespace : bool
+ Specifies whether or not whitespace (e.g. ``' '`` or ``'\t'``) will be
+ used as the sep. Equivalent to setting ``sep='\\s+'``. If this option
+ is set to True, nothing should be passed in for the ``delimiter``
+ parameter.
+
+ .. deprecated:: 2.2.0
+ Use ``sep="\\s+"`` instead.
+ engine : {{'c', 'python'}}
+ Parser engine to use. The C engine is faster while the python engine is
+ currently more feature-complete.
+ sep : str or object
+ A delimiter provided by the user (str) or a sentinel value, i.e.
+ pandas._libs.lib.no_default.
+ on_bad_lines : str, callable
+ An option for handling bad lines or a sentinel value (None).
+ names : array-like, optional
+ List of column names to use. If the file contains a header row,
+ then you should explicitly pass ``header=0`` to override the column names.
+ Duplicates in this list are not allowed.
+ defaults : dict
+ Default values of input parameters.
+
+ Returns
+ -------
+ kwds : dict
+ Input parameters with correct values.
+
+ Raises
+ ------
+ ValueError :
+ If a delimiter was specified with ``sep`` (or ``delimiter``) and
+ ``delim_whitespace=True``.
+ """
+ # fix types for sep, delimiter to Union[str, Any]
+ delim_default = defaults["delimiter"]
+ kwds: dict[str, Any] = {}
+ # gh-23761
+ #
+ # When a dialect is passed, it overrides any of the overlapping
+ # parameters passed in directly. We don't want to warn if the
+ # default parameters were passed in (since it probably means
+ # that the user didn't pass them in explicitly in the first place).
+ #
+ # "delimiter" is the annoying corner case because we alias it to
+ # "sep" before doing comparison to the dialect values later on.
+ # Thus, we need a flag to indicate that we need to "override"
+ # the comparison to dialect values by checking if default values
+ # for BOTH "delimiter" and "sep" were provided.
+ if dialect is not None:
+ kwds["sep_override"] = delimiter is None and (
+ sep is lib.no_default or sep == delim_default
+ )
+
+ if delimiter and (sep is not lib.no_default):
+ raise ValueError("Specified a sep and a delimiter; you can only specify one.")
+
+ kwds["names"] = None if names is lib.no_default else names
+
+ # Alias sep -> delimiter.
+ if delimiter is None:
+ delimiter = sep
+
+ if delim_whitespace and (delimiter is not lib.no_default):
+ raise ValueError(
+ "Specified a delimiter with both sep and "
+ "delim_whitespace=True; you can only specify one."
+ )
+
+ if delimiter == "\n":
+ raise ValueError(
+ r"Specified \n as separator or delimiter. This forces the python engine "
+ "which does not accept a line terminator. Hence it is not allowed to use "
+ "the line terminator as separator.",
+ )
+
+ if delimiter is lib.no_default:
+ # assign default separator value
+ kwds["delimiter"] = delim_default
+ else:
+ kwds["delimiter"] = delimiter
+
+ if engine is not None:
+ kwds["engine_specified"] = True
+ else:
+ kwds["engine"] = "c"
+ kwds["engine_specified"] = False
+
+ if on_bad_lines == "error":
+ kwds["on_bad_lines"] = ParserBase.BadLineHandleMethod.ERROR
+ elif on_bad_lines == "warn":
+ kwds["on_bad_lines"] = ParserBase.BadLineHandleMethod.WARN
+ elif on_bad_lines == "skip":
+ kwds["on_bad_lines"] = ParserBase.BadLineHandleMethod.SKIP
+ elif callable(on_bad_lines):
+ if engine not in ["python", "pyarrow"]:
+ raise ValueError(
+ "on_bad_line can only be a callable function "
+ "if engine='python' or 'pyarrow'"
+ )
+ kwds["on_bad_lines"] = on_bad_lines
+ else:
+ raise ValueError(f"Argument {on_bad_lines} is invalid for on_bad_lines")
+
+ check_dtype_backend(dtype_backend)
+
+ kwds["dtype_backend"] = dtype_backend
+
+ return kwds
+
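+# Mapping sketch for ``on_bad_lines`` (illustrative recap of the branch above):
+#
+#     "error"  -> ParserBase.BadLineHandleMethod.ERROR
+#     "warn"   -> ParserBase.BadLineHandleMethod.WARN
+#     "skip"   -> ParserBase.BadLineHandleMethod.SKIP
+#     callable -> passed through (python/pyarrow engines only)
+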
+
+def _extract_dialect(kwds: dict[str, Any]) -> csv.Dialect | None:
+ """
+ Extract concrete csv dialect instance.
+
+ Returns
+ -------
+ csv.Dialect or None
+ """
+ if kwds.get("dialect") is None:
+ return None
+
+ dialect = kwds["dialect"]
+ if dialect in csv.list_dialects():
+ dialect = csv.get_dialect(dialect)
+
+ _validate_dialect(dialect)
+
+ return dialect
+
+
+MANDATORY_DIALECT_ATTRS = (
+ "delimiter",
+ "doublequote",
+ "escapechar",
+ "skipinitialspace",
+ "quotechar",
+ "quoting",
+)
+
+
+def _validate_dialect(dialect: csv.Dialect) -> None:
+ """
+ Validate csv dialect instance.
+
+ Raises
+ ------
+ ValueError
+ If incorrect dialect is provided.
+ """
+ for param in MANDATORY_DIALECT_ATTRS:
+ if not hasattr(dialect, param):
+ raise ValueError(f"Invalid dialect {dialect} provided")
+
+
+def _merge_with_dialect_properties(
+ dialect: csv.Dialect,
+ defaults: dict[str, Any],
+) -> dict[str, Any]:
+ """
+ Merge default kwargs in TextFileReader with dialect parameters.
+
+ Parameters
+ ----------
+ dialect : csv.Dialect
+ Concrete csv dialect. See csv.Dialect documentation for more details.
+ defaults : dict
+ Keyword arguments passed to TextFileReader.
+
+ Returns
+ -------
+ kwds : dict
+ Updated keyword arguments, merged with dialect parameters.
+ """
+ kwds = defaults.copy()
+
+ for param in MANDATORY_DIALECT_ATTRS:
+ dialect_val = getattr(dialect, param)
+
+ parser_default = parser_defaults[param]
+ provided = kwds.get(param, parser_default)
+
+ # Messages for conflicting values between the dialect
+ # instance and the actual parameters provided.
+ conflict_msgs = []
+
+ # Don't warn if the default parameter was passed in,
+ # even if it conflicts with the dialect (gh-23761).
+ if provided not in (parser_default, dialect_val):
+ msg = (
+ f"Conflicting values for '{param}': '{provided}' was "
+ f"provided, but the dialect specifies '{dialect_val}'. "
+ "Using the dialect-specified value."
+ )
+
+ # Annoying corner case for not warning about
+ # conflicts between dialect and delimiter parameter.
+ # Refer to the outer "_read_" function for more info.
+ if not (param == "delimiter" and kwds.pop("sep_override", False)):
+ conflict_msgs.append(msg)
+
+ if conflict_msgs:
+ warnings.warn(
+ "\n\n".join(conflict_msgs), ParserWarning, stacklevel=find_stack_level()
+ )
+ kwds[param] = dialect_val
+ return kwds
+
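+# Dialect-merge sketch (illustrative): a csv.Dialect instance overrides the
+# overlapping keywords; a warning fires only for explicit conflicting values.
+# ``SemiDialect`` and "data.csv" are hypothetical:
+#
+#     class SemiDialect(csv.Dialect):
+#         delimiter = ";"
+#         quotechar = '"'
+#         doublequote = True
+#         skipinitialspace = False
+#         lineterminator = "\n"
+#         quoting = csv.QUOTE_MINIMAL
+#
+#     pd.read_csv("data.csv", dialect=SemiDialect())
+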
+
+def _validate_skipfooter(kwds: dict[str, Any]) -> None:
+ """
+ Check whether skipfooter is compatible with other kwargs in TextFileReader.
+
+ Parameters
+ ----------
+ kwds : dict
+ Keyword arguments passed to TextFileReader.
+
+ Raises
+ ------
+ ValueError
+ If skipfooter is not compatible with other parameters.
+ """
+ if kwds.get("skipfooter"):
+ if kwds.get("iterator") or kwds.get("chunksize"):
+ raise ValueError("'skipfooter' not supported for iteration")
+ if kwds.get("nrows"):
+ raise ValueError("'skipfooter' not supported with 'nrows'")
diff --git a/evalkit_eagle/lib/python3.10/site-packages/pandas/io/sas/__init__.py b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/sas/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..317730745b6e3a0278a48b7bb810cf43e718e787
--- /dev/null
+++ b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/sas/__init__.py
@@ -0,0 +1,3 @@
+from pandas.io.sas.sasreader import read_sas
+
+__all__ = ["read_sas"]
diff --git a/evalkit_eagle/lib/python3.10/site-packages/pandas/io/sas/__pycache__/__init__.cpython-310.pyc b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/sas/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..62145cf40b3d94eb7a5ecd28df8ee394ae7f1dbf
Binary files /dev/null and b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/sas/__pycache__/__init__.cpython-310.pyc differ
diff --git a/evalkit_eagle/lib/python3.10/site-packages/pandas/io/sas/__pycache__/sas7bdat.cpython-310.pyc b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/sas/__pycache__/sas7bdat.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1f12a9c8801169ac24ddeea21e8d86bfe532f9d3
Binary files /dev/null and b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/sas/__pycache__/sas7bdat.cpython-310.pyc differ
diff --git a/evalkit_eagle/lib/python3.10/site-packages/pandas/io/sas/__pycache__/sas_constants.cpython-310.pyc b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/sas/__pycache__/sas_constants.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b9e28f45fe746427aa2600b8b837abc3ca8ae130
Binary files /dev/null and b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/sas/__pycache__/sas_constants.cpython-310.pyc differ
diff --git a/evalkit_eagle/lib/python3.10/site-packages/pandas/io/sas/__pycache__/sas_xport.cpython-310.pyc b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/sas/__pycache__/sas_xport.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b293a5ae38a15afbcfb60347254087028b12ac51
Binary files /dev/null and b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/sas/__pycache__/sas_xport.cpython-310.pyc differ
diff --git a/evalkit_eagle/lib/python3.10/site-packages/pandas/io/sas/__pycache__/sasreader.cpython-310.pyc b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/sas/__pycache__/sasreader.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..937c2649ad2b09ed26011e9314eb868849666d73
Binary files /dev/null and b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/sas/__pycache__/sasreader.cpython-310.pyc differ
diff --git a/evalkit_eagle/lib/python3.10/site-packages/pandas/io/sas/sas7bdat.py b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/sas/sas7bdat.py
new file mode 100644
index 0000000000000000000000000000000000000000..c5bdfb554178816f9d668157f87514776f277eb9
--- /dev/null
+++ b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/sas/sas7bdat.py
@@ -0,0 +1,756 @@
+"""
+Read SAS7BDAT files
+
+Based on code written by Jared Hobbs:
+ https://bitbucket.org/jaredhobbs/sas7bdat
+
+See also:
+ https://github.com/BioStatMatt/sas7bdat
+
+Partial documentation of the file format:
+ https://cran.r-project.org/package=sas7bdat/vignettes/sas7bdat.pdf
+
+Reference for binary data compression:
+ http://collaboration.cmc.ec.gc.ca/science/rpn/biblio/ddj/Website/articles/CUJ/1992/9210/ross/ross.htm
+"""
+from __future__ import annotations
+
+from collections import abc
+from datetime import (
+ datetime,
+ timedelta,
+)
+import sys
+from typing import TYPE_CHECKING
+
+import numpy as np
+
+from pandas._libs.byteswap import (
+ read_double_with_byteswap,
+ read_float_with_byteswap,
+ read_uint16_with_byteswap,
+ read_uint32_with_byteswap,
+ read_uint64_with_byteswap,
+)
+from pandas._libs.sas import (
+ Parser,
+ get_subheader_index,
+)
+from pandas._libs.tslibs.conversion import cast_from_unit_vectorized
+from pandas.errors import EmptyDataError
+
+import pandas as pd
+from pandas import (
+ DataFrame,
+ Timestamp,
+ isna,
+)
+
+from pandas.io.common import get_handle
+import pandas.io.sas.sas_constants as const
+from pandas.io.sas.sasreader import ReaderBase
+
+if TYPE_CHECKING:
+ from pandas._typing import (
+ CompressionOptions,
+ FilePath,
+ ReadBuffer,
+ )
+
+
+_unix_origin = Timestamp("1970-01-01")
+_sas_origin = Timestamp("1960-01-01")
+
+
+def _parse_datetime(sas_datetime: float, unit: str):
+ if isna(sas_datetime):
+ return pd.NaT
+
+ if unit == "s":
+ return datetime(1960, 1, 1) + timedelta(seconds=sas_datetime)
+
+ elif unit == "d":
+ return datetime(1960, 1, 1) + timedelta(days=sas_datetime)
+
+ else:
+ raise ValueError("unit must be 'd' or 's'")
+
+
+def _convert_datetimes(sas_datetimes: pd.Series, unit: str) -> pd.Series:
+ """
+ Convert to Timestamp if possible, otherwise to datetime.datetime.
+    SAS float64 lacks precision beyond millisecond resolution, so
+    conversion to datetime.datetime is acceptable.
+
+ Parameters
+ ----------
+ sas_datetimes : {Series, Sequence[float]}
+ Dates or datetimes in SAS
+ unit : {'d', 's'}
+ "d" if the floats represent dates, "s" for datetimes
+
+ Returns
+ -------
+ Series
+ Series of datetime64 dtype or datetime.datetime.
+ """
+ td = (_sas_origin - _unix_origin).as_unit("s")
+ if unit == "s":
+ millis = cast_from_unit_vectorized(
+ sas_datetimes._values, unit="s", out_unit="ms"
+ )
+ dt64ms = millis.view("M8[ms]") + td
+ return pd.Series(dt64ms, index=sas_datetimes.index, copy=False)
+ else:
+ vals = np.array(sas_datetimes, dtype="M8[D]") + td
+ return pd.Series(vals, dtype="M8[s]", index=sas_datetimes.index, copy=False)
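+
+
+# Worked example (illustrative value): 86400.0 seconds is one day past the
+# SAS epoch, and the epoch shift above maps it onto the unix-based
+# datetime64 scale:
+#   >>> _convert_datetimes(pd.Series([86400.0]), "s")[0]
+#   Timestamp('1960-01-02 00:00:00')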
+
+
+class _Column:
+ col_id: int
+ name: str | bytes
+ label: str | bytes
+ format: str | bytes
+ ctype: bytes
+ length: int
+
+ def __init__(
+ self,
+ col_id: int,
+ # These can be bytes when convert_header_text is False
+ name: str | bytes,
+ label: str | bytes,
+ format: str | bytes,
+ ctype: bytes,
+ length: int,
+ ) -> None:
+ self.col_id = col_id
+ self.name = name
+ self.label = label
+ self.format = format
+ self.ctype = ctype
+ self.length = length
+
+
+# SAS7BDAT represents a SAS data file in SAS7BDAT format.
+class SAS7BDATReader(ReaderBase, abc.Iterator):
+ """
+ Read SAS files in SAS7BDAT format.
+
+ Parameters
+ ----------
+ path_or_buf : path name or buffer
+ Name of SAS file or file-like object pointing to SAS file
+ contents.
+ index : column identifier, defaults to None
+ Column to use as index.
+ convert_dates : bool, defaults to True
+ Attempt to convert dates to Pandas datetime values. Note that
+ some rarely used SAS date formats may be unsupported.
+ blank_missing : bool, defaults to True
+ Convert empty strings to missing values (SAS uses blanks to
+ indicate missing character variables).
+ chunksize : int, defaults to None
+        Return a SAS7BDATReader object for iteration; each chunk contains
+        the given number of lines.
+ encoding : str, 'infer', defaults to None
+        String encoding according to the Python standard encodings.
+        encoding='infer' tries to detect the encoding from the file header;
+        encoding=None leaves the data in binary format.
+ convert_text : bool, defaults to True
+ If False, text variables are left as raw bytes.
+ convert_header_text : bool, defaults to True
+        If False, header text, including column names, is left as raw
+        bytes.
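+
+    Examples
+    --------
+    A minimal sketch; the path is illustrative:
+
+    >>> rdr = SAS7BDATReader("data.sas7bdat", chunksize=100)  # doctest: +SKIP
+    >>> first_chunk = rdr.read(100)  # doctest: +SKIP
+    >>> rdr.close()  # doctest: +SKIP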
+ """
+
+ _int_length: int
+ _cached_page: bytes | None
+
+ def __init__(
+ self,
+ path_or_buf: FilePath | ReadBuffer[bytes],
+ index=None,
+ convert_dates: bool = True,
+ blank_missing: bool = True,
+ chunksize: int | None = None,
+ encoding: str | None = None,
+ convert_text: bool = True,
+ convert_header_text: bool = True,
+ compression: CompressionOptions = "infer",
+ ) -> None:
+ self.index = index
+ self.convert_dates = convert_dates
+ self.blank_missing = blank_missing
+ self.chunksize = chunksize
+ self.encoding = encoding
+ self.convert_text = convert_text
+ self.convert_header_text = convert_header_text
+
+ self.default_encoding = "latin-1"
+ self.compression = b""
+ self.column_names_raw: list[bytes] = []
+ self.column_names: list[str | bytes] = []
+ self.column_formats: list[str | bytes] = []
+ self.columns: list[_Column] = []
+
+ self._current_page_data_subheader_pointers: list[tuple[int, int]] = []
+ self._cached_page = None
+ self._column_data_lengths: list[int] = []
+ self._column_data_offsets: list[int] = []
+ self._column_types: list[bytes] = []
+
+        self._current_row_in_file_index = 0
+        self._current_row_on_page_index = 0
+
+ self.handles = get_handle(
+ path_or_buf, "rb", is_text=False, compression=compression
+ )
+
+ self._path_or_buf = self.handles.handle
+
+ # Same order as const.SASIndex
+ self._subheader_processors = [
+ self._process_rowsize_subheader,
+ self._process_columnsize_subheader,
+ self._process_subheader_counts,
+ self._process_columntext_subheader,
+ self._process_columnname_subheader,
+ self._process_columnattributes_subheader,
+ self._process_format_subheader,
+ self._process_columnlist_subheader,
+ None, # Data
+ ]
+
+ try:
+ self._get_properties()
+ self._parse_metadata()
+ except Exception:
+ self.close()
+ raise
+
+ def column_data_lengths(self) -> np.ndarray:
+ """Return a numpy int64 array of the column data lengths"""
+ return np.asarray(self._column_data_lengths, dtype=np.int64)
+
+ def column_data_offsets(self) -> np.ndarray:
+ """Return a numpy int64 array of the column offsets"""
+ return np.asarray(self._column_data_offsets, dtype=np.int64)
+
+ def column_types(self) -> np.ndarray:
+ """
+ Returns a numpy character array of the column types:
+ s (string) or d (double)
+ """
+ return np.asarray(self._column_types, dtype=np.dtype("S1"))
+
+ def close(self) -> None:
+ self.handles.close()
+
+ def _get_properties(self) -> None:
+ # Check magic number
+ self._path_or_buf.seek(0)
+ self._cached_page = self._path_or_buf.read(288)
+ if self._cached_page[0 : len(const.magic)] != const.magic:
+ raise ValueError("magic number mismatch (not a SAS file?)")
+
+ # Get alignment information
+ buf = self._read_bytes(const.align_1_offset, const.align_1_length)
+ if buf == const.u64_byte_checker_value:
+ self.U64 = True
+ self._int_length = 8
+ self._page_bit_offset = const.page_bit_offset_x64
+ self._subheader_pointer_length = const.subheader_pointer_length_x64
+ else:
+ self.U64 = False
+ self._page_bit_offset = const.page_bit_offset_x86
+ self._subheader_pointer_length = const.subheader_pointer_length_x86
+ self._int_length = 4
+ buf = self._read_bytes(const.align_2_offset, const.align_2_length)
+ if buf == const.align_1_checker_value:
+ align1 = const.align_2_value
+ else:
+ align1 = 0
+
+ # Get endianness information
+ buf = self._read_bytes(const.endianness_offset, const.endianness_length)
+ if buf == b"\x01":
+ self.byte_order = "<"
+ self.need_byteswap = sys.byteorder == "big"
+ else:
+ self.byte_order = ">"
+ self.need_byteswap = sys.byteorder == "little"
+
+ # Get encoding information
+ buf = self._read_bytes(const.encoding_offset, const.encoding_length)[0]
+ if buf in const.encoding_names:
+ self.inferred_encoding = const.encoding_names[buf]
+ if self.encoding == "infer":
+ self.encoding = self.inferred_encoding
+ else:
+ self.inferred_encoding = f"unknown (code={buf})"
+
+ # Timestamp is epoch 01/01/1960
+ epoch = datetime(1960, 1, 1)
+ x = self._read_float(
+ const.date_created_offset + align1, const.date_created_length
+ )
+ self.date_created = epoch + pd.to_timedelta(x, unit="s")
+ x = self._read_float(
+ const.date_modified_offset + align1, const.date_modified_length
+ )
+ self.date_modified = epoch + pd.to_timedelta(x, unit="s")
+
+ self.header_length = self._read_uint(
+ const.header_size_offset + align1, const.header_size_length
+ )
+
+ # Read the rest of the header into cached_page.
+ buf = self._path_or_buf.read(self.header_length - 288)
+ self._cached_page += buf
+ # error: Argument 1 to "len" has incompatible type "Optional[bytes]";
+ # expected "Sized"
+ if len(self._cached_page) != self.header_length: # type: ignore[arg-type]
+ raise ValueError("The SAS7BDAT file appears to be truncated.")
+
+ self._page_length = self._read_uint(
+ const.page_size_offset + align1, const.page_size_length
+ )
+
+ def __next__(self) -> DataFrame:
+ da = self.read(nrows=self.chunksize or 1)
+ if da.empty:
+ self.close()
+ raise StopIteration
+ return da
+
+ # Read a single float of the given width (4 or 8).
+ def _read_float(self, offset: int, width: int):
+ assert self._cached_page is not None
+ if width == 4:
+ return read_float_with_byteswap(
+ self._cached_page, offset, self.need_byteswap
+ )
+ elif width == 8:
+ return read_double_with_byteswap(
+ self._cached_page, offset, self.need_byteswap
+ )
+ else:
+ self.close()
+ raise ValueError("invalid float width")
+
+ # Read a single unsigned integer of the given width (1, 2, 4 or 8).
+ def _read_uint(self, offset: int, width: int) -> int:
+ assert self._cached_page is not None
+ if width == 1:
+ return self._read_bytes(offset, 1)[0]
+ elif width == 2:
+ return read_uint16_with_byteswap(
+ self._cached_page, offset, self.need_byteswap
+ )
+ elif width == 4:
+ return read_uint32_with_byteswap(
+ self._cached_page, offset, self.need_byteswap
+ )
+ elif width == 8:
+ return read_uint64_with_byteswap(
+ self._cached_page, offset, self.need_byteswap
+ )
+ else:
+ self.close()
+ raise ValueError("invalid int width")
+
+ def _read_bytes(self, offset: int, length: int):
+ assert self._cached_page is not None
+ if offset + length > len(self._cached_page):
+ self.close()
+ raise ValueError("The cached page is too small.")
+ return self._cached_page[offset : offset + length]
+
+ def _read_and_convert_header_text(self, offset: int, length: int) -> str | bytes:
+ return self._convert_header_text(
+ self._read_bytes(offset, length).rstrip(b"\x00 ")
+ )
+
+ def _parse_metadata(self) -> None:
+ done = False
+ while not done:
+ self._cached_page = self._path_or_buf.read(self._page_length)
+ if len(self._cached_page) <= 0:
+ break
+ if len(self._cached_page) != self._page_length:
+ raise ValueError("Failed to read a meta data page from the SAS file.")
+ done = self._process_page_meta()
+
+ def _process_page_meta(self) -> bool:
+ self._read_page_header()
+ pt = const.page_meta_types + [const.page_amd_type, const.page_mix_type]
+ if self._current_page_type in pt:
+ self._process_page_metadata()
+ is_data_page = self._current_page_type == const.page_data_type
+ is_mix_page = self._current_page_type == const.page_mix_type
+ return bool(
+ is_data_page
+ or is_mix_page
+ or self._current_page_data_subheader_pointers != []
+ )
+
+ def _read_page_header(self) -> None:
+ bit_offset = self._page_bit_offset
+ tx = const.page_type_offset + bit_offset
+ self._current_page_type = (
+ self._read_uint(tx, const.page_type_length) & const.page_type_mask2
+ )
+ tx = const.block_count_offset + bit_offset
+ self._current_page_block_count = self._read_uint(tx, const.block_count_length)
+ tx = const.subheader_count_offset + bit_offset
+ self._current_page_subheaders_count = self._read_uint(
+ tx, const.subheader_count_length
+ )
+
+ def _process_page_metadata(self) -> None:
+ bit_offset = self._page_bit_offset
+
+ for i in range(self._current_page_subheaders_count):
+ offset = const.subheader_pointers_offset + bit_offset
+ total_offset = offset + self._subheader_pointer_length * i
+
+ subheader_offset = self._read_uint(total_offset, self._int_length)
+ total_offset += self._int_length
+
+ subheader_length = self._read_uint(total_offset, self._int_length)
+ total_offset += self._int_length
+
+ subheader_compression = self._read_uint(total_offset, 1)
+ total_offset += 1
+
+ subheader_type = self._read_uint(total_offset, 1)
+
+ if (
+ subheader_length == 0
+ or subheader_compression == const.truncated_subheader_id
+ ):
+ continue
+
+ subheader_signature = self._read_bytes(subheader_offset, self._int_length)
+ subheader_index = get_subheader_index(subheader_signature)
+ subheader_processor = self._subheader_processors[subheader_index]
+
+ if subheader_processor is None:
+ f1 = subheader_compression in (const.compressed_subheader_id, 0)
+ f2 = subheader_type == const.compressed_subheader_type
+ if self.compression and f1 and f2:
+ self._current_page_data_subheader_pointers.append(
+ (subheader_offset, subheader_length)
+ )
+ else:
+ self.close()
+ raise ValueError(
+ f"Unknown subheader signature {subheader_signature}"
+ )
+ else:
+ subheader_processor(subheader_offset, subheader_length)
+
+ def _process_rowsize_subheader(self, offset: int, length: int) -> None:
+ int_len = self._int_length
+ lcs_offset = offset
+ lcp_offset = offset
+ if self.U64:
+ lcs_offset += 682
+ lcp_offset += 706
+ else:
+ lcs_offset += 354
+ lcp_offset += 378
+
+ self.row_length = self._read_uint(
+ offset + const.row_length_offset_multiplier * int_len,
+ int_len,
+ )
+ self.row_count = self._read_uint(
+ offset + const.row_count_offset_multiplier * int_len,
+ int_len,
+ )
+ self.col_count_p1 = self._read_uint(
+ offset + const.col_count_p1_multiplier * int_len, int_len
+ )
+ self.col_count_p2 = self._read_uint(
+ offset + const.col_count_p2_multiplier * int_len, int_len
+ )
+ mx = const.row_count_on_mix_page_offset_multiplier * int_len
+ self._mix_page_row_count = self._read_uint(offset + mx, int_len)
+ self._lcs = self._read_uint(lcs_offset, 2)
+ self._lcp = self._read_uint(lcp_offset, 2)
+
+ def _process_columnsize_subheader(self, offset: int, length: int) -> None:
+ int_len = self._int_length
+ offset += int_len
+ self.column_count = self._read_uint(offset, int_len)
+ if self.col_count_p1 + self.col_count_p2 != self.column_count:
+ print(
+ f"Warning: column count mismatch ({self.col_count_p1} + "
+ f"{self.col_count_p2} != {self.column_count})\n"
+ )
+
+ # Unknown purpose
+ def _process_subheader_counts(self, offset: int, length: int) -> None:
+ pass
+
+ def _process_columntext_subheader(self, offset: int, length: int) -> None:
+ offset += self._int_length
+ text_block_size = self._read_uint(offset, const.text_block_size_length)
+
+ buf = self._read_bytes(offset, text_block_size)
+ cname_raw = buf[0:text_block_size].rstrip(b"\x00 ")
+ self.column_names_raw.append(cname_raw)
+
+ if len(self.column_names_raw) == 1:
+ compression_literal = b""
+ for cl in const.compression_literals:
+ if cl in cname_raw:
+ compression_literal = cl
+ self.compression = compression_literal
+ offset -= self._int_length
+
+ offset1 = offset + 16
+ if self.U64:
+ offset1 += 4
+
+ buf = self._read_bytes(offset1, self._lcp)
+ compression_literal = buf.rstrip(b"\x00")
+ if compression_literal == b"":
+ self._lcs = 0
+ offset1 = offset + 32
+ if self.U64:
+ offset1 += 4
+ buf = self._read_bytes(offset1, self._lcp)
+ self.creator_proc = buf[0 : self._lcp]
+ elif compression_literal == const.rle_compression:
+ offset1 = offset + 40
+ if self.U64:
+ offset1 += 4
+ buf = self._read_bytes(offset1, self._lcp)
+ self.creator_proc = buf[0 : self._lcp]
+ elif self._lcs > 0:
+ self._lcp = 0
+ offset1 = offset + 16
+ if self.U64:
+ offset1 += 4
+ buf = self._read_bytes(offset1, self._lcs)
+ self.creator_proc = buf[0 : self._lcp]
+ if hasattr(self, "creator_proc"):
+ self.creator_proc = self._convert_header_text(self.creator_proc)
+
+ def _process_columnname_subheader(self, offset: int, length: int) -> None:
+ int_len = self._int_length
+ offset += int_len
+ column_name_pointers_count = (length - 2 * int_len - 12) // 8
+ for i in range(column_name_pointers_count):
+ text_subheader = (
+ offset
+ + const.column_name_pointer_length * (i + 1)
+ + const.column_name_text_subheader_offset
+ )
+ col_name_offset = (
+ offset
+ + const.column_name_pointer_length * (i + 1)
+ + const.column_name_offset_offset
+ )
+ col_name_length = (
+ offset
+ + const.column_name_pointer_length * (i + 1)
+ + const.column_name_length_offset
+ )
+
+ idx = self._read_uint(
+ text_subheader, const.column_name_text_subheader_length
+ )
+ col_offset = self._read_uint(
+ col_name_offset, const.column_name_offset_length
+ )
+ col_len = self._read_uint(col_name_length, const.column_name_length_length)
+
+ name_raw = self.column_names_raw[idx]
+ cname = name_raw[col_offset : col_offset + col_len]
+ self.column_names.append(self._convert_header_text(cname))
+
+ def _process_columnattributes_subheader(self, offset: int, length: int) -> None:
+ int_len = self._int_length
+ column_attributes_vectors_count = (length - 2 * int_len - 12) // (int_len + 8)
+ for i in range(column_attributes_vectors_count):
+ col_data_offset = (
+ offset + int_len + const.column_data_offset_offset + i * (int_len + 8)
+ )
+ col_data_len = (
+ offset
+ + 2 * int_len
+ + const.column_data_length_offset
+ + i * (int_len + 8)
+ )
+ col_types = (
+ offset + 2 * int_len + const.column_type_offset + i * (int_len + 8)
+ )
+
+ x = self._read_uint(col_data_offset, int_len)
+ self._column_data_offsets.append(x)
+
+ x = self._read_uint(col_data_len, const.column_data_length_length)
+ self._column_data_lengths.append(x)
+
+ x = self._read_uint(col_types, const.column_type_length)
+ self._column_types.append(b"d" if x == 1 else b"s")
+
+ def _process_columnlist_subheader(self, offset: int, length: int) -> None:
+ # unknown purpose
+ pass
+
+ def _process_format_subheader(self, offset: int, length: int) -> None:
+ int_len = self._int_length
+ text_subheader_format = (
+ offset + const.column_format_text_subheader_index_offset + 3 * int_len
+ )
+ col_format_offset = offset + const.column_format_offset_offset + 3 * int_len
+ col_format_len = offset + const.column_format_length_offset + 3 * int_len
+ text_subheader_label = (
+ offset + const.column_label_text_subheader_index_offset + 3 * int_len
+ )
+ col_label_offset = offset + const.column_label_offset_offset + 3 * int_len
+ col_label_len = offset + const.column_label_length_offset + 3 * int_len
+
+ x = self._read_uint(
+ text_subheader_format, const.column_format_text_subheader_index_length
+ )
+ format_idx = min(x, len(self.column_names_raw) - 1)
+
+ format_start = self._read_uint(
+ col_format_offset, const.column_format_offset_length
+ )
+ format_len = self._read_uint(col_format_len, const.column_format_length_length)
+
+ label_idx = self._read_uint(
+ text_subheader_label, const.column_label_text_subheader_index_length
+ )
+ label_idx = min(label_idx, len(self.column_names_raw) - 1)
+
+ label_start = self._read_uint(
+ col_label_offset, const.column_label_offset_length
+ )
+ label_len = self._read_uint(col_label_len, const.column_label_length_length)
+
+ label_names = self.column_names_raw[label_idx]
+ column_label = self._convert_header_text(
+ label_names[label_start : label_start + label_len]
+ )
+ format_names = self.column_names_raw[format_idx]
+ column_format = self._convert_header_text(
+ format_names[format_start : format_start + format_len]
+ )
+ current_column_number = len(self.columns)
+
+ col = _Column(
+ current_column_number,
+ self.column_names[current_column_number],
+ column_label,
+ column_format,
+ self._column_types[current_column_number],
+ self._column_data_lengths[current_column_number],
+ )
+
+ self.column_formats.append(column_format)
+ self.columns.append(col)
+
+ def read(self, nrows: int | None = None) -> DataFrame:
+ if (nrows is None) and (self.chunksize is not None):
+ nrows = self.chunksize
+ elif nrows is None:
+ nrows = self.row_count
+
+ if len(self._column_types) == 0:
+ self.close()
+ raise EmptyDataError("No columns to parse from file")
+
+ if nrows > 0 and self._current_row_in_file_index >= self.row_count:
+ return DataFrame()
+
+ nrows = min(nrows, self.row_count - self._current_row_in_file_index)
+
+ nd = self._column_types.count(b"d")
+ ns = self._column_types.count(b"s")
+
+ self._string_chunk = np.empty((ns, nrows), dtype=object)
+ self._byte_chunk = np.zeros((nd, 8 * nrows), dtype=np.uint8)
+
+ self._current_row_in_chunk_index = 0
+ p = Parser(self)
+ p.read(nrows)
+
+ rslt = self._chunk_to_dataframe()
+ if self.index is not None:
+ rslt = rslt.set_index(self.index)
+
+ return rslt
+
+ def _read_next_page(self):
+ self._current_page_data_subheader_pointers = []
+ self._cached_page = self._path_or_buf.read(self._page_length)
+ if len(self._cached_page) <= 0:
+ return True
+ elif len(self._cached_page) != self._page_length:
+ self.close()
+ msg = (
+ "failed to read complete page from file (read "
+ f"{len(self._cached_page):d} of {self._page_length:d} bytes)"
+ )
+ raise ValueError(msg)
+
+ self._read_page_header()
+ if self._current_page_type in const.page_meta_types:
+ self._process_page_metadata()
+
+ if self._current_page_type not in const.page_meta_types + [
+ const.page_data_type,
+ const.page_mix_type,
+ ]:
+ return self._read_next_page()
+
+ return False
+
+ def _chunk_to_dataframe(self) -> DataFrame:
+ n = self._current_row_in_chunk_index
+ m = self._current_row_in_file_index
+ ix = range(m - n, m)
+ rslt = {}
+
+ js, jb = 0, 0
+ for j in range(self.column_count):
+ name = self.column_names[j]
+
+ if self._column_types[j] == b"d":
+ col_arr = self._byte_chunk[jb, :].view(dtype=self.byte_order + "d")
+ rslt[name] = pd.Series(col_arr, dtype=np.float64, index=ix, copy=False)
+ if self.convert_dates:
+ if self.column_formats[j] in const.sas_date_formats:
+ rslt[name] = _convert_datetimes(rslt[name], "d")
+ elif self.column_formats[j] in const.sas_datetime_formats:
+ rslt[name] = _convert_datetimes(rslt[name], "s")
+ jb += 1
+ elif self._column_types[j] == b"s":
+ rslt[name] = pd.Series(self._string_chunk[js, :], index=ix, copy=False)
+ if self.convert_text and (self.encoding is not None):
+ rslt[name] = self._decode_string(rslt[name].str)
+ js += 1
+ else:
+ self.close()
+ raise ValueError(f"unknown column type {repr(self._column_types[j])}")
+
+ df = DataFrame(rslt, columns=self.column_names, index=ix, copy=False)
+ return df
+
+ def _decode_string(self, b):
+ return b.decode(self.encoding or self.default_encoding)
+
+ def _convert_header_text(self, b: bytes) -> str | bytes:
+ if self.convert_header_text:
+ return self._decode_string(b)
+ else:
+ return b
diff --git a/evalkit_eagle/lib/python3.10/site-packages/pandas/io/sas/sas_constants.py b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/sas/sas_constants.py
new file mode 100644
index 0000000000000000000000000000000000000000..62c17bd03927e5f852af708e6b9ef6cf7e74d57c
--- /dev/null
+++ b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/sas/sas_constants.py
@@ -0,0 +1,310 @@
+from __future__ import annotations
+
+from typing import Final
+
+magic: Final = (
+ b"\x00\x00\x00\x00\x00\x00\x00\x00"
+ b"\x00\x00\x00\x00\xc2\xea\x81\x60"
+ b"\xb3\x14\x11\xcf\xbd\x92\x08\x00"
+ b"\x09\xc7\x31\x8c\x18\x1f\x10\x11"
+)
+
+align_1_checker_value: Final = b"3"
+align_1_offset: Final = 32
+align_1_length: Final = 1
+align_1_value: Final = 4
+u64_byte_checker_value: Final = b"3"
+align_2_offset: Final = 35
+align_2_length: Final = 1
+align_2_value: Final = 4
+endianness_offset: Final = 37
+endianness_length: Final = 1
+platform_offset: Final = 39
+platform_length: Final = 1
+encoding_offset: Final = 70
+encoding_length: Final = 1
+dataset_offset: Final = 92
+dataset_length: Final = 64
+file_type_offset: Final = 156
+file_type_length: Final = 8
+date_created_offset: Final = 164
+date_created_length: Final = 8
+date_modified_offset: Final = 172
+date_modified_length: Final = 8
+header_size_offset: Final = 196
+header_size_length: Final = 4
+page_size_offset: Final = 200
+page_size_length: Final = 4
+page_count_offset: Final = 204
+page_count_length: Final = 4
+sas_release_offset: Final = 216
+sas_release_length: Final = 8
+sas_server_type_offset: Final = 224
+sas_server_type_length: Final = 16
+os_version_number_offset: Final = 240
+os_version_number_length: Final = 16
+os_maker_offset: Final = 256
+os_maker_length: Final = 16
+os_name_offset: Final = 272
+os_name_length: Final = 16
+page_bit_offset_x86: Final = 16
+page_bit_offset_x64: Final = 32
+subheader_pointer_length_x86: Final = 12
+subheader_pointer_length_x64: Final = 24
+page_type_offset: Final = 0
+page_type_length: Final = 2
+block_count_offset: Final = 2
+block_count_length: Final = 2
+subheader_count_offset: Final = 4
+subheader_count_length: Final = 2
+page_type_mask: Final = 0x0F00
+# Keep "page_comp_type" bits
+page_type_mask2: Final = 0xF000 | page_type_mask
+page_meta_type: Final = 0x0000
+page_data_type: Final = 0x0100
+page_mix_type: Final = 0x0200
+page_amd_type: Final = 0x0400
+page_meta2_type: Final = 0x4000
+page_comp_type: Final = 0x9000
+page_meta_types: Final = [page_meta_type, page_meta2_type]
+subheader_pointers_offset: Final = 8
+truncated_subheader_id: Final = 1
+compressed_subheader_id: Final = 4
+compressed_subheader_type: Final = 1
+text_block_size_length: Final = 2
+row_length_offset_multiplier: Final = 5
+row_count_offset_multiplier: Final = 6
+col_count_p1_multiplier: Final = 9
+col_count_p2_multiplier: Final = 10
+row_count_on_mix_page_offset_multiplier: Final = 15
+column_name_pointer_length: Final = 8
+column_name_text_subheader_offset: Final = 0
+column_name_text_subheader_length: Final = 2
+column_name_offset_offset: Final = 2
+column_name_offset_length: Final = 2
+column_name_length_offset: Final = 4
+column_name_length_length: Final = 2
+column_data_offset_offset: Final = 8
+column_data_length_offset: Final = 8
+column_data_length_length: Final = 4
+column_type_offset: Final = 14
+column_type_length: Final = 1
+column_format_text_subheader_index_offset: Final = 22
+column_format_text_subheader_index_length: Final = 2
+column_format_offset_offset: Final = 24
+column_format_offset_length: Final = 2
+column_format_length_offset: Final = 26
+column_format_length_length: Final = 2
+column_label_text_subheader_index_offset: Final = 28
+column_label_text_subheader_index_length: Final = 2
+column_label_offset_offset: Final = 30
+column_label_offset_length: Final = 2
+column_label_length_offset: Final = 32
+column_label_length_length: Final = 2
+rle_compression: Final = b"SASYZCRL"
+rdc_compression: Final = b"SASYZCR2"
+
+compression_literals: Final = [rle_compression, rdc_compression]
+
+# Incomplete list of encodings, using SAS nomenclature:
+# https://support.sas.com/documentation/onlinedoc/dfdmstudio/2.6/dmpdmsug/Content/dfU_Encodings_SAS.html
+# corresponding to the Python documentation of standard encodings
+# https://docs.python.org/3/library/codecs.html#standard-encodings
+encoding_names: Final = {
+ 20: "utf-8",
+ 29: "latin1",
+ 30: "latin2",
+ 31: "latin3",
+ 32: "latin4",
+ 33: "cyrillic",
+ 34: "arabic",
+ 35: "greek",
+ 36: "hebrew",
+ 37: "latin5",
+ 38: "latin6",
+ 39: "cp874",
+ 40: "latin9",
+ 41: "cp437",
+ 42: "cp850",
+ 43: "cp852",
+ 44: "cp857",
+ 45: "cp858",
+ 46: "cp862",
+ 47: "cp864",
+ 48: "cp865",
+ 49: "cp866",
+ 50: "cp869",
+ 51: "cp874",
+ # 52: "", # not found
+ # 53: "", # not found
+ # 54: "", # not found
+ 55: "cp720",
+ 56: "cp737",
+ 57: "cp775",
+ 58: "cp860",
+ 59: "cp863",
+ 60: "cp1250",
+ 61: "cp1251",
+ 62: "cp1252",
+ 63: "cp1253",
+ 64: "cp1254",
+ 65: "cp1255",
+ 66: "cp1256",
+ 67: "cp1257",
+ 68: "cp1258",
+ 118: "cp950",
+ # 119: "", # not found
+ 123: "big5",
+ 125: "gb2312",
+ 126: "cp936",
+ 134: "euc_jp",
+ 136: "cp932",
+ 138: "shift_jis",
+ 140: "euc-kr",
+ 141: "cp949",
+ 227: "latin8",
+ # 228: "", # not found
+ # 229: "" # not found
+}
+
+
+class SASIndex:
+ row_size_index: Final = 0
+ column_size_index: Final = 1
+ subheader_counts_index: Final = 2
+ column_text_index: Final = 3
+ column_name_index: Final = 4
+ column_attributes_index: Final = 5
+ format_and_label_index: Final = 6
+ column_list_index: Final = 7
+ data_subheader_index: Final = 8
+
+
+subheader_signature_to_index: Final = {
+ b"\xF7\xF7\xF7\xF7": SASIndex.row_size_index,
+ b"\x00\x00\x00\x00\xF7\xF7\xF7\xF7": SASIndex.row_size_index,
+ b"\xF7\xF7\xF7\xF7\x00\x00\x00\x00": SASIndex.row_size_index,
+ b"\xF7\xF7\xF7\xF7\xFF\xFF\xFB\xFE": SASIndex.row_size_index,
+ b"\xF6\xF6\xF6\xF6": SASIndex.column_size_index,
+ b"\x00\x00\x00\x00\xF6\xF6\xF6\xF6": SASIndex.column_size_index,
+ b"\xF6\xF6\xF6\xF6\x00\x00\x00\x00": SASIndex.column_size_index,
+ b"\xF6\xF6\xF6\xF6\xFF\xFF\xFB\xFE": SASIndex.column_size_index,
+ b"\x00\xFC\xFF\xFF": SASIndex.subheader_counts_index,
+ b"\xFF\xFF\xFC\x00": SASIndex.subheader_counts_index,
+ b"\x00\xFC\xFF\xFF\xFF\xFF\xFF\xFF": SASIndex.subheader_counts_index,
+ b"\xFF\xFF\xFF\xFF\xFF\xFF\xFC\x00": SASIndex.subheader_counts_index,
+ b"\xFD\xFF\xFF\xFF": SASIndex.column_text_index,
+ b"\xFF\xFF\xFF\xFD": SASIndex.column_text_index,
+ b"\xFD\xFF\xFF\xFF\xFF\xFF\xFF\xFF": SASIndex.column_text_index,
+ b"\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFD": SASIndex.column_text_index,
+ b"\xFF\xFF\xFF\xFF": SASIndex.column_name_index,
+ b"\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF": SASIndex.column_name_index,
+ b"\xFC\xFF\xFF\xFF": SASIndex.column_attributes_index,
+ b"\xFF\xFF\xFF\xFC": SASIndex.column_attributes_index,
+ b"\xFC\xFF\xFF\xFF\xFF\xFF\xFF\xFF": SASIndex.column_attributes_index,
+ b"\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFC": SASIndex.column_attributes_index,
+ b"\xFE\xFB\xFF\xFF": SASIndex.format_and_label_index,
+ b"\xFF\xFF\xFB\xFE": SASIndex.format_and_label_index,
+ b"\xFE\xFB\xFF\xFF\xFF\xFF\xFF\xFF": SASIndex.format_and_label_index,
+ b"\xFF\xFF\xFF\xFF\xFF\xFF\xFB\xFE": SASIndex.format_and_label_index,
+ b"\xFE\xFF\xFF\xFF": SASIndex.column_list_index,
+ b"\xFF\xFF\xFF\xFE": SASIndex.column_list_index,
+ b"\xFE\xFF\xFF\xFF\xFF\xFF\xFF\xFF": SASIndex.column_list_index,
+ b"\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFE": SASIndex.column_list_index,
+}
+
+
+# List of frequently used SAS date and datetime formats
+# http://support.sas.com/documentation/cdl/en/etsug/60372/HTML/default/viewer.htm#etsug_intervals_sect009.htm
+# https://github.com/epam/parso/blob/master/src/main/java/com/epam/parso/impl/SasFileConstants.java
+sas_date_formats: Final = (
+ "DATE",
+ "DAY",
+ "DDMMYY",
+ "DOWNAME",
+ "JULDAY",
+ "JULIAN",
+ "MMDDYY",
+ "MMYY",
+ "MMYYC",
+ "MMYYD",
+ "MMYYP",
+ "MMYYS",
+ "MMYYN",
+ "MONNAME",
+ "MONTH",
+ "MONYY",
+ "QTR",
+ "QTRR",
+ "NENGO",
+ "WEEKDATE",
+ "WEEKDATX",
+ "WEEKDAY",
+ "WEEKV",
+ "WORDDATE",
+ "WORDDATX",
+ "YEAR",
+ "YYMM",
+ "YYMMC",
+ "YYMMD",
+ "YYMMP",
+ "YYMMS",
+ "YYMMN",
+ "YYMON",
+ "YYMMDD",
+ "YYQ",
+ "YYQC",
+ "YYQD",
+ "YYQP",
+ "YYQS",
+ "YYQN",
+ "YYQR",
+ "YYQRC",
+ "YYQRD",
+ "YYQRP",
+ "YYQRS",
+ "YYQRN",
+ "YYMMDDP",
+ "YYMMDDC",
+ "E8601DA",
+ "YYMMDDN",
+ "MMDDYYC",
+ "MMDDYYS",
+ "MMDDYYD",
+ "YYMMDDS",
+ "B8601DA",
+ "DDMMYYN",
+ "YYMMDDD",
+ "DDMMYYB",
+ "DDMMYYP",
+ "MMDDYYP",
+ "YYMMDDB",
+ "MMDDYYN",
+ "DDMMYYC",
+ "DDMMYYD",
+ "DDMMYYS",
+ "MINGUO",
+)
+
+sas_datetime_formats: Final = (
+ "DATETIME",
+ "DTWKDATX",
+ "B8601DN",
+ "B8601DT",
+ "B8601DX",
+ "B8601DZ",
+ "B8601LX",
+ "E8601DN",
+ "E8601DT",
+ "E8601DX",
+ "E8601DZ",
+ "E8601LX",
+ "DATEAMPM",
+ "DTDATE",
+ "DTMONYY",
+ "DTMONYY",
+ "DTWKDATX",
+ "DTYEAR",
+ "TOD",
+ "MDYAMPM",
+)
diff --git a/evalkit_eagle/lib/python3.10/site-packages/pandas/io/sas/sas_xport.py b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/sas/sas_xport.py
new file mode 100644
index 0000000000000000000000000000000000000000..11b2ed0ee73168ba82e3b8d312f96bcea9398e49
--- /dev/null
+++ b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/sas/sas_xport.py
@@ -0,0 +1,508 @@
+"""
+Read a SAS XPort format file into a Pandas DataFrame.
+
+Based on code from Jack Cushman (github.com/jcushman/xport).
+
+The file format is defined here:
+
+https://support.sas.com/content/dam/SAS/support/en/technical-papers/record-layout-of-a-sas-version-5-or-6-data-set-in-sas-transport-xport-format.pdf
+"""
+from __future__ import annotations
+
+from collections import abc
+from datetime import datetime
+import struct
+from typing import TYPE_CHECKING
+import warnings
+
+import numpy as np
+
+from pandas.util._decorators import Appender
+from pandas.util._exceptions import find_stack_level
+
+import pandas as pd
+
+from pandas.io.common import get_handle
+from pandas.io.sas.sasreader import ReaderBase
+
+if TYPE_CHECKING:
+ from pandas._typing import (
+ CompressionOptions,
+ DatetimeNaTType,
+ FilePath,
+ ReadBuffer,
+ )
+_correct_line1 = (
+ "HEADER RECORD*******LIBRARY HEADER RECORD!!!!!!!"
+ "000000000000000000000000000000 "
+)
+_correct_header1 = (
+ "HEADER RECORD*******MEMBER HEADER RECORD!!!!!!!000000000000000001600000000"
+)
+_correct_header2 = (
+ "HEADER RECORD*******DSCRPTR HEADER RECORD!!!!!!!"
+ "000000000000000000000000000000 "
+)
+_correct_obs_header = (
+ "HEADER RECORD*******OBS HEADER RECORD!!!!!!!"
+ "000000000000000000000000000000 "
+)
+_fieldkeys = [
+ "ntype",
+ "nhfun",
+ "field_length",
+ "nvar0",
+ "name",
+ "label",
+ "nform",
+ "nfl",
+ "num_decimals",
+ "nfj",
+ "nfill",
+ "niform",
+ "nifl",
+ "nifd",
+ "npos",
+ "_",
+]
+
+
+_base_params_doc = """\
+Parameters
+----------
+filepath_or_buffer : str or file-like object
+ Path to SAS file or object implementing binary read method."""
+
+_params2_doc = """\
+index : identifier of index column
+ Identifier of column that should be used as index of the DataFrame.
+encoding : str
+ Encoding for text data.
+chunksize : int
+ Read file `chunksize` lines at a time, returns iterator."""
+
+_format_params_doc = """\
+format : str
+ File format, only `xport` is currently supported."""
+
+_iterator_doc = """\
+iterator : bool, default False
+ Return XportReader object for reading file incrementally."""
+
+
+_read_sas_doc = f"""Read a SAS file into a DataFrame.
+
+{_base_params_doc}
+{_format_params_doc}
+{_params2_doc}
+{_iterator_doc}
+
+Returns
+-------
+DataFrame or XportReader
+
+Examples
+--------
+Read a SAS Xport file:
+
+>>> df = pd.read_sas('filename.XPT')
+
+Read an Xport file in 10,000-line chunks:
+
+>>> itr = pd.read_sas('filename.XPT', chunksize=10000)
+>>> for chunk in itr:
+...     do_something(chunk)
+
+"""
+
+_xport_reader_doc = f"""\
+Class for reading SAS Xport files.
+
+{_base_params_doc}
+{_params2_doc}
+
+Attributes
+----------
+member_info : list
+ Contains information about the file
+fields : list
+ Contains information about the variables in the file
+"""
+
+_read_method_doc = """\
+Read observations from a SAS Xport file, returning them as a DataFrame.
+
+Parameters
+----------
+nrows : int
+ Number of rows to read from data file; if None, read whole
+ file.
+
+Returns
+-------
+A DataFrame.
+"""
+
+
+def _parse_date(datestr: str) -> DatetimeNaTType:
+ """Given a date in xport format, return Python date."""
+ try:
+ # e.g. "16FEB11:10:07:55"
+ return datetime.strptime(datestr, "%d%b%y:%H:%M:%S")
+ except ValueError:
+ return pd.NaT
+
+
+def _split_line(s: str, parts):
+ """
+ Parameters
+ ----------
+ s: str
+ Fixed-length string to split
+ parts: list of (name, length) pairs
+       Used to break up the string; fields named '_' are filtered from the output.
+
+ Returns
+ -------
+ Dict of name:contents of string at given location.
+ """
+ out = {}
+ start = 0
+ for name, length in parts:
+ out[name] = s[start : start + length].strip()
+ start += length
+ del out["_"]
+ return out
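+
+
+# Illustrative sketch: fields named "_" are dropped, so
+#   _split_line("abcdef", [("x", 3), ("_", 3)])
+# returns {"x": "abc"}.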
+
+
+def _handle_truncated_float_vec(vec, nbytes):
+ # This feature is not well documented, but some SAS XPORT files
+ # have 2-7 byte "truncated" floats. To read these truncated
+ # floats, pad them with zeros on the right to make 8 byte floats.
+ #
+ # References:
+ # https://github.com/jcushman/xport/pull/3
+ # The R "foreign" library
+
+ if nbytes != 8:
+ vec1 = np.zeros(len(vec), np.dtype("S8"))
+ dtype = np.dtype(f"S{nbytes},S{8 - nbytes}")
+ vec2 = vec1.view(dtype=dtype)
+ vec2["f0"] = vec
+ return vec2
+
+ return vec
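+
+
+# Illustrative note: for nbytes == 3, each value is viewed through an
+# ("S3", "S5") struct and copied into the "f0" field of a zeroed S8 buffer,
+# which right-pads it with zero bytes to a full 8-byte IBM float.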
+
+
+def _parse_float_vec(vec):
+ """
+ Parse a vector of float values representing IBM 8 byte floats into
+ native 8 byte floats.
+ """
+ dtype = np.dtype(">u4,>u4")
+ vec1 = vec.view(dtype=dtype)
+ xport1 = vec1["f0"]
+ xport2 = vec1["f1"]
+
+ # Start by setting first half of ieee number to first half of IBM
+ # number sans exponent
+ ieee1 = xport1 & 0x00FFFFFF
+
+ # The fraction bit to the left of the binary point in the ieee
+ # format was set and the number was shifted 0, 1, 2, or 3
+ # places. This will tell us how to adjust the ibm exponent to be a
+ # power of 2 ieee exponent and how to shift the fraction bits to
+ # restore the correct magnitude.
+ shift = np.zeros(len(vec), dtype=np.uint8)
+ shift[np.where(xport1 & 0x00200000)] = 1
+ shift[np.where(xport1 & 0x00400000)] = 2
+ shift[np.where(xport1 & 0x00800000)] = 3
+
+ # shift the ieee number down the correct number of places then
+ # set the second half of the ieee number to be the second half
+ # of the ibm number shifted appropriately, ored with the bits
+ # from the first half that would have been shifted in if we
+ # could shift a double. All we are worried about are the low
+ # order 3 bits of the first half since we're only shifting by
+ # 1, 2, or 3.
+ ieee1 >>= shift
+ ieee2 = (xport2 >> shift) | ((xport1 & 0x00000007) << (29 + (3 - shift)))
+
+ # clear the 1 bit to the left of the binary point
+ ieee1 &= 0xFFEFFFFF
+
+ # set the exponent of the ieee number to be the actual exponent
+ # plus the shift count + 1023. Or this into the first half of the
+ # ieee number. The ibm exponent is excess 64 but is adjusted by 65
+ # since during conversion to ibm format the exponent is
+ # incremented by 1 and the fraction bits left 4 positions to the
+ # right of the radix point. (had to add >> 24 because C treats &
+ # 0x7f as 0x7f000000 and Python doesn't)
+ ieee1 |= ((((((xport1 >> 24) & 0x7F) - 65) << 2) + shift + 1023) << 20) | (
+ xport1 & 0x80000000
+ )
+
+ ieee = np.empty((len(ieee1),), dtype=">u4,>u4")
+ ieee["f0"] = ieee1
+ ieee["f1"] = ieee2
+ ieee = ieee.view(dtype=">f8")
+ ieee = ieee.astype("f8")
+
+ return ieee
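+
+
+# Worked example: IBM 0x4110000000000000 encodes 1.0 (sign 0, excess-64
+# exponent 0x41, hex fraction 0.1), and the bit manipulation above rewrites
+# it as the IEEE double 0x3FF0000000000000, i.e. 1.0.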
+
+
+class XportReader(ReaderBase, abc.Iterator):
+ __doc__ = _xport_reader_doc
+
+ def __init__(
+ self,
+ filepath_or_buffer: FilePath | ReadBuffer[bytes],
+ index=None,
+ encoding: str | None = "ISO-8859-1",
+ chunksize: int | None = None,
+ compression: CompressionOptions = "infer",
+ ) -> None:
+ self._encoding = encoding
+ self._lines_read = 0
+ self._index = index
+ self._chunksize = chunksize
+
+ self.handles = get_handle(
+ filepath_or_buffer,
+ "rb",
+ encoding=encoding,
+ is_text=False,
+ compression=compression,
+ )
+ self.filepath_or_buffer = self.handles.handle
+
+ try:
+ self._read_header()
+ except Exception:
+ self.close()
+ raise
+
+ def close(self) -> None:
+ self.handles.close()
+
+ def _get_row(self):
+ return self.filepath_or_buffer.read(80).decode()
+
+ def _read_header(self) -> None:
+ self.filepath_or_buffer.seek(0)
+
+ # read file header
+ line1 = self._get_row()
+ if line1 != _correct_line1:
+ if "**COMPRESSED**" in line1:
+ # this was created with the PROC CPORT method and can't be read
+ # https://documentation.sas.com/doc/en/pgmsascdc/9.4_3.5/movefile/p1bm6aqp3fw4uin1hucwh718f6kp.htm
+ raise ValueError(
+ "Header record indicates a CPORT file, which is not readable."
+ )
+ raise ValueError("Header record is not an XPORT file.")
+
+ line2 = self._get_row()
+ fif = [["prefix", 24], ["version", 8], ["OS", 8], ["_", 24], ["created", 16]]
+ file_info = _split_line(line2, fif)
+ if file_info["prefix"] != "SAS SAS SASLIB":
+ raise ValueError("Header record has invalid prefix.")
+ file_info["created"] = _parse_date(file_info["created"])
+ self.file_info = file_info
+
+ line3 = self._get_row()
+ file_info["modified"] = _parse_date(line3[:16])
+
+ # read member header
+ header1 = self._get_row()
+ header2 = self._get_row()
+ headflag1 = header1.startswith(_correct_header1)
+ headflag2 = header2 == _correct_header2
+ if not (headflag1 and headflag2):
+ raise ValueError("Member header not found")
+ # usually 140, could be 135
+ fieldnamelength = int(header1[-5:-2])
+
+ # member info
+ mem = [
+ ["prefix", 8],
+ ["set_name", 8],
+ ["sasdata", 8],
+ ["version", 8],
+ ["OS", 8],
+ ["_", 24],
+ ["created", 16],
+ ]
+ member_info = _split_line(self._get_row(), mem)
+ mem = [["modified", 16], ["_", 16], ["label", 40], ["type", 8]]
+ member_info.update(_split_line(self._get_row(), mem))
+ member_info["modified"] = _parse_date(member_info["modified"])
+ member_info["created"] = _parse_date(member_info["created"])
+ self.member_info = member_info
+
+ # read field names
+ types = {1: "numeric", 2: "char"}
+ fieldcount = int(self._get_row()[54:58])
+ datalength = fieldnamelength * fieldcount
+ # round up to nearest 80
+ if datalength % 80:
+ datalength += 80 - datalength % 80
+ fielddata = self.filepath_or_buffer.read(datalength)
+ fields = []
+ obs_length = 0
+ while len(fielddata) >= fieldnamelength:
+ # pull data for one field
+ fieldbytes, fielddata = (
+ fielddata[:fieldnamelength],
+ fielddata[fieldnamelength:],
+ )
+
+ # rest at end gets ignored, so if field is short, pad out
+ # to match struct pattern below
+ fieldbytes = fieldbytes.ljust(140)
+
+ fieldstruct = struct.unpack(">hhhh8s40s8shhh2s8shhl52s", fieldbytes)
+ field = dict(zip(_fieldkeys, fieldstruct))
+ del field["_"]
+ field["ntype"] = types[field["ntype"]]
+ fl = field["field_length"]
+ if field["ntype"] == "numeric" and ((fl < 2) or (fl > 8)):
+ msg = f"Floating field width {fl} is not between 2 and 8."
+ raise TypeError(msg)
+
+ for k, v in field.items():
+ try:
+ field[k] = v.strip()
+ except AttributeError:
+ pass
+
+ obs_length += field["field_length"]
+ fields += [field]
+
+ header = self._get_row()
+ if not header == _correct_obs_header:
+ raise ValueError("Observation header not found.")
+
+ self.fields = fields
+ self.record_length = obs_length
+ self.record_start = self.filepath_or_buffer.tell()
+
+ self.nobs = self._record_count()
+ self.columns = [x["name"].decode() for x in self.fields]
+
+ # Setup the dtype.
+ dtypel = [
+ ("s" + str(i), "S" + str(field["field_length"]))
+ for i, field in enumerate(self.fields)
+ ]
+ dtype = np.dtype(dtypel)
+ self._dtype = dtype
+
+ def __next__(self) -> pd.DataFrame:
+ return self.read(nrows=self._chunksize or 1)
+
+ def _record_count(self) -> int:
+ """
+ Get number of records in file.
+
+        This may be suboptimal because it requires seeking to the end of
+        the file.
+
+ Side effect: returns file position to record_start.
+ """
+ self.filepath_or_buffer.seek(0, 2)
+ total_records_length = self.filepath_or_buffer.tell() - self.record_start
+
+ if total_records_length % 80 != 0:
+ warnings.warn(
+ "xport file may be corrupted.",
+ stacklevel=find_stack_level(),
+ )
+
+ if self.record_length > 80:
+ self.filepath_or_buffer.seek(self.record_start)
+ return total_records_length // self.record_length
+
+ self.filepath_or_buffer.seek(-80, 2)
+ last_card_bytes = self.filepath_or_buffer.read(80)
+ last_card = np.frombuffer(last_card_bytes, dtype=np.uint64)
+
+        # eight ASCII blanks (0x2020202020202020 == 2314885530818453536)
+ ix = np.flatnonzero(last_card == 2314885530818453536)
+
+ if len(ix) == 0:
+ tail_pad = 0
+ else:
+ tail_pad = 8 * len(ix)
+
+ self.filepath_or_buffer.seek(self.record_start)
+
+ return (total_records_length - tail_pad) // self.record_length
+
+ def get_chunk(self, size: int | None = None) -> pd.DataFrame:
+ """
+        Read lines from the Xport file and return them as a DataFrame.
+
+ Parameters
+ ----------
+ size : int, defaults to None
+            Number of lines to read. If None, falls back to the reader's
+            chunksize; if that is also None, the whole file is read.
+
+ Returns
+ -------
+ DataFrame
+ """
+ if size is None:
+ size = self._chunksize
+ return self.read(nrows=size)
+
+ def _missing_double(self, vec):
+ v = vec.view(dtype="u1,u1,u2,u4")
+ miss = (v["f1"] == 0) & (v["f2"] == 0) & (v["f3"] == 0)
+ miss1 = (
+ ((v["f0"] >= 0x41) & (v["f0"] <= 0x5A))
+ | (v["f0"] == 0x5F)
+ | (v["f0"] == 0x2E)
+ )
+ miss &= miss1
+ return miss
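+
+    # Note: SAS encodes the missing values ".", "._" and ".A"-".Z" with a
+    # first byte of 0x2E, 0x5F or 0x41-0x5A and all remaining bytes zero,
+    # which is the pattern tested by _missing_double above.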
+
+ @Appender(_read_method_doc)
+ def read(self, nrows: int | None = None) -> pd.DataFrame:
+ if nrows is None:
+ nrows = self.nobs
+
+ read_lines = min(nrows, self.nobs - self._lines_read)
+ read_len = read_lines * self.record_length
+ if read_len <= 0:
+ self.close()
+ raise StopIteration
+ raw = self.filepath_or_buffer.read(read_len)
+ data = np.frombuffer(raw, dtype=self._dtype, count=read_lines)
+
+ df_data = {}
+ for j, x in enumerate(self.columns):
+ vec = data["s" + str(j)]
+ ntype = self.fields[j]["ntype"]
+ if ntype == "numeric":
+ vec = _handle_truncated_float_vec(vec, self.fields[j]["field_length"])
+ miss = self._missing_double(vec)
+ v = _parse_float_vec(vec)
+ v[miss] = np.nan
+ elif self.fields[j]["ntype"] == "char":
+ v = [y.rstrip() for y in vec]
+
+ if self._encoding is not None:
+ v = [y.decode(self._encoding) for y in v]
+
+ df_data.update({x: v})
+ df = pd.DataFrame(df_data)
+
+ if self._index is None:
+ df.index = pd.Index(range(self._lines_read, self._lines_read + read_lines))
+ else:
+ df = df.set_index(self._index)
+
+ self._lines_read += read_lines
+
+ return df
diff --git a/evalkit_eagle/lib/python3.10/site-packages/pandas/io/sas/sasreader.py b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/sas/sasreader.py
new file mode 100644
index 0000000000000000000000000000000000000000..c39313d5dc6548fcc014f7a886988a2b9d9001ed
--- /dev/null
+++ b/evalkit_eagle/lib/python3.10/site-packages/pandas/io/sas/sasreader.py
@@ -0,0 +1,178 @@
+"""
+Read SAS sas7bdat or xport files.
+"""
+from __future__ import annotations
+
+from abc import (
+ ABC,
+ abstractmethod,
+)
+from typing import (
+ TYPE_CHECKING,
+ overload,
+)
+
+from pandas.util._decorators import doc
+
+from pandas.core.shared_docs import _shared_docs
+
+from pandas.io.common import stringify_path
+
+if TYPE_CHECKING:
+ from collections.abc import Hashable
+ from types import TracebackType
+
+ from pandas._typing import (
+ CompressionOptions,
+ FilePath,
+ ReadBuffer,
+ Self,
+ )
+
+ from pandas import DataFrame
+
+
+class ReaderBase(ABC):
+ """
+ Protocol for XportReader and SAS7BDATReader classes.
+ """
+
+ @abstractmethod
+ def read(self, nrows: int | None = None) -> DataFrame:
+ ...
+
+ @abstractmethod
+ def close(self) -> None:
+ ...
+
+ def __enter__(self) -> Self:
+ return self
+
+ def __exit__(
+ self,
+ exc_type: type[BaseException] | None,
+ exc_value: BaseException | None,
+ traceback: TracebackType | None,
+ ) -> None:
+ self.close()
+
+
+@overload
+def read_sas(
+ filepath_or_buffer: FilePath | ReadBuffer[bytes],
+ *,
+ format: str | None = ...,
+ index: Hashable | None = ...,
+ encoding: str | None = ...,
+ chunksize: int = ...,
+ iterator: bool = ...,
+ compression: CompressionOptions = ...,
+) -> ReaderBase:
+ ...
+
+
+@overload
+def read_sas(
+ filepath_or_buffer: FilePath | ReadBuffer[bytes],
+ *,
+ format: str | None = ...,
+ index: Hashable | None = ...,
+ encoding: str | None = ...,
+ chunksize: None = ...,
+ iterator: bool = ...,
+ compression: CompressionOptions = ...,
+) -> DataFrame | ReaderBase:
+ ...
+
+
+@doc(decompression_options=_shared_docs["decompression_options"] % "filepath_or_buffer")
+def read_sas(
+ filepath_or_buffer: FilePath | ReadBuffer[bytes],
+ *,
+ format: str | None = None,
+ index: Hashable | None = None,
+ encoding: str | None = None,
+ chunksize: int | None = None,
+ iterator: bool = False,
+ compression: CompressionOptions = "infer",
+) -> DataFrame | ReaderBase:
+ """
+ Read SAS files stored as either XPORT or SAS7BDAT format files.
+
+ Parameters
+ ----------
+ filepath_or_buffer : str, path object, or file-like object
+ String, path object (implementing ``os.PathLike[str]``), or file-like
+ object implementing a binary ``read()`` function. The string could be a URL.
+ Valid URL schemes include http, ftp, s3, and file. For file URLs, a host is
+ expected. A local file could be:
+ ``file://localhost/path/to/table.sas7bdat``.
+ format : str {{'xport', 'sas7bdat'}} or None
+ If None, file format is inferred from file extension. If 'xport' or
+ 'sas7bdat', uses the corresponding format.
+ index : identifier of index column, defaults to None
+ Identifier of column that should be used as index of the DataFrame.
+ encoding : str, default is None
+ Encoding for text data. If None, text data are stored as raw bytes.
+ chunksize : int
+ Read file `chunksize` lines at a time, returns iterator.
+ iterator : bool, defaults to False
+ If True, returns an iterator for reading the file incrementally.
+ {decompression_options}
+
+ Returns
+ -------
+ DataFrame if iterator=False and chunksize=None, else SAS7BDATReader
+ or XportReader
+
+ Examples
+ --------
+ >>> df = pd.read_sas("sas_data.sas7bdat") # doctest: +SKIP
+ """
+ if format is None:
+ buffer_error_msg = (
+ "If this is a buffer object rather "
+ "than a string name, you must specify a format string"
+ )
+ filepath_or_buffer = stringify_path(filepath_or_buffer)
+ if not isinstance(filepath_or_buffer, str):
+ raise ValueError(buffer_error_msg)
+ fname = filepath_or_buffer.lower()
+ if ".xpt" in fname:
+ format = "xport"
+ elif ".sas7bdat" in fname:
+ format = "sas7bdat"
+ else:
+ raise ValueError(
+ f"unable to infer format of SAS file from filename: {repr(fname)}"
+ )
+
+ reader: ReaderBase
+ if format.lower() == "xport":
+ from pandas.io.sas.sas_xport import XportReader
+
+ reader = XportReader(
+ filepath_or_buffer,
+ index=index,
+ encoding=encoding,
+ chunksize=chunksize,
+ compression=compression,
+ )
+ elif format.lower() == "sas7bdat":
+ from pandas.io.sas.sas7bdat import SAS7BDATReader
+
+ reader = SAS7BDATReader(
+ filepath_or_buffer,
+ index=index,
+ encoding=encoding,
+ chunksize=chunksize,
+ compression=compression,
+ )
+ else:
+ raise ValueError("unknown SAS format")
+
+ if iterator or chunksize:
+ return reader
+
+ with reader:
+ return reader.read()
diff --git a/evalkit_eagle/lib/python3.10/site-packages/pandas/util/__init__.py b/evalkit_eagle/lib/python3.10/site-packages/pandas/util/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..82b3aa56c653cd1241872c67e9d9016df04a6c5a
--- /dev/null
+++ b/evalkit_eagle/lib/python3.10/site-packages/pandas/util/__init__.py
@@ -0,0 +1,29 @@
+def __getattr__(key: str):
+ # These imports need to be lazy to avoid circular import errors
+ if key == "hash_array":
+ from pandas.core.util.hashing import hash_array
+
+ return hash_array
+ if key == "hash_pandas_object":
+ from pandas.core.util.hashing import hash_pandas_object
+
+ return hash_pandas_object
+ if key == "Appender":
+ from pandas.util._decorators import Appender
+
+ return Appender
+ if key == "Substitution":
+ from pandas.util._decorators import Substitution
+
+ return Substitution
+
+ if key == "cache_readonly":
+ from pandas.util._decorators import cache_readonly
+
+ return cache_readonly
+
+ raise AttributeError(f"module 'pandas.util' has no attribute '{key}'")
+
+
+def capitalize_first_letter(s):
+ return s[:1].upper() + s[1:]
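+
+
+# The module-level __getattr__ above (PEP 562) resolves these names lazily
+# on first attribute access, e.g.:
+#   >>> import numpy as np, pandas as pd
+#   >>> pd.util.hash_array(np.array([1, 2, 3]))  # imported on demand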
diff --git a/evalkit_eagle/lib/python3.10/site-packages/pandas/util/_doctools.py b/evalkit_eagle/lib/python3.10/site-packages/pandas/util/_doctools.py
new file mode 100644
index 0000000000000000000000000000000000000000..12619abf4baaf336dfd3d5ae78a9bc2133f310c0
--- /dev/null
+++ b/evalkit_eagle/lib/python3.10/site-packages/pandas/util/_doctools.py
@@ -0,0 +1,202 @@
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+
+import numpy as np
+
+import pandas as pd
+
+if TYPE_CHECKING:
+ from collections.abc import Iterable
+
+
+class TablePlotter:
+ """
+    Lay out several DataFrames in a vertical/horizontal layout for explanation.
+    Used in merging.rst.
+ """
+
+ def __init__(
+ self,
+ cell_width: float = 0.37,
+ cell_height: float = 0.25,
+ font_size: float = 7.5,
+ ) -> None:
+ self.cell_width = cell_width
+ self.cell_height = cell_height
+ self.font_size = font_size
+
+ def _shape(self, df: pd.DataFrame) -> tuple[int, int]:
+ """
+ Calculate table shape considering index levels.
+ """
+ row, col = df.shape
+ return row + df.columns.nlevels, col + df.index.nlevels
+
+ def _get_cells(self, left, right, vertical) -> tuple[int, int]:
+ """
+ Calculate appropriate figure size based on left and right data.
+ """
+ if vertical:
+ # calculate required number of cells
+ vcells = max(sum(self._shape(df)[0] for df in left), self._shape(right)[0])
+ hcells = max(self._shape(df)[1] for df in left) + self._shape(right)[1]
+ else:
+ vcells = max([self._shape(df)[0] for df in left] + [self._shape(right)[0]])
+ hcells = sum([self._shape(df)[1] for df in left] + [self._shape(right)[1]])
+ return hcells, vcells
+
+ def plot(self, left, right, labels: Iterable[str] = (), vertical: bool = True):
+ """
+ Plot left / right DataFrames in specified layout.
+
+ Parameters
+ ----------
+ left : list of DataFrames before operation is applied
+ right : DataFrame of operation result
+ labels : list of str to be drawn as titles of left DataFrames
+ vertical : bool, default True
+ If True, use vertical layout. If False, use horizontal layout.
+ """
+ from matplotlib import gridspec
+ import matplotlib.pyplot as plt
+
+ if not isinstance(left, list):
+ left = [left]
+ left = [self._conv(df) for df in left]
+ right = self._conv(right)
+
+ hcells, vcells = self._get_cells(left, right, vertical)
+
+        # the same sizing (including margin for titles) applies to both
+        # the vertical and horizontal layouts
+        figsize = self.cell_width * hcells, self.cell_height * vcells
+ fig = plt.figure(figsize=figsize)
+
+ if vertical:
+ gs = gridspec.GridSpec(len(left), hcells)
+ # left
+ max_left_cols = max(self._shape(df)[1] for df in left)
+ max_left_rows = max(self._shape(df)[0] for df in left)
+ for i, (_left, _label) in enumerate(zip(left, labels)):
+ ax = fig.add_subplot(gs[i, 0:max_left_cols])
+ self._make_table(ax, _left, title=_label, height=1.0 / max_left_rows)
+ # right
+ ax = plt.subplot(gs[:, max_left_cols:])
+ self._make_table(ax, right, title="Result", height=1.05 / vcells)
+ fig.subplots_adjust(top=0.9, bottom=0.05, left=0.05, right=0.95)
+ else:
+ max_rows = max(self._shape(df)[0] for df in left + [right])
+ height = 1.0 / np.max(max_rows)
+ gs = gridspec.GridSpec(1, hcells)
+ # left
+ i = 0
+ for df, _label in zip(left, labels):
+ sp = self._shape(df)
+ ax = fig.add_subplot(gs[0, i : i + sp[1]])
+ self._make_table(ax, df, title=_label, height=height)
+ i += sp[1]
+ # right
+ ax = plt.subplot(gs[0, i:])
+ self._make_table(ax, right, title="Result", height=height)
+ fig.subplots_adjust(top=0.85, bottom=0.05, left=0.05, right=0.95)
+
+ return fig
+
+ def _conv(self, data):
+ """
+        Convert each input to a form appropriate for table output.
+ """
+ if isinstance(data, pd.Series):
+ if data.name is None:
+ data = data.to_frame(name="")
+ else:
+ data = data.to_frame()
+ data = data.fillna("NaN")
+ return data
+
+ def _insert_index(self, data):
+ # insert is destructive
+ data = data.copy()
+ idx_nlevels = data.index.nlevels
+ if idx_nlevels == 1:
+ data.insert(0, "Index", data.index)
+ else:
+ for i in range(idx_nlevels):
+ data.insert(i, f"Index{i}", data.index._get_level_values(i))
+
+ col_nlevels = data.columns.nlevels
+ if col_nlevels > 1:
+ col = data.columns._get_level_values(0)
+ values = [
+ data.columns._get_level_values(i)._values for i in range(1, col_nlevels)
+ ]
+ col_df = pd.DataFrame(values)
+ data.columns = col_df.columns
+ data = pd.concat([col_df, data])
+ data.columns = col
+ return data
+
+ def _make_table(self, ax, df, title: str, height: float | None = None) -> None:
+ if df is None:
+ ax.set_visible(False)
+ return
+
+ from pandas import plotting
+
+ idx_nlevels = df.index.nlevels
+ col_nlevels = df.columns.nlevels
+ # must be converted here to get index levels for colorization
+ df = self._insert_index(df)
+ tb = plotting.table(ax, df, loc=9)
+ tb.set_fontsize(self.font_size)
+
+ if height is None:
+ height = 1.0 / (len(df) + 1)
+
+ props = tb.properties()
+ for (r, c), cell in props["celld"].items():
+ if c == -1:
+ cell.set_visible(False)
+ elif r < col_nlevels and c < idx_nlevels:
+ cell.set_visible(False)
+ elif r < col_nlevels or c < idx_nlevels:
+ cell.set_facecolor("#AAAAAA")
+ cell.set_height(height)
+
+ ax.set_title(title, size=self.font_size)
+ ax.axis("off")
+
+
+def main() -> None:
+ import matplotlib.pyplot as plt
+
+ p = TablePlotter()
+
+ df1 = pd.DataFrame({"A": [10, 11, 12], "B": [20, 21, 22], "C": [30, 31, 32]})
+ df2 = pd.DataFrame({"A": [10, 12], "C": [30, 32]})
+
+ p.plot([df1, df2], pd.concat([df1, df2]), labels=["df1", "df2"], vertical=True)
+ plt.show()
+
+ df3 = pd.DataFrame({"X": [10, 12], "Z": [30, 32]})
+
+ p.plot(
+ [df1, df3], pd.concat([df1, df3], axis=1), labels=["df1", "df3"], vertical=False
+ )
+ plt.show()
+
+ idx = pd.MultiIndex.from_tuples(
+ [(1, "A"), (1, "B"), (1, "C"), (2, "A"), (2, "B"), (2, "C")]
+ )
+ column = pd.MultiIndex.from_tuples([(1, "A"), (1, "B")])
+ df3 = pd.DataFrame({"v1": [1, 2, 3, 4, 5, 6], "v2": [5, 6, 7, 8, 9, 10]}, index=idx)
+ df3.columns = column
+ p.plot(df3, df3, labels=["df3"])
+ plt.show()
+
+
+if __name__ == "__main__":
+ main()
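+
+
+# Usage sketch (comment only, so nothing runs on import); the frame names
+# below are illustrative and ``plot`` returns the created matplotlib Figure:
+#
+# >>> import pandas as pd
+# >>> from pandas.util._doctools import TablePlotter
+# >>> p = TablePlotter(cell_width=0.5)
+# >>> left = pd.DataFrame({"key": ["a", "b"], "x": [1, 2]})
+# >>> right = pd.DataFrame({"key": ["a", "b"], "y": [3, 4]})
+# >>> fig = p.plot([left, right], left.merge(right, on="key"),
+# ...              labels=["left", "right"], vertical=False)
+# >>> fig.savefig("merge_example.png")  # doctest: +SKIP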
diff --git a/evalkit_eagle/lib/python3.10/site-packages/pandas/util/_exceptions.py b/evalkit_eagle/lib/python3.10/site-packages/pandas/util/_exceptions.py
new file mode 100644
index 0000000000000000000000000000000000000000..5f50838d373154868ff7414775763a1c66853c65
--- /dev/null
+++ b/evalkit_eagle/lib/python3.10/site-packages/pandas/util/_exceptions.py
@@ -0,0 +1,103 @@
+from __future__ import annotations
+
+import contextlib
+import inspect
+import os
+import re
+from typing import TYPE_CHECKING
+import warnings
+
+if TYPE_CHECKING:
+ from collections.abc import Generator
+ from types import FrameType
+
+
+@contextlib.contextmanager
+def rewrite_exception(old_name: str, new_name: str) -> Generator[None, None, None]:
+ """
+ Rewrite the message of an exception.
+ """
+ try:
+ yield
+ except Exception as err:
+ if not err.args:
+ raise
+ msg = str(err.args[0])
+ msg = msg.replace(old_name, new_name)
+ args: tuple[str, ...] = (msg,)
+ if len(err.args) > 1:
+ args = args + err.args[1:]
+ err.args = args
+ raise
+
+
+def find_stack_level() -> int:
+ """
+ Find the first place in the stack that is not inside pandas
+ (tests notwithstanding).
+ """
+
+ import pandas as pd
+
+ pkg_dir = os.path.dirname(pd.__file__)
+ test_dir = os.path.join(pkg_dir, "tests")
+
+ # https://stackoverflow.com/questions/17407119/python-inspect-stack-is-slow
+ frame: FrameType | None = inspect.currentframe()
+ try:
+ n = 0
+ while frame:
+ filename = inspect.getfile(frame)
+ if filename.startswith(pkg_dir) and not filename.startswith(test_dir):
+ frame = frame.f_back
+ n += 1
+ else:
+ break
+ finally:
+ # See note in
+ # https://docs.python.org/3/library/inspect.html#inspect.Traceback
+ del frame
+ return n
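+
+# Usage sketch (comment only; the call site is illustrative): inside pandas
+# this typically feeds the ``stacklevel`` argument of ``warnings.warn`` so
+# the reported location is the first frame outside pandas, e.g.
+# ``warnings.warn(msg, FutureWarning, stacklevel=find_stack_level())``.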
+
+
+@contextlib.contextmanager
+def rewrite_warning(
+ target_message: str,
+ target_category: type[Warning],
+ new_message: str,
+ new_category: type[Warning] | None = None,
+) -> Generator[None, None, None]:
+ """
+ Rewrite the message of a warning.
+
+ Parameters
+ ----------
+ target_message : str
+ Warning message to match.
+ target_category : Warning
+ Warning type to match.
+ new_message : str
+ New warning message to emit.
+ new_category : Warning or None, default None
+ New warning type to emit. When None, will be the same as target_category.
+ """
+ if new_category is None:
+ new_category = target_category
+ with warnings.catch_warnings(record=True) as record:
+ yield
+ if len(record) > 0:
+ match = re.compile(target_message)
+ for warning in record:
+ if warning.category is target_category and re.search(
+ match, str(warning.message)
+ ):
+ category = new_category
+ message: Warning | str = new_message
+ else:
+ category, message = warning.category, warning.message
+ warnings.warn_explicit(
+ message=message,
+ category=category,
+ filename=warning.filename,
+ lineno=warning.lineno,
+ )
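+
+
+# Usage sketch (comment only; the messages are illustrative):
+#
+# >>> import warnings
+# >>> from pandas.util._exceptions import rewrite_exception, rewrite_warning
+# >>> with rewrite_exception("OldName", "NewName"):
+# ...     raise KeyError("OldName not found")  # re-raised as "NewName not found"
+# >>> with rewrite_warning("old message", FutureWarning, "new message"):
+# ...     warnings.warn("old message", FutureWarning)  # re-emitted as "new message"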
diff --git a/evalkit_eagle/lib/python3.10/site-packages/pandas/util/_print_versions.py b/evalkit_eagle/lib/python3.10/site-packages/pandas/util/_print_versions.py
new file mode 100644
index 0000000000000000000000000000000000000000..4ede5627c28b9a3eaf97f09f6a28642523ce5833
--- /dev/null
+++ b/evalkit_eagle/lib/python3.10/site-packages/pandas/util/_print_versions.py
@@ -0,0 +1,158 @@
+from __future__ import annotations
+
+import codecs
+import json
+import locale
+import os
+import platform
+import struct
+import sys
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+ from pandas._typing import JSONSerializable
+
+from pandas.compat._optional import (
+ VERSIONS,
+ get_version,
+ import_optional_dependency,
+)
+
+
+def _get_commit_hash() -> str | None:
+ """
+ Use vendored versioneer code to get git hash, which handles
+ git worktree correctly.
+ """
+ try:
+ from pandas._version_meson import ( # pyright: ignore [reportMissingImports]
+ __git_version__,
+ )
+
+ return __git_version__
+ except ImportError:
+ from pandas._version import get_versions
+
+ versions = get_versions()
+ return versions["full-revisionid"]
+
+
+def _get_sys_info() -> dict[str, JSONSerializable]:
+ """
+ Returns system information as a JSON serializable dictionary.
+ """
+ uname_result = platform.uname()
+ language_code, encoding = locale.getlocale()
+ return {
+ "commit": _get_commit_hash(),
+ "python": platform.python_version(),
+ "python-bits": struct.calcsize("P") * 8,
+ "OS": uname_result.system,
+ "OS-release": uname_result.release,
+ "Version": uname_result.version,
+ "machine": uname_result.machine,
+ "processor": uname_result.processor,
+ "byteorder": sys.byteorder,
+ "LC_ALL": os.environ.get("LC_ALL"),
+ "LANG": os.environ.get("LANG"),
+ "LOCALE": {"language-code": language_code, "encoding": encoding},
+ }
+
+
+def _get_dependency_info() -> dict[str, JSONSerializable]:
+ """
+ Returns dependency information as a JSON serializable dictionary.
+ """
+ deps = [
+ "pandas",
+ # required
+ "numpy",
+ "pytz",
+ "dateutil",
+ # install / build
+ "pip",
+ "Cython",
+ # docs
+ "sphinx",
+ # Other, not imported.
+ "IPython",
+ ]
+ # Optional dependencies
+ deps.extend(list(VERSIONS))
+
+ result: dict[str, JSONSerializable] = {}
+ for modname in deps:
+ try:
+ mod = import_optional_dependency(modname, errors="ignore")
+ except Exception:
+ # Dependency conflicts may cause a non ImportError
+ result[modname] = "N/A"
+ else:
+ result[modname] = get_version(mod) if mod else None
+ return result
+
+
+def show_versions(as_json: str | bool = False) -> None:
+ """
+ Provide useful information, important for bug reports.
+
+ It comprises info about the hosting operating system, the pandas version,
+ and the versions of other installed related packages.
+
+ Parameters
+ ----------
+ as_json : str or bool, default False
+ * If False, outputs info in a human readable form to the console.
+ * If str, it will be considered as a path to a file.
+ Info will be written to that file in JSON format.
+ * If True, outputs info in JSON format to the console.
+
+ Examples
+ --------
+ >>> pd.show_versions() # doctest: +SKIP
+ Your output may look something like this:
+ INSTALLED VERSIONS
+ ------------------
+ commit : 37ea63d540fd27274cad6585082c91b1283f963d
+ python : 3.10.6.final.0
+ python-bits : 64
+ OS : Linux
+ OS-release : 5.10.102.1-microsoft-standard-WSL2
+ Version : #1 SMP Wed Mar 2 00:30:59 UTC 2022
+ machine : x86_64
+ processor : x86_64
+ byteorder : little
+ LC_ALL : None
+ LANG : en_GB.UTF-8
+ LOCALE : en_GB.UTF-8
+ pandas : 2.0.1
+ numpy : 1.24.3
+ ...
+ """
+ sys_info = _get_sys_info()
+ deps = _get_dependency_info()
+
+ if as_json:
+ j = {"system": sys_info, "dependencies": deps}
+
+ if as_json is True:
+ sys.stdout.writelines(json.dumps(j, indent=2))
+ else:
+ assert isinstance(as_json, str) # needed for mypy
+ with codecs.open(as_json, "wb", encoding="utf8") as f:
+ json.dump(j, f, indent=2)
+
+ else:
+ assert isinstance(sys_info["LOCALE"], dict) # needed for mypy
+ language_code = sys_info["LOCALE"]["language-code"]
+ encoding = sys_info["LOCALE"]["encoding"]
+ sys_info["LOCALE"] = f"{language_code}.{encoding}"
+
+ maxlen = max(len(x) for x in deps)
+ print("\nINSTALLED VERSIONS")
+ print("------------------")
+ for k, v in sys_info.items():
+ print(f"{k:<{maxlen}}: {v}")
+ print("")
+ for k, v in deps.items():
+ print(f"{k:<{maxlen}}: {v}")
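+
+
+# Usage sketch (comment only): the public entry point is
+# ``pandas.show_versions``; the output path below is illustrative.
+#
+# >>> import pandas as pd
+# >>> pd.show_versions()                         # human-readable report
+# >>> pd.show_versions(as_json=True)             # JSON to stdout
+# >>> pd.show_versions(as_json="versions.json")  # JSON written to that path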
diff --git a/evalkit_eagle/lib/python3.10/site-packages/pandas/util/_test_decorators.py b/evalkit_eagle/lib/python3.10/site-packages/pandas/util/_test_decorators.py
new file mode 100644
index 0000000000000000000000000000000000000000..2c1912bce856dd2694447d820ea2c5124be9c1a0
--- /dev/null
+++ b/evalkit_eagle/lib/python3.10/site-packages/pandas/util/_test_decorators.py
@@ -0,0 +1,173 @@
+"""
+This module provides decorator functions which can be applied to test objects
+in order to skip those objects when certain conditions occur. A sample use case
+is to detect if the platform is missing ``matplotlib``. If so, any test objects
+which require ``matplotlib`` and decorated with ``@td.skip_if_no("matplotlib")``
+will be skipped by ``pytest`` during the execution of the test suite.
+
+To illustrate, after importing this module:
+
+import pandas.util._test_decorators as td
+
+The decorators can be applied to classes:
+
+@td.skip_if_no("package")
+class Foo:
+ ...
+
+Or individual functions:
+
+@td.skip_if_no("package")
+def test_foo():
+ ...
+
+For more information, refer to the ``pytest`` documentation on ``skipif``.
+"""
+from __future__ import annotations
+
+import locale
+from typing import (
+ TYPE_CHECKING,
+ Callable,
+)
+
+import pytest
+
+from pandas._config import get_option
+
+if TYPE_CHECKING:
+ from pandas._typing import F
+
+from pandas._config.config import _get_option
+
+from pandas.compat import (
+ IS64,
+ is_platform_windows,
+)
+from pandas.compat._optional import import_optional_dependency
+
+
+def skip_if_installed(package: str) -> pytest.MarkDecorator:
+ """
+ Skip a test if a package is installed.
+
+ Parameters
+ ----------
+ package : str
+ The name of the package.
+
+ Returns
+ -------
+ pytest.MarkDecorator
+ a pytest.mark.skipif to use as either a test decorator or a
+ parametrization mark.
+ """
+ return pytest.mark.skipif(
+ bool(import_optional_dependency(package, errors="ignore")),
+ reason=f"Skipping because {package} is installed.",
+ )
+
+
+def skip_if_no(package: str, min_version: str | None = None) -> pytest.MarkDecorator:
+ """
+ Generic function to help skip tests when required packages are not
+ present on the testing system.
+
+ This function returns a pytest mark with a skip condition that will be
+ evaluated during test collection. An attempt will be made to import the
+ specified ``package`` and optionally ensure it meets the ``min_version``
+
+ The mark can be used as either a decorator for a test class or to be
+ applied to parameters in pytest.mark.parametrize calls or parametrized
+ fixtures. Use pytest.importorskip if the imported module is later needed
+ or for test functions.
+
+ If the import and version check are unsuccessful, then the test function
+ (or test case when used in conjunction with parametrization) will be
+ skipped.
+
+ Parameters
+ ----------
+ package: str
+ The name of the required package.
+ min_version: str or None, default None
+ Optional minimum version of the package.
+
+ Returns
+ -------
+ pytest.MarkDecorator
+ a pytest.mark.skipif to use as either a test decorator or a
+ parametrization mark.
+ """
+ msg = f"Could not import '{package}'"
+ if min_version:
+ msg += f" satisfying a min_version of {min_version}"
+ return pytest.mark.skipif(
+ not bool(
+ import_optional_dependency(
+ package, errors="ignore", min_version=min_version
+ )
+ ),
+ reason=msg,
+ )
+
+
+skip_if_32bit = pytest.mark.skipif(not IS64, reason="skipping for 32 bit")
+skip_if_windows = pytest.mark.skipif(is_platform_windows(), reason="Running on Windows")
+skip_if_not_us_locale = pytest.mark.skipif(
+ locale.getlocale()[0] != "en_US",
+ reason=f"Set local {locale.getlocale()[0]} is not en_US",
+)
+
+
+def parametrize_fixture_doc(*args) -> Callable[[F], F]:
+ """
+ Intended for use as a decorator for a parametrized fixture, this function
+ formats the decorated fixture's docstring by replacing the placeholders
+ {0}, {1}, etc. with the parameters passed as arguments.
+
+ Parameters
+ ----------
+ args: iterable
+ Positional arguments for docstring.
+
+ Returns
+ -------
+ function
+ The decorated fixture with its docstring formatted.
+ """
+
+ def documented_fixture(fixture):
+ fixture.__doc__ = fixture.__doc__.format(*args)
+ return fixture
+
+ return documented_fixture
+
+
+def mark_array_manager_not_yet_implemented(request) -> None:
+ mark = pytest.mark.xfail(reason="Not yet implemented for ArrayManager")
+ request.applymarker(mark)
+
+
+skip_array_manager_not_yet_implemented = pytest.mark.xfail(
+ _get_option("mode.data_manager", silent=True) == "array",
+ reason="Not yet implemented for ArrayManager",
+)
+
+skip_array_manager_invalid_test = pytest.mark.skipif(
+ _get_option("mode.data_manager", silent=True) == "array",
+ reason="Test that relies on BlockManager internals or specific behaviour",
+)
+
+skip_copy_on_write_not_yet_implemented = pytest.mark.xfail(
+ get_option("mode.copy_on_write") is True,
+ reason="Not yet implemented/adapted for Copy-on-Write mode",
+)
+
+skip_copy_on_write_invalid_test = pytest.mark.skipif(
+ get_option("mode.copy_on_write") is True,
+ reason="Test not valid for Copy-on-Write mode",
+)
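+
+
+# Usage sketch (comment only; the test names and the scipy pin are
+# illustrative):
+#
+# import pandas.util._test_decorators as td
+#
+# @td.skip_if_no("scipy", min_version="1.7")
+# def test_needs_scipy():
+#     ...
+#
+# @td.skip_if_windows
+# def test_posix_only():
+#     ...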
diff --git a/evalkit_eagle/lib/python3.10/site-packages/pandas/util/_validators.py b/evalkit_eagle/lib/python3.10/site-packages/pandas/util/_validators.py
new file mode 100644
index 0000000000000000000000000000000000000000..cb0b4d549f49ea972d50c97986c60be64c021c3c
--- /dev/null
+++ b/evalkit_eagle/lib/python3.10/site-packages/pandas/util/_validators.py
@@ -0,0 +1,456 @@
+"""
+Module that contains many useful utilities
+for validating data or function arguments
+"""
+from __future__ import annotations
+
+from collections.abc import (
+ Iterable,
+ Sequence,
+)
+from typing import (
+ TypeVar,
+ overload,
+)
+
+import numpy as np
+
+from pandas._libs import lib
+
+from pandas.core.dtypes.common import (
+ is_bool,
+ is_integer,
+)
+
+BoolishT = TypeVar("BoolishT", bool, int)
+BoolishNoneT = TypeVar("BoolishNoneT", bool, int, None)
+
+
+def _check_arg_length(fname, args, max_fname_arg_count, compat_args) -> None:
+ """
+ Checks whether 'args' has at most as many elements as 'compat_args'.
+ Raises a TypeError if that is not the case, mirroring Python's behaviour
+ when a function is called with too many arguments.
+ """
+ if max_fname_arg_count < 0:
+ raise ValueError("'max_fname_arg_count' must be non-negative")
+
+ if len(args) > len(compat_args):
+ max_arg_count = len(compat_args) + max_fname_arg_count
+ actual_arg_count = len(args) + max_fname_arg_count
+ argument = "argument" if max_arg_count == 1 else "arguments"
+
+ raise TypeError(
+ f"{fname}() takes at most {max_arg_count} {argument} "
+ f"({actual_arg_count} given)"
+ )
+
+
+def _check_for_default_values(fname, arg_val_dict, compat_args) -> None:
+ """
+ Check that the keys in `arg_val_dict` are mapped to their
+ default values as specified in `compat_args`.
+
+ Note that this function is to be called only when it has been
+ checked that arg_val_dict.keys() is a subset of compat_args
+ """
+ for key in arg_val_dict:
+ # try checking equality directly with the '==' operator,
+ # as comparison may have been overridden for the left-hand
+ # object
+ try:
+ v1 = arg_val_dict[key]
+ v2 = compat_args[key]
+
+ # check for None-ness otherwise we could end up
+ # comparing a numpy array vs None
+ if (v1 is not None and v2 is None) or (v1 is None and v2 is not None):
+ match = False
+ else:
+ match = v1 == v2
+
+ if not is_bool(match):
+ raise ValueError("'match' is not a boolean")
+
+ # could not compare them directly, so try comparison
+ # using the 'is' operator
+ except ValueError:
+ match = arg_val_dict[key] is compat_args[key]
+
+ if not match:
+ raise ValueError(
+ f"the '{key}' parameter is not supported in "
+ f"the pandas implementation of {fname}()"
+ )
+
+
+def validate_args(fname, args, max_fname_arg_count, compat_args) -> None:
+ """
+ Checks whether the length of the `*args` argument passed into a function
+ has at most `len(compat_args)` arguments and whether or not all of these
+ elements in `args` are set to their default values.
+
+ Parameters
+ ----------
+ fname : str
+ The name of the function being passed the `*args` parameter
+ args : tuple
+ The `*args` parameter passed into a function
+ max_fname_arg_count : int
+ The maximum number of arguments that the function `fname`
+ can accept, excluding those in `args`. Used for displaying
+ appropriate error messages. Must be non-negative.
+ compat_args : dict
+ A dictionary of keys and their associated default values.
+ In order to accommodate buggy behaviour in some versions of `numpy`,
+ where a signature displayed keyword arguments but then passed those
+ arguments **positionally** internally when calling downstream
+ implementations, a dict ensures that the original
+ order of the keyword arguments is enforced.
+
+ Raises
+ ------
+ TypeError
+ If `args` contains more values than there are `compat_args`
+ ValueError
+ If `args` contains values that do not correspond to those
+ of the default values specified in `compat_args`
+ """
+ _check_arg_length(fname, args, max_fname_arg_count, compat_args)
+
+ # We do this so that we can provide a more informative
+ # error message about the parameters that we are not
+ # supporting in the pandas implementation of 'fname'
+ kwargs = dict(zip(compat_args, args))
+ _check_for_default_values(fname, kwargs, compat_args)
+
+
+def _check_for_invalid_keys(fname, kwargs, compat_args) -> None:
+ """
+ Checks whether 'kwargs' contains any keys that are not
+ in 'compat_args' and raises a TypeError if there is one.
+ """
+ # set(dict) --> set of the dictionary's keys
+ diff = set(kwargs) - set(compat_args)
+
+ if diff:
+ bad_arg = next(iter(diff))
+ raise TypeError(f"{fname}() got an unexpected keyword argument '{bad_arg}'")
+
+
+def validate_kwargs(fname, kwargs, compat_args) -> None:
+ """
+ Checks whether parameters passed to the **kwargs argument in a
+ function `fname` are valid parameters as specified in `compat_args`
+ and whether or not they are set to their default values.
+
+ Parameters
+ ----------
+ fname : str
+ The name of the function being passed the `**kwargs` parameter
+ kwargs : dict
+ The `**kwargs` parameter passed into `fname`
+ compat_args: dict
+ A dictionary of keys that `kwargs` is allowed to have and their
+ associated default values
+
+ Raises
+ ------
+ TypeError if `kwargs` contains keys not in `compat_args`
+ ValueError if `kwargs` contains keys in `compat_args` that do not
+ map to the default values specified in `compat_args`
+ """
+ kwds = kwargs.copy()
+ _check_for_invalid_keys(fname, kwargs, compat_args)
+ _check_for_default_values(fname, kwds, compat_args)
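+
+# Usage sketch (comment only; names are illustrative): a NumPy-compat shim
+# might declare ``compat_args = {"out": None}`` and call
+# ``validate_kwargs("sum", kwargs, compat_args)``; any key other than
+# ``out``, or ``out`` bound to a non-default value, raises.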
+
+
+def validate_args_and_kwargs(
+ fname, args, kwargs, max_fname_arg_count, compat_args
+) -> None:
+ """
+ Checks whether parameters passed to the *args and **kwargs argument in a
+ function `fname` are valid parameters as specified in `compat_args`
+ and whether or not they are set to their default values.
+
+ Parameters
+ ----------
+ fname: str
+ The name of the function being passed the `**kwargs` parameter
+ args: tuple
+ The `*args` parameter passed into a function
+ kwargs: dict
+ The `**kwargs` parameter passed into `fname`
+ max_fname_arg_count: int
+ The maximum number of arguments that the function `fname`
+ can accept, excluding those in `args`. Used for displaying
+ appropriate error messages. Must be non-negative.
+ compat_args: dict
+ A dictionary of keys that `kwargs` is allowed to
+ have and their associated default values.
+
+ Raises
+ ------
+ TypeError if `args` contains more values than there are
+ `compat_args` OR `kwargs` contains keys not in `compat_args`
+ ValueError if `args` contains values not at their default values, OR
+ `kwargs` contains keys in `compat_args` that do not map to the
+ default values specified in `compat_args`
+
+ See Also
+ --------
+ validate_args : Purely args validation.
+ validate_kwargs : Purely kwargs validation.
+
+ """
+ # Check that the total number of arguments passed in (i.e.
+ # args and kwargs) does not exceed the length of compat_args
+ _check_arg_length(
+ fname, args + tuple(kwargs.values()), max_fname_arg_count, compat_args
+ )
+
+ # Check there is no overlap with the positional and keyword
+ # arguments, similar to what is done in actual Python functions
+ args_dict = dict(zip(compat_args, args))
+
+ for key in args_dict:
+ if key in kwargs:
+ raise TypeError(
+ f"{fname}() got multiple values for keyword argument '{key}'"
+ )
+
+ kwargs.update(args_dict)
+ validate_kwargs(fname, kwargs, compat_args)
+
+
+def validate_bool_kwarg(
+ value: BoolishNoneT,
+ arg_name: str,
+ none_allowed: bool = True,
+ int_allowed: bool = False,
+) -> BoolishNoneT:
+ """
+ Ensure that the argument named ``arg_name`` can be interpreted as a boolean.
+
+ Parameters
+ ----------
+ value : bool
+ Value to be validated.
+ arg_name : str
+ Name of the argument. To be reflected in the error message.
+ none_allowed : bool, default True
+ Whether to consider None to be a valid boolean.
+ int_allowed : bool, default False
+ Whether to consider integer value to be a valid boolean.
+
+ Returns
+ -------
+ value
+ The same value as input.
+
+ Raises
+ ------
+ ValueError
+ If the value is not a valid boolean.
+ """
+ good_value = is_bool(value)
+ if none_allowed:
+ good_value = good_value or (value is None)
+
+ if int_allowed:
+ good_value = good_value or isinstance(value, int)
+
+ if not good_value:
+ raise ValueError(
+ f'For argument "{arg_name}" expected type bool, received '
+ f"type {type(value).__name__}."
+ )
+ return value # pyright: ignore[reportGeneralTypeIssues]
+
+
+def validate_fillna_kwargs(value, method, validate_scalar_dict_value: bool = True):
+ """
+ Validate the keyword arguments to 'fillna'.
+
+ This checks that exactly one of 'value' and 'method' is specified.
+ If 'method' is specified, this validates that it's a valid method.
+
+ Parameters
+ ----------
+ value, method : object
+ The 'value' and 'method' keyword arguments for 'fillna'.
+ validate_scalar_dict_value : bool, default True
+ Whether to validate that 'value' is a scalar or dict. Specifically,
+ validate that it is not a list or tuple.
+
+ Returns
+ -------
+ value, method : object
+ """
+ from pandas.core.missing import clean_fill_method
+
+ if value is None and method is None:
+ raise ValueError("Must specify a fill 'value' or 'method'.")
+ if value is None and method is not None:
+ method = clean_fill_method(method)
+
+ elif value is not None and method is None:
+ if validate_scalar_dict_value and isinstance(value, (list, tuple)):
+ raise TypeError(
+ '"value" parameter must be a scalar or dict, but '
+ f'you passed a "{type(value).__name__}"'
+ )
+
+ elif value is not None and method is not None:
+ raise ValueError("Cannot specify both 'value' and 'method'.")
+
+ return value, method
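+
+# Usage sketch (comment only):
+# >>> validate_fillna_kwargs(0, None)        # -> (0, None)
+# >>> validate_fillna_kwargs(None, "ffill")  # method normalized via clean_fill_method
+# >>> validate_fillna_kwargs(0, "ffill")     # raises ValueError
+# >>> validate_fillna_kwargs(None, None)     # raises ValueError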
+
+
+def validate_percentile(q: float | Iterable[float]) -> np.ndarray:
+ """
+ Validate percentiles (used by describe and quantile).
+
+ This function checks if the given float or iterable of floats is a valid percentile
+ otherwise raises a ValueError.
+
+ Parameters
+ ----------
+ q: float or iterable of floats
+ A single percentile or an iterable of percentiles.
+
+ Returns
+ -------
+ ndarray
+ An ndarray of the percentiles if valid.
+
+ Raises
+ ------
+ ValueError if the percentiles are not all in the interval [0, 1].
+ """
+ q_arr = np.asarray(q)
+ # Don't change this to an f-string. The string formatting
+ # is too expensive for cases where we don't need it.
+ msg = "percentiles should all be in the interval [0, 1]"
+ if q_arr.ndim == 0:
+ if not 0 <= q_arr <= 1:
+ raise ValueError(msg)
+ else:
+ if not all(0 <= qs <= 1 for qs in q_arr):
+ raise ValueError(msg)
+ return q_arr
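+
+# Usage sketch (comment only):
+# >>> validate_percentile(0.5)           # -> array(0.5)
+# >>> validate_percentile([0.25, 0.75])  # -> array([0.25, 0.75])
+# >>> validate_percentile(1.5)           # raises ValueError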
+
+
+@overload
+def validate_ascending(ascending: BoolishT) -> BoolishT:
+ ...
+
+
+@overload
+def validate_ascending(ascending: Sequence[BoolishT]) -> list[BoolishT]:
+ ...
+
+
+def validate_ascending(
+ ascending: bool | int | Sequence[BoolishT],
+) -> bool | int | list[BoolishT]:
+ """Validate ``ascending`` kwargs for ``sort_index`` method."""
+ kwargs = {"none_allowed": False, "int_allowed": True}
+ if not isinstance(ascending, Sequence):
+ return validate_bool_kwarg(ascending, "ascending", **kwargs)
+
+ return [validate_bool_kwarg(item, "ascending", **kwargs) for item in ascending]
+
+
+def validate_endpoints(closed: str | None) -> tuple[bool, bool]:
+ """
+ Check that the `closed` argument is among [None, "left", "right"]
+
+ Parameters
+ ----------
+ closed : {None, "left", "right"}
+
+ Returns
+ -------
+ left_closed : bool
+ right_closed : bool
+
+ Raises
+ ------
+ ValueError : if argument is not among valid values
+ """
+ left_closed = False
+ right_closed = False
+
+ if closed is None:
+ left_closed = True
+ right_closed = True
+ elif closed == "left":
+ left_closed = True
+ elif closed == "right":
+ right_closed = True
+ else:
+ raise ValueError("Closed has to be either 'left', 'right' or None")
+
+ return left_closed, right_closed
+
+
+def validate_inclusive(inclusive: str | None) -> tuple[bool, bool]:
+ """
+ Check that the `inclusive` argument is among {"both", "neither", "left", "right"}.
+
+ Parameters
+ ----------
+ inclusive : {"both", "neither", "left", "right"}
+
+ Returns
+ -------
+ left_right_inclusive : tuple[bool, bool]
+
+ Raises
+ ------
+ ValueError : if argument is not among valid values
+ """
+ left_right_inclusive: tuple[bool, bool] | None = None
+
+ if isinstance(inclusive, str):
+ left_right_inclusive = {
+ "both": (True, True),
+ "left": (True, False),
+ "right": (False, True),
+ "neither": (False, False),
+ }.get(inclusive)
+
+ if left_right_inclusive is None:
+ raise ValueError(
+ "Inclusive has to be either 'both', 'neither', 'left' or 'right'"
+ )
+
+ return left_right_inclusive
+
+
+def validate_insert_loc(loc: int, length: int) -> int:
+ """
+ Check that we have an integer between -length and length, inclusive.
+
+ Standardize negative loc to within [0, length].
+
+ The exceptions we raise on failure match np.insert.
+ """
+ if not is_integer(loc):
+ raise TypeError(f"loc must be an integer between -{length} and {length}")
+
+ if loc < 0:
+ loc += length
+ if not 0 <= loc <= length:
+ raise IndexError(f"loc must be an integer between -{length} and {length}")
+ return loc # pyright: ignore[reportGeneralTypeIssues]
+
+
+def check_dtype_backend(dtype_backend) -> None:
+ if dtype_backend is not lib.no_default:
+ if dtype_backend not in ["numpy_nullable", "pyarrow"]:
+ raise ValueError(
+ f"dtype_backend {dtype_backend} is invalid, only 'numpy_nullable' and "
+ f"'pyarrow' are allowed.",
+ )
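+
+
+# Usage sketch (comment only):
+# >>> from pandas._libs import lib
+# >>> check_dtype_backend(lib.no_default)  # the no-default sentinel passes
+# >>> check_dtype_backend("pyarrow")       # an accepted backend
+# >>> check_dtype_backend("object")        # raises ValueError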