diff --git a/.gitattributes b/.gitattributes index d0bc835f5fbe3e04ea7baf880d7af02264ed5b39..bbf7a6d8f12658edf8a2ec0f69859b06654bd4b4 100644 --- a/.gitattributes +++ b/.gitattributes @@ -552,3 +552,4 @@ moondream/lib/python3.10/site-packages/pygments/lexers/__pycache__/lisp.cpython- parrot/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/libcupti.so.12 filter=lfs diff=lfs merge=lfs -text moondream/lib/python3.10/site-packages/altair/vegalite/v5/__pycache__/api.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text mantis_evalkit/bin/python3.10 filter=lfs diff=lfs merge=lfs -text +openflamingo/lib/python3.10/site-packages/torch/lib/libnvrtc-672ee683.so.11.2 filter=lfs diff=lfs merge=lfs -text diff --git a/mantis_evalkit/lib/python3.10/site-packages/aiosignal/__init__.py b/mantis_evalkit/lib/python3.10/site-packages/aiosignal/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..4ad0278993a986cecabf966d7b6ffd0c58f060e4 --- /dev/null +++ b/mantis_evalkit/lib/python3.10/site-packages/aiosignal/__init__.py @@ -0,0 +1,36 @@ +from frozenlist import FrozenList + +__version__ = "1.3.2" + +__all__ = ("Signal",) + + +class Signal(FrozenList): + """Coroutine-based signal implementation. + + To connect a callback to a signal, use any list method. + + Signals are fired using the send() coroutine, which takes named + arguments. + """ + + __slots__ = ("_owner",) + + def __init__(self, owner): + super().__init__() + self._owner = owner + + def __repr__(self): + return "".format( + self._owner, self.frozen, list(self) + ) + + async def send(self, *args, **kwargs): + """ + Sends data to all registered receivers. 
+ """ + if not self.frozen: + raise RuntimeError("Cannot send non-frozen signal.") + + for receiver in self: + await receiver(*args, **kwargs) # type: ignore diff --git a/mantis_evalkit/lib/python3.10/site-packages/aiosignal/__init__.pyi b/mantis_evalkit/lib/python3.10/site-packages/aiosignal/__init__.pyi new file mode 100644 index 0000000000000000000000000000000000000000..d4e3416d72246058259061578a82697e2bc0706e --- /dev/null +++ b/mantis_evalkit/lib/python3.10/site-packages/aiosignal/__init__.pyi @@ -0,0 +1,12 @@ +from typing import Any, Generic, TypeVar + +from frozenlist import FrozenList + +__all__ = ("Signal",) + +_T = TypeVar("_T") + +class Signal(FrozenList[_T], Generic[_T]): + def __init__(self, owner: Any) -> None: ... + def __repr__(self) -> str: ... + async def send(self, *args: Any, **kwargs: Any) -> None: ... diff --git a/mantis_evalkit/lib/python3.10/site-packages/aiosignal/py.typed b/mantis_evalkit/lib/python3.10/site-packages/aiosignal/py.typed new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/mantis_evalkit/lib/python3.10/site-packages/async_timeout/__init__.py b/mantis_evalkit/lib/python3.10/site-packages/async_timeout/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..fe4aa581635bb30ee4e880366b1bbab6227fb148 --- /dev/null +++ b/mantis_evalkit/lib/python3.10/site-packages/async_timeout/__init__.py @@ -0,0 +1,276 @@ +import asyncio +import enum +import sys +from types import TracebackType +from typing import Optional, Type, final + + +__version__ = "5.0.1" + + +__all__ = ("timeout", "timeout_at", "Timeout") + + +def timeout(delay: Optional[float]) -> "Timeout": + """timeout context manager. + + Useful in cases when you want to apply timeout logic around block + of code or in cases when asyncio.wait_for is not suitable. For example: + + >>> async with timeout(0.001): + ... async with aiohttp.get('https://github.com') as r: + ... 
await r.text() + + + delay - value in seconds or None to disable timeout logic + """ + loop = asyncio.get_running_loop() + if delay is not None: + deadline = loop.time() + delay # type: Optional[float] + else: + deadline = None + return Timeout(deadline, loop) + + +def timeout_at(deadline: Optional[float]) -> "Timeout": + """Schedule the timeout at absolute time. + + deadline argument points on the time in the same clock system + as loop.time(). + + Please note: it is not POSIX time but a time with + undefined starting base, e.g. the time of the system power on. + + >>> async with timeout_at(loop.time() + 10): + ... async with aiohttp.get('https://github.com') as r: + ... await r.text() + + + """ + loop = asyncio.get_running_loop() + return Timeout(deadline, loop) + + +class _State(enum.Enum): + INIT = "INIT" + ENTER = "ENTER" + TIMEOUT = "TIMEOUT" + EXIT = "EXIT" + + +if sys.version_info >= (3, 11): + + class _Expired: + __slots__ = ("_val",) + + def __init__(self, val: bool) -> None: + self._val = val + + def __call__(self) -> bool: + return self._val + + def __bool__(self) -> bool: + return self._val + + def __repr__(self) -> str: + return repr(self._val) + + def __str__(self) -> str: + return str(self._val) + + @final + class Timeout(asyncio.Timeout): # type: ignore[misc] + # Supports full asyncio.Timeout API. + # Also provides several asyncio_timeout specific methods + # for backward compatibility. 
+ def __init__( + self, deadline: Optional[float], loop: asyncio.AbstractEventLoop + ) -> None: + super().__init__(deadline) + + @property + def expired(self) -> _Expired: + # a hacky property hat can provide both roles: + # timeout.expired() from asyncio + # timeout.expired from asyncio_timeout + return _Expired(super().expired()) + + @property + def deadline(self) -> Optional[float]: + return self.when() + + def reject(self) -> None: + """Reject scheduled timeout if any.""" + # cancel is maybe better name but + # task.cancel() raises CancelledError in asyncio world. + self.reschedule(None) + + def shift(self, delay: float) -> None: + """Advance timeout on delay seconds. + + The delay can be negative. + + Raise RuntimeError if shift is called when deadline is not scheduled + """ + deadline = self.when() + if deadline is None: + raise RuntimeError("cannot shift timeout if deadline is not scheduled") + self.reschedule(deadline + delay) + + def update(self, deadline: float) -> None: + """Set deadline to absolute value. + + deadline argument points on the time in the same clock system + as loop.time(). + + If new deadline is in the past the timeout is raised immediately. + + Please note: it is not POSIX time but a time with + undefined starting base, e.g. the time of the system power on. + """ + self.reschedule(deadline) + +else: + + @final + class Timeout: + # Internal class, please don't instantiate it directly + # Use timeout() and timeout_at() public factories instead. + # + # Implementation note: `async with timeout()` is preferred + # over `with timeout()`. + # While technically the Timeout class implementation + # doesn't need to be async at all, + # the `async with` statement explicitly points that + # the context manager should be used from async function context. + # + # This design allows to avoid many silly misusages. + # + # TimeoutError is raised immediately when scheduled + # if the deadline is passed. 
+ # The purpose is to time out as soon as possible + # without waiting for the next await expression. + + __slots__ = ("_deadline", "_loop", "_state", "_timeout_handler", "_task") + + def __init__( + self, deadline: Optional[float], loop: asyncio.AbstractEventLoop + ) -> None: + self._loop = loop + self._state = _State.INIT + + self._task: Optional["asyncio.Task[object]"] = None + self._timeout_handler = None # type: Optional[asyncio.Handle] + if deadline is None: + self._deadline = None # type: Optional[float] + else: + self.update(deadline) + + async def __aenter__(self) -> "Timeout": + self._do_enter() + return self + + async def __aexit__( + self, + exc_type: Optional[Type[BaseException]], + exc_val: Optional[BaseException], + exc_tb: Optional[TracebackType], + ) -> Optional[bool]: + self._do_exit(exc_type) + return None + + @property + def expired(self) -> bool: + """Is timeout expired during execution?""" + return self._state == _State.TIMEOUT + + @property + def deadline(self) -> Optional[float]: + return self._deadline + + def reject(self) -> None: + """Reject scheduled timeout if any.""" + # cancel is maybe better name but + # task.cancel() raises CancelledError in asyncio world. + if self._state not in (_State.INIT, _State.ENTER): + raise RuntimeError(f"invalid state {self._state.value}") + self._reject() + + def _reject(self) -> None: + self._task = None + if self._timeout_handler is not None: + self._timeout_handler.cancel() + self._timeout_handler = None + + def shift(self, delay: float) -> None: + """Advance timeout on delay seconds. + + The delay can be negative. + + Raise RuntimeError if shift is called when deadline is not scheduled + """ + deadline = self._deadline + if deadline is None: + raise RuntimeError("cannot shift timeout if deadline is not scheduled") + self.update(deadline + delay) + + def update(self, deadline: float) -> None: + """Set deadline to absolute value. 
+ + deadline argument points on the time in the same clock system + as loop.time(). + + If new deadline is in the past the timeout is raised immediately. + + Please note: it is not POSIX time but a time with + undefined starting base, e.g. the time of the system power on. + """ + if self._state == _State.EXIT: + raise RuntimeError("cannot reschedule after exit from context manager") + if self._state == _State.TIMEOUT: + raise RuntimeError("cannot reschedule expired timeout") + if self._timeout_handler is not None: + self._timeout_handler.cancel() + self._deadline = deadline + if self._state != _State.INIT: + self._reschedule() + + def _reschedule(self) -> None: + assert self._state == _State.ENTER + deadline = self._deadline + if deadline is None: + return + + now = self._loop.time() + if self._timeout_handler is not None: + self._timeout_handler.cancel() + + self._task = asyncio.current_task() + if deadline <= now: + self._timeout_handler = self._loop.call_soon(self._on_timeout) + else: + self._timeout_handler = self._loop.call_at(deadline, self._on_timeout) + + def _do_enter(self) -> None: + if self._state != _State.INIT: + raise RuntimeError(f"invalid state {self._state.value}") + self._state = _State.ENTER + self._reschedule() + + def _do_exit(self, exc_type: Optional[Type[BaseException]]) -> None: + if exc_type is asyncio.CancelledError and self._state == _State.TIMEOUT: + assert self._task is not None + self._timeout_handler = None + self._task = None + raise asyncio.TimeoutError + # timeout has not expired + self._state = _State.EXIT + self._reject() + return None + + def _on_timeout(self) -> None: + assert self._task is not None + self._task.cancel() + self._state = _State.TIMEOUT + # drop the reference early + self._timeout_handler = None diff --git a/mantis_evalkit/lib/python3.10/site-packages/datasets/packaged_modules/folder_based_builder/__init__.py b/mantis_evalkit/lib/python3.10/site-packages/datasets/packaged_modules/folder_based_builder/__init__.py 
new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/mantis_evalkit/lib/python3.10/site-packages/datasets/packaged_modules/folder_based_builder/__pycache__/__init__.cpython-310.pyc b/mantis_evalkit/lib/python3.10/site-packages/datasets/packaged_modules/folder_based_builder/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..05a05c25257f42b604007e1fc5e2329d189bbe1d Binary files /dev/null and b/mantis_evalkit/lib/python3.10/site-packages/datasets/packaged_modules/folder_based_builder/__pycache__/__init__.cpython-310.pyc differ diff --git a/mantis_evalkit/lib/python3.10/site-packages/datasets/packaged_modules/folder_based_builder/__pycache__/folder_based_builder.cpython-310.pyc b/mantis_evalkit/lib/python3.10/site-packages/datasets/packaged_modules/folder_based_builder/__pycache__/folder_based_builder.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e92de2d90648a96f89cfe7116b4ff49350c1dd41 Binary files /dev/null and b/mantis_evalkit/lib/python3.10/site-packages/datasets/packaged_modules/folder_based_builder/__pycache__/folder_based_builder.cpython-310.pyc differ diff --git a/mantis_evalkit/lib/python3.10/site-packages/datasets/packaged_modules/folder_based_builder/folder_based_builder.py b/mantis_evalkit/lib/python3.10/site-packages/datasets/packaged_modules/folder_based_builder/folder_based_builder.py new file mode 100644 index 0000000000000000000000000000000000000000..146ef4e613b9d943b160c04b2286b2a2d331b80a --- /dev/null +++ b/mantis_evalkit/lib/python3.10/site-packages/datasets/packaged_modules/folder_based_builder/folder_based_builder.py @@ -0,0 +1,406 @@ +import collections +import itertools +import os +from dataclasses import dataclass +from typing import List, Optional, Tuple, Type + +import pandas as pd +import pyarrow as pa +import pyarrow.json as paj + +import datasets +from 
datasets.features.features import FeatureType +from datasets.tasks.base import TaskTemplate + + +logger = datasets.utils.logging.get_logger(__name__) + + +def count_path_segments(path): + return path.replace("\\", "/").count("/") + + +@dataclass +class FolderBasedBuilderConfig(datasets.BuilderConfig): + """BuilderConfig for AutoFolder.""" + + features: Optional[datasets.Features] = None + drop_labels: bool = None + drop_metadata: bool = None + + +class FolderBasedBuilder(datasets.GeneratorBasedBuilder): + """ + Base class for generic data loaders for vision and image data. + + + Abstract class attributes to be overridden by a child class: + BASE_FEATURE: feature object to decode data (i.e. datasets.Image, datasets.Audio, ...) + BASE_COLUMN_NAME: string key name of a base feature (i.e. "image", "audio", ...) + BUILDER_CONFIG_CLASS: builder config inherited from `folder_based_builder.FolderBasedBuilderConfig` + EXTENSIONS: list of allowed extensions (only files with these extensions and METADATA_FILENAME files + will be included in a dataset) + CLASSIFICATION_TASK: classification task to use if labels are obtained from the folder structure + """ + + BASE_FEATURE: Type[FeatureType] + BASE_COLUMN_NAME: str + BUILDER_CONFIG_CLASS: FolderBasedBuilderConfig + EXTENSIONS: List[str] + CLASSIFICATION_TASK: TaskTemplate + + METADATA_FILENAMES: List[str] = ["metadata.csv", "metadata.jsonl"] + + def _info(self): + return datasets.DatasetInfo(features=self.config.features) + + def _split_generators(self, dl_manager): + if not self.config.data_files: + raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}") + + # Do an early pass if: + # * `drop_labels` is None (default) or False, to infer the class labels + # * `drop_metadata` is None (default) or False, to find the metadata files + do_analyze = not self.config.drop_labels or not self.config.drop_metadata + labels, path_depths = set(), set() + metadata_files = 
collections.defaultdict(set) + + def analyze(files_or_archives, downloaded_files_or_dirs, split): + if len(downloaded_files_or_dirs) == 0: + return + # The files are separated from the archives at this point, so check the first sample + # to see if it's a file or a directory and iterate accordingly + if os.path.isfile(downloaded_files_or_dirs[0]): + original_files, downloaded_files = files_or_archives, downloaded_files_or_dirs + for original_file, downloaded_file in zip(original_files, downloaded_files): + original_file, downloaded_file = str(original_file), str(downloaded_file) + _, original_file_ext = os.path.splitext(original_file) + if original_file_ext.lower() in self.EXTENSIONS: + if not self.config.drop_labels: + labels.add(os.path.basename(os.path.dirname(original_file))) + path_depths.add(count_path_segments(original_file)) + elif os.path.basename(original_file) in self.METADATA_FILENAMES: + metadata_files[split].add((original_file, downloaded_file)) + else: + original_file_name = os.path.basename(original_file) + logger.debug( + f"The file '{original_file_name}' was ignored: it is not an image, and is not {self.METADATA_FILENAMES} either." 
+ ) + else: + archives, downloaded_dirs = files_or_archives, downloaded_files_or_dirs + for archive, downloaded_dir in zip(archives, downloaded_dirs): + archive, downloaded_dir = str(archive), str(downloaded_dir) + for downloaded_dir_file in dl_manager.iter_files(downloaded_dir): + _, downloaded_dir_file_ext = os.path.splitext(downloaded_dir_file) + if downloaded_dir_file_ext in self.EXTENSIONS: + if not self.config.drop_labels: + labels.add(os.path.basename(os.path.dirname(downloaded_dir_file))) + path_depths.add(count_path_segments(downloaded_dir_file)) + elif os.path.basename(downloaded_dir_file) in self.METADATA_FILENAMES: + metadata_files[split].add((None, downloaded_dir_file)) + else: + archive_file_name = os.path.basename(archive) + original_file_name = os.path.basename(downloaded_dir_file) + logger.debug( + f"The file '{original_file_name}' from the archive '{archive_file_name}' was ignored: it is not an {self.BASE_COLUMN_NAME}, and is not {self.METADATA_FILENAMES} either." + ) + + data_files = self.config.data_files + splits = [] + for split_name, files in data_files.items(): + if isinstance(files, str): + files = [files] + files, archives = self._split_files_and_archives(files) + downloaded_files = dl_manager.download(files) + downloaded_dirs = dl_manager.download_and_extract(archives) + if do_analyze: # drop_metadata is None or False, drop_labels is None or False + logger.info(f"Searching for labels and/or metadata files in {split_name} data files...") + analyze(files, downloaded_files, split_name) + analyze(archives, downloaded_dirs, split_name) + + if metadata_files: + # add metadata if `metadata_files` are found and `drop_metadata` is None (default) or False + add_metadata = not self.config.drop_metadata + # if `metadata_files` are found, add labels only if + # `drop_labels` is set up to False explicitly (not-default behavior) + add_labels = self.config.drop_labels is False + else: + # if `metadata_files` are not found, don't add metadata + 
add_metadata = False + # if `metadata_files` are not found and `drop_labels` is None (default) - + # add labels if files are on the same level in directory hierarchy and there is more than one label + add_labels = ( + (len(labels) > 1 and len(path_depths) == 1) + if self.config.drop_labels is None + else not self.config.drop_labels + ) + + if add_labels: + logger.info("Adding the labels inferred from data directories to the dataset's features...") + if add_metadata: + logger.info("Adding metadata to the dataset...") + else: + add_labels, add_metadata, metadata_files = False, False, {} + + splits.append( + datasets.SplitGenerator( + name=split_name, + gen_kwargs={ + "files": list(zip(files, downloaded_files)) + + [(None, dl_manager.iter_files(downloaded_dir)) for downloaded_dir in downloaded_dirs], + "metadata_files": metadata_files, + "split_name": split_name, + "add_labels": add_labels, + "add_metadata": add_metadata, + }, + ) + ) + + if add_metadata: + # Verify that: + # * all metadata files have the same set of features + # * the `file_name` key is one of the metadata keys and is of type string + features_per_metadata_file: List[Tuple[str, datasets.Features]] = [] + + # Check that all metadata files share the same format + metadata_ext = { + os.path.splitext(original_metadata_file)[-1] + for original_metadata_file, _ in itertools.chain.from_iterable(metadata_files.values()) + } + if len(metadata_ext) > 1: + raise ValueError(f"Found metadata files with different extensions: {list(metadata_ext)}") + metadata_ext = metadata_ext.pop() + + for _, downloaded_metadata_file in itertools.chain.from_iterable(metadata_files.values()): + pa_metadata_table = self._read_metadata(downloaded_metadata_file, metadata_ext=metadata_ext) + features_per_metadata_file.append( + (downloaded_metadata_file, datasets.Features.from_arrow_schema(pa_metadata_table.schema)) + ) + for downloaded_metadata_file, metadata_features in features_per_metadata_file: + if metadata_features != 
features_per_metadata_file[0][1]: + raise ValueError( + f"Metadata files {downloaded_metadata_file} and {features_per_metadata_file[0][0]} have different features: {features_per_metadata_file[0]} != {metadata_features}" + ) + metadata_features = features_per_metadata_file[0][1] + if "file_name" not in metadata_features: + raise ValueError("`file_name` must be present as dictionary key in metadata files") + if metadata_features["file_name"] != datasets.Value("string"): + raise ValueError("`file_name` key must be a string") + del metadata_features["file_name"] + else: + metadata_features = None + + # Normally, we would do this in _info, but we need to know the labels and/or metadata + # before building the features + if self.config.features is None: + if add_labels: + self.info.features = datasets.Features( + { + self.BASE_COLUMN_NAME: self.BASE_FEATURE(), + "label": datasets.ClassLabel(names=sorted(labels)), + } + ) + self.info.task_templates = [self.CLASSIFICATION_TASK.align_with_features(self.info.features)] + else: + self.info.features = datasets.Features({self.BASE_COLUMN_NAME: self.BASE_FEATURE()}) + + if add_metadata: + # Warn if there are duplicated keys in metadata compared to the existing features + # (`BASE_COLUMN_NAME`, optionally "label") + duplicated_keys = set(self.info.features) & set(metadata_features) + if duplicated_keys: + logger.warning( + f"Ignoring metadata columns {list(duplicated_keys)} as they are already present in " + f"the features dictionary." 
+ ) + # skip metadata duplicated keys + self.info.features.update( + { + feature: metadata_features[feature] + for feature in metadata_features + if feature not in duplicated_keys + } + ) + + return splits + + def _split_files_and_archives(self, data_files): + files, archives = [], [] + for data_file in data_files: + _, data_file_ext = os.path.splitext(data_file) + if data_file_ext.lower() in self.EXTENSIONS: + files.append(data_file) + elif os.path.basename(data_file) in self.METADATA_FILENAMES: + files.append(data_file) + else: + archives.append(data_file) + return files, archives + + def _read_metadata(self, metadata_file, metadata_ext: str = ""): + if metadata_ext == ".csv": + # Use `pd.read_csv` (although slower) instead of `pyarrow.csv.read_csv` for reading CSV files for consistency with the CSV packaged module + return pa.Table.from_pandas(pd.read_csv(metadata_file)) + else: + with open(metadata_file, "rb") as f: + return paj.read_json(f) + + def _generate_examples(self, files, metadata_files, split_name, add_metadata, add_labels): + split_metadata_files = metadata_files.get(split_name, []) + sample_empty_metadata = ( + {k: None for k in self.info.features if k != self.BASE_COLUMN_NAME} if self.info.features else {} + ) + last_checked_dir = None + metadata_dir = None + metadata_dict = None + downloaded_metadata_file = None + + metadata_ext = "" + if split_metadata_files: + metadata_ext = { + os.path.splitext(original_metadata_file)[-1] for original_metadata_file, _ in split_metadata_files + } + metadata_ext = metadata_ext.pop() + + file_idx = 0 + for original_file, downloaded_file_or_dir in files: + if original_file is not None: + _, original_file_ext = os.path.splitext(original_file) + if original_file_ext.lower() in self.EXTENSIONS: + if add_metadata: + # If the file is a file of a needed type, and we've just entered a new directory, + # find the nereast metadata file (by counting path segments) for the directory + current_dir = 
os.path.dirname(original_file) + if last_checked_dir is None or last_checked_dir != current_dir: + last_checked_dir = current_dir + metadata_file_candidates = [ + ( + os.path.relpath(original_file, os.path.dirname(metadata_file_candidate)), + metadata_file_candidate, + downloaded_metadata_file, + ) + for metadata_file_candidate, downloaded_metadata_file in split_metadata_files + if metadata_file_candidate + is not None # ignore metadata_files that are inside archives + and not os.path.relpath( + original_file, os.path.dirname(metadata_file_candidate) + ).startswith("..") + ] + if metadata_file_candidates: + _, metadata_file, downloaded_metadata_file = min( + metadata_file_candidates, key=lambda x: count_path_segments(x[0]) + ) + pa_metadata_table = self._read_metadata( + downloaded_metadata_file, metadata_ext=metadata_ext + ) + pa_file_name_array = pa_metadata_table["file_name"] + pa_metadata_table = pa_metadata_table.drop(["file_name"]) + metadata_dir = os.path.dirname(metadata_file) + metadata_dict = { + os.path.normpath(file_name).replace("\\", "/"): sample_metadata + for file_name, sample_metadata in zip( + pa_file_name_array.to_pylist(), pa_metadata_table.to_pylist() + ) + } + else: + raise ValueError( + f"One or several metadata{metadata_ext} were found, but not in the same directory or in a parent directory of {downloaded_file_or_dir}." + ) + if metadata_dir is not None and downloaded_metadata_file is not None: + file_relpath = os.path.relpath(original_file, metadata_dir) + file_relpath = file_relpath.replace("\\", "/") + if file_relpath not in metadata_dict: + raise ValueError( + f"{self.BASE_COLUMN_NAME} at {file_relpath} doesn't have metadata in {downloaded_metadata_file}." + ) + sample_metadata = metadata_dict[file_relpath] + else: + raise ValueError( + f"One or several metadata{metadata_ext} were found, but not in the same directory or in a parent directory of {downloaded_file_or_dir}." 
+ ) + else: + sample_metadata = {} + if add_labels: + sample_label = {"label": os.path.basename(os.path.dirname(original_file))} + else: + sample_label = {} + yield ( + file_idx, + { + **sample_empty_metadata, + self.BASE_COLUMN_NAME: downloaded_file_or_dir, + **sample_metadata, + **sample_label, + }, + ) + file_idx += 1 + else: + for downloaded_dir_file in downloaded_file_or_dir: + _, downloaded_dir_file_ext = os.path.splitext(downloaded_dir_file) + if downloaded_dir_file_ext.lower() in self.EXTENSIONS: + if add_metadata: + current_dir = os.path.dirname(downloaded_dir_file) + if last_checked_dir is None or last_checked_dir != current_dir: + last_checked_dir = current_dir + metadata_file_candidates = [ + ( + os.path.relpath( + downloaded_dir_file, os.path.dirname(downloaded_metadata_file) + ), + metadata_file_candidate, + downloaded_metadata_file, + ) + for metadata_file_candidate, downloaded_metadata_file in split_metadata_files + if metadata_file_candidate + is None # ignore metadata_files that are not inside archives + and not os.path.relpath( + downloaded_dir_file, os.path.dirname(downloaded_metadata_file) + ).startswith("..") + ] + if metadata_file_candidates: + _, metadata_file, downloaded_metadata_file = min( + metadata_file_candidates, key=lambda x: count_path_segments(x[0]) + ) + pa_metadata_table = self._read_metadata( + downloaded_metadata_file, metadata_ext=metadata_ext + ) + pa_file_name_array = pa_metadata_table["file_name"] + pa_metadata_table = pa_metadata_table.drop(["file_name"]) + metadata_dir = os.path.dirname(downloaded_metadata_file) + metadata_dict = { + os.path.normpath(file_name).replace("\\", "/"): sample_metadata + for file_name, sample_metadata in zip( + pa_file_name_array.to_pylist(), pa_metadata_table.to_pylist() + ) + } + else: + raise ValueError( + f"One or several metadata{metadata_ext} were found, but not in the same directory or in a parent directory of {downloaded_dir_file}." 
+ ) + if metadata_dir is not None and downloaded_metadata_file is not None: + downloaded_dir_file_relpath = os.path.relpath(downloaded_dir_file, metadata_dir) + downloaded_dir_file_relpath = downloaded_dir_file_relpath.replace("\\", "/") + if downloaded_dir_file_relpath not in metadata_dict: + raise ValueError( + f"{self.BASE_COLUMN_NAME} at {downloaded_dir_file_relpath} doesn't have metadata in {downloaded_metadata_file}." + ) + sample_metadata = metadata_dict[downloaded_dir_file_relpath] + else: + raise ValueError( + f"One or several metadata{metadata_ext} were found, but not in the same directory or in a parent directory of {downloaded_dir_file}." + ) + else: + sample_metadata = {} + if add_labels: + sample_label = {"label": os.path.basename(os.path.dirname(downloaded_dir_file))} + else: + sample_label = {} + yield ( + file_idx, + { + **sample_empty_metadata, + self.BASE_COLUMN_NAME: downloaded_dir_file, + **sample_metadata, + **sample_label, + }, + ) + file_idx += 1 diff --git a/mantis_evalkit/lib/python3.10/site-packages/datasets/packaged_modules/pandas/__pycache__/__init__.cpython-310.pyc b/mantis_evalkit/lib/python3.10/site-packages/datasets/packaged_modules/pandas/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..35fdc9624f446bb8dc41a0680980f8d8a968fee9 Binary files /dev/null and b/mantis_evalkit/lib/python3.10/site-packages/datasets/packaged_modules/pandas/__pycache__/__init__.cpython-310.pyc differ diff --git a/mantis_evalkit/lib/python3.10/site-packages/datasets/packaged_modules/sql/__pycache__/__init__.cpython-310.pyc b/mantis_evalkit/lib/python3.10/site-packages/datasets/packaged_modules/sql/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ca4c02a91be3f4f10d8c2aa9f031319c7409d7b1 Binary files /dev/null and b/mantis_evalkit/lib/python3.10/site-packages/datasets/packaged_modules/sql/__pycache__/__init__.cpython-310.pyc differ diff --git 
a/mantis_evalkit/lib/python3.10/site-packages/datasets/packaged_modules/sql/sql.py b/mantis_evalkit/lib/python3.10/site-packages/datasets/packaged_modules/sql/sql.py new file mode 100644 index 0000000000000000000000000000000000000000..b0791ba88594fb8e76c957a11cca9936cf321bb4 --- /dev/null +++ b/mantis_evalkit/lib/python3.10/site-packages/datasets/packaged_modules/sql/sql.py @@ -0,0 +1,118 @@ +import sys +from dataclasses import dataclass +from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Union + +import pandas as pd +import pyarrow as pa + +import datasets +import datasets.config +from datasets.features.features import require_storage_cast +from datasets.table import table_cast + + +if TYPE_CHECKING: + import sqlite3 + + import sqlalchemy + + +logger = datasets.utils.logging.get_logger(__name__) + + +@dataclass +class SqlConfig(datasets.BuilderConfig): + """BuilderConfig for SQL.""" + + sql: Union[str, "sqlalchemy.sql.Selectable"] = None + con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] = None + index_col: Optional[Union[str, List[str]]] = None + coerce_float: bool = True + params: Optional[Union[List, Tuple, Dict]] = None + parse_dates: Optional[Union[List, Dict]] = None + columns: Optional[List[str]] = None + chunksize: Optional[int] = 10_000 + features: Optional[datasets.Features] = None + + def __post_init__(self): + if self.sql is None: + raise ValueError("sql must be specified") + if self.con is None: + raise ValueError("con must be specified") + + def create_config_id( + self, + config_kwargs: dict, + custom_features: Optional[datasets.Features] = None, + ) -> str: + config_kwargs = config_kwargs.copy() + # We need to stringify the Selectable object to make its hash deterministic + + # The process of stringifying is explained here: http://docs.sqlalchemy.org/en/latest/faq/sqlexpressions.html + sql = config_kwargs["sql"] + if not isinstance(sql, str): + if datasets.config.SQLALCHEMY_AVAILABLE and 
"sqlalchemy" in sys.modules: + import sqlalchemy + + if isinstance(sql, sqlalchemy.sql.Selectable): + engine = sqlalchemy.create_engine(config_kwargs["con"].split("://")[0] + "://") + sql_str = str(sql.compile(dialect=engine.dialect)) + config_kwargs["sql"] = sql_str + else: + raise TypeError( + f"Supported types for 'sql' are string and sqlalchemy.sql.Selectable but got {type(sql)}: {sql}" + ) + else: + raise TypeError( + f"Supported types for 'sql' are string and sqlalchemy.sql.Selectable but got {type(sql)}: {sql}" + ) + con = config_kwargs["con"] + if not isinstance(con, str): + config_kwargs["con"] = id(con) + logger.info( + f"SQL connection 'con' of type {type(con)} couldn't be hashed properly. To enable hashing, specify 'con' as URI string instead." + ) + + return super().create_config_id(config_kwargs, custom_features=custom_features) + + @property + def pd_read_sql_kwargs(self): + pd_read_sql_kwargs = { + "index_col": self.index_col, + "columns": self.columns, + "params": self.params, + "coerce_float": self.coerce_float, + "parse_dates": self.parse_dates, + } + return pd_read_sql_kwargs + + +class Sql(datasets.ArrowBasedBuilder): + BUILDER_CONFIG_CLASS = SqlConfig + + def _info(self): + return datasets.DatasetInfo(features=self.config.features) + + def _split_generators(self, dl_manager): + return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={})] + + def _cast_table(self, pa_table: pa.Table) -> pa.Table: + if self.config.features is not None: + schema = self.config.features.arrow_schema + if all(not require_storage_cast(feature) for feature in self.config.features.values()): + # cheaper cast + pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema) + else: + # more expensive cast; allows str <-> int/float or str to Audio for example + pa_table = table_cast(pa_table, schema) + return pa_table + + def _generate_tables(self): + chunksize = self.config.chunksize + sql_reader = pd.read_sql( + self.config.sql, 
self.config.con, chunksize=chunksize, **self.config.pd_read_sql_kwargs + ) + sql_reader = [sql_reader] if chunksize is None else sql_reader + for chunk_idx, df in enumerate(sql_reader): + pa_table = pa.Table.from_pandas(df) + yield chunk_idx, self._cast_table(pa_table) diff --git a/mantis_evalkit/lib/python3.10/site-packages/datasets/packaged_modules/webdataset/__init__.py b/mantis_evalkit/lib/python3.10/site-packages/datasets/packaged_modules/webdataset/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/mantis_evalkit/lib/python3.10/site-packages/datasets/packaged_modules/webdataset/__pycache__/__init__.cpython-310.pyc b/mantis_evalkit/lib/python3.10/site-packages/datasets/packaged_modules/webdataset/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ad1e05a1cab433d5f44eb854802020722f59da2e Binary files /dev/null and b/mantis_evalkit/lib/python3.10/site-packages/datasets/packaged_modules/webdataset/__pycache__/__init__.cpython-310.pyc differ diff --git a/mantis_evalkit/lib/python3.10/site-packages/datasets/packaged_modules/webdataset/__pycache__/_tenbin.cpython-310.pyc b/mantis_evalkit/lib/python3.10/site-packages/datasets/packaged_modules/webdataset/__pycache__/_tenbin.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6964db1abde5abccf6b8f3941423ee1abeb3ddf6 Binary files /dev/null and b/mantis_evalkit/lib/python3.10/site-packages/datasets/packaged_modules/webdataset/__pycache__/_tenbin.cpython-310.pyc differ diff --git a/mantis_evalkit/lib/python3.10/site-packages/datasets/packaged_modules/webdataset/__pycache__/webdataset.cpython-310.pyc b/mantis_evalkit/lib/python3.10/site-packages/datasets/packaged_modules/webdataset/__pycache__/webdataset.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..257ae4eaeb2404056bfaae0873cc7c84293c48ad Binary files /dev/null and 
b/mantis_evalkit/lib/python3.10/site-packages/datasets/packaged_modules/webdataset/__pycache__/webdataset.cpython-310.pyc differ diff --git a/mantis_evalkit/lib/python3.10/site-packages/datasets/packaged_modules/webdataset/_tenbin.py b/mantis_evalkit/lib/python3.10/site-packages/datasets/packaged_modules/webdataset/_tenbin.py new file mode 100644 index 0000000000000000000000000000000000000000..cd8c054842e090dc09bdf3d2fee59241a1a928c5 --- /dev/null +++ b/mantis_evalkit/lib/python3.10/site-packages/datasets/packaged_modules/webdataset/_tenbin.py @@ -0,0 +1,285 @@ +# +# Copyright (c) 2017-2021 NVIDIA CORPORATION. All rights reserved. +# This file coems from the WebDataset library. +# See the LICENSE file for licensing terms (BSD-style). +# + +""" +Binary tensor encodings for PyTorch and NumPy. + +This defines efficient binary encodings for tensors. The format is 8 byte +aligned and can be used directly for computations when transmitted, say, +via RDMA. The format is supported by WebDataset with the `.ten` filename +extension. It is also used by Tensorcom, Tensorcom RDMA, and can be used +for fast tensor storage with LMDB and in disk files (which can be memory +mapped) + +Data is encoded as a series of chunks: + +- magic number (int64) +- length in bytes (int64) +- bytes (multiple of 64 bytes long) + +Arrays are a header chunk followed by a data chunk. +Header chunks have the following structure: + +- dtype (int64) +- 8 byte array name +- ndim (int64) +- dim[0] +- dim[1] +- ... 
+""" + +import struct +import sys + +import numpy as np + + +def bytelen(a): + """Determine the length of a in bytes.""" + if hasattr(a, "nbytes"): + return a.nbytes + elif isinstance(a, (bytearray, bytes)): + return len(a) + else: + raise ValueError(a, "cannot determine nbytes") + + +def bytedata(a): + """Return a the raw data corresponding to a.""" + if isinstance(a, (bytearray, bytes, memoryview)): + return a + elif hasattr(a, "data"): + return a.data + else: + raise ValueError(a, "cannot return bytedata") + + +# tables for converting between long/short NumPy dtypes + +long_to_short = """ +float16 f2 +float32 f4 +float64 f8 +int8 i1 +int16 i2 +int32 i4 +int64 i8 +uint8 u1 +uint16 u2 +unit32 u4 +uint64 u8 +""".strip() +long_to_short = [x.split() for x in long_to_short.split("\n")] +long_to_short = {x[0]: x[1] for x in long_to_short} +short_to_long = {v: k for k, v in long_to_short.items()} + + +def check_acceptable_input_type(data, allow64): + """Check that the data has an acceptable type for tensor encoding. 
+ + :param data: array + :param allow64: allow 64 bit types + """ + for a in data: + if a.dtype.name not in long_to_short: + raise ValueError("unsupported dataypte") + if not allow64 and a.dtype.name not in ["float64", "int64", "uint64"]: + raise ValueError("64 bit datatypes not allowed unless explicitly enabled") + + +def str64(s): + """Convert a string to an int64.""" + s = s + "\0" * (8 - len(s)) + s = s.encode("ascii") + return struct.unpack("@q", s)[0] + + +def unstr64(i): + """Convert an int64 to a string.""" + b = struct.pack("@q", i) + return b.decode("ascii").strip("\0") + + +def check_infos(data, infos, required_infos=None): + """Verify the info strings.""" + if required_infos is False or required_infos is None: + return data + if required_infos is True: + return data, infos + if not isinstance(required_infos, (tuple, list)): + raise ValueError("required_infos must be tuple or list") + for required, actual in zip(required_infos, infos): + raise ValueError(f"actual info {actual} doesn't match required info {required}") + return data + + +def encode_header(a, info=""): + """Encode an array header as a byte array.""" + if a.ndim >= 10: + raise ValueError("too many dimensions") + if a.nbytes != np.prod(a.shape) * a.itemsize: + raise ValueError("mismatch between size and shape") + if a.dtype.name not in long_to_short: + raise ValueError("unsupported array type") + header = [str64(long_to_short[a.dtype.name]), str64(info), len(a.shape)] + list(a.shape) + return bytedata(np.array(header, dtype="i8")) + + +def decode_header(h): + """Decode a byte array into an array header.""" + h = np.frombuffer(h, dtype="i8") + if unstr64(h[0]) not in short_to_long: + raise ValueError("unsupported array type") + dtype = np.dtype(short_to_long[unstr64(h[0])]) + info = unstr64(h[1]) + rank = int(h[2]) + shape = tuple(h[3 : 3 + rank]) + return shape, dtype, info + + +def encode_list(l, infos=None): # noqa: E741 + """Given a list of arrays, encode them into a list of byte 
arrays.""" + if infos is None: + infos = [""] + else: + if len(l) != len(infos): + raise ValueError(f"length of list {l} must muatch length of infos {infos}") + result = [] + for i, a in enumerate(l): + header = encode_header(a, infos[i % len(infos)]) + result += [header, bytedata(a)] + return result + + +def decode_list(l, infos=False): # noqa: E741 + """Given a list of byte arrays, decode them into arrays.""" + result = [] + infos0 = [] + for header, data in zip(l[::2], l[1::2]): + shape, dtype, info = decode_header(header) + a = np.frombuffer(data, dtype=dtype, count=np.prod(shape)).reshape(*shape) + result += [a] + infos0 += [info] + return check_infos(result, infos0, infos) + + +magic_str = "~TenBin~" +magic = str64(magic_str) +magic_bytes = unstr64(magic).encode("ascii") + + +def roundup(n, k=64): + """Round up to the next multiple of 64.""" + return k * ((n + k - 1) // k) + + +def encode_chunks(l): # noqa: E741 + """Encode a list of chunks into a single byte array, with lengths and magics..""" + size = sum(16 + roundup(b.nbytes) for b in l) + result = bytearray(size) + offset = 0 + for b in l: + result[offset : offset + 8] = magic_bytes + offset += 8 + result[offset : offset + 8] = struct.pack("@q", b.nbytes) + offset += 8 + result[offset : offset + bytelen(b)] = b + offset += roundup(bytelen(b)) + return result + + +def decode_chunks(buf): + """Decode a byte array into a list of chunks.""" + result = [] + offset = 0 + total = bytelen(buf) + while offset < total: + if magic_bytes != buf[offset : offset + 8]: + raise ValueError("magic bytes mismatch") + offset += 8 + nbytes = struct.unpack("@q", buf[offset : offset + 8])[0] + offset += 8 + b = buf[offset : offset + nbytes] + offset += roundup(nbytes) + result.append(b) + return result + + +def encode_buffer(l, infos=None): # noqa: E741 + """Encode a list of arrays into a single byte array.""" + if not isinstance(l, list): + raise ValueError("requires list") + return encode_chunks(encode_list(l, infos=infos)) 
+ + +def decode_buffer(buf, infos=False): + """Decode a byte array into a list of arrays.""" + return decode_list(decode_chunks(buf), infos=infos) + + +def write_chunk(stream, buf): + """Write a byte chunk to the stream with magics, length, and padding.""" + nbytes = bytelen(buf) + stream.write(magic_bytes) + stream.write(struct.pack("@q", nbytes)) + stream.write(bytedata(buf)) + padding = roundup(nbytes) - nbytes + if padding > 0: + stream.write(b"\0" * padding) + + +def read_chunk(stream): + """Read a byte chunk from a stream with magics, length, and padding.""" + magic = stream.read(8) + if magic == b"": + return None + if magic != magic_bytes: + raise ValueError("magic number does not match") + nbytes = stream.read(8) + nbytes = struct.unpack("@q", nbytes)[0] + if nbytes < 0: + raise ValueError("negative nbytes") + data = stream.read(nbytes) + padding = roundup(nbytes) - nbytes + if padding > 0: + stream.read(padding) + return data + + +def write(stream, l, infos=None): # noqa: E741 + """Write a list of arrays to a stream, with magics, length, and padding.""" + for chunk in encode_list(l, infos=infos): + write_chunk(stream, chunk) + + +def read(stream, n=sys.maxsize, infos=False): + """Read a list of arrays from a stream, with magics, length, and padding.""" + chunks = [] + for _ in range(n): + header = read_chunk(stream) + if header is None: + break + data = read_chunk(stream) + if data is None: + raise ValueError("premature EOF") + chunks += [header, data] + return decode_list(chunks, infos=infos) + + +def save(fname, *args, infos=None, nocheck=False): + """Save a list of arrays to a file, with magics, length, and padding.""" + if not nocheck and not fname.endswith(".ten"): + raise ValueError("file name should end in .ten") + with open(fname, "wb") as stream: + write(stream, args, infos=infos) + + +def load(fname, infos=False, nocheck=False): + """Read a list of arrays from a file, with magics, length, and padding.""" + if not nocheck and not 
fname.endswith(".ten"): + raise ValueError("file name should end in .ten") + with open(fname, "rb") as stream: + return read(stream, infos=infos) diff --git a/mantis_evalkit/lib/python3.10/site-packages/datasets/packaged_modules/webdataset/webdataset.py b/mantis_evalkit/lib/python3.10/site-packages/datasets/packaged_modules/webdataset/webdataset.py new file mode 100644 index 0000000000000000000000000000000000000000..3ac1e86fc417863ba9b5fd8fca97581c63d48768 --- /dev/null +++ b/mantis_evalkit/lib/python3.10/site-packages/datasets/packaged_modules/webdataset/webdataset.py @@ -0,0 +1,299 @@ +import io +import json +from itertools import islice +from typing import Any, Callable, Dict, List + +import numpy as np +import pyarrow as pa + +import datasets + + +logger = datasets.utils.logging.get_logger(__name__) + + +class WebDataset(datasets.GeneratorBasedBuilder): + DEFAULT_WRITER_BATCH_SIZE = 100 + IMAGE_EXTENSIONS: List[str] # definition at the bottom of the script + AUDIO_EXTENSIONS: List[str] # definition at the bottom of the script + DECODERS: Dict[str, Callable[[Any], Any]] # definition at the bottom of the script + NUM_EXAMPLES_FOR_FEATURES_INFERENCE = 5 + + @classmethod + def _get_pipeline_from_tar(cls, tar_path, tar_iterator): + current_example = {} + for filename, f in tar_iterator: + if "." 
in filename: + example_key, field_name = filename.split(".", 1) + if current_example and current_example["__key__"] != example_key: + yield current_example + current_example = {} + current_example["__key__"] = example_key + current_example["__url__"] = tar_path + current_example[field_name.lower()] = f.read() + if field_name in cls.DECODERS: + current_example[field_name] = cls.DECODERS[field_name](current_example[field_name]) + if current_example: + yield current_example + + def _info(self) -> datasets.DatasetInfo: + return datasets.DatasetInfo() + + def _split_generators(self, dl_manager): + """We handle string, list and dicts in datafiles""" + # Download the data files + if not self.config.data_files: + raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}") + data_files = dl_manager.download(self.config.data_files) + if isinstance(data_files, (str, list, tuple)): + tar_paths = data_files + if isinstance(tar_paths, str): + tar_paths = [tar_paths] + tar_iterators = [dl_manager.iter_archive(tar_path) for tar_path in tar_paths] + splits = [ + datasets.SplitGenerator( + name=datasets.Split.TRAIN, gen_kwargs={"tar_paths": tar_paths, "tar_iterators": tar_iterators} + ) + ] + else: + splits = [] + for split_name, tar_paths in data_files.items(): + if isinstance(tar_paths, str): + tar_paths = [tar_paths] + tar_iterators = [dl_manager.iter_archive(tar_path) for tar_path in tar_paths] + splits.append( + datasets.SplitGenerator( + name=split_name, gen_kwargs={"tar_paths": tar_paths, "tar_iterators": tar_iterators} + ) + ) + if not self.info.features: + # Get one example to get the feature types + pipeline = self._get_pipeline_from_tar(tar_paths[0], tar_iterators[0]) + first_examples = list(islice(pipeline, self.NUM_EXAMPLES_FOR_FEATURES_INFERENCE)) + if any(example.keys() != first_examples[0].keys() for example in first_examples): + raise ValueError( + "The TAR archives of the dataset should be in WebDataset format, " + 
"but the files in the archive don't share the same prefix or the same types." + ) + pa_tables = [pa.Table.from_pylist([example]) for example in first_examples] + if datasets.config.PYARROW_VERSION.major < 14: + inferred_arrow_schema = pa.concat_tables(pa_tables, promote=True).schema + else: + inferred_arrow_schema = pa.concat_tables(pa_tables, promote_options="default").schema + features = datasets.Features.from_arrow_schema(inferred_arrow_schema) + + # Set Image types + for field_name in first_examples[0]: + extension = field_name.rsplit(".", 1)[-1] + if extension in self.IMAGE_EXTENSIONS: + features[field_name] = datasets.Image() + # Set Audio types + for field_name in first_examples[0]: + extension = field_name.rsplit(".", 1)[-1] + if extension in self.AUDIO_EXTENSIONS: + features[field_name] = datasets.Audio() + self.info.features = features + + return splits + + def _generate_examples(self, tar_paths, tar_iterators): + image_field_names = [ + field_name for field_name, feature in self.info.features.items() if isinstance(feature, datasets.Image) + ] + audio_field_names = [ + field_name for field_name, feature in self.info.features.items() if isinstance(feature, datasets.Audio) + ] + for tar_idx, (tar_path, tar_iterator) in enumerate(zip(tar_paths, tar_iterators)): + for example_idx, example in enumerate(self._get_pipeline_from_tar(tar_path, tar_iterator)): + for field_name in image_field_names + audio_field_names: + example[field_name] = {"path": example["__key__"] + "." 
+ field_name, "bytes": example[field_name]} + yield f"{tar_idx}_{example_idx}", example + + +# Obtained with: +# ``` +# import PIL.Image +# IMAGE_EXTENSIONS = [] +# PIL.Image.init() +# for ext, format in PIL.Image.EXTENSION.items(): +# if format in PIL.Image.OPEN: +# IMAGE_EXTENSIONS.append(ext[1:]) +# ``` +# We intentionally do not run this code on launch because: +# (1) Pillow is an optional dependency, so importing Pillow in global namespace is not allowed +# (2) To ensure the list of supported extensions is deterministic +IMAGE_EXTENSIONS = [ + "blp", + "bmp", + "dib", + "bufr", + "cur", + "pcx", + "dcx", + "dds", + "ps", + "eps", + "fit", + "fits", + "fli", + "flc", + "ftc", + "ftu", + "gbr", + "gif", + "grib", + "h5", + "hdf", + "png", + "apng", + "jp2", + "j2k", + "jpc", + "jpf", + "jpx", + "j2c", + "icns", + "ico", + "im", + "iim", + "tif", + "tiff", + "jfif", + "jpe", + "jpg", + "jpeg", + "mpg", + "mpeg", + "msp", + "pcd", + "pxr", + "pbm", + "pgm", + "ppm", + "pnm", + "psd", + "bw", + "rgb", + "rgba", + "sgi", + "ras", + "tga", + "icb", + "vda", + "vst", + "webp", + "wmf", + "emf", + "xbm", + "xpm", +] +WebDataset.IMAGE_EXTENSIONS = IMAGE_EXTENSIONS + + +# Obtained with: +# ``` +# import soundfile as sf +# +# AUDIO_EXTENSIONS = [f".{format.lower()}" for format in sf.available_formats().keys()] +# +# # .mp3 is currently decoded via `torchaudio`, .opus decoding is supported if version of `libsndfile` >= 1.0.30: +# AUDIO_EXTENSIONS.extend([".mp3", ".opus"]) +# ``` +# We intentionally do not run this code on launch because: +# (1) Soundfile is an optional dependency, so importing it in global namespace is not allowed +# (2) To ensure the list of supported extensions is deterministic +AUDIO_EXTENSIONS = [ + "aiff", + "au", + "avr", + "caf", + "flac", + "htk", + "svx", + "mat4", + "mat5", + "mpc2k", + "ogg", + "paf", + "pvf", + "raw", + "rf64", + "sd2", + "sds", + "ircam", + "voc", + "w64", + "wav", + "nist", + "wavex", + "wve", + "xi", + "mp3", + "opus", +] 
+WebDataset.AUDIO_EXTENSIONS = AUDIO_EXTENSIONS + + +def text_loads(data: bytes): + return data.decode("utf-8") + + +def tenbin_loads(data: bytes): + from . import _tenbin + + return _tenbin.decode_buffer(data) + + +def msgpack_loads(data: bytes): + import msgpack + + return msgpack.unpackb(data) + + +def npy_loads(data: bytes): + import numpy.lib.format + + stream = io.BytesIO(data) + return numpy.lib.format.read_array(stream, allow_pickle=False) + + +def npz_loads(data: bytes): + return np.load(io.BytesIO(data), allow_pickle=False) + + +def cbor_loads(data: bytes): + import cbor + + return cbor.loads(data) + + +# Obtained by checking `decoders` in `webdataset.autodecode` +# and removing unsafe extension decoders. +# Removed Pickle decoders: +# - "pyd": lambda data: pickle.loads(data) +# - "pickle": lambda data: pickle.loads(data) +# Removed Torch decoders: +# - "pth": lambda data: torch_loads(data) +# Modified NumPy decoders to fix CVE-2019-6446 (add allow_pickle=False): +# - "npy": npy_loads, +# - "npz": lambda data: np.load(io.BytesIO(data)), +DECODERS = { + "txt": text_loads, + "text": text_loads, + "transcript": text_loads, + "cls": int, + "cls2": int, + "index": int, + "inx": int, + "id": int, + "json": json.loads, + "jsn": json.loads, + "ten": tenbin_loads, + "tb": tenbin_loads, + "mp": msgpack_loads, + "msg": msgpack_loads, + "npy": npy_loads, + "npz": npz_loads, + "cbor": cbor_loads, +} +WebDataset.DECODERS = DECODERS diff --git a/mantis_evalkit/lib/python3.10/site-packages/numpy/__config__.pyi b/mantis_evalkit/lib/python3.10/site-packages/numpy/__config__.pyi new file mode 100644 index 0000000000000000000000000000000000000000..bd01228a1cc85745bc08842c96c518621e4160c6 --- /dev/null +++ b/mantis_evalkit/lib/python3.10/site-packages/numpy/__config__.pyi @@ -0,0 +1,102 @@ +from enum import Enum +from types import ModuleType +from typing import Final, Literal as L, TypedDict, overload, type_check_only +from typing_extensions import NotRequired + 
+_CompilerConfigDictValue = TypedDict( + "_CompilerConfigDictValue", + { + "name": str, + "linker": str, + "version": str, + "commands": str, + "args": str, + "linker args": str, + }, +) +_CompilerConfigDict = TypedDict( + "_CompilerConfigDict", + { + "c": _CompilerConfigDictValue, + "cython": _CompilerConfigDictValue, + "c++": _CompilerConfigDictValue, + }, +) +_MachineInformationDict = TypedDict( + "_MachineInformationDict", + { + "host":_MachineInformationDictValue, + "build": _MachineInformationDictValue, + "cross-compiled": NotRequired[L[True]], + }, +) + +@type_check_only +class _MachineInformationDictValue(TypedDict): + cpu: str + family: str + endian: L["little", "big"] + system: str + +_BuildDependenciesDictValue = TypedDict( + "_BuildDependenciesDictValue", + { + "name": str, + "found": NotRequired[L[True]], + "version": str, + "include directory": str, + "lib directory": str, + "openblas configuration": str, + "pc file directory": str, + }, +) + +class _BuildDependenciesDict(TypedDict): + blas: _BuildDependenciesDictValue + lapack: _BuildDependenciesDictValue + +class _PythonInformationDict(TypedDict): + path: str + version: str + +_SIMDExtensionsDict = TypedDict( + "_SIMDExtensionsDict", + { + "baseline": list[str], + "found": list[str], + "not found": list[str], + }, +) + +_ConfigDict = TypedDict( + "_ConfigDict", + { + "Compilers": _CompilerConfigDict, + "Machine Information": _MachineInformationDict, + "Build Dependencies": _BuildDependenciesDict, + "Python Information": _PythonInformationDict, + "SIMD Extensions": _SIMDExtensionsDict, + }, +) + +### + +__all__ = ["show_config"] + +CONFIG: Final[_ConfigDict] = ... + +class DisplayModes(Enum): + stdout = "stdout" + dicts = "dicts" + +def _check_pyyaml() -> ModuleType: ... + +@overload +def show(mode: L["stdout"] = "stdout") -> None: ... +@overload +def show(mode: L["dicts"]) -> _ConfigDict: ... + +@overload +def show_config(mode: L["stdout"] = "stdout") -> None: ... 
+@overload +def show_config(mode: L["dicts"]) -> _ConfigDict: ... diff --git a/mantis_evalkit/lib/python3.10/site-packages/numpy/__init__.cython-30.pxd b/mantis_evalkit/lib/python3.10/site-packages/numpy/__init__.cython-30.pxd new file mode 100644 index 0000000000000000000000000000000000000000..e35cef5fa1a821f8298626a9145836b729713448 --- /dev/null +++ b/mantis_evalkit/lib/python3.10/site-packages/numpy/__init__.cython-30.pxd @@ -0,0 +1,1243 @@ +# NumPy static imports for Cython >= 3.0 +# +# If any of the PyArray_* functions are called, import_array must be +# called first. This is done automatically by Cython 3.0+ if a call +# is not detected inside of the module. +# +# Author: Dag Sverre Seljebotn +# + +from cpython.ref cimport Py_INCREF +from cpython.object cimport PyObject, PyTypeObject, PyObject_TypeCheck +cimport libc.stdio as stdio + + +cdef extern from *: + # Leave a marker that the NumPy declarations came from NumPy itself and not from Cython. + # See https://github.com/cython/cython/issues/3573 + """ + /* Using NumPy API declarations from "numpy/__init__.cython-30.pxd" */ + """ + + +cdef extern from "numpy/arrayobject.h": + # It would be nice to use size_t and ssize_t, but ssize_t has special + # implicit conversion rules, so just use "long". + # Note: The actual type only matters for Cython promotion, so long + # is closer than int, but could lead to incorrect promotion. + # (Not to worrying, and always the status-quo.) 
+ ctypedef signed long npy_intp + ctypedef unsigned long npy_uintp + + ctypedef unsigned char npy_bool + + ctypedef signed char npy_byte + ctypedef signed short npy_short + ctypedef signed int npy_int + ctypedef signed long npy_long + ctypedef signed long long npy_longlong + + ctypedef unsigned char npy_ubyte + ctypedef unsigned short npy_ushort + ctypedef unsigned int npy_uint + ctypedef unsigned long npy_ulong + ctypedef unsigned long long npy_ulonglong + + ctypedef float npy_float + ctypedef double npy_double + ctypedef long double npy_longdouble + + ctypedef signed char npy_int8 + ctypedef signed short npy_int16 + ctypedef signed int npy_int32 + ctypedef signed long long npy_int64 + ctypedef signed long long npy_int96 + ctypedef signed long long npy_int128 + + ctypedef unsigned char npy_uint8 + ctypedef unsigned short npy_uint16 + ctypedef unsigned int npy_uint32 + ctypedef unsigned long long npy_uint64 + ctypedef unsigned long long npy_uint96 + ctypedef unsigned long long npy_uint128 + + ctypedef float npy_float32 + ctypedef double npy_float64 + ctypedef long double npy_float80 + ctypedef long double npy_float96 + ctypedef long double npy_float128 + + ctypedef struct npy_cfloat: + pass + + ctypedef struct npy_cdouble: + pass + + ctypedef struct npy_clongdouble: + pass + + ctypedef struct npy_complex64: + pass + + ctypedef struct npy_complex128: + pass + + ctypedef struct npy_complex160: + pass + + ctypedef struct npy_complex192: + pass + + ctypedef struct npy_complex256: + pass + + ctypedef struct PyArray_Dims: + npy_intp *ptr + int len + + + cdef enum NPY_TYPES: + NPY_BOOL + NPY_BYTE + NPY_UBYTE + NPY_SHORT + NPY_USHORT + NPY_INT + NPY_UINT + NPY_LONG + NPY_ULONG + NPY_LONGLONG + NPY_ULONGLONG + NPY_FLOAT + NPY_DOUBLE + NPY_LONGDOUBLE + NPY_CFLOAT + NPY_CDOUBLE + NPY_CLONGDOUBLE + NPY_OBJECT + NPY_STRING + NPY_UNICODE + NPY_VOID + NPY_DATETIME + NPY_TIMEDELTA + NPY_NTYPES_LEGACY + NPY_NOTYPE + + NPY_INT8 + NPY_INT16 + NPY_INT32 + NPY_INT64 + NPY_INT128 + 
NPY_INT256 + NPY_UINT8 + NPY_UINT16 + NPY_UINT32 + NPY_UINT64 + NPY_UINT128 + NPY_UINT256 + NPY_FLOAT16 + NPY_FLOAT32 + NPY_FLOAT64 + NPY_FLOAT80 + NPY_FLOAT96 + NPY_FLOAT128 + NPY_FLOAT256 + NPY_COMPLEX32 + NPY_COMPLEX64 + NPY_COMPLEX128 + NPY_COMPLEX160 + NPY_COMPLEX192 + NPY_COMPLEX256 + NPY_COMPLEX512 + + NPY_INTP + NPY_UINTP + NPY_DEFAULT_INT # Not a compile time constant (normally)! + + ctypedef enum NPY_ORDER: + NPY_ANYORDER + NPY_CORDER + NPY_FORTRANORDER + NPY_KEEPORDER + + ctypedef enum NPY_CASTING: + NPY_NO_CASTING + NPY_EQUIV_CASTING + NPY_SAFE_CASTING + NPY_SAME_KIND_CASTING + NPY_UNSAFE_CASTING + + ctypedef enum NPY_CLIPMODE: + NPY_CLIP + NPY_WRAP + NPY_RAISE + + ctypedef enum NPY_SCALARKIND: + NPY_NOSCALAR, + NPY_BOOL_SCALAR, + NPY_INTPOS_SCALAR, + NPY_INTNEG_SCALAR, + NPY_FLOAT_SCALAR, + NPY_COMPLEX_SCALAR, + NPY_OBJECT_SCALAR + + ctypedef enum NPY_SORTKIND: + NPY_QUICKSORT + NPY_HEAPSORT + NPY_MERGESORT + + ctypedef enum NPY_SEARCHSIDE: + NPY_SEARCHLEFT + NPY_SEARCHRIGHT + + enum: + # DEPRECATED since NumPy 1.7 ! Do not use in new code! + NPY_C_CONTIGUOUS + NPY_F_CONTIGUOUS + NPY_CONTIGUOUS + NPY_FORTRAN + NPY_OWNDATA + NPY_FORCECAST + NPY_ENSURECOPY + NPY_ENSUREARRAY + NPY_ELEMENTSTRIDES + NPY_ALIGNED + NPY_NOTSWAPPED + NPY_WRITEABLE + NPY_ARR_HAS_DESCR + + NPY_BEHAVED + NPY_BEHAVED_NS + NPY_CARRAY + NPY_CARRAY_RO + NPY_FARRAY + NPY_FARRAY_RO + NPY_DEFAULT + + NPY_IN_ARRAY + NPY_OUT_ARRAY + NPY_INOUT_ARRAY + NPY_IN_FARRAY + NPY_OUT_FARRAY + NPY_INOUT_FARRAY + + NPY_UPDATE_ALL + + enum: + # Added in NumPy 1.7 to replace the deprecated enums above. 
+ NPY_ARRAY_C_CONTIGUOUS + NPY_ARRAY_F_CONTIGUOUS + NPY_ARRAY_OWNDATA + NPY_ARRAY_FORCECAST + NPY_ARRAY_ENSURECOPY + NPY_ARRAY_ENSUREARRAY + NPY_ARRAY_ELEMENTSTRIDES + NPY_ARRAY_ALIGNED + NPY_ARRAY_NOTSWAPPED + NPY_ARRAY_WRITEABLE + NPY_ARRAY_WRITEBACKIFCOPY + + NPY_ARRAY_BEHAVED + NPY_ARRAY_BEHAVED_NS + NPY_ARRAY_CARRAY + NPY_ARRAY_CARRAY_RO + NPY_ARRAY_FARRAY + NPY_ARRAY_FARRAY_RO + NPY_ARRAY_DEFAULT + + NPY_ARRAY_IN_ARRAY + NPY_ARRAY_OUT_ARRAY + NPY_ARRAY_INOUT_ARRAY + NPY_ARRAY_IN_FARRAY + NPY_ARRAY_OUT_FARRAY + NPY_ARRAY_INOUT_FARRAY + + NPY_ARRAY_UPDATE_ALL + + cdef enum: + NPY_MAXDIMS # 64 on NumPy 2.x and 32 on NumPy 1.x + NPY_RAVEL_AXIS # Used for functions like PyArray_Mean + + ctypedef void (*PyArray_VectorUnaryFunc)(void *, void *, npy_intp, void *, void *) + + ctypedef struct PyArray_ArrayDescr: + # shape is a tuple, but Cython doesn't support "tuple shape" + # inside a non-PyObject declaration, so we have to declare it + # as just a PyObject*. + PyObject* shape + + ctypedef struct PyArray_Descr: + pass + + ctypedef class numpy.dtype [object PyArray_Descr, check_size ignore]: + # Use PyDataType_* macros when possible, however there are no macros + # for accessing some of the fields, so some are defined. + cdef PyTypeObject* typeobj + cdef char kind + cdef char type + # Numpy sometimes mutates this without warning (e.g. it'll + # sometimes change "|" to "<" in shared dtype objects on + # little-endian machines). If this matters to you, use + # PyArray_IsNativeByteOrder(dtype.byteorder) instead of + # directly accessing this field. + cdef char byteorder + cdef int type_num + + @property + cdef inline npy_intp itemsize(self) noexcept nogil: + return PyDataType_ELSIZE(self) + + @property + cdef inline npy_intp alignment(self) noexcept nogil: + return PyDataType_ALIGNMENT(self) + + # Use fields/names with care as they may be NULL. You must check + # for this using PyDataType_HASFIELDS. 
+ @property + cdef inline object fields(self): + return PyDataType_FIELDS(self) + + @property + cdef inline tuple names(self): + return PyDataType_NAMES(self) + + # Use PyDataType_HASSUBARRAY to test whether this field is + # valid (the pointer can be NULL). Most users should access + # this field via the inline helper method PyDataType_SHAPE. + @property + cdef inline PyArray_ArrayDescr* subarray(self) noexcept nogil: + return PyDataType_SUBARRAY(self) + + @property + cdef inline npy_uint64 flags(self) noexcept nogil: + """The data types flags.""" + return PyDataType_FLAGS(self) + + + ctypedef class numpy.flatiter [object PyArrayIterObject, check_size ignore]: + # Use through macros + pass + + ctypedef class numpy.broadcast [object PyArrayMultiIterObject, check_size ignore]: + + @property + cdef inline int numiter(self) noexcept nogil: + """The number of arrays that need to be broadcast to the same shape.""" + return PyArray_MultiIter_NUMITER(self) + + @property + cdef inline npy_intp size(self) noexcept nogil: + """The total broadcasted size.""" + return PyArray_MultiIter_SIZE(self) + + @property + cdef inline npy_intp index(self) noexcept nogil: + """The current (1-d) index into the broadcasted result.""" + return PyArray_MultiIter_INDEX(self) + + @property + cdef inline int nd(self) noexcept nogil: + """The number of dimensions in the broadcasted result.""" + return PyArray_MultiIter_NDIM(self) + + @property + cdef inline npy_intp* dimensions(self) noexcept nogil: + """The shape of the broadcasted result.""" + return PyArray_MultiIter_DIMS(self) + + @property + cdef inline void** iters(self) noexcept nogil: + """An array of iterator objects that holds the iterators for the arrays to be broadcast together. + On return, the iterators are adjusted for broadcasting.""" + return PyArray_MultiIter_ITERS(self) + + + ctypedef struct PyArrayObject: + # For use in situations where ndarray can't replace PyArrayObject*, + # like PyArrayObject**. 
+ pass + + ctypedef class numpy.ndarray [object PyArrayObject, check_size ignore]: + cdef __cythonbufferdefaults__ = {"mode": "strided"} + + # NOTE: no field declarations since direct access is deprecated since NumPy 1.7 + # Instead, we use properties that map to the corresponding C-API functions. + + @property + cdef inline PyObject* base(self) noexcept nogil: + """Returns a borrowed reference to the object owning the data/memory. + """ + return PyArray_BASE(self) + + @property + cdef inline dtype descr(self): + """Returns an owned reference to the dtype of the array. + """ + return PyArray_DESCR(self) + + @property + cdef inline int ndim(self) noexcept nogil: + """Returns the number of dimensions in the array. + """ + return PyArray_NDIM(self) + + @property + cdef inline npy_intp *shape(self) noexcept nogil: + """Returns a pointer to the dimensions/shape of the array. + The number of elements matches the number of dimensions of the array (ndim). + Can return NULL for 0-dimensional arrays. + """ + return PyArray_DIMS(self) + + @property + cdef inline npy_intp *strides(self) noexcept nogil: + """Returns a pointer to the strides of the array. + The number of elements matches the number of dimensions of the array (ndim). + """ + return PyArray_STRIDES(self) + + @property + cdef inline npy_intp size(self) noexcept nogil: + """Returns the total size (in number of elements) of the array. + """ + return PyArray_SIZE(self) + + @property + cdef inline char* data(self) noexcept nogil: + """The pointer to the data buffer as a char*. + This is provided for legacy reasons to avoid direct struct field access. + For new code that needs this access, you probably want to cast the result + of `PyArray_DATA()` instead, which returns a 'void*'. + """ + return PyArray_BYTES(self) + + + int _import_array() except -1 + # A second definition so _import_array isn't marked as used when we use it here. + # Do not use - subject to change any time. 
+ int __pyx_import_array "_import_array"() except -1 + + # + # Macros from ndarrayobject.h + # + bint PyArray_CHKFLAGS(ndarray m, int flags) nogil + bint PyArray_IS_C_CONTIGUOUS(ndarray arr) nogil + bint PyArray_IS_F_CONTIGUOUS(ndarray arr) nogil + bint PyArray_ISCONTIGUOUS(ndarray m) nogil + bint PyArray_ISWRITEABLE(ndarray m) nogil + bint PyArray_ISALIGNED(ndarray m) nogil + + int PyArray_NDIM(ndarray) nogil + bint PyArray_ISONESEGMENT(ndarray) nogil + bint PyArray_ISFORTRAN(ndarray) nogil + int PyArray_FORTRANIF(ndarray) nogil + + void* PyArray_DATA(ndarray) nogil + char* PyArray_BYTES(ndarray) nogil + + npy_intp* PyArray_DIMS(ndarray) nogil + npy_intp* PyArray_STRIDES(ndarray) nogil + npy_intp PyArray_DIM(ndarray, size_t) nogil + npy_intp PyArray_STRIDE(ndarray, size_t) nogil + + PyObject *PyArray_BASE(ndarray) nogil # returns borrowed reference! + PyArray_Descr *PyArray_DESCR(ndarray) nogil # returns borrowed reference to dtype! + PyArray_Descr *PyArray_DTYPE(ndarray) nogil # returns borrowed reference to dtype! NP 1.7+ alias for descr. 
+ int PyArray_FLAGS(ndarray) nogil + void PyArray_CLEARFLAGS(ndarray, int flags) nogil # Added in NumPy 1.7 + void PyArray_ENABLEFLAGS(ndarray, int flags) nogil # Added in NumPy 1.7 + npy_intp PyArray_ITEMSIZE(ndarray) nogil + int PyArray_TYPE(ndarray arr) nogil + + object PyArray_GETITEM(ndarray arr, void *itemptr) + int PyArray_SETITEM(ndarray arr, void *itemptr, object obj) except -1 + + bint PyTypeNum_ISBOOL(int) nogil + bint PyTypeNum_ISUNSIGNED(int) nogil + bint PyTypeNum_ISSIGNED(int) nogil + bint PyTypeNum_ISINTEGER(int) nogil + bint PyTypeNum_ISFLOAT(int) nogil + bint PyTypeNum_ISNUMBER(int) nogil + bint PyTypeNum_ISSTRING(int) nogil + bint PyTypeNum_ISCOMPLEX(int) nogil + bint PyTypeNum_ISFLEXIBLE(int) nogil + bint PyTypeNum_ISUSERDEF(int) nogil + bint PyTypeNum_ISEXTENDED(int) nogil + bint PyTypeNum_ISOBJECT(int) nogil + + npy_intp PyDataType_ELSIZE(dtype) nogil + npy_intp PyDataType_ALIGNMENT(dtype) nogil + PyObject* PyDataType_METADATA(dtype) nogil + PyArray_ArrayDescr* PyDataType_SUBARRAY(dtype) nogil + PyObject* PyDataType_NAMES(dtype) nogil + PyObject* PyDataType_FIELDS(dtype) nogil + + bint PyDataType_ISBOOL(dtype) nogil + bint PyDataType_ISUNSIGNED(dtype) nogil + bint PyDataType_ISSIGNED(dtype) nogil + bint PyDataType_ISINTEGER(dtype) nogil + bint PyDataType_ISFLOAT(dtype) nogil + bint PyDataType_ISNUMBER(dtype) nogil + bint PyDataType_ISSTRING(dtype) nogil + bint PyDataType_ISCOMPLEX(dtype) nogil + bint PyDataType_ISFLEXIBLE(dtype) nogil + bint PyDataType_ISUSERDEF(dtype) nogil + bint PyDataType_ISEXTENDED(dtype) nogil + bint PyDataType_ISOBJECT(dtype) nogil + bint PyDataType_HASFIELDS(dtype) nogil + bint PyDataType_HASSUBARRAY(dtype) nogil + npy_uint64 PyDataType_FLAGS(dtype) nogil + + bint PyArray_ISBOOL(ndarray) nogil + bint PyArray_ISUNSIGNED(ndarray) nogil + bint PyArray_ISSIGNED(ndarray) nogil + bint PyArray_ISINTEGER(ndarray) nogil + bint PyArray_ISFLOAT(ndarray) nogil + bint PyArray_ISNUMBER(ndarray) nogil + bint PyArray_ISSTRING(ndarray) 
nogil + bint PyArray_ISCOMPLEX(ndarray) nogil + bint PyArray_ISFLEXIBLE(ndarray) nogil + bint PyArray_ISUSERDEF(ndarray) nogil + bint PyArray_ISEXTENDED(ndarray) nogil + bint PyArray_ISOBJECT(ndarray) nogil + bint PyArray_HASFIELDS(ndarray) nogil + + bint PyArray_ISVARIABLE(ndarray) nogil + + bint PyArray_SAFEALIGNEDCOPY(ndarray) nogil + bint PyArray_ISNBO(char) nogil # works on ndarray.byteorder + bint PyArray_IsNativeByteOrder(char) nogil # works on ndarray.byteorder + bint PyArray_ISNOTSWAPPED(ndarray) nogil + bint PyArray_ISBYTESWAPPED(ndarray) nogil + + bint PyArray_FLAGSWAP(ndarray, int) nogil + + bint PyArray_ISCARRAY(ndarray) nogil + bint PyArray_ISCARRAY_RO(ndarray) nogil + bint PyArray_ISFARRAY(ndarray) nogil + bint PyArray_ISFARRAY_RO(ndarray) nogil + bint PyArray_ISBEHAVED(ndarray) nogil + bint PyArray_ISBEHAVED_RO(ndarray) nogil + + + bint PyDataType_ISNOTSWAPPED(dtype) nogil + bint PyDataType_ISBYTESWAPPED(dtype) nogil + + bint PyArray_DescrCheck(object) + + bint PyArray_Check(object) + bint PyArray_CheckExact(object) + + # Cannot be supported due to out arg: + # bint PyArray_HasArrayInterfaceType(object, dtype, object, object&) + # bint PyArray_HasArrayInterface(op, out) + + + bint PyArray_IsZeroDim(object) + # Cannot be supported due to ## ## in macro: + # bint PyArray_IsScalar(object, verbatim work) + bint PyArray_CheckScalar(object) + bint PyArray_IsPythonNumber(object) + bint PyArray_IsPythonScalar(object) + bint PyArray_IsAnyScalar(object) + bint PyArray_CheckAnyScalar(object) + + ndarray PyArray_GETCONTIGUOUS(ndarray) + bint PyArray_SAMESHAPE(ndarray, ndarray) nogil + npy_intp PyArray_SIZE(ndarray) nogil + npy_intp PyArray_NBYTES(ndarray) nogil + + object PyArray_FROM_O(object) + object PyArray_FROM_OF(object m, int flags) + object PyArray_FROM_OT(object m, int type) + object PyArray_FROM_OTF(object m, int type, int flags) + object PyArray_FROMANY(object m, int type, int min, int max, int flags) + object PyArray_ZEROS(int nd, npy_intp* dims, 
int type, int fortran) + object PyArray_EMPTY(int nd, npy_intp* dims, int type, int fortran) + void PyArray_FILLWBYTE(ndarray, int val) + object PyArray_ContiguousFromAny(op, int, int min_depth, int max_depth) + unsigned char PyArray_EquivArrTypes(ndarray a1, ndarray a2) + bint PyArray_EquivByteorders(int b1, int b2) nogil + object PyArray_SimpleNew(int nd, npy_intp* dims, int typenum) + object PyArray_SimpleNewFromData(int nd, npy_intp* dims, int typenum, void* data) + #object PyArray_SimpleNewFromDescr(int nd, npy_intp* dims, dtype descr) + object PyArray_ToScalar(void* data, ndarray arr) + + void* PyArray_GETPTR1(ndarray m, npy_intp i) nogil + void* PyArray_GETPTR2(ndarray m, npy_intp i, npy_intp j) nogil + void* PyArray_GETPTR3(ndarray m, npy_intp i, npy_intp j, npy_intp k) nogil + void* PyArray_GETPTR4(ndarray m, npy_intp i, npy_intp j, npy_intp k, npy_intp l) nogil + + # Cannot be supported due to out arg + # void PyArray_DESCR_REPLACE(descr) + + + object PyArray_Copy(ndarray) + object PyArray_FromObject(object op, int type, int min_depth, int max_depth) + object PyArray_ContiguousFromObject(object op, int type, int min_depth, int max_depth) + object PyArray_CopyFromObject(object op, int type, int min_depth, int max_depth) + + object PyArray_Cast(ndarray mp, int type_num) + object PyArray_Take(ndarray ap, object items, int axis) + object PyArray_Put(ndarray ap, object items, object values) + + void PyArray_ITER_RESET(flatiter it) nogil + void PyArray_ITER_NEXT(flatiter it) nogil + void PyArray_ITER_GOTO(flatiter it, npy_intp* destination) nogil + void PyArray_ITER_GOTO1D(flatiter it, npy_intp ind) nogil + void* PyArray_ITER_DATA(flatiter it) nogil + bint PyArray_ITER_NOTDONE(flatiter it) nogil + + void PyArray_MultiIter_RESET(broadcast multi) nogil + void PyArray_MultiIter_NEXT(broadcast multi) nogil + void PyArray_MultiIter_GOTO(broadcast multi, npy_intp dest) nogil + void PyArray_MultiIter_GOTO1D(broadcast multi, npy_intp ind) nogil + void* 
PyArray_MultiIter_DATA(broadcast multi, npy_intp i) nogil + void PyArray_MultiIter_NEXTi(broadcast multi, npy_intp i) nogil + bint PyArray_MultiIter_NOTDONE(broadcast multi) nogil + npy_intp PyArray_MultiIter_SIZE(broadcast multi) nogil + int PyArray_MultiIter_NDIM(broadcast multi) nogil + npy_intp PyArray_MultiIter_INDEX(broadcast multi) nogil + int PyArray_MultiIter_NUMITER(broadcast multi) nogil + npy_intp* PyArray_MultiIter_DIMS(broadcast multi) nogil + void** PyArray_MultiIter_ITERS(broadcast multi) nogil + + # Functions from __multiarray_api.h + + # Functions taking dtype and returning object/ndarray are disabled + # for now as they steal dtype references. I'm conservative and disable + # more than is probably needed until it can be checked further. + int PyArray_INCREF (ndarray) except * # uses PyArray_Item_INCREF... + int PyArray_XDECREF (ndarray) except * # uses PyArray_Item_DECREF... + dtype PyArray_DescrFromType (int) + object PyArray_TypeObjectFromType (int) + char * PyArray_Zero (ndarray) + char * PyArray_One (ndarray) + #object PyArray_CastToType (ndarray, dtype, int) + int PyArray_CanCastSafely (int, int) # writes errors + npy_bool PyArray_CanCastTo (dtype, dtype) # writes errors + int PyArray_ObjectType (object, int) except 0 + dtype PyArray_DescrFromObject (object, dtype) + #ndarray* PyArray_ConvertToCommonType (object, int *) + dtype PyArray_DescrFromScalar (object) + dtype PyArray_DescrFromTypeObject (object) + npy_intp PyArray_Size (object) + #object PyArray_Scalar (void *, dtype, object) + #object PyArray_FromScalar (object, dtype) + void PyArray_ScalarAsCtype (object, void *) + #int PyArray_CastScalarToCtype (object, void *, dtype) + #int PyArray_CastScalarDirect (object, dtype, void *, int) + #PyArray_VectorUnaryFunc * PyArray_GetCastFunc (dtype, int) + #object PyArray_FromAny (object, dtype, int, int, int, object) + object PyArray_EnsureArray (object) + object PyArray_EnsureAnyArray (object) + #object PyArray_FromFile (stdio.FILE *, dtype, 
npy_intp, char *) + #object PyArray_FromString (char *, npy_intp, dtype, npy_intp, char *) + #object PyArray_FromBuffer (object, dtype, npy_intp, npy_intp) + #object PyArray_FromIter (object, dtype, npy_intp) + object PyArray_Return (ndarray) + #object PyArray_GetField (ndarray, dtype, int) + #int PyArray_SetField (ndarray, dtype, int, object) except -1 + object PyArray_Byteswap (ndarray, npy_bool) + object PyArray_Resize (ndarray, PyArray_Dims *, int, NPY_ORDER) + int PyArray_CopyInto (ndarray, ndarray) except -1 + int PyArray_CopyAnyInto (ndarray, ndarray) except -1 + int PyArray_CopyObject (ndarray, object) except -1 + object PyArray_NewCopy (ndarray, NPY_ORDER) + object PyArray_ToList (ndarray) + object PyArray_ToString (ndarray, NPY_ORDER) + int PyArray_ToFile (ndarray, stdio.FILE *, char *, char *) except -1 + int PyArray_Dump (object, object, int) except -1 + object PyArray_Dumps (object, int) + int PyArray_ValidType (int) # Cannot error + void PyArray_UpdateFlags (ndarray, int) + object PyArray_New (type, int, npy_intp *, int, npy_intp *, void *, int, int, object) + #object PyArray_NewFromDescr (type, dtype, int, npy_intp *, npy_intp *, void *, int, object) + #dtype PyArray_DescrNew (dtype) + dtype PyArray_DescrNewFromType (int) + double PyArray_GetPriority (object, double) # clears errors as of 1.25 + object PyArray_IterNew (object) + object PyArray_MultiIterNew (int, ...) + + int PyArray_PyIntAsInt (object) except? 
-1 + npy_intp PyArray_PyIntAsIntp (object) + int PyArray_Broadcast (broadcast) except -1 + int PyArray_FillWithScalar (ndarray, object) except -1 + npy_bool PyArray_CheckStrides (int, int, npy_intp, npy_intp, npy_intp *, npy_intp *) + dtype PyArray_DescrNewByteorder (dtype, char) + object PyArray_IterAllButAxis (object, int *) + #object PyArray_CheckFromAny (object, dtype, int, int, int, object) + #object PyArray_FromArray (ndarray, dtype, int) + object PyArray_FromInterface (object) + object PyArray_FromStructInterface (object) + #object PyArray_FromArrayAttr (object, dtype, object) + #NPY_SCALARKIND PyArray_ScalarKind (int, ndarray*) + int PyArray_CanCoerceScalar (int, int, NPY_SCALARKIND) + npy_bool PyArray_CanCastScalar (type, type) + int PyArray_RemoveSmallest (broadcast) except -1 + int PyArray_ElementStrides (object) + void PyArray_Item_INCREF (char *, dtype) except * + void PyArray_Item_XDECREF (char *, dtype) except * + object PyArray_Transpose (ndarray, PyArray_Dims *) + object PyArray_TakeFrom (ndarray, object, int, ndarray, NPY_CLIPMODE) + object PyArray_PutTo (ndarray, object, object, NPY_CLIPMODE) + object PyArray_PutMask (ndarray, object, object) + object PyArray_Repeat (ndarray, object, int) + object PyArray_Choose (ndarray, object, ndarray, NPY_CLIPMODE) + int PyArray_Sort (ndarray, int, NPY_SORTKIND) except -1 + object PyArray_ArgSort (ndarray, int, NPY_SORTKIND) + object PyArray_SearchSorted (ndarray, object, NPY_SEARCHSIDE, PyObject *) + object PyArray_ArgMax (ndarray, int, ndarray) + object PyArray_ArgMin (ndarray, int, ndarray) + object PyArray_Reshape (ndarray, object) + object PyArray_Newshape (ndarray, PyArray_Dims *, NPY_ORDER) + object PyArray_Squeeze (ndarray) + #object PyArray_View (ndarray, dtype, type) + object PyArray_SwapAxes (ndarray, int, int) + object PyArray_Max (ndarray, int, ndarray) + object PyArray_Min (ndarray, int, ndarray) + object PyArray_Ptp (ndarray, int, ndarray) + object PyArray_Mean (ndarray, int, int, ndarray) + 
object PyArray_Trace (ndarray, int, int, int, int, ndarray) + object PyArray_Diagonal (ndarray, int, int, int) + object PyArray_Clip (ndarray, object, object, ndarray) + object PyArray_Conjugate (ndarray, ndarray) + object PyArray_Nonzero (ndarray) + object PyArray_Std (ndarray, int, int, ndarray, int) + object PyArray_Sum (ndarray, int, int, ndarray) + object PyArray_CumSum (ndarray, int, int, ndarray) + object PyArray_Prod (ndarray, int, int, ndarray) + object PyArray_CumProd (ndarray, int, int, ndarray) + object PyArray_All (ndarray, int, ndarray) + object PyArray_Any (ndarray, int, ndarray) + object PyArray_Compress (ndarray, object, int, ndarray) + object PyArray_Flatten (ndarray, NPY_ORDER) + object PyArray_Ravel (ndarray, NPY_ORDER) + npy_intp PyArray_MultiplyList (npy_intp *, int) + int PyArray_MultiplyIntList (int *, int) + void * PyArray_GetPtr (ndarray, npy_intp*) + int PyArray_CompareLists (npy_intp *, npy_intp *, int) + #int PyArray_AsCArray (object*, void *, npy_intp *, int, dtype) + int PyArray_Free (object, void *) + #int PyArray_Converter (object, object*) + int PyArray_IntpFromSequence (object, npy_intp *, int) except -1 + object PyArray_Concatenate (object, int) + object PyArray_InnerProduct (object, object) + object PyArray_MatrixProduct (object, object) + object PyArray_Correlate (object, object, int) + #int PyArray_DescrConverter (object, dtype*) except 0 + #int PyArray_DescrConverter2 (object, dtype*) except 0 + int PyArray_IntpConverter (object, PyArray_Dims *) except 0 + #int PyArray_BufferConverter (object, chunk) except 0 + int PyArray_AxisConverter (object, int *) except 0 + int PyArray_BoolConverter (object, npy_bool *) except 0 + int PyArray_ByteorderConverter (object, char *) except 0 + int PyArray_OrderConverter (object, NPY_ORDER *) except 0 + unsigned char PyArray_EquivTypes (dtype, dtype) # clears errors + #object PyArray_Zeros (int, npy_intp *, dtype, int) + #object PyArray_Empty (int, npy_intp *, dtype, int) + object 
PyArray_Where (object, object, object) + object PyArray_Arange (double, double, double, int) + #object PyArray_ArangeObj (object, object, object, dtype) + int PyArray_SortkindConverter (object, NPY_SORTKIND *) except 0 + object PyArray_LexSort (object, int) + object PyArray_Round (ndarray, int, ndarray) + unsigned char PyArray_EquivTypenums (int, int) + int PyArray_RegisterDataType (dtype) except -1 + int PyArray_RegisterCastFunc (dtype, int, PyArray_VectorUnaryFunc *) except -1 + int PyArray_RegisterCanCast (dtype, int, NPY_SCALARKIND) except -1 + #void PyArray_InitArrFuncs (PyArray_ArrFuncs *) + object PyArray_IntTupleFromIntp (int, npy_intp *) + int PyArray_ClipmodeConverter (object, NPY_CLIPMODE *) except 0 + #int PyArray_OutputConverter (object, ndarray*) except 0 + object PyArray_BroadcastToShape (object, npy_intp *, int) + #int PyArray_DescrAlignConverter (object, dtype*) except 0 + #int PyArray_DescrAlignConverter2 (object, dtype*) except 0 + int PyArray_SearchsideConverter (object, void *) except 0 + object PyArray_CheckAxis (ndarray, int *, int) + npy_intp PyArray_OverflowMultiplyList (npy_intp *, int) + int PyArray_SetBaseObject(ndarray, base) except -1 # NOTE: steals a reference to base! Use "set_array_base()" instead. + + # The memory handler functions require the NumPy 1.22 API + # and may require defining NPY_TARGET_VERSION + ctypedef struct PyDataMemAllocator: + void *ctx + void* (*malloc) (void *ctx, size_t size) + void* (*calloc) (void *ctx, size_t nelem, size_t elsize) + void* (*realloc) (void *ctx, void *ptr, size_t new_size) + void (*free) (void *ctx, void *ptr, size_t size) + + ctypedef struct PyDataMem_Handler: + char* name + npy_uint8 version + PyDataMemAllocator allocator + + object PyDataMem_SetHandler(object handler) + object PyDataMem_GetHandler() + + # additional datetime related functions are defined below + + +# Typedefs that matches the runtime dtype objects in +# the numpy module. 
+ +# The ones that are commented out needs an IFDEF function +# in Cython to enable them only on the right systems. + +ctypedef npy_int8 int8_t +ctypedef npy_int16 int16_t +ctypedef npy_int32 int32_t +ctypedef npy_int64 int64_t +#ctypedef npy_int96 int96_t +#ctypedef npy_int128 int128_t + +ctypedef npy_uint8 uint8_t +ctypedef npy_uint16 uint16_t +ctypedef npy_uint32 uint32_t +ctypedef npy_uint64 uint64_t +#ctypedef npy_uint96 uint96_t +#ctypedef npy_uint128 uint128_t + +ctypedef npy_float32 float32_t +ctypedef npy_float64 float64_t +#ctypedef npy_float80 float80_t +#ctypedef npy_float128 float128_t + +ctypedef float complex complex64_t +ctypedef double complex complex128_t + +ctypedef npy_longlong longlong_t +ctypedef npy_ulonglong ulonglong_t + +ctypedef npy_intp intp_t +ctypedef npy_uintp uintp_t + +ctypedef npy_double float_t +ctypedef npy_double double_t +ctypedef npy_longdouble longdouble_t + +ctypedef float complex cfloat_t +ctypedef double complex cdouble_t +ctypedef double complex complex_t +ctypedef long double complex clongdouble_t + +cdef inline object PyArray_MultiIterNew1(a): + return PyArray_MultiIterNew(1, a) + +cdef inline object PyArray_MultiIterNew2(a, b): + return PyArray_MultiIterNew(2, a, b) + +cdef inline object PyArray_MultiIterNew3(a, b, c): + return PyArray_MultiIterNew(3, a, b, c) + +cdef inline object PyArray_MultiIterNew4(a, b, c, d): + return PyArray_MultiIterNew(4, a, b, c, d) + +cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): + return PyArray_MultiIterNew(5, a, b, c, d, e) + +cdef inline tuple PyDataType_SHAPE(dtype d): + if PyDataType_HASSUBARRAY(d): + return d.subarray.shape + else: + return () + + +cdef extern from "numpy/ndarrayobject.h": + PyTypeObject PyTimedeltaArrType_Type + PyTypeObject PyDatetimeArrType_Type + ctypedef int64_t npy_timedelta + ctypedef int64_t npy_datetime + +cdef extern from "numpy/ndarraytypes.h": + ctypedef struct PyArray_DatetimeMetaData: + NPY_DATETIMEUNIT base + int64_t num + + ctypedef struct 
npy_datetimestruct: + int64_t year + int32_t month, day, hour, min, sec, us, ps, as + + +cdef extern from "numpy/arrayscalars.h": + + # abstract types + ctypedef class numpy.generic [object PyObject]: + pass + ctypedef class numpy.number [object PyObject]: + pass + ctypedef class numpy.integer [object PyObject]: + pass + ctypedef class numpy.signedinteger [object PyObject]: + pass + ctypedef class numpy.unsignedinteger [object PyObject]: + pass + ctypedef class numpy.inexact [object PyObject]: + pass + ctypedef class numpy.floating [object PyObject]: + pass + ctypedef class numpy.complexfloating [object PyObject]: + pass + ctypedef class numpy.flexible [object PyObject]: + pass + ctypedef class numpy.character [object PyObject]: + pass + + ctypedef struct PyDatetimeScalarObject: + # PyObject_HEAD + npy_datetime obval + PyArray_DatetimeMetaData obmeta + + ctypedef struct PyTimedeltaScalarObject: + # PyObject_HEAD + npy_timedelta obval + PyArray_DatetimeMetaData obmeta + + ctypedef enum NPY_DATETIMEUNIT: + NPY_FR_Y + NPY_FR_M + NPY_FR_W + NPY_FR_D + NPY_FR_B + NPY_FR_h + NPY_FR_m + NPY_FR_s + NPY_FR_ms + NPY_FR_us + NPY_FR_ns + NPY_FR_ps + NPY_FR_fs + NPY_FR_as + NPY_FR_GENERIC + + +cdef extern from "numpy/arrayobject.h": + # These are part of the C-API defined in `__multiarray_api.h` + + # NumPy internal definitions in datetime_strings.c: + int get_datetime_iso_8601_strlen "NpyDatetime_GetDatetimeISO8601StrLen" ( + int local, NPY_DATETIMEUNIT base) + int make_iso_8601_datetime "NpyDatetime_MakeISO8601Datetime" ( + npy_datetimestruct *dts, char *outstr, npy_intp outlen, + int local, int utc, NPY_DATETIMEUNIT base, int tzoffset, + NPY_CASTING casting) except -1 + + # NumPy internal definition in datetime.c: + # May return 1 to indicate that object does not appear to be a datetime + # (returns 0 on success). 
+ int convert_pydatetime_to_datetimestruct "NpyDatetime_ConvertPyDateTimeToDatetimeStruct" ( + PyObject *obj, npy_datetimestruct *out, + NPY_DATETIMEUNIT *out_bestunit, int apply_tzinfo) except -1 + int convert_datetime64_to_datetimestruct "NpyDatetime_ConvertDatetime64ToDatetimeStruct" ( + PyArray_DatetimeMetaData *meta, npy_datetime dt, + npy_datetimestruct *out) except -1 + int convert_datetimestruct_to_datetime64 "NpyDatetime_ConvertDatetimeStructToDatetime64"( + PyArray_DatetimeMetaData *meta, const npy_datetimestruct *dts, + npy_datetime *out) except -1 + + +# +# ufunc API +# + +cdef extern from "numpy/ufuncobject.h": + + ctypedef void (*PyUFuncGenericFunction) (char **, npy_intp *, npy_intp *, void *) + + ctypedef class numpy.ufunc [object PyUFuncObject, check_size ignore]: + cdef: + int nin, nout, nargs + int identity + PyUFuncGenericFunction *functions + void **data + int ntypes + int check_return + char *name + char *types + char *doc + void *ptr + PyObject *obj + PyObject *userloops + + cdef enum: + PyUFunc_Zero + PyUFunc_One + PyUFunc_None + UFUNC_FPE_DIVIDEBYZERO + UFUNC_FPE_OVERFLOW + UFUNC_FPE_UNDERFLOW + UFUNC_FPE_INVALID + + object PyUFunc_FromFuncAndData(PyUFuncGenericFunction *, + void **, char *, int, int, int, int, char *, char *, int) + int PyUFunc_RegisterLoopForType(ufunc, int, + PyUFuncGenericFunction, int *, void *) except -1 + void PyUFunc_f_f_As_d_d \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_d_d \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_f_f \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_g_g \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_F_F_As_D_D \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_F_F \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_D_D \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_G_G \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_O_O \ + (char **, npy_intp *, npy_intp *, void *) + void 
PyUFunc_ff_f_As_dd_d \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_ff_f \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_dd_d \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_gg_g \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_FF_F_As_DD_D \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_DD_D \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_FF_F \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_GG_G \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_OO_O \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_O_O_method \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_OO_O_method \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_On_Om \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_clearfperr() + int PyUFunc_getfperr() + int PyUFunc_ReplaceLoopBySignature \ + (ufunc, PyUFuncGenericFunction, int *, PyUFuncGenericFunction *) + object PyUFunc_FromFuncAndDataAndSignature \ + (PyUFuncGenericFunction *, void **, char *, int, int, int, + int, char *, char *, int, char *) + + int _import_umath() except -1 + +cdef inline void set_array_base(ndarray arr, object base) except *: + Py_INCREF(base) # important to do this before stealing the reference below! + PyArray_SetBaseObject(arr, base) + +cdef inline object get_array_base(ndarray arr): + base = PyArray_BASE(arr) + if base is NULL: + return None + return base + +# Versions of the import_* functions which are more suitable for +# Cython code. 
+cdef inline int import_array() except -1: + try: + __pyx_import_array() + except Exception: + raise ImportError("numpy._core.multiarray failed to import") + +cdef inline int import_umath() except -1: + try: + _import_umath() + except Exception: + raise ImportError("numpy._core.umath failed to import") + +cdef inline int import_ufunc() except -1: + try: + _import_umath() + except Exception: + raise ImportError("numpy._core.umath failed to import") + + +cdef inline bint is_timedelta64_object(object obj) noexcept: + """ + Cython equivalent of `isinstance(obj, np.timedelta64)` + + Parameters + ---------- + obj : object + + Returns + ------- + bool + """ + return PyObject_TypeCheck(obj, &PyTimedeltaArrType_Type) + + +cdef inline bint is_datetime64_object(object obj) noexcept: + """ + Cython equivalent of `isinstance(obj, np.datetime64)` + + Parameters + ---------- + obj : object + + Returns + ------- + bool + """ + return PyObject_TypeCheck(obj, &PyDatetimeArrType_Type) + + +cdef inline npy_datetime get_datetime64_value(object obj) noexcept nogil: + """ + returns the int64 value underlying scalar numpy datetime64 object + + Note that to interpret this as a datetime, the corresponding unit is + also needed. That can be found using `get_datetime64_unit`. + """ + return (obj).obval + + +cdef inline npy_timedelta get_timedelta64_value(object obj) noexcept nogil: + """ + returns the int64 value underlying scalar numpy timedelta64 object + """ + return (obj).obval + + +cdef inline NPY_DATETIMEUNIT get_datetime64_unit(object obj) noexcept nogil: + """ + returns the unit part of the dtype for a numpy datetime64 object. 
+ """ + return (obj).obmeta.base + + +# Iterator API added in v1.6 +ctypedef int (*NpyIter_IterNextFunc)(NpyIter* it) noexcept nogil +ctypedef void (*NpyIter_GetMultiIndexFunc)(NpyIter* it, npy_intp* outcoords) noexcept nogil + +cdef extern from "numpy/arrayobject.h": + + ctypedef struct NpyIter: + pass + + cdef enum: + NPY_FAIL + NPY_SUCCEED + + cdef enum: + # Track an index representing C order + NPY_ITER_C_INDEX + # Track an index representing Fortran order + NPY_ITER_F_INDEX + # Track a multi-index + NPY_ITER_MULTI_INDEX + # User code external to the iterator does the 1-dimensional innermost loop + NPY_ITER_EXTERNAL_LOOP + # Convert all the operands to a common data type + NPY_ITER_COMMON_DTYPE + # Operands may hold references, requiring API access during iteration + NPY_ITER_REFS_OK + # Zero-sized operands should be permitted, iteration checks IterSize for 0 + NPY_ITER_ZEROSIZE_OK + # Permits reductions (size-0 stride with dimension size > 1) + NPY_ITER_REDUCE_OK + # Enables sub-range iteration + NPY_ITER_RANGED + # Enables buffering + NPY_ITER_BUFFERED + # When buffering is enabled, grows the inner loop if possible + NPY_ITER_GROWINNER + # Delay allocation of buffers until first Reset* call + NPY_ITER_DELAY_BUFALLOC + # When NPY_KEEPORDER is specified, disable reversing negative-stride axes + NPY_ITER_DONT_NEGATE_STRIDES + NPY_ITER_COPY_IF_OVERLAP + # The operand will be read from and written to + NPY_ITER_READWRITE + # The operand will only be read from + NPY_ITER_READONLY + # The operand will only be written to + NPY_ITER_WRITEONLY + # The operand's data must be in native byte order + NPY_ITER_NBO + # The operand's data must be aligned + NPY_ITER_ALIGNED + # The operand's data must be contiguous (within the inner loop) + NPY_ITER_CONTIG + # The operand may be copied to satisfy requirements + NPY_ITER_COPY + # The operand may be copied with WRITEBACKIFCOPY to satisfy requirements + NPY_ITER_UPDATEIFCOPY + # Allocate the operand if it is NULL + 
NPY_ITER_ALLOCATE + # If an operand is allocated, don't use any subtype + NPY_ITER_NO_SUBTYPE + # This is a virtual array slot, operand is NULL but temporary data is there + NPY_ITER_VIRTUAL + # Require that the dimension match the iterator dimensions exactly + NPY_ITER_NO_BROADCAST + # A mask is being used on this array, affects buffer -> array copy + NPY_ITER_WRITEMASKED + # This array is the mask for all WRITEMASKED operands + NPY_ITER_ARRAYMASK + # Assume iterator order data access for COPY_IF_OVERLAP + NPY_ITER_OVERLAP_ASSUME_ELEMENTWISE + + # construction and destruction functions + NpyIter* NpyIter_New(ndarray arr, npy_uint32 flags, NPY_ORDER order, + NPY_CASTING casting, dtype datatype) except NULL + NpyIter* NpyIter_MultiNew(npy_intp nop, PyArrayObject** op, npy_uint32 flags, + NPY_ORDER order, NPY_CASTING casting, npy_uint32* + op_flags, PyArray_Descr** op_dtypes) except NULL + NpyIter* NpyIter_AdvancedNew(npy_intp nop, PyArrayObject** op, + npy_uint32 flags, NPY_ORDER order, + NPY_CASTING casting, npy_uint32* op_flags, + PyArray_Descr** op_dtypes, int oa_ndim, + int** op_axes, const npy_intp* itershape, + npy_intp buffersize) except NULL + NpyIter* NpyIter_Copy(NpyIter* it) except NULL + int NpyIter_RemoveAxis(NpyIter* it, int axis) except NPY_FAIL + int NpyIter_RemoveMultiIndex(NpyIter* it) except NPY_FAIL + int NpyIter_EnableExternalLoop(NpyIter* it) except NPY_FAIL + int NpyIter_Deallocate(NpyIter* it) except NPY_FAIL + int NpyIter_Reset(NpyIter* it, char** errmsg) except NPY_FAIL + int NpyIter_ResetToIterIndexRange(NpyIter* it, npy_intp istart, + npy_intp iend, char** errmsg) except NPY_FAIL + int NpyIter_ResetBasePointers(NpyIter* it, char** baseptrs, char** errmsg) except NPY_FAIL + int NpyIter_GotoMultiIndex(NpyIter* it, const npy_intp* multi_index) except NPY_FAIL + int NpyIter_GotoIndex(NpyIter* it, npy_intp index) except NPY_FAIL + npy_intp NpyIter_GetIterSize(NpyIter* it) nogil + npy_intp NpyIter_GetIterIndex(NpyIter* it) nogil + void 
NpyIter_GetIterIndexRange(NpyIter* it, npy_intp* istart, + npy_intp* iend) nogil + int NpyIter_GotoIterIndex(NpyIter* it, npy_intp iterindex) except NPY_FAIL + npy_bool NpyIter_HasDelayedBufAlloc(NpyIter* it) nogil + npy_bool NpyIter_HasExternalLoop(NpyIter* it) nogil + npy_bool NpyIter_HasMultiIndex(NpyIter* it) nogil + npy_bool NpyIter_HasIndex(NpyIter* it) nogil + npy_bool NpyIter_RequiresBuffering(NpyIter* it) nogil + npy_bool NpyIter_IsBuffered(NpyIter* it) nogil + npy_bool NpyIter_IsGrowInner(NpyIter* it) nogil + npy_intp NpyIter_GetBufferSize(NpyIter* it) nogil + int NpyIter_GetNDim(NpyIter* it) nogil + int NpyIter_GetNOp(NpyIter* it) nogil + npy_intp* NpyIter_GetAxisStrideArray(NpyIter* it, int axis) except NULL + int NpyIter_GetShape(NpyIter* it, npy_intp* outshape) nogil + PyArray_Descr** NpyIter_GetDescrArray(NpyIter* it) + PyArrayObject** NpyIter_GetOperandArray(NpyIter* it) + ndarray NpyIter_GetIterView(NpyIter* it, npy_intp i) + void NpyIter_GetReadFlags(NpyIter* it, char* outreadflags) + void NpyIter_GetWriteFlags(NpyIter* it, char* outwriteflags) + int NpyIter_CreateCompatibleStrides(NpyIter* it, npy_intp itemsize, + npy_intp* outstrides) except NPY_FAIL + npy_bool NpyIter_IsFirstVisit(NpyIter* it, int iop) nogil + # functions for iterating an NpyIter object + NpyIter_IterNextFunc* NpyIter_GetIterNext(NpyIter* it, char** errmsg) except NULL + NpyIter_GetMultiIndexFunc* NpyIter_GetGetMultiIndex(NpyIter* it, + char** errmsg) except NULL + char** NpyIter_GetDataPtrArray(NpyIter* it) nogil + char** NpyIter_GetInitialDataPtrArray(NpyIter* it) nogil + npy_intp* NpyIter_GetIndexPtr(NpyIter* it) + npy_intp* NpyIter_GetInnerStrideArray(NpyIter* it) nogil + npy_intp* NpyIter_GetInnerLoopSizePtr(NpyIter* it) nogil + void NpyIter_GetInnerFixedStrideArray(NpyIter* it, npy_intp* outstrides) nogil + npy_bool NpyIter_IterationNeedsAPI(NpyIter* it) nogil + void NpyIter_DebugPrint(NpyIter* it) diff --git a/mantis_evalkit/lib/python3.10/site-packages/numpy/__init__.py 
b/mantis_evalkit/lib/python3.10/site-packages/numpy/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..2a4fd03b6a445cb98a214c28eff14b157aaea458 --- /dev/null +++ b/mantis_evalkit/lib/python3.10/site-packages/numpy/__init__.py @@ -0,0 +1,547 @@ +""" +NumPy +===== + +Provides + 1. An array object of arbitrary homogeneous items + 2. Fast mathematical operations over arrays + 3. Linear Algebra, Fourier Transforms, Random Number Generation + +How to use the documentation +---------------------------- +Documentation is available in two forms: docstrings provided +with the code, and a loose standing reference guide, available from +`the NumPy homepage `_. + +We recommend exploring the docstrings using +`IPython `_, an advanced Python shell with +TAB-completion and introspection capabilities. See below for further +instructions. + +The docstring examples assume that `numpy` has been imported as ``np``:: + + >>> import numpy as np + +Code snippets are indicated by three greater-than signs:: + + >>> x = 42 + >>> x = x + 1 + +Use the built-in ``help`` function to view a function's docstring:: + + >>> help(np.sort) + ... # doctest: +SKIP + +For some objects, ``np.info(obj)`` may provide additional help. This is +particularly true if you see the line "Help on ufunc object:" at the top +of the help() page. Ufuncs are implemented in C, not Python, for speed. +The native Python help() does not know how to view their help, but our +np.info() function does. + +Available subpackages +--------------------- +lib + Basic functions used by several sub-packages. 
+random + Core Random Tools +linalg + Core Linear Algebra Tools +fft + Core FFT routines +polynomial + Polynomial tools +testing + NumPy testing tools +distutils + Enhancements to distutils with support for + Fortran compilers support and more (for Python <= 3.11) + +Utilities +--------- +test + Run numpy unittests +show_config + Show numpy build configuration +__version__ + NumPy version string + +Viewing documentation using IPython +----------------------------------- + +Start IPython and import `numpy` usually under the alias ``np``: `import +numpy as np`. Then, directly past or use the ``%cpaste`` magic to paste +examples into the shell. To see which functions are available in `numpy`, +type ``np.`` (where ```` refers to the TAB key), or use +``np.*cos*?`` (where ```` refers to the ENTER key) to narrow +down the list. To view the docstring for a function, use +``np.cos?`` (to view the docstring) and ``np.cos??`` (to view +the source code). + +Copies vs. in-place operation +----------------------------- +Most of the functions in `numpy` return a copy of the array argument +(e.g., `np.sort`). In-place versions of these functions are often +available as array methods, i.e. ``x = np.array([1,2,3]); x.sort()``. +Exceptions to this rule are documented. + +""" +import os +import sys +import warnings + +from ._globals import _NoValue, _CopyMode +from ._expired_attrs_2_0 import __expired_attributes__ + + +# If a version with git hash was stored, use that instead +from . import version +from .version import __version__ + +# We first need to detect if we're being called as part of the numpy setup +# procedure itself in a reliable manner. +try: + __NUMPY_SETUP__ +except NameError: + __NUMPY_SETUP__ = False + +if __NUMPY_SETUP__: + sys.stderr.write('Running from numpy source directory.\n') +else: + # Allow distributors to run custom init code before importing numpy._core + from . 
import _distributor_init + + try: + from numpy.__config__ import show_config + except ImportError as e: + msg = """Error importing numpy: you should not try to import numpy from + its source directory; please exit the numpy source tree, and relaunch + your python interpreter from there.""" + raise ImportError(msg) from e + + from . import _core + from ._core import ( + False_, ScalarType, True_, + abs, absolute, acos, acosh, add, all, allclose, + amax, amin, any, arange, arccos, arccosh, arcsin, arcsinh, + arctan, arctan2, arctanh, argmax, argmin, argpartition, argsort, + argwhere, around, array, array2string, array_equal, array_equiv, + array_repr, array_str, asanyarray, asarray, ascontiguousarray, + asfortranarray, asin, asinh, atan, atanh, atan2, astype, atleast_1d, + atleast_2d, atleast_3d, base_repr, binary_repr, bitwise_and, + bitwise_count, bitwise_invert, bitwise_left_shift, bitwise_not, + bitwise_or, bitwise_right_shift, bitwise_xor, block, bool, bool_, + broadcast, busday_count, busday_offset, busdaycalendar, byte, bytes_, + can_cast, cbrt, cdouble, ceil, character, choose, clip, clongdouble, + complex128, complex64, complexfloating, compress, concat, concatenate, + conj, conjugate, convolve, copysign, copyto, correlate, cos, cosh, + count_nonzero, cross, csingle, cumprod, cumsum, cumulative_prod, + cumulative_sum, datetime64, datetime_as_string, datetime_data, + deg2rad, degrees, diagonal, divide, divmod, dot, double, dtype, e, + einsum, einsum_path, empty, empty_like, equal, errstate, euler_gamma, + exp, exp2, expm1, fabs, finfo, flatiter, flatnonzero, flexible, + float16, float32, float64, float_power, floating, floor, floor_divide, + fmax, fmin, fmod, format_float_positional, format_float_scientific, + frexp, from_dlpack, frombuffer, fromfile, fromfunction, fromiter, + frompyfunc, fromstring, full, full_like, gcd, generic, geomspace, + get_printoptions, getbufsize, geterr, geterrcall, greater, + greater_equal, half, heaviside, hstack, hypot, identity, 
iinfo, + indices, inexact, inf, inner, int16, int32, int64, int8, int_, intc, + integer, intp, invert, is_busday, isclose, isdtype, isfinite, + isfortran, isinf, isnan, isnat, isscalar, issubdtype, lcm, ldexp, + left_shift, less, less_equal, lexsort, linspace, little_endian, log, + log10, log1p, log2, logaddexp, logaddexp2, logical_and, logical_not, + logical_or, logical_xor, logspace, long, longdouble, longlong, matmul, + matvec, matrix_transpose, max, maximum, may_share_memory, mean, memmap, + min, min_scalar_type, minimum, mod, modf, moveaxis, multiply, nan, + ndarray, ndim, nditer, negative, nested_iters, newaxis, nextafter, + nonzero, not_equal, number, object_, ones, ones_like, outer, partition, + permute_dims, pi, positive, pow, power, printoptions, prod, + promote_types, ptp, put, putmask, rad2deg, radians, ravel, recarray, + reciprocal, record, remainder, repeat, require, reshape, resize, + result_type, right_shift, rint, roll, rollaxis, round, sctypeDict, + searchsorted, set_printoptions, setbufsize, seterr, seterrcall, shape, + shares_memory, short, sign, signbit, signedinteger, sin, single, sinh, + size, sort, spacing, sqrt, square, squeeze, stack, std, + str_, subtract, sum, swapaxes, take, tan, tanh, tensordot, + timedelta64, trace, transpose, true_divide, trunc, typecodes, ubyte, + ufunc, uint, uint16, uint32, uint64, uint8, uintc, uintp, ulong, + ulonglong, unsignedinteger, unstack, ushort, var, vdot, vecdot, + vecmat, void, vstack, where, zeros, zeros_like + ) + + # NOTE: It's still under discussion whether these aliases + # should be removed. + for ta in ["float96", "float128", "complex192", "complex256"]: + try: + globals()[ta] = getattr(_core, ta) + except AttributeError: + pass + del ta + + from . 
import lib + from .lib import scimath as emath + from .lib._histograms_impl import ( + histogram, histogram_bin_edges, histogramdd + ) + from .lib._nanfunctions_impl import ( + nanargmax, nanargmin, nancumprod, nancumsum, nanmax, nanmean, + nanmedian, nanmin, nanpercentile, nanprod, nanquantile, nanstd, + nansum, nanvar + ) + from .lib._function_base_impl import ( + select, piecewise, trim_zeros, copy, iterable, percentile, diff, + gradient, angle, unwrap, sort_complex, flip, rot90, extract, place, + vectorize, asarray_chkfinite, average, bincount, digitize, cov, + corrcoef, median, sinc, hamming, hanning, bartlett, blackman, + kaiser, trapezoid, trapz, i0, meshgrid, delete, insert, append, + interp, quantile + ) + from .lib._twodim_base_impl import ( + diag, diagflat, eye, fliplr, flipud, tri, triu, tril, vander, + histogram2d, mask_indices, tril_indices, tril_indices_from, + triu_indices, triu_indices_from + ) + from .lib._shape_base_impl import ( + apply_over_axes, apply_along_axis, array_split, column_stack, dsplit, + dstack, expand_dims, hsplit, kron, put_along_axis, row_stack, split, + take_along_axis, tile, vsplit + ) + from .lib._type_check_impl import ( + iscomplexobj, isrealobj, imag, iscomplex, isreal, nan_to_num, real, + real_if_close, typename, mintypecode, common_type + ) + from .lib._arraysetops_impl import ( + ediff1d, in1d, intersect1d, isin, setdiff1d, setxor1d, union1d, + unique, unique_all, unique_counts, unique_inverse, unique_values + ) + from .lib._ufunclike_impl import fix, isneginf, isposinf + from .lib._arraypad_impl import pad + from .lib._utils_impl import ( + show_runtime, get_include, info + ) + from .lib._stride_tricks_impl import ( + broadcast_arrays, broadcast_shapes, broadcast_to + ) + from .lib._polynomial_impl import ( + poly, polyint, polyder, polyadd, polysub, polymul, polydiv, polyval, + polyfit, poly1d, roots + ) + from .lib._npyio_impl import ( + savetxt, loadtxt, genfromtxt, load, save, savez, packbits, + savez_compressed, 
unpackbits, fromregex + ) + from .lib._index_tricks_impl import ( + diag_indices_from, diag_indices, fill_diagonal, ndindex, ndenumerate, + ix_, c_, r_, s_, ogrid, mgrid, unravel_index, ravel_multi_index, + index_exp + ) + + from . import matrixlib as _mat + from .matrixlib import ( + asmatrix, bmat, matrix + ) + + # public submodules are imported lazily, therefore are accessible from + # __getattr__. Note that `distutils` (deprecated) and `array_api` + # (experimental label) are not added here, because `from numpy import *` + # must not raise any warnings - that's too disruptive. + __numpy_submodules__ = { + "linalg", "fft", "dtypes", "random", "polynomial", "ma", + "exceptions", "lib", "ctypeslib", "testing", "typing", + "f2py", "test", "rec", "char", "core", "strings", + } + + # We build warning messages for former attributes + _msg = ( + "module 'numpy' has no attribute '{n}'.\n" + "`np.{n}` was a deprecated alias for the builtin `{n}`. " + "To avoid this error in existing code, use `{n}` by itself. " + "Doing this will not modify any behavior and is safe. {extended_msg}\n" + "The aliases was originally deprecated in NumPy 1.20; for more " + "details and guidance see the original release note at:\n" + " https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations") + + _specific_msg = ( + "If you specifically wanted the numpy scalar type, use `np.{}` here.") + + _int_extended_msg = ( + "When replacing `np.{}`, you may wish to use e.g. `np.int64` " + "or `np.int32` to specify the precision. If you wish to review " + "your current use, check the release note link for " + "additional information.") + + _type_info = [ + ("object", ""), # The NumPy scalar only exists by name. 
+ ("float", _specific_msg.format("float64")), + ("complex", _specific_msg.format("complex128")), + ("str", _specific_msg.format("str_")), + ("int", _int_extended_msg.format("int"))] + + __former_attrs__ = { + n: _msg.format(n=n, extended_msg=extended_msg) + for n, extended_msg in _type_info + } + + + # Some of these could be defined right away, but most were aliases to + # the Python objects and only removed in NumPy 1.24. Defining them should + # probably wait for NumPy 1.26 or 2.0. + # When defined, these should possibly not be added to `__all__` to avoid + # import with `from numpy import *`. + __future_scalars__ = {"str", "bytes", "object"} + + __array_api_version__ = "2023.12" + + from ._array_api_info import __array_namespace_info__ + + # now that numpy core module is imported, can initialize limits + _core.getlimits._register_known_types() + + __all__ = list( + __numpy_submodules__ | + set(_core.__all__) | + set(_mat.__all__) | + set(lib._histograms_impl.__all__) | + set(lib._nanfunctions_impl.__all__) | + set(lib._function_base_impl.__all__) | + set(lib._twodim_base_impl.__all__) | + set(lib._shape_base_impl.__all__) | + set(lib._type_check_impl.__all__) | + set(lib._arraysetops_impl.__all__) | + set(lib._ufunclike_impl.__all__) | + set(lib._arraypad_impl.__all__) | + set(lib._utils_impl.__all__) | + set(lib._stride_tricks_impl.__all__) | + set(lib._polynomial_impl.__all__) | + set(lib._npyio_impl.__all__) | + set(lib._index_tricks_impl.__all__) | + {"emath", "show_config", "__version__", "__array_namespace_info__"} + ) + + # Filter out Cython harmless warnings + warnings.filterwarnings("ignore", message="numpy.dtype size changed") + warnings.filterwarnings("ignore", message="numpy.ufunc size changed") + warnings.filterwarnings("ignore", message="numpy.ndarray size changed") + + def __getattr__(attr): + # Warn for expired attributes + import warnings + + if attr == "linalg": + import numpy.linalg as linalg + return linalg + elif attr == "fft": + import 
numpy.fft as fft + return fft + elif attr == "dtypes": + import numpy.dtypes as dtypes + return dtypes + elif attr == "random": + import numpy.random as random + return random + elif attr == "polynomial": + import numpy.polynomial as polynomial + return polynomial + elif attr == "ma": + import numpy.ma as ma + return ma + elif attr == "ctypeslib": + import numpy.ctypeslib as ctypeslib + return ctypeslib + elif attr == "exceptions": + import numpy.exceptions as exceptions + return exceptions + elif attr == "testing": + import numpy.testing as testing + return testing + elif attr == "matlib": + import numpy.matlib as matlib + return matlib + elif attr == "f2py": + import numpy.f2py as f2py + return f2py + elif attr == "typing": + import numpy.typing as typing + return typing + elif attr == "rec": + import numpy.rec as rec + return rec + elif attr == "char": + import numpy.char as char + return char + elif attr == "array_api": + raise AttributeError("`numpy.array_api` is not available from " + "numpy 2.0 onwards", name=None) + elif attr == "core": + import numpy.core as core + return core + elif attr == "strings": + import numpy.strings as strings + return strings + elif attr == "distutils": + if 'distutils' in __numpy_submodules__: + import numpy.distutils as distutils + return distutils + else: + raise AttributeError("`numpy.distutils` is not available from " + "Python 3.12 onwards", name=None) + + if attr in __future_scalars__: + # And future warnings for those that will change, but also give + # the AttributeError + warnings.warn( + f"In the future `np.{attr}` will be defined as the " + "corresponding NumPy scalar.", FutureWarning, stacklevel=2) + + if attr in __former_attrs__: + raise AttributeError(__former_attrs__[attr], name=None) + + if attr in __expired_attributes__: + raise AttributeError( + f"`np.{attr}` was removed in the NumPy 2.0 release. 
" + f"{__expired_attributes__[attr]}", + name=None + ) + + if attr == "chararray": + warnings.warn( + "`np.chararray` is deprecated and will be removed from " + "the main namespace in the future. Use an array with a string " + "or bytes dtype instead.", DeprecationWarning, stacklevel=2) + import numpy.char as char + return char.chararray + + raise AttributeError("module {!r} has no attribute " + "{!r}".format(__name__, attr)) + + def __dir__(): + public_symbols = ( + globals().keys() | __numpy_submodules__ + ) + public_symbols -= { + "matrixlib", "matlib", "tests", "conftest", "version", + "compat", "distutils", "array_api" + } + return list(public_symbols) + + # Pytest testing + from numpy._pytesttester import PytestTester + test = PytestTester(__name__) + del PytestTester + + def _sanity_check(): + """ + Quick sanity checks for common bugs caused by environment. + There are some cases e.g. with wrong BLAS ABI that cause wrong + results under specific runtime conditions that are not necessarily + achieved during test suite runs, and it is useful to catch those early. + + See https://github.com/numpy/numpy/issues/8577 and other + similar bug reports. + + """ + try: + x = ones(2, dtype=float32) + if not abs(x.dot(x) - float32(2.0)) < 1e-5: + raise AssertionError + except AssertionError: + msg = ("The current Numpy installation ({!r}) fails to " + "pass simple sanity checks. This can be caused for example " + "by incorrect BLAS library being linked in, or by mixing " + "package managers (pip, conda, apt, ...). Search closed " + "numpy issues for similar problems.") + raise RuntimeError(msg.format(__file__)) from None + + _sanity_check() + del _sanity_check + + def _mac_os_check(): + """ + Quick Sanity check for Mac OS look for accelerate build bugs. 
+ Testing numpy polyfit calls init_dgelsd(LAPACK) + """ + try: + c = array([3., 2., 1.]) + x = linspace(0, 2, 5) + y = polyval(c, x) + _ = polyfit(x, y, 2, cov=True) + except ValueError: + pass + + if sys.platform == "darwin": + from . import exceptions + with warnings.catch_warnings(record=True) as w: + _mac_os_check() + # Throw runtime error, if the test failed Check for warning and error_message + if len(w) > 0: + for _wn in w: + if _wn.category is exceptions.RankWarning: + # Ignore other warnings, they may not be relevant (see gh-25433). + error_message = ( + f"{_wn.category.__name__}: {_wn.message}" + ) + msg = ( + "Polyfit sanity test emitted a warning, most likely due " + "to using a buggy Accelerate backend." + "\nIf you compiled yourself, more information is available at:" + "\nhttps://numpy.org/devdocs/building/index.html" + "\nOtherwise report this to the vendor " + "that provided NumPy.\n\n{}\n".format(error_message)) + raise RuntimeError(msg) + del _wn + del w + del _mac_os_check + + def hugepage_setup(): + """ + We usually use madvise hugepages support, but on some old kernels it + is slow and thus better avoided. Specifically kernel version 4.6 + had a bug fix which probably fixed this: + https://github.com/torvalds/linux/commit/7cf91a98e607c2f935dbcc177d70011e95b8faff + """ + use_hugepage = os.environ.get("NUMPY_MADVISE_HUGEPAGE", None) + if sys.platform == "linux" and use_hugepage is None: + # If there is an issue with parsing the kernel version, + # set use_hugepage to 0. Usage of LooseVersion will handle + # the kernel version parsing better, but avoided since it + # will increase the import time. + # See: #16679 for related discussion. 
+ try: + use_hugepage = 1 + kernel_version = os.uname().release.split(".")[:2] + kernel_version = tuple(int(v) for v in kernel_version) + if kernel_version < (4, 6): + use_hugepage = 0 + except ValueError: + use_hugepage = 0 + elif use_hugepage is None: + # This is not Linux, so it should not matter, just enable anyway + use_hugepage = 1 + else: + use_hugepage = int(use_hugepage) + return use_hugepage + + # Note that this will currently only make a difference on Linux + _core.multiarray._set_madvise_hugepage(hugepage_setup()) + del hugepage_setup + + # Give a warning if NumPy is reloaded or imported on a sub-interpreter + # We do this from python, since the C-module may not be reloaded and + # it is tidier organized. + _core.multiarray._multiarray_umath._reload_guard() + + # TODO: Remove the environment variable entirely now that it is "weak" + if (os.environ.get("NPY_PROMOTION_STATE", "weak") != "weak"): + warnings.warn( + "NPY_PROMOTION_STATE was a temporary feature for NumPy 2.0 " + "transition and is ignored after NumPy 2.2.", + UserWarning, stacklevel=2) + + # Tell PyInstaller where to find hook-numpy.py + def _pyinstaller_hooks_dir(): + from pathlib import Path + return [str(Path(__file__).with_name("_pyinstaller").resolve())] + + +# Remove symbols imported for internal use +del os, sys, warnings diff --git a/mantis_evalkit/lib/python3.10/site-packages/numpy/_configtool.py b/mantis_evalkit/lib/python3.10/site-packages/numpy/_configtool.py new file mode 100644 index 0000000000000000000000000000000000000000..70a14b876bccd9dab58c4b989785e2aec4c690fa --- /dev/null +++ b/mantis_evalkit/lib/python3.10/site-packages/numpy/_configtool.py @@ -0,0 +1,39 @@ +import argparse +from pathlib import Path +import sys + +from .version import __version__ +from .lib._utils_impl import get_include + + +def main() -> None: + parser = argparse.ArgumentParser() + parser.add_argument( + "--version", + action="version", + version=__version__, + help="Print the version and exit.", + ) 
+ parser.add_argument( + "--cflags", + action="store_true", + help="Compile flag needed when using the NumPy headers.", + ) + parser.add_argument( + "--pkgconfigdir", + action="store_true", + help=("Print the pkgconfig directory in which `numpy.pc` is stored " + "(useful for setting $PKG_CONFIG_PATH)."), + ) + args = parser.parse_args() + if not sys.argv[1:]: + parser.print_help() + if args.cflags: + print("-I" + get_include()) + if args.pkgconfigdir: + _path = Path(get_include()) / '..' / 'lib' / 'pkgconfig' + print(_path.resolve()) + + +if __name__ == "__main__": + main() diff --git a/mantis_evalkit/lib/python3.10/site-packages/numpy/_expired_attrs_2_0.py b/mantis_evalkit/lib/python3.10/site-packages/numpy/_expired_attrs_2_0.py new file mode 100644 index 0000000000000000000000000000000000000000..f5eb59e5ea17d2480e402445eec4b8cf833eef69 --- /dev/null +++ b/mantis_evalkit/lib/python3.10/site-packages/numpy/_expired_attrs_2_0.py @@ -0,0 +1,80 @@ +""" +Dict of expired attributes that are discontinued since 2.0 release. +Each item is associated with a migration note. +""" + +__expired_attributes__ = { + "geterrobj": "Use the np.errstate context manager instead.", + "seterrobj": "Use the np.errstate context manager instead.", + "cast": "Use `np.asarray(arr, dtype=dtype)` instead.", + "source": "Use `inspect.getsource` instead.", + "lookfor": "Search NumPy's documentation directly.", + "who": "Use an IDE variable explorer or `locals()` instead.", + "fastCopyAndTranspose": "Use `arr.T.copy()` instead.", + "set_numeric_ops": + "For the general case, use `PyUFunc_ReplaceLoopBySignature`. 
" + "For ndarray subclasses, define the ``__array_ufunc__`` method " + "and override the relevant ufunc.", + "NINF": "Use `-np.inf` instead.", + "PINF": "Use `np.inf` instead.", + "NZERO": "Use `-0.0` instead.", + "PZERO": "Use `0.0` instead.", + "add_newdoc": + "It's still available as `np.lib.add_newdoc`.", + "add_docstring": + "It's still available as `np.lib.add_docstring`.", + "add_newdoc_ufunc": + "It's an internal function and doesn't have a replacement.", + "compat": "There's no replacement, as Python 2 is no longer supported.", + "safe_eval": "Use `ast.literal_eval` instead.", + "float_": "Use `np.float64` instead.", + "complex_": "Use `np.complex128` instead.", + "longfloat": "Use `np.longdouble` instead.", + "singlecomplex": "Use `np.complex64` instead.", + "cfloat": "Use `np.complex128` instead.", + "longcomplex": "Use `np.clongdouble` instead.", + "clongfloat": "Use `np.clongdouble` instead.", + "string_": "Use `np.bytes_` instead.", + "unicode_": "Use `np.str_` instead.", + "Inf": "Use `np.inf` instead.", + "Infinity": "Use `np.inf` instead.", + "NaN": "Use `np.nan` instead.", + "infty": "Use `np.inf` instead.", + "issctype": "Use `issubclass(rep, np.generic)` instead.", + "maximum_sctype": + "Use a specific dtype instead. 
You should avoid relying " + "on any implicit mechanism and select the largest dtype of " + "a kind explicitly in the code.", + "obj2sctype": "Use `np.dtype(obj).type` instead.", + "sctype2char": "Use `np.dtype(obj).char` instead.", + "sctypes": "Access dtypes explicitly instead.", + "issubsctype": "Use `np.issubdtype` instead.", + "set_string_function": + "Use `np.set_printoptions` instead with a formatter for " + "custom printing of NumPy objects.", + "asfarray": "Use `np.asarray` with a proper dtype instead.", + "issubclass_": "Use `issubclass` builtin instead.", + "tracemalloc_domain": "It's now available from `np.lib`.", + "mat": "Use `np.asmatrix` instead.", + "recfromcsv": "Use `np.genfromtxt` with comma delimiter instead.", + "recfromtxt": "Use `np.genfromtxt` instead.", + "deprecate": "Emit `DeprecationWarning` with `warnings.warn` directly, " + "or use `typing.deprecated`.", + "deprecate_with_doc": "Emit `DeprecationWarning` with `warnings.warn` " + "directly, or use `typing.deprecated`.", + "disp": "Use your own printing function instead.", + "find_common_type": + "Use `numpy.promote_types` or `numpy.result_type` instead. 
" + "To achieve semantics for the `scalar_types` argument, use " + "`numpy.result_type` and pass the Python values `0`, `0.0`, or `0j`.", + "round_": "Use `np.round` instead.", + "get_array_wrap": "", + "DataSource": "It's still available as `np.lib.npyio.DataSource`.", + "nbytes": "Use `np.dtype().itemsize` instead.", + "byte_bounds": "Now it's available under `np.lib.array_utils.byte_bounds`", + "compare_chararrays": + "It's still available as `np.char.compare_chararrays`.", + "format_parser": "It's still available as `np.rec.format_parser`.", + "alltrue": "Use `np.all` instead.", + "sometrue": "Use `np.any` instead.", +} diff --git a/mantis_evalkit/lib/python3.10/site-packages/numpy/_globals.py b/mantis_evalkit/lib/python3.10/site-packages/numpy/_globals.py new file mode 100644 index 0000000000000000000000000000000000000000..a1474177fef88fc8c68524f7fc04965ee7f89b05 --- /dev/null +++ b/mantis_evalkit/lib/python3.10/site-packages/numpy/_globals.py @@ -0,0 +1,95 @@ +""" +Module defining global singleton classes. + +This module raises a RuntimeError if an attempt to reload it is made. In that +way the identities of the classes defined here are fixed and will remain so +even if numpy itself is reloaded. In particular, a function like the following +will still work correctly after numpy is reloaded:: + + def foo(arg=np._NoValue): + if arg is np._NoValue: + ... + +That was not the case when the singleton classes were defined in the numpy +``__init__.py`` file. See gh-7844 for a discussion of the reload problem that +motivated this module. + +""" +import enum + +from ._utils import set_module as _set_module + +__all__ = ['_NoValue', '_CopyMode'] + + +# Disallow reloading this module so as to preserve the identities of the +# classes defined here. +if '_is_loaded' in globals(): + raise RuntimeError('Reloading numpy._globals is not allowed') +_is_loaded = True + + +class _NoValueType: + """Special keyword value. 
+ + The instance of this class may be used as the default value assigned to a + keyword if no other obvious default (e.g., `None`) is suitable, + + Common reasons for using this keyword are: + + - A new keyword is added to a function, and that function forwards its + inputs to another function or method which can be defined outside of + NumPy. For example, ``np.std(x)`` calls ``x.std``, so when a ``keepdims`` + keyword was added that could only be forwarded if the user explicitly + specified ``keepdims``; downstream array libraries may not have added + the same keyword, so adding ``x.std(..., keepdims=keepdims)`` + unconditionally could have broken previously working code. + - A keyword is being deprecated, and a deprecation warning must only be + emitted when the keyword is used. + + """ + __instance = None + def __new__(cls): + # ensure that only one instance exists + if not cls.__instance: + cls.__instance = super().__new__(cls) + return cls.__instance + + def __repr__(self): + return "" + + +_NoValue = _NoValueType() + + +@_set_module("numpy") +class _CopyMode(enum.Enum): + """ + An enumeration for the copy modes supported + by numpy.copy() and numpy.array(). The following three modes are supported, + + - ALWAYS: This means that a deep copy of the input + array will always be taken. + - IF_NEEDED: This means that a deep copy of the input + array will be taken only if necessary. + - NEVER: This means that the deep copy will never be taken. + If a copy cannot be avoided then a `ValueError` will be + raised. + + Note that the buffer-protocol could in theory do copies. NumPy currently + assumes an object exporting the buffer protocol will never do this. 
+ """ + + ALWAYS = True + NEVER = False + IF_NEEDED = 2 + + def __bool__(self): + # For backwards compatibility + if self == _CopyMode.ALWAYS: + return True + + if self == _CopyMode.NEVER: + return False + + raise ValueError(f"{self} is neither True nor False.") diff --git a/mantis_evalkit/lib/python3.10/site-packages/numpy/exceptions.py b/mantis_evalkit/lib/python3.10/site-packages/numpy/exceptions.py new file mode 100644 index 0000000000000000000000000000000000000000..9bf74fc4d0a3b11464e9fa660cf1de7fade4bb18 --- /dev/null +++ b/mantis_evalkit/lib/python3.10/site-packages/numpy/exceptions.py @@ -0,0 +1,247 @@ +""" +Exceptions and Warnings (:mod:`numpy.exceptions`) +================================================= + +General exceptions used by NumPy. Note that some exceptions may be module +specific, such as linear algebra errors. + +.. versionadded:: NumPy 1.25 + + The exceptions module is new in NumPy 1.25. Older exceptions remain + available through the main NumPy namespace for compatibility. + +.. currentmodule:: numpy.exceptions + +Warnings +-------- +.. autosummary:: + :toctree: generated/ + + ComplexWarning Given when converting complex to real. + VisibleDeprecationWarning Same as a DeprecationWarning, but more visible. + RankWarning Issued when the design matrix is rank deficient. + +Exceptions +---------- +.. autosummary:: + :toctree: generated/ + + AxisError Given when an axis was invalid. + DTypePromotionError Given when no common dtype could be found. + TooHardError Error specific to `numpy.shares_memory`. + +""" + + +__all__ = [ + "ComplexWarning", "VisibleDeprecationWarning", "ModuleDeprecationWarning", + "TooHardError", "AxisError", "DTypePromotionError"] + + +# Disallow reloading this module so as to preserve the identities of the +# classes defined here. 
+if '_is_loaded' in globals(): + raise RuntimeError('Reloading numpy._globals is not allowed') +_is_loaded = True + + +class ComplexWarning(RuntimeWarning): + """ + The warning raised when casting a complex dtype to a real dtype. + + As implemented, casting a complex number to a real discards its imaginary + part, but this behavior may not be what the user actually wants. + + """ + pass + + +class ModuleDeprecationWarning(DeprecationWarning): + """Module deprecation warning. + + .. warning:: + + This warning should not be used, since nose testing is not relevant + anymore. + + The nose tester turns ordinary Deprecation warnings into test failures. + That makes it hard to deprecate whole modules, because they get + imported by default. So this is a special Deprecation warning that the + nose tester will let pass without making tests fail. + + """ + pass + + +class VisibleDeprecationWarning(UserWarning): + """Visible deprecation warning. + + By default, python will not show deprecation warnings, so this class + can be used when a very visible warning is helpful, for example because + the usage is most likely a user bug. + + """ + pass + + +class RankWarning(RuntimeWarning): + """Matrix rank warning. + + Issued by polynomial functions when the design matrix is rank deficient. + + """ + pass + + +# Exception used in shares_memory() +class TooHardError(RuntimeError): + """max_work was exceeded. + + This is raised whenever the maximum number of candidate solutions + to consider specified by the ``max_work`` parameter is exceeded. + Assigning a finite number to max_work may have caused the operation + to fail. + + """ + pass + + +class AxisError(ValueError, IndexError): + """Axis supplied was invalid. + + This is raised whenever an ``axis`` parameter is specified that is larger + than the number of array dimensions. 
+ For compatibility with code written against older numpy versions, which + raised a mixture of :exc:`ValueError` and :exc:`IndexError` for this + situation, this exception subclasses both to ensure that + ``except ValueError`` and ``except IndexError`` statements continue + to catch ``AxisError``. + + Parameters + ---------- + axis : int or str + The out of bounds axis or a custom exception message. + If an axis is provided, then `ndim` should be specified as well. + ndim : int, optional + The number of array dimensions. + msg_prefix : str, optional + A prefix for the exception message. + + Attributes + ---------- + axis : int, optional + The out of bounds axis or ``None`` if a custom exception + message was provided. This should be the axis as passed by + the user, before any normalization to resolve negative indices. + + .. versionadded:: 1.22 + ndim : int, optional + The number of array dimensions or ``None`` if a custom exception + message was provided. + + .. versionadded:: 1.22 + + + Examples + -------- + >>> import numpy as np + >>> array_1d = np.arange(10) + >>> np.cumsum(array_1d, axis=1) + Traceback (most recent call last): + ... + numpy.exceptions.AxisError: axis 1 is out of bounds for array of dimension 1 + + Negative axes are preserved: + + >>> np.cumsum(array_1d, axis=-2) + Traceback (most recent call last): + ... 
+ numpy.exceptions.AxisError: axis -2 is out of bounds for array of dimension 1 + + The class constructor generally takes the axis and arrays' + dimensionality as arguments: + + >>> print(np.exceptions.AxisError(2, 1, msg_prefix='error')) + error: axis 2 is out of bounds for array of dimension 1 + + Alternatively, a custom exception message can be passed: + + >>> print(np.exceptions.AxisError('Custom error message')) + Custom error message + + """ + + __slots__ = ("axis", "ndim", "_msg") + + def __init__(self, axis, ndim=None, msg_prefix=None): + if ndim is msg_prefix is None: + # single-argument form: directly set the error message + self._msg = axis + self.axis = None + self.ndim = None + else: + self._msg = msg_prefix + self.axis = axis + self.ndim = ndim + + def __str__(self): + axis = self.axis + ndim = self.ndim + + if axis is ndim is None: + return self._msg + else: + msg = f"axis {axis} is out of bounds for array of dimension {ndim}" + if self._msg is not None: + msg = f"{self._msg}: {msg}" + return msg + + +class DTypePromotionError(TypeError): + """Multiple DTypes could not be converted to a common one. + + This exception derives from ``TypeError`` and is raised whenever dtypes + cannot be converted to a single common one. This can be because they + are of a different category/class or incompatible instances of the same + one (see Examples). + + Notes + ----- + Many functions will use promotion to find the correct result and + implementation. For these functions the error will typically be chained + with a more specific error indicating that no implementation was found + for the input dtypes. + + Typically promotion should be considered "invalid" between the dtypes of + two arrays when `arr1 == arr2` can safely return all ``False`` because the + dtypes are fundamentally different. 
+ + Examples + -------- + Datetimes and complex numbers are incompatible classes and cannot be + promoted: + + >>> import numpy as np + >>> np.result_type(np.dtype("M8[s]"), np.complex128) # doctest: +IGNORE_EXCEPTION_DETAIL + Traceback (most recent call last): + ... + DTypePromotionError: The DType could not + be promoted by . This means that no common + DType exists for the given inputs. For example they cannot be stored in a + single array unless the dtype is `object`. The full list of DTypes is: + (, ) + + For example for structured dtypes, the structure can mismatch and the + same ``DTypePromotionError`` is given when two structured dtypes with + a mismatch in their number of fields is given: + + >>> dtype1 = np.dtype([("field1", np.float64), ("field2", np.int64)]) + >>> dtype2 = np.dtype([("field1", np.float64)]) + >>> np.promote_types(dtype1, dtype2) # doctest: +IGNORE_EXCEPTION_DETAIL + Traceback (most recent call last): + ... + DTypePromotionError: field names `('field1', 'field2')` and `('field1',)` + mismatch. 
+ + """ # NOQA + pass diff --git a/mantis_evalkit/lib/python3.10/site-packages/omegaconf/__pycache__/__init__.cpython-310.pyc b/mantis_evalkit/lib/python3.10/site-packages/omegaconf/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d046d62940d2f0df77446e5f1388b1f3ece6ce11 Binary files /dev/null and b/mantis_evalkit/lib/python3.10/site-packages/omegaconf/__pycache__/__init__.cpython-310.pyc differ diff --git a/mantis_evalkit/lib/python3.10/site-packages/omegaconf/__pycache__/_impl.cpython-310.pyc b/mantis_evalkit/lib/python3.10/site-packages/omegaconf/__pycache__/_impl.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9565e14afff188220198a8eef7c67811b914078e Binary files /dev/null and b/mantis_evalkit/lib/python3.10/site-packages/omegaconf/__pycache__/_impl.cpython-310.pyc differ diff --git a/mantis_evalkit/lib/python3.10/site-packages/omegaconf/__pycache__/_utils.cpython-310.pyc b/mantis_evalkit/lib/python3.10/site-packages/omegaconf/__pycache__/_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7468c7e1b697ef5940e2cffb1b0520bb3a313657 Binary files /dev/null and b/mantis_evalkit/lib/python3.10/site-packages/omegaconf/__pycache__/_utils.cpython-310.pyc differ diff --git a/mantis_evalkit/lib/python3.10/site-packages/omegaconf/__pycache__/base.cpython-310.pyc b/mantis_evalkit/lib/python3.10/site-packages/omegaconf/__pycache__/base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5ebc937972584f8c44326b10f3b7d4335ae65054 Binary files /dev/null and b/mantis_evalkit/lib/python3.10/site-packages/omegaconf/__pycache__/base.cpython-310.pyc differ diff --git a/mantis_evalkit/lib/python3.10/site-packages/omegaconf/__pycache__/basecontainer.cpython-310.pyc b/mantis_evalkit/lib/python3.10/site-packages/omegaconf/__pycache__/basecontainer.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..2bc8e06011ca1bc4c414658030dd63f7f279af27 Binary files /dev/null and b/mantis_evalkit/lib/python3.10/site-packages/omegaconf/__pycache__/basecontainer.cpython-310.pyc differ diff --git a/mantis_evalkit/lib/python3.10/site-packages/omegaconf/__pycache__/dictconfig.cpython-310.pyc b/mantis_evalkit/lib/python3.10/site-packages/omegaconf/__pycache__/dictconfig.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1401887cc6ed37e62f4dab8cf91a9fd7de683c25 Binary files /dev/null and b/mantis_evalkit/lib/python3.10/site-packages/omegaconf/__pycache__/dictconfig.cpython-310.pyc differ diff --git a/mantis_evalkit/lib/python3.10/site-packages/omegaconf/__pycache__/errors.cpython-310.pyc b/mantis_evalkit/lib/python3.10/site-packages/omegaconf/__pycache__/errors.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1cfb1f530831c023b15745791f6268b988ee6950 Binary files /dev/null and b/mantis_evalkit/lib/python3.10/site-packages/omegaconf/__pycache__/errors.cpython-310.pyc differ diff --git a/mantis_evalkit/lib/python3.10/site-packages/omegaconf/__pycache__/grammar_parser.cpython-310.pyc b/mantis_evalkit/lib/python3.10/site-packages/omegaconf/__pycache__/grammar_parser.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c54be71545efb595eccefec6a435eba89d19d6f1 Binary files /dev/null and b/mantis_evalkit/lib/python3.10/site-packages/omegaconf/__pycache__/grammar_parser.cpython-310.pyc differ diff --git a/mantis_evalkit/lib/python3.10/site-packages/omegaconf/__pycache__/grammar_visitor.cpython-310.pyc b/mantis_evalkit/lib/python3.10/site-packages/omegaconf/__pycache__/grammar_visitor.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1ff36e5533994359199b39b23595c76fc67d8f1f Binary files /dev/null and b/mantis_evalkit/lib/python3.10/site-packages/omegaconf/__pycache__/grammar_visitor.cpython-310.pyc differ diff 
--git a/mantis_evalkit/lib/python3.10/site-packages/omegaconf/__pycache__/listconfig.cpython-310.pyc b/mantis_evalkit/lib/python3.10/site-packages/omegaconf/__pycache__/listconfig.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ed86798d0345bd92449b385d63e13104dbb39db3 Binary files /dev/null and b/mantis_evalkit/lib/python3.10/site-packages/omegaconf/__pycache__/listconfig.cpython-310.pyc differ diff --git a/mantis_evalkit/lib/python3.10/site-packages/omegaconf/__pycache__/nodes.cpython-310.pyc b/mantis_evalkit/lib/python3.10/site-packages/omegaconf/__pycache__/nodes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..28c88b0350174c1267f8fcfa764157a0a744b545 Binary files /dev/null and b/mantis_evalkit/lib/python3.10/site-packages/omegaconf/__pycache__/nodes.cpython-310.pyc differ diff --git a/mantis_evalkit/lib/python3.10/site-packages/omegaconf/__pycache__/omegaconf.cpython-310.pyc b/mantis_evalkit/lib/python3.10/site-packages/omegaconf/__pycache__/omegaconf.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4e8b36d590586fd96c7d519def97e8baa389a07f Binary files /dev/null and b/mantis_evalkit/lib/python3.10/site-packages/omegaconf/__pycache__/omegaconf.cpython-310.pyc differ diff --git a/mantis_evalkit/lib/python3.10/site-packages/omegaconf/__pycache__/version.cpython-310.pyc b/mantis_evalkit/lib/python3.10/site-packages/omegaconf/__pycache__/version.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4fde40df2ebfb956da04029275b96249048850bb Binary files /dev/null and b/mantis_evalkit/lib/python3.10/site-packages/omegaconf/__pycache__/version.cpython-310.pyc differ diff --git a/mantis_evalkit/lib/python3.10/site-packages/omegaconf/base.py b/mantis_evalkit/lib/python3.10/site-packages/omegaconf/base.py new file mode 100644 index 0000000000000000000000000000000000000000..8b013d1d9fac789cc28e8ea0a23e586610a32f43 --- /dev/null +++ 
b/mantis_evalkit/lib/python3.10/site-packages/omegaconf/base.py @@ -0,0 +1,962 @@ +import copy +import sys +from abc import ABC, abstractmethod +from collections import defaultdict +from dataclasses import dataclass, field +from enum import Enum +from typing import Any, Dict, Iterator, List, Optional, Set, Tuple, Type, Union + +from antlr4 import ParserRuleContext + +from ._utils import ( + _DEFAULT_MARKER_, + NoneType, + ValueKind, + _get_value, + _is_interpolation, + _is_missing_value, + _is_special, + format_and_raise, + get_value_kind, + is_union_annotation, + is_valid_value_annotation, + split_key, + type_str, +) +from .errors import ( + ConfigKeyError, + ConfigTypeError, + InterpolationKeyError, + InterpolationResolutionError, + InterpolationToMissingValueError, + InterpolationValidationError, + MissingMandatoryValue, + UnsupportedInterpolationType, + ValidationError, +) +from .grammar.gen.OmegaConfGrammarParser import OmegaConfGrammarParser +from .grammar_parser import parse +from .grammar_visitor import GrammarVisitor + +DictKeyType = Union[str, bytes, int, Enum, float, bool] + + +@dataclass +class Metadata: + + ref_type: Union[Type[Any], Any] + + object_type: Union[Type[Any], Any] + + optional: bool + + key: Any + + # Flags have 3 modes: + # unset : inherit from parent (None if no parent specifies) + # set to true: flag is true + # set to false: flag is false + flags: Optional[Dict[str, bool]] = None + + # If True, when checking the value of a flag, if the flag is not set None is returned + # otherwise, the parent node is queried. 
+ flags_root: bool = False + + resolver_cache: Dict[str, Any] = field(default_factory=lambda: defaultdict(dict)) + + def __post_init__(self) -> None: + if self.flags is None: + self.flags = {} + + @property + def type_hint(self) -> Union[Type[Any], Any]: + """Compute `type_hint` from `self.optional` and `self.ref_type`""" + # For compatibility with pickled OmegaConf objects created using older + # versions of OmegaConf, we store `ref_type` and `object_type` + # separately (rather than storing `type_hint` directly). + if self.optional: + return Optional[self.ref_type] + else: + return self.ref_type + + +@dataclass +class ContainerMetadata(Metadata): + key_type: Any = None + element_type: Any = None + + def __post_init__(self) -> None: + if self.ref_type is None: + self.ref_type = Any + assert self.key_type is Any or isinstance(self.key_type, type) + if self.element_type is not None: + if not is_valid_value_annotation(self.element_type): + raise ValidationError( + f"Unsupported value type: '{type_str(self.element_type, include_module_name=True)}'" + ) + + if self.flags is None: + self.flags = {} + + +class Node(ABC): + _metadata: Metadata + + _parent: Optional["Box"] + _flags_cache: Optional[Dict[str, Optional[bool]]] + + def __init__(self, parent: Optional["Box"], metadata: Metadata): + self.__dict__["_metadata"] = metadata + self.__dict__["_parent"] = parent + self.__dict__["_flags_cache"] = None + + def __getstate__(self) -> Dict[str, Any]: + # Overridden to ensure that the flags cache is cleared on serialization. 
+ state_dict = copy.copy(self.__dict__) + del state_dict["_flags_cache"] + return state_dict + + def __setstate__(self, state_dict: Dict[str, Any]) -> None: + self.__dict__.update(state_dict) + self.__dict__["_flags_cache"] = None + + def _set_parent(self, parent: Optional["Box"]) -> None: + assert parent is None or isinstance(parent, Box) + self.__dict__["_parent"] = parent + self._invalidate_flags_cache() + + def _invalidate_flags_cache(self) -> None: + self.__dict__["_flags_cache"] = None + + def _get_parent(self) -> Optional["Box"]: + parent = self.__dict__["_parent"] + assert parent is None or isinstance(parent, Box) + return parent + + def _get_parent_container(self) -> Optional["Container"]: + """ + Like _get_parent, but returns the grandparent + in the case where `self` is wrapped by a UnionNode. + """ + parent = self.__dict__["_parent"] + assert parent is None or isinstance(parent, Box) + + if isinstance(parent, UnionNode): + grandparent = parent.__dict__["_parent"] + assert grandparent is None or isinstance(grandparent, Container) + return grandparent + else: + assert parent is None or isinstance(parent, Container) + return parent + + def _set_flag( + self, + flags: Union[List[str], str], + values: Union[List[Optional[bool]], Optional[bool]], + ) -> "Node": + if isinstance(flags, str): + flags = [flags] + + if values is None or isinstance(values, bool): + values = [values] + + if len(values) == 1: + values = len(flags) * values + + if len(flags) != len(values): + raise ValueError("Inconsistent lengths of input flag names and values") + + for idx, flag in enumerate(flags): + value = values[idx] + if value is None: + assert self._metadata.flags is not None + if flag in self._metadata.flags: + del self._metadata.flags[flag] + else: + assert self._metadata.flags is not None + self._metadata.flags[flag] = value + self._invalidate_flags_cache() + return self + + def _get_node_flag(self, flag: str) -> Optional[bool]: + """ + :param flag: flag to inspect + 
:return: the state of the flag on this node. + """ + assert self._metadata.flags is not None + return self._metadata.flags.get(flag) + + def _get_flag(self, flag: str) -> Optional[bool]: + cache = self.__dict__["_flags_cache"] + if cache is None: + cache = self.__dict__["_flags_cache"] = {} + + ret = cache.get(flag, _DEFAULT_MARKER_) + if ret is _DEFAULT_MARKER_: + ret = self._get_flag_no_cache(flag) + cache[flag] = ret + assert ret is None or isinstance(ret, bool) + return ret + + def _get_flag_no_cache(self, flag: str) -> Optional[bool]: + """ + Returns True if this config node flag is set + A flag is set if node.set_flag(True) was called + or one if it's parents is flag is set + :return: + """ + flags = self._metadata.flags + assert flags is not None + if flag in flags and flags[flag] is not None: + return flags[flag] + + if self._is_flags_root(): + return None + + parent = self._get_parent() + if parent is None: + return None + else: + # noinspection PyProtectedMember + return parent._get_flag(flag) + + def _format_and_raise( + self, + key: Any, + value: Any, + cause: Exception, + msg: Optional[str] = None, + type_override: Any = None, + ) -> None: + format_and_raise( + node=self, + key=key, + value=value, + msg=str(cause) if msg is None else msg, + cause=cause, + type_override=type_override, + ) + assert False + + @abstractmethod + def _get_full_key(self, key: Optional[Union[DictKeyType, int]]) -> str: + ... 
+ + def _dereference_node(self) -> "Node": + node = self._dereference_node_impl(throw_on_resolution_failure=True) + assert node is not None + return node + + def _maybe_dereference_node( + self, + throw_on_resolution_failure: bool = False, + memo: Optional[Set[int]] = None, + ) -> Optional["Node"]: + return self._dereference_node_impl( + throw_on_resolution_failure=throw_on_resolution_failure, + memo=memo, + ) + + def _dereference_node_impl( + self, + throw_on_resolution_failure: bool, + memo: Optional[Set[int]] = None, + ) -> Optional["Node"]: + if not self._is_interpolation(): + return self + + parent = self._get_parent_container() + if parent is None: + if throw_on_resolution_failure: + raise InterpolationResolutionError( + "Cannot resolve interpolation for a node without a parent" + ) + return None + assert parent is not None + key = self._key() + return parent._resolve_interpolation_from_parse_tree( + parent=parent, + key=key, + value=self, + parse_tree=parse(_get_value(self)), + throw_on_resolution_failure=throw_on_resolution_failure, + memo=memo, + ) + + def _get_root(self) -> "Container": + root: Optional[Box] = self._get_parent() + if root is None: + assert isinstance(self, Container) + return self + assert root is not None and isinstance(root, Box) + while root._get_parent() is not None: + root = root._get_parent() + assert root is not None and isinstance(root, Box) + assert root is not None and isinstance(root, Container) + return root + + def _is_missing(self) -> bool: + """ + Check if the node's value is `???` (does *not* resolve interpolations). + """ + return _is_missing_value(self) + + def _is_none(self) -> bool: + """ + Check if the node's value is `None` (does *not* resolve interpolations). + """ + return self._value() is None + + @abstractmethod + def __eq__(self, other: Any) -> bool: + ... + + @abstractmethod + def __ne__(self, other: Any) -> bool: + ... + + @abstractmethod + def __hash__(self) -> int: + ... 
+ + @abstractmethod + def _value(self) -> Any: + ... + + @abstractmethod + def _set_value(self, value: Any, flags: Optional[Dict[str, bool]] = None) -> None: + ... + + @abstractmethod + def _is_optional(self) -> bool: + ... + + @abstractmethod + def _is_interpolation(self) -> bool: + ... + + def _key(self) -> Any: + return self._metadata.key + + def _set_key(self, key: Any) -> None: + self._metadata.key = key + + def _is_flags_root(self) -> bool: + return self._metadata.flags_root + + def _set_flags_root(self, flags_root: bool) -> None: + if self._metadata.flags_root != flags_root: + self._metadata.flags_root = flags_root + self._invalidate_flags_cache() + + def _has_ref_type(self) -> bool: + return self._metadata.ref_type is not Any + + +class Box(Node): + """ + Base class for nodes that can contain other nodes. + Concrete subclasses include DictConfig, ListConfig, and UnionNode. + """ + + _content: Any + + def __init__(self, parent: Optional["Box"], metadata: Metadata): + super().__init__(parent=parent, metadata=metadata) + self.__dict__["_content"] = None + + def __copy__(self) -> Any: + # real shallow copy is impossible because of the reference to the parent. 
+ return copy.deepcopy(self) + + def _re_parent(self) -> None: + from .dictconfig import DictConfig + from .listconfig import ListConfig + + # update parents of first level Config nodes to self + + if isinstance(self, DictConfig): + content = self.__dict__["_content"] + if isinstance(content, dict): + for _key, value in self.__dict__["_content"].items(): + if value is not None: + value._set_parent(self) + if isinstance(value, Box): + value._re_parent() + elif isinstance(self, ListConfig): + content = self.__dict__["_content"] + if isinstance(content, list): + for item in self.__dict__["_content"]: + if item is not None: + item._set_parent(self) + if isinstance(item, Box): + item._re_parent() + elif isinstance(self, UnionNode): + content = self.__dict__["_content"] + if isinstance(content, Node): + content._set_parent(self) + if isinstance(content, Box): # pragma: no cover + # No coverage here as support for containers inside + # UnionNode is not yet implemented + content._re_parent() + + +class Container(Box): + """ + Container tagging interface + """ + + _metadata: ContainerMetadata + + @abstractmethod + def _get_child( + self, + key: Any, + validate_access: bool = True, + validate_key: bool = True, + throw_on_missing_value: bool = False, + throw_on_missing_key: bool = False, + ) -> Union[Optional[Node], List[Optional[Node]]]: + ... + + @abstractmethod + def _get_node( + self, + key: Any, + validate_access: bool = True, + validate_key: bool = True, + throw_on_missing_value: bool = False, + throw_on_missing_key: bool = False, + ) -> Union[Optional[Node], List[Optional[Node]]]: + ... + + @abstractmethod + def __delitem__(self, key: Any) -> None: + ... + + @abstractmethod + def __setitem__(self, key: Any, value: Any) -> None: + ... + + @abstractmethod + def __iter__(self) -> Iterator[Any]: + ... + + @abstractmethod + def __getitem__(self, key_or_index: Any) -> Any: + ... 
+ + def _resolve_key_and_root(self, key: str) -> Tuple["Container", str]: + orig = key + if not key.startswith("."): + return self._get_root(), key + else: + root: Optional[Container] = self + assert key.startswith(".") + while True: + assert root is not None + key = key[1:] + if not key.startswith("."): + break + root = root._get_parent_container() + if root is None: + raise ConfigKeyError(f"Error resolving key '{orig}'") + + return root, key + + def _select_impl( + self, + key: str, + throw_on_missing: bool, + throw_on_resolution_failure: bool, + memo: Optional[Set[int]] = None, + ) -> Tuple[Optional["Container"], Optional[str], Optional[Node]]: + """ + Select a value using dot separated key sequence + """ + from .omegaconf import _select_one + + if key == "": + return self, "", self + + split = split_key(key) + root: Optional[Container] = self + for i in range(len(split) - 1): + if root is None: + break + + k = split[i] + ret, _ = _select_one( + c=root, + key=k, + throw_on_missing=throw_on_missing, + throw_on_type_error=throw_on_resolution_failure, + ) + if isinstance(ret, Node): + ret = ret._maybe_dereference_node( + throw_on_resolution_failure=throw_on_resolution_failure, + memo=memo, + ) + + if ret is not None and not isinstance(ret, Container): + parent_key = ".".join(split[0 : i + 1]) + child_key = split[i + 1] + raise ConfigTypeError( + f"Error trying to access {key}: node `{parent_key}` " + f"is not a container and thus cannot contain `{child_key}`" + ) + root = ret + + if root is None: + return None, None, None + + last_key = split[-1] + value, _ = _select_one( + c=root, + key=last_key, + throw_on_missing=throw_on_missing, + throw_on_type_error=throw_on_resolution_failure, + ) + if value is None: + return root, last_key, None + + if memo is not None: + vid = id(value) + if vid in memo: + raise InterpolationResolutionError("Recursive interpolation detected") + # push to memo "stack" + memo.add(vid) + + try: + value = root._maybe_resolve_interpolation( + 
parent=root, + key=last_key, + value=value, + throw_on_resolution_failure=throw_on_resolution_failure, + memo=memo, + ) + finally: + if memo is not None: + # pop from memo "stack" + memo.remove(vid) + + return root, last_key, value + + def _resolve_interpolation_from_parse_tree( + self, + parent: Optional["Container"], + value: "Node", + key: Any, + parse_tree: OmegaConfGrammarParser.ConfigValueContext, + throw_on_resolution_failure: bool, + memo: Optional[Set[int]], + ) -> Optional["Node"]: + """ + Resolve an interpolation. + + This happens in two steps: + 1. The parse tree is visited, which outputs either a `Node` (e.g., + for node interpolations "${foo}"), a string (e.g., for string + interpolations "hello ${name}", or any other arbitrary value + (e.g., or custom interpolations "${foo:bar}"). + 2. This output is potentially validated and converted when the node + being resolved (`value`) is typed. + + If an error occurs in one of the above steps, an `InterpolationResolutionError` + (or a subclass of it) is raised, *unless* `throw_on_resolution_failure` is set + to `False` (in which case the return value is `None`). + + :param parent: Parent of the node being resolved. + :param value: Node being resolved. + :param key: The associated key in the parent. + :param parse_tree: The parse tree as obtained from `grammar_parser.parse()`. + :param throw_on_resolution_failure: If `False`, then exceptions raised during + the resolution of the interpolation are silenced, and instead `None` is + returned. + + :return: A `Node` that contains the interpolation result. This may be an existing + node in the config (in the case of a node interpolation "${foo}"), or a new + node that is created to wrap the interpolated value. It is `None` if and only if + `throw_on_resolution_failure` is `False` and an error occurs during resolution. 
+ """ + + try: + resolved = self.resolve_parse_tree( + parse_tree=parse_tree, node=value, key=key, memo=memo + ) + except InterpolationResolutionError: + if throw_on_resolution_failure: + raise + return None + + return self._validate_and_convert_interpolation_result( + parent=parent, + value=value, + key=key, + resolved=resolved, + throw_on_resolution_failure=throw_on_resolution_failure, + ) + + def _validate_and_convert_interpolation_result( + self, + parent: Optional["Container"], + value: "Node", + key: Any, + resolved: Any, + throw_on_resolution_failure: bool, + ) -> Optional["Node"]: + from .nodes import AnyNode, InterpolationResultNode, ValueNode + + # If the output is not a Node already (e.g., because it is the output of a + # custom resolver), then we will need to wrap it within a Node. + must_wrap = not isinstance(resolved, Node) + + # If the node is typed, validate (and possibly convert) the result. + if isinstance(value, ValueNode) and not isinstance(value, AnyNode): + res_value = _get_value(resolved) + try: + conv_value = value.validate_and_convert(res_value) + except ValidationError as e: + if throw_on_resolution_failure: + self._format_and_raise( + key=key, + value=res_value, + cause=e, + msg=f"While dereferencing interpolation '{value}': {e}", + type_override=InterpolationValidationError, + ) + return None + + # If the converted value is of the same type, it means that no conversion + # was actually needed. As a result, we can keep the original `resolved` + # (and otherwise, the converted value must be wrapped into a new node). 
+ if type(conv_value) != type(res_value): + must_wrap = True + resolved = conv_value + + if must_wrap: + return InterpolationResultNode(value=resolved, key=key, parent=parent) + else: + assert isinstance(resolved, Node) + return resolved + + def _validate_not_dereferencing_to_parent(self, node: Node, target: Node) -> None: + parent: Optional[Node] = node + while parent is not None: + if parent is target: + raise InterpolationResolutionError( + "Interpolation to parent node detected" + ) + parent = parent._get_parent() + + def _resolve_node_interpolation( + self, inter_key: str, memo: Optional[Set[int]] + ) -> "Node": + """A node interpolation is of the form `${foo.bar}`""" + try: + root_node, inter_key = self._resolve_key_and_root(inter_key) + except ConfigKeyError as exc: + raise InterpolationKeyError( + f"ConfigKeyError while resolving interpolation: {exc}" + ).with_traceback(sys.exc_info()[2]) + + try: + parent, last_key, value = root_node._select_impl( + inter_key, + throw_on_missing=True, + throw_on_resolution_failure=True, + memo=memo, + ) + except MissingMandatoryValue as exc: + raise InterpolationToMissingValueError( + f"MissingMandatoryValue while resolving interpolation: {exc}" + ).with_traceback(sys.exc_info()[2]) + + if parent is None or value is None: + raise InterpolationKeyError(f"Interpolation key '{inter_key}' not found") + else: + self._validate_not_dereferencing_to_parent(node=self, target=value) + return value + + def _evaluate_custom_resolver( + self, + key: Any, + node: Node, + inter_type: str, + inter_args: Tuple[Any, ...], + inter_args_str: Tuple[str, ...], + ) -> Any: + from omegaconf import OmegaConf + + resolver = OmegaConf._get_resolver(inter_type) + if resolver is not None: + root_node = self._get_root() + return resolver( + root_node, + self, + node, + inter_args, + inter_args_str, + ) + else: + raise UnsupportedInterpolationType( + f"Unsupported interpolation type {inter_type}" + ) + + def _maybe_resolve_interpolation( + self, + 
parent: Optional["Container"], + key: Any, + value: Node, + throw_on_resolution_failure: bool, + memo: Optional[Set[int]] = None, + ) -> Optional[Node]: + value_kind = get_value_kind(value) + if value_kind != ValueKind.INTERPOLATION: + return value + + parse_tree = parse(_get_value(value)) + return self._resolve_interpolation_from_parse_tree( + parent=parent, + value=value, + key=key, + parse_tree=parse_tree, + throw_on_resolution_failure=throw_on_resolution_failure, + memo=memo if memo is not None else set(), + ) + + def resolve_parse_tree( + self, + parse_tree: ParserRuleContext, + node: Node, + memo: Optional[Set[int]] = None, + key: Optional[Any] = None, + ) -> Any: + """ + Resolve a given parse tree into its value. + + We make no assumption here on the type of the tree's root, so that the + return value may be of any type. + """ + + def node_interpolation_callback( + inter_key: str, memo: Optional[Set[int]] + ) -> Optional["Node"]: + return self._resolve_node_interpolation(inter_key=inter_key, memo=memo) + + def resolver_interpolation_callback( + name: str, args: Tuple[Any, ...], args_str: Tuple[str, ...] + ) -> Any: + return self._evaluate_custom_resolver( + key=key, + node=node, + inter_type=name, + inter_args=args, + inter_args_str=args_str, + ) + + visitor = GrammarVisitor( + node_interpolation_callback=node_interpolation_callback, + resolver_interpolation_callback=resolver_interpolation_callback, + memo=memo, + ) + try: + return visitor.visit(parse_tree) + except InterpolationResolutionError: + raise + except Exception as exc: + # Other kinds of exceptions are wrapped in an `InterpolationResolutionError`. + raise InterpolationResolutionError( + f"{type(exc).__name__} raised while resolving interpolation: {exc}" + ).with_traceback(sys.exc_info()[2]) + + def _invalidate_flags_cache(self) -> None: + from .dictconfig import DictConfig + from .listconfig import ListConfig + + # invalidate subtree cache only if the cache is initialized in this node. 
+ + if self.__dict__["_flags_cache"] is not None: + self.__dict__["_flags_cache"] = None + if isinstance(self, DictConfig): + content = self.__dict__["_content"] + if isinstance(content, dict): + for value in self.__dict__["_content"].values(): + value._invalidate_flags_cache() + elif isinstance(self, ListConfig): + content = self.__dict__["_content"] + if isinstance(content, list): + for item in self.__dict__["_content"]: + item._invalidate_flags_cache() + + +class SCMode(Enum): + DICT = 1 # Convert to plain dict + DICT_CONFIG = 2 # Keep as OmegaConf DictConfig + INSTANTIATE = 3 # Create a dataclass or attrs class instance + + +class UnionNode(Box): + """ + This class handles Union type hints. The `_content` attribute is either a + child node that is compatible with the given Union ref_type, or it is a + special value (None or MISSING or interpolation). + + Much of the logic for e.g. value assignment and type validation is + delegated to the child node. As such, UnionNode functions as a + "pass-through" node. User apps and downstream libraries should not need to + know about UnionNode (assuming they only use OmegaConf's public API). + """ + + _parent: Optional[Container] + _content: Union[Node, None, str] + + def __init__( + self, + content: Any, + ref_type: Any, + is_optional: bool = True, + key: Any = None, + parent: Optional[Box] = None, + ) -> None: + try: + if not is_union_annotation(ref_type): # pragma: no cover + msg = ( + f"UnionNode got unexpected ref_type {ref_type}. 
Please file a bug" + + " report at https://github.com/omry/omegaconf/issues" + ) + raise AssertionError(msg) + if not isinstance(parent, (Container, NoneType)): + raise ConfigTypeError("Parent type is not omegaconf.Container") + super().__init__( + parent=parent, + metadata=Metadata( + ref_type=ref_type, + object_type=None, + optional=is_optional, + key=key, + flags={"convert": False}, + ), + ) + self._set_value(content) + except Exception as ex: + format_and_raise(node=None, key=key, value=content, msg=str(ex), cause=ex) + + def _get_full_key(self, key: Optional[Union[DictKeyType, int]]) -> str: + parent = self._get_parent() + if parent is None: + if self._metadata.key is None: + return "" + else: + return str(self._metadata.key) + else: + return parent._get_full_key(self._metadata.key) + + def __eq__(self, other: Any) -> bool: + content = self.__dict__["_content"] + if isinstance(content, Node): + ret = content.__eq__(other) + elif isinstance(other, Node): + ret = other.__eq__(content) + else: + ret = content.__eq__(other) + assert isinstance(ret, (bool, type(NotImplemented))) + return ret + + def __ne__(self, other: Any) -> bool: + x = self.__eq__(other) + if x is NotImplemented: + return NotImplemented + return not x + + def __hash__(self) -> int: + return hash(self.__dict__["_content"]) + + def _value(self) -> Union[Node, None, str]: + content = self.__dict__["_content"] + assert isinstance(content, (Node, NoneType, str)) + return content + + def _set_value(self, value: Any, flags: Optional[Dict[str, bool]] = None) -> None: + previous_content = self.__dict__["_content"] + previous_metadata = self.__dict__["_metadata"] + try: + self._set_value_impl(value, flags) + except Exception as e: + self.__dict__["_content"] = previous_content + self.__dict__["_metadata"] = previous_metadata + raise e + + def _set_value_impl( + self, value: Any, flags: Optional[Dict[str, bool]] = None + ) -> None: + from omegaconf.omegaconf import _node_wrap + + ref_type = 
self._metadata.ref_type + type_hint = self._metadata.type_hint + + value = _get_value(value) + if _is_special(value): + assert isinstance(value, (str, NoneType)) + if value is None: + if not self._is_optional(): + raise ValidationError( + f"Value '$VALUE' is incompatible with type hint '{type_str(type_hint)}'" + ) + self.__dict__["_content"] = value + elif isinstance(value, Container): + raise ValidationError( + f"Cannot assign container '$VALUE' of type '$VALUE_TYPE' to {type_str(type_hint)}" + ) + else: + for candidate_ref_type in ref_type.__args__: + try: + self.__dict__["_content"] = _node_wrap( + value=value, + ref_type=candidate_ref_type, + is_optional=False, + key=None, + parent=self, + ) + break + except ValidationError: + continue + else: + raise ValidationError( + f"Value '$VALUE' of type '$VALUE_TYPE' is incompatible with type hint '{type_str(type_hint)}'" + ) + + def _is_optional(self) -> bool: + return self.__dict__["_metadata"].optional is True + + def _is_interpolation(self) -> bool: + return _is_interpolation(self.__dict__["_content"]) + + def __str__(self) -> str: + return str(self.__dict__["_content"]) + + def __repr__(self) -> str: + return repr(self.__dict__["_content"]) + + def __deepcopy__(self, memo: Dict[int, Any]) -> "UnionNode": + res = object.__new__(type(self)) + for key, value in self.__dict__.items(): + if key not in ("_content", "_parent"): + res.__dict__[key] = copy.deepcopy(value, memo=memo) + + src_content = self.__dict__["_content"] + if isinstance(src_content, Node): + old_parent = src_content.__dict__["_parent"] + try: + src_content.__dict__["_parent"] = None + content_copy = copy.deepcopy(src_content, memo=memo) + content_copy.__dict__["_parent"] = res + finally: + src_content.__dict__["_parent"] = old_parent + else: + # None and strings can be assigned as is + content_copy = src_content + + res.__dict__["_content"] = content_copy + res.__dict__["_parent"] = self.__dict__["_parent"] + return res diff --git 
import copy
import sys
from abc import ABC, abstractmethod
from enum import Enum
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Tuple, Union

import yaml

from ._utils import (
    _DEFAULT_MARKER_,
    ValueKind,
    _ensure_container,
    _get_value,
    _is_interpolation,
    _is_missing_value,
    _is_none,
    _is_special,
    _resolve_optional,
    get_structured_config_data,
    get_type_hint,
    get_value_kind,
    get_yaml_loader,
    is_container_annotation,
    is_dict_annotation,
    is_list_annotation,
    is_primitive_dict,
    is_primitive_type_annotation,
    is_structured_config,
    is_tuple_annotation,
    is_union_annotation,
)
from .base import (
    Box,
    Container,
    ContainerMetadata,
    DictKeyType,
    Node,
    SCMode,
    UnionNode,
)
from .errors import (
    ConfigCycleDetectedException,
    ConfigTypeError,
    InterpolationResolutionError,
    KeyValidationError,
    MissingMandatoryValue,
    OmegaConfBaseException,
    ReadonlyConfigError,
    ValidationError,
)

if TYPE_CHECKING:
    from .dictconfig import DictConfig  # pragma: no cover


class BaseContainer(Container, ABC):
    """Common behavior shared by DictConfig and ListConfig."""

    # Shared, class-level registry of resolver callables keyed by name.
    _resolvers: ClassVar[Dict[str, Any]] = {}

    def __init__(self, parent: Optional[Box], metadata: ContainerMetadata):
        if not (parent is None or isinstance(parent, Box)):
            raise ConfigTypeError("Parent type is not omegaconf.Box")
        super().__init__(parent=parent, metadata=metadata)

    def _get_child(
        self,
        key: Any,
        validate_access: bool = True,
        validate_key: bool = True,
        throw_on_missing_value: bool = False,
        throw_on_missing_key: bool = False,
    ) -> Union[Optional[Node], List[Optional[Node]]]:
        """Like _get_node, passing through to the nearest concrete Node."""
        child = self._get_node(
            key=key,
            validate_access=validate_access,
            validate_key=validate_key,
            throw_on_missing_value=throw_on_missing_value,
            throw_on_missing_key=throw_on_missing_key,
        )
        # Unwrap a UnionNode to the concrete node it currently holds.
        if isinstance(child, UnionNode) and not _is_special(child):
            value = child._value()
            assert isinstance(value, Node) and not isinstance(value, UnionNode)
            child = value
        return child

    def _resolve_with_default(
        self,
        key: Union[DictKeyType, int],
        value: Node,
        default_value: Any = _DEFAULT_MARKER_,
    ) -> Any:
        """returns the value with the specified key, like obj.key and obj['key']"""
        if _is_missing_value(value):
            if default_value is not _DEFAULT_MARKER_:
                return default_value
            raise MissingMandatoryValue("Missing mandatory value: $FULL_KEY")

        resolved_node = self._maybe_resolve_interpolation(
            parent=self,
            key=key,
            value=value,
            throw_on_resolution_failure=True,
        )

        return _get_value(resolved_node)

    def __str__(self) -> str:
        return self.__repr__()

    def __repr__(self) -> str:
        if self.__dict__["_content"] is None:
            return "None"
        elif self._is_interpolation() or self._is_missing():
            v = self.__dict__["_content"]
            return f"'{v}'"
        else:
            return self.__dict__["_content"].__repr__()  # type: ignore

    # Support pickle
    def __getstate__(self) -> Dict[str, Any]:
        dict_copy = copy.copy(self.__dict__)

        # no need to serialize the flags cache, it can be re-constructed later
        dict_copy.pop("_flags_cache", None)

        dict_copy["_metadata"] = copy.copy(dict_copy["_metadata"])
        ref_type = self._metadata.ref_type
        if is_container_annotation(ref_type):
            # Parameterized Dict/List annotations do not pickle portably;
            # store the bare generic and restore parameters in __setstate__.
            if is_dict_annotation(ref_type):
                dict_copy["_metadata"].ref_type = Dict
            elif is_list_annotation(ref_type):
                dict_copy["_metadata"].ref_type = List
            else:
                assert False
        if sys.version_info < (3, 7):  # pragma: no cover
            element_type = self._metadata.element_type
            if is_union_annotation(element_type):
                raise OmegaConfBaseException(
                    "Serializing structured configs with `Union` element type requires python >= 3.7"
                )
        return dict_copy

    # Support pickle
    def __setstate__(self, d: Dict[str, Any]) -> None:
        from omegaconf import DictConfig
        from omegaconf._utils import is_generic_dict, is_generic_list

        if isinstance(self, DictConfig):
            key_type = d["_metadata"].key_type

            # backward compatibility to load OmegaConf 2.0 configs
            if key_type is None:
                key_type = Any
                d["_metadata"].key_type = key_type

        element_type = d["_metadata"].element_type

        # backward compatibility to load OmegaConf 2.0 configs
        if element_type is None:
            element_type = Any
            d["_metadata"].element_type = element_type

        ref_type = d["_metadata"].ref_type
        if is_container_annotation(ref_type):
            # Re-parameterize the bare generic stored by __getstate__.
            if is_generic_dict(ref_type):
                d["_metadata"].ref_type = Dict[key_type, element_type]  # type: ignore
            elif is_generic_list(ref_type):
                d["_metadata"].ref_type = List[element_type]  # type: ignore
            else:
                assert False

        d["_flags_cache"] = None
        self.__dict__.update(d)

    @abstractmethod
    def __delitem__(self, key: Any) -> None:
        ...
+ + def __len__(self) -> int: + if self._is_none() or self._is_missing() or self._is_interpolation(): + return 0 + content = self.__dict__["_content"] + return len(content) + + def merge_with_cli(self) -> None: + args_list = sys.argv[1:] + self.merge_with_dotlist(args_list) + + def merge_with_dotlist(self, dotlist: List[str]) -> None: + from omegaconf import OmegaConf + + def fail() -> None: + raise ValueError("Input list must be a list or a tuple of strings") + + if not isinstance(dotlist, (list, tuple)): + fail() + + for arg in dotlist: + if not isinstance(arg, str): + fail() + + idx = arg.find("=") + if idx == -1: + key = arg + value = None + else: + key = arg[0:idx] + value = arg[idx + 1 :] + value = yaml.load(value, Loader=get_yaml_loader()) + + OmegaConf.update(self, key, value) + + def is_empty(self) -> bool: + """return true if config is empty""" + return len(self.__dict__["_content"]) == 0 + + @staticmethod + def _to_content( + conf: Container, + resolve: bool, + throw_on_missing: bool, + enum_to_str: bool = False, + structured_config_mode: SCMode = SCMode.DICT, + ) -> Union[None, Any, str, Dict[DictKeyType, Any], List[Any]]: + from omegaconf import MISSING, DictConfig, ListConfig + + def convert(val: Node) -> Any: + value = val._value() + if enum_to_str and isinstance(value, Enum): + value = f"{value.name}" + + return value + + def get_node_value(key: Union[DictKeyType, int]) -> Any: + try: + node = conf._get_child(key, throw_on_missing_value=throw_on_missing) + except MissingMandatoryValue as e: + conf._format_and_raise(key=key, value=None, cause=e) + assert isinstance(node, Node) + if resolve: + try: + node = node._dereference_node() + except InterpolationResolutionError as e: + conf._format_and_raise(key=key, value=None, cause=e) + + if isinstance(node, Container): + value = BaseContainer._to_content( + node, + resolve=resolve, + throw_on_missing=throw_on_missing, + enum_to_str=enum_to_str, + structured_config_mode=structured_config_mode, + ) + else: + 
value = convert(node) + return value + + if conf._is_none(): + return None + elif conf._is_missing(): + if throw_on_missing: + conf._format_and_raise( + key=None, + value=None, + cause=MissingMandatoryValue("Missing mandatory value"), + ) + else: + return MISSING + elif not resolve and conf._is_interpolation(): + inter = conf._value() + assert isinstance(inter, str) + return inter + + if resolve: + _conf = conf._dereference_node() + assert isinstance(_conf, Container) + conf = _conf + + if isinstance(conf, DictConfig): + if ( + conf._metadata.object_type not in (dict, None) + and structured_config_mode == SCMode.DICT_CONFIG + ): + return conf + if structured_config_mode == SCMode.INSTANTIATE and is_structured_config( + conf._metadata.object_type + ): + return conf._to_object() + + retdict: Dict[DictKeyType, Any] = {} + for key in conf.keys(): + value = get_node_value(key) + if enum_to_str and isinstance(key, Enum): + key = f"{key.name}" + retdict[key] = value + return retdict + elif isinstance(conf, ListConfig): + retlist: List[Any] = [] + for index in range(len(conf)): + item = get_node_value(index) + retlist.append(item) + + return retlist + assert False + + @staticmethod + def _map_merge(dest: "BaseContainer", src: "BaseContainer") -> None: + """merge src into dest and return a new copy, does not modified input""" + from omegaconf import AnyNode, DictConfig, ValueNode + + assert isinstance(dest, DictConfig) + assert isinstance(src, DictConfig) + src_type = src._metadata.object_type + src_ref_type = get_type_hint(src) + assert src_ref_type is not None + + # If source DictConfig is: + # - None => set the destination DictConfig to None + # - an interpolation => set the destination DictConfig to be the same interpolation + if src._is_none() or src._is_interpolation(): + dest._set_value(src._value()) + _update_types(node=dest, ref_type=src_ref_type, object_type=src_type) + return + + dest._validate_merge(value=src) + + def expand(node: Container) -> None: + rt = 
node._metadata.ref_type + val: Any + if rt is not Any: + if is_dict_annotation(rt): + val = {} + elif is_list_annotation(rt) or is_tuple_annotation(rt): + val = [] + else: + val = rt + elif isinstance(node, DictConfig): + val = {} + else: + assert False + + node._set_value(val) + + if ( + src._is_missing() + and not dest._is_missing() + and is_structured_config(src_ref_type) + ): + # Replace `src` with a prototype of its corresponding structured config + # whose fields are all missing (to avoid overwriting fields in `dest`). + assert src_type is None # src missing, so src's object_type should be None + src_type = src_ref_type + src = _create_structured_with_missing_fields( + ref_type=src_ref_type, object_type=src_type + ) + + if (dest._is_interpolation() or dest._is_missing()) and not src._is_missing(): + expand(dest) + + src_items = list(src) if not src._is_missing() else [] + for key in src_items: + src_node = src._get_node(key, validate_access=False) + dest_node = dest._get_node(key, validate_access=False) + assert isinstance(src_node, Node) + assert dest_node is None or isinstance(dest_node, Node) + src_value = _get_value(src_node) + + src_vk = get_value_kind(src_node) + src_node_missing = src_vk is ValueKind.MANDATORY_MISSING + + if isinstance(dest_node, DictConfig): + dest_node._validate_merge(value=src_node) + + if ( + isinstance(dest_node, Container) + and dest_node._is_none() + and not src_node_missing + and not _is_none(src_node, resolve=True) + ): + expand(dest_node) + + if dest_node is not None and dest_node._is_interpolation(): + target_node = dest_node._maybe_dereference_node() + if isinstance(target_node, Container): + dest[key] = target_node + dest_node = dest._get_node(key) + + is_optional, et = _resolve_optional(dest._metadata.element_type) + if dest_node is None and is_structured_config(et) and not src_node_missing: + # merging into a new node. 
Use element_type as a base + dest[key] = DictConfig( + et, parent=dest, ref_type=et, is_optional=is_optional + ) + dest_node = dest._get_node(key) + + if dest_node is not None: + if isinstance(dest_node, BaseContainer): + if isinstance(src_node, BaseContainer): + dest_node._merge_with(src_node) + elif not src_node_missing: + dest.__setitem__(key, src_node) + else: + if isinstance(src_node, BaseContainer): + dest.__setitem__(key, src_node) + else: + assert isinstance(dest_node, (ValueNode, UnionNode)) + assert isinstance(src_node, (ValueNode, UnionNode)) + try: + if isinstance(dest_node, AnyNode): + if src_node_missing: + node = copy.copy(src_node) + # if src node is missing, use the value from the dest_node, + # but validate it against the type of the src node before assigment + node._set_value(dest_node._value()) + else: + node = src_node + dest.__setitem__(key, node) + else: + if not src_node_missing: + dest_node._set_value(src_value) + + except (ValidationError, ReadonlyConfigError) as e: + dest._format_and_raise(key=key, value=src_value, cause=e) + else: + from omegaconf import open_dict + + if is_structured_config(src_type): + # verified to be compatible above in _validate_merge + with open_dict(dest): + dest[key] = src._get_node(key) + else: + dest[key] = src._get_node(key) + + _update_types(node=dest, ref_type=src_ref_type, object_type=src_type) + + # explicit flags on the source config are replacing the flag values in the destination + flags = src._metadata.flags + assert flags is not None + for flag, value in flags.items(): + if value is not None: + dest._set_flag(flag, value) + + @staticmethod + def _list_merge(dest: Any, src: Any) -> None: + from omegaconf import DictConfig, ListConfig, OmegaConf + + assert isinstance(dest, ListConfig) + assert isinstance(src, ListConfig) + + if src._is_none(): + dest._set_value(None) + elif src._is_missing(): + # do not change dest if src is MISSING. 
+ if dest._metadata.element_type is Any: + dest._metadata.element_type = src._metadata.element_type + elif src._is_interpolation(): + dest._set_value(src._value()) + else: + temp_target = ListConfig(content=[], parent=dest._get_parent()) + temp_target.__dict__["_metadata"] = copy.deepcopy( + dest.__dict__["_metadata"] + ) + is_optional, et = _resolve_optional(dest._metadata.element_type) + if is_structured_config(et): + prototype = DictConfig(et, ref_type=et, is_optional=is_optional) + for item in src._iter_ex(resolve=False): + if isinstance(item, DictConfig): + item = OmegaConf.merge(prototype, item) + temp_target.append(item) + else: + for item in src._iter_ex(resolve=False): + temp_target.append(item) + + dest.__dict__["_content"] = temp_target.__dict__["_content"] + + # explicit flags on the source config are replacing the flag values in the destination + flags = src._metadata.flags + assert flags is not None + for flag, value in flags.items(): + if value is not None: + dest._set_flag(flag, value) + + def merge_with( + self, + *others: Union[ + "BaseContainer", Dict[str, Any], List[Any], Tuple[Any, ...], Any + ], + ) -> None: + try: + self._merge_with(*others) + except Exception as e: + self._format_and_raise(key=None, value=None, cause=e) + + def _merge_with( + self, + *others: Union[ + "BaseContainer", Dict[str, Any], List[Any], Tuple[Any, ...], Any + ], + ) -> None: + from .dictconfig import DictConfig + from .listconfig import ListConfig + + """merge a list of other Config objects into this one, overriding as needed""" + for other in others: + if other is None: + raise ValueError("Cannot merge with a None config") + + my_flags = {} + if self._get_flag("allow_objects") is True: + my_flags = {"allow_objects": True} + other = _ensure_container(other, flags=my_flags) + + if isinstance(self, DictConfig) and isinstance(other, DictConfig): + BaseContainer._map_merge(self, other) + elif isinstance(self, ListConfig) and isinstance(other, ListConfig): + 
BaseContainer._list_merge(self, other) + else: + raise TypeError("Cannot merge DictConfig with ListConfig") + + # recursively correct the parent hierarchy after the merge + self._re_parent() + + # noinspection PyProtectedMember + def _set_item_impl(self, key: Any, value: Any) -> None: + """ + Changes the value of the node key with the desired value. If the node key doesn't + exist it creates a new one. + """ + from .nodes import AnyNode, ValueNode + + if isinstance(value, Node): + do_deepcopy = not self._get_flag("no_deepcopy_set_nodes") + if not do_deepcopy and isinstance(value, Box): + # if value is from the same config, perform a deepcopy no matter what. + if self._get_root() is value._get_root(): + do_deepcopy = True + + if do_deepcopy: + value = copy.deepcopy(value) + value._set_parent(None) + + try: + old = value._key() + value._set_key(key) + self._validate_set(key, value) + finally: + value._set_key(old) + else: + self._validate_set(key, value) + + if self._get_flag("readonly"): + raise ReadonlyConfigError("Cannot change read-only config container") + + input_is_node = isinstance(value, Node) + target_node_ref = self._get_node(key) + assert target_node_ref is None or isinstance(target_node_ref, Node) + + input_is_typed_vnode = isinstance(value, ValueNode) and not isinstance( + value, AnyNode + ) + + def get_target_type_hint(val: Any) -> Any: + if not is_structured_config(val): + type_hint = self._metadata.element_type + else: + target = self._get_node(key) + if target is None: + type_hint = self._metadata.element_type + else: + assert isinstance(target, Node) + type_hint = target._metadata.type_hint + return type_hint + + target_type_hint = get_target_type_hint(value) + _, target_ref_type = _resolve_optional(target_type_hint) + + def assign(value_key: Any, val: Node) -> None: + assert val._get_parent() is None + v = val + v._set_parent(self) + v._set_key(value_key) + _deep_update_type_hint(node=v, type_hint=self._metadata.element_type) + 
self.__dict__["_content"][value_key] = v + + if input_is_typed_vnode and not is_union_annotation(target_ref_type): + assign(key, value) + else: + # input is not a ValueNode, can be primitive or box + + special_value = _is_special(value) + # We use the `Node._set_value` method if the target node exists and: + # 1. the target has an explicit ref_type, or + # 2. the target is an AnyNode and the input is a primitive type. + should_set_value = target_node_ref is not None and ( + target_node_ref._has_ref_type() + or ( + isinstance(target_node_ref, AnyNode) + and is_primitive_type_annotation(value) + ) + ) + if should_set_value: + if special_value and isinstance(value, Node): + value = value._value() + self.__dict__["_content"][key]._set_value(value) + elif input_is_node: + if ( + special_value + and ( + is_container_annotation(target_ref_type) + or is_structured_config(target_ref_type) + ) + or is_primitive_type_annotation(target_ref_type) + or is_union_annotation(target_ref_type) + ): + value = _get_value(value) + self._wrap_value_and_set(key, value, target_type_hint) + else: + assign(key, value) + else: + self._wrap_value_and_set(key, value, target_type_hint) + + def _wrap_value_and_set(self, key: Any, val: Any, type_hint: Any) -> None: + from omegaconf.omegaconf import _maybe_wrap + + is_optional, ref_type = _resolve_optional(type_hint) + + try: + wrapped = _maybe_wrap( + ref_type=ref_type, + key=key, + value=val, + is_optional=is_optional, + parent=self, + ) + except ValidationError as e: + self._format_and_raise(key=key, value=val, cause=e) + self.__dict__["_content"][key] = wrapped + + @staticmethod + def _item_eq( + c1: Container, + k1: Union[DictKeyType, int], + c2: Container, + k2: Union[DictKeyType, int], + ) -> bool: + v1 = c1._get_child(k1) + v2 = c2._get_child(k2) + assert v1 is not None and v2 is not None + + assert isinstance(v1, Node) + assert isinstance(v2, Node) + + if v1._is_none() and v2._is_none(): + return True + + if v1._is_missing() and 
v2._is_missing(): + return True + + v1_inter = v1._is_interpolation() + v2_inter = v2._is_interpolation() + dv1: Optional[Node] = v1 + dv2: Optional[Node] = v2 + + if v1_inter: + dv1 = v1._maybe_dereference_node() + if v2_inter: + dv2 = v2._maybe_dereference_node() + + if v1_inter and v2_inter: + if dv1 is None or dv2 is None: + return v1 == v2 + else: + # both are not none, if both are containers compare as container + if isinstance(dv1, Container) and isinstance(dv2, Container): + if dv1 != dv2: + return False + dv1 = _get_value(dv1) + dv2 = _get_value(dv2) + return dv1 == dv2 + elif not v1_inter and not v2_inter: + v1 = _get_value(v1) + v2 = _get_value(v2) + ret = v1 == v2 + assert isinstance(ret, bool) + return ret + else: + dv1 = _get_value(dv1) + dv2 = _get_value(dv2) + ret = dv1 == dv2 + assert isinstance(ret, bool) + return ret + + def _is_optional(self) -> bool: + return self.__dict__["_metadata"].optional is True + + def _is_interpolation(self) -> bool: + return _is_interpolation(self.__dict__["_content"]) + + @abstractmethod + def _validate_get(self, key: Any, value: Any = None) -> None: + ... + + @abstractmethod + def _validate_set(self, key: Any, value: Any) -> None: + ... 
+ + def _value(self) -> Any: + return self.__dict__["_content"] + + def _get_full_key(self, key: Union[DictKeyType, int, slice, None]) -> str: + from .listconfig import ListConfig + from .omegaconf import _select_one + + if not isinstance(key, (int, str, Enum, float, bool, slice, bytes, type(None))): + return "" + + def _slice_to_str(x: slice) -> str: + if x.step is not None: + return f"{x.start}:{x.stop}:{x.step}" + else: + return f"{x.start}:{x.stop}" + + def prepand( + full_key: str, + parent_type: Any, + cur_type: Any, + key: Optional[Union[DictKeyType, int, slice]], + ) -> str: + if key is None: + return full_key + + if isinstance(key, slice): + key = _slice_to_str(key) + elif isinstance(key, Enum): + key = key.name + else: + key = str(key) + + assert isinstance(key, str) + + if issubclass(parent_type, ListConfig): + if full_key != "": + if issubclass(cur_type, ListConfig): + full_key = f"[{key}]{full_key}" + else: + full_key = f"[{key}].{full_key}" + else: + full_key = f"[{key}]" + else: + if full_key == "": + full_key = key + else: + if issubclass(cur_type, ListConfig): + full_key = f"{key}{full_key}" + else: + full_key = f"{key}.{full_key}" + return full_key + + if key is not None and key != "": + assert isinstance(self, Container) + cur, _ = _select_one( + c=self, key=str(key), throw_on_missing=False, throw_on_type_error=False + ) + if cur is None: + cur = self + full_key = prepand("", type(cur), None, key) + if cur._key() is not None: + full_key = prepand( + full_key, type(cur._get_parent()), type(cur), cur._key() + ) + else: + full_key = prepand("", type(cur._get_parent()), type(cur), cur._key()) + else: + cur = self + if cur._key() is None: + return "" + full_key = self._key() + + assert cur is not None + memo = {id(cur)} # remember already visited nodes so as to detect cycles + while cur._get_parent() is not None: + cur = cur._get_parent() + if id(cur) in memo: + raise ConfigCycleDetectedException( + f"Cycle when iterating over parents of key 
def _create_structured_with_missing_fields(
    ref_type: type, object_type: Optional[type] = None
) -> "DictConfig":
    """Create a DictConfig for a structured config with every field set to MISSING."""
    from . import MISSING, DictConfig

    cfg_data = get_structured_config_data(ref_type)
    for v in cfg_data.values():
        v._set_value(MISSING)

    cfg = DictConfig(cfg_data)
    cfg._metadata.optional, cfg._metadata.ref_type = _resolve_optional(ref_type)
    cfg._metadata.object_type = object_type

    return cfg


def _update_types(node: Node, ref_type: Any, object_type: Optional[type]) -> None:
    """Propagate object_type and (if still Any) ref_type onto node after a merge."""
    if object_type is not None and not is_primitive_dict(object_type):
        node._metadata.object_type = object_type

    if node._metadata.ref_type is Any:
        _deep_update_type_hint(node, ref_type)


def _deep_update_type_hint(node: Node, type_hint: Any) -> None:
    """Ensure node is compatible with type_hint, mutating if necessary."""
    from omegaconf import DictConfig, ListConfig

    from ._utils import get_dict_key_value_types, get_list_element_type

    if type_hint is Any:
        return

    _shallow_validate_type_hint(node, type_hint)

    new_is_optional, new_ref_type = _resolve_optional(type_hint)
    node._metadata.ref_type = new_ref_type
    node._metadata.optional = new_is_optional

    if is_list_annotation(new_ref_type) and isinstance(node, ListConfig):
        new_element_type = get_list_element_type(new_ref_type)
        node._metadata.element_type = new_element_type
        if not _is_special(node):
            # Recurse into every element so nested hints are updated too.
            for i in range(len(node)):
                _deep_update_subnode(node, i, new_element_type)

    if is_dict_annotation(new_ref_type) and isinstance(node, DictConfig):
        new_key_type, new_element_type = get_dict_key_value_types(new_ref_type)
        node._metadata.key_type = new_key_type
        node._metadata.element_type = new_element_type
        if not _is_special(node):
            for key in node:
                if new_key_type is not Any and not isinstance(key, new_key_type):
                    raise KeyValidationError(
                        f"Key {key!r} ({type(key).__name__}) is incompatible"
                        + f" with key type hint '{new_key_type.__name__}'"
                    )
                _deep_update_subnode(node, key, new_element_type)


def _deep_update_subnode(node: BaseContainer, key: Any, value_type_hint: Any) -> None:
    """Get node[key] and ensure it is compatible with value_type_hint, mutating if necessary."""
    subnode = node._get_node(key)
    assert isinstance(subnode, Node)
    if _is_special(subnode):
        # Ensure special values are wrapped in a Node subclass that
        # is compatible with the type hint.
        node._wrap_value_and_set(key, subnode._value(), value_type_hint)
        subnode = node._get_node(key)
    assert isinstance(subnode, Node)
    _deep_update_type_hint(subnode, value_type_hint)


def _shallow_validate_type_hint(node: Node, type_hint: Any) -> None:
    """Error if node's type, content and metadata are not compatible with type_hint."""
    from omegaconf import DictConfig, ListConfig, ValueNode

    is_optional, ref_type = _resolve_optional(type_hint)

    vk = get_value_kind(node)

    if node._is_none():
        if not is_optional:
            value = _get_value(node)
            raise ValidationError(
                f"Value {value!r} ({type(value).__name__})"
                + f" is incompatible with type hint '{ref_type.__name__}'"
            )
        return
    elif vk in (ValueKind.MANDATORY_MISSING, ValueKind.INTERPOLATION):
        # MISSING and interpolations are validated lazily, on access.
        return
    elif vk == ValueKind.VALUE:
        if is_primitive_type_annotation(ref_type) and isinstance(node, ValueNode):
            value = node._value()
            if not isinstance(value, ref_type):
                raise ValidationError(
                    f"Value {value!r} ({type(value).__name__})"
                    + f" is incompatible with type hint '{ref_type.__name__}'"
                )
        elif is_structured_config(ref_type) and isinstance(node, DictConfig):
            return
        elif is_dict_annotation(ref_type) and isinstance(node, DictConfig):
            return
        elif is_list_annotation(ref_type) and isinstance(node, ListConfig):
            return
        else:
            if isinstance(node, ValueNode):
                value = node._value()
                raise ValidationError(
                    f"Value {value!r} ({type(value).__name__})"
                    + f" is incompatible with type hint '{ref_type}'"
                )
            else:
                raise ValidationError(
                    f"'{type(node).__name__}' is incompatible"
                    + f" with type hint '{ref_type}'"
                )

    else:
        assert False
Union[Any, Type[Any]] = Any, + key_type: Union[Any, Type[Any]] = Any, + element_type: Union[Any, Type[Any]] = Any, + is_optional: bool = True, + flags: Optional[Dict[str, bool]] = None, + ) -> None: + try: + if isinstance(content, DictConfig): + if flags is None: + flags = content._metadata.flags + super().__init__( + parent=parent, + metadata=ContainerMetadata( + key=key, + optional=is_optional, + ref_type=ref_type, + object_type=dict, + key_type=key_type, + element_type=element_type, + flags=flags, + ), + ) + + if not _valid_dict_key_annotation_type(key_type): + raise KeyValidationError(f"Unsupported key type {key_type}") + + if is_structured_config(content) or is_structured_config(ref_type): + self._set_value(content, flags=flags) + if is_structured_config_frozen(content) or is_structured_config_frozen( + ref_type + ): + self._set_flag("readonly", True) + + else: + if isinstance(content, DictConfig): + metadata = copy.deepcopy(content._metadata) + metadata.key = key + metadata.ref_type = ref_type + metadata.optional = is_optional + metadata.element_type = element_type + metadata.key_type = key_type + self.__dict__["_metadata"] = metadata + self._set_value(content, flags=flags) + except Exception as ex: + format_and_raise(node=None, key=key, value=None, cause=ex, msg=str(ex)) + + def __deepcopy__(self, memo: Dict[int, Any]) -> "DictConfig": + res = DictConfig(None) + res.__dict__["_metadata"] = copy.deepcopy(self.__dict__["_metadata"], memo=memo) + res.__dict__["_flags_cache"] = copy.deepcopy( + self.__dict__["_flags_cache"], memo=memo + ) + + src_content = self.__dict__["_content"] + if isinstance(src_content, dict): + content_copy = {} + for k, v in src_content.items(): + old_parent = v.__dict__["_parent"] + try: + v.__dict__["_parent"] = None + vc = copy.deepcopy(v, memo=memo) + vc.__dict__["_parent"] = res + content_copy[k] = vc + finally: + v.__dict__["_parent"] = old_parent + else: + # None and strings can be assigned as is + content_copy = src_content + + 
res.__dict__["_content"] = content_copy + # parent is retained, but not copied + res.__dict__["_parent"] = self.__dict__["_parent"] + return res + + def copy(self) -> "DictConfig": + return copy.copy(self) + + def _is_typed(self) -> bool: + return self._metadata.object_type not in (Any, None) and not is_dict( + self._metadata.object_type + ) + + def _validate_get(self, key: Any, value: Any = None) -> None: + is_typed = self._is_typed() + + is_struct = self._get_flag("struct") is True + if key not in self.__dict__["_content"]: + if is_typed: + # do not raise an exception if struct is explicitly set to False + if self._get_node_flag("struct") is False: + return + if is_typed or is_struct: + if is_typed: + assert self._metadata.object_type not in (dict, None) + msg = f"Key '{key}' not in '{self._metadata.object_type.__name__}'" + else: + msg = f"Key '{key}' is not in struct" + self._format_and_raise( + key=key, value=value, cause=ConfigAttributeError(msg) + ) + + def _validate_set(self, key: Any, value: Any) -> None: + from omegaconf import OmegaConf + + vk = get_value_kind(value) + if vk == ValueKind.INTERPOLATION: + return + if _is_none(value): + self._validate_non_optional(key, value) + return + if vk == ValueKind.MANDATORY_MISSING or value is None: + return + + target = self._get_node(key) if key is not None else self + + target_has_ref_type = isinstance( + target, DictConfig + ) and target._metadata.ref_type not in (Any, dict) + is_valid_target = target is None or not target_has_ref_type + + if is_valid_target: + return + + assert isinstance(target, Node) + + target_type = target._metadata.ref_type + value_type = OmegaConf.get_type(value) + + if is_dict(value_type) and is_dict(target_type): + return + if is_container_annotation(target_type) and not is_container_annotation( + value_type + ): + raise ValidationError( + f"Cannot assign {type_str(value_type)} to {type_str(target_type)}" + ) + + if target_type is not None and value_type is not None: + origin = 
getattr(target_type, "__origin__", target_type) + if not issubclass(value_type, origin): + self._raise_invalid_value(value, value_type, target_type) + + def _validate_merge(self, value: Any) -> None: + from omegaconf import OmegaConf + + dest = self + src = value + + self._validate_non_optional(None, src) + + dest_obj_type = OmegaConf.get_type(dest) + src_obj_type = OmegaConf.get_type(src) + + if dest._is_missing() and src._metadata.object_type not in (dict, None): + self._validate_set(key=None, value=_get_value(src)) + + if src._is_missing(): + return + + validation_error = ( + dest_obj_type is not None + and src_obj_type is not None + and is_structured_config(dest_obj_type) + and not src._is_none() + and not is_dict(src_obj_type) + and not issubclass(src_obj_type, dest_obj_type) + ) + if validation_error: + msg = ( + f"Merge error: {type_str(src_obj_type)} is not a " + f"subclass of {type_str(dest_obj_type)}. value: {src}" + ) + raise ValidationError(msg) + + def _validate_non_optional(self, key: Optional[DictKeyType], value: Any) -> None: + if _is_none(value, resolve=True, throw_on_resolution_failure=False): + + if key is not None: + child = self._get_node(key) + if child is not None: + assert isinstance(child, Node) + field_is_optional = child._is_optional() + else: + field_is_optional, _ = _resolve_optional( + self._metadata.element_type + ) + else: + field_is_optional = self._is_optional() + + if not field_is_optional: + self._format_and_raise( + key=key, + value=value, + cause=ValidationError("field '$FULL_KEY' is not Optional"), + ) + + def _raise_invalid_value( + self, value: Any, value_type: Any, target_type: Any + ) -> None: + assert value_type is not None + assert target_type is not None + msg = ( + f"Invalid type assigned: {type_str(value_type)} is not a " + f"subclass of {type_str(target_type)}. 
value: {value}" + ) + raise ValidationError(msg) + + def _validate_and_normalize_key(self, key: Any) -> DictKeyType: + return self._s_validate_and_normalize_key(self._metadata.key_type, key) + + def _s_validate_and_normalize_key(self, key_type: Any, key: Any) -> DictKeyType: + if key_type is Any: + for t in DictKeyType.__args__: # type: ignore + if isinstance(key, t): + return key # type: ignore + raise KeyValidationError("Incompatible key type '$KEY_TYPE'") + elif key_type is bool and key in [0, 1]: + # Python treats True as 1 and False as 0 when used as dict keys + # assert hash(0) == hash(False) + # assert hash(1) == hash(True) + return bool(key) + elif key_type in (str, bytes, int, float, bool): # primitive type + if not isinstance(key, key_type): + raise KeyValidationError( + f"Key $KEY ($KEY_TYPE) is incompatible with ({key_type.__name__})" + ) + + return key # type: ignore + elif issubclass(key_type, Enum): + try: + return EnumNode.validate_and_convert_to_enum(key_type, key) + except ValidationError: + valid = ", ".join([x for x in key_type.__members__.keys()]) + raise KeyValidationError( + f"Key '$KEY' is incompatible with the enum type '{key_type.__name__}', valid: [{valid}]" + ) + else: + assert False, f"Unsupported key type {key_type}" + + def __setitem__(self, key: DictKeyType, value: Any) -> None: + try: + self.__set_impl(key=key, value=value) + except AttributeError as e: + self._format_and_raise( + key=key, value=value, type_override=ConfigKeyError, cause=e + ) + except Exception as e: + self._format_and_raise(key=key, value=value, cause=e) + + def __set_impl(self, key: DictKeyType, value: Any) -> None: + key = self._validate_and_normalize_key(key) + self._set_item_impl(key, value) + + # hide content while inspecting in debugger + def __dir__(self) -> Iterable[str]: + if self._is_missing() or self._is_none(): + return [] + return self.__dict__["_content"].keys() # type: ignore + + def __setattr__(self, key: str, value: Any) -> None: + """ + Allow 
assigning attributes to DictConfig + :param key: + :param value: + :return: + """ + try: + self.__set_impl(key, value) + except Exception as e: + if isinstance(e, OmegaConfBaseException) and e._initialized: + raise e + self._format_and_raise(key=key, value=value, cause=e) + assert False + + def __getattr__(self, key: str) -> Any: + """ + Allow accessing dictionary values as attributes + :param key: + :return: + """ + if key == "__name__": + raise AttributeError() + + try: + return self._get_impl( + key=key, default_value=_DEFAULT_MARKER_, validate_key=False + ) + except ConfigKeyError as e: + self._format_and_raise( + key=key, value=None, cause=e, type_override=ConfigAttributeError + ) + except Exception as e: + self._format_and_raise(key=key, value=None, cause=e) + + def __getitem__(self, key: DictKeyType) -> Any: + """ + Allow map style access + :param key: + :return: + """ + + try: + return self._get_impl(key=key, default_value=_DEFAULT_MARKER_) + except AttributeError as e: + self._format_and_raise( + key=key, value=None, cause=e, type_override=ConfigKeyError + ) + except Exception as e: + self._format_and_raise(key=key, value=None, cause=e) + + def __delattr__(self, key: str) -> None: + """ + Allow deleting dictionary values as attributes + :param key: + :return: + """ + if self._get_flag("readonly"): + self._format_and_raise( + key=key, + value=None, + cause=ReadonlyConfigError( + "DictConfig in read-only mode does not support deletion" + ), + ) + try: + del self.__dict__["_content"][key] + except KeyError: + msg = "Attribute not found: '$KEY'" + self._format_and_raise(key=key, value=None, cause=ConfigAttributeError(msg)) + + def __delitem__(self, key: DictKeyType) -> None: + key = self._validate_and_normalize_key(key) + if self._get_flag("readonly"): + self._format_and_raise( + key=key, + value=None, + cause=ReadonlyConfigError( + "DictConfig in read-only mode does not support deletion" + ), + ) + if self._get_flag("struct"): + self._format_and_raise( + 
key=key, + value=None, + cause=ConfigTypeError( + "DictConfig in struct mode does not support deletion" + ), + ) + if self._is_typed() and self._get_node_flag("struct") is not False: + self._format_and_raise( + key=key, + value=None, + cause=ConfigTypeError( + f"{type_str(self._metadata.object_type)} (DictConfig) does not support deletion" + ), + ) + + try: + del self.__dict__["_content"][key] + except KeyError: + msg = "Key not found: '$KEY'" + self._format_and_raise(key=key, value=None, cause=ConfigKeyError(msg)) + + def get(self, key: DictKeyType, default_value: Any = None) -> Any: + """Return the value for `key` if `key` is in the dictionary, else + `default_value` (defaulting to `None`).""" + try: + return self._get_impl(key=key, default_value=default_value) + except KeyValidationError as e: + self._format_and_raise(key=key, value=None, cause=e) + + def _get_impl( + self, key: DictKeyType, default_value: Any, validate_key: bool = True + ) -> Any: + try: + node = self._get_child( + key=key, throw_on_missing_key=True, validate_key=validate_key + ) + except (ConfigAttributeError, ConfigKeyError): + if default_value is not _DEFAULT_MARKER_: + return default_value + else: + raise + assert isinstance(node, Node) + return self._resolve_with_default( + key=key, value=node, default_value=default_value + ) + + def _get_node( + self, + key: DictKeyType, + validate_access: bool = True, + validate_key: bool = True, + throw_on_missing_value: bool = False, + throw_on_missing_key: bool = False, + ) -> Optional[Node]: + try: + key = self._validate_and_normalize_key(key) + except KeyValidationError: + if validate_access and validate_key: + raise + else: + if throw_on_missing_key: + raise ConfigAttributeError + else: + return None + + if validate_access: + self._validate_get(key) + + value: Optional[Node] = self.__dict__["_content"].get(key) + if value is None: + if throw_on_missing_key: + raise ConfigKeyError(f"Missing key {key!s}") + elif throw_on_missing_value and 
value._is_missing(): + raise MissingMandatoryValue("Missing mandatory value: $KEY") + return value + + def pop(self, key: DictKeyType, default: Any = _DEFAULT_MARKER_) -> Any: + try: + if self._get_flag("readonly"): + raise ReadonlyConfigError("Cannot pop from read-only node") + if self._get_flag("struct"): + raise ConfigTypeError("DictConfig in struct mode does not support pop") + if self._is_typed() and self._get_node_flag("struct") is not False: + raise ConfigTypeError( + f"{type_str(self._metadata.object_type)} (DictConfig) does not support pop" + ) + key = self._validate_and_normalize_key(key) + node = self._get_child(key=key, validate_access=False) + if node is not None: + assert isinstance(node, Node) + value = self._resolve_with_default( + key=key, value=node, default_value=default + ) + + del self[key] + return value + else: + if default is not _DEFAULT_MARKER_: + return default + else: + full = self._get_full_key(key=key) + if full != key: + raise ConfigKeyError( + f"Key not found: '{key!s}' (path: '{full}')" + ) + else: + raise ConfigKeyError(f"Key not found: '{key!s}'") + except Exception as e: + self._format_and_raise(key=key, value=None, cause=e) + + def keys(self) -> KeysView[DictKeyType]: + if self._is_missing() or self._is_interpolation() or self._is_none(): + return {}.keys() + ret = self.__dict__["_content"].keys() + assert isinstance(ret, KeysView) + return ret + + def __contains__(self, key: object) -> bool: + """ + A key is contained in a DictConfig if there is an associated value and + it is not a mandatory missing value ('???'). 
+ :param key: + :return: + """ + + try: + key = self._validate_and_normalize_key(key) + except KeyValidationError: + return False + + try: + node = self._get_child(key) + assert node is None or isinstance(node, Node) + except (KeyError, AttributeError): + node = None + + if node is None: + return False + else: + try: + self._resolve_with_default(key=key, value=node) + return True + except InterpolationResolutionError: + # Interpolations that fail count as existing. + return True + except MissingMandatoryValue: + # Missing values count as *not* existing. + return False + + def __iter__(self) -> Iterator[DictKeyType]: + return iter(self.keys()) + + def items(self) -> ItemsView[DictKeyType, Any]: + return dict(self.items_ex(resolve=True, keys=None)).items() + + def setdefault(self, key: DictKeyType, default: Any = None) -> Any: + if key in self: + ret = self.__getitem__(key) + else: + ret = default + self.__setitem__(key, default) + return ret + + def items_ex( + self, resolve: bool = True, keys: Optional[Sequence[DictKeyType]] = None + ) -> List[Tuple[DictKeyType, Any]]: + items: List[Tuple[DictKeyType, Any]] = [] + + if self._is_none(): + self._format_and_raise( + key=None, + value=None, + cause=TypeError("Cannot iterate a DictConfig object representing None"), + ) + if self._is_missing(): + raise MissingMandatoryValue("Cannot iterate a missing DictConfig") + + for key in self.keys(): + if resolve: + value = self[key] + else: + value = self.__dict__["_content"][key] + if isinstance(value, ValueNode): + value = value._value() + if keys is None or key in keys: + items.append((key, value)) + + return items + + def __eq__(self, other: Any) -> bool: + if other is None: + return self.__dict__["_content"] is None + if is_primitive_dict(other) or is_structured_config(other): + other = DictConfig(other, flags={"allow_objects": True}) + return DictConfig._dict_conf_eq(self, other) + if isinstance(other, DictConfig): + return DictConfig._dict_conf_eq(self, other) + if 
self._is_missing(): + return _is_missing_literal(other) + return NotImplemented + + def __ne__(self, other: Any) -> bool: + x = self.__eq__(other) + if x is not NotImplemented: + return not x + return NotImplemented + + def __hash__(self) -> int: + return hash(str(self)) + + def _promote(self, type_or_prototype: Optional[Type[Any]]) -> None: + """ + Retypes a node. + This should only be used in rare circumstances, where you want to dynamically change + the runtime structured-type of a DictConfig. + It will change the type and add the additional fields based on the input class or object + """ + if type_or_prototype is None: + return + if not is_structured_config(type_or_prototype): + raise ValueError(f"Expected structured config class: {type_or_prototype}") + + from omegaconf import OmegaConf + + proto: DictConfig = OmegaConf.structured(type_or_prototype) + object_type = proto._metadata.object_type + # remove the type to prevent assignment validation from rejecting the promotion. + proto._metadata.object_type = None + self.merge_with(proto) + # restore the type. 
+ self._metadata.object_type = object_type + + def _set_value(self, value: Any, flags: Optional[Dict[str, bool]] = None) -> None: + try: + previous_content = self.__dict__["_content"] + self._set_value_impl(value, flags) + except Exception as e: + self.__dict__["_content"] = previous_content + raise e + + def _set_value_impl( + self, value: Any, flags: Optional[Dict[str, bool]] = None + ) -> None: + from omegaconf import MISSING, flag_override + + if flags is None: + flags = {} + + assert not isinstance(value, ValueNode) + self._validate_set(key=None, value=value) + + if _is_none(value, resolve=True): + self.__dict__["_content"] = None + self._metadata.object_type = None + elif _is_interpolation(value, strict_interpolation_validation=True): + self.__dict__["_content"] = value + self._metadata.object_type = None + elif _is_missing_value(value): + self.__dict__["_content"] = MISSING + self._metadata.object_type = None + else: + self.__dict__["_content"] = {} + if is_structured_config(value): + self._metadata.object_type = None + ao = self._get_flag("allow_objects") + data = get_structured_config_data(value, allow_objects=ao) + with flag_override(self, ["struct", "readonly"], False): + for k, v in data.items(): + self.__setitem__(k, v) + self._metadata.object_type = get_type_of(value) + + elif isinstance(value, DictConfig): + self._metadata.flags = copy.deepcopy(flags) + with flag_override(self, ["struct", "readonly"], False): + for k, v in value.__dict__["_content"].items(): + self.__setitem__(k, v) + self._metadata.object_type = value._metadata.object_type + + elif isinstance(value, dict): + with flag_override(self, ["struct", "readonly"], False): + for k, v in value.items(): + self.__setitem__(k, v) + self._metadata.object_type = dict + + else: # pragma: no cover + msg = f"Unsupported value type: {value}" + raise ValidationError(msg) + + @staticmethod + def _dict_conf_eq(d1: "DictConfig", d2: "DictConfig") -> bool: + + d1_none = d1.__dict__["_content"] is None + 
d2_none = d2.__dict__["_content"] is None + if d1_none and d2_none: + return True + if d1_none != d2_none: + return False + + assert isinstance(d1, DictConfig) + assert isinstance(d2, DictConfig) + if len(d1) != len(d2): + return False + if d1._is_missing() or d2._is_missing(): + return d1._is_missing() is d2._is_missing() + + for k, v in d1.items_ex(resolve=False): + if k not in d2.__dict__["_content"]: + return False + if not BaseContainer._item_eq(d1, k, d2, k): + return False + + return True + + def _to_object(self) -> Any: + """ + Instantiate an instance of `self._metadata.object_type`. + This requires `self` to be a structured config. + Nested subconfigs are converted by calling `OmegaConf.to_object`. + """ + from omegaconf import OmegaConf + + object_type = self._metadata.object_type + assert is_structured_config(object_type) + init_field_names = set(get_structured_config_init_field_names(object_type)) + + init_field_items: Dict[str, Any] = {} + non_init_field_items: Dict[str, Any] = {} + for k in self.keys(): + assert isinstance(k, str) + node = self._get_child(k) + assert isinstance(node, Node) + try: + node = node._dereference_node() + except InterpolationResolutionError as e: + self._format_and_raise(key=k, value=None, cause=e) + if node._is_missing(): + if k not in init_field_names: + continue # MISSING is ignored for init=False fields + self._format_and_raise( + key=k, + value=None, + cause=MissingMandatoryValue( + "Structured config of type `$OBJECT_TYPE` has missing mandatory value: $KEY" + ), + ) + if isinstance(node, Container): + v = OmegaConf.to_object(node) + else: + v = node._value() + + if k in init_field_names: + init_field_items[k] = v + else: + non_init_field_items[k] = v + + try: + result = object_type(**init_field_items) + except TypeError as exc: + self._format_and_raise( + key=None, + value=None, + cause=exc, + msg="Could not create instance of `$OBJECT_TYPE`: " + str(exc), + ) + + for k, v in non_init_field_items.items(): + 
setattr(result, k, v) + return result diff --git a/mantis_evalkit/lib/python3.10/site-packages/omegaconf/grammar/__init__.py b/mantis_evalkit/lib/python3.10/site-packages/omegaconf/grammar/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/mantis_evalkit/lib/python3.10/site-packages/omegaconf/grammar/__pycache__/__init__.cpython-310.pyc b/mantis_evalkit/lib/python3.10/site-packages/omegaconf/grammar/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fd0ba63c9666cc09d4bd5d58cf7a7fbe4658bcd0 Binary files /dev/null and b/mantis_evalkit/lib/python3.10/site-packages/omegaconf/grammar/__pycache__/__init__.cpython-310.pyc differ diff --git a/mantis_evalkit/lib/python3.10/site-packages/omegaconf/grammar/gen/OmegaConfGrammarLexer.py b/mantis_evalkit/lib/python3.10/site-packages/omegaconf/grammar/gen/OmegaConfGrammarLexer.py new file mode 100644 index 0000000000000000000000000000000000000000..1bf9416fab12aaf7eb45c73b620037d4800e3496 --- /dev/null +++ b/mantis_evalkit/lib/python3.10/site-packages/omegaconf/grammar/gen/OmegaConfGrammarLexer.py @@ -0,0 +1,337 @@ +# Generated from /tmp/build-via-sdist-fm63w174/omegaconf-2.3.0/omegaconf/grammar/OmegaConfGrammarLexer.g4 by ANTLR 4.9.3 +from antlr4 import * +from io import StringIO +import sys +if sys.version_info[1] > 5: + from typing import TextIO +else: + from typing.io import TextIO + + + +def serializedATN(): + with StringIO() as buf: + buf.write("\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\2\36") + buf.write("\u01e7\b\1\b\1\b\1\b\1\b\1\4\2\t\2\4\3\t\3\4\4\t\4\4\5") + buf.write("\t\5\4\6\t\6\4\7\t\7\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t\13") + buf.write("\4\f\t\f\4\r\t\r\4\16\t\16\4\17\t\17\4\20\t\20\4\21\t") + buf.write("\21\4\22\t\22\4\23\t\23\4\24\t\24\4\25\t\25\4\26\t\26") + buf.write("\4\27\t\27\4\30\t\30\4\31\t\31\4\32\t\32\4\33\t\33\4\34") + 
buf.write("\t\34\4\35\t\35\4\36\t\36\4\37\t\37\4 \t \4!\t!\4\"\t") + buf.write("\"\4#\t#\4$\t$\4%\t%\4&\t&\4\'\t\'\4(\t(\4)\t)\4*\t*\4") + buf.write("+\t+\4,\t,\4-\t-\4.\t.\4/\t/\4\60\t\60\4\61\t\61\4\62") + buf.write("\t\62\4\63\t\63\4\64\t\64\4\65\t\65\4\66\t\66\3\2\3\2") + buf.write("\3\3\3\3\3\4\3\4\3\4\5\4y\n\4\3\4\7\4|\n\4\f\4\16\4\177") + buf.write("\13\4\5\4\u0081\n\4\3\5\3\5\3\5\3\6\3\6\3\6\3\6\3\6\3") + buf.write("\7\7\7\u008c\n\7\f\7\16\7\u008f\13\7\3\7\3\7\3\b\7\b\u0094") + buf.write("\n\b\f\b\16\b\u0097\13\b\3\b\3\b\3\b\3\b\3\t\6\t\u009e") + buf.write("\n\t\r\t\16\t\u009f\3\n\6\n\u00a3\n\n\r\n\16\n\u00a4\3") + buf.write("\n\3\n\3\13\3\13\3\13\3\13\3\f\3\f\3\f\3\f\5\f\u00b1\n") + buf.write("\f\3\f\3\f\3\r\3\r\5\r\u00b7\n\r\3\r\3\r\3\16\5\16\u00bc") + buf.write("\n\16\3\16\3\16\3\16\3\16\3\17\3\17\3\17\3\17\3\20\3\20") + buf.write("\3\20\3\20\3\21\5\21\u00cb\n\21\3\21\3\21\5\21\u00cf\n") + buf.write("\21\3\22\3\22\5\22\u00d3\n\22\3\23\5\23\u00d6\n\23\3\23") + buf.write("\3\23\3\24\5\24\u00db\n\24\3\24\3\24\5\24\u00df\n\24\3") + buf.write("\25\3\25\3\25\3\25\5\25\u00e5\n\25\3\25\3\25\3\25\5\25") + buf.write("\u00ea\n\25\3\25\7\25\u00ed\n\25\f\25\16\25\u00f0\13\25") + buf.write("\5\25\u00f2\n\25\3\26\3\26\5\26\u00f6\n\26\3\26\3\26\5") + buf.write("\26\u00fa\n\26\3\26\3\26\5\26\u00fe\n\26\3\26\7\26\u0101") + buf.write("\n\26\f\26\16\26\u0104\13\26\3\27\5\27\u0107\n\27\3\27") + buf.write("\3\27\3\27\3\27\3\27\3\27\3\27\3\27\5\27\u0111\n\27\3") + buf.write("\30\5\30\u0114\n\30\3\30\3\30\3\31\3\31\3\31\3\31\3\31") + buf.write("\3\31\3\31\3\31\3\31\5\31\u0121\n\31\3\32\3\32\3\32\3") + buf.write("\32\3\32\3\33\3\33\3\34\3\34\5\34\u012c\n\34\3\34\3\34") + buf.write("\3\34\7\34\u0131\n\34\f\34\16\34\u0134\13\34\3\35\3\35") + buf.write("\3\35\3\35\3\35\3\35\3\35\3\35\3\35\3\35\3\35\3\35\3\35") + buf.write("\3\35\3\35\3\35\3\35\3\35\3\35\3\35\3\35\3\35\3\35\6\35") + buf.write("\u014d\n\35\r\35\16\35\u014e\3\36\6\36\u0152\n\36\r\36") + 
buf.write("\16\36\u0153\3\37\3\37\5\37\u0158\n\37\3\37\3\37\3\37") + buf.write("\3 \5 \u015e\n \3 \3 \5 \u0162\n \3 \3 \3 \3!\5!\u0168") + buf.write("\n!\3!\3!\3!\3!\3\"\3\"\3#\3#\3#\3#\3$\3$\3$\3$\3%\3%") + buf.write("\3%\3%\3&\6&\u017d\n&\r&\16&\u017e\3\'\3\'\3\'\3\'\3\'") + buf.write("\3(\3(\3(\3(\3)\7)\u018b\n)\f)\16)\u018e\13)\3)\3)\3)") + buf.write("\3)\3*\3*\3*\3*\3+\7+\u0199\n+\f+\16+\u019c\13+\3+\3+") + buf.write("\3+\3+\3+\3,\6,\u01a4\n,\r,\16,\u01a5\3-\6-\u01a9\n-\r") + buf.write("-\16-\u01aa\3-\3-\3.\3.\3.\3.\3/\3/\3/\3/\3/\3\60\3\60") + buf.write("\3\60\3\60\3\60\3\61\7\61\u01be\n\61\f\61\16\61\u01c1") + buf.write("\13\61\3\61\3\61\3\61\3\61\3\62\3\62\3\62\3\62\3\63\7") + buf.write("\63\u01cc\n\63\f\63\16\63\u01cf\13\63\3\63\3\63\3\63\3") + buf.write("\63\3\63\3\64\6\64\u01d7\n\64\r\64\16\64\u01d8\3\64\3") + buf.write("\64\3\65\6\65\u01de\n\65\r\65\16\65\u01df\3\65\3\65\3") + buf.write("\66\3\66\3\66\3\66\2\2\67\7\2\t\2\13\2\r\2\17\2\21\3\23") + buf.write("\4\25\5\27\2\31\34\33\6\35\7\37\b!\t#\n%\13\'\f)\r+\16") + buf.write("-\2/\2\61\17\63\20\65\21\67\229\23;\24=\25?\26A\2C\2E") + buf.write("\27G\30I\35K\36M\2O\31Q\2S\32U\2W\2Y\2[\33]\2_\2a\2c\2") + buf.write("e\2g\2i\2k\2m\2o\2\7\2\3\4\5\6\32\4\2C\\c|\3\2\62;\3\2") + buf.write("\63;\3\2&&\4\2&&^^\4\2GGgg\4\2--//\4\2KKkk\4\2PPpp\4\2") + buf.write("HHhh\4\2CCcc\4\2VVvv\4\2TTtt\4\2WWww\4\2NNnn\4\2UUuu\b") + buf.write("\2&\',-/\61AB^^~~\4\2//aa\4\2\13\13\"\"\13\2\13\13\"\"") + buf.write("$$)+\60\60<<]_}}\177\177\4\2&&))\5\2&&))^^\4\2$$&&\5\2") + buf.write("$$&&^^\2\u0218\2\17\3\2\2\2\2\21\3\2\2\2\2\23\3\2\2\2") + buf.write("\2\25\3\2\2\2\2\27\3\2\2\2\2\31\3\2\2\2\3\33\3\2\2\2\3") + buf.write("\35\3\2\2\2\3\37\3\2\2\2\3!\3\2\2\2\3#\3\2\2\2\3%\3\2") + buf.write("\2\2\3\'\3\2\2\2\3)\3\2\2\2\3+\3\2\2\2\3\61\3\2\2\2\3") + buf.write("\63\3\2\2\2\3\65\3\2\2\2\3\67\3\2\2\2\39\3\2\2\2\3;\3") + buf.write("\2\2\2\3=\3\2\2\2\3?\3\2\2\2\4A\3\2\2\2\4C\3\2\2\2\4E") + 
buf.write("\3\2\2\2\4G\3\2\2\2\4I\3\2\2\2\4K\3\2\2\2\4M\3\2\2\2\4") + buf.write("O\3\2\2\2\5Q\3\2\2\2\5S\3\2\2\2\5U\3\2\2\2\5W\3\2\2\2") + buf.write("\5Y\3\2\2\2\5[\3\2\2\2\5]\3\2\2\2\5_\3\2\2\2\6a\3\2\2") + buf.write("\2\6c\3\2\2\2\6e\3\2\2\2\6g\3\2\2\2\6i\3\2\2\2\6k\3\2") + buf.write("\2\2\6m\3\2\2\2\6o\3\2\2\2\7q\3\2\2\2\ts\3\2\2\2\13\u0080") + buf.write("\3\2\2\2\r\u0082\3\2\2\2\17\u0085\3\2\2\2\21\u008d\3\2") + buf.write("\2\2\23\u0095\3\2\2\2\25\u009d\3\2\2\2\27\u00a2\3\2\2") + buf.write("\2\31\u00a8\3\2\2\2\33\u00ac\3\2\2\2\35\u00b4\3\2\2\2") + buf.write("\37\u00bb\3\2\2\2!\u00c1\3\2\2\2#\u00c5\3\2\2\2%\u00ca") + buf.write("\3\2\2\2\'\u00d0\3\2\2\2)\u00d5\3\2\2\2+\u00da\3\2\2\2") + buf.write("-\u00f1\3\2\2\2/\u00f5\3\2\2\2\61\u0106\3\2\2\2\63\u0113") + buf.write("\3\2\2\2\65\u0120\3\2\2\2\67\u0122\3\2\2\29\u0127\3\2") + buf.write("\2\2;\u012b\3\2\2\2=\u014c\3\2\2\2?\u0151\3\2\2\2A\u0155") + buf.write("\3\2\2\2C\u015d\3\2\2\2E\u0167\3\2\2\2G\u016d\3\2\2\2") + buf.write("I\u016f\3\2\2\2K\u0173\3\2\2\2M\u0177\3\2\2\2O\u017c\3") + buf.write("\2\2\2Q\u0180\3\2\2\2S\u0185\3\2\2\2U\u018c\3\2\2\2W\u0193") + buf.write("\3\2\2\2Y\u019a\3\2\2\2[\u01a3\3\2\2\2]\u01a8\3\2\2\2") + buf.write("_\u01ae\3\2\2\2a\u01b2\3\2\2\2c\u01b7\3\2\2\2e\u01bf\3") + buf.write("\2\2\2g\u01c6\3\2\2\2i\u01cd\3\2\2\2k\u01d6\3\2\2\2m\u01dd") + buf.write("\3\2\2\2o\u01e3\3\2\2\2qr\t\2\2\2r\b\3\2\2\2st\t\3\2\2") + buf.write("t\n\3\2\2\2u\u0081\7\62\2\2v}\t\4\2\2wy\7a\2\2xw\3\2\2") + buf.write("\2xy\3\2\2\2yz\3\2\2\2z|\5\t\3\2{x\3\2\2\2|\177\3\2\2") + buf.write("\2}{\3\2\2\2}~\3\2\2\2~\u0081\3\2\2\2\177}\3\2\2\2\u0080") + buf.write("u\3\2\2\2\u0080v\3\2\2\2\u0081\f\3\2\2\2\u0082\u0083\7") + buf.write("^\2\2\u0083\u0084\7^\2\2\u0084\16\3\2\2\2\u0085\u0086") + buf.write("\5\33\f\2\u0086\u0087\3\2\2\2\u0087\u0088\b\6\2\2\u0088") + buf.write("\u0089\b\6\3\2\u0089\20\3\2\2\2\u008a\u008c\n\5\2\2\u008b") + buf.write("\u008a\3\2\2\2\u008c\u008f\3\2\2\2\u008d\u008b\3\2\2\2") + 
buf.write("\u008d\u008e\3\2\2\2\u008e\u0090\3\2\2\2\u008f\u008d\3") + buf.write("\2\2\2\u0090\u0091\n\6\2\2\u0091\22\3\2\2\2\u0092\u0094") + buf.write("\5\r\5\2\u0093\u0092\3\2\2\2\u0094\u0097\3\2\2\2\u0095") + buf.write("\u0093\3\2\2\2\u0095\u0096\3\2\2\2\u0096\u0098\3\2\2\2") + buf.write("\u0097\u0095\3\2\2\2\u0098\u0099\7^\2\2\u0099\u009a\7") + buf.write("&\2\2\u009a\u009b\7}\2\2\u009b\24\3\2\2\2\u009c\u009e") + buf.write("\5\r\5\2\u009d\u009c\3\2\2\2\u009e\u009f\3\2\2\2\u009f") + buf.write("\u009d\3\2\2\2\u009f\u00a0\3\2\2\2\u00a0\26\3\2\2\2\u00a1") + buf.write("\u00a3\7^\2\2\u00a2\u00a1\3\2\2\2\u00a3\u00a4\3\2\2\2") + buf.write("\u00a4\u00a2\3\2\2\2\u00a4\u00a5\3\2\2\2\u00a5\u00a6\3") + buf.write("\2\2\2\u00a6\u00a7\b\n\4\2\u00a7\30\3\2\2\2\u00a8\u00a9") + buf.write("\7&\2\2\u00a9\u00aa\3\2\2\2\u00aa\u00ab\b\13\4\2\u00ab") + buf.write("\32\3\2\2\2\u00ac\u00ad\7&\2\2\u00ad\u00ae\7}\2\2\u00ae") + buf.write("\u00b0\3\2\2\2\u00af\u00b1\5?\36\2\u00b0\u00af\3\2\2\2") + buf.write("\u00b0\u00b1\3\2\2\2\u00b1\u00b2\3\2\2\2\u00b2\u00b3\b") + buf.write("\f\3\2\u00b3\34\3\2\2\2\u00b4\u00b6\7}\2\2\u00b5\u00b7") + buf.write("\5?\36\2\u00b6\u00b5\3\2\2\2\u00b6\u00b7\3\2\2\2\u00b7") + buf.write("\u00b8\3\2\2\2\u00b8\u00b9\b\r\5\2\u00b9\36\3\2\2\2\u00ba") + buf.write("\u00bc\5?\36\2\u00bb\u00ba\3\2\2\2\u00bb\u00bc\3\2\2\2") + buf.write("\u00bc\u00bd\3\2\2\2\u00bd\u00be\7\177\2\2\u00be\u00bf") + buf.write("\3\2\2\2\u00bf\u00c0\b\16\6\2\u00c0 \3\2\2\2\u00c1\u00c2") + buf.write("\7)\2\2\u00c2\u00c3\3\2\2\2\u00c3\u00c4\b\17\7\2\u00c4") + buf.write("\"\3\2\2\2\u00c5\u00c6\7$\2\2\u00c6\u00c7\3\2\2\2\u00c7") + buf.write("\u00c8\b\20\b\2\u00c8$\3\2\2\2\u00c9\u00cb\5?\36\2\u00ca") + buf.write("\u00c9\3\2\2\2\u00ca\u00cb\3\2\2\2\u00cb\u00cc\3\2\2\2") + buf.write("\u00cc\u00ce\7.\2\2\u00cd\u00cf\5?\36\2\u00ce\u00cd\3") + buf.write("\2\2\2\u00ce\u00cf\3\2\2\2\u00cf&\3\2\2\2\u00d0\u00d2") + buf.write("\7]\2\2\u00d1\u00d3\5?\36\2\u00d2\u00d1\3\2\2\2\u00d2") + 
buf.write("\u00d3\3\2\2\2\u00d3(\3\2\2\2\u00d4\u00d6\5?\36\2\u00d5") + buf.write("\u00d4\3\2\2\2\u00d5\u00d6\3\2\2\2\u00d6\u00d7\3\2\2\2") + buf.write("\u00d7\u00d8\7_\2\2\u00d8*\3\2\2\2\u00d9\u00db\5?\36\2") + buf.write("\u00da\u00d9\3\2\2\2\u00da\u00db\3\2\2\2\u00db\u00dc\3") + buf.write("\2\2\2\u00dc\u00de\7<\2\2\u00dd\u00df\5?\36\2\u00de\u00dd") + buf.write("\3\2\2\2\u00de\u00df\3\2\2\2\u00df,\3\2\2\2\u00e0\u00e1") + buf.write("\5\13\4\2\u00e1\u00e2\7\60\2\2\u00e2\u00f2\3\2\2\2\u00e3") + buf.write("\u00e5\5\13\4\2\u00e4\u00e3\3\2\2\2\u00e4\u00e5\3\2\2") + buf.write("\2\u00e5\u00e6\3\2\2\2\u00e6\u00e7\7\60\2\2\u00e7\u00ee") + buf.write("\5\t\3\2\u00e8\u00ea\7a\2\2\u00e9\u00e8\3\2\2\2\u00e9") + buf.write("\u00ea\3\2\2\2\u00ea\u00eb\3\2\2\2\u00eb\u00ed\5\t\3\2") + buf.write("\u00ec\u00e9\3\2\2\2\u00ed\u00f0\3\2\2\2\u00ee\u00ec\3") + buf.write("\2\2\2\u00ee\u00ef\3\2\2\2\u00ef\u00f2\3\2\2\2\u00f0\u00ee") + buf.write("\3\2\2\2\u00f1\u00e0\3\2\2\2\u00f1\u00e4\3\2\2\2\u00f2") + buf.write(".\3\2\2\2\u00f3\u00f6\5\13\4\2\u00f4\u00f6\5-\25\2\u00f5") + buf.write("\u00f3\3\2\2\2\u00f5\u00f4\3\2\2\2\u00f6\u00f7\3\2\2\2") + buf.write("\u00f7\u00f9\t\7\2\2\u00f8\u00fa\t\b\2\2\u00f9\u00f8\3") + buf.write("\2\2\2\u00f9\u00fa\3\2\2\2\u00fa\u00fb\3\2\2\2\u00fb\u0102") + buf.write("\5\t\3\2\u00fc\u00fe\7a\2\2\u00fd\u00fc\3\2\2\2\u00fd") + buf.write("\u00fe\3\2\2\2\u00fe\u00ff\3\2\2\2\u00ff\u0101\5\t\3\2") + buf.write("\u0100\u00fd\3\2\2\2\u0101\u0104\3\2\2\2\u0102\u0100\3") + buf.write("\2\2\2\u0102\u0103\3\2\2\2\u0103\60\3\2\2\2\u0104\u0102") + buf.write("\3\2\2\2\u0105\u0107\t\b\2\2\u0106\u0105\3\2\2\2\u0106") + buf.write("\u0107\3\2\2\2\u0107\u0110\3\2\2\2\u0108\u0111\5-\25\2") + buf.write("\u0109\u0111\5/\26\2\u010a\u010b\t\t\2\2\u010b\u010c\t") + buf.write("\n\2\2\u010c\u0111\t\13\2\2\u010d\u010e\t\n\2\2\u010e") + buf.write("\u010f\t\f\2\2\u010f\u0111\t\n\2\2\u0110\u0108\3\2\2\2") + buf.write("\u0110\u0109\3\2\2\2\u0110\u010a\3\2\2\2\u0110\u010d\3") + 
buf.write("\2\2\2\u0111\62\3\2\2\2\u0112\u0114\t\b\2\2\u0113\u0112") + buf.write("\3\2\2\2\u0113\u0114\3\2\2\2\u0114\u0115\3\2\2\2\u0115") + buf.write("\u0116\5\13\4\2\u0116\64\3\2\2\2\u0117\u0118\t\r\2\2\u0118") + buf.write("\u0119\t\16\2\2\u0119\u011a\t\17\2\2\u011a\u0121\t\7\2") + buf.write("\2\u011b\u011c\t\13\2\2\u011c\u011d\t\f\2\2\u011d\u011e") + buf.write("\t\20\2\2\u011e\u011f\t\21\2\2\u011f\u0121\t\7\2\2\u0120") + buf.write("\u0117\3\2\2\2\u0120\u011b\3\2\2\2\u0121\66\3\2\2\2\u0122") + buf.write("\u0123\t\n\2\2\u0123\u0124\t\17\2\2\u0124\u0125\t\20\2") + buf.write("\2\u0125\u0126\t\20\2\2\u01268\3\2\2\2\u0127\u0128\t\22") + buf.write("\2\2\u0128:\3\2\2\2\u0129\u012c\5\7\2\2\u012a\u012c\7") + buf.write("a\2\2\u012b\u0129\3\2\2\2\u012b\u012a\3\2\2\2\u012c\u0132") + buf.write("\3\2\2\2\u012d\u0131\5\7\2\2\u012e\u0131\5\t\3\2\u012f") + buf.write("\u0131\t\23\2\2\u0130\u012d\3\2\2\2\u0130\u012e\3\2\2") + buf.write("\2\u0130\u012f\3\2\2\2\u0131\u0134\3\2\2\2\u0132\u0130") + buf.write("\3\2\2\2\u0132\u0133\3\2\2\2\u0133<\3\2\2\2\u0134\u0132") + buf.write("\3\2\2\2\u0135\u014d\5\r\5\2\u0136\u0137\7^\2\2\u0137") + buf.write("\u014d\7*\2\2\u0138\u0139\7^\2\2\u0139\u014d\7+\2\2\u013a") + buf.write("\u013b\7^\2\2\u013b\u014d\7]\2\2\u013c\u013d\7^\2\2\u013d") + buf.write("\u014d\7_\2\2\u013e\u013f\7^\2\2\u013f\u014d\7}\2\2\u0140") + buf.write("\u0141\7^\2\2\u0141\u014d\7\177\2\2\u0142\u0143\7^\2\2") + buf.write("\u0143\u014d\7<\2\2\u0144\u0145\7^\2\2\u0145\u014d\7?") + buf.write("\2\2\u0146\u0147\7^\2\2\u0147\u014d\7.\2\2\u0148\u0149") + buf.write("\7^\2\2\u0149\u014d\7\"\2\2\u014a\u014b\7^\2\2\u014b\u014d") + buf.write("\7\13\2\2\u014c\u0135\3\2\2\2\u014c\u0136\3\2\2\2\u014c") + buf.write("\u0138\3\2\2\2\u014c\u013a\3\2\2\2\u014c\u013c\3\2\2\2") + buf.write("\u014c\u013e\3\2\2\2\u014c\u0140\3\2\2\2\u014c\u0142\3") + buf.write("\2\2\2\u014c\u0144\3\2\2\2\u014c\u0146\3\2\2\2\u014c\u0148") + buf.write("\3\2\2\2\u014c\u014a\3\2\2\2\u014d\u014e\3\2\2\2\u014e") + 
buf.write("\u014c\3\2\2\2\u014e\u014f\3\2\2\2\u014f>\3\2\2\2\u0150") + buf.write("\u0152\t\24\2\2\u0151\u0150\3\2\2\2\u0152\u0153\3\2\2") + buf.write("\2\u0153\u0151\3\2\2\2\u0153\u0154\3\2\2\2\u0154@\3\2") + buf.write("\2\2\u0155\u0157\5\33\f\2\u0156\u0158\5?\36\2\u0157\u0156") + buf.write("\3\2\2\2\u0157\u0158\3\2\2\2\u0158\u0159\3\2\2\2\u0159") + buf.write("\u015a\b\37\2\2\u015a\u015b\b\37\3\2\u015bB\3\2\2\2\u015c") + buf.write("\u015e\5?\36\2\u015d\u015c\3\2\2\2\u015d\u015e\3\2\2\2") + buf.write("\u015e\u015f\3\2\2\2\u015f\u0161\7<\2\2\u0160\u0162\5") + buf.write("?\36\2\u0161\u0160\3\2\2\2\u0161\u0162\3\2\2\2\u0162\u0163") + buf.write("\3\2\2\2\u0163\u0164\b \t\2\u0164\u0165\b \n\2\u0165D") + buf.write("\3\2\2\2\u0166\u0168\5?\36\2\u0167\u0166\3\2\2\2\u0167") + buf.write("\u0168\3\2\2\2\u0168\u0169\3\2\2\2\u0169\u016a\7\177\2") + buf.write("\2\u016a\u016b\3\2\2\2\u016b\u016c\b!\6\2\u016cF\3\2\2") + buf.write("\2\u016d\u016e\7\60\2\2\u016eH\3\2\2\2\u016f\u0170\7]") + buf.write("\2\2\u0170\u0171\3\2\2\2\u0171\u0172\b#\13\2\u0172J\3") + buf.write("\2\2\2\u0173\u0174\7_\2\2\u0174\u0175\3\2\2\2\u0175\u0176") + buf.write("\b$\f\2\u0176L\3\2\2\2\u0177\u0178\5;\34\2\u0178\u0179") + buf.write("\3\2\2\2\u0179\u017a\b%\r\2\u017aN\3\2\2\2\u017b\u017d") + buf.write("\n\25\2\2\u017c\u017b\3\2\2\2\u017d\u017e\3\2\2\2\u017e") + buf.write("\u017c\3\2\2\2\u017e\u017f\3\2\2\2\u017fP\3\2\2\2\u0180") + buf.write("\u0181\5\33\f\2\u0181\u0182\3\2\2\2\u0182\u0183\b\'\2") + buf.write("\2\u0183\u0184\b\'\3\2\u0184R\3\2\2\2\u0185\u0186\7)\2") + buf.write("\2\u0186\u0187\3\2\2\2\u0187\u0188\b(\6\2\u0188T\3\2\2") + buf.write("\2\u0189\u018b\n\26\2\2\u018a\u0189\3\2\2\2\u018b\u018e") + buf.write("\3\2\2\2\u018c\u018a\3\2\2\2\u018c\u018d\3\2\2\2\u018d") + buf.write("\u018f\3\2\2\2\u018e\u018c\3\2\2\2\u018f\u0190\n\27\2") + buf.write("\2\u0190\u0191\3\2\2\2\u0191\u0192\b)\4\2\u0192V\3\2\2") + buf.write("\2\u0193\u0194\5\23\b\2\u0194\u0195\3\2\2\2\u0195\u0196") + 
buf.write("\b*\16\2\u0196X\3\2\2\2\u0197\u0199\5\r\5\2\u0198\u0197") + buf.write("\3\2\2\2\u0199\u019c\3\2\2\2\u019a\u0198\3\2\2\2\u019a") + buf.write("\u019b\3\2\2\2\u019b\u019d\3\2\2\2\u019c\u019a\3\2\2\2") + buf.write("\u019d\u019e\7^\2\2\u019e\u019f\7)\2\2\u019f\u01a0\3\2") + buf.write("\2\2\u01a0\u01a1\b+\17\2\u01a1Z\3\2\2\2\u01a2\u01a4\5") + buf.write("\r\5\2\u01a3\u01a2\3\2\2\2\u01a4\u01a5\3\2\2\2\u01a5\u01a3") + buf.write("\3\2\2\2\u01a5\u01a6\3\2\2\2\u01a6\\\3\2\2\2\u01a7\u01a9") + buf.write("\7^\2\2\u01a8\u01a7\3\2\2\2\u01a9\u01aa\3\2\2\2\u01aa") + buf.write("\u01a8\3\2\2\2\u01aa\u01ab\3\2\2\2\u01ab\u01ac\3\2\2\2") + buf.write("\u01ac\u01ad\b-\4\2\u01ad^\3\2\2\2\u01ae\u01af\7&\2\2") + buf.write("\u01af\u01b0\3\2\2\2\u01b0\u01b1\b.\4\2\u01b1`\3\2\2\2") + buf.write("\u01b2\u01b3\5\33\f\2\u01b3\u01b4\3\2\2\2\u01b4\u01b5") + buf.write("\b/\2\2\u01b5\u01b6\b/\3\2\u01b6b\3\2\2\2\u01b7\u01b8") + buf.write("\7$\2\2\u01b8\u01b9\3\2\2\2\u01b9\u01ba\b\60\20\2\u01ba") + buf.write("\u01bb\b\60\6\2\u01bbd\3\2\2\2\u01bc\u01be\n\30\2\2\u01bd") + buf.write("\u01bc\3\2\2\2\u01be\u01c1\3\2\2\2\u01bf\u01bd\3\2\2\2") + buf.write("\u01bf\u01c0\3\2\2\2\u01c0\u01c2\3\2\2\2\u01c1\u01bf\3") + buf.write("\2\2\2\u01c2\u01c3\n\31\2\2\u01c3\u01c4\3\2\2\2\u01c4") + buf.write("\u01c5\b\61\4\2\u01c5f\3\2\2\2\u01c6\u01c7\5\23\b\2\u01c7") + buf.write("\u01c8\3\2\2\2\u01c8\u01c9\b\62\16\2\u01c9h\3\2\2\2\u01ca") + buf.write("\u01cc\5\r\5\2\u01cb\u01ca\3\2\2\2\u01cc\u01cf\3\2\2\2") + buf.write("\u01cd\u01cb\3\2\2\2\u01cd\u01ce\3\2\2\2\u01ce\u01d0\3") + buf.write("\2\2\2\u01cf\u01cd\3\2\2\2\u01d0\u01d1\7^\2\2\u01d1\u01d2") + buf.write("\7$\2\2\u01d2\u01d3\3\2\2\2\u01d3\u01d4\b\63\17\2\u01d4") + buf.write("j\3\2\2\2\u01d5\u01d7\5\r\5\2\u01d6\u01d5\3\2\2\2\u01d7") + buf.write("\u01d8\3\2\2\2\u01d8\u01d6\3\2\2\2\u01d8\u01d9\3\2\2\2") + buf.write("\u01d9\u01da\3\2\2\2\u01da\u01db\b\64\21\2\u01dbl\3\2") + buf.write("\2\2\u01dc\u01de\7^\2\2\u01dd\u01dc\3\2\2\2\u01de\u01df") + 
buf.write("\3\2\2\2\u01df\u01dd\3\2\2\2\u01df\u01e0\3\2\2\2\u01e0") + buf.write("\u01e1\3\2\2\2\u01e1\u01e2\b\65\4\2\u01e2n\3\2\2\2\u01e3") + buf.write("\u01e4\7&\2\2\u01e4\u01e5\3\2\2\2\u01e5\u01e6\b\66\4\2") + buf.write("\u01e6p\3\2\2\2\66\2\3\4\5\6x}\u0080\u008d\u0095\u009f") + buf.write("\u00a4\u00b0\u00b6\u00bb\u00ca\u00ce\u00d2\u00d5\u00da") + buf.write("\u00de\u00e4\u00e9\u00ee\u00f1\u00f5\u00f9\u00fd\u0102") + buf.write("\u0106\u0110\u0113\u0120\u012b\u0130\u0132\u014c\u014e") + buf.write("\u0153\u0157\u015d\u0161\u0167\u017e\u018c\u019a\u01a5") + buf.write("\u01aa\u01bf\u01cd\u01d8\u01df\22\t\6\2\7\4\2\t\3\2\7") + buf.write("\3\2\6\2\2\7\5\2\7\6\2\t\16\2\4\3\2\t\f\2\t\r\2\t\24\2") + buf.write("\t\4\2\t\25\2\t\32\2\t\33\2") + return buf.getvalue() + + +class OmegaConfGrammarLexer(Lexer): + + atn = ATNDeserializer().deserialize(serializedATN()) + + decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ] + + VALUE_MODE = 1 + INTERPOLATION_MODE = 2 + QUOTED_SINGLE_MODE = 3 + QUOTED_DOUBLE_MODE = 4 + + ANY_STR = 1 + ESC_INTER = 2 + TOP_ESC = 3 + INTER_OPEN = 4 + BRACE_OPEN = 5 + BRACE_CLOSE = 6 + QUOTE_OPEN_SINGLE = 7 + QUOTE_OPEN_DOUBLE = 8 + COMMA = 9 + BRACKET_OPEN = 10 + BRACKET_CLOSE = 11 + COLON = 12 + FLOAT = 13 + INT = 14 + BOOL = 15 + NULL = 16 + UNQUOTED_CHAR = 17 + ID = 18 + ESC = 19 + WS = 20 + INTER_CLOSE = 21 + DOT = 22 + INTER_KEY = 23 + MATCHING_QUOTE_CLOSE = 24 + QUOTED_ESC = 25 + DOLLAR = 26 + INTER_BRACKET_OPEN = 27 + INTER_BRACKET_CLOSE = 28 + + channelNames = [ u"DEFAULT_TOKEN_CHANNEL", u"HIDDEN" ] + + modeNames = [ "DEFAULT_MODE", "VALUE_MODE", "INTERPOLATION_MODE", "QUOTED_SINGLE_MODE", + "QUOTED_DOUBLE_MODE" ] + + literalNames = [ "", + "'.'", "'['", "']'" ] + + symbolicNames = [ "", + "ANY_STR", "ESC_INTER", "TOP_ESC", "INTER_OPEN", "BRACE_OPEN", + "BRACE_CLOSE", "QUOTE_OPEN_SINGLE", "QUOTE_OPEN_DOUBLE", "COMMA", + "BRACKET_OPEN", "BRACKET_CLOSE", "COLON", "FLOAT", "INT", "BOOL", + "NULL", "UNQUOTED_CHAR", "ID", 
"ESC", "WS", "INTER_CLOSE", "DOT", + "INTER_KEY", "MATCHING_QUOTE_CLOSE", "QUOTED_ESC", "DOLLAR", + "INTER_BRACKET_OPEN", "INTER_BRACKET_CLOSE" ] + + ruleNames = [ "CHAR", "DIGIT", "INT_UNSIGNED", "ESC_BACKSLASH", "TOP_INTER_OPEN", + "ANY_STR", "ESC_INTER", "TOP_ESC", "BACKSLASHES", "DOLLAR", + "INTER_OPEN", "BRACE_OPEN", "BRACE_CLOSE", "QUOTE_OPEN_SINGLE", + "QUOTE_OPEN_DOUBLE", "COMMA", "BRACKET_OPEN", "BRACKET_CLOSE", + "COLON", "POINT_FLOAT", "EXPONENT_FLOAT", "FLOAT", "INT", + "BOOL", "NULL", "UNQUOTED_CHAR", "ID", "ESC", "WS", "NESTED_INTER_OPEN", + "INTER_COLON", "INTER_CLOSE", "DOT", "INTER_BRACKET_OPEN", + "INTER_BRACKET_CLOSE", "INTER_ID", "INTER_KEY", "QSINGLE_INTER_OPEN", + "MATCHING_QUOTE_CLOSE", "QSINGLE_STR", "QSINGLE_ESC_INTER", + "QSINGLE_ESC_QUOTE", "QUOTED_ESC", "QSINGLE_BACKSLASHES", + "QSINGLE_DOLLAR", "QDOUBLE_INTER_OPEN", "QDOUBLE_CLOSE", + "QDOUBLE_STR", "QDOUBLE_ESC_INTER", "QDOUBLE_ESC_QUOTE", + "QDOUBLE_ESC", "QDOUBLE_BACKSLASHES", "QDOUBLE_DOLLAR" ] + + grammarFileName = "OmegaConfGrammarLexer.g4" + + def __init__(self, input=None, output:TextIO = sys.stdout): + super().__init__(input, output) + self.checkVersion("4.9.3") + self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA, PredictionContextCache()) + self._actions = None + self._predicates = None + + diff --git a/mantis_evalkit/lib/python3.10/site-packages/omegaconf/grammar/gen/OmegaConfGrammarParser.py b/mantis_evalkit/lib/python3.10/site-packages/omegaconf/grammar/gen/OmegaConfGrammarParser.py new file mode 100644 index 0000000000000000000000000000000000000000..024cbab87fddd6ed3b4abb63105589f673b0e864 --- /dev/null +++ b/mantis_evalkit/lib/python3.10/site-packages/omegaconf/grammar/gen/OmegaConfGrammarParser.py @@ -0,0 +1,1595 @@ +# Generated from /tmp/build-via-sdist-fm63w174/omegaconf-2.3.0/omegaconf/grammar/OmegaConfGrammarParser.g4 by ANTLR 4.9.3 +# encoding: utf-8 +from antlr4 import * +from io import StringIO +import sys +if sys.version_info[1] > 5: + from 
typing import TextIO +else: + from typing.io import TextIO + + +def serializedATN(): + with StringIO() as buf: + buf.write("\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\3\36") + buf.write("\u00b7\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7\t\7") + buf.write("\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t\13\4\f\t\f\4\r\t\r\4\16") + buf.write("\t\16\4\17\t\17\4\20\t\20\4\21\t\21\3\2\3\2\3\2\3\3\3") + buf.write("\3\3\3\3\4\3\4\3\4\3\4\3\4\3\4\6\4/\n\4\r\4\16\4\60\3") + buf.write("\5\3\5\3\5\3\5\5\5\67\n\5\3\6\3\6\5\6;\n\6\3\6\3\6\3\7") + buf.write("\3\7\3\7\3\7\7\7C\n\7\f\7\16\7F\13\7\5\7H\n\7\3\7\3\7") + buf.write("\3\b\3\b\3\b\3\b\3\t\3\t\3\t\5\tS\n\t\7\tU\n\t\f\t\16") + buf.write("\tX\13\t\3\t\3\t\5\t\\\n\t\6\t^\n\t\r\t\16\t_\5\tb\n\t") + buf.write("\3\n\3\n\5\nf\n\n\3\13\3\13\7\13j\n\13\f\13\16\13m\13") + buf.write("\13\3\13\3\13\3\13\3\13\3\13\5\13t\n\13\3\13\3\13\3\13") + buf.write("\3\13\3\13\3\13\7\13|\n\13\f\13\16\13\177\13\13\3\13\3") + buf.write("\13\3\f\3\f\3\f\3\f\5\f\u0087\n\f\3\f\3\f\3\r\3\r\3\r") + buf.write("\5\r\u008e\n\r\3\16\3\16\5\16\u0092\n\16\3\16\3\16\3\16") + buf.write("\5\16\u0097\n\16\7\16\u0099\n\16\f\16\16\16\u009c\13\16") + buf.write("\3\17\3\17\5\17\u00a0\n\17\3\17\3\17\3\20\3\20\3\20\3") + buf.write("\20\3\20\3\20\3\20\3\20\3\20\3\20\6\20\u00ae\n\20\r\20") + buf.write("\16\20\u00af\3\21\6\21\u00b3\n\21\r\21\16\21\u00b4\3\21") + buf.write("\2\2\22\2\4\6\b\n\f\16\20\22\24\26\30\32\34\36 \2\4\3") + buf.write("\2\t\n\3\2\17\26\2\u00ce\2\"\3\2\2\2\4%\3\2\2\2\6.\3\2") + buf.write("\2\2\b\66\3\2\2\2\n8\3\2\2\2\f>\3\2\2\2\16K\3\2\2\2\20") + buf.write("a\3\2\2\2\22e\3\2\2\2\24g\3\2\2\2\26\u0082\3\2\2\2\30") + buf.write("\u008d\3\2\2\2\32\u0091\3\2\2\2\34\u009d\3\2\2\2\36\u00ad") + buf.write("\3\2\2\2 \u00b2\3\2\2\2\"#\5\6\4\2#$\7\2\2\3$\3\3\2\2") + buf.write("\2%&\5\b\5\2&\'\7\2\2\3\'\5\3\2\2\2(/\5\22\n\2)/\7\3\2") + buf.write("\2*/\7\25\2\2+/\7\4\2\2,/\7\5\2\2-/\7\33\2\2.(\3\2\2\2") + buf.write(".)\3\2\2\2.*\3\2\2\2.+\3\2\2\2.,\3\2\2\2.-\3\2\2\2/\60") 
+ buf.write("\3\2\2\2\60.\3\2\2\2\60\61\3\2\2\2\61\7\3\2\2\2\62\67") + buf.write("\5\36\20\2\63\67\5\34\17\2\64\67\5\n\6\2\65\67\5\f\7\2") + buf.write("\66\62\3\2\2\2\66\63\3\2\2\2\66\64\3\2\2\2\66\65\3\2\2") + buf.write("\2\67\t\3\2\2\28:\7\f\2\29;\5\20\t\2:9\3\2\2\2:;\3\2\2") + buf.write("\2;<\3\2\2\2<=\7\r\2\2=\13\3\2\2\2>G\7\7\2\2?D\5\16\b") + buf.write("\2@A\7\13\2\2AC\5\16\b\2B@\3\2\2\2CF\3\2\2\2DB\3\2\2\2") + buf.write("DE\3\2\2\2EH\3\2\2\2FD\3\2\2\2G?\3\2\2\2GH\3\2\2\2HI\3") + buf.write("\2\2\2IJ\7\b\2\2J\r\3\2\2\2KL\5 \21\2LM\7\16\2\2MN\5\b") + buf.write("\5\2N\17\3\2\2\2OV\5\b\5\2PR\7\13\2\2QS\5\b\5\2RQ\3\2") + buf.write("\2\2RS\3\2\2\2SU\3\2\2\2TP\3\2\2\2UX\3\2\2\2VT\3\2\2\2") + buf.write("VW\3\2\2\2Wb\3\2\2\2XV\3\2\2\2Y[\7\13\2\2Z\\\5\b\5\2[") + buf.write("Z\3\2\2\2[\\\3\2\2\2\\^\3\2\2\2]Y\3\2\2\2^_\3\2\2\2_]") + buf.write("\3\2\2\2_`\3\2\2\2`b\3\2\2\2aO\3\2\2\2a]\3\2\2\2b\21\3") + buf.write("\2\2\2cf\5\24\13\2df\5\26\f\2ec\3\2\2\2ed\3\2\2\2f\23") + buf.write("\3\2\2\2gk\7\6\2\2hj\7\30\2\2ih\3\2\2\2jm\3\2\2\2ki\3") + buf.write("\2\2\2kl\3\2\2\2ls\3\2\2\2mk\3\2\2\2nt\5\30\r\2op\7\f") + buf.write("\2\2pq\5\30\r\2qr\7\r\2\2rt\3\2\2\2sn\3\2\2\2so\3\2\2") + buf.write("\2t}\3\2\2\2uv\7\30\2\2v|\5\30\r\2wx\7\f\2\2xy\5\30\r") + buf.write("\2yz\7\r\2\2z|\3\2\2\2{u\3\2\2\2{w\3\2\2\2|\177\3\2\2") + buf.write("\2}{\3\2\2\2}~\3\2\2\2~\u0080\3\2\2\2\177}\3\2\2\2\u0080") + buf.write("\u0081\7\27\2\2\u0081\25\3\2\2\2\u0082\u0083\7\6\2\2\u0083") + buf.write("\u0084\5\32\16\2\u0084\u0086\7\16\2\2\u0085\u0087\5\20") + buf.write("\t\2\u0086\u0085\3\2\2\2\u0086\u0087\3\2\2\2\u0087\u0088") + buf.write("\3\2\2\2\u0088\u0089\7\b\2\2\u0089\27\3\2\2\2\u008a\u008e") + buf.write("\5\22\n\2\u008b\u008e\7\24\2\2\u008c\u008e\7\31\2\2\u008d") + buf.write("\u008a\3\2\2\2\u008d\u008b\3\2\2\2\u008d\u008c\3\2\2\2") + buf.write("\u008e\31\3\2\2\2\u008f\u0092\5\22\n\2\u0090\u0092\7\24") + buf.write("\2\2\u0091\u008f\3\2\2\2\u0091\u0090\3\2\2\2\u0092\u009a") + 
buf.write("\3\2\2\2\u0093\u0096\7\30\2\2\u0094\u0097\5\22\n\2\u0095") + buf.write("\u0097\7\24\2\2\u0096\u0094\3\2\2\2\u0096\u0095\3\2\2") + buf.write("\2\u0097\u0099\3\2\2\2\u0098\u0093\3\2\2\2\u0099\u009c") + buf.write("\3\2\2\2\u009a\u0098\3\2\2\2\u009a\u009b\3\2\2\2\u009b") + buf.write("\33\3\2\2\2\u009c\u009a\3\2\2\2\u009d\u009f\t\2\2\2\u009e") + buf.write("\u00a0\5\6\4\2\u009f\u009e\3\2\2\2\u009f\u00a0\3\2\2\2") + buf.write("\u00a0\u00a1\3\2\2\2\u00a1\u00a2\7\32\2\2\u00a2\35\3\2") + buf.write("\2\2\u00a3\u00ae\7\24\2\2\u00a4\u00ae\7\22\2\2\u00a5\u00ae") + buf.write("\7\20\2\2\u00a6\u00ae\7\17\2\2\u00a7\u00ae\7\21\2\2\u00a8") + buf.write("\u00ae\7\23\2\2\u00a9\u00ae\7\16\2\2\u00aa\u00ae\7\25") + buf.write("\2\2\u00ab\u00ae\7\26\2\2\u00ac\u00ae\5\22\n\2\u00ad\u00a3") + buf.write("\3\2\2\2\u00ad\u00a4\3\2\2\2\u00ad\u00a5\3\2\2\2\u00ad") + buf.write("\u00a6\3\2\2\2\u00ad\u00a7\3\2\2\2\u00ad\u00a8\3\2\2\2") + buf.write("\u00ad\u00a9\3\2\2\2\u00ad\u00aa\3\2\2\2\u00ad\u00ab\3") + buf.write("\2\2\2\u00ad\u00ac\3\2\2\2\u00ae\u00af\3\2\2\2\u00af\u00ad") + buf.write("\3\2\2\2\u00af\u00b0\3\2\2\2\u00b0\37\3\2\2\2\u00b1\u00b3") + buf.write("\t\3\2\2\u00b2\u00b1\3\2\2\2\u00b3\u00b4\3\2\2\2\u00b4") + buf.write("\u00b2\3\2\2\2\u00b4\u00b5\3\2\2\2\u00b5!\3\2\2\2\33.") + buf.write("\60\66:DGRV[_aeks{}\u0086\u008d\u0091\u0096\u009a\u009f") + buf.write("\u00ad\u00af\u00b4") + return buf.getvalue() + + +class OmegaConfGrammarParser ( Parser ): + + grammarFileName = "OmegaConfGrammarParser.g4" + + atn = ATNDeserializer().deserialize(serializedATN()) + + decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ] + + sharedContextCache = PredictionContextCache() + + literalNames = [ "", "", "", "", + "", "", "", "", + "", "", "", "", + "", "", "", "", + "", "", "", "", + "", "", "'.'", "", "", + "", "", "'['", "']'" ] + + symbolicNames = [ "", "ANY_STR", "ESC_INTER", "TOP_ESC", "INTER_OPEN", + "BRACE_OPEN", "BRACE_CLOSE", "QUOTE_OPEN_SINGLE", + "QUOTE_OPEN_DOUBLE", 
"COMMA", "BRACKET_OPEN", "BRACKET_CLOSE", + "COLON", "FLOAT", "INT", "BOOL", "NULL", "UNQUOTED_CHAR", + "ID", "ESC", "WS", "INTER_CLOSE", "DOT", "INTER_KEY", + "MATCHING_QUOTE_CLOSE", "QUOTED_ESC", "DOLLAR", "INTER_BRACKET_OPEN", + "INTER_BRACKET_CLOSE" ] + + RULE_configValue = 0 + RULE_singleElement = 1 + RULE_text = 2 + RULE_element = 3 + RULE_listContainer = 4 + RULE_dictContainer = 5 + RULE_dictKeyValuePair = 6 + RULE_sequence = 7 + RULE_interpolation = 8 + RULE_interpolationNode = 9 + RULE_interpolationResolver = 10 + RULE_configKey = 11 + RULE_resolverName = 12 + RULE_quotedValue = 13 + RULE_primitive = 14 + RULE_dictKey = 15 + + ruleNames = [ "configValue", "singleElement", "text", "element", "listContainer", + "dictContainer", "dictKeyValuePair", "sequence", "interpolation", + "interpolationNode", "interpolationResolver", "configKey", + "resolverName", "quotedValue", "primitive", "dictKey" ] + + EOF = Token.EOF + ANY_STR=1 + ESC_INTER=2 + TOP_ESC=3 + INTER_OPEN=4 + BRACE_OPEN=5 + BRACE_CLOSE=6 + QUOTE_OPEN_SINGLE=7 + QUOTE_OPEN_DOUBLE=8 + COMMA=9 + BRACKET_OPEN=10 + BRACKET_CLOSE=11 + COLON=12 + FLOAT=13 + INT=14 + BOOL=15 + NULL=16 + UNQUOTED_CHAR=17 + ID=18 + ESC=19 + WS=20 + INTER_CLOSE=21 + DOT=22 + INTER_KEY=23 + MATCHING_QUOTE_CLOSE=24 + QUOTED_ESC=25 + DOLLAR=26 + INTER_BRACKET_OPEN=27 + INTER_BRACKET_CLOSE=28 + + def __init__(self, input:TokenStream, output:TextIO = sys.stdout): + super().__init__(input, output) + self.checkVersion("4.9.3") + self._interp = ParserATNSimulator(self, self.atn, self.decisionsToDFA, self.sharedContextCache) + self._predicates = None + + + + + class ConfigValueContext(ParserRuleContext): + __slots__ = 'parser' + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def text(self): + return self.getTypedRuleContext(OmegaConfGrammarParser.TextContext,0) + + + def EOF(self): + return self.getToken(OmegaConfGrammarParser.EOF, 0) 
+ + def getRuleIndex(self): + return OmegaConfGrammarParser.RULE_configValue + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterConfigValue" ): + listener.enterConfigValue(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitConfigValue" ): + listener.exitConfigValue(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitConfigValue" ): + return visitor.visitConfigValue(self) + else: + return visitor.visitChildren(self) + + + + + def configValue(self): + + localctx = OmegaConfGrammarParser.ConfigValueContext(self, self._ctx, self.state) + self.enterRule(localctx, 0, self.RULE_configValue) + try: + self.enterOuterAlt(localctx, 1) + self.state = 32 + self.text() + self.state = 33 + self.match(OmegaConfGrammarParser.EOF) + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + + class SingleElementContext(ParserRuleContext): + __slots__ = 'parser' + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def element(self): + return self.getTypedRuleContext(OmegaConfGrammarParser.ElementContext,0) + + + def EOF(self): + return self.getToken(OmegaConfGrammarParser.EOF, 0) + + def getRuleIndex(self): + return OmegaConfGrammarParser.RULE_singleElement + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterSingleElement" ): + listener.enterSingleElement(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitSingleElement" ): + listener.exitSingleElement(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitSingleElement" ): + return visitor.visitSingleElement(self) + else: + return visitor.visitChildren(self) + + + + + def singleElement(self): + + localctx = 
OmegaConfGrammarParser.SingleElementContext(self, self._ctx, self.state) + self.enterRule(localctx, 2, self.RULE_singleElement) + try: + self.enterOuterAlt(localctx, 1) + self.state = 35 + self.element() + self.state = 36 + self.match(OmegaConfGrammarParser.EOF) + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + + class TextContext(ParserRuleContext): + __slots__ = 'parser' + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def interpolation(self, i:int=None): + if i is None: + return self.getTypedRuleContexts(OmegaConfGrammarParser.InterpolationContext) + else: + return self.getTypedRuleContext(OmegaConfGrammarParser.InterpolationContext,i) + + + def ANY_STR(self, i:int=None): + if i is None: + return self.getTokens(OmegaConfGrammarParser.ANY_STR) + else: + return self.getToken(OmegaConfGrammarParser.ANY_STR, i) + + def ESC(self, i:int=None): + if i is None: + return self.getTokens(OmegaConfGrammarParser.ESC) + else: + return self.getToken(OmegaConfGrammarParser.ESC, i) + + def ESC_INTER(self, i:int=None): + if i is None: + return self.getTokens(OmegaConfGrammarParser.ESC_INTER) + else: + return self.getToken(OmegaConfGrammarParser.ESC_INTER, i) + + def TOP_ESC(self, i:int=None): + if i is None: + return self.getTokens(OmegaConfGrammarParser.TOP_ESC) + else: + return self.getToken(OmegaConfGrammarParser.TOP_ESC, i) + + def QUOTED_ESC(self, i:int=None): + if i is None: + return self.getTokens(OmegaConfGrammarParser.QUOTED_ESC) + else: + return self.getToken(OmegaConfGrammarParser.QUOTED_ESC, i) + + def getRuleIndex(self): + return OmegaConfGrammarParser.RULE_text + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterText" ): + listener.enterText(self) + + def exitRule(self, 
listener:ParseTreeListener): + if hasattr( listener, "exitText" ): + listener.exitText(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitText" ): + return visitor.visitText(self) + else: + return visitor.visitChildren(self) + + + + + def text(self): + + localctx = OmegaConfGrammarParser.TextContext(self, self._ctx, self.state) + self.enterRule(localctx, 4, self.RULE_text) + self._la = 0 # Token type + try: + self.enterOuterAlt(localctx, 1) + self.state = 44 + self._errHandler.sync(self) + _la = self._input.LA(1) + while True: + self.state = 44 + self._errHandler.sync(self) + token = self._input.LA(1) + if token in [OmegaConfGrammarParser.INTER_OPEN]: + self.state = 38 + self.interpolation() + pass + elif token in [OmegaConfGrammarParser.ANY_STR]: + self.state = 39 + self.match(OmegaConfGrammarParser.ANY_STR) + pass + elif token in [OmegaConfGrammarParser.ESC]: + self.state = 40 + self.match(OmegaConfGrammarParser.ESC) + pass + elif token in [OmegaConfGrammarParser.ESC_INTER]: + self.state = 41 + self.match(OmegaConfGrammarParser.ESC_INTER) + pass + elif token in [OmegaConfGrammarParser.TOP_ESC]: + self.state = 42 + self.match(OmegaConfGrammarParser.TOP_ESC) + pass + elif token in [OmegaConfGrammarParser.QUOTED_ESC]: + self.state = 43 + self.match(OmegaConfGrammarParser.QUOTED_ESC) + pass + else: + raise NoViableAltException(self) + + self.state = 46 + self._errHandler.sync(self) + _la = self._input.LA(1) + if not ((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << OmegaConfGrammarParser.ANY_STR) | (1 << OmegaConfGrammarParser.ESC_INTER) | (1 << OmegaConfGrammarParser.TOP_ESC) | (1 << OmegaConfGrammarParser.INTER_OPEN) | (1 << OmegaConfGrammarParser.ESC) | (1 << OmegaConfGrammarParser.QUOTED_ESC))) != 0)): + break + + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + + class 
ElementContext(ParserRuleContext): + __slots__ = 'parser' + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def primitive(self): + return self.getTypedRuleContext(OmegaConfGrammarParser.PrimitiveContext,0) + + + def quotedValue(self): + return self.getTypedRuleContext(OmegaConfGrammarParser.QuotedValueContext,0) + + + def listContainer(self): + return self.getTypedRuleContext(OmegaConfGrammarParser.ListContainerContext,0) + + + def dictContainer(self): + return self.getTypedRuleContext(OmegaConfGrammarParser.DictContainerContext,0) + + + def getRuleIndex(self): + return OmegaConfGrammarParser.RULE_element + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterElement" ): + listener.enterElement(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitElement" ): + listener.exitElement(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitElement" ): + return visitor.visitElement(self) + else: + return visitor.visitChildren(self) + + + + + def element(self): + + localctx = OmegaConfGrammarParser.ElementContext(self, self._ctx, self.state) + self.enterRule(localctx, 6, self.RULE_element) + try: + self.state = 52 + self._errHandler.sync(self) + token = self._input.LA(1) + if token in [OmegaConfGrammarParser.INTER_OPEN, OmegaConfGrammarParser.COLON, OmegaConfGrammarParser.FLOAT, OmegaConfGrammarParser.INT, OmegaConfGrammarParser.BOOL, OmegaConfGrammarParser.NULL, OmegaConfGrammarParser.UNQUOTED_CHAR, OmegaConfGrammarParser.ID, OmegaConfGrammarParser.ESC, OmegaConfGrammarParser.WS]: + self.enterOuterAlt(localctx, 1) + self.state = 48 + self.primitive() + pass + elif token in [OmegaConfGrammarParser.QUOTE_OPEN_SINGLE, OmegaConfGrammarParser.QUOTE_OPEN_DOUBLE]: + self.enterOuterAlt(localctx, 2) + self.state = 49 + self.quotedValue() + pass + elif token in 
[OmegaConfGrammarParser.BRACKET_OPEN]: + self.enterOuterAlt(localctx, 3) + self.state = 50 + self.listContainer() + pass + elif token in [OmegaConfGrammarParser.BRACE_OPEN]: + self.enterOuterAlt(localctx, 4) + self.state = 51 + self.dictContainer() + pass + else: + raise NoViableAltException(self) + + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + + class ListContainerContext(ParserRuleContext): + __slots__ = 'parser' + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def BRACKET_OPEN(self): + return self.getToken(OmegaConfGrammarParser.BRACKET_OPEN, 0) + + def BRACKET_CLOSE(self): + return self.getToken(OmegaConfGrammarParser.BRACKET_CLOSE, 0) + + def sequence(self): + return self.getTypedRuleContext(OmegaConfGrammarParser.SequenceContext,0) + + + def getRuleIndex(self): + return OmegaConfGrammarParser.RULE_listContainer + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterListContainer" ): + listener.enterListContainer(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitListContainer" ): + listener.exitListContainer(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitListContainer" ): + return visitor.visitListContainer(self) + else: + return visitor.visitChildren(self) + + + + + def listContainer(self): + + localctx = OmegaConfGrammarParser.ListContainerContext(self, self._ctx, self.state) + self.enterRule(localctx, 8, self.RULE_listContainer) + self._la = 0 # Token type + try: + self.enterOuterAlt(localctx, 1) + self.state = 54 + self.match(OmegaConfGrammarParser.BRACKET_OPEN) + self.state = 56 + self._errHandler.sync(self) + _la = self._input.LA(1) + if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << 
OmegaConfGrammarParser.INTER_OPEN) | (1 << OmegaConfGrammarParser.BRACE_OPEN) | (1 << OmegaConfGrammarParser.QUOTE_OPEN_SINGLE) | (1 << OmegaConfGrammarParser.QUOTE_OPEN_DOUBLE) | (1 << OmegaConfGrammarParser.COMMA) | (1 << OmegaConfGrammarParser.BRACKET_OPEN) | (1 << OmegaConfGrammarParser.COLON) | (1 << OmegaConfGrammarParser.FLOAT) | (1 << OmegaConfGrammarParser.INT) | (1 << OmegaConfGrammarParser.BOOL) | (1 << OmegaConfGrammarParser.NULL) | (1 << OmegaConfGrammarParser.UNQUOTED_CHAR) | (1 << OmegaConfGrammarParser.ID) | (1 << OmegaConfGrammarParser.ESC) | (1 << OmegaConfGrammarParser.WS))) != 0): + self.state = 55 + self.sequence() + + + self.state = 58 + self.match(OmegaConfGrammarParser.BRACKET_CLOSE) + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + + class DictContainerContext(ParserRuleContext): + __slots__ = 'parser' + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def BRACE_OPEN(self): + return self.getToken(OmegaConfGrammarParser.BRACE_OPEN, 0) + + def BRACE_CLOSE(self): + return self.getToken(OmegaConfGrammarParser.BRACE_CLOSE, 0) + + def dictKeyValuePair(self, i:int=None): + if i is None: + return self.getTypedRuleContexts(OmegaConfGrammarParser.DictKeyValuePairContext) + else: + return self.getTypedRuleContext(OmegaConfGrammarParser.DictKeyValuePairContext,i) + + + def COMMA(self, i:int=None): + if i is None: + return self.getTokens(OmegaConfGrammarParser.COMMA) + else: + return self.getToken(OmegaConfGrammarParser.COMMA, i) + + def getRuleIndex(self): + return OmegaConfGrammarParser.RULE_dictContainer + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterDictContainer" ): + listener.enterDictContainer(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( 
listener, "exitDictContainer" ): + listener.exitDictContainer(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitDictContainer" ): + return visitor.visitDictContainer(self) + else: + return visitor.visitChildren(self) + + + + + def dictContainer(self): + + localctx = OmegaConfGrammarParser.DictContainerContext(self, self._ctx, self.state) + self.enterRule(localctx, 10, self.RULE_dictContainer) + self._la = 0 # Token type + try: + self.enterOuterAlt(localctx, 1) + self.state = 60 + self.match(OmegaConfGrammarParser.BRACE_OPEN) + self.state = 69 + self._errHandler.sync(self) + _la = self._input.LA(1) + if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << OmegaConfGrammarParser.FLOAT) | (1 << OmegaConfGrammarParser.INT) | (1 << OmegaConfGrammarParser.BOOL) | (1 << OmegaConfGrammarParser.NULL) | (1 << OmegaConfGrammarParser.UNQUOTED_CHAR) | (1 << OmegaConfGrammarParser.ID) | (1 << OmegaConfGrammarParser.ESC) | (1 << OmegaConfGrammarParser.WS))) != 0): + self.state = 61 + self.dictKeyValuePair() + self.state = 66 + self._errHandler.sync(self) + _la = self._input.LA(1) + while _la==OmegaConfGrammarParser.COMMA: + self.state = 62 + self.match(OmegaConfGrammarParser.COMMA) + self.state = 63 + self.dictKeyValuePair() + self.state = 68 + self._errHandler.sync(self) + _la = self._input.LA(1) + + + + self.state = 71 + self.match(OmegaConfGrammarParser.BRACE_CLOSE) + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + + class DictKeyValuePairContext(ParserRuleContext): + __slots__ = 'parser' + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def dictKey(self): + return self.getTypedRuleContext(OmegaConfGrammarParser.DictKeyContext,0) + + + def COLON(self): + return self.getToken(OmegaConfGrammarParser.COLON, 0) + 
+ def element(self): + return self.getTypedRuleContext(OmegaConfGrammarParser.ElementContext,0) + + + def getRuleIndex(self): + return OmegaConfGrammarParser.RULE_dictKeyValuePair + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterDictKeyValuePair" ): + listener.enterDictKeyValuePair(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitDictKeyValuePair" ): + listener.exitDictKeyValuePair(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitDictKeyValuePair" ): + return visitor.visitDictKeyValuePair(self) + else: + return visitor.visitChildren(self) + + + + + def dictKeyValuePair(self): + + localctx = OmegaConfGrammarParser.DictKeyValuePairContext(self, self._ctx, self.state) + self.enterRule(localctx, 12, self.RULE_dictKeyValuePair) + try: + self.enterOuterAlt(localctx, 1) + self.state = 73 + self.dictKey() + self.state = 74 + self.match(OmegaConfGrammarParser.COLON) + self.state = 75 + self.element() + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + + class SequenceContext(ParserRuleContext): + __slots__ = 'parser' + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def element(self, i:int=None): + if i is None: + return self.getTypedRuleContexts(OmegaConfGrammarParser.ElementContext) + else: + return self.getTypedRuleContext(OmegaConfGrammarParser.ElementContext,i) + + + def COMMA(self, i:int=None): + if i is None: + return self.getTokens(OmegaConfGrammarParser.COMMA) + else: + return self.getToken(OmegaConfGrammarParser.COMMA, i) + + def getRuleIndex(self): + return OmegaConfGrammarParser.RULE_sequence + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterSequence" ): + listener.enterSequence(self) + 
+ def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitSequence" ): + listener.exitSequence(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitSequence" ): + return visitor.visitSequence(self) + else: + return visitor.visitChildren(self) + + + + + def sequence(self): + + localctx = OmegaConfGrammarParser.SequenceContext(self, self._ctx, self.state) + self.enterRule(localctx, 14, self.RULE_sequence) + self._la = 0 # Token type + try: + self.state = 95 + self._errHandler.sync(self) + token = self._input.LA(1) + if token in [OmegaConfGrammarParser.INTER_OPEN, OmegaConfGrammarParser.BRACE_OPEN, OmegaConfGrammarParser.QUOTE_OPEN_SINGLE, OmegaConfGrammarParser.QUOTE_OPEN_DOUBLE, OmegaConfGrammarParser.BRACKET_OPEN, OmegaConfGrammarParser.COLON, OmegaConfGrammarParser.FLOAT, OmegaConfGrammarParser.INT, OmegaConfGrammarParser.BOOL, OmegaConfGrammarParser.NULL, OmegaConfGrammarParser.UNQUOTED_CHAR, OmegaConfGrammarParser.ID, OmegaConfGrammarParser.ESC, OmegaConfGrammarParser.WS]: + self.enterOuterAlt(localctx, 1) + self.state = 77 + self.element() + self.state = 84 + self._errHandler.sync(self) + _la = self._input.LA(1) + while _la==OmegaConfGrammarParser.COMMA: + self.state = 78 + self.match(OmegaConfGrammarParser.COMMA) + self.state = 80 + self._errHandler.sync(self) + _la = self._input.LA(1) + if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << OmegaConfGrammarParser.INTER_OPEN) | (1 << OmegaConfGrammarParser.BRACE_OPEN) | (1 << OmegaConfGrammarParser.QUOTE_OPEN_SINGLE) | (1 << OmegaConfGrammarParser.QUOTE_OPEN_DOUBLE) | (1 << OmegaConfGrammarParser.BRACKET_OPEN) | (1 << OmegaConfGrammarParser.COLON) | (1 << OmegaConfGrammarParser.FLOAT) | (1 << OmegaConfGrammarParser.INT) | (1 << OmegaConfGrammarParser.BOOL) | (1 << OmegaConfGrammarParser.NULL) | (1 << OmegaConfGrammarParser.UNQUOTED_CHAR) | (1 << OmegaConfGrammarParser.ID) | (1 << OmegaConfGrammarParser.ESC) | (1 << OmegaConfGrammarParser.WS))) != 0): + self.state = 
79 + self.element() + + + self.state = 86 + self._errHandler.sync(self) + _la = self._input.LA(1) + + pass + elif token in [OmegaConfGrammarParser.COMMA]: + self.enterOuterAlt(localctx, 2) + self.state = 91 + self._errHandler.sync(self) + _la = self._input.LA(1) + while True: + self.state = 87 + self.match(OmegaConfGrammarParser.COMMA) + self.state = 89 + self._errHandler.sync(self) + _la = self._input.LA(1) + if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << OmegaConfGrammarParser.INTER_OPEN) | (1 << OmegaConfGrammarParser.BRACE_OPEN) | (1 << OmegaConfGrammarParser.QUOTE_OPEN_SINGLE) | (1 << OmegaConfGrammarParser.QUOTE_OPEN_DOUBLE) | (1 << OmegaConfGrammarParser.BRACKET_OPEN) | (1 << OmegaConfGrammarParser.COLON) | (1 << OmegaConfGrammarParser.FLOAT) | (1 << OmegaConfGrammarParser.INT) | (1 << OmegaConfGrammarParser.BOOL) | (1 << OmegaConfGrammarParser.NULL) | (1 << OmegaConfGrammarParser.UNQUOTED_CHAR) | (1 << OmegaConfGrammarParser.ID) | (1 << OmegaConfGrammarParser.ESC) | (1 << OmegaConfGrammarParser.WS))) != 0): + self.state = 88 + self.element() + + + self.state = 93 + self._errHandler.sync(self) + _la = self._input.LA(1) + if not (_la==OmegaConfGrammarParser.COMMA): + break + + pass + else: + raise NoViableAltException(self) + + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + + class InterpolationContext(ParserRuleContext): + __slots__ = 'parser' + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def interpolationNode(self): + return self.getTypedRuleContext(OmegaConfGrammarParser.InterpolationNodeContext,0) + + + def interpolationResolver(self): + return self.getTypedRuleContext(OmegaConfGrammarParser.InterpolationResolverContext,0) + + + def getRuleIndex(self): + return OmegaConfGrammarParser.RULE_interpolation + + 
def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterInterpolation" ): + listener.enterInterpolation(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitInterpolation" ): + listener.exitInterpolation(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitInterpolation" ): + return visitor.visitInterpolation(self) + else: + return visitor.visitChildren(self) + + + + + def interpolation(self): + + localctx = OmegaConfGrammarParser.InterpolationContext(self, self._ctx, self.state) + self.enterRule(localctx, 16, self.RULE_interpolation) + try: + self.state = 99 + self._errHandler.sync(self) + la_ = self._interp.adaptivePredict(self._input,11,self._ctx) + if la_ == 1: + self.enterOuterAlt(localctx, 1) + self.state = 97 + self.interpolationNode() + pass + + elif la_ == 2: + self.enterOuterAlt(localctx, 2) + self.state = 98 + self.interpolationResolver() + pass + + + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + + class InterpolationNodeContext(ParserRuleContext): + __slots__ = 'parser' + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def INTER_OPEN(self): + return self.getToken(OmegaConfGrammarParser.INTER_OPEN, 0) + + def INTER_CLOSE(self): + return self.getToken(OmegaConfGrammarParser.INTER_CLOSE, 0) + + def configKey(self, i:int=None): + if i is None: + return self.getTypedRuleContexts(OmegaConfGrammarParser.ConfigKeyContext) + else: + return self.getTypedRuleContext(OmegaConfGrammarParser.ConfigKeyContext,i) + + + def BRACKET_OPEN(self, i:int=None): + if i is None: + return self.getTokens(OmegaConfGrammarParser.BRACKET_OPEN) + else: + return self.getToken(OmegaConfGrammarParser.BRACKET_OPEN, i) + + def BRACKET_CLOSE(self, i:int=None): 
+ if i is None: + return self.getTokens(OmegaConfGrammarParser.BRACKET_CLOSE) + else: + return self.getToken(OmegaConfGrammarParser.BRACKET_CLOSE, i) + + def DOT(self, i:int=None): + if i is None: + return self.getTokens(OmegaConfGrammarParser.DOT) + else: + return self.getToken(OmegaConfGrammarParser.DOT, i) + + def getRuleIndex(self): + return OmegaConfGrammarParser.RULE_interpolationNode + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterInterpolationNode" ): + listener.enterInterpolationNode(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitInterpolationNode" ): + listener.exitInterpolationNode(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitInterpolationNode" ): + return visitor.visitInterpolationNode(self) + else: + return visitor.visitChildren(self) + + + + + def interpolationNode(self): + + localctx = OmegaConfGrammarParser.InterpolationNodeContext(self, self._ctx, self.state) + self.enterRule(localctx, 18, self.RULE_interpolationNode) + self._la = 0 # Token type + try: + self.enterOuterAlt(localctx, 1) + self.state = 101 + self.match(OmegaConfGrammarParser.INTER_OPEN) + self.state = 105 + self._errHandler.sync(self) + _la = self._input.LA(1) + while _la==OmegaConfGrammarParser.DOT: + self.state = 102 + self.match(OmegaConfGrammarParser.DOT) + self.state = 107 + self._errHandler.sync(self) + _la = self._input.LA(1) + + self.state = 113 + self._errHandler.sync(self) + token = self._input.LA(1) + if token in [OmegaConfGrammarParser.INTER_OPEN, OmegaConfGrammarParser.ID, OmegaConfGrammarParser.INTER_KEY]: + self.state = 108 + self.configKey() + pass + elif token in [OmegaConfGrammarParser.BRACKET_OPEN]: + self.state = 109 + self.match(OmegaConfGrammarParser.BRACKET_OPEN) + self.state = 110 + self.configKey() + self.state = 111 + self.match(OmegaConfGrammarParser.BRACKET_CLOSE) + pass + else: + raise NoViableAltException(self) + + self.state = 123 + 
self._errHandler.sync(self) + _la = self._input.LA(1) + while _la==OmegaConfGrammarParser.BRACKET_OPEN or _la==OmegaConfGrammarParser.DOT: + self.state = 121 + self._errHandler.sync(self) + token = self._input.LA(1) + if token in [OmegaConfGrammarParser.DOT]: + self.state = 115 + self.match(OmegaConfGrammarParser.DOT) + self.state = 116 + self.configKey() + pass + elif token in [OmegaConfGrammarParser.BRACKET_OPEN]: + self.state = 117 + self.match(OmegaConfGrammarParser.BRACKET_OPEN) + self.state = 118 + self.configKey() + self.state = 119 + self.match(OmegaConfGrammarParser.BRACKET_CLOSE) + pass + else: + raise NoViableAltException(self) + + self.state = 125 + self._errHandler.sync(self) + _la = self._input.LA(1) + + self.state = 126 + self.match(OmegaConfGrammarParser.INTER_CLOSE) + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + + class InterpolationResolverContext(ParserRuleContext): + __slots__ = 'parser' + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def INTER_OPEN(self): + return self.getToken(OmegaConfGrammarParser.INTER_OPEN, 0) + + def resolverName(self): + return self.getTypedRuleContext(OmegaConfGrammarParser.ResolverNameContext,0) + + + def COLON(self): + return self.getToken(OmegaConfGrammarParser.COLON, 0) + + def BRACE_CLOSE(self): + return self.getToken(OmegaConfGrammarParser.BRACE_CLOSE, 0) + + def sequence(self): + return self.getTypedRuleContext(OmegaConfGrammarParser.SequenceContext,0) + + + def getRuleIndex(self): + return OmegaConfGrammarParser.RULE_interpolationResolver + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterInterpolationResolver" ): + listener.enterInterpolationResolver(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, 
"exitInterpolationResolver" ): + listener.exitInterpolationResolver(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitInterpolationResolver" ): + return visitor.visitInterpolationResolver(self) + else: + return visitor.visitChildren(self) + + + + + def interpolationResolver(self): + + localctx = OmegaConfGrammarParser.InterpolationResolverContext(self, self._ctx, self.state) + self.enterRule(localctx, 20, self.RULE_interpolationResolver) + self._la = 0 # Token type + try: + self.enterOuterAlt(localctx, 1) + self.state = 128 + self.match(OmegaConfGrammarParser.INTER_OPEN) + self.state = 129 + self.resolverName() + self.state = 130 + self.match(OmegaConfGrammarParser.COLON) + self.state = 132 + self._errHandler.sync(self) + _la = self._input.LA(1) + if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << OmegaConfGrammarParser.INTER_OPEN) | (1 << OmegaConfGrammarParser.BRACE_OPEN) | (1 << OmegaConfGrammarParser.QUOTE_OPEN_SINGLE) | (1 << OmegaConfGrammarParser.QUOTE_OPEN_DOUBLE) | (1 << OmegaConfGrammarParser.COMMA) | (1 << OmegaConfGrammarParser.BRACKET_OPEN) | (1 << OmegaConfGrammarParser.COLON) | (1 << OmegaConfGrammarParser.FLOAT) | (1 << OmegaConfGrammarParser.INT) | (1 << OmegaConfGrammarParser.BOOL) | (1 << OmegaConfGrammarParser.NULL) | (1 << OmegaConfGrammarParser.UNQUOTED_CHAR) | (1 << OmegaConfGrammarParser.ID) | (1 << OmegaConfGrammarParser.ESC) | (1 << OmegaConfGrammarParser.WS))) != 0): + self.state = 131 + self.sequence() + + + self.state = 134 + self.match(OmegaConfGrammarParser.BRACE_CLOSE) + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + + class ConfigKeyContext(ParserRuleContext): + __slots__ = 'parser' + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def interpolation(self): + 
return self.getTypedRuleContext(OmegaConfGrammarParser.InterpolationContext,0) + + + def ID(self): + return self.getToken(OmegaConfGrammarParser.ID, 0) + + def INTER_KEY(self): + return self.getToken(OmegaConfGrammarParser.INTER_KEY, 0) + + def getRuleIndex(self): + return OmegaConfGrammarParser.RULE_configKey + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterConfigKey" ): + listener.enterConfigKey(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitConfigKey" ): + listener.exitConfigKey(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitConfigKey" ): + return visitor.visitConfigKey(self) + else: + return visitor.visitChildren(self) + + + + + def configKey(self): + + localctx = OmegaConfGrammarParser.ConfigKeyContext(self, self._ctx, self.state) + self.enterRule(localctx, 22, self.RULE_configKey) + try: + self.state = 139 + self._errHandler.sync(self) + token = self._input.LA(1) + if token in [OmegaConfGrammarParser.INTER_OPEN]: + self.enterOuterAlt(localctx, 1) + self.state = 136 + self.interpolation() + pass + elif token in [OmegaConfGrammarParser.ID]: + self.enterOuterAlt(localctx, 2) + self.state = 137 + self.match(OmegaConfGrammarParser.ID) + pass + elif token in [OmegaConfGrammarParser.INTER_KEY]: + self.enterOuterAlt(localctx, 3) + self.state = 138 + self.match(OmegaConfGrammarParser.INTER_KEY) + pass + else: + raise NoViableAltException(self) + + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + + class ResolverNameContext(ParserRuleContext): + __slots__ = 'parser' + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def interpolation(self, i:int=None): + if i is None: + return 
self.getTypedRuleContexts(OmegaConfGrammarParser.InterpolationContext) + else: + return self.getTypedRuleContext(OmegaConfGrammarParser.InterpolationContext,i) + + + def ID(self, i:int=None): + if i is None: + return self.getTokens(OmegaConfGrammarParser.ID) + else: + return self.getToken(OmegaConfGrammarParser.ID, i) + + def DOT(self, i:int=None): + if i is None: + return self.getTokens(OmegaConfGrammarParser.DOT) + else: + return self.getToken(OmegaConfGrammarParser.DOT, i) + + def getRuleIndex(self): + return OmegaConfGrammarParser.RULE_resolverName + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterResolverName" ): + listener.enterResolverName(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitResolverName" ): + listener.exitResolverName(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitResolverName" ): + return visitor.visitResolverName(self) + else: + return visitor.visitChildren(self) + + + + + def resolverName(self): + + localctx = OmegaConfGrammarParser.ResolverNameContext(self, self._ctx, self.state) + self.enterRule(localctx, 24, self.RULE_resolverName) + self._la = 0 # Token type + try: + self.enterOuterAlt(localctx, 1) + self.state = 143 + self._errHandler.sync(self) + token = self._input.LA(1) + if token in [OmegaConfGrammarParser.INTER_OPEN]: + self.state = 141 + self.interpolation() + pass + elif token in [OmegaConfGrammarParser.ID]: + self.state = 142 + self.match(OmegaConfGrammarParser.ID) + pass + else: + raise NoViableAltException(self) + + self.state = 152 + self._errHandler.sync(self) + _la = self._input.LA(1) + while _la==OmegaConfGrammarParser.DOT: + self.state = 145 + self.match(OmegaConfGrammarParser.DOT) + self.state = 148 + self._errHandler.sync(self) + token = self._input.LA(1) + if token in [OmegaConfGrammarParser.INTER_OPEN]: + self.state = 146 + self.interpolation() + pass + elif token in [OmegaConfGrammarParser.ID]: + self.state = 
147 + self.match(OmegaConfGrammarParser.ID) + pass + else: + raise NoViableAltException(self) + + self.state = 154 + self._errHandler.sync(self) + _la = self._input.LA(1) + + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + + class QuotedValueContext(ParserRuleContext): + __slots__ = 'parser' + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def MATCHING_QUOTE_CLOSE(self): + return self.getToken(OmegaConfGrammarParser.MATCHING_QUOTE_CLOSE, 0) + + def QUOTE_OPEN_SINGLE(self): + return self.getToken(OmegaConfGrammarParser.QUOTE_OPEN_SINGLE, 0) + + def QUOTE_OPEN_DOUBLE(self): + return self.getToken(OmegaConfGrammarParser.QUOTE_OPEN_DOUBLE, 0) + + def text(self): + return self.getTypedRuleContext(OmegaConfGrammarParser.TextContext,0) + + + def getRuleIndex(self): + return OmegaConfGrammarParser.RULE_quotedValue + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterQuotedValue" ): + listener.enterQuotedValue(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitQuotedValue" ): + listener.exitQuotedValue(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitQuotedValue" ): + return visitor.visitQuotedValue(self) + else: + return visitor.visitChildren(self) + + + + + def quotedValue(self): + + localctx = OmegaConfGrammarParser.QuotedValueContext(self, self._ctx, self.state) + self.enterRule(localctx, 26, self.RULE_quotedValue) + self._la = 0 # Token type + try: + self.enterOuterAlt(localctx, 1) + self.state = 155 + _la = self._input.LA(1) + if not(_la==OmegaConfGrammarParser.QUOTE_OPEN_SINGLE or _la==OmegaConfGrammarParser.QUOTE_OPEN_DOUBLE): + self._errHandler.recoverInline(self) + else: + self._errHandler.reportMatch(self) + self.consume() 
+ self.state = 157 + self._errHandler.sync(self) + _la = self._input.LA(1) + if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << OmegaConfGrammarParser.ANY_STR) | (1 << OmegaConfGrammarParser.ESC_INTER) | (1 << OmegaConfGrammarParser.TOP_ESC) | (1 << OmegaConfGrammarParser.INTER_OPEN) | (1 << OmegaConfGrammarParser.ESC) | (1 << OmegaConfGrammarParser.QUOTED_ESC))) != 0): + self.state = 156 + self.text() + + + self.state = 159 + self.match(OmegaConfGrammarParser.MATCHING_QUOTE_CLOSE) + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + + class PrimitiveContext(ParserRuleContext): + __slots__ = 'parser' + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def ID(self, i:int=None): + if i is None: + return self.getTokens(OmegaConfGrammarParser.ID) + else: + return self.getToken(OmegaConfGrammarParser.ID, i) + + def NULL(self, i:int=None): + if i is None: + return self.getTokens(OmegaConfGrammarParser.NULL) + else: + return self.getToken(OmegaConfGrammarParser.NULL, i) + + def INT(self, i:int=None): + if i is None: + return self.getTokens(OmegaConfGrammarParser.INT) + else: + return self.getToken(OmegaConfGrammarParser.INT, i) + + def FLOAT(self, i:int=None): + if i is None: + return self.getTokens(OmegaConfGrammarParser.FLOAT) + else: + return self.getToken(OmegaConfGrammarParser.FLOAT, i) + + def BOOL(self, i:int=None): + if i is None: + return self.getTokens(OmegaConfGrammarParser.BOOL) + else: + return self.getToken(OmegaConfGrammarParser.BOOL, i) + + def UNQUOTED_CHAR(self, i:int=None): + if i is None: + return self.getTokens(OmegaConfGrammarParser.UNQUOTED_CHAR) + else: + return self.getToken(OmegaConfGrammarParser.UNQUOTED_CHAR, i) + + def COLON(self, i:int=None): + if i is None: + return 
self.getTokens(OmegaConfGrammarParser.COLON) + else: + return self.getToken(OmegaConfGrammarParser.COLON, i) + + def ESC(self, i:int=None): + if i is None: + return self.getTokens(OmegaConfGrammarParser.ESC) + else: + return self.getToken(OmegaConfGrammarParser.ESC, i) + + def WS(self, i:int=None): + if i is None: + return self.getTokens(OmegaConfGrammarParser.WS) + else: + return self.getToken(OmegaConfGrammarParser.WS, i) + + def interpolation(self, i:int=None): + if i is None: + return self.getTypedRuleContexts(OmegaConfGrammarParser.InterpolationContext) + else: + return self.getTypedRuleContext(OmegaConfGrammarParser.InterpolationContext,i) + + + def getRuleIndex(self): + return OmegaConfGrammarParser.RULE_primitive + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterPrimitive" ): + listener.enterPrimitive(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitPrimitive" ): + listener.exitPrimitive(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitPrimitive" ): + return visitor.visitPrimitive(self) + else: + return visitor.visitChildren(self) + + + + + def primitive(self): + + localctx = OmegaConfGrammarParser.PrimitiveContext(self, self._ctx, self.state) + self.enterRule(localctx, 28, self.RULE_primitive) + self._la = 0 # Token type + try: + self.enterOuterAlt(localctx, 1) + self.state = 171 + self._errHandler.sync(self) + _la = self._input.LA(1) + while True: + self.state = 171 + self._errHandler.sync(self) + token = self._input.LA(1) + if token in [OmegaConfGrammarParser.ID]: + self.state = 161 + self.match(OmegaConfGrammarParser.ID) + pass + elif token in [OmegaConfGrammarParser.NULL]: + self.state = 162 + self.match(OmegaConfGrammarParser.NULL) + pass + elif token in [OmegaConfGrammarParser.INT]: + self.state = 163 + self.match(OmegaConfGrammarParser.INT) + pass + elif token in [OmegaConfGrammarParser.FLOAT]: + self.state = 164 + 
self.match(OmegaConfGrammarParser.FLOAT) + pass + elif token in [OmegaConfGrammarParser.BOOL]: + self.state = 165 + self.match(OmegaConfGrammarParser.BOOL) + pass + elif token in [OmegaConfGrammarParser.UNQUOTED_CHAR]: + self.state = 166 + self.match(OmegaConfGrammarParser.UNQUOTED_CHAR) + pass + elif token in [OmegaConfGrammarParser.COLON]: + self.state = 167 + self.match(OmegaConfGrammarParser.COLON) + pass + elif token in [OmegaConfGrammarParser.ESC]: + self.state = 168 + self.match(OmegaConfGrammarParser.ESC) + pass + elif token in [OmegaConfGrammarParser.WS]: + self.state = 169 + self.match(OmegaConfGrammarParser.WS) + pass + elif token in [OmegaConfGrammarParser.INTER_OPEN]: + self.state = 170 + self.interpolation() + pass + else: + raise NoViableAltException(self) + + self.state = 173 + self._errHandler.sync(self) + _la = self._input.LA(1) + if not ((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << OmegaConfGrammarParser.INTER_OPEN) | (1 << OmegaConfGrammarParser.COLON) | (1 << OmegaConfGrammarParser.FLOAT) | (1 << OmegaConfGrammarParser.INT) | (1 << OmegaConfGrammarParser.BOOL) | (1 << OmegaConfGrammarParser.NULL) | (1 << OmegaConfGrammarParser.UNQUOTED_CHAR) | (1 << OmegaConfGrammarParser.ID) | (1 << OmegaConfGrammarParser.ESC) | (1 << OmegaConfGrammarParser.WS))) != 0)): + break + + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + + class DictKeyContext(ParserRuleContext): + __slots__ = 'parser' + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def ID(self, i:int=None): + if i is None: + return self.getTokens(OmegaConfGrammarParser.ID) + else: + return self.getToken(OmegaConfGrammarParser.ID, i) + + def NULL(self, i:int=None): + if i is None: + return self.getTokens(OmegaConfGrammarParser.NULL) + else: + return 
self.getToken(OmegaConfGrammarParser.NULL, i) + + def INT(self, i:int=None): + if i is None: + return self.getTokens(OmegaConfGrammarParser.INT) + else: + return self.getToken(OmegaConfGrammarParser.INT, i) + + def FLOAT(self, i:int=None): + if i is None: + return self.getTokens(OmegaConfGrammarParser.FLOAT) + else: + return self.getToken(OmegaConfGrammarParser.FLOAT, i) + + def BOOL(self, i:int=None): + if i is None: + return self.getTokens(OmegaConfGrammarParser.BOOL) + else: + return self.getToken(OmegaConfGrammarParser.BOOL, i) + + def UNQUOTED_CHAR(self, i:int=None): + if i is None: + return self.getTokens(OmegaConfGrammarParser.UNQUOTED_CHAR) + else: + return self.getToken(OmegaConfGrammarParser.UNQUOTED_CHAR, i) + + def ESC(self, i:int=None): + if i is None: + return self.getTokens(OmegaConfGrammarParser.ESC) + else: + return self.getToken(OmegaConfGrammarParser.ESC, i) + + def WS(self, i:int=None): + if i is None: + return self.getTokens(OmegaConfGrammarParser.WS) + else: + return self.getToken(OmegaConfGrammarParser.WS, i) + + def getRuleIndex(self): + return OmegaConfGrammarParser.RULE_dictKey + + def enterRule(self, listener:ParseTreeListener): + if hasattr( listener, "enterDictKey" ): + listener.enterDictKey(self) + + def exitRule(self, listener:ParseTreeListener): + if hasattr( listener, "exitDictKey" ): + listener.exitDictKey(self) + + def accept(self, visitor:ParseTreeVisitor): + if hasattr( visitor, "visitDictKey" ): + return visitor.visitDictKey(self) + else: + return visitor.visitChildren(self) + + + + + def dictKey(self): + + localctx = OmegaConfGrammarParser.DictKeyContext(self, self._ctx, self.state) + self.enterRule(localctx, 30, self.RULE_dictKey) + self._la = 0 # Token type + try: + self.enterOuterAlt(localctx, 1) + self.state = 176 + self._errHandler.sync(self) + _la = self._input.LA(1) + while True: + self.state = 175 + _la = self._input.LA(1) + if not((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << OmegaConfGrammarParser.FLOAT) | (1 << 
OmegaConfGrammarParser.INT) | (1 << OmegaConfGrammarParser.BOOL) | (1 << OmegaConfGrammarParser.NULL) | (1 << OmegaConfGrammarParser.UNQUOTED_CHAR) | (1 << OmegaConfGrammarParser.ID) | (1 << OmegaConfGrammarParser.ESC) | (1 << OmegaConfGrammarParser.WS))) != 0)): + self._errHandler.recoverInline(self) + else: + self._errHandler.reportMatch(self) + self.consume() + self.state = 178 + self._errHandler.sync(self) + _la = self._input.LA(1) + if not ((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << OmegaConfGrammarParser.FLOAT) | (1 << OmegaConfGrammarParser.INT) | (1 << OmegaConfGrammarParser.BOOL) | (1 << OmegaConfGrammarParser.NULL) | (1 << OmegaConfGrammarParser.UNQUOTED_CHAR) | (1 << OmegaConfGrammarParser.ID) | (1 << OmegaConfGrammarParser.ESC) | (1 << OmegaConfGrammarParser.WS))) != 0)): + break + + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + + + + diff --git a/mantis_evalkit/lib/python3.10/site-packages/omegaconf/grammar/gen/OmegaConfGrammarParserListener.py b/mantis_evalkit/lib/python3.10/site-packages/omegaconf/grammar/gen/OmegaConfGrammarParserListener.py new file mode 100644 index 0000000000000000000000000000000000000000..d149348181e910757b6a3d6388073b951fac4678 --- /dev/null +++ b/mantis_evalkit/lib/python3.10/site-packages/omegaconf/grammar/gen/OmegaConfGrammarParserListener.py @@ -0,0 +1,156 @@ +# Generated from /tmp/build-via-sdist-fm63w174/omegaconf-2.3.0/omegaconf/grammar/OmegaConfGrammarParser.g4 by ANTLR 4.9.3 +from antlr4 import * +if __name__ is not None and "." in __name__: + from .OmegaConfGrammarParser import OmegaConfGrammarParser +else: + from OmegaConfGrammarParser import OmegaConfGrammarParser + +# This class defines a complete listener for a parse tree produced by OmegaConfGrammarParser. 
+class OmegaConfGrammarParserListener(ParseTreeListener): + + # Enter a parse tree produced by OmegaConfGrammarParser#configValue. + def enterConfigValue(self, ctx:OmegaConfGrammarParser.ConfigValueContext): + pass + + # Exit a parse tree produced by OmegaConfGrammarParser#configValue. + def exitConfigValue(self, ctx:OmegaConfGrammarParser.ConfigValueContext): + pass + + + # Enter a parse tree produced by OmegaConfGrammarParser#singleElement. + def enterSingleElement(self, ctx:OmegaConfGrammarParser.SingleElementContext): + pass + + # Exit a parse tree produced by OmegaConfGrammarParser#singleElement. + def exitSingleElement(self, ctx:OmegaConfGrammarParser.SingleElementContext): + pass + + + # Enter a parse tree produced by OmegaConfGrammarParser#text. + def enterText(self, ctx:OmegaConfGrammarParser.TextContext): + pass + + # Exit a parse tree produced by OmegaConfGrammarParser#text. + def exitText(self, ctx:OmegaConfGrammarParser.TextContext): + pass + + + # Enter a parse tree produced by OmegaConfGrammarParser#element. + def enterElement(self, ctx:OmegaConfGrammarParser.ElementContext): + pass + + # Exit a parse tree produced by OmegaConfGrammarParser#element. + def exitElement(self, ctx:OmegaConfGrammarParser.ElementContext): + pass + + + # Enter a parse tree produced by OmegaConfGrammarParser#listContainer. + def enterListContainer(self, ctx:OmegaConfGrammarParser.ListContainerContext): + pass + + # Exit a parse tree produced by OmegaConfGrammarParser#listContainer. + def exitListContainer(self, ctx:OmegaConfGrammarParser.ListContainerContext): + pass + + + # Enter a parse tree produced by OmegaConfGrammarParser#dictContainer. + def enterDictContainer(self, ctx:OmegaConfGrammarParser.DictContainerContext): + pass + + # Exit a parse tree produced by OmegaConfGrammarParser#dictContainer. + def exitDictContainer(self, ctx:OmegaConfGrammarParser.DictContainerContext): + pass + + + # Enter a parse tree produced by OmegaConfGrammarParser#dictKeyValuePair. 
+ def enterDictKeyValuePair(self, ctx:OmegaConfGrammarParser.DictKeyValuePairContext): + pass + + # Exit a parse tree produced by OmegaConfGrammarParser#dictKeyValuePair. + def exitDictKeyValuePair(self, ctx:OmegaConfGrammarParser.DictKeyValuePairContext): + pass + + + # Enter a parse tree produced by OmegaConfGrammarParser#sequence. + def enterSequence(self, ctx:OmegaConfGrammarParser.SequenceContext): + pass + + # Exit a parse tree produced by OmegaConfGrammarParser#sequence. + def exitSequence(self, ctx:OmegaConfGrammarParser.SequenceContext): + pass + + + # Enter a parse tree produced by OmegaConfGrammarParser#interpolation. + def enterInterpolation(self, ctx:OmegaConfGrammarParser.InterpolationContext): + pass + + # Exit a parse tree produced by OmegaConfGrammarParser#interpolation. + def exitInterpolation(self, ctx:OmegaConfGrammarParser.InterpolationContext): + pass + + + # Enter a parse tree produced by OmegaConfGrammarParser#interpolationNode. + def enterInterpolationNode(self, ctx:OmegaConfGrammarParser.InterpolationNodeContext): + pass + + # Exit a parse tree produced by OmegaConfGrammarParser#interpolationNode. + def exitInterpolationNode(self, ctx:OmegaConfGrammarParser.InterpolationNodeContext): + pass + + + # Enter a parse tree produced by OmegaConfGrammarParser#interpolationResolver. + def enterInterpolationResolver(self, ctx:OmegaConfGrammarParser.InterpolationResolverContext): + pass + + # Exit a parse tree produced by OmegaConfGrammarParser#interpolationResolver. + def exitInterpolationResolver(self, ctx:OmegaConfGrammarParser.InterpolationResolverContext): + pass + + + # Enter a parse tree produced by OmegaConfGrammarParser#configKey. + def enterConfigKey(self, ctx:OmegaConfGrammarParser.ConfigKeyContext): + pass + + # Exit a parse tree produced by OmegaConfGrammarParser#configKey. + def exitConfigKey(self, ctx:OmegaConfGrammarParser.ConfigKeyContext): + pass + + + # Enter a parse tree produced by OmegaConfGrammarParser#resolverName. 
+ def enterResolverName(self, ctx:OmegaConfGrammarParser.ResolverNameContext): + pass + + # Exit a parse tree produced by OmegaConfGrammarParser#resolverName. + def exitResolverName(self, ctx:OmegaConfGrammarParser.ResolverNameContext): + pass + + + # Enter a parse tree produced by OmegaConfGrammarParser#quotedValue. + def enterQuotedValue(self, ctx:OmegaConfGrammarParser.QuotedValueContext): + pass + + # Exit a parse tree produced by OmegaConfGrammarParser#quotedValue. + def exitQuotedValue(self, ctx:OmegaConfGrammarParser.QuotedValueContext): + pass + + + # Enter a parse tree produced by OmegaConfGrammarParser#primitive. + def enterPrimitive(self, ctx:OmegaConfGrammarParser.PrimitiveContext): + pass + + # Exit a parse tree produced by OmegaConfGrammarParser#primitive. + def exitPrimitive(self, ctx:OmegaConfGrammarParser.PrimitiveContext): + pass + + + # Enter a parse tree produced by OmegaConfGrammarParser#dictKey. + def enterDictKey(self, ctx:OmegaConfGrammarParser.DictKeyContext): + pass + + # Exit a parse tree produced by OmegaConfGrammarParser#dictKey. + def exitDictKey(self, ctx:OmegaConfGrammarParser.DictKeyContext): + pass + + + +del OmegaConfGrammarParser \ No newline at end of file diff --git a/mantis_evalkit/lib/python3.10/site-packages/omegaconf/grammar/gen/OmegaConfGrammarParserVisitor.py b/mantis_evalkit/lib/python3.10/site-packages/omegaconf/grammar/gen/OmegaConfGrammarParserVisitor.py new file mode 100644 index 0000000000000000000000000000000000000000..ed247dd92f6654d2c862e848204a4679cc23e3e3 --- /dev/null +++ b/mantis_evalkit/lib/python3.10/site-packages/omegaconf/grammar/gen/OmegaConfGrammarParserVisitor.py @@ -0,0 +1,93 @@ +# Generated from /tmp/build-via-sdist-fm63w174/omegaconf-2.3.0/omegaconf/grammar/OmegaConfGrammarParser.g4 by ANTLR 4.9.3 +from antlr4 import * +if __name__ is not None and "." 
in __name__: + from .OmegaConfGrammarParser import OmegaConfGrammarParser +else: + from OmegaConfGrammarParser import OmegaConfGrammarParser + +# This class defines a complete generic visitor for a parse tree produced by OmegaConfGrammarParser. + +class OmegaConfGrammarParserVisitor(ParseTreeVisitor): + + # Visit a parse tree produced by OmegaConfGrammarParser#configValue. + def visitConfigValue(self, ctx:OmegaConfGrammarParser.ConfigValueContext): + return self.visitChildren(ctx) + + + # Visit a parse tree produced by OmegaConfGrammarParser#singleElement. + def visitSingleElement(self, ctx:OmegaConfGrammarParser.SingleElementContext): + return self.visitChildren(ctx) + + + # Visit a parse tree produced by OmegaConfGrammarParser#text. + def visitText(self, ctx:OmegaConfGrammarParser.TextContext): + return self.visitChildren(ctx) + + + # Visit a parse tree produced by OmegaConfGrammarParser#element. + def visitElement(self, ctx:OmegaConfGrammarParser.ElementContext): + return self.visitChildren(ctx) + + + # Visit a parse tree produced by OmegaConfGrammarParser#listContainer. + def visitListContainer(self, ctx:OmegaConfGrammarParser.ListContainerContext): + return self.visitChildren(ctx) + + + # Visit a parse tree produced by OmegaConfGrammarParser#dictContainer. + def visitDictContainer(self, ctx:OmegaConfGrammarParser.DictContainerContext): + return self.visitChildren(ctx) + + + # Visit a parse tree produced by OmegaConfGrammarParser#dictKeyValuePair. + def visitDictKeyValuePair(self, ctx:OmegaConfGrammarParser.DictKeyValuePairContext): + return self.visitChildren(ctx) + + + # Visit a parse tree produced by OmegaConfGrammarParser#sequence. + def visitSequence(self, ctx:OmegaConfGrammarParser.SequenceContext): + return self.visitChildren(ctx) + + + # Visit a parse tree produced by OmegaConfGrammarParser#interpolation. 
+ def visitInterpolation(self, ctx:OmegaConfGrammarParser.InterpolationContext): + return self.visitChildren(ctx) + + + # Visit a parse tree produced by OmegaConfGrammarParser#interpolationNode. + def visitInterpolationNode(self, ctx:OmegaConfGrammarParser.InterpolationNodeContext): + return self.visitChildren(ctx) + + + # Visit a parse tree produced by OmegaConfGrammarParser#interpolationResolver. + def visitInterpolationResolver(self, ctx:OmegaConfGrammarParser.InterpolationResolverContext): + return self.visitChildren(ctx) + + + # Visit a parse tree produced by OmegaConfGrammarParser#configKey. + def visitConfigKey(self, ctx:OmegaConfGrammarParser.ConfigKeyContext): + return self.visitChildren(ctx) + + + # Visit a parse tree produced by OmegaConfGrammarParser#resolverName. + def visitResolverName(self, ctx:OmegaConfGrammarParser.ResolverNameContext): + return self.visitChildren(ctx) + + + # Visit a parse tree produced by OmegaConfGrammarParser#quotedValue. + def visitQuotedValue(self, ctx:OmegaConfGrammarParser.QuotedValueContext): + return self.visitChildren(ctx) + + + # Visit a parse tree produced by OmegaConfGrammarParser#primitive. + def visitPrimitive(self, ctx:OmegaConfGrammarParser.PrimitiveContext): + return self.visitChildren(ctx) + + + # Visit a parse tree produced by OmegaConfGrammarParser#dictKey. 
+ def visitDictKey(self, ctx:OmegaConfGrammarParser.DictKeyContext): + return self.visitChildren(ctx) + + + +del OmegaConfGrammarParser \ No newline at end of file diff --git a/mantis_evalkit/lib/python3.10/site-packages/omegaconf/grammar/gen/__init__.py b/mantis_evalkit/lib/python3.10/site-packages/omegaconf/grammar/gen/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/mantis_evalkit/lib/python3.10/site-packages/omegaconf/grammar/gen/__pycache__/OmegaConfGrammarLexer.cpython-310.pyc b/mantis_evalkit/lib/python3.10/site-packages/omegaconf/grammar/gen/__pycache__/OmegaConfGrammarLexer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ce6da66f6fc93d99bc44548aa5c7cb451ef51a23 Binary files /dev/null and b/mantis_evalkit/lib/python3.10/site-packages/omegaconf/grammar/gen/__pycache__/OmegaConfGrammarLexer.cpython-310.pyc differ diff --git a/mantis_evalkit/lib/python3.10/site-packages/omegaconf/grammar/gen/__pycache__/OmegaConfGrammarParser.cpython-310.pyc b/mantis_evalkit/lib/python3.10/site-packages/omegaconf/grammar/gen/__pycache__/OmegaConfGrammarParser.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9c6b28a8e7e92beb812a751ab3679b7b5e916a36 Binary files /dev/null and b/mantis_evalkit/lib/python3.10/site-packages/omegaconf/grammar/gen/__pycache__/OmegaConfGrammarParser.cpython-310.pyc differ diff --git a/mantis_evalkit/lib/python3.10/site-packages/omegaconf/grammar/gen/__pycache__/OmegaConfGrammarParserListener.cpython-310.pyc b/mantis_evalkit/lib/python3.10/site-packages/omegaconf/grammar/gen/__pycache__/OmegaConfGrammarParserListener.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..715e3d2706c25a290d47e250d2bf68a97f0fe565 Binary files /dev/null and b/mantis_evalkit/lib/python3.10/site-packages/omegaconf/grammar/gen/__pycache__/OmegaConfGrammarParserListener.cpython-310.pyc 
differ diff --git a/mantis_evalkit/lib/python3.10/site-packages/omegaconf/grammar/gen/__pycache__/OmegaConfGrammarParserVisitor.cpython-310.pyc b/mantis_evalkit/lib/python3.10/site-packages/omegaconf/grammar/gen/__pycache__/OmegaConfGrammarParserVisitor.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4561f5f53ef62651a30c6975b8ccd1f94df2405d Binary files /dev/null and b/mantis_evalkit/lib/python3.10/site-packages/omegaconf/grammar/gen/__pycache__/OmegaConfGrammarParserVisitor.cpython-310.pyc differ diff --git a/mantis_evalkit/lib/python3.10/site-packages/omegaconf/grammar/gen/__pycache__/__init__.cpython-310.pyc b/mantis_evalkit/lib/python3.10/site-packages/omegaconf/grammar/gen/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9febbafa145a304d194de9c523dbd03d38b74ab2 Binary files /dev/null and b/mantis_evalkit/lib/python3.10/site-packages/omegaconf/grammar/gen/__pycache__/__init__.cpython-310.pyc differ diff --git a/mantis_evalkit/lib/python3.10/site-packages/omegaconf/grammar_parser.py b/mantis_evalkit/lib/python3.10/site-packages/omegaconf/grammar_parser.py new file mode 100644 index 0000000000000000000000000000000000000000..3c883c2cfd49b442a90bda0c7777cccca706e82d --- /dev/null +++ b/mantis_evalkit/lib/python3.10/site-packages/omegaconf/grammar_parser.py @@ -0,0 +1,144 @@ +import re +import threading +from typing import Any + +from antlr4 import CommonTokenStream, InputStream, ParserRuleContext +from antlr4.error.ErrorListener import ErrorListener + +from .errors import GrammarParseError + +# Import from visitor in order to check the presence of generated grammar files +# files in a single place. +from .grammar_visitor import ( # type: ignore + OmegaConfGrammarLexer, + OmegaConfGrammarParser, +) + +# Used to cache grammar objects to avoid re-creating them on each call to `parse()`. +# We use a per-thread cache to make it thread-safe. 
+_grammar_cache = threading.local() + +# Build regex pattern to efficiently identify typical interpolations. +# See test `test_match_simple_interpolation_pattern` for examples. +_config_key = r"[$\w]+" # foo, $0, $bar, $foo_$bar123$ +_key_maybe_brackets = f"{_config_key}|\\[{_config_key}\\]" # foo, [foo], [$bar] +_node_access = f"\\.{_key_maybe_brackets}" # .foo, [foo], [$bar] +_node_path = f"(\\.)*({_key_maybe_brackets})({_node_access})*" # [foo].bar, .foo[bar] +_node_inter = f"\\${{\\s*{_node_path}\\s*}}" # node interpolation ${foo.bar} +_id = "[a-zA-Z_][\\w\\-]*" # foo, foo_bar, foo-bar, abc123 +_resolver_name = f"({_id}(\\.{_id})*)?" # foo, ns.bar3, ns_1.ns_2.b0z +_arg = r"[a-zA-Z_0-9/\-\+.$%*@?|]+" # string representing a resolver argument +_args = f"{_arg}(\\s*,\\s*{_arg})*" # list of resolver arguments +_resolver_inter = f"\\${{\\s*{_resolver_name}\\s*:\\s*{_args}?\\s*}}" # ${foo:bar} +_inter = f"({_node_inter}|{_resolver_inter})" # any kind of interpolation +_outer = "([^$]|\\$(?!{))+" # any character except $ (unless not followed by {) +SIMPLE_INTERPOLATION_PATTERN = re.compile( + f"({_outer})?({_inter}({_outer})?)+$", flags=re.ASCII +) +# NOTE: SIMPLE_INTERPOLATION_PATTERN must not generate false positive matches: +# it must not accept anything that isn't a valid interpolation (per the +# interpolation grammar defined in `omegaconf/grammar/*.g4`). 
+ + +class OmegaConfErrorListener(ErrorListener): # type: ignore + def syntaxError( + self, + recognizer: Any, + offending_symbol: Any, + line: Any, + column: Any, + msg: Any, + e: Any, + ) -> None: + raise GrammarParseError(str(e) if msg is None else msg) from e + + def reportAmbiguity( + self, + recognizer: Any, + dfa: Any, + startIndex: Any, + stopIndex: Any, + exact: Any, + ambigAlts: Any, + configs: Any, + ) -> None: + raise GrammarParseError("ANTLR error: Ambiguity") # pragma: no cover + + def reportAttemptingFullContext( + self, + recognizer: Any, + dfa: Any, + startIndex: Any, + stopIndex: Any, + conflictingAlts: Any, + configs: Any, + ) -> None: + # Note: for now we raise an error to be safe. However this is mostly a + # performance warning, so in the future this may be relaxed if we need + # to change the grammar in such a way that this warning cannot be + # avoided (another option would be to switch to SLL parsing mode). + raise GrammarParseError( + "ANTLR error: Attempting Full Context" + ) # pragma: no cover + + def reportContextSensitivity( + self, + recognizer: Any, + dfa: Any, + startIndex: Any, + stopIndex: Any, + prediction: Any, + configs: Any, + ) -> None: + raise GrammarParseError("ANTLR error: ContextSensitivity") # pragma: no cover + + +def parse( + value: str, parser_rule: str = "configValue", lexer_mode: str = "DEFAULT_MODE" +) -> ParserRuleContext: + """ + Parse interpolated string `value` (and return the parse tree). 
+ """ + l_mode = getattr(OmegaConfGrammarLexer, lexer_mode) + istream = InputStream(value) + + cached = getattr(_grammar_cache, "data", None) + if cached is None: + error_listener = OmegaConfErrorListener() + lexer = OmegaConfGrammarLexer(istream) + lexer.removeErrorListeners() + lexer.addErrorListener(error_listener) + lexer.mode(l_mode) + token_stream = CommonTokenStream(lexer) + parser = OmegaConfGrammarParser(token_stream) + parser.removeErrorListeners() + parser.addErrorListener(error_listener) + + # The two lines below could be enabled in the future if we decide to switch + # to SLL prediction mode. Warning though, it has not been fully tested yet! + # from antlr4 import PredictionMode + # parser._interp.predictionMode = PredictionMode.SLL + + # Note that although the input stream `istream` is implicitly cached within + # the lexer, it will be replaced by a new input next time the lexer is re-used. + _grammar_cache.data = lexer, token_stream, parser + + else: + lexer, token_stream, parser = cached + # Replace the old input stream with the new one. + lexer.inputStream = istream + # Initialize the lexer / token stream / parser to process the new input. + lexer.mode(l_mode) + token_stream.setTokenSource(lexer) + parser.reset() + + try: + return getattr(parser, parser_rule)() + except Exception as exc: + if type(exc) is Exception and str(exc) == "Empty Stack": + # This exception is raised by antlr when trying to pop a mode while + # no mode has been pushed. We convert it into an `GrammarParseError` + # to facilitate exception handling from the caller. 
+ raise GrammarParseError("Empty Stack") + else: + raise diff --git a/mantis_evalkit/lib/python3.10/site-packages/omegaconf/grammar_visitor.py b/mantis_evalkit/lib/python3.10/site-packages/omegaconf/grammar_visitor.py new file mode 100644 index 0000000000000000000000000000000000000000..1771516dcf78bbc692512d58d4dea55850b957b7 --- /dev/null +++ b/mantis_evalkit/lib/python3.10/site-packages/omegaconf/grammar_visitor.py @@ -0,0 +1,392 @@ +import sys +import warnings +from itertools import zip_longest +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Dict, + Generator, + List, + Optional, + Set, + Tuple, + Union, +) + +from antlr4 import TerminalNode + +from .errors import InterpolationResolutionError + +if TYPE_CHECKING: + from .base import Node # noqa F401 + +try: + from omegaconf.grammar.gen.OmegaConfGrammarLexer import OmegaConfGrammarLexer + from omegaconf.grammar.gen.OmegaConfGrammarParser import OmegaConfGrammarParser + from omegaconf.grammar.gen.OmegaConfGrammarParserVisitor import ( + OmegaConfGrammarParserVisitor, + ) + +except ModuleNotFoundError: # pragma: no cover + print( + "Error importing OmegaConf's generated parsers, run `python setup.py antlr` to regenerate.", + file=sys.stderr, + ) + sys.exit(1) + + +class GrammarVisitor(OmegaConfGrammarParserVisitor): + def __init__( + self, + node_interpolation_callback: Callable[ + [str, Optional[Set[int]]], + Optional["Node"], + ], + resolver_interpolation_callback: Callable[..., Any], + memo: Optional[Set[int]], + **kw: Dict[Any, Any], + ): + """ + Constructor. + + :param node_interpolation_callback: Callback function that is called when + needing to resolve a node interpolation. This function should take a single + string input which is the key's dot path (ex: `"foo.bar"`). + + :param resolver_interpolation_callback: Callback function that is called when + needing to resolve a resolver interpolation. 
This function should accept + three keyword arguments: `name` (str, the name of the resolver), + `args` (tuple, the inputs to the resolver), and `args_str` (tuple, + the string representation of the inputs to the resolver). + + :param kw: Additional keyword arguments to be forwarded to parent class. + """ + super().__init__(**kw) + self.node_interpolation_callback = node_interpolation_callback + self.resolver_interpolation_callback = resolver_interpolation_callback + self.memo = memo + + def aggregateResult(self, aggregate: List[Any], nextResult: Any) -> List[Any]: + raise NotImplementedError + + def defaultResult(self) -> List[Any]: + # Raising an exception because not currently used (like `aggregateResult()`). + raise NotImplementedError + + def visitConfigKey(self, ctx: OmegaConfGrammarParser.ConfigKeyContext) -> str: + from ._utils import _get_value + + # interpolation | ID | INTER_KEY + assert ctx.getChildCount() == 1 + child = ctx.getChild(0) + if isinstance(child, OmegaConfGrammarParser.InterpolationContext): + res = _get_value(self.visitInterpolation(child)) + if not isinstance(res, str): + raise InterpolationResolutionError( + f"The following interpolation is used to denote a config key and " + f"thus should return a string, but instead returned `{res}` of " + f"type `{type(res)}`: {ctx.getChild(0).getText()}" + ) + return res + else: + assert isinstance(child, TerminalNode) and isinstance( + child.symbol.text, str + ) + return child.symbol.text + + def visitConfigValue(self, ctx: OmegaConfGrammarParser.ConfigValueContext) -> Any: + # text EOF + assert ctx.getChildCount() == 2 + return self.visit(ctx.getChild(0)) + + def visitDictKey(self, ctx: OmegaConfGrammarParser.DictKeyContext) -> Any: + return self._createPrimitive(ctx) + + def visitDictContainer( + self, ctx: OmegaConfGrammarParser.DictContainerContext + ) -> Dict[Any, Any]: + # BRACE_OPEN (dictKeyValuePair (COMMA dictKeyValuePair)*)? 
BRACE_CLOSE + assert ctx.getChildCount() >= 2 + return dict( + self.visitDictKeyValuePair(ctx.getChild(i)) + for i in range(1, ctx.getChildCount() - 1, 2) + ) + + def visitElement(self, ctx: OmegaConfGrammarParser.ElementContext) -> Any: + # primitive | quotedValue | listContainer | dictContainer + assert ctx.getChildCount() == 1 + return self.visit(ctx.getChild(0)) + + def visitInterpolation( + self, ctx: OmegaConfGrammarParser.InterpolationContext + ) -> Any: + assert ctx.getChildCount() == 1 # interpolationNode | interpolationResolver + return self.visit(ctx.getChild(0)) + + def visitInterpolationNode( + self, ctx: OmegaConfGrammarParser.InterpolationNodeContext + ) -> Optional["Node"]: + # INTER_OPEN + # DOT* // relative interpolation? + # (configKey | BRACKET_OPEN configKey BRACKET_CLOSE) // foo, [foo] + # (DOT configKey | BRACKET_OPEN configKey BRACKET_CLOSE)* // .foo, [foo], .foo[bar], [foo].bar[baz] + # INTER_CLOSE; + + assert ctx.getChildCount() >= 3 + + inter_key_tokens = [] # parsed elements of the dot path + for child in ctx.getChildren(): + if isinstance(child, TerminalNode): + s = child.symbol + if s.type in [ + OmegaConfGrammarLexer.DOT, + OmegaConfGrammarLexer.BRACKET_OPEN, + OmegaConfGrammarLexer.BRACKET_CLOSE, + ]: + inter_key_tokens.append(s.text) + else: + assert s.type in ( + OmegaConfGrammarLexer.INTER_OPEN, + OmegaConfGrammarLexer.INTER_CLOSE, + ) + else: + assert isinstance(child, OmegaConfGrammarParser.ConfigKeyContext) + inter_key_tokens.append(self.visitConfigKey(child)) + + inter_key = "".join(inter_key_tokens) + return self.node_interpolation_callback(inter_key, self.memo) + + def visitInterpolationResolver( + self, ctx: OmegaConfGrammarParser.InterpolationResolverContext + ) -> Any: + + # INTER_OPEN resolverName COLON sequence? 
BRACE_CLOSE + assert 4 <= ctx.getChildCount() <= 5 + + resolver_name = self.visit(ctx.getChild(1)) + maybe_seq = ctx.getChild(3) + args = [] + args_str = [] + if isinstance(maybe_seq, TerminalNode): # means there are no args + assert maybe_seq.symbol.type == OmegaConfGrammarLexer.BRACE_CLOSE + else: + assert isinstance(maybe_seq, OmegaConfGrammarParser.SequenceContext) + for val, txt in self.visitSequence(maybe_seq): + args.append(val) + args_str.append(txt) + + return self.resolver_interpolation_callback( + name=resolver_name, + args=tuple(args), + args_str=tuple(args_str), + ) + + def visitDictKeyValuePair( + self, ctx: OmegaConfGrammarParser.DictKeyValuePairContext + ) -> Tuple[Any, Any]: + from ._utils import _get_value + + assert ctx.getChildCount() == 3 # dictKey COLON element + key = self.visit(ctx.getChild(0)) + colon = ctx.getChild(1) + assert ( + isinstance(colon, TerminalNode) + and colon.symbol.type == OmegaConfGrammarLexer.COLON + ) + value = _get_value(self.visitElement(ctx.getChild(2))) + return key, value + + def visitListContainer( + self, ctx: OmegaConfGrammarParser.ListContainerContext + ) -> List[Any]: + # BRACKET_OPEN sequence? BRACKET_CLOSE; + assert ctx.getChildCount() in (2, 3) + if ctx.getChildCount() == 2: + return [] + sequence = ctx.getChild(1) + assert isinstance(sequence, OmegaConfGrammarParser.SequenceContext) + return list(val for val, _ in self.visitSequence(sequence)) # ignore raw text + + def visitPrimitive(self, ctx: OmegaConfGrammarParser.PrimitiveContext) -> Any: + return self._createPrimitive(ctx) + + def visitQuotedValue(self, ctx: OmegaConfGrammarParser.QuotedValueContext) -> str: + # (QUOTE_OPEN_SINGLE | QUOTE_OPEN_DOUBLE) text? 
MATCHING_QUOTE_CLOSE + n = ctx.getChildCount() + assert n in [2, 3] + return str(self.visit(ctx.getChild(1))) if n == 3 else "" + + def visitResolverName(self, ctx: OmegaConfGrammarParser.ResolverNameContext) -> str: + from ._utils import _get_value + + # (interpolation | ID) (DOT (interpolation | ID))* + assert ctx.getChildCount() >= 1 + items = [] + for child in list(ctx.getChildren())[::2]: + if isinstance(child, TerminalNode): + assert child.symbol.type == OmegaConfGrammarLexer.ID + items.append(child.symbol.text) + else: + assert isinstance(child, OmegaConfGrammarParser.InterpolationContext) + item = _get_value(self.visitInterpolation(child)) + if not isinstance(item, str): + raise InterpolationResolutionError( + f"The name of a resolver must be a string, but the interpolation " + f"{child.getText()} resolved to `{item}` which is of type " + f"{type(item)}" + ) + items.append(item) + return ".".join(items) + + def visitSequence( + self, ctx: OmegaConfGrammarParser.SequenceContext + ) -> Generator[Any, None, None]: + from ._utils import _get_value + + # (element (COMMA element?)*) | (COMMA element?)+ + assert ctx.getChildCount() >= 1 + + # DEPRECATED: remove in 2.2 (revert #571) + def empty_str_warning() -> None: + txt = ctx.getText() + warnings.warn( + f"In the sequence `{txt}` some elements are missing: please replace " + f"them with empty quoted strings. " + f"See https://github.com/omry/omegaconf/issues/572 for details.", + category=UserWarning, + ) + + is_previous_comma = True # whether previous child was a comma (init to True) + for child in ctx.getChildren(): + if isinstance(child, OmegaConfGrammarParser.ElementContext): + # Also preserve the original text representation of `child` so + # as to allow backward compatibility with old resolvers (registered + # with `legacy_register_resolver()`). Note that we cannot just cast + # the value to string later as for instance `null` would become "None". 
+ yield _get_value(self.visitElement(child)), child.getText() + is_previous_comma = False + else: + assert ( + isinstance(child, TerminalNode) + and child.symbol.type == OmegaConfGrammarLexer.COMMA + ) + if is_previous_comma: + empty_str_warning() + yield "", "" + else: + is_previous_comma = True + if is_previous_comma: + # Trailing comma. + empty_str_warning() + yield "", "" + + def visitSingleElement( + self, ctx: OmegaConfGrammarParser.SingleElementContext + ) -> Any: + # element EOF + assert ctx.getChildCount() == 2 + return self.visit(ctx.getChild(0)) + + def visitText(self, ctx: OmegaConfGrammarParser.TextContext) -> Any: + # (interpolation | ANY_STR | ESC | ESC_INTER | TOP_ESC | QUOTED_ESC)+ + + # Single interpolation? If yes, return its resolved value "as is". + if ctx.getChildCount() == 1: + c = ctx.getChild(0) + if isinstance(c, OmegaConfGrammarParser.InterpolationContext): + return self.visitInterpolation(c) + + # Otherwise, concatenate string representations together. + return self._unescape(list(ctx.getChildren())) + + def _createPrimitive( + self, + ctx: Union[ + OmegaConfGrammarParser.PrimitiveContext, + OmegaConfGrammarParser.DictKeyContext, + ], + ) -> Any: + # (ID | NULL | INT | FLOAT | BOOL | UNQUOTED_CHAR | COLON | ESC | WS | interpolation)+ + if ctx.getChildCount() == 1: + child = ctx.getChild(0) + if isinstance(child, OmegaConfGrammarParser.InterpolationContext): + return self.visitInterpolation(child) + assert isinstance(child, TerminalNode) + symbol = child.symbol + # Parse primitive types. 
+ if symbol.type in ( + OmegaConfGrammarLexer.ID, + OmegaConfGrammarLexer.UNQUOTED_CHAR, + OmegaConfGrammarLexer.COLON, + ): + return symbol.text + elif symbol.type == OmegaConfGrammarLexer.NULL: + return None + elif symbol.type == OmegaConfGrammarLexer.INT: + return int(symbol.text) + elif symbol.type == OmegaConfGrammarLexer.FLOAT: + return float(symbol.text) + elif symbol.type == OmegaConfGrammarLexer.BOOL: + return symbol.text.lower() == "true" + elif symbol.type == OmegaConfGrammarLexer.ESC: + return self._unescape([child]) + elif symbol.type == OmegaConfGrammarLexer.WS: # pragma: no cover + # A single WS should have been "consumed" by another token. + raise AssertionError("WS should never be reached") + assert False, symbol.type + # Concatenation of multiple items ==> un-escape the concatenation. + return self._unescape(list(ctx.getChildren())) + + def _unescape( + self, + seq: List[Union[TerminalNode, OmegaConfGrammarParser.InterpolationContext]], + ) -> str: + """ + Concatenate all symbols / interpolations in `seq`, unescaping symbols as needed. + + Interpolations are resolved and cast to string *WITHOUT* escaping their result + (it is assumed that whatever escaping is required was already handled during the + resolving of the interpolation). + """ + chrs = [] + for node, next_node in zip_longest(seq, seq[1:]): + if isinstance(node, TerminalNode): + s = node.symbol + if s.type == OmegaConfGrammarLexer.ESC_INTER: + # `ESC_INTER` is of the form `\\...\${`: the formula below computes + # the number of characters to keep at the end of the string to remove + # the correct number of backslashes. + text = s.text[-(len(s.text) // 2 + 1) :] + elif ( + # Character sequence identified as requiring un-escaping. + s.type == OmegaConfGrammarLexer.ESC + or ( + # At top level, we need to un-escape backslashes that precede + # an interpolation. 
+ s.type == OmegaConfGrammarLexer.TOP_ESC + and isinstance( + next_node, OmegaConfGrammarParser.InterpolationContext + ) + ) + or ( + # In a quoted sring, we need to un-escape backslashes that + # either end the string, or are followed by an interpolation. + s.type == OmegaConfGrammarLexer.QUOTED_ESC + and ( + next_node is None + or isinstance( + next_node, OmegaConfGrammarParser.InterpolationContext + ) + ) + ) + ): + text = s.text[1::2] # un-escape the sequence + else: + text = s.text # keep the original text + else: + assert isinstance(node, OmegaConfGrammarParser.InterpolationContext) + text = str(self.visitInterpolation(node)) + chrs.append(text) + + return "".join(chrs) diff --git a/mantis_evalkit/lib/python3.10/site-packages/omegaconf/nodes.py b/mantis_evalkit/lib/python3.10/site-packages/omegaconf/nodes.py new file mode 100644 index 0000000000000000000000000000000000000000..1f02cde65801fc9b7443595fc80bd8017dd8273b --- /dev/null +++ b/mantis_evalkit/lib/python3.10/site-packages/omegaconf/nodes.py @@ -0,0 +1,545 @@ +import copy +import math +import sys +from abc import abstractmethod +from enum import Enum +from pathlib import Path +from typing import Any, Dict, Optional, Type, Union + +from omegaconf._utils import ( + ValueKind, + _is_interpolation, + get_type_of, + get_value_kind, + is_primitive_container, + type_str, +) +from omegaconf.base import Box, DictKeyType, Metadata, Node +from omegaconf.errors import ReadonlyConfigError, UnsupportedValueType, ValidationError + + +class ValueNode(Node): + _val: Any + + def __init__(self, parent: Optional[Box], value: Any, metadata: Metadata): + from omegaconf import read_write + + super().__init__(parent=parent, metadata=metadata) + with read_write(self): + self._set_value(value) # lgtm [py/init-calls-subclass] + + def _value(self) -> Any: + return self._val + + def _set_value(self, value: Any, flags: Optional[Dict[str, bool]] = None) -> None: + if self._get_flag("readonly"): + raise ReadonlyConfigError("Cannot set 
value of read-only config node") + + if isinstance(value, str) and get_value_kind( + value, strict_interpolation_validation=True + ) in ( + ValueKind.INTERPOLATION, + ValueKind.MANDATORY_MISSING, + ): + self._val = value + else: + self._val = self.validate_and_convert(value) + + def _strict_validate_type(self, value: Any) -> None: + ref_type = self._metadata.ref_type + if isinstance(ref_type, type) and type(value) is not ref_type: + type_hint = type_str(self._metadata.type_hint) + raise ValidationError( + f"Value '$VALUE' of type '$VALUE_TYPE' is incompatible with type hint '{type_hint}'" + ) + + def validate_and_convert(self, value: Any) -> Any: + """ + Validates input and converts to canonical form + :param value: input value + :return: converted value ("100" may be converted to 100 for example) + """ + if value is None: + if self._is_optional(): + return None + ref_type_str = type_str(self._metadata.ref_type) + raise ValidationError( + f"Incompatible value '{value}' for field of type '{ref_type_str}'" + ) + + # Subclasses can assume that `value` is not None in + # `_validate_and_convert_impl()` and in `_strict_validate_type()`. + if self._get_flag("convert") is False: + self._strict_validate_type(value) + return value + else: + return self._validate_and_convert_impl(value) + + @abstractmethod + def _validate_and_convert_impl(self, value: Any) -> Any: + ... 
+ + def __str__(self) -> str: + return str(self._val) + + def __repr__(self) -> str: + return repr(self._val) if hasattr(self, "_val") else "__INVALID__" + + def __eq__(self, other: Any) -> bool: + if isinstance(other, AnyNode): + return self._val == other._val # type: ignore + else: + return self._val == other # type: ignore + + def __ne__(self, other: Any) -> bool: + x = self.__eq__(other) + assert x is not NotImplemented + return not x + + def __hash__(self) -> int: + return hash(self._val) + + def _deepcopy_impl(self, res: Any, memo: Dict[int, Any]) -> None: + res.__dict__["_metadata"] = copy.deepcopy(self._metadata, memo=memo) + # shallow copy for value to support non-copyable value + res.__dict__["_val"] = self._val + + # parent is retained, but not copied + res.__dict__["_parent"] = self._parent + + def _is_optional(self) -> bool: + return self._metadata.optional + + def _is_interpolation(self) -> bool: + return _is_interpolation(self._value()) + + def _get_full_key(self, key: Optional[Union[DictKeyType, int]]) -> str: + parent = self._get_parent() + if parent is None: + if self._metadata.key is None: + return "" + else: + return str(self._metadata.key) + else: + return parent._get_full_key(self._metadata.key) + + +class AnyNode(ValueNode): + def __init__( + self, + value: Any = None, + key: Any = None, + parent: Optional[Box] = None, + flags: Optional[Dict[str, bool]] = None, + ): + super().__init__( + parent=parent, + value=value, + metadata=Metadata( + ref_type=Any, object_type=None, key=key, optional=True, flags=flags + ), + ) + + def _validate_and_convert_impl(self, value: Any) -> Any: + from ._utils import is_primitive_type_annotation + + # allow_objects is internal and not an official API. use at your own risk. + # Please be aware that this support is subject to change without notice. + # If this is deemed useful and supportable it may become an official API. 
+ + if self._get_flag( + "allow_objects" + ) is not True and not is_primitive_type_annotation(value): + t = get_type_of(value) + raise UnsupportedValueType( + f"Value '{t.__name__}' is not a supported primitive type" + ) + return value + + def __deepcopy__(self, memo: Dict[int, Any]) -> "AnyNode": + res = AnyNode() + self._deepcopy_impl(res, memo) + return res + + +class StringNode(ValueNode): + def __init__( + self, + value: Any = None, + key: Any = None, + parent: Optional[Box] = None, + is_optional: bool = True, + flags: Optional[Dict[str, bool]] = None, + ): + super().__init__( + parent=parent, + value=value, + metadata=Metadata( + key=key, + optional=is_optional, + ref_type=str, + object_type=str, + flags=flags, + ), + ) + + def _validate_and_convert_impl(self, value: Any) -> str: + from omegaconf import OmegaConf + + if ( + OmegaConf.is_config(value) + or is_primitive_container(value) + or isinstance(value, bytes) + ): + raise ValidationError("Cannot convert '$VALUE_TYPE' to string: '$VALUE'") + return str(value) + + def __deepcopy__(self, memo: Dict[int, Any]) -> "StringNode": + res = StringNode() + self._deepcopy_impl(res, memo) + return res + + +class PathNode(ValueNode): + def __init__( + self, + value: Any = None, + key: Any = None, + parent: Optional[Box] = None, + is_optional: bool = True, + flags: Optional[Dict[str, bool]] = None, + ): + super().__init__( + parent=parent, + value=value, + metadata=Metadata( + key=key, + optional=is_optional, + ref_type=Path, + object_type=Path, + flags=flags, + ), + ) + + def _strict_validate_type(self, value: Any) -> None: + if not isinstance(value, Path): + raise ValidationError( + "Value '$VALUE' of type '$VALUE_TYPE' is not an instance of 'pathlib.Path'" + ) + + def _validate_and_convert_impl(self, value: Any) -> Path: + if not isinstance(value, (str, Path)): + raise ValidationError( + "Value '$VALUE' of type '$VALUE_TYPE' could not be converted to Path" + ) + + return Path(value) + + def __deepcopy__(self, memo: 
Dict[int, Any]) -> "PathNode": + res = PathNode() + self._deepcopy_impl(res, memo) + return res + + +class IntegerNode(ValueNode): + def __init__( + self, + value: Any = None, + key: Any = None, + parent: Optional[Box] = None, + is_optional: bool = True, + flags: Optional[Dict[str, bool]] = None, + ): + super().__init__( + parent=parent, + value=value, + metadata=Metadata( + key=key, + optional=is_optional, + ref_type=int, + object_type=int, + flags=flags, + ), + ) + + def _validate_and_convert_impl(self, value: Any) -> int: + try: + if type(value) in (str, int): + val = int(value) + else: + raise ValueError() + except ValueError: + raise ValidationError( + "Value '$VALUE' of type '$VALUE_TYPE' could not be converted to Integer" + ) + return val + + def __deepcopy__(self, memo: Dict[int, Any]) -> "IntegerNode": + res = IntegerNode() + self._deepcopy_impl(res, memo) + return res + + +class BytesNode(ValueNode): + def __init__( + self, + value: Any = None, + key: Any = None, + parent: Optional[Box] = None, + is_optional: bool = True, + flags: Optional[Dict[str, bool]] = None, + ): + super().__init__( + parent=parent, + value=value, + metadata=Metadata( + key=key, + optional=is_optional, + ref_type=bytes, + object_type=bytes, + flags=flags, + ), + ) + + def _validate_and_convert_impl(self, value: Any) -> bytes: + if not isinstance(value, bytes): + raise ValidationError( + "Value '$VALUE' of type '$VALUE_TYPE' is not of type 'bytes'" + ) + return value + + def __deepcopy__(self, memo: Dict[int, Any]) -> "BytesNode": + res = BytesNode() + self._deepcopy_impl(res, memo) + return res + + +class FloatNode(ValueNode): + def __init__( + self, + value: Any = None, + key: Any = None, + parent: Optional[Box] = None, + is_optional: bool = True, + flags: Optional[Dict[str, bool]] = None, + ): + super().__init__( + parent=parent, + value=value, + metadata=Metadata( + key=key, + optional=is_optional, + ref_type=float, + object_type=float, + flags=flags, + ), + ) + + def 
_validate_and_convert_impl(self, value: Any) -> float: + try: + if type(value) in (float, str, int): + return float(value) + else: + raise ValueError() + except ValueError: + raise ValidationError( + "Value '$VALUE' of type '$VALUE_TYPE' could not be converted to Float" + ) + + def __eq__(self, other: Any) -> bool: + if isinstance(other, ValueNode): + other_val = other._val + else: + other_val = other + if self._val is None and other is None: + return True + if self._val is None and other is not None: + return False + if self._val is not None and other is None: + return False + nan1 = math.isnan(self._val) if isinstance(self._val, float) else False + nan2 = math.isnan(other_val) if isinstance(other_val, float) else False + return self._val == other_val or (nan1 and nan2) + + def __hash__(self) -> int: + return hash(self._val) + + def __deepcopy__(self, memo: Dict[int, Any]) -> "FloatNode": + res = FloatNode() + self._deepcopy_impl(res, memo) + return res + + +class BooleanNode(ValueNode): + def __init__( + self, + value: Any = None, + key: Any = None, + parent: Optional[Box] = None, + is_optional: bool = True, + flags: Optional[Dict[str, bool]] = None, + ): + super().__init__( + parent=parent, + value=value, + metadata=Metadata( + key=key, + optional=is_optional, + ref_type=bool, + object_type=bool, + flags=flags, + ), + ) + + def _validate_and_convert_impl(self, value: Any) -> bool: + if isinstance(value, bool): + return value + if isinstance(value, int): + return value != 0 + elif isinstance(value, str): + try: + return self._validate_and_convert_impl(int(value)) + except ValueError as e: + if value.lower() in ("yes", "y", "on", "true"): + return True + elif value.lower() in ("no", "n", "off", "false"): + return False + else: + raise ValidationError( + "Value '$VALUE' is not a valid bool (type $VALUE_TYPE)" + ).with_traceback(sys.exc_info()[2]) from e + else: + raise ValidationError( + "Value '$VALUE' is not a valid bool (type $VALUE_TYPE)" + ) + + def 
class EnumNode(ValueNode):  # lgtm [py/missing-equals] : Intentional.
    """
    Leaf node constrained to members of a specific Enum subclass.

    NOTE: EnumNode is serialized to yaml as a string ("Color.BLUE"), not as a
    fully qualified yaml type, so a typed config serialized to YAML will not
    retain the Enum type when loaded back. This is intentional; please open an
    issue against OmegaConf if you wish to discuss this decision.
    """

    def __init__(
        self,
        enum_type: Type[Enum],
        value: Optional[Union[Enum, str]] = None,
        key: Any = None,
        parent: Optional[Box] = None,
        is_optional: bool = True,
        flags: Optional[Dict[str, bool]] = None,
    ):
        if not (isinstance(enum_type, type) and issubclass(enum_type, Enum)):
            raise ValidationError(
                f"EnumNode can only operate on Enum subclasses ({enum_type})"
            )
        # Member name -> member value mapping, kept for introspection.
        self.fields: Dict[str, str] = {
            name: member.value for name, member in enum_type.__members__.items()
        }
        self.enum_type: Type[Enum] = enum_type
        super().__init__(
            parent=parent,
            value=value,
            metadata=Metadata(
                key=key,
                optional=is_optional,
                ref_type=enum_type,
                object_type=enum_type,
                flags=flags,
            ),
        )

    def _strict_validate_type(self, value: Any) -> None:
        # In strict mode only actual members of the enum are acceptable.
        if not isinstance(value, self._metadata.ref_type):
            type_hint = type_str(self._metadata.type_hint)
            raise ValidationError(
                f"Value '$VALUE' of type '$VALUE_TYPE' is incompatible with type hint '{type_hint}'"
            )

    def _validate_and_convert_impl(self, value: Any) -> Enum:
        return self.validate_and_convert_to_enum(enum_type=self.enum_type, value=value)

    @staticmethod
    def validate_and_convert_to_enum(enum_type: Type[Enum], value: Any) -> Enum:
        """Convert *value* (member, int value or member name) to *enum_type*."""
        if isinstance(value, enum_type):
            return value
        if not isinstance(value, (str, int)):
            raise ValidationError(
                f"Value $VALUE ($VALUE_TYPE) is not a valid input for {enum_type}"
            )

        try:
            # bool/float sneak past the isinstance checks above (bool is an
            # int subclass); reject them explicitly.
            if isinstance(value, (float, bool)):
                raise ValueError

            if isinstance(value, int):
                return enum_type(value)

            if isinstance(value, str):
                # Accept both "MEMBER" and "EnumName.MEMBER" spellings.
                prefix = f"{enum_type.__name__}."
                if value.startswith(prefix):
                    value = value[len(prefix) :]
                return enum_type[value]

            assert False

        except (ValueError, KeyError) as e:
            valid = ", ".join(enum_type.__members__.keys())
            raise ValidationError(
                f"Invalid value '$VALUE', expected one of [{valid}]"
            ).with_traceback(sys.exc_info()[2]) from e

    def __deepcopy__(self, memo: Dict[int, Any]) -> "EnumNode":
        copied = EnumNode(enum_type=self.enum_type)
        self._deepcopy_impl(copied, memo)
        return copied


class InterpolationResultNode(ValueNode):
    """
    Special node type, used to wrap interpolation results.
    """

    def __init__(
        self,
        value: Any,
        key: Any = None,
        parent: Optional[Box] = None,
        flags: Optional[Dict[str, bool]] = None,
    ):
        super().__init__(
            parent=parent,
            value=value,
            metadata=Metadata(
                ref_type=Any, object_type=None, key=key, optional=True, flags=flags
            ),
        )
        # In general we should not try to write into interpolation results,
        # so default to read-only unless the caller explicitly set the flag.
        if flags is None or "readonly" not in flags:
            self._set_flag("readonly", True)

    def _set_value(self, value: Any, flags: Optional[Dict[str, bool]] = None) -> None:
        if self._get_flag("readonly"):
            raise ReadonlyConfigError("Cannot set value of read-only config node")
        self._val = self.validate_and_convert(value)

    def _validate_and_convert_impl(self, value: Any) -> Any:
        # Interpolation results may be anything.
        return value

    def __deepcopy__(self, memo: Dict[int, Any]) -> "InterpolationResultNode":
        # Currently there should be no need to deep-copy such nodes.
        raise NotImplementedError

    def _is_interpolation(self) -> bool:
        # The result of an interpolation cannot itself be an interpolation.
        return False
# Signature shared by all resolver callables.
Resolver = Callable[..., Any]


def II(interpolation: str) -> Any:
    """
    Equivalent to ``${interpolation}``

    :param interpolation:
    :return: input ``${node}`` with type Any
    """
    return f"${{{interpolation}}}"


def SI(interpolation: str) -> Any:
    """
    Use this for String interpolation, for example ``"http://${host}:${port}"``

    :param interpolation: interpolation string
    :return: input interpolation with type ``Any``
    """
    return interpolation


def register_default_resolvers() -> None:
    """Register OmegaConf's built-in ``oc.*`` resolvers."""
    from omegaconf.resolvers import oc

    # Registration order is kept stable on purpose.
    defaults = (
        ("oc.create", oc.create),
        ("oc.decode", oc.decode),
        ("oc.deprecated", oc.deprecated),
        ("oc.env", oc.env),
        ("oc.select", oc.select),
        ("oc.dict.keys", oc.dict.keys),
        ("oc.dict.values", oc.dict.values),
    )
    for name, resolver in defaults:
        OmegaConf.register_new_resolver(name, resolver)
+ + @staticmethod + @overload + def create( + obj: ListConfig, + parent: Optional[BaseContainer] = None, + flags: Optional[Dict[str, bool]] = None, + ) -> ListConfig: + ... + + @staticmethod + @overload + def create( + obj: Optional[Dict[Any, Any]] = None, + parent: Optional[BaseContainer] = None, + flags: Optional[Dict[str, bool]] = None, + ) -> DictConfig: + ... + + @staticmethod + def create( # noqa F811 + obj: Any = _DEFAULT_MARKER_, + parent: Optional[BaseContainer] = None, + flags: Optional[Dict[str, bool]] = None, + ) -> Union[DictConfig, ListConfig]: + return OmegaConf._create_impl( + obj=obj, + parent=parent, + flags=flags, + ) + + @staticmethod + def load(file_: Union[str, pathlib.Path, IO[Any]]) -> Union[DictConfig, ListConfig]: + from ._utils import get_yaml_loader + + if isinstance(file_, (str, pathlib.Path)): + with io.open(os.path.abspath(file_), "r", encoding="utf-8") as f: + obj = yaml.load(f, Loader=get_yaml_loader()) + elif getattr(file_, "read", None): + obj = yaml.load(file_, Loader=get_yaml_loader()) + else: + raise TypeError("Unexpected file type") + + if obj is not None and not isinstance(obj, (list, dict, str)): + raise IOError( # pragma: no cover + f"Invalid loaded object type: {type(obj).__name__}" + ) + + ret: Union[DictConfig, ListConfig] + if obj is None: + ret = OmegaConf.create() + else: + ret = OmegaConf.create(obj) + return ret + + @staticmethod + def save( + config: Any, f: Union[str, pathlib.Path, IO[Any]], resolve: bool = False + ) -> None: + """ + Save as configuration object to a file + + :param config: omegaconf.Config object (DictConfig or ListConfig). 
+ :param f: filename or file object + :param resolve: True to save a resolved config (defaults to False) + """ + if is_dataclass(config) or is_attr_class(config): + config = OmegaConf.create(config) + data = OmegaConf.to_yaml(config, resolve=resolve) + if isinstance(f, (str, pathlib.Path)): + with io.open(os.path.abspath(f), "w", encoding="utf-8") as file: + file.write(data) + elif hasattr(f, "write"): + f.write(data) + f.flush() + else: + raise TypeError("Unexpected file type") + + @staticmethod + def from_cli(args_list: Optional[List[str]] = None) -> DictConfig: + if args_list is None: + # Skip program name + args_list = sys.argv[1:] + return OmegaConf.from_dotlist(args_list) + + @staticmethod + def from_dotlist(dotlist: List[str]) -> DictConfig: + """ + Creates config from the content sys.argv or from the specified args list of not None + + :param dotlist: A list of dotlist-style strings, e.g. ``["foo.bar=1", "baz=qux"]``. + :return: A ``DictConfig`` object created from the dotlist. + """ + conf = OmegaConf.create() + conf.merge_with_dotlist(dotlist) + return conf + + @staticmethod + def merge( + *configs: Union[ + DictConfig, + ListConfig, + Dict[DictKeyType, Any], + List[Any], + Tuple[Any, ...], + Any, + ], + ) -> Union[ListConfig, DictConfig]: + """ + Merge a list of previously created configs into a single one + + :param configs: Input configs + :return: the merged config object. 
+ """ + assert len(configs) > 0 + target = copy.deepcopy(configs[0]) + target = _ensure_container(target) + assert isinstance(target, (DictConfig, ListConfig)) + + with flag_override(target, "readonly", False): + target.merge_with(*configs[1:]) + turned_readonly = target._get_flag("readonly") is True + + if turned_readonly: + OmegaConf.set_readonly(target, True) + + return target + + @staticmethod + def unsafe_merge( + *configs: Union[ + DictConfig, + ListConfig, + Dict[DictKeyType, Any], + List[Any], + Tuple[Any, ...], + Any, + ], + ) -> Union[ListConfig, DictConfig]: + """ + Merge a list of previously created configs into a single one + This is much faster than OmegaConf.merge() as the input configs are not copied. + However, the input configs must not be used after this operation as will become inconsistent. + + :param configs: Input configs + :return: the merged config object. + """ + assert len(configs) > 0 + target = configs[0] + target = _ensure_container(target) + assert isinstance(target, (DictConfig, ListConfig)) + + with flag_override( + target, ["readonly", "no_deepcopy_set_nodes"], [False, True] + ): + target.merge_with(*configs[1:]) + turned_readonly = target._get_flag("readonly") is True + + if turned_readonly: + OmegaConf.set_readonly(target, True) + + return target + + @staticmethod + def register_resolver(name: str, resolver: Resolver) -> None: + warnings.warn( + dedent( + """\ + register_resolver() is deprecated. + See https://github.com/omry/omegaconf/issues/426 for migration instructions. + """ + ), + stacklevel=2, + ) + return OmegaConf.legacy_register_resolver(name, resolver) + + # This function will eventually be deprecated and removed. 
+ @staticmethod + def legacy_register_resolver(name: str, resolver: Resolver) -> None: + assert callable(resolver), "resolver must be callable" + # noinspection PyProtectedMember + assert ( + name not in BaseContainer._resolvers + ), f"resolver '{name}' is already registered" + + def resolver_wrapper( + config: BaseContainer, + parent: BaseContainer, + node: Node, + args: Tuple[Any, ...], + args_str: Tuple[str, ...], + ) -> Any: + cache = OmegaConf.get_cache(config)[name] + # "Un-escape " spaces and commas. + args_unesc = [x.replace(r"\ ", " ").replace(r"\,", ",") for x in args_str] + + # Nested interpolations behave in a potentially surprising way with + # legacy resolvers (they remain as strings, e.g., "${foo}"). If any + # input looks like an interpolation we thus raise an exception. + try: + bad_arg = next(i for i in args_unesc if "${" in i) + except StopIteration: + pass + else: + raise ValueError( + f"Resolver '{name}' was called with argument '{bad_arg}' that appears " + f"to be an interpolation. Nested interpolations are not supported for " + f"resolvers registered with `[legacy_]register_resolver()`, please use " + f"`register_new_resolver()` instead (see " + f"https://github.com/omry/omegaconf/issues/426 for migration instructions)." + ) + key = args_str + val = cache[key] if key in cache else resolver(*args_unesc) + cache[key] = val + return val + + # noinspection PyProtectedMember + BaseContainer._resolvers[name] = resolver_wrapper + + @staticmethod + def register_new_resolver( + name: str, + resolver: Resolver, + *, + replace: bool = False, + use_cache: bool = False, + ) -> None: + """ + Register a resolver. + + :param name: Name of the resolver. + :param resolver: Callable whose arguments are provided in the interpolation, + e.g., with ${foo:x,0,${y.z}} these arguments are respectively "x" (str), + 0 (int) and the value of ``y.z``. 
+ :param replace: If set to ``False`` (default), then a ``ValueError`` is raised if + an existing resolver has already been registered with the same name. + If set to ``True``, then the new resolver replaces the previous one. + NOTE: The cache on existing config objects is not affected, use + ``OmegaConf.clear_cache(cfg)`` to clear it. + :param use_cache: Whether the resolver's outputs should be cached. The cache is + based only on the string literals representing the resolver arguments, e.g., + ${foo:${bar}} will always return the same value regardless of the value of + ``bar`` if the cache is enabled for ``foo``. + """ + if not callable(resolver): + raise TypeError("resolver must be callable") + if not name: + raise ValueError("cannot use an empty resolver name") + + if not replace and OmegaConf.has_resolver(name): + raise ValueError(f"resolver '{name}' is already registered") + + try: + sig: Optional[inspect.Signature] = inspect.signature(resolver) + except ValueError: + sig = None + + def _should_pass(special: str) -> bool: + ret = sig is not None and special in sig.parameters + if ret and use_cache: + raise ValueError( + f"use_cache=True is incompatible with functions that receive the {special}" + ) + return ret + + pass_parent = _should_pass("_parent_") + pass_node = _should_pass("_node_") + pass_root = _should_pass("_root_") + + def resolver_wrapper( + config: BaseContainer, + parent: Container, + node: Node, + args: Tuple[Any, ...], + args_str: Tuple[str, ...], + ) -> Any: + if use_cache: + cache = OmegaConf.get_cache(config)[name] + try: + return cache[args_str] + except KeyError: + pass + + # Call resolver. 
+ kwargs: Dict[str, Node] = {} + if pass_parent: + kwargs["_parent_"] = parent + if pass_node: + kwargs["_node_"] = node + if pass_root: + kwargs["_root_"] = config + + ret = resolver(*args, **kwargs) + + if use_cache: + cache[args_str] = ret + return ret + + # noinspection PyProtectedMember + BaseContainer._resolvers[name] = resolver_wrapper + + @classmethod + def has_resolver(cls, name: str) -> bool: + return cls._get_resolver(name) is not None + + # noinspection PyProtectedMember + @staticmethod + def clear_resolvers() -> None: + """ + Clear(remove) all OmegaConf resolvers, then re-register OmegaConf's default resolvers. + """ + BaseContainer._resolvers = {} + register_default_resolvers() + + @classmethod + def clear_resolver(cls, name: str) -> bool: + """ + Clear(remove) any resolver only if it exists. + + Returns a bool: True if resolver is removed and False if not removed. + + .. warning: + This method can remove deafult resolvers as well. + + :param name: Name of the resolver. + :return: A bool (``True`` if resolver is removed, ``False`` if not found before removing). 
+ """ + if cls.has_resolver(name): + BaseContainer._resolvers.pop(name) + return True + else: + # return False if resolver does not exist + return False + + @staticmethod + def get_cache(conf: BaseContainer) -> Dict[str, Any]: + return conf._metadata.resolver_cache + + @staticmethod + def set_cache(conf: BaseContainer, cache: Dict[str, Any]) -> None: + conf._metadata.resolver_cache = copy.deepcopy(cache) + + @staticmethod + def clear_cache(conf: BaseContainer) -> None: + OmegaConf.set_cache(conf, defaultdict(dict, {})) + + @staticmethod + def copy_cache(from_config: BaseContainer, to_config: BaseContainer) -> None: + OmegaConf.set_cache(to_config, OmegaConf.get_cache(from_config)) + + @staticmethod + def set_readonly(conf: Node, value: Optional[bool]) -> None: + # noinspection PyProtectedMember + conf._set_flag("readonly", value) + + @staticmethod + def is_readonly(conf: Node) -> Optional[bool]: + # noinspection PyProtectedMember + return conf._get_flag("readonly") + + @staticmethod + def set_struct(conf: Container, value: Optional[bool]) -> None: + # noinspection PyProtectedMember + conf._set_flag("struct", value) + + @staticmethod + def is_struct(conf: Container) -> Optional[bool]: + # noinspection PyProtectedMember + return conf._get_flag("struct") + + @staticmethod + def masked_copy(conf: DictConfig, keys: Union[str, List[str]]) -> DictConfig: + """ + Create a masked copy of of this config that contains a subset of the keys + + :param conf: DictConfig object + :param keys: keys to preserve in the copy + :return: The masked ``DictConfig`` object. 
+ """ + from .dictconfig import DictConfig + + if not isinstance(conf, DictConfig): + raise ValueError("masked_copy is only supported for DictConfig") + + if isinstance(keys, str): + keys = [keys] + content = {key: value for key, value in conf.items_ex(resolve=False, keys=keys)} + return DictConfig(content=content) + + @staticmethod + def to_container( + cfg: Any, + *, + resolve: bool = False, + throw_on_missing: bool = False, + enum_to_str: bool = False, + structured_config_mode: SCMode = SCMode.DICT, + ) -> Union[Dict[DictKeyType, Any], List[Any], None, str, Any]: + """ + Resursively converts an OmegaConf config to a primitive container (dict or list). + + :param cfg: the config to convert + :param resolve: True to resolve all values + :param throw_on_missing: When True, raise MissingMandatoryValue if any missing values are present. + When False (the default), replace missing values with the string "???" in the output container. + :param enum_to_str: True to convert Enum keys and values to strings + :param structured_config_mode: Specify how Structured Configs (DictConfigs backed by a dataclass) are handled. + - By default (``structured_config_mode=SCMode.DICT``) structured configs are converted to plain dicts. + - If ``structured_config_mode=SCMode.DICT_CONFIG``, structured config nodes will remain as DictConfig. + - If ``structured_config_mode=SCMode.INSTANTIATE``, this function will instantiate structured configs + (DictConfigs backed by a dataclass), by creating an instance of the underlying dataclass. + + See also OmegaConf.to_object. + :return: A dict or a list representing this config as a primitive container. 
+ """ + if not OmegaConf.is_config(cfg): + raise ValueError( + f"Input cfg is not an OmegaConf config object ({type_str(type(cfg))})" + ) + + return BaseContainer._to_content( + cfg, + resolve=resolve, + throw_on_missing=throw_on_missing, + enum_to_str=enum_to_str, + structured_config_mode=structured_config_mode, + ) + + @staticmethod + def to_object(cfg: Any) -> Union[Dict[DictKeyType, Any], List[Any], None, str, Any]: + """ + Resursively converts an OmegaConf config to a primitive container (dict or list). + Any DictConfig objects backed by dataclasses or attrs classes are instantiated + as instances of those backing classes. + + This is an alias for OmegaConf.to_container(..., resolve=True, throw_on_missing=True, + structured_config_mode=SCMode.INSTANTIATE) + + :param cfg: the config to convert + :return: A dict or a list or dataclass representing this config. + """ + return OmegaConf.to_container( + cfg=cfg, + resolve=True, + throw_on_missing=True, + enum_to_str=False, + structured_config_mode=SCMode.INSTANTIATE, + ) + + @staticmethod + def is_missing(cfg: Any, key: DictKeyType) -> bool: + assert isinstance(cfg, Container) + try: + node = cfg._get_child(key) + if node is None: + return False + assert isinstance(node, Node) + return node._is_missing() + except (UnsupportedInterpolationType, KeyError, AttributeError): + return False + + @staticmethod + def is_interpolation(node: Any, key: Optional[Union[int, str]] = None) -> bool: + if key is not None: + assert isinstance(node, Container) + target = node._get_child(key) + else: + target = node + if target is not None: + assert isinstance(target, Node) + return target._is_interpolation() + return False + + @staticmethod + def is_list(obj: Any) -> bool: + from . import ListConfig + + return isinstance(obj, ListConfig) + + @staticmethod + def is_dict(obj: Any) -> bool: + from . import DictConfig + + return isinstance(obj, DictConfig) + + @staticmethod + def is_config(obj: Any) -> bool: + from . 
import Container + + return isinstance(obj, Container) + + @staticmethod + def get_type(obj: Any, key: Optional[str] = None) -> Optional[Type[Any]]: + if key is not None: + c = obj._get_child(key) + else: + c = obj + return OmegaConf._get_obj_type(c) + + @staticmethod + def select( + cfg: Container, + key: str, + *, + default: Any = _DEFAULT_MARKER_, + throw_on_resolution_failure: bool = True, + throw_on_missing: bool = False, + ) -> Any: + """ + :param cfg: Config node to select from + :param key: Key to select + :param default: Default value to return if key is not found + :param throw_on_resolution_failure: Raise an exception if an interpolation + resolution error occurs, otherwise return None + :param throw_on_missing: Raise an exception if an attempt to select a missing key (with the value '???') + is made, otherwise return None + :return: selected value or None if not found. + """ + from ._impl import select_value + + try: + return select_value( + cfg=cfg, + key=key, + default=default, + throw_on_resolution_failure=throw_on_resolution_failure, + throw_on_missing=throw_on_missing, + ) + except Exception as e: + format_and_raise(node=cfg, key=key, value=None, cause=e, msg=str(e)) + + @staticmethod + def update( + cfg: Container, + key: str, + value: Any = None, + *, + merge: bool = True, + force_add: bool = False, + ) -> None: + """ + Updates a dot separated key sequence to a value + + :param cfg: input config to update + :param key: key to update (can be a dot separated path) + :param value: value to set, if value if a list or a dict it will be merged or set + depending on merge_config_values + :param merge: If value is a dict or a list, True (default) to merge + into the destination, False to replace the destination. + :param force_add: insert the entire path regardless of Struct flag or Structured Config nodes. 
+ """ + + split = split_key(key) + root = cfg + for i in range(len(split) - 1): + k = split[i] + # if next_root is a primitive (string, int etc) replace it with an empty map + next_root, key_ = _select_one(root, k, throw_on_missing=False) + if not isinstance(next_root, Container): + if force_add: + with flag_override(root, "struct", False): + root[key_] = {} + else: + root[key_] = {} + root = root[key_] + + last = split[-1] + + assert isinstance( + root, Container + ), f"Unexpected type for root: {type(root).__name__}" + + last_key: Union[str, int] = last + if isinstance(root, ListConfig): + last_key = int(last) + + ctx = flag_override(root, "struct", False) if force_add else nullcontext() + with ctx: + if merge and (OmegaConf.is_config(value) or is_primitive_container(value)): + assert isinstance(root, BaseContainer) + node = root._get_child(last_key) + if OmegaConf.is_config(node): + assert isinstance(node, BaseContainer) + node.merge_with(value) + return + + if OmegaConf.is_dict(root): + assert isinstance(last_key, str) + root.__setattr__(last_key, value) + elif OmegaConf.is_list(root): + assert isinstance(last_key, int) + root.__setitem__(last_key, value) + else: + assert False + + @staticmethod + def to_yaml(cfg: Any, *, resolve: bool = False, sort_keys: bool = False) -> str: + """ + returns a yaml dump of this config object. + + :param cfg: Config object, Structured Config type or instance + :param resolve: if True, will return a string with the interpolations resolved, otherwise + interpolations are preserved + :param sort_keys: If True, will print dict keys in sorted order. default False. + :return: A string containing the yaml representation. 
+ """ + cfg = _ensure_container(cfg) + container = OmegaConf.to_container(cfg, resolve=resolve, enum_to_str=True) + return yaml.dump( # type: ignore + container, + default_flow_style=False, + allow_unicode=True, + sort_keys=sort_keys, + Dumper=get_omega_conf_dumper(), + ) + + @staticmethod + def resolve(cfg: Container) -> None: + """ + Resolves all interpolations in the given config object in-place. + + :param cfg: An OmegaConf container (DictConfig, ListConfig) + Raises a ValueError if the input object is not an OmegaConf container. + """ + import omegaconf._impl + + if not OmegaConf.is_config(cfg): + # Since this function is mutating the input object in-place, it doesn't make sense to + # auto-convert the input object to an OmegaConf container + raise ValueError( + f"Invalid config type ({type(cfg).__name__}), expected an OmegaConf Container" + ) + omegaconf._impl._resolve(cfg) + + @staticmethod + def missing_keys(cfg: Any) -> Set[str]: + """ + Returns a set of missing keys in a dotlist style. + + :param cfg: An ``OmegaConf.Container``, + or a convertible object via ``OmegaConf.create`` (dict, list, ...). + :return: set of strings of the missing keys. + :raises ValueError: On input not representing a config. 
+ """ + cfg = _ensure_container(cfg) + missings: Set[str] = set() + + def gather(_cfg: Container) -> None: + itr: Iterable[Any] + if isinstance(_cfg, ListConfig): + itr = range(len(_cfg)) + else: + itr = _cfg + + for key in itr: + if OmegaConf.is_missing(_cfg, key): + missings.add(_cfg._get_full_key(key)) + elif OmegaConf.is_config(_cfg[key]): + gather(_cfg[key]) + + gather(cfg) + return missings + + # === private === # + + @staticmethod + def _create_impl( # noqa F811 + obj: Any = _DEFAULT_MARKER_, + parent: Optional[BaseContainer] = None, + flags: Optional[Dict[str, bool]] = None, + ) -> Union[DictConfig, ListConfig]: + try: + from ._utils import get_yaml_loader + from .dictconfig import DictConfig + from .listconfig import ListConfig + + if obj is _DEFAULT_MARKER_: + obj = {} + if isinstance(obj, str): + obj = yaml.load(obj, Loader=get_yaml_loader()) + if obj is None: + return OmegaConf.create({}, parent=parent, flags=flags) + elif isinstance(obj, str): + return OmegaConf.create({obj: None}, parent=parent, flags=flags) + else: + assert isinstance(obj, (list, dict)) + return OmegaConf.create(obj, parent=parent, flags=flags) + + else: + if ( + is_primitive_dict(obj) + or OmegaConf.is_dict(obj) + or is_structured_config(obj) + or obj is None + ): + if isinstance(obj, DictConfig): + return DictConfig( + content=obj, + parent=parent, + ref_type=obj._metadata.ref_type, + is_optional=obj._metadata.optional, + key_type=obj._metadata.key_type, + element_type=obj._metadata.element_type, + flags=flags, + ) + else: + obj_type = OmegaConf.get_type(obj) + key_type, element_type = get_dict_key_value_types(obj_type) + return DictConfig( + content=obj, + parent=parent, + key_type=key_type, + element_type=element_type, + flags=flags, + ) + elif is_primitive_list(obj) or OmegaConf.is_list(obj): + if isinstance(obj, ListConfig): + return ListConfig( + content=obj, + parent=parent, + element_type=obj._metadata.element_type, + ref_type=obj._metadata.ref_type, + 
@contextmanager
def flag_override(
    config: Node,
    names: Union[List[str], str],
    values: Union[List[Optional[bool]], Optional[bool]],
) -> Generator[Node, None, None]:
    """Temporarily set flag(s) on *config*, restoring the previous values on exit.

    :param config: node whose flags are overridden
    :param names: a flag name or list of flag names
    :param values: matching flag value(s); a scalar is wrapped in a list
    """
    if isinstance(names, str):
        names = [names]
    if values is None or isinstance(values, bool):
        values = [values]

    # Snapshot the node-local flags so they can be restored verbatim.
    previous = [config._get_node_flag(name) for name in names]

    try:
        config._set_flag(names, values)
        yield config
    finally:
        config._set_flag(names, previous)


@contextmanager
def read_write(config: Node) -> Generator[Node, None, None]:
    """Temporarily clear the 'readonly' flag on *config*."""
    previous = config._get_node_flag("readonly")
    try:
        OmegaConf.set_readonly(config, False)
        yield config
    finally:
        OmegaConf.set_readonly(config, previous)


@contextmanager
def open_dict(config: Container) -> Generator[Container, None, None]:
    """Temporarily clear the 'struct' flag on *config* (allows adding keys)."""
    previous = config._get_node_flag("struct")
    try:
        OmegaConf.set_struct(config, False)
        yield config
    finally:
        OmegaConf.set_struct(config, previous)
AnyNode(value=value, key=key, parent=parent) + elif isinstance(ref_type, type) and issubclass(ref_type, Enum): + node = EnumNode( + enum_type=ref_type, + value=value, + key=key, + parent=parent, + is_optional=is_optional, + ) + elif ref_type == int: + node = IntegerNode(value=value, key=key, parent=parent, is_optional=is_optional) + elif ref_type == float: + node = FloatNode(value=value, key=key, parent=parent, is_optional=is_optional) + elif ref_type == bool: + node = BooleanNode(value=value, key=key, parent=parent, is_optional=is_optional) + elif ref_type == str: + node = StringNode(value=value, key=key, parent=parent, is_optional=is_optional) + elif ref_type == bytes: + node = BytesNode(value=value, key=key, parent=parent, is_optional=is_optional) + elif ref_type == pathlib.Path: + node = PathNode(value=value, key=key, parent=parent, is_optional=is_optional) + else: + if parent is not None and parent._get_flag("allow_objects") is True: + if type(value) in (list, tuple): + node = ListConfig( + content=value, + key=key, + parent=parent, + ref_type=ref_type, + is_optional=is_optional, + ) + elif is_primitive_dict(value): + node = DictConfig( + content=value, + key=key, + parent=parent, + ref_type=ref_type, + is_optional=is_optional, + ) + else: + node = AnyNode(value=value, key=key, parent=parent) + else: + raise ValidationError(f"Unexpected type annotation: {type_str(ref_type)}") + return node + + +def _maybe_wrap( + ref_type: Any, + key: Any, + value: Any, + is_optional: bool, + parent: Optional[BaseContainer], +) -> Node: + # if already a node, update key and parent and return as is. + # NOTE: that this mutate the input node! 
+ if isinstance(value, Node): + value._set_key(key) + value._set_parent(parent) + return value + else: + return _node_wrap( + ref_type=ref_type, + parent=parent, + is_optional=is_optional, + value=value, + key=key, + ) + + +def _select_one( + c: Container, key: str, throw_on_missing: bool, throw_on_type_error: bool = True +) -> Tuple[Optional[Node], Union[str, int]]: + from .dictconfig import DictConfig + from .listconfig import ListConfig + + ret_key: Union[str, int] = key + assert isinstance(c, Container), f"Unexpected type: {c}" + if c._is_none(): + return None, ret_key + + if isinstance(c, DictConfig): + assert isinstance(ret_key, str) + val = c._get_child(ret_key, validate_access=False) + elif isinstance(c, ListConfig): + assert isinstance(ret_key, str) + if not is_int(ret_key): + if throw_on_type_error: + raise TypeError( + f"Index '{ret_key}' ({type(ret_key).__name__}) is not an int" + ) + else: + val = None + else: + ret_key = int(ret_key) + if ret_key < 0 or ret_key + 1 > len(c): + val = None + else: + val = c._get_child(ret_key) + else: + assert False + + if val is not None: + assert isinstance(val, Node) + if val._is_missing(): + if throw_on_missing: + raise MissingMandatoryValue( + f"Missing mandatory value: {c._get_full_key(ret_key)}" + ) + else: + return val, ret_key + + assert val is None or isinstance(val, Node) + return val, ret_key diff --git a/mantis_evalkit/lib/python3.10/site-packages/omegaconf/resolvers/__init__.py b/mantis_evalkit/lib/python3.10/site-packages/omegaconf/resolvers/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..1b0bb71168722281af5ddecb46f79e36f63c71e4 --- /dev/null +++ b/mantis_evalkit/lib/python3.10/site-packages/omegaconf/resolvers/__init__.py @@ -0,0 +1,5 @@ +from omegaconf.resolvers import oc + +__all__ = [ + "oc", +] diff --git a/mantis_evalkit/lib/python3.10/site-packages/omegaconf/resolvers/__pycache__/__init__.cpython-310.pyc 
b/mantis_evalkit/lib/python3.10/site-packages/omegaconf/resolvers/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e988e42914fa6f65210d954c67170bca94e05456 Binary files /dev/null and b/mantis_evalkit/lib/python3.10/site-packages/omegaconf/resolvers/__pycache__/__init__.cpython-310.pyc differ diff --git a/mantis_evalkit/lib/python3.10/site-packages/omegaconf/resolvers/oc/__init__.py b/mantis_evalkit/lib/python3.10/site-packages/omegaconf/resolvers/oc/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..c177ab4f1b35dfa5502a9b6359439bfca180ba1d --- /dev/null +++ b/mantis_evalkit/lib/python3.10/site-packages/omegaconf/resolvers/oc/__init__.py @@ -0,0 +1,113 @@ +import os +import string +import warnings +from typing import Any, Optional + +from omegaconf import Container, Node +from omegaconf._utils import _DEFAULT_MARKER_, _get_value +from omegaconf.basecontainer import BaseContainer +from omegaconf.errors import ConfigKeyError +from omegaconf.grammar_parser import parse +from omegaconf.resolvers.oc import dict + + +def create(obj: Any, _parent_: Container) -> Any: + """Create a config object from `obj`, similar to `OmegaConf.create`""" + from omegaconf import OmegaConf + + assert isinstance(_parent_, BaseContainer) + return OmegaConf.create(obj, parent=_parent_) + + +def env(key: str, default: Any = _DEFAULT_MARKER_) -> Optional[str]: + """ + :param key: Environment variable key + :param default: Optional default value to use in case the key environment variable is not set. + If default is not a string, it is converted with str(default). + None default is returned as is. + :return: The environment variable 'key'. If the environment variable is not set and a default is + provided, the default is used. If used, the default is converted to a string with str(default). + If the default is None, None is returned (without a string conversion). 
+ """ + try: + return os.environ[key] + except KeyError: + if default is not _DEFAULT_MARKER_: + return str(default) if default is not None else None + else: + raise KeyError(f"Environment variable '{key}' not found") + + +def decode(expr: Optional[str], _parent_: Container, _node_: Node) -> Any: + """ + Parse and evaluate `expr` according to the `singleElement` rule of the grammar. + + If `expr` is `None`, then return `None`. + """ + if expr is None: + return None + + if not isinstance(expr, str): + raise TypeError( + f"`oc.decode` can only take strings or None as input, " + f"but `{expr}` is of type {type(expr).__name__}" + ) + + parse_tree = parse(expr, parser_rule="singleElement", lexer_mode="VALUE_MODE") + val = _parent_.resolve_parse_tree(parse_tree, node=_node_) + return _get_value(val) + + +def deprecated( + key: str, + message: str = "'$OLD_KEY' is deprecated. Change your code and config to use '$NEW_KEY'", + *, + _parent_: Container, + _node_: Node, +) -> Any: + from omegaconf._impl import select_node + + if not isinstance(key, str): + raise TypeError( + f"oc.deprecated: interpolation key type is not a string ({type(key).__name__})" + ) + + if not isinstance(message, str): + raise TypeError( + f"oc.deprecated: interpolation message type is not a string ({type(message).__name__})" + ) + + full_key = _node_._get_full_key(key=None) + target_node = select_node(_parent_, key, absolute_key=True) + if target_node is None: + raise ConfigKeyError( + f"In oc.deprecated resolver at '{full_key}': Key not found: '{key}'" + ) + new_key = target_node._get_full_key(key=None) + msg = string.Template(message).safe_substitute( + OLD_KEY=full_key, + NEW_KEY=new_key, + ) + warnings.warn(category=UserWarning, message=msg) + return target_node + + +def select( + key: str, + default: Any = _DEFAULT_MARKER_, + *, + _parent_: Container, +) -> Any: + from omegaconf._impl import select_value + + return select_value(cfg=_parent_, key=key, absolute_key=True, default=default) + + 
+__all__ = [ + "create", + "decode", + "deprecated", + "dict", + "env", + "select", +] diff --git a/mantis_evalkit/lib/python3.10/site-packages/omegaconf/resolvers/oc/__pycache__/__init__.cpython-310.pyc b/mantis_evalkit/lib/python3.10/site-packages/omegaconf/resolvers/oc/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f81bd570c2ab242ceb439b0f086e7f6690d4b12d Binary files /dev/null and b/mantis_evalkit/lib/python3.10/site-packages/omegaconf/resolvers/oc/__pycache__/__init__.cpython-310.pyc differ diff --git a/mantis_evalkit/lib/python3.10/site-packages/omegaconf/resolvers/oc/__pycache__/dict.cpython-310.pyc b/mantis_evalkit/lib/python3.10/site-packages/omegaconf/resolvers/oc/__pycache__/dict.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..07f445334c641845122fe0d27d4221804e910998 Binary files /dev/null and b/mantis_evalkit/lib/python3.10/site-packages/omegaconf/resolvers/oc/__pycache__/dict.cpython-310.pyc differ diff --git a/mantis_evalkit/lib/python3.10/site-packages/omegaconf/resolvers/oc/dict.py b/mantis_evalkit/lib/python3.10/site-packages/omegaconf/resolvers/oc/dict.py new file mode 100644 index 0000000000000000000000000000000000000000..adbfd16ac47f2dced46f509c8dfdc8516ba97f4a --- /dev/null +++ b/mantis_evalkit/lib/python3.10/site-packages/omegaconf/resolvers/oc/dict.py @@ -0,0 +1,83 @@ +from typing import Any, List + +from omegaconf import AnyNode, Container, DictConfig, ListConfig +from omegaconf._utils import Marker +from omegaconf.basecontainer import BaseContainer +from omegaconf.errors import ConfigKeyError + +_DEFAULT_SELECT_MARKER_: Any = Marker("_DEFAULT_SELECT_MARKER_") + + +def keys( + key: str, + _parent_: Container, +) -> ListConfig: + from omegaconf import OmegaConf + + assert isinstance(_parent_, BaseContainer) + + in_dict = _get_and_validate_dict_input( + key, parent=_parent_, resolver_name="oc.dict.keys" + ) + + ret = 
OmegaConf.create(list(in_dict.keys()), parent=_parent_) + assert isinstance(ret, ListConfig) + return ret + + +def values(key: str, _root_: BaseContainer, _parent_: Container) -> ListConfig: + assert isinstance(_parent_, BaseContainer) + in_dict = _get_and_validate_dict_input( + key, parent=_parent_, resolver_name="oc.dict.values" + ) + + content = in_dict._content + assert isinstance(content, dict) + + ret = ListConfig([]) + if key.startswith("."): + key = f".{key}" # extra dot to compensate for extra level of nesting within ret ListConfig + for k in content: + ref_node = AnyNode(f"${{{key}.{k!s}}}") + ret.append(ref_node) + + # Finalize result by setting proper type and parent. + element_type: Any = in_dict._metadata.element_type + ret._metadata.element_type = element_type + ret._metadata.ref_type = List[element_type] + ret._set_parent(_parent_) + + return ret + + +def _get_and_validate_dict_input( + key: str, + parent: BaseContainer, + resolver_name: str, +) -> DictConfig: + from omegaconf._impl import select_value + + if not isinstance(key, str): + raise TypeError( + f"`{resolver_name}` requires a string as input, but obtained `{key}` " + f"of type: {type(key).__name__}" + ) + + in_dict = select_value( + parent, + key, + throw_on_missing=True, + absolute_key=True, + default=_DEFAULT_SELECT_MARKER_, + ) + + if in_dict is _DEFAULT_SELECT_MARKER_: + raise ConfigKeyError(f"Key not found: '{key}'") + + if not isinstance(in_dict, DictConfig): + raise TypeError( + f"`{resolver_name}` cannot be applied to objects of type: " + f"{type(in_dict).__name__}" + ) + + return in_dict diff --git a/mantis_evalkit/lib/python3.10/site-packages/tokenizers-0.19.1.dist-info/INSTALLER b/mantis_evalkit/lib/python3.10/site-packages/tokenizers-0.19.1.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/mantis_evalkit/lib/python3.10/site-packages/tokenizers-0.19.1.dist-info/INSTALLER @@ -0,0 
+1 @@ +pip diff --git a/mantis_evalkit/lib/python3.10/site-packages/tokenizers-0.19.1.dist-info/METADATA b/mantis_evalkit/lib/python3.10/site-packages/tokenizers-0.19.1.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..640b66e9f96cf8920639b5a109aa7fc951252a8d --- /dev/null +++ b/mantis_evalkit/lib/python3.10/site-packages/tokenizers-0.19.1.dist-info/METADATA @@ -0,0 +1,209 @@ +Metadata-Version: 2.3 +Name: tokenizers +Version: 0.19.1 +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: Intended Audience :: Education +Classifier: Intended Audience :: Science/Research +Classifier: License :: OSI Approved :: Apache Software License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence +Requires-Dist: huggingface-hub >=0.16.4, <1.0 +Requires-Dist: pytest ; extra == 'testing' +Requires-Dist: requests ; extra == 'testing' +Requires-Dist: numpy ; extra == 'testing' +Requires-Dist: datasets ; extra == 'testing' +Requires-Dist: black ==22.3 ; extra == 'testing' +Requires-Dist: ruff ; extra == 'testing' +Requires-Dist: sphinx ; extra == 'docs' +Requires-Dist: sphinx-rtd-theme ; extra == 'docs' +Requires-Dist: setuptools-rust ; extra == 'docs' +Requires-Dist: tokenizers[testing] ; extra == 'dev' +Provides-Extra: testing +Provides-Extra: docs +Provides-Extra: dev +Keywords: NLP,tokenizer,BPE,transformer,deep learning +Author: Anthony MOI +Author-email: Nicolas Patry , Anthony Moi +Requires-Python: >=3.7 +Description-Content-Type: text/markdown; charset=UTF-8; variant=GFM +Project-URL: Homepage, 
https://github.com/huggingface/tokenizers +Project-URL: Source, https://github.com/huggingface/tokenizers + +

+
+ +
+

+

+ + Build + + + GitHub + +

+
+ +# Tokenizers + +Provides an implementation of today's most used tokenizers, with a focus on performance and +versatility. + +Bindings over the [Rust](https://github.com/huggingface/tokenizers/tree/master/tokenizers) implementation. +If you are interested in the High-level design, you can go check it there. + +Otherwise, let's dive in! + +## Main features: + + - Train new vocabularies and tokenize using 4 pre-made tokenizers (Bert WordPiece and the 3 + most common BPE versions). + - Extremely fast (both training and tokenization), thanks to the Rust implementation. Takes + less than 20 seconds to tokenize a GB of text on a server's CPU. + - Easy to use, but also extremely versatile. + - Designed for research and production. + - Normalization comes with alignments tracking. It's always possible to get the part of the + original sentence that corresponds to a given token. + - Does all the pre-processing: Truncate, Pad, add the special tokens your model needs. + +### Installation + +#### With pip: + +```bash +pip install tokenizers +``` + +#### From sources: + +To use this method, you need to have the Rust installed: + +```bash +# Install with: +curl https://sh.rustup.rs -sSf | sh -s -- -y +export PATH="$HOME/.cargo/bin:$PATH" +``` + +Once Rust is installed, you can compile doing the following + +```bash +git clone https://github.com/huggingface/tokenizers +cd tokenizers/bindings/python + +# Create a virtual env (you can use yours as well) +python -m venv .env +source .env/bin/activate + +# Install `tokenizers` in the current virtual env +pip install -e . +``` + +### Load a pretrained tokenizer from the Hub + +```python +from tokenizers import Tokenizer + +tokenizer = Tokenizer.from_pretrained("bert-base-cased") +``` + +### Using the provided Tokenizers + +We provide some pre-build tokenizers to cover the most common cases. 
You can easily load one of +these using some `vocab.json` and `merges.txt` files: + +```python +from tokenizers import CharBPETokenizer + +# Initialize a tokenizer +vocab = "./path/to/vocab.json" +merges = "./path/to/merges.txt" +tokenizer = CharBPETokenizer(vocab, merges) + +# And then encode: +encoded = tokenizer.encode("I can feel the magic, can you?") +print(encoded.ids) +print(encoded.tokens) +``` + +And you can train them just as simply: + +```python +from tokenizers import CharBPETokenizer + +# Initialize a tokenizer +tokenizer = CharBPETokenizer() + +# Then train it! +tokenizer.train([ "./path/to/files/1.txt", "./path/to/files/2.txt" ]) + +# Now, let's use it: +encoded = tokenizer.encode("I can feel the magic, can you?") + +# And finally save it somewhere +tokenizer.save("./path/to/directory/my-bpe.tokenizer.json") +``` + +#### Provided Tokenizers + + - `CharBPETokenizer`: The original BPE + - `ByteLevelBPETokenizer`: The byte level version of the BPE + - `SentencePieceBPETokenizer`: A BPE implementation compatible with the one used by SentencePiece + - `BertWordPieceTokenizer`: The famous Bert tokenizer, using WordPiece + +All of these can be used and trained as explained above! + +### Build your own + +Whenever these provided tokenizers don't give you enough freedom, you can build your own tokenizer, +by putting all the different parts you need together. +You can check how we implemented the [provided tokenizers](https://github.com/huggingface/tokenizers/tree/master/bindings/python/py_src/tokenizers/implementations) and adapt them easily to your own needs. 
+ +#### Building a byte-level BPE + +Here is an example showing how to build your own byte-level BPE by putting all the different pieces +together, and then saving it to a single file: + +```python +from tokenizers import Tokenizer, models, pre_tokenizers, decoders, trainers, processors + +# Initialize a tokenizer +tokenizer = Tokenizer(models.BPE()) + +# Customize pre-tokenization and decoding +tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=True) +tokenizer.decoder = decoders.ByteLevel() +tokenizer.post_processor = processors.ByteLevel(trim_offsets=True) + +# And then train +trainer = trainers.BpeTrainer( + vocab_size=20000, + min_frequency=2, + initial_alphabet=pre_tokenizers.ByteLevel.alphabet() +) +tokenizer.train([ + "./path/to/dataset/1.txt", + "./path/to/dataset/2.txt", + "./path/to/dataset/3.txt" +], trainer=trainer) + +# And Save it +tokenizer.save("byte-level-bpe.tokenizer.json", pretty=True) +``` + +Now, when you want to use this tokenizer, this is as simple as: + +```python +from tokenizers import Tokenizer + +tokenizer = Tokenizer.from_file("byte-level-bpe.tokenizer.json") + +encoded = tokenizer.encode("I can feel the magic, can you?") +``` + diff --git a/mantis_evalkit/lib/python3.10/site-packages/tokenizers-0.19.1.dist-info/RECORD b/mantis_evalkit/lib/python3.10/site-packages/tokenizers-0.19.1.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..d474bd92bead1efd9ccd7c8577ffe295d651435b --- /dev/null +++ b/mantis_evalkit/lib/python3.10/site-packages/tokenizers-0.19.1.dist-info/RECORD @@ -0,0 +1,46 @@ +tokenizers-0.19.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +tokenizers-0.19.1.dist-info/METADATA,sha256=rCAgg9DA_ZsETxHzh_dz0hPeRKTvoj9m9kUNZe14vxc,6719 +tokenizers-0.19.1.dist-info/RECORD,, +tokenizers-0.19.1.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +tokenizers-0.19.1.dist-info/WHEEL,sha256=JL8sd1C0RQ2f7cmwbAn1Jp257v_vSS2r0VvTBpJeZwA,129 
+tokenizers/__init__.py,sha256=ZE5ZagUvobBScrHBQdEobhx4wqM0bsq9F9aLYkBNjYQ,2615 +tokenizers/__init__.pyi,sha256=YBIWZCSN4Rs_-yKdEwhVv77bgHRE36hX9iwFrWGMJ8E,38536 +tokenizers/__pycache__/__init__.cpython-310.pyc,, +tokenizers/decoders/__init__.py,sha256=lGp32h8qerE0F48gyZL8wGmeQVlmjVpeIsRb1SM9kf4,335 +tokenizers/decoders/__init__.pyi,sha256=xsReo7OFRCiQ4bBZY9ogYb1iLJ5DTgI5elNB-Uggocs,7244 +tokenizers/decoders/__pycache__/__init__.cpython-310.pyc,, +tokenizers/implementations/__init__.py,sha256=VzAsplaIo7rl4AFO8Miu7ig7MfZjvonwVblZw01zR6M,310 +tokenizers/implementations/__pycache__/__init__.cpython-310.pyc,, +tokenizers/implementations/__pycache__/base_tokenizer.cpython-310.pyc,, +tokenizers/implementations/__pycache__/bert_wordpiece.cpython-310.pyc,, +tokenizers/implementations/__pycache__/byte_level_bpe.cpython-310.pyc,, +tokenizers/implementations/__pycache__/char_level_bpe.cpython-310.pyc,, +tokenizers/implementations/__pycache__/sentencepiece_bpe.cpython-310.pyc,, +tokenizers/implementations/__pycache__/sentencepiece_unigram.cpython-310.pyc,, +tokenizers/implementations/base_tokenizer.py,sha256=2TFZhLupaJiMDYGJuUNmxYJv-cnR8bDHmbMzaYpFROs,14206 +tokenizers/implementations/bert_wordpiece.py,sha256=sKCum0FKPYdSgJFJN8LDerVBoTDRSqyqSdrcm-lvQqI,5520 +tokenizers/implementations/byte_level_bpe.py,sha256=OA_jyy3EQmYTa6hnf-EKwLOFuyroqFYOJz25ysM2BUk,4289 +tokenizers/implementations/char_level_bpe.py,sha256=Q2ZEAW0xMQHF7YCUtmplwaxbU-J0P2NK4PJGMxUb-_c,5466 +tokenizers/implementations/sentencepiece_bpe.py,sha256=LwrofoohnUfME2lK2lQYoyQIhP84RP0CIlHRaj0hyNs,3738 +tokenizers/implementations/sentencepiece_unigram.py,sha256=SYiVXL8ZtqLXKpuqwnwmrfxgGotu8yAkOu7dLztEXIo,7580 +tokenizers/models/__init__.py,sha256=eJZ4HTAQZpxnKILNylWaTFqxXy-Ba6OKswWN47feeV8,176 +tokenizers/models/__init__.pyi,sha256=wH4M-ZZprw3UQ98fxWrF3MpivuNVY3s3pv4pGY0A_kE,16932 +tokenizers/models/__pycache__/__init__.cpython-310.pyc,, 
+tokenizers/normalizers/__init__.py,sha256=hKOwnqWM-IlcVv7HDWT9SYhlczevuCNDQJY05ZFxkzk,808 +tokenizers/normalizers/__init__.pyi,sha256=5SGm-u896MZht6TXMS9sWv1lCATnwNqbC2Udl5aP4dg,19597 +tokenizers/normalizers/__pycache__/__init__.cpython-310.pyc,, +tokenizers/pre_tokenizers/__init__.py,sha256=wd6KYQA_RsGSQK-HeG9opTRhv4ttSRkyno2dk6az-PM,557 +tokenizers/pre_tokenizers/__init__.pyi,sha256=IhF7dZt9_9_WM2ESKwEIvN59uW_YzS2PzmWBUScysWU,23258 +tokenizers/pre_tokenizers/__pycache__/__init__.cpython-310.pyc,, +tokenizers/processors/__init__.py,sha256=xM2DEKwKtHIumHsszM8AMkq-AlaqvBZFXWgLU8SNhOY,307 +tokenizers/processors/__init__.pyi,sha256=hx767ZY8SHhxb_hiXPRxm-f_KcoR4XDx7vfK2c0lR-Q,11357 +tokenizers/processors/__pycache__/__init__.cpython-310.pyc,, +tokenizers/tokenizers.cpython-310-x86_64-linux-gnu.so,sha256=Pkcy8QzpfJ9ekDd03LlTwPFRj0Cyfih5kgdTEYNc784,11815960 +tokenizers/tools/__init__.py,sha256=xG8caB9OHC8cbB01S5vYV14HZxhO6eWbLehsb70ppio,55 +tokenizers/tools/__pycache__/__init__.cpython-310.pyc,, +tokenizers/tools/__pycache__/visualizer.cpython-310.pyc,, +tokenizers/tools/visualizer-styles.css,sha256=zAydq1oGWD8QEll4-eyL8Llw0B1sty_hpIE3tYxL02k,4850 +tokenizers/tools/visualizer.py,sha256=gi-E2NCP7FuG6ujpQOdalSTXUlaV85V6NI-ZPPTvA_4,14625 +tokenizers/trainers/__init__.py,sha256=UTu22AGcp76IvpW45xLRbJWET04NxPW6NfCb2YYz0EM,248 +tokenizers/trainers/__init__.pyi,sha256=3TwFKts4me7zQfVRcSTmtXYiP4XwcRjfAYtwqoZVtoQ,5382 +tokenizers/trainers/__pycache__/__init__.cpython-310.pyc,, diff --git a/mantis_evalkit/lib/python3.10/site-packages/tokenizers-0.19.1.dist-info/REQUESTED b/mantis_evalkit/lib/python3.10/site-packages/tokenizers-0.19.1.dist-info/REQUESTED new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/mantis_evalkit/lib/python3.10/site-packages/tokenizers-0.19.1.dist-info/WHEEL b/mantis_evalkit/lib/python3.10/site-packages/tokenizers-0.19.1.dist-info/WHEEL new file mode 100644 index 
0000000000000000000000000000000000000000..efc3fe1c2443c99179e12648847b89e865477d5f --- /dev/null +++ b/mantis_evalkit/lib/python3.10/site-packages/tokenizers-0.19.1.dist-info/WHEEL @@ -0,0 +1,4 @@ +Wheel-Version: 1.0 +Generator: maturin (1.5.1) +Root-Is-Purelib: false +Tag: cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64 diff --git a/moondream/lib/python3.10/site-packages/pandas/tests/frame/constructors/__init__.py b/moondream/lib/python3.10/site-packages/pandas/tests/frame/constructors/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/moondream/lib/python3.10/site-packages/pandas/tests/frame/constructors/test_from_dict.py b/moondream/lib/python3.10/site-packages/pandas/tests/frame/constructors/test_from_dict.py new file mode 100644 index 0000000000000000000000000000000000000000..60a8e688b3b8adc495c7b6e6ddb47532df131852 --- /dev/null +++ b/moondream/lib/python3.10/site-packages/pandas/tests/frame/constructors/test_from_dict.py @@ -0,0 +1,228 @@ +from collections import OrderedDict + +import numpy as np +import pytest + +from pandas._config import using_pyarrow_string_dtype + +from pandas import ( + DataFrame, + Index, + MultiIndex, + RangeIndex, + Series, +) +import pandas._testing as tm + + +class TestFromDict: + # Note: these tests are specific to the from_dict method, not for + # passing dictionaries to DataFrame.__init__ + + def test_constructor_list_of_odicts(self): + data = [ + OrderedDict([["a", 1.5], ["b", 3], ["c", 4], ["d", 6]]), + OrderedDict([["a", 1.5], ["b", 3], ["d", 6]]), + OrderedDict([["a", 1.5], ["d", 6]]), + OrderedDict(), + OrderedDict([["a", 1.5], ["b", 3], ["c", 4]]), + OrderedDict([["b", 3], ["c", 4], ["d", 6]]), + ] + + result = DataFrame(data) + expected = DataFrame.from_dict( + dict(zip(range(len(data)), data)), orient="index" + ) + tm.assert_frame_equal(result, expected.reindex(result.index)) + + def test_constructor_single_row(self): + data = 
[OrderedDict([["a", 1.5], ["b", 3], ["c", 4], ["d", 6]])] + + result = DataFrame(data) + expected = DataFrame.from_dict(dict(zip([0], data)), orient="index").reindex( + result.index + ) + tm.assert_frame_equal(result, expected) + + @pytest.mark.skipif( + using_pyarrow_string_dtype(), reason="columns inferring logic broken" + ) + def test_constructor_list_of_series(self): + data = [ + OrderedDict([["a", 1.5], ["b", 3.0], ["c", 4.0]]), + OrderedDict([["a", 1.5], ["b", 3.0], ["c", 6.0]]), + ] + sdict = OrderedDict(zip(["x", "y"], data)) + idx = Index(["a", "b", "c"]) + + # all named + data2 = [ + Series([1.5, 3, 4], idx, dtype="O", name="x"), + Series([1.5, 3, 6], idx, name="y"), + ] + result = DataFrame(data2) + expected = DataFrame.from_dict(sdict, orient="index") + tm.assert_frame_equal(result, expected) + + # some unnamed + data2 = [ + Series([1.5, 3, 4], idx, dtype="O", name="x"), + Series([1.5, 3, 6], idx), + ] + result = DataFrame(data2) + + sdict = OrderedDict(zip(["x", "Unnamed 0"], data)) + expected = DataFrame.from_dict(sdict, orient="index") + tm.assert_frame_equal(result, expected) + + # none named + data = [ + OrderedDict([["a", 1.5], ["b", 3], ["c", 4], ["d", 6]]), + OrderedDict([["a", 1.5], ["b", 3], ["d", 6]]), + OrderedDict([["a", 1.5], ["d", 6]]), + OrderedDict(), + OrderedDict([["a", 1.5], ["b", 3], ["c", 4]]), + OrderedDict([["b", 3], ["c", 4], ["d", 6]]), + ] + data = [Series(d) for d in data] + + result = DataFrame(data) + sdict = OrderedDict(zip(range(len(data)), data)) + expected = DataFrame.from_dict(sdict, orient="index") + tm.assert_frame_equal(result, expected.reindex(result.index)) + + result2 = DataFrame(data, index=np.arange(6, dtype=np.int64)) + tm.assert_frame_equal(result, result2) + + result = DataFrame([Series(dtype=object)]) + expected = DataFrame(index=[0]) + tm.assert_frame_equal(result, expected) + + data = [ + OrderedDict([["a", 1.5], ["b", 3.0], ["c", 4.0]]), + OrderedDict([["a", 1.5], ["b", 3.0], ["c", 6.0]]), + ] + sdict = 
OrderedDict(zip(range(len(data)), data)) + + idx = Index(["a", "b", "c"]) + data2 = [Series([1.5, 3, 4], idx, dtype="O"), Series([1.5, 3, 6], idx)] + result = DataFrame(data2) + expected = DataFrame.from_dict(sdict, orient="index") + tm.assert_frame_equal(result, expected) + + def test_constructor_orient(self, float_string_frame): + data_dict = float_string_frame.T._series + recons = DataFrame.from_dict(data_dict, orient="index") + expected = float_string_frame.reindex(index=recons.index) + tm.assert_frame_equal(recons, expected) + + # dict of sequence + a = {"hi": [32, 3, 3], "there": [3, 5, 3]} + rs = DataFrame.from_dict(a, orient="index") + xp = DataFrame.from_dict(a).T.reindex(list(a.keys())) + tm.assert_frame_equal(rs, xp) + + def test_constructor_from_ordered_dict(self): + # GH#8425 + a = OrderedDict( + [ + ("one", OrderedDict([("col_a", "foo1"), ("col_b", "bar1")])), + ("two", OrderedDict([("col_a", "foo2"), ("col_b", "bar2")])), + ("three", OrderedDict([("col_a", "foo3"), ("col_b", "bar3")])), + ] + ) + expected = DataFrame.from_dict(a, orient="columns").T + result = DataFrame.from_dict(a, orient="index") + tm.assert_frame_equal(result, expected) + + def test_from_dict_columns_parameter(self): + # GH#18529 + # Test new columns parameter for from_dict that was added to make + # from_items(..., orient='index', columns=[...]) easier to replicate + result = DataFrame.from_dict( + OrderedDict([("A", [1, 2]), ("B", [4, 5])]), + orient="index", + columns=["one", "two"], + ) + expected = DataFrame([[1, 2], [4, 5]], index=["A", "B"], columns=["one", "two"]) + tm.assert_frame_equal(result, expected) + + msg = "cannot use columns parameter with orient='columns'" + with pytest.raises(ValueError, match=msg): + DataFrame.from_dict( + {"A": [1, 2], "B": [4, 5]}, + orient="columns", + columns=["one", "two"], + ) + with pytest.raises(ValueError, match=msg): + DataFrame.from_dict({"A": [1, 2], "B": [4, 5]}, columns=["one", "two"]) + + @pytest.mark.parametrize( + "data_dict, 
orient, expected", + [ + ({}, "index", RangeIndex(0)), + ( + [{("a",): 1}, {("a",): 2}], + "columns", + Index([("a",)], tupleize_cols=False), + ), + ( + [OrderedDict([(("a",), 1), (("b",), 2)])], + "columns", + Index([("a",), ("b",)], tupleize_cols=False), + ), + ([{("a", "b"): 1}], "columns", Index([("a", "b")], tupleize_cols=False)), + ], + ) + def test_constructor_from_dict_tuples(self, data_dict, orient, expected): + # GH#16769 + df = DataFrame.from_dict(data_dict, orient) + result = df.columns + tm.assert_index_equal(result, expected) + + def test_frame_dict_constructor_empty_series(self): + s1 = Series( + [1, 2, 3, 4], index=MultiIndex.from_tuples([(1, 2), (1, 3), (2, 2), (2, 4)]) + ) + s2 = Series( + [1, 2, 3, 4], index=MultiIndex.from_tuples([(1, 2), (1, 3), (3, 2), (3, 4)]) + ) + s3 = Series(dtype=object) + + # it works! + DataFrame({"foo": s1, "bar": s2, "baz": s3}) + DataFrame.from_dict({"foo": s1, "baz": s3, "bar": s2}) + + def test_from_dict_scalars_requires_index(self): + msg = "If using all scalar values, you must pass an index" + with pytest.raises(ValueError, match=msg): + DataFrame.from_dict(OrderedDict([("b", 8), ("a", 5), ("a", 6)])) + + def test_from_dict_orient_invalid(self): + msg = ( + "Expected 'index', 'columns' or 'tight' for orient parameter. 
" + "Got 'abc' instead" + ) + with pytest.raises(ValueError, match=msg): + DataFrame.from_dict({"foo": 1, "baz": 3, "bar": 2}, orient="abc") + + def test_from_dict_order_with_single_column(self): + data = { + "alpha": { + "value2": 123, + "value1": 532, + "animal": 222, + "plant": False, + "name": "test", + } + } + result = DataFrame.from_dict( + data, + orient="columns", + ) + expected = DataFrame( + [[123], [532], [222], [False], ["test"]], + index=["value2", "value1", "animal", "plant", "name"], + columns=["alpha"], + ) + tm.assert_frame_equal(result, expected) diff --git a/moondream/lib/python3.10/site-packages/pandas/tests/frame/methods/test_convert_dtypes.py b/moondream/lib/python3.10/site-packages/pandas/tests/frame/methods/test_convert_dtypes.py new file mode 100644 index 0000000000000000000000000000000000000000..521d2cb14ac6adf0d127e817833ef6620d6cb5e8 --- /dev/null +++ b/moondream/lib/python3.10/site-packages/pandas/tests/frame/methods/test_convert_dtypes.py @@ -0,0 +1,202 @@ +import datetime + +import numpy as np +import pytest + +import pandas as pd +import pandas._testing as tm + + +class TestConvertDtypes: + @pytest.mark.parametrize( + "convert_integer, expected", [(False, np.dtype("int32")), (True, "Int32")] + ) + def test_convert_dtypes( + self, convert_integer, expected, string_storage, using_infer_string + ): + # Specific types are tested in tests/series/test_dtypes.py + # Just check that it works for DataFrame here + if using_infer_string: + string_storage = "pyarrow_numpy" + df = pd.DataFrame( + { + "a": pd.Series([1, 2, 3], dtype=np.dtype("int32")), + "b": pd.Series(["x", "y", "z"], dtype=np.dtype("O")), + } + ) + with pd.option_context("string_storage", string_storage): + result = df.convert_dtypes(True, True, convert_integer, False) + expected = pd.DataFrame( + { + "a": pd.Series([1, 2, 3], dtype=expected), + "b": pd.Series(["x", "y", "z"], dtype=f"string[{string_storage}]"), + } + ) + tm.assert_frame_equal(result, expected) + + def 
test_convert_empty(self): + # Empty DataFrame can pass convert_dtypes, see GH#40393 + empty_df = pd.DataFrame() + tm.assert_frame_equal(empty_df, empty_df.convert_dtypes()) + + def test_convert_dtypes_retain_column_names(self): + # GH#41435 + df = pd.DataFrame({"a": [1, 2], "b": [3, 4]}) + df.columns.name = "cols" + + result = df.convert_dtypes() + tm.assert_index_equal(result.columns, df.columns) + assert result.columns.name == "cols" + + def test_pyarrow_dtype_backend(self): + pa = pytest.importorskip("pyarrow") + df = pd.DataFrame( + { + "a": pd.Series([1, 2, 3], dtype=np.dtype("int32")), + "b": pd.Series(["x", "y", None], dtype=np.dtype("O")), + "c": pd.Series([True, False, None], dtype=np.dtype("O")), + "d": pd.Series([np.nan, 100.5, 200], dtype=np.dtype("float")), + "e": pd.Series(pd.date_range("2022", periods=3)), + "f": pd.Series(pd.date_range("2022", periods=3, tz="UTC").as_unit("s")), + "g": pd.Series(pd.timedelta_range("1D", periods=3)), + } + ) + result = df.convert_dtypes(dtype_backend="pyarrow") + expected = pd.DataFrame( + { + "a": pd.arrays.ArrowExtensionArray( + pa.array([1, 2, 3], type=pa.int32()) + ), + "b": pd.arrays.ArrowExtensionArray(pa.array(["x", "y", None])), + "c": pd.arrays.ArrowExtensionArray(pa.array([True, False, None])), + "d": pd.arrays.ArrowExtensionArray(pa.array([None, 100.5, 200.0])), + "e": pd.arrays.ArrowExtensionArray( + pa.array( + [ + datetime.datetime(2022, 1, 1), + datetime.datetime(2022, 1, 2), + datetime.datetime(2022, 1, 3), + ], + type=pa.timestamp(unit="ns"), + ) + ), + "f": pd.arrays.ArrowExtensionArray( + pa.array( + [ + datetime.datetime(2022, 1, 1), + datetime.datetime(2022, 1, 2), + datetime.datetime(2022, 1, 3), + ], + type=pa.timestamp(unit="s", tz="UTC"), + ) + ), + "g": pd.arrays.ArrowExtensionArray( + pa.array( + [ + datetime.timedelta(1), + datetime.timedelta(2), + datetime.timedelta(3), + ], + type=pa.duration("ns"), + ) + ), + } + ) + tm.assert_frame_equal(result, expected) + + def 
test_pyarrow_dtype_backend_already_pyarrow(self): + pytest.importorskip("pyarrow") + expected = pd.DataFrame([1, 2, 3], dtype="int64[pyarrow]") + result = expected.convert_dtypes(dtype_backend="pyarrow") + tm.assert_frame_equal(result, expected) + + def test_pyarrow_dtype_backend_from_pandas_nullable(self): + pa = pytest.importorskip("pyarrow") + df = pd.DataFrame( + { + "a": pd.Series([1, 2, None], dtype="Int32"), + "b": pd.Series(["x", "y", None], dtype="string[python]"), + "c": pd.Series([True, False, None], dtype="boolean"), + "d": pd.Series([None, 100.5, 200], dtype="Float64"), + } + ) + result = df.convert_dtypes(dtype_backend="pyarrow") + expected = pd.DataFrame( + { + "a": pd.arrays.ArrowExtensionArray( + pa.array([1, 2, None], type=pa.int32()) + ), + "b": pd.arrays.ArrowExtensionArray(pa.array(["x", "y", None])), + "c": pd.arrays.ArrowExtensionArray(pa.array([True, False, None])), + "d": pd.arrays.ArrowExtensionArray(pa.array([None, 100.5, 200.0])), + } + ) + tm.assert_frame_equal(result, expected) + + def test_pyarrow_dtype_empty_object(self): + # GH 50970 + pytest.importorskip("pyarrow") + expected = pd.DataFrame(columns=[0]) + result = expected.convert_dtypes(dtype_backend="pyarrow") + tm.assert_frame_equal(result, expected) + + def test_pyarrow_engine_lines_false(self): + # GH 48893 + df = pd.DataFrame({"a": [1, 2, 3]}) + msg = ( + "dtype_backend numpy is invalid, only 'numpy_nullable' and " + "'pyarrow' are allowed." 
+ ) + with pytest.raises(ValueError, match=msg): + df.convert_dtypes(dtype_backend="numpy") + + def test_pyarrow_backend_no_conversion(self): + # GH#52872 + pytest.importorskip("pyarrow") + df = pd.DataFrame({"a": [1, 2], "b": 1.5, "c": True, "d": "x"}) + expected = df.copy() + result = df.convert_dtypes( + convert_floating=False, + convert_integer=False, + convert_boolean=False, + convert_string=False, + dtype_backend="pyarrow", + ) + tm.assert_frame_equal(result, expected) + + def test_convert_dtypes_pyarrow_to_np_nullable(self): + # GH 53648 + pytest.importorskip("pyarrow") + ser = pd.DataFrame(range(2), dtype="int32[pyarrow]") + result = ser.convert_dtypes(dtype_backend="numpy_nullable") + expected = pd.DataFrame(range(2), dtype="Int32") + tm.assert_frame_equal(result, expected) + + def test_convert_dtypes_pyarrow_timestamp(self): + # GH 54191 + pytest.importorskip("pyarrow") + ser = pd.Series(pd.date_range("2020-01-01", "2020-01-02", freq="1min")) + expected = ser.astype("timestamp[ms][pyarrow]") + result = expected.convert_dtypes(dtype_backend="pyarrow") + tm.assert_series_equal(result, expected) + + def test_convert_dtypes_avoid_block_splitting(self): + # GH#55341 + df = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": "a"}) + result = df.convert_dtypes(convert_integer=False) + expected = pd.DataFrame( + { + "a": [1, 2, 3], + "b": [4, 5, 6], + "c": pd.Series(["a"] * 3, dtype="string[python]"), + } + ) + tm.assert_frame_equal(result, expected) + assert result._mgr.nblocks == 2 + + def test_convert_dtypes_from_arrow(self): + # GH#56581 + df = pd.DataFrame([["a", datetime.time(18, 12)]], columns=["a", "b"]) + result = df.convert_dtypes() + expected = df.astype({"a": "string[python]"}) + tm.assert_frame_equal(result, expected) diff --git a/moondream/lib/python3.10/site-packages/pandas/tests/frame/methods/test_count.py b/moondream/lib/python3.10/site-packages/pandas/tests/frame/methods/test_count.py new file mode 100644 index 
0000000000000000000000000000000000000000..1553a8a86305dd931c5378245daf272472d41b20 --- /dev/null +++ b/moondream/lib/python3.10/site-packages/pandas/tests/frame/methods/test_count.py @@ -0,0 +1,39 @@ +from pandas import ( + DataFrame, + Series, +) +import pandas._testing as tm + + +class TestDataFrameCount: + def test_count(self): + # corner case + frame = DataFrame() + ct1 = frame.count(1) + assert isinstance(ct1, Series) + + ct2 = frame.count(0) + assert isinstance(ct2, Series) + + # GH#423 + df = DataFrame(index=range(10)) + result = df.count(1) + expected = Series(0, index=df.index) + tm.assert_series_equal(result, expected) + + df = DataFrame(columns=range(10)) + result = df.count(0) + expected = Series(0, index=df.columns) + tm.assert_series_equal(result, expected) + + df = DataFrame() + result = df.count() + expected = Series(dtype="int64") + tm.assert_series_equal(result, expected) + + def test_count_objects(self, float_string_frame): + dm = DataFrame(float_string_frame._series) + df = DataFrame(float_string_frame._series) + + tm.assert_series_equal(dm.count(), df.count()) + tm.assert_series_equal(dm.count(1), df.count(1)) diff --git a/moondream/lib/python3.10/site-packages/pandas/tests/frame/methods/test_info.py b/moondream/lib/python3.10/site-packages/pandas/tests/frame/methods/test_info.py new file mode 100644 index 0000000000000000000000000000000000000000..fcb7677f03f279fe35b9ebfc103bbd59f1073076 --- /dev/null +++ b/moondream/lib/python3.10/site-packages/pandas/tests/frame/methods/test_info.py @@ -0,0 +1,565 @@ +from io import StringIO +import re +from string import ascii_uppercase +import sys +import textwrap + +import numpy as np +import pytest + +from pandas.compat import ( + IS64, + PYPY, +) + +from pandas import ( + CategoricalIndex, + DataFrame, + MultiIndex, + Series, + date_range, + option_context, +) +import pandas._testing as tm + + +@pytest.fixture +def duplicate_columns_frame(): + """Dataframe with duplicate column names.""" + return 
DataFrame( + np.random.default_rng(2).standard_normal((1500, 4)), + columns=["a", "a", "b", "b"], + ) + + +def test_info_empty(): + # GH #45494 + df = DataFrame() + buf = StringIO() + df.info(buf=buf) + result = buf.getvalue() + expected = textwrap.dedent( + """\ + + RangeIndex: 0 entries + Empty DataFrame\n""" + ) + assert result == expected + + +def test_info_categorical_column_smoke_test(): + n = 2500 + df = DataFrame({"int64": np.random.default_rng(2).integers(100, size=n, dtype=int)}) + df["category"] = Series( + np.array(list("abcdefghij")).take( + np.random.default_rng(2).integers(0, 10, size=n, dtype=int) + ) + ).astype("category") + df.isna() + buf = StringIO() + df.info(buf=buf) + + df2 = df[df["category"] == "d"] + buf = StringIO() + df2.info(buf=buf) + + +@pytest.mark.parametrize( + "fixture_func_name", + [ + "int_frame", + "float_frame", + "datetime_frame", + "duplicate_columns_frame", + "float_string_frame", + ], +) +def test_info_smoke_test(fixture_func_name, request): + frame = request.getfixturevalue(fixture_func_name) + buf = StringIO() + frame.info(buf=buf) + result = buf.getvalue().splitlines() + assert len(result) > 10 + + buf = StringIO() + frame.info(buf=buf, verbose=False) + + +def test_info_smoke_test2(float_frame): + # pretty useless test, used to be mixed into the repr tests + buf = StringIO() + float_frame.reindex(columns=["A"]).info(verbose=False, buf=buf) + float_frame.reindex(columns=["A", "B"]).info(verbose=False, buf=buf) + + # no columns or index + DataFrame().info(buf=buf) + + +@pytest.mark.parametrize( + "num_columns, max_info_columns, verbose", + [ + (10, 100, True), + (10, 11, True), + (10, 10, True), + (10, 9, False), + (10, 1, False), + ], +) +def test_info_default_verbose_selection(num_columns, max_info_columns, verbose): + frame = DataFrame(np.random.default_rng(2).standard_normal((5, num_columns))) + with option_context("display.max_info_columns", max_info_columns): + io_default = StringIO() + frame.info(buf=io_default) + 
result = io_default.getvalue() + + io_explicit = StringIO() + frame.info(buf=io_explicit, verbose=verbose) + expected = io_explicit.getvalue() + + assert result == expected + + +def test_info_verbose_check_header_separator_body(): + buf = StringIO() + size = 1001 + start = 5 + frame = DataFrame(np.random.default_rng(2).standard_normal((3, size))) + frame.info(verbose=True, buf=buf) + + res = buf.getvalue() + header = " # Column Dtype \n--- ------ ----- " + assert header in res + + frame.info(verbose=True, buf=buf) + buf.seek(0) + lines = buf.readlines() + assert len(lines) > 0 + + for i, line in enumerate(lines): + if start <= i < start + size: + line_nr = f" {i - start} " + assert line.startswith(line_nr) + + +@pytest.mark.parametrize( + "size, header_exp, separator_exp, first_line_exp, last_line_exp", + [ + ( + 4, + " # Column Non-Null Count Dtype ", + "--- ------ -------------- ----- ", + " 0 0 3 non-null float64", + " 3 3 3 non-null float64", + ), + ( + 11, + " # Column Non-Null Count Dtype ", + "--- ------ -------------- ----- ", + " 0 0 3 non-null float64", + " 10 10 3 non-null float64", + ), + ( + 101, + " # Column Non-Null Count Dtype ", + "--- ------ -------------- ----- ", + " 0 0 3 non-null float64", + " 100 100 3 non-null float64", + ), + ( + 1001, + " # Column Non-Null Count Dtype ", + "--- ------ -------------- ----- ", + " 0 0 3 non-null float64", + " 1000 1000 3 non-null float64", + ), + ( + 10001, + " # Column Non-Null Count Dtype ", + "--- ------ -------------- ----- ", + " 0 0 3 non-null float64", + " 10000 10000 3 non-null float64", + ), + ], +) +def test_info_verbose_with_counts_spacing( + size, header_exp, separator_exp, first_line_exp, last_line_exp +): + """Test header column, spacer, first line and last line in verbose mode.""" + frame = DataFrame(np.random.default_rng(2).standard_normal((3, size))) + with StringIO() as buf: + frame.info(verbose=True, show_counts=True, buf=buf) + all_lines = buf.getvalue().splitlines() + # Here table would 
contain only header, separator and table lines + # dframe repr, index summary, memory usage and dtypes are excluded + table = all_lines[3:-2] + header, separator, first_line, *rest, last_line = table + assert header == header_exp + assert separator == separator_exp + assert first_line == first_line_exp + assert last_line == last_line_exp + + +def test_info_memory(): + # https://github.com/pandas-dev/pandas/issues/21056 + df = DataFrame({"a": Series([1, 2], dtype="i8")}) + buf = StringIO() + df.info(buf=buf) + result = buf.getvalue() + bytes = float(df.memory_usage().sum()) + expected = textwrap.dedent( + f"""\ + + RangeIndex: 2 entries, 0 to 1 + Data columns (total 1 columns): + # Column Non-Null Count Dtype + --- ------ -------------- ----- + 0 a 2 non-null int64 + dtypes: int64(1) + memory usage: {bytes} bytes + """ + ) + assert result == expected + + +def test_info_wide(): + io = StringIO() + df = DataFrame(np.random.default_rng(2).standard_normal((5, 101))) + df.info(buf=io) + + io = StringIO() + df.info(buf=io, max_cols=101) + result = io.getvalue() + assert len(result.splitlines()) > 100 + + expected = result + with option_context("display.max_info_columns", 101): + io = StringIO() + df.info(buf=io) + result = io.getvalue() + assert result == expected + + +def test_info_duplicate_columns_shows_correct_dtypes(): + # GH11761 + io = StringIO() + frame = DataFrame([[1, 2.0]], columns=["a", "a"]) + frame.info(buf=io) + lines = io.getvalue().splitlines(True) + assert " 0 a 1 non-null int64 \n" == lines[5] + assert " 1 a 1 non-null float64\n" == lines[6] + + +def test_info_shows_column_dtypes(): + dtypes = [ + "int64", + "float64", + "datetime64[ns]", + "timedelta64[ns]", + "complex128", + "object", + "bool", + ] + data = {} + n = 10 + for i, dtype in enumerate(dtypes): + data[i] = np.random.default_rng(2).integers(2, size=n).astype(dtype) + df = DataFrame(data) + buf = StringIO() + df.info(buf=buf) + res = buf.getvalue() + header = ( + " # Column Non-Null Count 
Dtype \n" + "--- ------ -------------- ----- " + ) + assert header in res + for i, dtype in enumerate(dtypes): + name = f" {i:d} {i:d} {n:d} non-null {dtype}" + assert name in res + + +def test_info_max_cols(): + df = DataFrame(np.random.default_rng(2).standard_normal((10, 5))) + for len_, verbose in [(5, None), (5, False), (12, True)]: + # For verbose always ^ setting ^ summarize ^ full output + with option_context("max_info_columns", 4): + buf = StringIO() + df.info(buf=buf, verbose=verbose) + res = buf.getvalue() + assert len(res.strip().split("\n")) == len_ + + for len_, verbose in [(12, None), (5, False), (12, True)]: + # max_cols not exceeded + with option_context("max_info_columns", 5): + buf = StringIO() + df.info(buf=buf, verbose=verbose) + res = buf.getvalue() + assert len(res.strip().split("\n")) == len_ + + for len_, max_cols in [(12, 5), (5, 4)]: + # setting truncates + with option_context("max_info_columns", 4): + buf = StringIO() + df.info(buf=buf, max_cols=max_cols) + res = buf.getvalue() + assert len(res.strip().split("\n")) == len_ + + # setting wouldn't truncate + with option_context("max_info_columns", 5): + buf = StringIO() + df.info(buf=buf, max_cols=max_cols) + res = buf.getvalue() + assert len(res.strip().split("\n")) == len_ + + +def test_info_memory_usage(): + # Ensure memory usage is displayed, when asserted, on the last line + dtypes = [ + "int64", + "float64", + "datetime64[ns]", + "timedelta64[ns]", + "complex128", + "object", + "bool", + ] + data = {} + n = 10 + for i, dtype in enumerate(dtypes): + data[i] = np.random.default_rng(2).integers(2, size=n).astype(dtype) + df = DataFrame(data) + buf = StringIO() + + # display memory usage case + df.info(buf=buf, memory_usage=True) + res = buf.getvalue().splitlines() + assert "memory usage: " in res[-1] + + # do not display memory usage case + df.info(buf=buf, memory_usage=False) + res = buf.getvalue().splitlines() + assert "memory usage: " not in res[-1] + + df.info(buf=buf, 
memory_usage=True) + res = buf.getvalue().splitlines() + + # memory usage is a lower bound, so print it as XYZ+ MB + assert re.match(r"memory usage: [^+]+\+", res[-1]) + + df.iloc[:, :5].info(buf=buf, memory_usage=True) + res = buf.getvalue().splitlines() + + # excluded column with object dtype, so estimate is accurate + assert not re.match(r"memory usage: [^+]+\+", res[-1]) + + # Test a DataFrame with duplicate columns + dtypes = ["int64", "int64", "int64", "float64"] + data = {} + n = 100 + for i, dtype in enumerate(dtypes): + data[i] = np.random.default_rng(2).integers(2, size=n).astype(dtype) + df = DataFrame(data) + df.columns = dtypes + + df_with_object_index = DataFrame({"a": [1]}, index=["foo"]) + df_with_object_index.info(buf=buf, memory_usage=True) + res = buf.getvalue().splitlines() + assert re.match(r"memory usage: [^+]+\+", res[-1]) + + df_with_object_index.info(buf=buf, memory_usage="deep") + res = buf.getvalue().splitlines() + assert re.match(r"memory usage: [^+]+$", res[-1]) + + # Ensure df size is as expected + # (cols * rows * bytes) + index size + df_size = df.memory_usage().sum() + exp_size = len(dtypes) * n * 8 + df.index.nbytes + assert df_size == exp_size + + # Ensure number of cols in memory_usage is the same as df + size_df = np.size(df.columns.values) + 1 # index=True; default + assert size_df == np.size(df.memory_usage()) + + # assert deep works only on object + assert df.memory_usage().sum() == df.memory_usage(deep=True).sum() + + # test for validity + DataFrame(1, index=["a"], columns=["A"]).memory_usage(index=True) + DataFrame(1, index=["a"], columns=["A"]).index.nbytes + df = DataFrame( + data=1, index=MultiIndex.from_product([["a"], range(1000)]), columns=["A"] + ) + df.index.nbytes + df.memory_usage(index=True) + df.index.values.nbytes + + mem = df.memory_usage(deep=True).sum() + assert mem > 0 + + +@pytest.mark.skipif(PYPY, reason="on PyPy deep=True doesn't change result") +def test_info_memory_usage_deep_not_pypy(): + 
df_with_object_index = DataFrame({"a": [1]}, index=["foo"]) + assert ( + df_with_object_index.memory_usage(index=True, deep=True).sum() + > df_with_object_index.memory_usage(index=True).sum() + ) + + df_object = DataFrame({"a": ["a"]}) + assert df_object.memory_usage(deep=True).sum() > df_object.memory_usage().sum() + + +@pytest.mark.xfail(not PYPY, reason="on PyPy deep=True does not change result") +def test_info_memory_usage_deep_pypy(): + df_with_object_index = DataFrame({"a": [1]}, index=["foo"]) + assert ( + df_with_object_index.memory_usage(index=True, deep=True).sum() + == df_with_object_index.memory_usage(index=True).sum() + ) + + df_object = DataFrame({"a": ["a"]}) + assert df_object.memory_usage(deep=True).sum() == df_object.memory_usage().sum() + + +@pytest.mark.skipif(PYPY, reason="PyPy getsizeof() fails by design") +def test_usage_via_getsizeof(): + df = DataFrame( + data=1, index=MultiIndex.from_product([["a"], range(1000)]), columns=["A"] + ) + mem = df.memory_usage(deep=True).sum() + # sys.getsizeof will call the .memory_usage with + # deep=True, and add on some GC overhead + diff = mem - sys.getsizeof(df) + assert abs(diff) < 100 + + +def test_info_memory_usage_qualified(): + buf = StringIO() + df = DataFrame(1, columns=list("ab"), index=[1, 2, 3]) + df.info(buf=buf) + assert "+" not in buf.getvalue() + + buf = StringIO() + df = DataFrame(1, columns=list("ab"), index=list("ABC")) + df.info(buf=buf) + assert "+" in buf.getvalue() + + buf = StringIO() + df = DataFrame( + 1, columns=list("ab"), index=MultiIndex.from_product([range(3), range(3)]) + ) + df.info(buf=buf) + assert "+" not in buf.getvalue() + + buf = StringIO() + df = DataFrame( + 1, columns=list("ab"), index=MultiIndex.from_product([range(3), ["foo", "bar"]]) + ) + df.info(buf=buf) + assert "+" in buf.getvalue() + + +def test_info_memory_usage_bug_on_multiindex(): + # GH 14308 + # memory usage introspection should not materialize .values + + def memory_usage(f): + return 
f.memory_usage(deep=True).sum() + + N = 100 + M = len(ascii_uppercase) + index = MultiIndex.from_product( + [list(ascii_uppercase), date_range("20160101", periods=N)], + names=["id", "date"], + ) + df = DataFrame( + {"value": np.random.default_rng(2).standard_normal(N * M)}, index=index + ) + + unstacked = df.unstack("id") + assert df.values.nbytes == unstacked.values.nbytes + assert memory_usage(df) > memory_usage(unstacked) + + # high upper bound + assert memory_usage(unstacked) - memory_usage(df) < 2000 + + +def test_info_categorical(): + # GH14298 + idx = CategoricalIndex(["a", "b"]) + df = DataFrame(np.zeros((2, 2)), index=idx, columns=idx) + + buf = StringIO() + df.info(buf=buf) + + +@pytest.mark.xfail(not IS64, reason="GH 36579: fail on 32-bit system") +def test_info_int_columns(): + # GH#37245 + df = DataFrame({1: [1, 2], 2: [2, 3]}, index=["A", "B"]) + buf = StringIO() + df.info(show_counts=True, buf=buf) + result = buf.getvalue() + expected = textwrap.dedent( + """\ + + Index: 2 entries, A to B + Data columns (total 2 columns): + # Column Non-Null Count Dtype + --- ------ -------------- ----- + 0 1 2 non-null int64 + 1 2 2 non-null int64 + dtypes: int64(2) + memory usage: 48.0+ bytes + """ + ) + assert result == expected + + +def test_memory_usage_empty_no_warning(): + # GH#50066 + df = DataFrame(index=["a", "b"]) + with tm.assert_produces_warning(None): + result = df.memory_usage() + expected = Series(16 if IS64 else 8, index=["Index"]) + tm.assert_series_equal(result, expected) + + +@pytest.mark.single_cpu +def test_info_compute_numba(): + # GH#51922 + pytest.importorskip("numba") + df = DataFrame([[1, 2], [3, 4]]) + + with option_context("compute.use_numba", True): + buf = StringIO() + df.info(buf=buf) + result = buf.getvalue() + + buf = StringIO() + df.info(buf=buf) + expected = buf.getvalue() + assert result == expected + + +@pytest.mark.parametrize( + "row, columns, show_counts, result", + [ + [20, 20, None, True], + [20, 20, True, True], + [20, 20, 
False, False], + [5, 5, None, False], + [5, 5, True, False], + [5, 5, False, False], + ], +) +def test_info_show_counts(row, columns, show_counts, result): + # Explicit cast to float to avoid implicit cast when setting nan + df = DataFrame(1, columns=range(10), index=range(10)).astype({1: "float"}) + df.iloc[1, 1] = np.nan + + with option_context( + "display.max_info_rows", row, "display.max_info_columns", columns + ): + with StringIO() as buf: + df.info(buf=buf, show_counts=show_counts) + assert ("non-null" in buf.getvalue()) is result diff --git a/moondream/lib/python3.10/site-packages/pandas/tests/frame/methods/test_reorder_levels.py b/moondream/lib/python3.10/site-packages/pandas/tests/frame/methods/test_reorder_levels.py new file mode 100644 index 0000000000000000000000000000000000000000..5d6b65daae4d513b3d3333856a57a2199cb79ed0 --- /dev/null +++ b/moondream/lib/python3.10/site-packages/pandas/tests/frame/methods/test_reorder_levels.py @@ -0,0 +1,74 @@ +import numpy as np +import pytest + +from pandas import ( + DataFrame, + MultiIndex, +) +import pandas._testing as tm + + +class TestReorderLevels: + def test_reorder_levels(self, frame_or_series): + index = MultiIndex( + levels=[["bar"], ["one", "two", "three"], [0, 1]], + codes=[[0, 0, 0, 0, 0, 0], [0, 1, 2, 0, 1, 2], [0, 1, 0, 1, 0, 1]], + names=["L0", "L1", "L2"], + ) + df = DataFrame({"A": np.arange(6), "B": np.arange(6)}, index=index) + obj = tm.get_obj(df, frame_or_series) + + # no change, position + result = obj.reorder_levels([0, 1, 2]) + tm.assert_equal(obj, result) + + # no change, labels + result = obj.reorder_levels(["L0", "L1", "L2"]) + tm.assert_equal(obj, result) + + # rotate, position + result = obj.reorder_levels([1, 2, 0]) + e_idx = MultiIndex( + levels=[["one", "two", "three"], [0, 1], ["bar"]], + codes=[[0, 1, 2, 0, 1, 2], [0, 1, 0, 1, 0, 1], [0, 0, 0, 0, 0, 0]], + names=["L1", "L2", "L0"], + ) + expected = DataFrame({"A": np.arange(6), "B": np.arange(6)}, index=e_idx) + expected = 
tm.get_obj(expected, frame_or_series) + tm.assert_equal(result, expected) + + result = obj.reorder_levels([0, 0, 0]) + e_idx = MultiIndex( + levels=[["bar"], ["bar"], ["bar"]], + codes=[[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]], + names=["L0", "L0", "L0"], + ) + expected = DataFrame({"A": np.arange(6), "B": np.arange(6)}, index=e_idx) + expected = tm.get_obj(expected, frame_or_series) + tm.assert_equal(result, expected) + + result = obj.reorder_levels(["L0", "L0", "L0"]) + tm.assert_equal(result, expected) + + def test_reorder_levels_swaplevel_equivalence( + self, multiindex_year_month_day_dataframe_random_data + ): + ymd = multiindex_year_month_day_dataframe_random_data + + result = ymd.reorder_levels(["month", "day", "year"]) + expected = ymd.swaplevel(0, 1).swaplevel(1, 2) + tm.assert_frame_equal(result, expected) + + result = ymd["A"].reorder_levels(["month", "day", "year"]) + expected = ymd["A"].swaplevel(0, 1).swaplevel(1, 2) + tm.assert_series_equal(result, expected) + + result = ymd.T.reorder_levels(["month", "day", "year"], axis=1) + expected = ymd.T.swaplevel(0, 1, axis=1).swaplevel(1, 2, axis=1) + tm.assert_frame_equal(result, expected) + + with pytest.raises(TypeError, match="hierarchical axis"): + ymd.reorder_levels([1, 2], axis=1) + + with pytest.raises(IndexError, match="Too many levels"): + ymd.index.reorder_levels([1, 2, 3]) diff --git a/moondream/lib/python3.10/site-packages/pandas/tests/frame/methods/test_to_timestamp.py b/moondream/lib/python3.10/site-packages/pandas/tests/frame/methods/test_to_timestamp.py new file mode 100644 index 0000000000000000000000000000000000000000..0e7e1d595d6be9250638932e7690f420b9a12fc0 --- /dev/null +++ b/moondream/lib/python3.10/site-packages/pandas/tests/frame/methods/test_to_timestamp.py @@ -0,0 +1,154 @@ +from datetime import timedelta + +import numpy as np +import pytest + +from pandas import ( + DataFrame, + DatetimeIndex, + PeriodIndex, + Series, + Timedelta, + date_range, + period_range, + 
to_datetime, +) +import pandas._testing as tm + + +def _get_with_delta(delta, freq="YE-DEC"): + return date_range( + to_datetime("1/1/2001") + delta, + to_datetime("12/31/2009") + delta, + freq=freq, + ) + + +class TestToTimestamp: + def test_to_timestamp(self, frame_or_series): + K = 5 + index = period_range(freq="Y", start="1/1/2001", end="12/1/2009") + obj = DataFrame( + np.random.default_rng(2).standard_normal((len(index), K)), + index=index, + columns=["A", "B", "C", "D", "E"], + ) + obj["mix"] = "a" + obj = tm.get_obj(obj, frame_or_series) + + exp_index = date_range("1/1/2001", end="12/31/2009", freq="YE-DEC") + exp_index = exp_index + Timedelta(1, "D") - Timedelta(1, "ns") + result = obj.to_timestamp("D", "end") + tm.assert_index_equal(result.index, exp_index) + tm.assert_numpy_array_equal(result.values, obj.values) + if frame_or_series is Series: + assert result.name == "A" + + exp_index = date_range("1/1/2001", end="1/1/2009", freq="YS-JAN") + result = obj.to_timestamp("D", "start") + tm.assert_index_equal(result.index, exp_index) + + result = obj.to_timestamp(how="start") + tm.assert_index_equal(result.index, exp_index) + + delta = timedelta(hours=23) + result = obj.to_timestamp("H", "end") + exp_index = _get_with_delta(delta) + exp_index = exp_index + Timedelta(1, "h") - Timedelta(1, "ns") + tm.assert_index_equal(result.index, exp_index) + + delta = timedelta(hours=23, minutes=59) + result = obj.to_timestamp("T", "end") + exp_index = _get_with_delta(delta) + exp_index = exp_index + Timedelta(1, "m") - Timedelta(1, "ns") + tm.assert_index_equal(result.index, exp_index) + + result = obj.to_timestamp("S", "end") + delta = timedelta(hours=23, minutes=59, seconds=59) + exp_index = _get_with_delta(delta) + exp_index = exp_index + Timedelta(1, "s") - Timedelta(1, "ns") + tm.assert_index_equal(result.index, exp_index) + + def test_to_timestamp_columns(self): + K = 5 + index = period_range(freq="Y", start="1/1/2001", end="12/1/2009") + df = DataFrame( + 
np.random.default_rng(2).standard_normal((len(index), K)), + index=index, + columns=["A", "B", "C", "D", "E"], + ) + df["mix"] = "a" + + # columns + df = df.T + + exp_index = date_range("1/1/2001", end="12/31/2009", freq="YE-DEC") + exp_index = exp_index + Timedelta(1, "D") - Timedelta(1, "ns") + result = df.to_timestamp("D", "end", axis=1) + tm.assert_index_equal(result.columns, exp_index) + tm.assert_numpy_array_equal(result.values, df.values) + + exp_index = date_range("1/1/2001", end="1/1/2009", freq="YS-JAN") + result = df.to_timestamp("D", "start", axis=1) + tm.assert_index_equal(result.columns, exp_index) + + delta = timedelta(hours=23) + result = df.to_timestamp("H", "end", axis=1) + exp_index = _get_with_delta(delta) + exp_index = exp_index + Timedelta(1, "h") - Timedelta(1, "ns") + tm.assert_index_equal(result.columns, exp_index) + + delta = timedelta(hours=23, minutes=59) + result = df.to_timestamp("min", "end", axis=1) + exp_index = _get_with_delta(delta) + exp_index = exp_index + Timedelta(1, "m") - Timedelta(1, "ns") + tm.assert_index_equal(result.columns, exp_index) + + result = df.to_timestamp("S", "end", axis=1) + delta = timedelta(hours=23, minutes=59, seconds=59) + exp_index = _get_with_delta(delta) + exp_index = exp_index + Timedelta(1, "s") - Timedelta(1, "ns") + tm.assert_index_equal(result.columns, exp_index) + + result1 = df.to_timestamp("5min", axis=1) + result2 = df.to_timestamp("min", axis=1) + expected = date_range("2001-01-01", "2009-01-01", freq="YS") + assert isinstance(result1.columns, DatetimeIndex) + assert isinstance(result2.columns, DatetimeIndex) + tm.assert_numpy_array_equal(result1.columns.asi8, expected.asi8) + tm.assert_numpy_array_equal(result2.columns.asi8, expected.asi8) + # PeriodIndex.to_timestamp always use 'infer' + assert result1.columns.freqstr == "YS-JAN" + assert result2.columns.freqstr == "YS-JAN" + + def test_to_timestamp_invalid_axis(self): + index = period_range(freq="Y", start="1/1/2001", end="12/1/2009") + 
obj = DataFrame( + np.random.default_rng(2).standard_normal((len(index), 5)), index=index + ) + + # invalid axis + with pytest.raises(ValueError, match="axis"): + obj.to_timestamp(axis=2) + + def test_to_timestamp_hourly(self, frame_or_series): + index = period_range(freq="h", start="1/1/2001", end="1/2/2001") + obj = Series(1, index=index, name="foo") + if frame_or_series is not Series: + obj = obj.to_frame() + + exp_index = date_range("1/1/2001 00:59:59", end="1/2/2001 00:59:59", freq="h") + result = obj.to_timestamp(how="end") + exp_index = exp_index + Timedelta(1, "s") - Timedelta(1, "ns") + tm.assert_index_equal(result.index, exp_index) + if frame_or_series is Series: + assert result.name == "foo" + + def test_to_timestamp_raises(self, index, frame_or_series): + # GH#33327 + obj = frame_or_series(index=index, dtype=object) + + if not isinstance(index, PeriodIndex): + msg = f"unsupported Type {type(index).__name__}" + with pytest.raises(TypeError, match=msg): + obj.to_timestamp() diff --git a/moondream/lib/python3.10/site-packages/pandas/tests/frame/test_arithmetic.py b/moondream/lib/python3.10/site-packages/pandas/tests/frame/test_arithmetic.py new file mode 100644 index 0000000000000000000000000000000000000000..0593de7556406ab143088a0f450e620653db83a6 --- /dev/null +++ b/moondream/lib/python3.10/site-packages/pandas/tests/frame/test_arithmetic.py @@ -0,0 +1,2136 @@ +from collections import deque +from datetime import ( + datetime, + timezone, +) +from enum import Enum +import functools +import operator +import re + +import numpy as np +import pytest + +from pandas._config import using_pyarrow_string_dtype + +import pandas.util._test_decorators as td + +import pandas as pd +from pandas import ( + DataFrame, + Index, + MultiIndex, + Series, +) +import pandas._testing as tm +from pandas.core.computation import expressions as expr +from pandas.tests.frame.common import ( + _check_mixed_float, + _check_mixed_int, +) + + +@pytest.fixture +def simple_frame(): + """ 
+ Fixture for simple 3x3 DataFrame + + Columns are ['one', 'two', 'three'], index is ['a', 'b', 'c']. + + one two three + a 1.0 2.0 3.0 + b 4.0 5.0 6.0 + c 7.0 8.0 9.0 + """ + arr = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]) + + return DataFrame(arr, columns=["one", "two", "three"], index=["a", "b", "c"]) + + +@pytest.fixture(autouse=True, params=[0, 100], ids=["numexpr", "python"]) +def switch_numexpr_min_elements(request, monkeypatch): + with monkeypatch.context() as m: + m.setattr(expr, "_MIN_ELEMENTS", request.param) + yield request.param + + +class DummyElement: + def __init__(self, value, dtype) -> None: + self.value = value + self.dtype = np.dtype(dtype) + + def __array__(self, dtype=None, copy=None): + return np.array(self.value, dtype=self.dtype) + + def __str__(self) -> str: + return f"DummyElement({self.value}, {self.dtype})" + + def __repr__(self) -> str: + return str(self) + + def astype(self, dtype, copy=False): + self.dtype = dtype + return self + + def view(self, dtype): + return type(self)(self.value.view(dtype), dtype) + + def any(self, axis=None): + return bool(self.value) + + +# ------------------------------------------------------------------- +# Comparisons + + +class TestFrameComparisons: + # Specifically _not_ flex-comparisons + + def test_comparison_with_categorical_dtype(self): + # GH#12564 + + df = DataFrame({"A": ["foo", "bar", "baz"]}) + exp = DataFrame({"A": [True, False, False]}) + + res = df == "foo" + tm.assert_frame_equal(res, exp) + + # casting to categorical shouldn't affect the result + df["A"] = df["A"].astype("category") + + res = df == "foo" + tm.assert_frame_equal(res, exp) + + def test_frame_in_list(self): + # GH#12689 this should raise at the DataFrame level, not blocks + df = DataFrame( + np.random.default_rng(2).standard_normal((6, 4)), columns=list("ABCD") + ) + msg = "The truth value of a DataFrame is ambiguous" + with pytest.raises(ValueError, match=msg): + df in [None] + + 
@pytest.mark.parametrize( + "arg, arg2", + [ + [ + { + "a": np.random.default_rng(2).integers(10, size=10), + "b": pd.date_range("20010101", periods=10), + }, + { + "a": np.random.default_rng(2).integers(10, size=10), + "b": np.random.default_rng(2).integers(10, size=10), + }, + ], + [ + { + "a": np.random.default_rng(2).integers(10, size=10), + "b": np.random.default_rng(2).integers(10, size=10), + }, + { + "a": np.random.default_rng(2).integers(10, size=10), + "b": pd.date_range("20010101", periods=10), + }, + ], + [ + { + "a": pd.date_range("20010101", periods=10), + "b": pd.date_range("20010101", periods=10), + }, + { + "a": np.random.default_rng(2).integers(10, size=10), + "b": np.random.default_rng(2).integers(10, size=10), + }, + ], + [ + { + "a": np.random.default_rng(2).integers(10, size=10), + "b": pd.date_range("20010101", periods=10), + }, + { + "a": pd.date_range("20010101", periods=10), + "b": pd.date_range("20010101", periods=10), + }, + ], + ], + ) + def test_comparison_invalid(self, arg, arg2): + # GH4968 + # invalid date/int comparisons + x = DataFrame(arg) + y = DataFrame(arg2) + # we expect the result to match Series comparisons for + # == and !=, inequalities should raise + result = x == y + expected = DataFrame( + {col: x[col] == y[col] for col in x.columns}, + index=x.index, + columns=x.columns, + ) + tm.assert_frame_equal(result, expected) + + result = x != y + expected = DataFrame( + {col: x[col] != y[col] for col in x.columns}, + index=x.index, + columns=x.columns, + ) + tm.assert_frame_equal(result, expected) + + msgs = [ + r"Invalid comparison between dtype=datetime64\[ns\] and ndarray", + "invalid type promotion", + ( + # npdev 1.20.0 + r"The DTypes and " + r" do not have a common DType." 
+ ), + ] + msg = "|".join(msgs) + with pytest.raises(TypeError, match=msg): + x >= y + with pytest.raises(TypeError, match=msg): + x > y + with pytest.raises(TypeError, match=msg): + x < y + with pytest.raises(TypeError, match=msg): + x <= y + + @pytest.mark.parametrize( + "left, right", + [ + ("gt", "lt"), + ("lt", "gt"), + ("ge", "le"), + ("le", "ge"), + ("eq", "eq"), + ("ne", "ne"), + ], + ) + def test_timestamp_compare(self, left, right): + # make sure we can compare Timestamps on the right AND left hand side + # GH#4982 + df = DataFrame( + { + "dates1": pd.date_range("20010101", periods=10), + "dates2": pd.date_range("20010102", periods=10), + "intcol": np.random.default_rng(2).integers(1000000000, size=10), + "floatcol": np.random.default_rng(2).standard_normal(10), + "stringcol": [chr(100 + i) for i in range(10)], + } + ) + df.loc[np.random.default_rng(2).random(len(df)) > 0.5, "dates2"] = pd.NaT + left_f = getattr(operator, left) + right_f = getattr(operator, right) + + # no nats + if left in ["eq", "ne"]: + expected = left_f(df, pd.Timestamp("20010109")) + result = right_f(pd.Timestamp("20010109"), df) + tm.assert_frame_equal(result, expected) + else: + msg = ( + "'(<|>)=?' not supported between " + "instances of 'numpy.ndarray' and 'Timestamp'" + ) + with pytest.raises(TypeError, match=msg): + left_f(df, pd.Timestamp("20010109")) + with pytest.raises(TypeError, match=msg): + right_f(pd.Timestamp("20010109"), df) + # nats + if left in ["eq", "ne"]: + expected = left_f(df, pd.Timestamp("nat")) + result = right_f(pd.Timestamp("nat"), df) + tm.assert_frame_equal(result, expected) + else: + msg = ( + "'(<|>)=?' 
not supported between " + "instances of 'numpy.ndarray' and 'NaTType'" + ) + with pytest.raises(TypeError, match=msg): + left_f(df, pd.Timestamp("nat")) + with pytest.raises(TypeError, match=msg): + right_f(pd.Timestamp("nat"), df) + + @pytest.mark.xfail( + using_pyarrow_string_dtype(), reason="can't compare string and int" + ) + def test_mixed_comparison(self): + # GH#13128, GH#22163 != datetime64 vs non-dt64 should be False, + # not raise TypeError + # (this appears to be fixed before GH#22163, not sure when) + df = DataFrame([["1989-08-01", 1], ["1989-08-01", 2]]) + other = DataFrame([["a", "b"], ["c", "d"]]) + + result = df == other + assert not result.any().any() + + result = df != other + assert result.all().all() + + def test_df_boolean_comparison_error(self): + # GH#4576, GH#22880 + # comparing DataFrame against list/tuple with len(obj) matching + # len(df.columns) is supported as of GH#22800 + df = DataFrame(np.arange(6).reshape((3, 2))) + + expected = DataFrame([[False, False], [True, False], [False, False]]) + + result = df == (2, 2) + tm.assert_frame_equal(result, expected) + + result = df == [2, 2] + tm.assert_frame_equal(result, expected) + + def test_df_float_none_comparison(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((8, 3)), + index=range(8), + columns=["A", "B", "C"], + ) + + result = df.__eq__(None) + assert not result.any().any() + + def test_df_string_comparison(self): + df = DataFrame([{"a": 1, "b": "foo"}, {"a": 2, "b": "bar"}]) + mask_a = df.a > 1 + tm.assert_frame_equal(df[mask_a], df.loc[1:1, :]) + tm.assert_frame_equal(df[-mask_a], df.loc[0:0, :]) + + mask_b = df.b == "foo" + tm.assert_frame_equal(df[mask_b], df.loc[0:0, :]) + tm.assert_frame_equal(df[-mask_b], df.loc[1:1, :]) + + +class TestFrameFlexComparisons: + # TODO: test_bool_flex_frame needs a better name + @pytest.mark.parametrize("op", ["eq", "ne", "gt", "lt", "ge", "le"]) + def test_bool_flex_frame(self, op): + data = 
np.random.default_rng(2).standard_normal((5, 3)) + other_data = np.random.default_rng(2).standard_normal((5, 3)) + df = DataFrame(data) + other = DataFrame(other_data) + ndim_5 = np.ones(df.shape + (1, 3)) + + # DataFrame + assert df.eq(df).values.all() + assert not df.ne(df).values.any() + f = getattr(df, op) + o = getattr(operator, op) + # No NAs + tm.assert_frame_equal(f(other), o(df, other)) + # Unaligned + part_o = other.loc[3:, 1:].copy() + rs = f(part_o) + xp = o(df, part_o.reindex(index=df.index, columns=df.columns)) + tm.assert_frame_equal(rs, xp) + # ndarray + tm.assert_frame_equal(f(other.values), o(df, other.values)) + # scalar + tm.assert_frame_equal(f(0), o(df, 0)) + # NAs + msg = "Unable to coerce to Series/DataFrame" + tm.assert_frame_equal(f(np.nan), o(df, np.nan)) + with pytest.raises(ValueError, match=msg): + f(ndim_5) + + @pytest.mark.parametrize("box", [np.array, Series]) + def test_bool_flex_series(self, box): + # Series + # list/tuple + data = np.random.default_rng(2).standard_normal((5, 3)) + df = DataFrame(data) + idx_ser = box(np.random.default_rng(2).standard_normal(5)) + col_ser = box(np.random.default_rng(2).standard_normal(3)) + + idx_eq = df.eq(idx_ser, axis=0) + col_eq = df.eq(col_ser) + idx_ne = df.ne(idx_ser, axis=0) + col_ne = df.ne(col_ser) + tm.assert_frame_equal(col_eq, df == Series(col_ser)) + tm.assert_frame_equal(col_eq, -col_ne) + tm.assert_frame_equal(idx_eq, -idx_ne) + tm.assert_frame_equal(idx_eq, df.T.eq(idx_ser).T) + tm.assert_frame_equal(col_eq, df.eq(list(col_ser))) + tm.assert_frame_equal(idx_eq, df.eq(Series(idx_ser), axis=0)) + tm.assert_frame_equal(idx_eq, df.eq(list(idx_ser), axis=0)) + + idx_gt = df.gt(idx_ser, axis=0) + col_gt = df.gt(col_ser) + idx_le = df.le(idx_ser, axis=0) + col_le = df.le(col_ser) + + tm.assert_frame_equal(col_gt, df > Series(col_ser)) + tm.assert_frame_equal(col_gt, -col_le) + tm.assert_frame_equal(idx_gt, -idx_le) + tm.assert_frame_equal(idx_gt, df.T.gt(idx_ser).T) + + idx_ge = 
df.ge(idx_ser, axis=0) + col_ge = df.ge(col_ser) + idx_lt = df.lt(idx_ser, axis=0) + col_lt = df.lt(col_ser) + tm.assert_frame_equal(col_ge, df >= Series(col_ser)) + tm.assert_frame_equal(col_ge, -col_lt) + tm.assert_frame_equal(idx_ge, -idx_lt) + tm.assert_frame_equal(idx_ge, df.T.ge(idx_ser).T) + + idx_ser = Series(np.random.default_rng(2).standard_normal(5)) + col_ser = Series(np.random.default_rng(2).standard_normal(3)) + + def test_bool_flex_frame_na(self): + df = DataFrame(np.random.default_rng(2).standard_normal((5, 3))) + # NA + df.loc[0, 0] = np.nan + rs = df.eq(df) + assert not rs.loc[0, 0] + rs = df.ne(df) + assert rs.loc[0, 0] + rs = df.gt(df) + assert not rs.loc[0, 0] + rs = df.lt(df) + assert not rs.loc[0, 0] + rs = df.ge(df) + assert not rs.loc[0, 0] + rs = df.le(df) + assert not rs.loc[0, 0] + + def test_bool_flex_frame_complex_dtype(self): + # complex + arr = np.array([np.nan, 1, 6, np.nan]) + arr2 = np.array([2j, np.nan, 7, None]) + df = DataFrame({"a": arr}) + df2 = DataFrame({"a": arr2}) + + msg = "|".join( + [ + "'>' not supported between instances of '.*' and 'complex'", + r"unorderable types: .*complex\(\)", # PY35 + ] + ) + with pytest.raises(TypeError, match=msg): + # inequalities are not well-defined for complex numbers + df.gt(df2) + with pytest.raises(TypeError, match=msg): + # regression test that we get the same behavior for Series + df["a"].gt(df2["a"]) + with pytest.raises(TypeError, match=msg): + # Check that we match numpy behavior here + df.values > df2.values + + rs = df.ne(df2) + assert rs.values.all() + + arr3 = np.array([2j, np.nan, None]) + df3 = DataFrame({"a": arr3}) + + with pytest.raises(TypeError, match=msg): + # inequalities are not well-defined for complex numbers + df3.gt(2j) + with pytest.raises(TypeError, match=msg): + # regression test that we get the same behavior for Series + df3["a"].gt(2j) + with pytest.raises(TypeError, match=msg): + # Check that we match numpy behavior here + df3.values > 2j + + def 
test_bool_flex_frame_object_dtype(self): + # corner, dtype=object + df1 = DataFrame({"col": ["foo", np.nan, "bar"]}, dtype=object) + df2 = DataFrame({"col": ["foo", datetime.now(), "bar"]}, dtype=object) + result = df1.ne(df2) + exp = DataFrame({"col": [False, True, False]}) + tm.assert_frame_equal(result, exp) + + def test_flex_comparison_nat(self): + # GH 15697, GH 22163 df.eq(pd.NaT) should behave like df == pd.NaT, + # and _definitely_ not be NaN + df = DataFrame([pd.NaT]) + + result = df == pd.NaT + # result.iloc[0, 0] is a np.bool_ object + assert result.iloc[0, 0].item() is False + + result = df.eq(pd.NaT) + assert result.iloc[0, 0].item() is False + + result = df != pd.NaT + assert result.iloc[0, 0].item() is True + + result = df.ne(pd.NaT) + assert result.iloc[0, 0].item() is True + + @pytest.mark.parametrize("opname", ["eq", "ne", "gt", "lt", "ge", "le"]) + def test_df_flex_cmp_constant_return_types(self, opname): + # GH 15077, non-empty DataFrame + df = DataFrame({"x": [1, 2, 3], "y": [1.0, 2.0, 3.0]}) + const = 2 + + result = getattr(df, opname)(const).dtypes.value_counts() + tm.assert_series_equal( + result, Series([2], index=[np.dtype(bool)], name="count") + ) + + @pytest.mark.parametrize("opname", ["eq", "ne", "gt", "lt", "ge", "le"]) + def test_df_flex_cmp_constant_return_types_empty(self, opname): + # GH 15077 empty DataFrame + df = DataFrame({"x": [1, 2, 3], "y": [1.0, 2.0, 3.0]}) + const = 2 + + empty = df.iloc[:0] + result = getattr(empty, opname)(const).dtypes.value_counts() + tm.assert_series_equal( + result, Series([2], index=[np.dtype(bool)], name="count") + ) + + def test_df_flex_cmp_ea_dtype_with_ndarray_series(self): + ii = pd.IntervalIndex.from_breaks([1, 2, 3]) + df = DataFrame({"A": ii, "B": ii}) + + ser = Series([0, 0]) + res = df.eq(ser, axis=0) + + expected = DataFrame({"A": [False, False], "B": [False, False]}) + tm.assert_frame_equal(res, expected) + + ser2 = Series([1, 2], index=["A", "B"]) + res2 = df.eq(ser2, axis=1) + 
tm.assert_frame_equal(res2, expected) + + +# ------------------------------------------------------------------- +# Arithmetic + + +class TestFrameFlexArithmetic: + def test_floordiv_axis0(self): + # make sure we df.floordiv(ser, axis=0) matches column-wise result + arr = np.arange(3) + ser = Series(arr) + df = DataFrame({"A": ser, "B": ser}) + + result = df.floordiv(ser, axis=0) + + expected = DataFrame({col: df[col] // ser for col in df.columns}) + + tm.assert_frame_equal(result, expected) + + result2 = df.floordiv(ser.values, axis=0) + tm.assert_frame_equal(result2, expected) + + def test_df_add_td64_columnwise(self): + # GH 22534 Check that column-wise addition broadcasts correctly + dti = pd.date_range("2016-01-01", periods=10) + tdi = pd.timedelta_range("1", periods=10) + tser = Series(tdi) + df = DataFrame({0: dti, 1: tdi}) + + result = df.add(tser, axis=0) + expected = DataFrame({0: dti + tdi, 1: tdi + tdi}) + tm.assert_frame_equal(result, expected) + + def test_df_add_flex_filled_mixed_dtypes(self): + # GH 19611 + dti = pd.date_range("2016-01-01", periods=3) + ser = Series(["1 Day", "NaT", "2 Days"], dtype="timedelta64[ns]") + df = DataFrame({"A": dti, "B": ser}) + other = DataFrame({"A": ser, "B": ser}) + fill = pd.Timedelta(days=1).to_timedelta64() + result = df.add(other, fill_value=fill) + + expected = DataFrame( + { + "A": Series( + ["2016-01-02", "2016-01-03", "2016-01-05"], dtype="datetime64[ns]" + ), + "B": ser * 2, + } + ) + tm.assert_frame_equal(result, expected) + + def test_arith_flex_frame( + self, all_arithmetic_operators, float_frame, mixed_float_frame + ): + # one instance of parametrized fixture + op = all_arithmetic_operators + + def f(x, y): + # r-versions not in operator-stdlib; get op without "r" and invert + if op.startswith("__r"): + return getattr(operator, op.replace("__r", "__"))(y, x) + return getattr(operator, op)(x, y) + + result = getattr(float_frame, op)(2 * float_frame) + expected = f(float_frame, 2 * float_frame) + 
tm.assert_frame_equal(result, expected) + + # vs mix float + result = getattr(mixed_float_frame, op)(2 * mixed_float_frame) + expected = f(mixed_float_frame, 2 * mixed_float_frame) + tm.assert_frame_equal(result, expected) + _check_mixed_float(result, dtype={"C": None}) + + @pytest.mark.parametrize("op", ["__add__", "__sub__", "__mul__"]) + def test_arith_flex_frame_mixed( + self, + op, + int_frame, + mixed_int_frame, + mixed_float_frame, + switch_numexpr_min_elements, + ): + f = getattr(operator, op) + + # vs mix int + result = getattr(mixed_int_frame, op)(2 + mixed_int_frame) + expected = f(mixed_int_frame, 2 + mixed_int_frame) + + # no overflow in the uint + dtype = None + if op in ["__sub__"]: + dtype = {"B": "uint64", "C": None} + elif op in ["__add__", "__mul__"]: + dtype = {"C": None} + if expr.USE_NUMEXPR and switch_numexpr_min_elements == 0: + # when using numexpr, the casting rules are slightly different: + # in the `2 + mixed_int_frame` operation, int32 column becomes + # and int64 column (not preserving dtype in operation with Python + # scalar), and then the int32/int64 combo results in int64 result + dtype["A"] = (2 + mixed_int_frame)["A"].dtype + tm.assert_frame_equal(result, expected) + _check_mixed_int(result, dtype=dtype) + + # vs mix float + result = getattr(mixed_float_frame, op)(2 * mixed_float_frame) + expected = f(mixed_float_frame, 2 * mixed_float_frame) + tm.assert_frame_equal(result, expected) + _check_mixed_float(result, dtype={"C": None}) + + # vs plain int + result = getattr(int_frame, op)(2 * int_frame) + expected = f(int_frame, 2 * int_frame) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("dim", range(3, 6)) + def test_arith_flex_frame_raise(self, all_arithmetic_operators, float_frame, dim): + # one instance of parametrized fixture + op = all_arithmetic_operators + + # Check that arrays with dim >= 3 raise + arr = np.ones((1,) * dim) + msg = "Unable to coerce to Series/DataFrame" + with 
pytest.raises(ValueError, match=msg): + getattr(float_frame, op)(arr) + + def test_arith_flex_frame_corner(self, float_frame): + const_add = float_frame.add(1) + tm.assert_frame_equal(const_add, float_frame + 1) + + # corner cases + result = float_frame.add(float_frame[:0]) + expected = float_frame.sort_index() * np.nan + tm.assert_frame_equal(result, expected) + + result = float_frame[:0].add(float_frame) + expected = float_frame.sort_index() * np.nan + tm.assert_frame_equal(result, expected) + + with pytest.raises(NotImplementedError, match="fill_value"): + float_frame.add(float_frame.iloc[0], fill_value=3) + + with pytest.raises(NotImplementedError, match="fill_value"): + float_frame.add(float_frame.iloc[0], axis="index", fill_value=3) + + @pytest.mark.parametrize("op", ["add", "sub", "mul", "mod"]) + def test_arith_flex_series_ops(self, simple_frame, op): + # after arithmetic refactor, add truediv here + df = simple_frame + + row = df.xs("a") + col = df["two"] + f = getattr(df, op) + op = getattr(operator, op) + tm.assert_frame_equal(f(row), op(df, row)) + tm.assert_frame_equal(f(col, axis=0), op(df.T, col).T) + + def test_arith_flex_series(self, simple_frame): + df = simple_frame + + row = df.xs("a") + col = df["two"] + # special case for some reason + tm.assert_frame_equal(df.add(row, axis=None), df + row) + + # cases which will be refactored after big arithmetic refactor + tm.assert_frame_equal(df.div(row), df / row) + tm.assert_frame_equal(df.div(col, axis=0), (df.T / col).T) + + @pytest.mark.parametrize("dtype", ["int64", "float64"]) + def test_arith_flex_series_broadcasting(self, dtype): + # broadcasting issue in GH 7325 + df = DataFrame(np.arange(3 * 2).reshape((3, 2)), dtype=dtype) + expected = DataFrame([[np.nan, np.inf], [1.0, 1.5], [1.0, 1.25]]) + result = df.div(df[0], axis="index") + tm.assert_frame_equal(result, expected) + + def test_arith_flex_zero_len_raises(self): + # GH 19522 passing fill_value to frame flex arith methods should + # raise 
even in the zero-length special cases + ser_len0 = Series([], dtype=object) + df_len0 = DataFrame(columns=["A", "B"]) + df = DataFrame([[1, 2], [3, 4]], columns=["A", "B"]) + + with pytest.raises(NotImplementedError, match="fill_value"): + df.add(ser_len0, fill_value="E") + + with pytest.raises(NotImplementedError, match="fill_value"): + df_len0.sub(df["A"], axis=None, fill_value=3) + + def test_flex_add_scalar_fill_value(self): + # GH#12723 + dat = np.array([0, 1, np.nan, 3, 4, 5], dtype="float") + df = DataFrame({"foo": dat}, index=range(6)) + + exp = df.fillna(0).add(2) + res = df.add(2, fill_value=0) + tm.assert_frame_equal(res, exp) + + def test_sub_alignment_with_duplicate_index(self): + # GH#5185 dup aligning operations should work + df1 = DataFrame([1, 2, 3, 4, 5], index=[1, 2, 1, 2, 3]) + df2 = DataFrame([1, 2, 3], index=[1, 2, 3]) + expected = DataFrame([0, 2, 0, 2, 2], index=[1, 1, 2, 2, 3]) + result = df1.sub(df2) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("op", ["__add__", "__mul__", "__sub__", "__truediv__"]) + def test_arithmetic_with_duplicate_columns(self, op): + # operations + df = DataFrame({"A": np.arange(10), "B": np.random.default_rng(2).random(10)}) + expected = getattr(df, op)(df) + expected.columns = ["A", "A"] + df.columns = ["A", "A"] + result = getattr(df, op)(df) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("level", [0, None]) + def test_broadcast_multiindex(self, level): + # GH34388 + df1 = DataFrame({"A": [0, 1, 2], "B": [1, 2, 3]}) + df1.columns = df1.columns.set_names("L1") + + df2 = DataFrame({("A", "C"): [0, 0, 0], ("A", "D"): [0, 0, 0]}) + df2.columns = df2.columns.set_names(["L1", "L2"]) + + result = df1.add(df2, level=level) + expected = DataFrame({("A", "C"): [0, 1, 2], ("A", "D"): [0, 1, 2]}) + expected.columns = expected.columns.set_names(["L1", "L2"]) + + tm.assert_frame_equal(result, expected) + + def test_frame_multiindex_operations(self): + # GH 43321 + df = 
DataFrame( + {2010: [1, 2, 3], 2020: [3, 4, 5]}, + index=MultiIndex.from_product( + [["a"], ["b"], [0, 1, 2]], names=["scen", "mod", "id"] + ), + ) + + series = Series( + [0.4], + index=MultiIndex.from_product([["b"], ["a"]], names=["mod", "scen"]), + ) + + expected = DataFrame( + {2010: [1.4, 2.4, 3.4], 2020: [3.4, 4.4, 5.4]}, + index=MultiIndex.from_product( + [["a"], ["b"], [0, 1, 2]], names=["scen", "mod", "id"] + ), + ) + result = df.add(series, axis=0) + + tm.assert_frame_equal(result, expected) + + def test_frame_multiindex_operations_series_index_to_frame_index(self): + # GH 43321 + df = DataFrame( + {2010: [1], 2020: [3]}, + index=MultiIndex.from_product([["a"], ["b"]], names=["scen", "mod"]), + ) + + series = Series( + [10.0, 20.0, 30.0], + index=MultiIndex.from_product( + [["a"], ["b"], [0, 1, 2]], names=["scen", "mod", "id"] + ), + ) + + expected = DataFrame( + {2010: [11.0, 21, 31.0], 2020: [13.0, 23.0, 33.0]}, + index=MultiIndex.from_product( + [["a"], ["b"], [0, 1, 2]], names=["scen", "mod", "id"] + ), + ) + result = df.add(series, axis=0) + + tm.assert_frame_equal(result, expected) + + def test_frame_multiindex_operations_no_align(self): + df = DataFrame( + {2010: [1, 2, 3], 2020: [3, 4, 5]}, + index=MultiIndex.from_product( + [["a"], ["b"], [0, 1, 2]], names=["scen", "mod", "id"] + ), + ) + + series = Series( + [0.4], + index=MultiIndex.from_product([["c"], ["a"]], names=["mod", "scen"]), + ) + + expected = DataFrame( + {2010: np.nan, 2020: np.nan}, + index=MultiIndex.from_tuples( + [ + ("a", "b", 0), + ("a", "b", 1), + ("a", "b", 2), + ("a", "c", np.nan), + ], + names=["scen", "mod", "id"], + ), + ) + result = df.add(series, axis=0) + + tm.assert_frame_equal(result, expected) + + def test_frame_multiindex_operations_part_align(self): + df = DataFrame( + {2010: [1, 2, 3], 2020: [3, 4, 5]}, + index=MultiIndex.from_tuples( + [ + ("a", "b", 0), + ("a", "b", 1), + ("a", "c", 2), + ], + names=["scen", "mod", "id"], + ), + ) + + series = Series( + [0.4], 
+ index=MultiIndex.from_product([["b"], ["a"]], names=["mod", "scen"]), + ) + + expected = DataFrame( + {2010: [1.4, 2.4, np.nan], 2020: [3.4, 4.4, np.nan]}, + index=MultiIndex.from_tuples( + [ + ("a", "b", 0), + ("a", "b", 1), + ("a", "c", 2), + ], + names=["scen", "mod", "id"], + ), + ) + result = df.add(series, axis=0) + + tm.assert_frame_equal(result, expected) + + +class TestFrameArithmetic: + def test_td64_op_nat_casting(self): + # Make sure we don't accidentally treat timedelta64(NaT) as datetime64 + # when calling dispatch_to_series in DataFrame arithmetic + ser = Series(["NaT", "NaT"], dtype="timedelta64[ns]") + df = DataFrame([[1, 2], [3, 4]]) + + result = df * ser + expected = DataFrame({0: ser, 1: ser}) + tm.assert_frame_equal(result, expected) + + def test_df_add_2d_array_rowlike_broadcasts(self): + # GH#23000 + arr = np.arange(6).reshape(3, 2) + df = DataFrame(arr, columns=[True, False], index=["A", "B", "C"]) + + rowlike = arr[[1], :] # shape --> (1, ncols) + assert rowlike.shape == (1, df.shape[1]) + + expected = DataFrame( + [[2, 4], [4, 6], [6, 8]], + columns=df.columns, + index=df.index, + # specify dtype explicitly to avoid failing + # on 32bit builds + dtype=arr.dtype, + ) + result = df + rowlike + tm.assert_frame_equal(result, expected) + result = rowlike + df + tm.assert_frame_equal(result, expected) + + def test_df_add_2d_array_collike_broadcasts(self): + # GH#23000 + arr = np.arange(6).reshape(3, 2) + df = DataFrame(arr, columns=[True, False], index=["A", "B", "C"]) + + collike = arr[:, [1]] # shape --> (nrows, 1) + assert collike.shape == (df.shape[0], 1) + + expected = DataFrame( + [[1, 2], [5, 6], [9, 10]], + columns=df.columns, + index=df.index, + # specify dtype explicitly to avoid failing + # on 32bit builds + dtype=arr.dtype, + ) + result = df + collike + tm.assert_frame_equal(result, expected) + result = collike + df + tm.assert_frame_equal(result, expected) + + def test_df_arith_2d_array_rowlike_broadcasts( + self, request, 
all_arithmetic_operators, using_array_manager + ): + # GH#23000 + opname = all_arithmetic_operators + + if using_array_manager and opname in ("__rmod__", "__rfloordiv__"): + # TODO(ArrayManager) decide on dtypes + td.mark_array_manager_not_yet_implemented(request) + + arr = np.arange(6).reshape(3, 2) + df = DataFrame(arr, columns=[True, False], index=["A", "B", "C"]) + + rowlike = arr[[1], :] # shape --> (1, ncols) + assert rowlike.shape == (1, df.shape[1]) + + exvals = [ + getattr(df.loc["A"], opname)(rowlike.squeeze()), + getattr(df.loc["B"], opname)(rowlike.squeeze()), + getattr(df.loc["C"], opname)(rowlike.squeeze()), + ] + + expected = DataFrame(exvals, columns=df.columns, index=df.index) + + result = getattr(df, opname)(rowlike) + tm.assert_frame_equal(result, expected) + + def test_df_arith_2d_array_collike_broadcasts( + self, request, all_arithmetic_operators, using_array_manager + ): + # GH#23000 + opname = all_arithmetic_operators + + if using_array_manager and opname in ("__rmod__", "__rfloordiv__"): + # TODO(ArrayManager) decide on dtypes + td.mark_array_manager_not_yet_implemented(request) + + arr = np.arange(6).reshape(3, 2) + df = DataFrame(arr, columns=[True, False], index=["A", "B", "C"]) + + collike = arr[:, [1]] # shape --> (nrows, 1) + assert collike.shape == (df.shape[0], 1) + + exvals = { + True: getattr(df[True], opname)(collike.squeeze()), + False: getattr(df[False], opname)(collike.squeeze()), + } + + dtype = None + if opname in ["__rmod__", "__rfloordiv__"]: + # Series ops may return mixed int/float dtypes in cases where + # DataFrame op will return all-float. 
So we upcast `expected` + dtype = np.common_type(*(x.values for x in exvals.values())) + + expected = DataFrame(exvals, columns=df.columns, index=df.index, dtype=dtype) + + result = getattr(df, opname)(collike) + tm.assert_frame_equal(result, expected) + + def test_df_bool_mul_int(self): + # GH 22047, GH 22163 multiplication by 1 should result in int dtype, + # not object dtype + df = DataFrame([[False, True], [False, False]]) + result = df * 1 + + # On appveyor this comes back as np.int32 instead of np.int64, + # so we check dtype.kind instead of just dtype + kinds = result.dtypes.apply(lambda x: x.kind) + assert (kinds == "i").all() + + result = 1 * df + kinds = result.dtypes.apply(lambda x: x.kind) + assert (kinds == "i").all() + + def test_arith_mixed(self): + left = DataFrame({"A": ["a", "b", "c"], "B": [1, 2, 3]}) + + result = left + left + expected = DataFrame({"A": ["aa", "bb", "cc"], "B": [2, 4, 6]}) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("col", ["A", "B"]) + def test_arith_getitem_commute(self, all_arithmetic_functions, col): + df = DataFrame({"A": [1.1, 3.3], "B": [2.5, -3.9]}) + result = all_arithmetic_functions(df, 1)[col] + expected = all_arithmetic_functions(df[col], 1) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "values", [[1, 2], (1, 2), np.array([1, 2]), range(1, 3), deque([1, 2])] + ) + def test_arith_alignment_non_pandas_object(self, values): + # GH#17901 + df = DataFrame({"A": [1, 1], "B": [1, 1]}) + expected = DataFrame({"A": [2, 2], "B": [3, 3]}) + result = df + values + tm.assert_frame_equal(result, expected) + + def test_arith_non_pandas_object(self): + df = DataFrame( + np.arange(1, 10, dtype="f8").reshape(3, 3), + columns=["one", "two", "three"], + index=["a", "b", "c"], + ) + + val1 = df.xs("a").values + added = DataFrame(df.values + val1, index=df.index, columns=df.columns) + tm.assert_frame_equal(df + val1, added) + + added = DataFrame((df.values.T + val1).T, 
index=df.index, columns=df.columns) + tm.assert_frame_equal(df.add(val1, axis=0), added) + + val2 = list(df["two"]) + + added = DataFrame(df.values + val2, index=df.index, columns=df.columns) + tm.assert_frame_equal(df + val2, added) + + added = DataFrame((df.values.T + val2).T, index=df.index, columns=df.columns) + tm.assert_frame_equal(df.add(val2, axis="index"), added) + + val3 = np.random.default_rng(2).random(df.shape) + added = DataFrame(df.values + val3, index=df.index, columns=df.columns) + tm.assert_frame_equal(df.add(val3), added) + + def test_operations_with_interval_categories_index(self, all_arithmetic_operators): + # GH#27415 + op = all_arithmetic_operators + ind = pd.CategoricalIndex(pd.interval_range(start=0.0, end=2.0)) + data = [1, 2] + df = DataFrame([data], columns=ind) + num = 10 + result = getattr(df, op)(num) + expected = DataFrame([[getattr(n, op)(num) for n in data]], columns=ind) + tm.assert_frame_equal(result, expected) + + def test_frame_with_frame_reindex(self): + # GH#31623 + df = DataFrame( + { + "foo": [pd.Timestamp("2019"), pd.Timestamp("2020")], + "bar": [pd.Timestamp("2018"), pd.Timestamp("2021")], + }, + columns=["foo", "bar"], + dtype="M8[ns]", + ) + df2 = df[["foo"]] + + result = df - df2 + + expected = DataFrame( + {"foo": [pd.Timedelta(0), pd.Timedelta(0)], "bar": [np.nan, np.nan]}, + columns=["bar", "foo"], + ) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "value, dtype", + [ + (1, "i8"), + (1.0, "f8"), + (2**63, "f8"), + (1j, "complex128"), + (2**63, "complex128"), + (True, "bool"), + (np.timedelta64(20, "ns"), " b + tm.assert_frame_equal(result, expected) + + result = df.values > b + tm.assert_numpy_array_equal(result, expected.values) + + msg1d = "Unable to coerce to Series, length must be 2: given 3" + msg2d = "Unable to coerce to DataFrame, shape must be" + msg2db = "operands could not be broadcast together with shapes" + with pytest.raises(ValueError, match=msg1d): + # wrong shape + df > lst 
+ + with pytest.raises(ValueError, match=msg1d): + # wrong shape + df > tup + + # broadcasts like ndarray (GH#23000) + result = df > b_r + tm.assert_frame_equal(result, expected) + + result = df.values > b_r + tm.assert_numpy_array_equal(result, expected.values) + + with pytest.raises(ValueError, match=msg2d): + df > b_c + + with pytest.raises(ValueError, match=msg2db): + df.values > b_c + + # == + expected = DataFrame([[False, False], [True, False], [False, False]]) + result = df == b + tm.assert_frame_equal(result, expected) + + with pytest.raises(ValueError, match=msg1d): + df == lst + + with pytest.raises(ValueError, match=msg1d): + df == tup + + # broadcasts like ndarray (GH#23000) + result = df == b_r + tm.assert_frame_equal(result, expected) + + result = df.values == b_r + tm.assert_numpy_array_equal(result, expected.values) + + with pytest.raises(ValueError, match=msg2d): + df == b_c + + assert df.values.shape != b_c.shape + + # with alignment + df = DataFrame( + np.arange(6).reshape((3, 2)), columns=list("AB"), index=list("abc") + ) + expected.index = df.index + expected.columns = df.columns + + with pytest.raises(ValueError, match=msg1d): + df == lst + + with pytest.raises(ValueError, match=msg1d): + df == tup + + def test_inplace_ops_alignment(self): + # inplace ops / ops alignment + # GH 8511 + + columns = list("abcdefg") + X_orig = DataFrame( + np.arange(10 * len(columns)).reshape(-1, len(columns)), + columns=columns, + index=range(10), + ) + Z = 100 * X_orig.iloc[:, 1:-1].copy() + block1 = list("bedcf") + subs = list("bcdef") + + # add + X = X_orig.copy() + result1 = (X[block1] + Z).reindex(columns=subs) + + X[block1] += Z + result2 = X.reindex(columns=subs) + + X = X_orig.copy() + result3 = (X[block1] + Z[block1]).reindex(columns=subs) + + X[block1] += Z[block1] + result4 = X.reindex(columns=subs) + + tm.assert_frame_equal(result1, result2) + tm.assert_frame_equal(result1, result3) + tm.assert_frame_equal(result1, result4) + + # sub + X = 
X_orig.copy() + result1 = (X[block1] - Z).reindex(columns=subs) + + X[block1] -= Z + result2 = X.reindex(columns=subs) + + X = X_orig.copy() + result3 = (X[block1] - Z[block1]).reindex(columns=subs) + + X[block1] -= Z[block1] + result4 = X.reindex(columns=subs) + + tm.assert_frame_equal(result1, result2) + tm.assert_frame_equal(result1, result3) + tm.assert_frame_equal(result1, result4) + + def test_inplace_ops_identity(self): + # GH 5104 + # make sure that we are actually changing the object + s_orig = Series([1, 2, 3]) + df_orig = DataFrame( + np.random.default_rng(2).integers(0, 5, size=10).reshape(-1, 5) + ) + + # no dtype change + s = s_orig.copy() + s2 = s + s += 1 + tm.assert_series_equal(s, s2) + tm.assert_series_equal(s_orig + 1, s) + assert s is s2 + assert s._mgr is s2._mgr + + df = df_orig.copy() + df2 = df + df += 1 + tm.assert_frame_equal(df, df2) + tm.assert_frame_equal(df_orig + 1, df) + assert df is df2 + assert df._mgr is df2._mgr + + # dtype change + s = s_orig.copy() + s2 = s + s += 1.5 + tm.assert_series_equal(s, s2) + tm.assert_series_equal(s_orig + 1.5, s) + + df = df_orig.copy() + df2 = df + df += 1.5 + tm.assert_frame_equal(df, df2) + tm.assert_frame_equal(df_orig + 1.5, df) + assert df is df2 + assert df._mgr is df2._mgr + + # mixed dtype + arr = np.random.default_rng(2).integers(0, 10, size=5) + df_orig = DataFrame({"A": arr.copy(), "B": "foo"}) + df = df_orig.copy() + df2 = df + df["A"] += 1 + expected = DataFrame({"A": arr.copy() + 1, "B": "foo"}) + tm.assert_frame_equal(df, expected) + tm.assert_frame_equal(df2, expected) + assert df._mgr is df2._mgr + + df = df_orig.copy() + df2 = df + df["A"] += 1.5 + expected = DataFrame({"A": arr.copy() + 1.5, "B": "foo"}) + tm.assert_frame_equal(df, expected) + tm.assert_frame_equal(df2, expected) + assert df._mgr is df2._mgr + + @pytest.mark.parametrize( + "op", + [ + "add", + "and", + pytest.param( + "div", + marks=pytest.mark.xfail( + raises=AttributeError, reason="__idiv__ not implemented" + 
), + ), + "floordiv", + "mod", + "mul", + "or", + "pow", + "sub", + "truediv", + "xor", + ], + ) + def test_inplace_ops_identity2(self, op): + df = DataFrame({"a": [1.0, 2.0, 3.0], "b": [1, 2, 3]}) + + operand = 2 + if op in ("and", "or", "xor"): + # cannot use floats for boolean ops + df["a"] = [True, False, True] + + df_copy = df.copy() + iop = f"__i{op}__" + op = f"__{op}__" + + # no id change and value is correct + getattr(df, iop)(operand) + expected = getattr(df_copy, op)(operand) + tm.assert_frame_equal(df, expected) + expected = id(df) + assert id(df) == expected + + @pytest.mark.parametrize( + "val", + [ + [1, 2, 3], + (1, 2, 3), + np.array([1, 2, 3], dtype=np.int64), + range(1, 4), + ], + ) + def test_alignment_non_pandas(self, val): + index = ["A", "B", "C"] + columns = ["X", "Y", "Z"] + df = DataFrame( + np.random.default_rng(2).standard_normal((3, 3)), + index=index, + columns=columns, + ) + + align = DataFrame._align_for_op + + expected = DataFrame({"X": val, "Y": val, "Z": val}, index=df.index) + tm.assert_frame_equal(align(df, val, axis=0)[1], expected) + + expected = DataFrame( + {"X": [1, 1, 1], "Y": [2, 2, 2], "Z": [3, 3, 3]}, index=df.index + ) + tm.assert_frame_equal(align(df, val, axis=1)[1], expected) + + @pytest.mark.parametrize("val", [[1, 2], (1, 2), np.array([1, 2]), range(1, 3)]) + def test_alignment_non_pandas_length_mismatch(self, val): + index = ["A", "B", "C"] + columns = ["X", "Y", "Z"] + df = DataFrame( + np.random.default_rng(2).standard_normal((3, 3)), + index=index, + columns=columns, + ) + + align = DataFrame._align_for_op + # length mismatch + msg = "Unable to coerce to Series, length must be 3: given 2" + with pytest.raises(ValueError, match=msg): + align(df, val, axis=0) + + with pytest.raises(ValueError, match=msg): + align(df, val, axis=1) + + def test_alignment_non_pandas_index_columns(self): + index = ["A", "B", "C"] + columns = ["X", "Y", "Z"] + df = DataFrame( + np.random.default_rng(2).standard_normal((3, 3)), + 
index=index, + columns=columns, + ) + + align = DataFrame._align_for_op + val = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) + tm.assert_frame_equal( + align(df, val, axis=0)[1], + DataFrame(val, index=df.index, columns=df.columns), + ) + tm.assert_frame_equal( + align(df, val, axis=1)[1], + DataFrame(val, index=df.index, columns=df.columns), + ) + + # shape mismatch + msg = "Unable to coerce to DataFrame, shape must be" + val = np.array([[1, 2, 3], [4, 5, 6]]) + with pytest.raises(ValueError, match=msg): + align(df, val, axis=0) + + with pytest.raises(ValueError, match=msg): + align(df, val, axis=1) + + val = np.zeros((3, 3, 3)) + msg = re.escape( + "Unable to coerce to Series/DataFrame, dimension must be <= 2: (3, 3, 3)" + ) + with pytest.raises(ValueError, match=msg): + align(df, val, axis=0) + with pytest.raises(ValueError, match=msg): + align(df, val, axis=1) + + def test_no_warning(self, all_arithmetic_operators): + df = DataFrame({"A": [0.0, 0.0], "B": [0.0, None]}) + b = df["B"] + with tm.assert_produces_warning(None): + getattr(df, all_arithmetic_operators)(b) + + def test_dunder_methods_binary(self, all_arithmetic_operators): + # GH#??? 
frame.__foo__ should only accept one argument + df = DataFrame({"A": [0.0, 0.0], "B": [0.0, None]}) + b = df["B"] + with pytest.raises(TypeError, match="takes 2 positional arguments"): + getattr(df, all_arithmetic_operators)(b, 0) + + def test_align_int_fill_bug(self): + # GH#910 + X = np.arange(10 * 10, dtype="float64").reshape(10, 10) + Y = np.ones((10, 1), dtype=int) + + df1 = DataFrame(X) + df1["0.X"] = Y.squeeze() + + df2 = df1.astype(float) + + result = df1 - df1.mean() + expected = df2 - df2.mean() + tm.assert_frame_equal(result, expected) + + +def test_pow_with_realignment(): + # GH#32685 pow has special semantics for operating with null values + left = DataFrame({"A": [0, 1, 2]}) + right = DataFrame(index=[0, 1, 2]) + + result = left**right + expected = DataFrame({"A": [np.nan, 1.0, np.nan]}) + tm.assert_frame_equal(result, expected) + + +def test_dataframe_series_extension_dtypes(): + # https://github.com/pandas-dev/pandas/issues/34311 + df = DataFrame( + np.random.default_rng(2).integers(0, 100, (10, 3)), columns=["a", "b", "c"] + ) + ser = Series([1, 2, 3], index=["a", "b", "c"]) + + expected = df.to_numpy("int64") + ser.to_numpy("int64").reshape(-1, 3) + expected = DataFrame(expected, columns=df.columns, dtype="Int64") + + df_ea = df.astype("Int64") + result = df_ea + ser + tm.assert_frame_equal(result, expected) + result = df_ea + ser.astype("Int64") + tm.assert_frame_equal(result, expected) + + +def test_dataframe_blockwise_slicelike(): + # GH#34367 + arr = np.random.default_rng(2).integers(0, 1000, (100, 10)) + df1 = DataFrame(arr) + # Explicit cast to float to avoid implicit cast when setting nan + df2 = df1.copy().astype({1: "float", 3: "float", 7: "float"}) + df2.iloc[0, [1, 3, 7]] = np.nan + + # Explicit cast to float to avoid implicit cast when setting nan + df3 = df1.copy().astype({5: "float"}) + df3.iloc[0, [5]] = np.nan + + # Explicit cast to float to avoid implicit cast when setting nan + df4 = df1.copy().astype({2: "float", 3: "float", 4: 
"float"}) + df4.iloc[0, np.arange(2, 5)] = np.nan + # Explicit cast to float to avoid implicit cast when setting nan + df5 = df1.copy().astype({4: "float", 5: "float", 6: "float"}) + df5.iloc[0, np.arange(4, 7)] = np.nan + + for left, right in [(df1, df2), (df2, df3), (df4, df5)]: + res = left + right + + expected = DataFrame({i: left[i] + right[i] for i in left.columns}) + tm.assert_frame_equal(res, expected) + + +@pytest.mark.parametrize( + "df, col_dtype", + [ + (DataFrame([[1.0, 2.0], [4.0, 5.0]], columns=list("ab")), "float64"), + ( + DataFrame([[1.0, "b"], [4.0, "b"]], columns=list("ab")).astype( + {"b": object} + ), + "object", + ), + ], +) +def test_dataframe_operation_with_non_numeric_types(df, col_dtype): + # GH #22663 + expected = DataFrame([[0.0, np.nan], [3.0, np.nan]], columns=list("ab")) + expected = expected.astype({"b": col_dtype}) + result = df + Series([-1.0], index=list("a")) + tm.assert_frame_equal(result, expected) + + +def test_arith_reindex_with_duplicates(): + # https://github.com/pandas-dev/pandas/issues/35194 + df1 = DataFrame(data=[[0]], columns=["second"]) + df2 = DataFrame(data=[[0, 0, 0]], columns=["first", "second", "second"]) + result = df1 + df2 + expected = DataFrame([[np.nan, 0, 0]], columns=["first", "second", "second"]) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("to_add", [[Series([1, 1])], [Series([1, 1]), Series([1, 1])]]) +def test_arith_list_of_arraylike_raise(to_add): + # GH 36702. 
Raise when trying to add list of array-like to DataFrame + df = DataFrame({"x": [1, 2], "y": [1, 2]}) + + msg = f"Unable to coerce list of {type(to_add[0])} to Series/DataFrame" + with pytest.raises(ValueError, match=msg): + df + to_add + with pytest.raises(ValueError, match=msg): + to_add + df + + +def test_inplace_arithmetic_series_update(using_copy_on_write, warn_copy_on_write): + # https://github.com/pandas-dev/pandas/issues/36373 + df = DataFrame({"A": [1, 2, 3]}) + df_orig = df.copy() + series = df["A"] + vals = series._values + + with tm.assert_cow_warning(warn_copy_on_write): + series += 1 + if using_copy_on_write: + assert series._values is not vals + tm.assert_frame_equal(df, df_orig) + else: + assert series._values is vals + + expected = DataFrame({"A": [2, 3, 4]}) + tm.assert_frame_equal(df, expected) + + +def test_arithmetic_multiindex_align(): + """ + Regression test for: https://github.com/pandas-dev/pandas/issues/33765 + """ + df1 = DataFrame( + [[1]], + index=["a"], + columns=MultiIndex.from_product([[0], [1]], names=["a", "b"]), + ) + df2 = DataFrame([[1]], index=["a"], columns=Index([0], name="a")) + expected = DataFrame( + [[0]], + index=["a"], + columns=MultiIndex.from_product([[0], [1]], names=["a", "b"]), + ) + result = df1 - df2 + tm.assert_frame_equal(result, expected) + + +def test_bool_frame_mult_float(): + # GH 18549 + df = DataFrame(True, list("ab"), list("cd")) + result = df * 1.0 + expected = DataFrame(np.ones((2, 2)), list("ab"), list("cd")) + tm.assert_frame_equal(result, expected) + + +def test_frame_sub_nullable_int(any_int_ea_dtype): + # GH 32822 + series1 = Series([1, 2, None], dtype=any_int_ea_dtype) + series2 = Series([1, 2, 3], dtype=any_int_ea_dtype) + expected = DataFrame([0, 0, None], dtype=any_int_ea_dtype) + result = series1.to_frame() - series2.to_frame() + tm.assert_frame_equal(result, expected) + + +@pytest.mark.filterwarnings( + "ignore:Passing a BlockManager|Passing a SingleBlockManager:DeprecationWarning" +) +def 
test_frame_op_subclass_nonclass_constructor(): + # GH#43201 subclass._constructor is a function, not the subclass itself + + class SubclassedSeries(Series): + @property + def _constructor(self): + return SubclassedSeries + + @property + def _constructor_expanddim(self): + return SubclassedDataFrame + + class SubclassedDataFrame(DataFrame): + _metadata = ["my_extra_data"] + + def __init__(self, my_extra_data, *args, **kwargs) -> None: + self.my_extra_data = my_extra_data + super().__init__(*args, **kwargs) + + @property + def _constructor(self): + return functools.partial(type(self), self.my_extra_data) + + @property + def _constructor_sliced(self): + return SubclassedSeries + + sdf = SubclassedDataFrame("some_data", {"A": [1, 2, 3], "B": [4, 5, 6]}) + result = sdf * 2 + expected = SubclassedDataFrame("some_data", {"A": [2, 4, 6], "B": [8, 10, 12]}) + tm.assert_frame_equal(result, expected) + + result = sdf + sdf + tm.assert_frame_equal(result, expected) + + +def test_enum_column_equality(): + Cols = Enum("Cols", "col1 col2") + + q1 = DataFrame({Cols.col1: [1, 2, 3]}) + q2 = DataFrame({Cols.col1: [1, 2, 3]}) + + result = q1[Cols.col1] == q2[Cols.col1] + expected = Series([True, True, True], name=Cols.col1) + + tm.assert_series_equal(result, expected) + + +def test_mixed_col_index_dtype(): + # GH 47382 + df1 = DataFrame(columns=list("abc"), data=1.0, index=[0]) + df2 = DataFrame(columns=list("abc"), data=0.0, index=[0]) + df1.columns = df2.columns.astype("string") + result = df1 + df2 + expected = DataFrame(columns=list("abc"), data=1.0, index=[0]) + tm.assert_frame_equal(result, expected) diff --git a/openflamingo/lib/python3.10/site-packages/torch/lib/libnvrtc-672ee683.so.11.2 b/openflamingo/lib/python3.10/site-packages/torch/lib/libnvrtc-672ee683.so.11.2 new file mode 100644 index 0000000000000000000000000000000000000000..0dca630b467d3555a733841149f2465dc559215f --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/torch/lib/libnvrtc-672ee683.so.11.2 @@ 
-0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d8f2b6074e0e5fc0c1aec44619b4de5d50851e634177a7cc7c52c9c6d845ecee +size 54417561