diff --git a/.gitattributes b/.gitattributes
index 70504299686892e45bf1817d84861900e6418969..fc20cfdf33761d7833f9690ce9763c6dfcbda13e 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -332,3 +332,4 @@ evalkit_tf437/lib/python3.10/site-packages/regex/_regex.cpython-310-x86_64-linux
evalkit_tf437/lib/python3.10/site-packages/scikit_learn.libs/libgomp-a34b3233.so.1.0.0 filter=lfs diff=lfs merge=lfs -text
evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/otData.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
evalkit_tf437/lib/python3.10/site-packages/pandas/io/__pycache__/stata.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
+evalkit_tf437/lib/python3.10/site-packages/pandas/io/formats/__pycache__/style.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
diff --git a/evalkit_tf437/lib/python3.10/site-packages/aiosignal/__init__.pyi b/evalkit_tf437/lib/python3.10/site-packages/aiosignal/__init__.pyi
new file mode 100644
index 0000000000000000000000000000000000000000..d4e3416d72246058259061578a82697e2bc0706e
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/aiosignal/__init__.pyi
@@ -0,0 +1,12 @@
+from typing import Any, Generic, TypeVar
+
+from frozenlist import FrozenList
+
+__all__ = ("Signal",)
+
+_T = TypeVar("_T")
+
+class Signal(FrozenList[_T], Generic[_T]):
+ def __init__(self, owner: Any) -> None: ...
+ def __repr__(self) -> str: ...
+ async def send(self, *args: Any, **kwargs: Any) -> None: ...
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/__pycache__/arrow_reader.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/datasets/__pycache__/arrow_reader.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..aa4b03f96d5c5b6a88cc02fcf3cbe3099b4761ab
Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/datasets/__pycache__/arrow_reader.cpython-310.pyc differ
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/__pycache__/arrow_writer.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/datasets/__pycache__/arrow_writer.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..08276b191a5e07ffe99543ba3f6eb648f0c3f6c0
Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/datasets/__pycache__/arrow_writer.cpython-310.pyc differ
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/__pycache__/combine.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/datasets/__pycache__/combine.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4c2f84875470189b693328f94ef52a58eca7cf1e
Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/datasets/__pycache__/combine.cpython-310.pyc differ
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/__pycache__/data_files.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/datasets/__pycache__/data_files.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..84e2d3df1022c744837d9f377e26504bf7f3e55a
Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/datasets/__pycache__/data_files.cpython-310.pyc differ
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/__pycache__/dataset_dict.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/datasets/__pycache__/dataset_dict.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..748bb04a2d00a994f081eca9e680d3de8a045d03
Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/datasets/__pycache__/dataset_dict.cpython-310.pyc differ
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/__pycache__/exceptions.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/datasets/__pycache__/exceptions.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5b05f5e394bee0c8e1a2e9c9af1c3a5b7387d109
Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/datasets/__pycache__/exceptions.cpython-310.pyc differ
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/__pycache__/info.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/datasets/__pycache__/info.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..419fd23ae7d0480763130354adda84874c922f7c
Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/datasets/__pycache__/info.cpython-310.pyc differ
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/__pycache__/inspect.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/datasets/__pycache__/inspect.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..991fb052fd1b6f508c785277178f446093e378ec
Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/datasets/__pycache__/inspect.cpython-310.pyc differ
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/__pycache__/naming.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/datasets/__pycache__/naming.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2a341ea3aea8f8e361faa4a62bb53b11ee7a51df
Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/datasets/__pycache__/naming.cpython-310.pyc differ
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/__pycache__/splits.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/datasets/__pycache__/splits.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9a955a77f3cc87ec77bc2f45db2cb27c47a59335
Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/datasets/__pycache__/splits.cpython-310.pyc differ
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/combine.py b/evalkit_tf437/lib/python3.10/site-packages/datasets/combine.py
new file mode 100644
index 0000000000000000000000000000000000000000..d2aad87f0cc9278626d0be5111f91b6de49ef935
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/datasets/combine.py
@@ -0,0 +1,215 @@
+from typing import List, Optional, TypeVar
+
+from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
+from .dataset_dict import DatasetDict, IterableDatasetDict
+from .info import DatasetInfo
+from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
+from .splits import NamedSplit
+from .utils import logging
+from .utils.py_utils import Literal
+
+
+logger = logging.get_logger(__name__)
+
+
+DatasetType = TypeVar("DatasetType", Dataset, IterableDataset)
+
+
+def interleave_datasets(
+ datasets: List[DatasetType],
+ probabilities: Optional[List[float]] = None,
+ seed: Optional[int] = None,
+ info: Optional[DatasetInfo] = None,
+ split: Optional[NamedSplit] = None,
+ stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
+) -> DatasetType:
+ """
+ Interleave several datasets (sources) into a single dataset.
+ The new dataset is constructed by alternating between the sources to get the examples.
+
+ You can use this function on a list of [`Dataset`] objects, or on a list of [`IterableDataset`] objects.
+
+ - If `probabilities` is `None` (default) the new dataset is constructed by cycling between each source to get the examples.
+ - If `probabilities` is not `None`, the new dataset is constructed by getting examples from a random source at a time according to the provided probabilities.
+
+    The resulting dataset ends when one of the source datasets runs out of examples, except when `stopping_strategy` is `all_exhausted`,
+    in which case the resulting dataset ends when all datasets have run out of examples at least once.
+
+ Note for iterable datasets:
+
+ In a distributed setup or in PyTorch DataLoader workers, the stopping strategy is applied per process.
+    Therefore the "first_exhausted" strategy on a sharded iterable dataset can generate fewer samples in total (up to 1 missing sample per subdataset per worker).
+
+ Args:
+ datasets (`List[Dataset]` or `List[IterableDataset]`):
+ List of datasets to interleave.
+ probabilities (`List[float]`, *optional*, defaults to `None`):
+ If specified, the new dataset is constructed by sampling
+ examples from one source at a time according to these probabilities.
+ seed (`int`, *optional*, defaults to `None`):
+ The random seed used to choose a source for each example.
+ info ([`DatasetInfo`], *optional*):
+ Dataset information, like description, citation, etc.
+
+ split ([`NamedSplit`], *optional*):
+ Name of the dataset split.
+
+ stopping_strategy (`str`, defaults to `first_exhausted`):
+ Two strategies are proposed right now, `first_exhausted` and `all_exhausted`.
+            By default, `first_exhausted` is an undersampling strategy, i.e. the dataset construction is stopped as soon as one dataset has run out of samples.
+            If the strategy is `all_exhausted`, we use an oversampling strategy, i.e. the dataset construction is stopped as soon as every sample of every dataset has been added at least once.
+ Note that if the strategy is `all_exhausted`, the interleaved dataset size can get enormous:
+ - with no probabilities, the resulting dataset will have `max_length_datasets*nb_dataset` samples.
+                - with given probabilities, the resulting dataset will have more samples if some datasets have a very low probability of being visited.
+ Returns:
+ [`Dataset`] or [`IterableDataset`]: Return type depends on the input `datasets`
+ parameter. `Dataset` if the input is a list of `Dataset`, `IterableDataset` if the input is a list of
+ `IterableDataset`.
+
+ Example:
+
+ For regular datasets (map-style):
+
+ ```python
+ >>> from datasets import Dataset, interleave_datasets
+ >>> d1 = Dataset.from_dict({"a": [0, 1, 2]})
+ >>> d2 = Dataset.from_dict({"a": [10, 11, 12]})
+ >>> d3 = Dataset.from_dict({"a": [20, 21, 22]})
+ >>> dataset = interleave_datasets([d1, d2, d3], probabilities=[0.7, 0.2, 0.1], seed=42, stopping_strategy="all_exhausted")
+ >>> dataset["a"]
+ [10, 0, 11, 1, 2, 20, 12, 10, 0, 1, 2, 21, 0, 11, 1, 2, 0, 1, 12, 2, 10, 0, 22]
+ >>> dataset = interleave_datasets([d1, d2, d3], probabilities=[0.7, 0.2, 0.1], seed=42)
+ >>> dataset["a"]
+ [10, 0, 11, 1, 2]
+ >>> dataset = interleave_datasets([d1, d2, d3])
+ >>> dataset["a"]
+ [0, 10, 20, 1, 11, 21, 2, 12, 22]
+ >>> dataset = interleave_datasets([d1, d2, d3], stopping_strategy="all_exhausted")
+ >>> dataset["a"]
+ [0, 10, 20, 1, 11, 21, 2, 12, 22]
+ >>> d1 = Dataset.from_dict({"a": [0, 1, 2]})
+ >>> d2 = Dataset.from_dict({"a": [10, 11, 12, 13]})
+ >>> d3 = Dataset.from_dict({"a": [20, 21, 22, 23, 24]})
+ >>> dataset = interleave_datasets([d1, d2, d3])
+ >>> dataset["a"]
+ [0, 10, 20, 1, 11, 21, 2, 12, 22]
+ >>> dataset = interleave_datasets([d1, d2, d3], stopping_strategy="all_exhausted")
+ >>> dataset["a"]
+ [0, 10, 20, 1, 11, 21, 2, 12, 22, 0, 13, 23, 1, 10, 24]
+ >>> dataset = interleave_datasets([d1, d2, d3], probabilities=[0.7, 0.2, 0.1], seed=42)
+ >>> dataset["a"]
+ [10, 0, 11, 1, 2]
+ >>> dataset = interleave_datasets([d1, d2, d3], probabilities=[0.7, 0.2, 0.1], seed=42, stopping_strategy="all_exhausted")
+ >>> dataset["a"]
+ [10, 0, 11, 1, 2, 20, 12, 13, ..., 0, 1, 2, 0, 24]
+ For datasets in streaming mode (iterable):
+
+ >>> from datasets import load_dataset, interleave_datasets
+ >>> d1 = load_dataset("oscar", "unshuffled_deduplicated_en", split="train", streaming=True)
+ >>> d2 = load_dataset("oscar", "unshuffled_deduplicated_fr", split="train", streaming=True)
+ >>> dataset = interleave_datasets([d1, d2])
+ >>> iterator = iter(dataset)
+ >>> next(iterator)
+ {'text': 'Mtendere Village was inspired by the vision...}
+ >>> next(iterator)
+ {'text': "Média de débat d'idées, de culture...}
+ ```
+ """
+ from .arrow_dataset import Dataset
+ from .iterable_dataset import IterableDataset
+
+ if not datasets:
+ raise ValueError("Unable to interleave an empty list of datasets.")
+ for i, dataset in enumerate(datasets):
+ if not isinstance(dataset, (Dataset, IterableDataset)):
+ if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
+ if not dataset:
+ raise ValueError(
+ f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
+ "is an empty dataset dictionary."
+ )
+ raise ValueError(
+ f"Dataset at position {i} has at least one split: {list(dataset)}\n"
+ f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
+ )
+ raise ValueError(
+ f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
+ )
+ if i == 0:
+ dataset_type, other_type = (
+ (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
+ )
+ elif not isinstance(dataset, dataset_type):
+ raise ValueError(
+ f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
+ )
+ if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
+ raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy.")
+ if dataset_type is Dataset:
+ return _interleave_map_style_datasets(
+ datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
+ )
+ else:
+ return _interleave_iterable_datasets(
+ datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
+ )
+
+
+def concatenate_datasets(
+ dsets: List[DatasetType],
+ info: Optional[DatasetInfo] = None,
+ split: Optional[NamedSplit] = None,
+ axis: int = 0,
+) -> DatasetType:
+ """
+ Converts a list of [`Dataset`] with the same schema into a single [`Dataset`].
+
+ Args:
+ dsets (`List[datasets.Dataset]`):
+ List of Datasets to concatenate.
+ info (`DatasetInfo`, *optional*):
+ Dataset information, like description, citation, etc.
+ split (`NamedSplit`, *optional*):
+ Name of the dataset split.
+ axis (`{0, 1}`, defaults to `0`):
+ Axis to concatenate over, where `0` means over rows (vertically) and `1` means over columns
+ (horizontally).
+
+
+
+ Example:
+
+ ```py
+ >>> ds3 = concatenate_datasets([ds1, ds2])
+ ```
+ """
+
+ if not dsets:
+ raise ValueError("Unable to concatenate an empty list of datasets.")
+ for i, dataset in enumerate(dsets):
+ if not isinstance(dataset, (Dataset, IterableDataset)):
+ if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
+ if not dataset:
+ raise ValueError(
+ f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
+ "is an empty dataset dictionary."
+ )
+ raise ValueError(
+ f"Dataset at position {i} has at least one split: {list(dataset)}\n"
+ f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
+ )
+ raise ValueError(
+ f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
+ )
+ if i == 0:
+ dataset_type, other_type = (
+ (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
+ )
+ elif not isinstance(dataset, dataset_type):
+ raise ValueError(
+ f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
+ )
+ if dataset_type is Dataset:
+ return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis)
+ else:
+ return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis)
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/commands/__pycache__/__init__.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/datasets/commands/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a8be42797476163d6ec0d7359b5072755fe90cd9
Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/datasets/commands/__pycache__/__init__.cpython-310.pyc differ
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/commands/__pycache__/convert.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/datasets/commands/__pycache__/convert.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ed601f60c9857ebe37cca80a27c963c4a61fa0c1
Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/datasets/commands/__pycache__/convert.cpython-310.pyc differ
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/commands/__pycache__/datasets_cli.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/datasets/commands/__pycache__/datasets_cli.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..67f246b804ed9d6c960272ff105bd478d66ac04a
Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/datasets/commands/__pycache__/datasets_cli.cpython-310.pyc differ
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/commands/__pycache__/dummy_data.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/datasets/commands/__pycache__/dummy_data.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ead35e4e9fee90aa8e40809b8afe6c9f1119caf4
Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/datasets/commands/__pycache__/dummy_data.cpython-310.pyc differ
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/download/__pycache__/__init__.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/datasets/download/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b332b3639579db76e233a28c006d67737d06f9f6
Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/datasets/download/__pycache__/__init__.cpython-310.pyc differ
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/download/__pycache__/mock_download_manager.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/datasets/download/__pycache__/mock_download_manager.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..17e607b04c994901a2f12cf3704089afdbb8dc27
Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/datasets/download/__pycache__/mock_download_manager.cpython-310.pyc differ
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/download/__pycache__/streaming_download_manager.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/datasets/download/__pycache__/streaming_download_manager.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..02a9f19c4cc4fc2651be9089c3d79033216e1b65
Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/datasets/download/__pycache__/streaming_download_manager.cpython-310.pyc differ
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/features/audio.py b/evalkit_tf437/lib/python3.10/site-packages/datasets/features/audio.py
new file mode 100644
index 0000000000000000000000000000000000000000..342ecd4f0286556bc5fa75b06fafc5c12323099c
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/datasets/features/audio.py
@@ -0,0 +1,277 @@
+import os
+from dataclasses import dataclass, field
+from io import BytesIO
+from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
+
+import numpy as np
+import pyarrow as pa
+
+from .. import config
+from ..download.download_config import DownloadConfig
+from ..download.streaming_download_manager import xopen, xsplitext
+from ..table import array_cast
+from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
+
+
+if TYPE_CHECKING:
+ from .features import FeatureType
+
+
+@dataclass
+class Audio:
+ """Audio [`Feature`] to extract audio data from an audio file.
+
+ Input: The Audio feature accepts as input:
+ - A `str`: Absolute path to the audio file (i.e. random access is allowed).
+ - A `dict` with the keys:
+
+ - `path`: String with relative path of the audio file to the archive file.
+ - `bytes`: Bytes content of the audio file.
+
+ This is useful for archived files with sequential access.
+
+ - A `dict` with the keys:
+
+ - `path`: String with relative path of the audio file to the archive file.
+ - `array`: Array containing the audio sample
+ - `sampling_rate`: Integer corresponding to the sampling rate of the audio sample.
+
+ This is useful for archived files with sequential access.
+
+ Args:
+ sampling_rate (`int`, *optional*):
+ Target sampling rate. If `None`, the native sampling rate is used.
+ mono (`bool`, defaults to `True`):
+ Whether to convert the audio signal to mono by averaging samples across
+ channels.
+ decode (`bool`, defaults to `True`):
+ Whether to decode the audio data. If `False`,
+ returns the underlying dictionary in the format `{"path": audio_path, "bytes": audio_bytes}`.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset, Audio
+ >>> ds = load_dataset("PolyAI/minds14", name="en-US", split="train")
+ >>> ds = ds.cast_column("audio", Audio(sampling_rate=16000))
+ >>> ds[0]["audio"]
+ {'array': array([ 2.3443763e-05, 2.1729663e-04, 2.2145823e-04, ...,
+ 3.8356509e-05, -7.3497440e-06, -2.1754686e-05], dtype=float32),
+ 'path': '/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~JOINT_ACCOUNT/602ba55abb1e6d0fbce92065.wav',
+ 'sampling_rate': 16000}
+ ```
+ """
+
+ sampling_rate: Optional[int] = None
+ mono: bool = True
+ decode: bool = True
+ id: Optional[str] = None
+ # Automatically constructed
+ dtype: ClassVar[str] = "dict"
+ pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
+ _type: str = field(default="Audio", init=False, repr=False)
+
+ def __call__(self):
+ return self.pa_type
+
+ def encode_example(self, value: Union[str, bytes, dict]) -> dict:
+ """Encode example into a format for Arrow.
+
+ Args:
+ value (`str` or `dict`):
+ Data passed as input to Audio feature.
+
+ Returns:
+ `dict`
+ """
+ try:
+ import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
+ except ImportError as err:
+ raise ImportError("To support encoding audio data, please install 'soundfile'.") from err
+ if isinstance(value, str):
+ return {"bytes": None, "path": value}
+ elif isinstance(value, bytes):
+ return {"bytes": value, "path": None}
+ elif "array" in value:
+ # convert the audio array to wav bytes
+ buffer = BytesIO()
+ sf.write(buffer, value["array"], value["sampling_rate"], format="wav")
+ return {"bytes": buffer.getvalue(), "path": None}
+ elif value.get("path") is not None and os.path.isfile(value["path"]):
+ # we set "bytes": None to not duplicate the data if they're already available locally
+ if value["path"].endswith("pcm"):
+ # "PCM" only has raw audio bytes
+ if value.get("sampling_rate") is None:
+                # Converting raw PCM bytes to WAV bytes requires knowing the sampling rate
+ raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object")
+ if value.get("bytes"):
+                # If we already have the raw PCM bytes, reuse them instead of re-reading the file
+ bytes_value = np.frombuffer(value["bytes"], dtype=np.int16).astype(np.float32) / 32767
+ else:
+ bytes_value = np.memmap(value["path"], dtype="h", mode="r").astype(np.float32) / 32767
+
+ buffer = BytesIO(bytes())
+ sf.write(buffer, bytes_value, value["sampling_rate"], format="wav")
+ return {"bytes": buffer.getvalue(), "path": None}
+ else:
+ return {"bytes": None, "path": value.get("path")}
+ elif value.get("bytes") is not None or value.get("path") is not None:
+ # store the audio bytes, and path is used to infer the audio format using the file extension
+ return {"bytes": value.get("bytes"), "path": value.get("path")}
+ else:
+ raise ValueError(
+ f"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
+ )
+
+ def decode_example(
+ self, value: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None
+ ) -> dict:
+ """Decode example audio file into audio data.
+
+ Args:
+ value (`dict`):
+ A dictionary with keys:
+
+ - `path`: String with relative audio file path.
+ - `bytes`: Bytes of the audio file.
+ token_per_repo_id (`dict`, *optional*):
+ To access and decode
+ audio files from private repositories on the Hub, you can pass
+ a dictionary repo_id (`str`) -> token (`bool` or `str`)
+
+ Returns:
+ `dict`
+ """
+ if not self.decode:
+ raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead.")
+
+ path, file = (value["path"], BytesIO(value["bytes"])) if value["bytes"] is not None else (value["path"], None)
+ if path is None and file is None:
+ raise ValueError(f"An audio sample should have one of 'path' or 'bytes' but both are None in {value}.")
+
+ try:
+ import librosa
+ import soundfile as sf
+ except ImportError as err:
+ raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'.") from err
+
+ audio_format = xsplitext(path)[1][1:].lower() if path is not None else None
+ if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
+ raise RuntimeError(
+ "Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
+ 'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
+ )
+ elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
+ raise RuntimeError(
+ "Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
+ 'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
+ )
+
+ if file is None:
+ token_per_repo_id = token_per_repo_id or {}
+ source_url = path.split("::")[-1]
+ pattern = (
+ config.HUB_DATASETS_URL if source_url.startswith(config.HF_ENDPOINT) else config.HUB_DATASETS_HFFS_URL
+ )
+ try:
+ repo_id = string_to_dict(source_url, pattern)["repo_id"]
+ token = token_per_repo_id[repo_id]
+ except (ValueError, KeyError):
+ token = None
+
+ download_config = DownloadConfig(token=token)
+ with xopen(path, "rb", download_config=download_config) as f:
+ array, sampling_rate = sf.read(f)
+
+ else:
+ array, sampling_rate = sf.read(file)
+
+ array = array.T
+ if self.mono:
+ array = librosa.to_mono(array)
+ if self.sampling_rate and self.sampling_rate != sampling_rate:
+ array = librosa.resample(array, orig_sr=sampling_rate, target_sr=self.sampling_rate)
+ sampling_rate = self.sampling_rate
+
+ return {"path": path, "array": array, "sampling_rate": sampling_rate}
+
+ def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
+ """If in the decodable state, raise an error, otherwise flatten the feature into a dictionary."""
+ from .features import Value
+
+ if self.decode:
+ raise ValueError("Cannot flatten a decoded Audio feature.")
+ return {
+ "bytes": Value("binary"),
+ "path": Value("string"),
+ }
+
+ def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray]) -> pa.StructArray:
+ """Cast an Arrow array to the Audio arrow storage type.
+ The Arrow types that can be converted to the Audio pyarrow storage type are:
+
+ - `pa.string()` - it must contain the "path" data
+ - `pa.binary()` - it must contain the audio bytes
+ - `pa.struct({"bytes": pa.binary()})`
+ - `pa.struct({"path": pa.string()})`
+ - `pa.struct({"bytes": pa.binary(), "path": pa.string()})` - order doesn't matter
+
+ Args:
+ storage (`Union[pa.StringArray, pa.StructArray]`):
+ PyArrow array to cast.
+
+ Returns:
+ `pa.StructArray`: Array in the Audio arrow storage type, that is
+ `pa.struct({"bytes": pa.binary(), "path": pa.string()})`
+ """
+ if pa.types.is_string(storage.type):
+ bytes_array = pa.array([None] * len(storage), type=pa.binary())
+ storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
+ elif pa.types.is_binary(storage.type):
+ path_array = pa.array([None] * len(storage), type=pa.string())
+ storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
+ elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices("array"):
+ storage = pa.array([Audio().encode_example(x) if x is not None else None for x in storage.to_pylist()])
+ elif pa.types.is_struct(storage.type):
+ if storage.type.get_field_index("bytes") >= 0:
+ bytes_array = storage.field("bytes")
+ else:
+ bytes_array = pa.array([None] * len(storage), type=pa.binary())
+ if storage.type.get_field_index("path") >= 0:
+ path_array = storage.field("path")
+ else:
+ path_array = pa.array([None] * len(storage), type=pa.string())
+ storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
+ return array_cast(storage, self.pa_type)
+
+ def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
+ """Embed audio files into the Arrow array.
+
+ Args:
+ storage (`pa.StructArray`):
+ PyArrow array to embed.
+
+ Returns:
+ `pa.StructArray`: Array in the Audio arrow storage type, that is
+ `pa.struct({"bytes": pa.binary(), "path": pa.string()})`.
+ """
+
+ @no_op_if_value_is_null
+ def path_to_bytes(path):
+ with xopen(path, "rb") as f:
+ bytes_ = f.read()
+ return bytes_
+
+ bytes_array = pa.array(
+ [
+ (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
+ for x in storage.to_pylist()
+ ],
+ type=pa.binary(),
+ )
+ path_array = pa.array(
+ [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
+ type=pa.string(),
+ )
+ storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
+ return array_cast(storage, self.pa_type)
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/features/features.py b/evalkit_tf437/lib/python3.10/site-packages/datasets/features/features.py
new file mode 100644
index 0000000000000000000000000000000000000000..c2c7d8ff17e47fa564c03908c99410ed88c4ef97
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/datasets/features/features.py
@@ -0,0 +1,2167 @@
+# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Lint as: python3
+"""This class handle features definition in datasets and some utilities to display table type."""
+
+import copy
+import json
+import re
+import sys
+from collections.abc import Iterable, Mapping
+from collections.abc import Sequence as SequenceABC
+from dataclasses import InitVar, dataclass, field, fields
+from functools import reduce, wraps
+from operator import mul
+from typing import Any, Callable, ClassVar, Dict, List, Optional, Tuple, Union
+from typing import Sequence as Sequence_
+
+import numpy as np
+import pandas as pd
+import pyarrow as pa
+import pyarrow.compute as pc
+import pyarrow.types
+import pyarrow_hotfix # noqa: F401 # to fix vulnerability on pyarrow<14.0.1
+from pandas.api.extensions import ExtensionArray as PandasExtensionArray
+from pandas.api.extensions import ExtensionDtype as PandasExtensionDtype
+
+from .. import config
+from ..naming import camelcase_to_snakecase, snakecase_to_camelcase
+from ..table import array_cast
+from ..utils import logging
+from ..utils.py_utils import asdict, first_non_null_value, zip_dict
+from .audio import Audio
+from .image import Image, encode_pil_image
+from .translation import Translation, TranslationVariableLanguages
+
+
+logger = logging.get_logger(__name__)
+
+
def _arrow_to_datasets_dtype(arrow_type: pa.DataType) -> str:
    """
    Convert a pyarrow.DataType to its datasets string dtype.
    Inverse of `string_to_arrow`: `dt == string_to_arrow(_arrow_to_datasets_dtype(dt))`.
    """
    types = pyarrow.types
    # Parameter-free types: (pyarrow.types predicate name, datasets dtype string).
    # The predicates are mutually exclusive, so the scan order does not matter.
    simple_dtypes = (
        ("is_null", "null"),
        ("is_boolean", "bool"),
        ("is_int8", "int8"),
        ("is_int16", "int16"),
        ("is_int32", "int32"),
        ("is_int64", "int64"),
        ("is_uint8", "uint8"),
        ("is_uint16", "uint16"),
        ("is_uint32", "uint32"),
        ("is_uint64", "uint64"),
        ("is_float16", "float16"),  # pyarrow dtype is "halffloat"
        ("is_float32", "float32"),  # pyarrow dtype is "float"
        ("is_float64", "float64"),  # pyarrow dtype is "double"
        ("is_date32", "date32"),  # pyarrow dtype is "date32[day]"
        ("is_date64", "date64"),  # pyarrow dtype is "date64[ms]"
        ("is_binary", "binary"),
        ("is_large_binary", "large_binary"),
        ("is_string", "string"),
        ("is_large_string", "large_string"),
    )
    for predicate, dtype in simple_dtypes:
        if getattr(types, predicate)(arrow_type):
            return dtype
    # Parameterized types embed their unit / precision in the dtype string.
    if types.is_time32(arrow_type):
        return f"time32[{pa.type_for_alias(str(arrow_type)).unit}]"
    if types.is_time64(arrow_type):
        return f"time64[{pa.type_for_alias(str(arrow_type)).unit}]"
    if types.is_timestamp(arrow_type):
        if arrow_type.tz is None:
            return f"timestamp[{arrow_type.unit}]"
        if arrow_type.tz:
            return f"timestamp[{arrow_type.unit}, tz={arrow_type.tz}]"
        raise ValueError(f"Unexpected timestamp object {arrow_type}.")
    if types.is_duration(arrow_type):
        return f"duration[{arrow_type.unit}]"
    if types.is_decimal128(arrow_type):
        return f"decimal128({arrow_type.precision}, {arrow_type.scale})"
    if types.is_decimal256(arrow_type):
        return f"decimal256({arrow_type.precision}, {arrow_type.scale})"
    raise ValueError(f"Arrow type {arrow_type} does not have a datasets dtype equivalent.")
+
+
def string_to_arrow(datasets_dtype: str) -> pa.DataType:
    """
    string_to_arrow takes a datasets string dtype and converts it to a pyarrow.DataType.

    In effect, `dt == string_to_arrow(_arrow_to_datasets_dtype(dt))`

    This is necessary because the datasets.Value() primitive type is constructed using a string dtype

        Value(dtype=str)

    But Features.type (via `get_nested_type()`) expects to resolve Features into a pyarrow Schema,
    which means that each Value() must be able to resolve into a corresponding pyarrow.DataType, which is the
    purpose of this function.

    Raises:
        ValueError: if `datasets_dtype` does not name a pyarrow type or is malformed.
    """

    def _dtype_error_msg(dtype, pa_dtype, examples=None, urls=None):
        # Build a helpful error message listing valid examples and documentation links.
        msg = f"{dtype} is not a validly formatted string representation of the pyarrow {pa_dtype} type."
        if examples:
            examples = ", ".join(examples[:-1]) + " or " + examples[-1] if len(examples) > 1 else examples[0]
            msg += f"\nValid examples include: {examples}."
        if urls:
            urls = ", ".join(urls[:-1]) + " and " + urls[-1] if len(urls) > 1 else urls[0]
            # Fix: "insformation" -> "information" (typo in the user-facing message).
            msg += f"\nFor more information, see: {urls}."
        return msg

    # Parameter-free types map directly to a pyarrow factory function,
    # possibly with a trailing underscore (e.g. "bool" -> pa.bool_).
    if datasets_dtype in pa.__dict__:
        return pa.__dict__[datasets_dtype]()

    if (datasets_dtype + "_") in pa.__dict__:
        return pa.__dict__[datasets_dtype + "_"]()

    timestamp_matches = re.search(r"^timestamp\[(.*)\]$", datasets_dtype)
    if timestamp_matches:
        timestamp_internals = timestamp_matches.group(1)
        internals_matches = re.search(r"^(s|ms|us|ns),\s*tz=([a-zA-Z0-9/_+\-:]*)$", timestamp_internals)
        if timestamp_internals in ["s", "ms", "us", "ns"]:
            return pa.timestamp(timestamp_internals)
        elif internals_matches:
            return pa.timestamp(internals_matches.group(1), internals_matches.group(2))
        else:
            raise ValueError(
                _dtype_error_msg(
                    datasets_dtype,
                    "timestamp",
                    # Fix: the tz example was missing its closing bracket.
                    examples=["timestamp[us]", "timestamp[us, tz=America/New_York]"],
                    urls=["https://arrow.apache.org/docs/python/generated/pyarrow.timestamp.html"],
                )
            )

    duration_matches = re.search(r"^duration\[(.*)\]$", datasets_dtype)
    if duration_matches:
        duration_internals = duration_matches.group(1)
        if duration_internals in ["s", "ms", "us", "ns"]:
            return pa.duration(duration_internals)
        else:
            raise ValueError(
                _dtype_error_msg(
                    datasets_dtype,
                    "duration",
                    examples=["duration[s]", "duration[us]"],
                    urls=["https://arrow.apache.org/docs/python/generated/pyarrow.duration.html"],
                )
            )

    time_matches = re.search(r"^time(.*)\[(.*)\]$", datasets_dtype)
    if time_matches:
        time_internals_bits = time_matches.group(1)
        if time_internals_bits == "32":
            time_internals_unit = time_matches.group(2)
            if time_internals_unit in ["s", "ms"]:
                return pa.time32(time_internals_unit)
            else:
                raise ValueError(
                    f"{time_internals_unit} is not a valid unit for the pyarrow time32 type. Supported units: s (second) and ms (millisecond)."
                )
        elif time_internals_bits == "64":
            time_internals_unit = time_matches.group(2)
            if time_internals_unit in ["us", "ns"]:
                return pa.time64(time_internals_unit)
            else:
                raise ValueError(
                    f"{time_internals_unit} is not a valid unit for the pyarrow time64 type. Supported units: us (microsecond) and ns (nanosecond)."
                )
        else:
            raise ValueError(
                _dtype_error_msg(
                    datasets_dtype,
                    "time",
                    examples=["time32[s]", "time64[us]"],
                    urls=[
                        "https://arrow.apache.org/docs/python/generated/pyarrow.time32.html",
                        "https://arrow.apache.org/docs/python/generated/pyarrow.time64.html",
                    ],
                )
            )

    decimal_matches = re.search(r"^decimal(.*)\((.*)\)$", datasets_dtype)
    if decimal_matches:
        decimal_internals_bits = decimal_matches.group(1)
        if decimal_internals_bits in ("128", "256"):
            # decimal128 and decimal256 differ only in the factory function and
            # the examples shown on error, so the two branches are merged.
            pa_factory = pa.decimal128 if decimal_internals_bits == "128" else pa.decimal256
            examples = (
                ["decimal128(10, 2)", "decimal128(4, -2)"]
                if decimal_internals_bits == "128"
                else ["decimal256(30, 2)", "decimal256(38, -4)"]
            )
            precision_and_scale = re.search(r"^(\d+),\s*(-?\d+)$", decimal_matches.group(2))
            if precision_and_scale:
                return pa_factory(int(precision_and_scale.group(1)), int(precision_and_scale.group(2)))
            raise ValueError(
                _dtype_error_msg(
                    datasets_dtype,
                    f"decimal{decimal_internals_bits}",
                    examples=examples,
                    urls=[
                        f"https://arrow.apache.org/docs/python/generated/pyarrow.decimal{decimal_internals_bits}.html"
                    ],
                )
            )
        else:
            raise ValueError(
                _dtype_error_msg(
                    datasets_dtype,
                    "decimal",
                    examples=["decimal128(12, 3)", "decimal256(40, 6)"],
                    urls=[
                        "https://arrow.apache.org/docs/python/generated/pyarrow.decimal128.html",
                        "https://arrow.apache.org/docs/python/generated/pyarrow.decimal256.html",
                    ],
                )
            )

    raise ValueError(
        f"Neither {datasets_dtype} nor {datasets_dtype + '_'} seems to be a pyarrow data type. "
        f"Please make sure to use a correct data type, see: "
        f"https://arrow.apache.org/docs/python/api/datatypes.html#factory-functions"
    )
+
+
def _cast_to_python_objects(obj: Any, only_1d_for_numpy: bool, optimize_list_casting: bool) -> Tuple[Any, bool]:
    """
    Cast pytorch/tensorflow/pandas objects to python numpy array/lists.
    It works recursively.

    If `optimize_list_casting` is True, to avoid iterating over possibly long lists, it first checks (recursively) if the first element that is not None or empty (if it is a sequence) has to be casted.
    If the first element needs to be casted, then all the elements of the list will be casted, otherwise they'll stay the same.
    This trick allows to cast objects that contain tokenizers outputs without iterating over every single token for example.

    Args:
        obj: the object (nested struct) to cast.
        only_1d_for_numpy (bool): whether to keep the full multi-dim tensors as multi-dim numpy arrays, or convert them to
            nested lists of 1-dimensional numpy arrays. This can be useful to keep only 1-d arrays to instantiate Arrow arrays.
            Indeed Arrow only support converting 1-dimensional array values.
        optimize_list_casting (bool): whether to optimize list casting by checking the first non-null element to see if it needs to be casted
            and if it doesn't, not checking the rest of the list elements.

    Returns:
        casted_obj: the casted object
        has_changed (bool): True if the object has been changed, False if it is identical
    """

    # Optional frameworks are imported only if the caller already imported them
    # (checked via sys.modules), so this function never triggers a heavy import itself.
    if config.TF_AVAILABLE and "tensorflow" in sys.modules:
        import tensorflow as tf

    if config.TORCH_AVAILABLE and "torch" in sys.modules:
        import torch

    if config.JAX_AVAILABLE and "jax" in sys.modules:
        import jax.numpy as jnp

    if config.PIL_AVAILABLE and "PIL" in sys.modules:
        import PIL.Image

    if isinstance(obj, np.ndarray):
        if obj.ndim == 0:
            # 0-d array: unwrap to the underlying scalar.
            return obj[()], True
        elif not only_1d_for_numpy or obj.ndim == 1:
            # Keep the array as-is (no change).
            return obj, False
        else:
            # Multi-dim array with only_1d_for_numpy: recurse over the first axis
            # to end up with nested lists of 1-d arrays.
            return (
                [
                    _cast_to_python_objects(
                        x, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
                    )[0]
                    for x in obj
                ],
                True,
            )
    elif config.TORCH_AVAILABLE and "torch" in sys.modules and isinstance(obj, torch.Tensor):
        # detach() + cpu() so gradients and device placement don't block conversion.
        if obj.ndim == 0:
            return obj.detach().cpu().numpy()[()], True
        elif not only_1d_for_numpy or obj.ndim == 1:
            return obj.detach().cpu().numpy(), True
        else:
            return (
                [
                    _cast_to_python_objects(
                        x, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
                    )[0]
                    for x in obj.detach().cpu().numpy()
                ],
                True,
            )
    elif config.TF_AVAILABLE and "tensorflow" in sys.modules and isinstance(obj, tf.Tensor):
        if obj.ndim == 0:
            return obj.numpy()[()], True
        elif not only_1d_for_numpy or obj.ndim == 1:
            return obj.numpy(), True
        else:
            return (
                [
                    _cast_to_python_objects(
                        x, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
                    )[0]
                    for x in obj.numpy()
                ],
                True,
            )
    elif config.JAX_AVAILABLE and "jax" in sys.modules and isinstance(obj, jnp.ndarray):
        if obj.ndim == 0:
            return np.asarray(obj)[()], True
        elif not only_1d_for_numpy or obj.ndim == 1:
            return np.asarray(obj), True
        else:
            return (
                [
                    _cast_to_python_objects(
                        x, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
                    )[0]
                    for x in np.asarray(obj)
                ],
                True,
            )
    elif config.PIL_AVAILABLE and "PIL" in sys.modules and isinstance(obj, PIL.Image.Image):
        # Images are encoded to the Image feature's storage format.
        return encode_pil_image(obj), True
    elif isinstance(obj, pd.Series):
        return (
            _cast_to_python_objects(
                obj.tolist(), only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
            )[0],
            True,
        )
    elif isinstance(obj, pd.DataFrame):
        # Cast column by column, keeping the column names as dict keys.
        return (
            {
                key: _cast_to_python_objects(
                    value, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
                )[0]
                for key, value in obj.to_dict("series").items()
            },
            True,
        )
    elif isinstance(obj, pd.Timestamp):
        return obj.to_pydatetime(), True
    elif isinstance(obj, pd.Timedelta):
        return obj.to_pytimedelta(), True
    elif isinstance(obj, Mapping):
        # Non-dict mappings are converted to plain dicts, hence has_changed starts True for them.
        has_changed = not isinstance(obj, dict)
        output = {}
        for k, v in obj.items():
            casted_v, has_changed_v = _cast_to_python_objects(
                v, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
            )
            has_changed |= has_changed_v
            output[k] = casted_v
        # Return the original object untouched when nothing changed.
        return output if has_changed else obj, has_changed
    elif hasattr(obj, "__array__"):
        # Generic array-likes (checked after Mapping/pandas so those take precedence).
        return (
            _cast_to_python_objects(
                obj.__array__(), only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
            )[0],
            True,
        )
    elif isinstance(obj, (list, tuple)):
        if len(obj) > 0:
            # Find the first non-null, non-empty element to probe whether casting is needed.
            for first_elmt in obj:
                if _check_non_null_non_empty_recursive(first_elmt):
                    break
            # NOTE(review): casted_first_elmt is unused; only the has_changed flag matters here.
            casted_first_elmt, has_changed_first_elmt = _cast_to_python_objects(
                first_elmt, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
            )
            if has_changed_first_elmt or not optimize_list_casting:
                # Either the probe changed (so everything must be casted), or the
                # optimization is disabled and every element is checked anyway.
                return (
                    [
                        _cast_to_python_objects(
                            elmt, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
                        )[0]
                        for elmt in obj
                    ],
                    True,
                )
            else:
                # NOTE(review): this isinstance check is always True here (guarded by the
                # enclosing elif), so the `list(obj), True` branch looks unreachable.
                if isinstance(obj, (list, tuple)):
                    return obj, False
                else:
                    return list(obj), True
        else:
            # Empty list/tuple: nothing to cast.
            return obj, False
    else:
        # Plain python objects (int, float, str, None, ...) are kept as-is.
        return obj, False
+
+
def cast_to_python_objects(obj: Any, only_1d_for_numpy=False, optimize_list_casting=True) -> Any:
    """
    Cast numpy/pytorch/tensorflow/pandas objects to python lists.
    It works recursively.

    If `optimize_list_casting` is True, to avoid iterating over possibly long lists, it first checks (recursively)
    if the first element that is not None or empty (if it is a sequence) has to be casted.
    If the first element needs to be casted, then all the elements of the list will be casted,
    otherwise they'll stay the same.
    This trick allows to cast objects that contain tokenizers outputs without iterating over every single token for example.

    Args:
        obj: the object (nested struct) to cast
        only_1d_for_numpy (bool, default ``False``): whether to keep the full multi-dim tensors as multi-dim numpy arrays, or convert them to
            nested lists of 1-dimensional numpy arrays. This can be useful to keep only 1-d arrays to instantiate Arrow arrays.
            Indeed Arrow only support converting 1-dimensional array values.
        optimize_list_casting (bool, default ``True``): whether to optimize list casting by checking the first non-null element to see if it needs to be casted
            and if it doesn't, not checking the rest of the list elements.

    Returns:
        casted_obj: the casted object
    """
    # Public wrapper: drop the internal has_changed flag.
    casted_obj, _has_changed = _cast_to_python_objects(
        obj, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
    )
    return casted_obj
+
+
@dataclass
class Value:
    """Scalar feature value of a particular data type.

    Supported `dtype` values:

    - `null`
    - `bool`
    - `int8`, `int16`, `int32`, `int64`
    - `uint8`, `uint16`, `uint32`, `uint64`
    - `float16`, `float32` (alias float), `float64` (alias double)
    - `time32[(s|ms)]`, `time64[(us|ns)]`
    - `timestamp[(s|ms|us|ns)]`, `timestamp[(s|ms|us|ns), tz=(tzstring)]`
    - `date32`, `date64`
    - `duration[(s|ms|us|ns)]`
    - `decimal128(precision, scale)`, `decimal256(precision, scale)`
    - `binary`, `large_binary`
    - `string`, `large_string`

    Example:

    ```py
    >>> from datasets import Features
    >>> features = Features({'stars': Value(dtype='int32')})
    >>> features
    {'stars': Value(dtype='int32', id=None)}
    ```
    """

    dtype: str
    id: Optional[str] = None
    # Automatically constructed
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Value", init=False, repr=False)

    def __post_init__(self):
        # Normalize pyarrow alias names to the canonical datasets dtypes.
        if self.dtype == "double":
            self.dtype = "float64"
        elif self.dtype == "float":
            self.dtype = "float32"
        self.pa_type = string_to_arrow(self.dtype)

    def __call__(self):
        # The pyarrow storage type of this feature.
        return self.pa_type

    def encode_example(self, value):
        """Cast `value` to the Python type matching this feature's arrow dtype."""
        casters = (
            (pa.types.is_boolean, bool),
            (pa.types.is_integer, int),
            (pa.types.is_floating, float),
            (pa.types.is_string, str),
        )
        for type_check, caster in casters:
            if type_check(self.pa_type):
                return caster(value)
        # Other dtypes (binary, temporal, decimal, ...) are stored as-is.
        return value
+
+
class _ArrayXD:
    """Shared behavior for the fixed-rank ArrayXD feature dataclasses."""

    def __post_init__(self):
        # Normalize any sequence shape (e.g. a list) to a tuple.
        self.shape = tuple(self.shape)

    def __call__(self):
        # Resolve e.g. Array2D -> Array2DExtensionType by name at call time.
        extension_cls = globals()[type(self).__name__ + "ExtensionType"]
        return extension_cls(self.shape, self.dtype)

    def encode_example(self, value):
        # Values are stored as-is; conversion happens at arrow-write time.
        return value
+
+
@dataclass
class Array2D(_ArrayXD):
    """Two-dimensional array feature.

    Args:
        shape (`tuple`):
            Size of each dimension.
        dtype (`str`):
            Name of the value data type.

    Example:

    ```py
    >>> from datasets import Features
    >>> features = Features({'x': Array2D(shape=(1, 3), dtype='int32')})
    ```
    """

    shape: tuple
    dtype: str
    id: Optional[str] = None
    # Automatically constructed
    _type: str = field(default="Array2D", init=False, repr=False)
+
+
@dataclass
class Array3D(_ArrayXD):
    """Three-dimensional array feature.

    Args:
        shape (`tuple`):
            Size of each dimension.
        dtype (`str`):
            Name of the value data type.

    Example:

    ```py
    >>> from datasets import Features
    >>> features = Features({'x': Array3D(shape=(1, 2, 3), dtype='int32')})
    ```
    """

    shape: tuple
    dtype: str
    id: Optional[str] = None
    # Automatically constructed
    _type: str = field(default="Array3D", init=False, repr=False)
+
+
@dataclass
class Array4D(_ArrayXD):
    """Four-dimensional array feature.

    Args:
        shape (`tuple`):
            Size of each dimension.
        dtype (`str`):
            Name of the value data type.

    Example:

    ```py
    >>> from datasets import Features
    >>> features = Features({'x': Array4D(shape=(1, 2, 2, 3), dtype='int32')})
    ```
    """

    shape: tuple
    dtype: str
    id: Optional[str] = None
    # Automatically constructed
    _type: str = field(default="Array4D", init=False, repr=False)
+
+
@dataclass
class Array5D(_ArrayXD):
    """Five-dimensional array feature.

    Args:
        shape (`tuple`):
            Size of each dimension.
        dtype (`str`):
            Name of the value data type.

    Example:

    ```py
    >>> from datasets import Features
    >>> features = Features({'x': Array5D(shape=(1, 2, 2, 3, 3), dtype='int32')})
    ```
    """

    shape: tuple
    dtype: str
    id: Optional[str] = None
    # Automatically constructed
    _type: str = field(default="Array5D", init=False, repr=False)
+
+
class _ArrayXDExtensionType(pa.ExtensionType):
    """Base pyarrow extension type backing the ArrayXD features.

    Subclasses set `ndims`; storage is `ndims` nested pyarrow lists of the
    value type. Only the first dimension may have a dynamic (None) size.
    """

    ndims: Optional[int] = None

    def __init__(self, shape: tuple, dtype: str):
        if self.ndims is None or self.ndims <= 1:
            raise ValueError("You must instantiate an array type with a value for dim that is > 1")
        if len(shape) != self.ndims:
            raise ValueError(f"shape={shape} and ndims={self.ndims} don't match")
        if any(shape[dim] is None for dim in range(1, self.ndims)):
            raise ValueError(f"Support only dynamic size on first dimension. Got: {shape}")
        self.shape = tuple(shape)
        self.value_type = dtype
        self.storage_dtype = self._generate_dtype(self.value_type)
        pa.ExtensionType.__init__(self, self.storage_dtype, f"{self.__class__.__module__}.{self.__class__.__name__}")

    def __arrow_ext_serialize__(self):
        # The type round-trips through IPC as a (shape, value_type) JSON pair.
        return json.dumps((self.shape, self.value_type)).encode()

    @classmethod
    def __arrow_ext_deserialize__(cls, storage_type, serialized):
        return cls(*json.loads(serialized))

    # This was added to pa.ExtensionType in pyarrow >= 13.0.0
    def __reduce__(self):
        return self.__arrow_ext_deserialize__, (self.storage_type, self.__arrow_ext_serialize__())

    def __hash__(self):
        return hash((self.__class__, self.shape, self.value_type))

    def __arrow_ext_class__(self):
        return ArrayExtensionArray

    def _generate_dtype(self, dtype):
        # Wrap the scalar type in one pa.list_ per dimension.
        # Don't specify the size of the list, since fixed length list arrays have issues
        # being validated after slicing in pyarrow 0.17.1
        nested_type = string_to_arrow(dtype)
        for _ in self.shape:
            nested_type = pa.list_(nested_type)
        return nested_type

    def to_pandas_dtype(self):
        return PandasArrayExtensionDtype(self.value_type)
+
+
class Array2DExtensionType(_ArrayXDExtensionType):
    """Pyarrow extension type for 2-dimensional arrays."""

    ndims = 2
+
+
class Array3DExtensionType(_ArrayXDExtensionType):
    """Pyarrow extension type for 3-dimensional arrays."""

    ndims = 3
+
+
class Array4DExtensionType(_ArrayXDExtensionType):
    """Pyarrow extension type for 4-dimensional arrays."""

    ndims = 4
+
+
class Array5DExtensionType(_ArrayXDExtensionType):
    """Pyarrow extension type for 5-dimensional arrays."""

    ndims = 5
+
+
# Register the extension types for deserialization
# NOTE(review): the concrete shapes/dtype below appear to be placeholder
# instances whose role is only to register each extension name — confirm.
pa.register_extension_type(Array2DExtensionType((1, 2), "int64"))
pa.register_extension_type(Array3DExtensionType((1, 2, 3), "int64"))
pa.register_extension_type(Array4DExtensionType((1, 2, 3, 4), "int64"))
pa.register_extension_type(Array5DExtensionType((1, 2, 3, 4, 5), "int64"))
+
+
def _is_zero_copy_only(pa_type: pa.DataType, unnest: bool = False) -> bool:
    """Return the `zero_copy_only` value to use when converting a pyarrow array to numpy.

    Zero copy is available for all primitive types except booleans and temporal
    types (date, time, timestamp or duration), whose physical representation in
    arrow differs from numpy's.

    See:
    - https://github.com/wesm/arrow/blob/c07b9b48cf3e0bbbab493992a492ae47e5b04cad/python/pyarrow/types.pxi#L821
    - https://arrow.apache.org/docs/python/generated/pyarrow.Array.html#pyarrow.Array.to_numpy
    - https://issues.apache.org/jira/browse/ARROW-2871?jql=text%20~%20%22boolean%20to_numpy%22
    """
    if unnest:
        # Drill down to the innermost value type of nested lists.
        while pa.types.is_list(pa_type):
            pa_type = pa_type.value_type
    return pa.types.is_primitive(pa_type) and not (pa.types.is_boolean(pa_type) or pa.types.is_temporal(pa_type))
+
+
class ArrayExtensionArray(pa.ExtensionArray):
    """Extension array for the ArrayXD extension types, with numpy conversion helpers."""

    def __array__(self):
        zero_copy_only = _is_zero_copy_only(self.storage.type, unnest=True)
        return self.to_numpy(zero_copy_only=zero_copy_only)

    def __getitem__(self, i):
        # Indexing goes through the underlying (nested list) storage array.
        return self.storage[i]

    def to_numpy(self, zero_copy_only=True):
        """Convert to numpy; null entries become NaN (forcing float64 in the fixed-shape case)."""
        storage: pa.ListArray = self.storage
        null_mask = storage.is_null().to_numpy(zero_copy_only=False)

        if self.type.shape[0] is not None:
            # Fixed first dimension: flatten all levels, reshape once, then
            # re-insert the null rows at their original positions.
            size = 1
            # NOTE(review): `size` is computed but never used afterwards — looks like leftover code.
            # Positions (in the null-stripped array) where null rows must be re-inserted.
            null_indices = np.arange(len(storage))[null_mask] - np.arange(np.sum(null_mask))

            for i in range(self.type.ndims):
                size *= self.type.shape[i]
                storage = storage.flatten()
            numpy_arr = storage.to_numpy(zero_copy_only=zero_copy_only)
            numpy_arr = numpy_arr.reshape(len(self) - len(null_indices), *self.type.shape)

            if len(null_indices):
                # np.insert with np.nan requires a float dtype, hence the astype.
                numpy_arr = np.insert(numpy_arr.astype(np.float64), null_indices, np.nan, axis=0)

        else:
            # Dynamic first dimension: convert element by element, using the
            # list offsets to recover each row's first-dimension length.
            shape = self.type.shape
            ndims = self.type.ndims
            arrays = []
            first_dim_offsets = np.array([off.as_py() for off in storage.offsets])
            for i, is_null in enumerate(null_mask):
                if is_null:
                    arrays.append(np.nan)
                else:
                    storage_el = storage[i : i + 1]
                    first_dim = first_dim_offsets[i + 1] - first_dim_offsets[i]
                    # flatten storage
                    for _ in range(ndims):
                        storage_el = storage_el.flatten()

                    numpy_arr = storage_el.to_numpy(zero_copy_only=zero_copy_only)
                    arrays.append(numpy_arr.reshape(first_dim, *shape[1:]))

            if len(np.unique(np.diff(first_dim_offsets))) > 1:
                # ragged: rows have different first-dim lengths, so pack them
                # into a 1-d object array instead of stacking.
                numpy_arr = np.empty(len(arrays), dtype=object)
                numpy_arr[:] = arrays
            else:
                numpy_arr = np.array(arrays)

        return numpy_arr

    def to_pylist(self):
        zero_copy_only = _is_zero_copy_only(self.storage.type, unnest=True)
        numpy_arr = self.to_numpy(zero_copy_only=zero_copy_only)
        if self.type.shape[0] is None and numpy_arr.dtype == object:
            # Ragged case: each object entry is itself an ndarray.
            return [arr.tolist() for arr in numpy_arr.tolist()]
        else:
            return numpy_arr.tolist()
+
+
class PandasArrayExtensionDtype(PandasExtensionDtype):
    """Pandas extension dtype wrapping the value type of an ArrayXD column."""

    _metadata = "value_type"

    def __init__(self, value_type: Union["PandasArrayExtensionDtype", np.dtype]):
        self._value_type = value_type

    def __from_arrow__(self, array: Union[pa.Array, pa.ChunkedArray]):
        # Chunked arrays are concatenated at the storage level before conversion.
        if isinstance(array, pa.ChunkedArray):
            array = array.type.wrap_array(pa.concat_arrays([chunk.storage for chunk in array.chunks]))
        zero_copy_only = _is_zero_copy_only(array.storage.type, unnest=True)
        return PandasArrayExtensionArray(array.to_numpy(zero_copy_only=zero_copy_only))

    @classmethod
    def construct_array_type(cls):
        return PandasArrayExtensionArray

    @property
    def type(self) -> type:
        # Scalar type of the column's elements.
        return np.ndarray

    @property
    def kind(self) -> str:
        # 'O' = object, following the numpy dtype kind convention.
        return "O"

    @property
    def name(self) -> str:
        return f"array[{self.value_type}]"

    @property
    def value_type(self) -> np.dtype:
        return self._value_type
+
+
class PandasArrayExtensionArray(PandasExtensionArray):
    """Pandas extension array storing N-d numpy data for ArrayXD columns.

    The first axis of the wrapped ndarray indexes the rows; the remaining axes
    (or object-dtype entries, in the ragged case) hold each row's array payload.
    """

    def __init__(self, data: np.ndarray, copy: bool = False):
        self._data = data if not copy else np.array(data)
        self._dtype = PandasArrayExtensionDtype(data.dtype)

    def __array__(self, dtype=None):
        """
        Convert to NumPy Array.
        Note that Pandas expects a 1D array when dtype is set to object.
        But for other dtypes, the returned shape is the same as the one of ``data``.

        More info about pandas 1D requirement for PandasExtensionArray here:
        https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.api.extensions.ExtensionArray.html#pandas.api.extensions.ExtensionArray

        """
        if dtype == object:
            # Wrap each row in a 1-d object array so pandas sees len(self) entries.
            out = np.empty(len(self._data), dtype=object)
            for i in range(len(self._data)):
                out[i] = self._data[i]
            return out
        if dtype is None:
            return self._data
        else:
            return self._data.astype(dtype)

    def copy(self, deep: bool = False) -> "PandasArrayExtensionArray":
        # NOTE(review): `deep` is ignored; the data is always copied.
        return PandasArrayExtensionArray(self._data, copy=True)

    @classmethod
    def _from_sequence(
        cls, scalars, dtype: Optional[PandasArrayExtensionDtype] = None, copy: bool = False
    ) -> "PandasArrayExtensionArray":
        # Homogeneous ndarrays (same shape and dtype) can be stacked into one
        # dense array; anything else falls back to a 1-d object array.
        if len(scalars) > 1 and all(
            isinstance(x, np.ndarray) and x.shape == scalars[0].shape and x.dtype == scalars[0].dtype for x in scalars
        ):
            data = np.array(scalars, dtype=dtype if dtype is None else dtype.value_type, copy=copy)
        else:
            data = np.empty(len(scalars), dtype=object)
            data[:] = scalars
        return cls(data, copy=copy)

    @classmethod
    def _concat_same_type(cls, to_concat: Sequence_["PandasArrayExtensionArray"]) -> "PandasArrayExtensionArray":
        # Same dense-vs-object split as _from_sequence, applied to whole arrays.
        if len(to_concat) > 1 and all(
            va._data.shape == to_concat[0]._data.shape and va._data.dtype == to_concat[0]._data.dtype
            for va in to_concat
        ):
            data = np.vstack([va._data for va in to_concat])
        else:
            data = np.empty(len(to_concat), dtype=object)
            data[:] = [va._data for va in to_concat]
        return cls(data, copy=False)

    @property
    def dtype(self) -> PandasArrayExtensionDtype:
        return self._dtype

    @property
    def nbytes(self) -> int:
        return self._data.nbytes

    def isna(self) -> np.ndarray:
        # A row counts as missing if ANY of its values is NA.
        return np.array([pd.isna(arr).any() for arr in self._data])

    def __setitem__(self, key: Union[int, slice, np.ndarray], value: Any) -> None:
        # The array is read-only; pandas mutation APIs are not supported.
        raise NotImplementedError()

    def __getitem__(self, item: Union[int, slice, np.ndarray]) -> Union[np.ndarray, "PandasArrayExtensionArray"]:
        if isinstance(item, int):
            return self._data[item]
        return PandasArrayExtensionArray(self._data[item], copy=False)

    def take(
        self, indices: Sequence_[int], allow_fill: bool = False, fill_value: Optional[Any] = None
    ) -> "PandasArrayExtensionArray":
        """Take rows by position; with `allow_fill`, -1 indices are filled with `fill_value`."""
        indices: np.ndarray = np.asarray(indices, dtype=int)
        if allow_fill:
            fill_value = (
                self.dtype.na_value if fill_value is None else np.asarray(fill_value, dtype=self.dtype.value_type)
            )
            mask = indices == -1
            if (indices < -1).any():
                raise ValueError("Invalid value in `indices`, must be all >= -1 for `allow_fill` is True")
            elif len(self) > 0:
                # Non-empty: fall through to the take below; -1 slots are patched afterwards.
                pass
            elif not np.all(mask):
                # Empty source array: only -1 (fill) indices are valid.
                raise IndexError("Invalid take for empty PandasArrayExtensionArray, must be all -1.")
            else:
                data = np.array([fill_value] * len(indices), dtype=self.dtype.value_type)
                return PandasArrayExtensionArray(data, copy=False)
        took = self._data.take(indices, axis=0)
        if allow_fill and mask.any():
            took[mask] = [fill_value] * np.sum(mask)
        return PandasArrayExtensionArray(took, copy=False)

    def __len__(self) -> int:
        return len(self._data)

    def __eq__(self, other) -> np.ndarray:
        # NOTE(review): returns the scalar result of .all(), not an element-wise
        # ndarray as the annotation suggests — confirm what callers expect.
        if not isinstance(other, PandasArrayExtensionArray):
            raise NotImplementedError(f"Invalid type to compare to: {type(other)}")
        return (self._data == other._data).all()
+
+
def pandas_types_mapper(dtype):
    """Map ArrayXD extension types to their pandas extension dtype (None otherwise)."""
    if not isinstance(dtype, _ArrayXDExtensionType):
        return None
    return PandasArrayExtensionDtype(dtype.value_type)
+
+
+@dataclass
+class ClassLabel:
+ """Feature type for integer class labels.
+
+ There are 3 ways to define a `ClassLabel`, which correspond to the 3 arguments:
+
+ * `num_classes`: Create 0 to (num_classes-1) labels.
+ * `names`: List of label strings.
+ * `names_file`: File containing the list of labels.
+
+ Under the hood the labels are stored as integers.
+ You can use negative integers to represent unknown/missing labels.
+
+ Args:
+ num_classes (`int`, *optional*):
+ Number of classes. All labels must be < `num_classes`.
+ names (`list` of `str`, *optional*):
+ String names for the integer classes.
+ The order in which the names are provided is kept.
+ names_file (`str`, *optional*):
+ Path to a file with names for the integer classes, one per line.
+
+ Example:
+
+ ```py
+ >>> from datasets import Features
+ >>> features = Features({'label': ClassLabel(num_classes=3, names=['bad', 'ok', 'good'])})
+ >>> features
+ {'label': ClassLabel(num_classes=3, names=['bad', 'ok', 'good'], id=None)}
+ ```
+ """
+
+ num_classes: InitVar[Optional[int]] = None # Pseudo-field: ignored by asdict/fields when converting to/from dict
+ names: List[str] = None
+ names_file: InitVar[Optional[str]] = None # Pseudo-field: ignored by asdict/fields when converting to/from dict
+ id: Optional[str] = None
+ # Automatically constructed
+ dtype: ClassVar[str] = "int64"
+ pa_type: ClassVar[Any] = pa.int64()
+ _str2int: ClassVar[Dict[str, int]] = None
+ _int2str: ClassVar[Dict[int, int]] = None
+ _type: str = field(default="ClassLabel", init=False, repr=False)
+
    def __post_init__(self, num_classes, names_file):
        # InitVar pseudo-fields are persisted on the instance (ignored by asdict/fields).
        self.num_classes = num_classes
        self.names_file = names_file
        if self.names_file is not None and self.names is not None:
            raise ValueError("Please provide either names or names_file but not both.")
        # Set self.names
        if self.names is None:
            if self.names_file is not None:
                self.names = self._load_names_from_file(self.names_file)
            elif self.num_classes is not None:
                # Default names are the stringified ids "0".."num_classes-1".
                self.names = [str(i) for i in range(self.num_classes)]
            else:
                raise ValueError("Please provide either num_classes, names or names_file.")
        elif not isinstance(self.names, SequenceABC):
            raise TypeError(f"Please provide names as a list, is {type(self.names)}")
        # Set self.num_classes
        if self.num_classes is None:
            self.num_classes = len(self.names)
        elif self.num_classes != len(self.names):
            raise ValueError(
                "ClassLabel number of names do not match the defined num_classes. "
                f"Got {len(self.names)} names VS {self.num_classes} num_classes"
            )
        # Prepare mappings
        # NOTE(review): despite the class-level ClassVar[Dict] annotations,
        # _int2str is built as a list indexed by label id and _str2int as a name->id dict.
        self._int2str = [str(name) for name in self.names]
        self._str2int = {name: i for i, name in enumerate(self._int2str)}
        if len(self._int2str) != len(self._str2int):
            raise ValueError("Some label names are duplicated. Each label name should be unique.")
+
+ def __call__(self):
+ return self.pa_type
+
+ def str2int(self, values: Union[str, Iterable]) -> Union[int, Iterable]:
+ """Conversion class name `string` => `integer`.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="train")
+ >>> ds.features["label"].str2int('neg')
+ 0
+ ```
+ """
+ if not isinstance(values, str) and not isinstance(values, Iterable):
+ raise ValueError(
+ f"Values {values} should be a string or an Iterable (list, numpy array, pytorch, tensorflow tensors)"
+ )
+ return_list = True
+ if isinstance(values, str):
+ values = [values]
+ return_list = False
+
+ output = [self._strval2int(value) for value in values]
+ return output if return_list else output[0]
+
+ def _strval2int(self, value: str) -> int:
+ failed_parse = False
+ value = str(value)
+ # first attempt - raw string value
+ int_value = self._str2int.get(value)
+ if int_value is None:
+ # second attempt - strip whitespace
+ int_value = self._str2int.get(value.strip())
+ if int_value is None:
+ # third attempt - convert str to int
+ try:
+ int_value = int(value)
+ except ValueError:
+ failed_parse = True
+ else:
+ if int_value < -1 or int_value >= self.num_classes:
+ failed_parse = True
+ if failed_parse:
+ raise ValueError(f"Invalid string class label {value}")
+ return int_value
+
+ def int2str(self, values: Union[int, Iterable]) -> Union[str, Iterable]:
+ """Conversion `integer` => class name `string`.
+
+ Regarding unknown/missing labels: passing negative integers raises `ValueError`.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="train")
+ >>> ds.features["label"].int2str(0)
+ 'neg'
+ ```
+ """
+ if not isinstance(values, int) and not isinstance(values, Iterable):
+ raise ValueError(
+ f"Values {values} should be an integer or an Iterable (list, numpy array, pytorch, tensorflow tensors)"
+ )
+ return_list = True
+ if isinstance(values, int):
+ values = [values]
+ return_list = False
+
+ for v in values:
+ if not 0 <= v < self.num_classes:
+ raise ValueError(f"Invalid integer class label {v:d}")
+
+ output = [self._int2str[int(v)] for v in values]
+ return output if return_list else output[0]
+
+ def encode_example(self, example_data):
+ if self.num_classes is None:
+ raise ValueError(
+ "Trying to use ClassLabel feature with undefined number of class. "
+ "Please set ClassLabel.names or num_classes."
+ )
+
+ # If a string is given, convert to associated integer
+ if isinstance(example_data, str):
+ example_data = self.str2int(example_data)
+
+ # Allowing -1 to mean no label.
+ if not -1 <= example_data < self.num_classes:
+ raise ValueError(f"Class label {example_data:d} greater than configured num_classes {self.num_classes}")
+ return example_data
+
+ def cast_storage(self, storage: Union[pa.StringArray, pa.IntegerArray]) -> pa.Int64Array:
+ """Cast an Arrow array to the `ClassLabel` arrow storage type.
+ The Arrow types that can be converted to the `ClassLabel` pyarrow storage type are:
+
+ - `pa.string()`
+ - `pa.int()`
+
+ Args:
+ storage (`Union[pa.StringArray, pa.IntegerArray]`):
+ PyArrow array to cast.
+
+ Returns:
+ `pa.Int64Array`: Array in the `ClassLabel` arrow storage type.
+ """
+ if isinstance(storage, pa.IntegerArray) and len(storage) > 0:
+ min_max = pc.min_max(storage).as_py()
+ if min_max["max"] is not None and min_max["max"] >= self.num_classes:
+ raise ValueError(
+ f"Class label {min_max['max']} greater than configured num_classes {self.num_classes}"
+ )
+ elif isinstance(storage, pa.StringArray):
+ storage = pa.array(
+ [self._strval2int(label) if label is not None else None for label in storage.to_pylist()]
+ )
+ return array_cast(storage, self.pa_type)
+
+ @staticmethod
+ def _load_names_from_file(names_filepath):
+ with open(names_filepath, encoding="utf-8") as f:
+ return [name.strip() for name in f.read().split("\n") if name.strip()] # Filter empty names
+
+
@dataclass
class Sequence:
    """Construct a list of feature from a single type or a dict of types.
    Mostly here for compatibility with tfds.

    Args:
        feature:
            A list of features of a single type or a dictionary of types.
        length (`int`):
            Length of the sequence.

    Example:

    ```py
    >>> from datasets import Features, Sequence, Value, ClassLabel
    >>> features = Features({'post': Sequence(feature={'text': Value(dtype='string'), 'upvotes': Value(dtype='int32'), 'label': ClassLabel(num_classes=2, names=['hot', 'cold'])})})
    >>> features
    {'post': Sequence(feature={'text': Value(dtype='string', id=None), 'upvotes': Value(dtype='int32', id=None), 'label': ClassLabel(num_classes=2, names=['hot', 'cold'], id=None)}, length=-1, id=None)}
    ```
    """

    # Inner feature type (or dict of inner feature types, see class docstring)
    feature: Any
    # -1 means variable length; >= 0 maps to a fixed-size Arrow list
    length: int = -1
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "list"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Sequence", init=False, repr=False)
+
+
# Union of every type accepted as a feature specification (plain containers
# plus the feature dataclasses defined in this module).
FeatureType = Union[
    dict,
    list,
    tuple,
    Value,
    ClassLabel,
    Translation,
    TranslationVariableLanguages,
    Sequence,
    Array2D,
    Array3D,
    Array4D,
    Array5D,
    Audio,
    Image,
]
+
+
def _check_non_null_non_empty_recursive(obj, schema: Optional[FeatureType] = None) -> bool:
    """
    Check if the object is not None.
    If the object is a list or a tuple, recursively check the first element of the sequence and stop if at any point the first element is not a sequence or is an empty sequence.
    """
    if obj is None:
        return False
    is_sequence_schema = schema is None or isinstance(schema, (list, tuple, Sequence))
    if isinstance(obj, (list, tuple)) and is_sequence_schema:
        if not obj:
            # Empty sequence counts as "empty", hence False.
            return False
        # Descend into the schema alongside the data.
        if schema is None:
            sub_schema = None
        elif isinstance(schema, (list, tuple)):
            sub_schema = schema[0]
        else:
            sub_schema = schema.feature
        return _check_non_null_non_empty_recursive(obj[0], sub_schema)
    return True
+
+
def get_nested_type(schema: FeatureType) -> pa.DataType:
    """
    get_nested_type() converts a datasets.FeatureType into a pyarrow.DataType, and acts as the inverse of
    generate_from_arrow_type().

    It performs double-duty as the implementation of Features.type and handles the conversion of
    datasets.Feature->pa.struct
    """
    # Nested structures: we allow dict, list/tuples, sequences.
    # Features is a dict subclass and is handled identically; dict order is
    # deterministic since Python 3.6 and struct field order matters, so no sorting.
    if isinstance(schema, (Features, dict)):
        return pa.struct({name: get_nested_type(schema[name]) for name in schema})
    if isinstance(schema, (list, tuple)):
        if len(schema) != 1:
            raise ValueError("When defining list feature, you should just provide one example of the inner type")
        return pa.list_(get_nested_type(schema[0]))
    if isinstance(schema, Sequence):
        inner_type = get_nested_type(schema.feature)
        if isinstance(schema.feature, dict):
            # We allow to reverse list of dict => dict of list for compatibility with tfds
            return pa.struct({f.name: pa.list_(f.type, schema.length) for f in inner_type})
        return pa.list_(inner_type, schema.length)

    # Other objects are callable which returns their data type (ClassLabel, Array2D, Translation, Arrow datatype creation methods)
    return schema()
+
+
def encode_nested_example(schema, obj, level=0):
    """Encode a nested example.
    This is used since some features (in particular ClassLabel) have some logic during encoding.

    To avoid iterating over possibly long lists, it first checks (recursively) if the first element that is not None or empty (if it is a sequence) has to be encoded.
    If the first element needs to be encoded, then all the elements of the list will be encoded, otherwise they'll stay the same.
    """
    # Nested structures: we allow dict, list/tuples, sequences
    if isinstance(schema, dict):
        # None is only allowed for nested dicts (level > 0), not the top-level example.
        if level == 0 and obj is None:
            raise ValueError("Got None but expected a dictionary instead")
        return (
            {k: encode_nested_example(schema[k], obj.get(k), level=level + 1) for k in schema}
            if obj is not None
            else None
        )

    elif isinstance(schema, (list, tuple)):
        sub_schema = schema[0]
        if obj is None:
            return None
        else:
            if len(obj) > 0:
                # Find the first element that is non-null/non-empty; it stands in
                # for the whole list when deciding whether encoding is needed.
                for first_elmt in obj:
                    if _check_non_null_non_empty_recursive(first_elmt, sub_schema):
                        break
                # Only encode every element if encoding actually changes the probe element.
                if encode_nested_example(sub_schema, first_elmt, level=level + 1) != first_elmt:
                    return [encode_nested_example(sub_schema, o, level=level + 1) for o in obj]
            return list(obj)
    elif isinstance(schema, Sequence):
        if obj is None:
            return None
        # We allow to reverse list of dict => dict of list for compatiblity with tfds
        if isinstance(schema.feature, dict):
            # dict of list to fill
            list_dict = {}
            if isinstance(obj, (list, tuple)):
                # obj is a list of dict
                for k in schema.feature:
                    list_dict[k] = [encode_nested_example(schema.feature[k], o.get(k), level=level + 1) for o in obj]
                return list_dict
            else:
                # obj is a single dict
                for k in schema.feature:
                    # Missing keys become None columns.
                    list_dict[k] = (
                        [encode_nested_example(schema.feature[k], o, level=level + 1) for o in obj[k]]
                        if k in obj
                        else None
                    )
                return list_dict
        # schema.feature is not a dict
        if isinstance(obj, str):  # don't interpret a string as a list
            raise ValueError(f"Got a string but expected a list instead: '{obj}'")
        else:
            if len(obj) > 0:
                # Same first-element probe as the list/tuple branch above.
                for first_elmt in obj:
                    if _check_non_null_non_empty_recursive(first_elmt, schema.feature):
                        break
                # be careful when comparing tensors here
                if (
                    not isinstance(first_elmt, list)
                    or encode_nested_example(schema.feature, first_elmt, level=level + 1) != first_elmt
                ):
                    return [encode_nested_example(schema.feature, o, level=level + 1) for o in obj]
            return list(obj)
    # Object with special encoding:
    # ClassLabel will convert from string to int, TranslationVariableLanguages does some checks
    elif isinstance(schema, (Audio, Image, ClassLabel, TranslationVariableLanguages, Value, _ArrayXD)):
        return schema.encode_example(obj) if obj is not None else None
    # Other object should be directly convertible to a native Arrow type (like Translation)
    return obj
+
+
def decode_nested_example(schema, obj, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None):
    """Decode a nested example.
    This is used since some features (in particular Audio and Image) have some logic during decoding.

    To avoid iterating over possibly long lists, it first checks (recursively) if the first element that is not None or empty (if it is a sequence) has to be decoded.
    If the first element needs to be decoded, then all the elements of the list will be decoded, otherwise they'll stay the same.

    Fix: `token_per_repo_id` is now forwarded through every recursive call. Previously the
    token was dropped at the first nesting level, so Audio/Image features nested inside
    dicts, lists or Sequences could not read files from private repositories.
    """
    # Nested structures: we allow dict, list/tuples, sequences
    if isinstance(schema, dict):
        return (
            {
                k: decode_nested_example(sub_schema, sub_obj, token_per_repo_id=token_per_repo_id)
                for k, (sub_schema, sub_obj) in zip_dict(schema, obj)
            }
            if obj is not None
            else None
        )
    elif isinstance(schema, (list, tuple)):
        sub_schema = schema[0]
        if obj is None:
            return None
        else:
            if len(obj) > 0:
                # Probe the first non-null/non-empty element: only decode the whole
                # list if decoding actually changes the probe element.
                for first_elmt in obj:
                    if _check_non_null_non_empty_recursive(first_elmt, sub_schema):
                        break
                if decode_nested_example(sub_schema, first_elmt, token_per_repo_id=token_per_repo_id) != first_elmt:
                    return [decode_nested_example(sub_schema, o, token_per_repo_id=token_per_repo_id) for o in obj]
            return list(obj)
    elif isinstance(schema, Sequence):
        # We allow to reverse list of dict => dict of list for compatiblity with tfds
        if isinstance(schema.feature, dict):
            return {
                k: decode_nested_example([schema.feature[k]], obj[k], token_per_repo_id=token_per_repo_id)
                for k in schema.feature
            }
        else:
            return decode_nested_example([schema.feature], obj, token_per_repo_id=token_per_repo_id)
    # Object with special decoding:
    elif isinstance(schema, (Audio, Image)):
        # we pass the token to read and decode files from private repositories in streaming mode
        if obj is not None and schema.decode:
            return schema.decode_example(obj, token_per_repo_id=token_per_repo_id)
    return obj
+
+
def generate_from_dict(obj: Any):
    """Regenerate the nested feature object from a deserialized dict.
    We use the '_type' fields to get the dataclass name to load.

    generate_from_dict is the recursive helper for Features.from_dict, and allows for a convenient constructor syntax
    to define features from deserialized JSON dictionaries. This function is used in particular when deserializing
    a :class:`DatasetInfo` that was dumped to a JSON object. This acts as an analogue to
    :meth:`Features.from_arrow_schema` and handles the recursive field-by-field instantiation, but doesn't require any
    mapping to/from pyarrow, except for the fact that it takes advantage of the mapping of pyarrow primitive dtypes
    that :class:`Value` automatically performs.
    """
    # Lists are regenerated element by element.
    if isinstance(obj, list):
        return [generate_from_dict(element) for element in obj]
    # Plain nested dict (no "_type" tag, or a column literally named "_type"): recurse per key.
    if "_type" not in obj or isinstance(obj["_type"], dict):
        return {key: generate_from_dict(value) for key, value in obj.items()}
    # Tagged dict: look up the feature dataclass by name and instantiate it.
    obj = dict(obj)
    class_type = globals()[obj.pop("_type")]

    if class_type == Sequence:
        return Sequence(feature=generate_from_dict(obj["feature"]), length=obj.get("length", -1))

    # Drop any keys that are not actual dataclass fields before instantiating.
    valid_fields = {f.name for f in fields(class_type)}
    kwargs = {key: value for key, value in obj.items() if key in valid_fields}
    return class_type(**kwargs)
+
+
def generate_from_arrow_type(pa_type: pa.DataType) -> FeatureType:
    """
    generate_from_arrow_type accepts an arrow DataType and returns a datasets FeatureType to be used as the type for
    a single field.

    This is the high-level arrow->datasets type conversion and is inverted by get_nested_type().

    This operates at the individual *field* level, whereas Features.from_arrow_schema() operates at the
    full schema level and holds the methods that represent the bijection from Features<->pyarrow.Schema
    """
    if isinstance(pa_type, pa.StructType):
        # struct -> plain dict of sub-features
        return {field.name: generate_from_arrow_type(field.type) for field in pa_type}
    elif isinstance(pa_type, pa.FixedSizeListType):
        # fixed-size list -> Sequence with an explicit length
        return Sequence(feature=generate_from_arrow_type(pa_type.value_type), length=pa_type.list_size)
    elif isinstance(pa_type, pa.ListType):
        feature = generate_from_arrow_type(pa_type.value_type)
        # Nested containers use the plain python-list form; leaf features use Sequence.
        if isinstance(feature, (dict, tuple, list)):
            return [feature]
        return Sequence(feature=feature)
    elif isinstance(pa_type, _ArrayXDExtensionType):
        # ndims is 2..5; indices 0 and 1 are unused placeholders.
        array_feature = [None, None, Array2D, Array3D, Array4D, Array5D][pa_type.ndims]
        return array_feature(shape=pa_type.shape, dtype=pa_type.value_type)
    elif isinstance(pa_type, pa.DictionaryType):
        raise NotImplementedError  # TODO(thom) this will need access to the dictionary as well (for labels). I.e. to the py_table
    elif isinstance(pa_type, pa.DataType):
        # any remaining primitive type -> Value (must come after the subtype checks above)
        return Value(dtype=_arrow_to_datasets_dtype(pa_type))
    else:
        raise ValueError(f"Cannot convert {pa_type} to a Feature type.")
+
+
def numpy_to_pyarrow_listarray(arr: np.ndarray, type: pa.DataType = None) -> pa.ListArray:
    """Build a PyArrow ListArray from a multidimensional NumPy array"""
    arr = np.array(arr)
    # Flatten once, then wrap one ListArray nesting level per extra dimension,
    # starting from the innermost dimension.
    values = pa.array(arr.flatten(), type=type)
    for i in range(arr.ndim - 1):
        # Number of lists at this nesting level = product of the leading dims.
        n_offsets = reduce(mul, arr.shape[: arr.ndim - i - 1], 1)
        # Every list at this level has the same length: the size of the dim being wrapped.
        step_offsets = arr.shape[arr.ndim - i - 1]
        # NOTE(review): offsets are int32, which caps each level at ~2**31 flattened
        # elements — confirm inputs stay below this.
        offsets = pa.array(np.arange(n_offsets + 1) * step_offsets, type=pa.int32())
        values = pa.ListArray.from_arrays(offsets, values)
    return values
+
+
def list_of_pa_arrays_to_pyarrow_listarray(l_arr: List[Optional[pa.Array]]) -> pa.ListArray:
    """Concatenate a list of (possibly None) pa.Array into one ListArray, where None entries become null lists."""
    null_mask = np.array([arr is None for arr in l_arr])
    # Indices of the None entries, shifted left to account for earlier Nones,
    # i.e. insertion positions into the offsets of the compacted (non-null) arrays.
    null_indices = np.arange(len(null_mask))[null_mask] - np.arange(np.sum(null_mask))
    l_arr = [arr for arr in l_arr if arr is not None]
    offsets = np.cumsum(
        [0] + [len(arr) for arr in l_arr], dtype=object
    )  # convert to dtype object to allow None insertion
    # A None offset makes pa.ListArray.from_arrays produce a null list at that position.
    offsets = np.insert(offsets, null_indices, None)
    offsets = pa.array(offsets, type=pa.int32())
    values = pa.concat_arrays(l_arr)
    return pa.ListArray.from_arrays(offsets, values)
+
+
def list_of_np_array_to_pyarrow_listarray(l_arr: List[np.ndarray], type: pa.DataType = None) -> pa.ListArray:
    """Build a PyArrow ListArray from a possibly nested list of NumPy arrays"""
    if not l_arr:
        # No arrays at all: just an empty typed array.
        return pa.array([], type=type)
    converted = [numpy_to_pyarrow_listarray(arr, type=type) if arr is not None else None for arr in l_arr]
    return list_of_pa_arrays_to_pyarrow_listarray(converted)
+
+
def contains_any_np_array(data: Any):
    """Return `True` if data is a NumPy ndarray or (recursively) if first non-null value in list is a NumPy ndarray.

    Args:
        data (Any): Data.

    Returns:
        bool
    """
    if isinstance(data, np.ndarray):
        return True
    if not isinstance(data, list):
        return False
    # Recurse on the first non-null element of the list.
    return contains_any_np_array(first_non_null_value(data)[1])
+
+
def any_np_array_to_pyarrow_listarray(data: Union[np.ndarray, List], type: pa.DataType = None) -> pa.ListArray:
    """Convert to PyArrow ListArray either a NumPy ndarray or (recursively) a list that may contain any NumPy ndarray.

    Args:
        data (Union[np.ndarray, List]): Data.
        type (pa.DataType): Explicit PyArrow DataType passed to coerce the ListArray data type.

    Returns:
        pa.ListArray
    """
    if isinstance(data, np.ndarray):
        return numpy_to_pyarrow_listarray(data, type=type)
    if isinstance(data, list):
        converted = [any_np_array_to_pyarrow_listarray(item, type=type) for item in data]
        return list_of_pa_arrays_to_pyarrow_listarray(converted)
    # Any other input falls through and returns None (original behavior).
+
+
def to_pyarrow_listarray(data: Any, pa_type: _ArrayXDExtensionType) -> pa.Array:
    """Convert to PyArrow ListArray.

    Args:
        data (Any): Sequence, iterable, np.ndarray or pd.Series.
        pa_type (_ArrayXDExtensionType): Any of the ArrayNDExtensionType.

    Returns:
        pyarrow.Array
    """
    # Plain sequences can be handed to pyarrow directly; NumPy data needs the
    # ListArray construction path.
    if not contains_any_np_array(data):
        return pa.array(data, pa_type.storage_dtype)
    return any_np_array_to_pyarrow_listarray(data, type=pa_type.value_type)
+
+
def _visit(feature: FeatureType, func: Callable[[FeatureType], Optional[FeatureType]]) -> FeatureType:
    """Visit a (possibly nested) feature.

    Args:
        feature (FeatureType): the feature type to be checked
    Returns:
        visited feature (FeatureType)
    """
    # Recurse bottom-up, then apply func to the (rebuilt) node.
    if isinstance(feature, dict):
        transformed = func({key: _visit(value, func) for key, value in feature.items()})
    elif isinstance(feature, (list, tuple)):
        transformed = func([_visit(feature[0], func)])
    elif isinstance(feature, Sequence):
        transformed = func(Sequence(_visit(feature.feature, func), length=feature.length))
    else:
        transformed = func(feature)
    # func returning None means "keep the original node".
    return feature if transformed is None else transformed
+
+
def require_decoding(feature: "FeatureType", ignore_decode_attribute: bool = False) -> bool:
    """Check if a (possibly nested) feature requires decoding.

    Args:
        feature (FeatureType): the feature type to be checked
        ignore_decode_attribute (:obj:`bool`, default ``False``): Whether to ignore the current value
            of the `decode` attribute of the decodable feature types.
    Returns:
        :obj:`bool`

    Fix: ``ignore_decode_attribute`` is now propagated through the recursive calls;
    previously it was silently dropped, so it had no effect on nested features.
    (The annotation is a string to defer evaluation of ``FeatureType``.)
    """
    if isinstance(feature, dict):
        return any(require_decoding(f, ignore_decode_attribute=ignore_decode_attribute) for f in feature.values())
    elif isinstance(feature, (list, tuple)):
        return require_decoding(feature[0], ignore_decode_attribute=ignore_decode_attribute)
    elif isinstance(feature, Sequence):
        return require_decoding(feature.feature, ignore_decode_attribute=ignore_decode_attribute)
    else:
        # A feature is decodable iff it exposes decode_example; the decode flag can be bypassed.
        return hasattr(feature, "decode_example") and (feature.decode if not ignore_decode_attribute else True)
+
+
def require_storage_cast(feature: FeatureType) -> bool:
    """Check if a (possibly nested) feature requires storage casting.

    Args:
        feature (FeatureType): the feature type to be checked
    Returns:
        :obj:`bool`
    """
    # Recurse through containers; a leaf requires casting iff it exposes cast_storage.
    if isinstance(feature, dict):
        return any(require_storage_cast(subfeature) for subfeature in feature.values())
    if isinstance(feature, (list, tuple)):
        return require_storage_cast(feature[0])
    if isinstance(feature, Sequence):
        return require_storage_cast(feature.feature)
    return hasattr(feature, "cast_storage")
+
+
def require_storage_embed(feature: "FeatureType") -> bool:
    """Check if a (possibly nested) feature requires embedding data into storage.

    Args:
        feature (FeatureType): the feature type to be checked
    Returns:
        :obj:`bool`

    Fix: the container branches previously recursed into ``require_storage_cast``
    (copy-paste from that function), so nested features were tested for
    ``cast_storage`` instead of ``embed_storage`` — e.g. a ClassLabel nested in a
    dict (has cast_storage, no embed_storage) was wrongly reported as requiring
    embedding. The recursion now calls ``require_storage_embed`` itself.
    (The annotation is a string to defer evaluation of ``FeatureType``.)
    """
    if isinstance(feature, dict):
        return any(require_storage_embed(f) for f in feature.values())
    elif isinstance(feature, (list, tuple)):
        return require_storage_embed(feature[0])
    elif isinstance(feature, Sequence):
        return require_storage_embed(feature.feature)
    else:
        return hasattr(feature, "embed_storage")
+
+
def keep_features_dicts_synced(func):
    """
    Wrapper to keep the secondary dictionary, which tracks whether keys are decodable, of the :class:`datasets.Features` object
    in sync with the main dictionary.
    """

    @wraps(func)
    def wrapper(*args, **kwargs):
        # `self` may arrive positionally or as a keyword argument.
        if args:
            self, *rest = args
        else:
            self = kwargs.pop("self")
            rest = ()
        result = func(self, *rest, **kwargs)
        assert hasattr(self, "_column_requires_decoding")
        # Recompute the decodability bookkeeping after the mutation.
        self._column_requires_decoding = {
            col: require_decoding(feature) for col, feature in self.items()
        }
        return result

    wrapper._decorator_name_ = "_keep_dicts_synced"
    return wrapper
+
+
+class Features(dict):
+ """A special dictionary that defines the internal structure of a dataset.
+
+ Instantiated with a dictionary of type `dict[str, FieldType]`, where keys are the desired column names,
+ and values are the type of that column.
+
+ `FieldType` can be one of the following:
+ - a [`~datasets.Value`] feature specifies a single typed value, e.g. `int64` or `string`.
+ - a [`~datasets.ClassLabel`] feature specifies a field with a predefined set of classes which can have labels
+ associated to them and will be stored as integers in the dataset.
+ - a python `dict` which specifies that the field is a nested field containing a mapping of sub-fields to sub-fields
+ features. It's possible to have nested fields of nested fields in an arbitrary manner.
+ - a python `list` or a [`~datasets.Sequence`] specifies that the field contains a list of objects. The python
+ `list` or [`~datasets.Sequence`] should be provided with a single sub-feature as an example of the feature
+ type hosted in this list.
+
+
+
    A [`~datasets.Sequence`] with an internal dictionary feature will be automatically converted into a dictionary of
    lists. This behavior is implemented to have a compatibility layer with the TensorFlow Datasets library but may be
    unwanted in some cases. If you don't want this behavior, you can use a python `list` instead of the
    [`~datasets.Sequence`].
+
+
+
+ - a [`Array2D`], [`Array3D`], [`Array4D`] or [`Array5D`] feature for multidimensional arrays.
+ - an [`Audio`] feature to store the absolute path to an audio file or a dictionary with the relative path
+ to an audio file ("path" key) and its bytes content ("bytes" key). This feature extracts the audio data.
+ - an [`Image`] feature to store the absolute path to an image file, an `np.ndarray` object, a `PIL.Image.Image` object
+ or a dictionary with the relative path to an image file ("path" key) and its bytes content ("bytes" key). This feature extracts the image data.
+ - [`~datasets.Translation`] and [`~datasets.TranslationVariableLanguages`], the two features specific to Machine Translation.
+ """
+
    def __init__(*args, **kwargs):
        # self not in the signature to allow passing self as a kwarg
        if not args:
            raise TypeError("descriptor '__init__' of 'Features' object needs an argument")
        self, *args = args
        super(Features, self).__init__(*args, **kwargs)
        # Secondary dict tracking which columns need decoding (e.g. Audio/Image);
        # kept in sync with the main dict by the keep_features_dicts_synced wrappers.
        self._column_requires_decoding: Dict[str, bool] = {
            col: require_decoding(feature) for col, feature in self.items()
        }
+
    # Wrap every mutating dict method so `_column_requires_decoding` is recomputed
    # after each change to the features.
    __setitem__ = keep_features_dicts_synced(dict.__setitem__)
    __delitem__ = keep_features_dicts_synced(dict.__delitem__)
    update = keep_features_dicts_synced(dict.update)
    setdefault = keep_features_dicts_synced(dict.setdefault)
    pop = keep_features_dicts_synced(dict.pop)
    popitem = keep_features_dicts_synced(dict.popitem)
    clear = keep_features_dicts_synced(dict.clear)
+
    def __reduce__(self):
        # Pickle as (constructor, args): rebuilding from a plain dict lets __init__
        # recompute the decoding bookkeeping on unpickle.
        return Features, (dict(self),)
+
    @property
    def type(self):
        """
        Features field types.

        Delegates to :func:`get_nested_type`, which converts this Features dict into
        a pyarrow struct type.

        Returns:
            :obj:`pyarrow.DataType`
        """
        return get_nested_type(self)
+
+ @property
+ def arrow_schema(self):
+ """
+ Features schema.
+
+ Returns:
+ :obj:`pyarrow.Schema`
+ """
+ hf_metadata = {"info": {"features": self.to_dict()}}
+ return pa.schema(self.type).with_metadata({"huggingface": json.dumps(hf_metadata)})
+
+ @classmethod
+ def from_arrow_schema(cls, pa_schema: pa.Schema) -> "Features":
+ """
+ Construct [`Features`] from Arrow Schema.
+ It also checks the schema metadata for Hugging Face Datasets features.
+ Non-nullable fields are not supported and set to nullable.
+
+ Args:
+ pa_schema (`pyarrow.Schema`):
+ Arrow Schema.
+
+ Returns:
+ [`Features`]
+ """
+ # try to load features from the arrow schema metadata
+ metadata_features = Features()
+ if pa_schema.metadata is not None and "huggingface".encode("utf-8") in pa_schema.metadata:
+ metadata = json.loads(pa_schema.metadata["huggingface".encode("utf-8")].decode())
+ if "info" in metadata and "features" in metadata["info"] and metadata["info"]["features"] is not None:
+ metadata_features = Features.from_dict(metadata["info"]["features"])
+ metadata_features_schema = metadata_features.arrow_schema
+ obj = {
+ field.name: (
+ metadata_features[field.name]
+ if field.name in metadata_features and metadata_features_schema.field(field.name) == field
+ else generate_from_arrow_type(field.type)
+ )
+ for field in pa_schema
+ }
+ return cls(**obj)
+
+ @classmethod
+ def from_dict(cls, dic) -> "Features":
+ """
+ Construct [`Features`] from dict.
+
+ Regenerate the nested feature object from a deserialized dict.
+ We use the `_type` key to infer the dataclass name of the feature `FieldType`.
+
+ It allows for a convenient constructor syntax
+ to define features from deserialized JSON dictionaries. This function is used in particular when deserializing
+ a [`DatasetInfo`] that was dumped to a JSON object. This acts as an analogue to
+ [`Features.from_arrow_schema`] and handles the recursive field-by-field instantiation, but doesn't require
+ any mapping to/from pyarrow, except for the fact that it takes advantage of the mapping of pyarrow primitive
+ dtypes that [`Value`] automatically performs.
+
+ Args:
+ dic (`dict[str, Any]`):
+ Python dictionary.
+
+ Returns:
+ `Features`
+
+ Example::
+ >>> Features.from_dict({'_type': {'dtype': 'string', 'id': None, '_type': 'Value'}})
+ {'_type': Value(dtype='string', id=None)}
+ """
+ obj = generate_from_dict(dic)
+ return cls(**obj)
+
    def to_dict(self):
        """Return a plain-dict representation of the features (via the module's `asdict` helper),
        suitable for JSON serialization; inverse of :meth:`Features.from_dict`."""
        return asdict(self)
+
    def _to_yaml_list(self) -> list:
        """Convert the features to the compact list form used in dataset card YAML.

        Works on the JSON-dump dict representation and applies a set of
        "simplification" rewrites (inverted by :meth:`Features._from_yaml_list`).
        """
        # we compute the YAML list from the dict representation that is used for JSON dump
        yaml_data = self.to_dict()

        def simplify(feature: dict) -> dict:
            # Collapse verbose nested forms into the compact YAML shorthand.
            # NOTE: mutates `feature` in place and returns it.
            if not isinstance(feature, dict):
                raise TypeError(f"Expected a dict but got a {type(feature)}: {feature}")

            #
            # sequence:                 ->  sequence: int32
            #   dtype: int32            ->
            #
            if isinstance(feature.get("sequence"), dict) and list(feature["sequence"]) == ["dtype"]:
                feature["sequence"] = feature["sequence"]["dtype"]

            #
            # sequence:                 ->  sequence:
            #   struct:                 ->  - name: foo
            #   - name: foo             ->    dtype: int32
            #     dtype: int32          ->
            #
            if isinstance(feature.get("sequence"), dict) and list(feature["sequence"]) == ["struct"]:
                feature["sequence"] = feature["sequence"]["struct"]

            #
            # list:                     ->  list: int32
            #   dtype: int32            ->
            #
            if isinstance(feature.get("list"), dict) and list(feature["list"]) == ["dtype"]:
                feature["list"] = feature["list"]["dtype"]

            #
            # list:                     ->  list:
            #   struct:                 ->  - name: foo
            #   - name: foo             ->    dtype: int32
            #     dtype: int32          ->
            #
            if isinstance(feature.get("list"), dict) and list(feature["list"]) == ["struct"]:
                feature["list"] = feature["list"]["struct"]

            #
            # class_label:              ->  class_label:
            #   names:                  ->    names:
            #   - negative              ->      '0': negative
            #   - positive              ->      '1': positive
            #
            if isinstance(feature.get("class_label"), dict) and isinstance(feature["class_label"].get("names"), list):
                # server-side requirement: keys must be strings
                feature["class_label"]["names"] = {
                    str(label_id): label_name for label_id, label_name in enumerate(feature["class_label"]["names"])
                }
            return feature

        def to_yaml_inner(obj: Union[dict, list]) -> dict:
            # Recursively rewrite the `_type`-tagged dict representation into the YAML shape.
            if isinstance(obj, dict):
                _type = obj.pop("_type", None)
                if _type == "Sequence":
                    _feature = obj.pop("feature")
                    return simplify({"sequence": to_yaml_inner(_feature), **obj})
                elif _type == "Value":
                    # Value keeps its own keys (dtype, id) unchanged.
                    return obj
                elif _type and not obj:
                    # Feature with no extra fields, e.g. Audio -> dtype: audio
                    return {"dtype": camelcase_to_snakecase(_type)}
                elif _type:
                    # Feature with extra fields, e.g. ClassLabel -> dtype: {class_label: {...}}
                    return {"dtype": simplify({camelcase_to_snakecase(_type): obj})}
                else:
                    # Untagged dict: a struct of named sub-features.
                    return {"struct": [{"name": name, **to_yaml_inner(_feature)} for name, _feature in obj.items()]}
            elif isinstance(obj, list):
                return simplify({"list": simplify(to_yaml_inner(obj[0]))})
            elif isinstance(obj, tuple):
                return to_yaml_inner(list(obj))
            else:
                raise TypeError(f"Expected a dict or a list but got {type(obj)}: {obj}")

        def to_yaml_types(obj: dict) -> dict:
            # Normalize any remaining tuples to lists so the result is valid YAML data.
            if isinstance(obj, dict):
                return {k: to_yaml_types(v) for k, v in obj.items()}
            elif isinstance(obj, list):
                return [to_yaml_types(v) for v in obj]
            elif isinstance(obj, tuple):
                return to_yaml_types(list(obj))
            else:
                return obj

        # The top level is always a struct; the YAML form is its field list.
        return to_yaml_types(to_yaml_inner(yaml_data)["struct"])
+
    @classmethod
    def _from_yaml_list(cls, yaml_data: list) -> "Features":
        """Build [`Features`] from the compact dataset-card YAML list form.

        Inverse of :meth:`Features._to_yaml_list`: expands the YAML shorthands back
        into the `_type`-tagged dict representation, then defers to :meth:`from_dict`.
        """
        yaml_data = copy.deepcopy(yaml_data)

        # we convert the list obtained from YAML data into the dict representation that is used for JSON dump

        def unsimplify(feature: dict) -> dict:
            # Expand the compact YAML shorthands back to their verbose nested form.
            # NOTE: mutates `feature` in place and returns it.
            if not isinstance(feature, dict):
                raise TypeError(f"Expected a dict but got a {type(feature)}: {feature}")
            #
            # sequence: int32           ->  sequence:
            #                           ->    dtype: int32
            #
            if isinstance(feature.get("sequence"), str):
                feature["sequence"] = {"dtype": feature["sequence"]}
            #
            # list: int32               ->  list:
            #                           ->    dtype: int32
            #
            if isinstance(feature.get("list"), str):
                feature["list"] = {"dtype": feature["list"]}

            #
            # class_label:              ->  class_label:
            #   names:                  ->    names:
            #     '0': negative         ->    - negative
            #     '1': positive         ->    - positive
            #
            if isinstance(feature.get("class_label"), dict) and isinstance(feature["class_label"].get("names"), dict):
                # Keys are string label ids; they must form a dense range 0..max.
                label_ids = sorted(feature["class_label"]["names"], key=int)
                if label_ids and [int(label_id) for label_id in label_ids] != list(range(int(label_ids[-1]) + 1)):
                    raise ValueError(
                        f"ClassLabel expected a value for all label ids [0:{int(label_ids[-1]) + 1}] but some ids are missing."
                    )
                feature["class_label"]["names"] = [feature["class_label"]["names"][label_id] for label_id in label_ids]
            return feature

        def from_yaml_inner(obj: Union[dict, list]) -> Union[dict, list]:
            # Recursively rewrite the YAML shape back into `_type`-tagged dicts.
            if isinstance(obj, dict):
                if not obj:
                    return {}
                # The first key decides which rewrite applies.
                _type = next(iter(obj))
                if _type == "sequence":
                    # unsimplify mutates obj, so after pop the remaining keys are Sequence kwargs.
                    _feature = unsimplify(obj).pop(_type)
                    return {"feature": from_yaml_inner(_feature), **obj, "_type": "Sequence"}
                if _type == "list":
                    return [from_yaml_inner(unsimplify(obj)[_type])]
                if _type == "struct":
                    return from_yaml_inner(obj["struct"])
                elif _type == "dtype":
                    if isinstance(obj["dtype"], str):
                        # e.g. int32, float64, string, audio, image
                        try:
                            Value(obj["dtype"])
                            return {**obj, "_type": "Value"}
                        except ValueError:
                            # e.g. Audio, Image, ArrayXD
                            return {"_type": snakecase_to_camelcase(obj["dtype"])}
                    else:
                        return from_yaml_inner(obj["dtype"])
                else:
                    # Named feature with kwargs, e.g. class_label -> ClassLabel(**kwargs)
                    return {"_type": snakecase_to_camelcase(_type), **unsimplify(obj)[_type]}
            elif isinstance(obj, list):
                # A list of named fields becomes a struct dict keyed by "name".
                names = [_feature.pop("name") for _feature in obj]
                return {name: from_yaml_inner(_feature) for name, _feature in zip(names, obj)}
            else:
                raise TypeError(f"Expected a dict or a list but got {type(obj)}: {obj}")

        return cls.from_dict(from_yaml_inner(yaml_data))
+
def encode_example(self, example):
    """Encode a single dataset row into a format suitable for Arrow.

    Args:
        example (`dict[str, Any]`):
            Data in a Dataset row.

    Returns:
        `dict[str, Any]`
    """
    as_python = cast_to_python_objects(example)
    return encode_nested_example(self, as_python)
+
def encode_column(self, column, column_name: str):
    """Encode a full dataset column into a format suitable for Arrow.

    Args:
        column (`list[Any]`):
            Data in a Dataset column.
        column_name (`str`):
            Dataset column name.

    Returns:
        `list[Any]`
    """
    feature = self[column_name]
    return [encode_nested_example(feature, item) for item in cast_to_python_objects(column)]
+
def encode_batch(self, batch):
    """Encode a whole batch into a format suitable for Arrow.

    Args:
        batch (`dict[str, list[Any]]`):
            Data in a Dataset batch.

    Returns:
        `dict[str, list[Any]]`

    Raises:
        ValueError: If the batch columns don't match the feature names.
    """
    if set(batch) != set(self):
        raise ValueError(f"Column mismatch between batch {set(batch)} and features {set(self)}")
    return {
        key: [encode_nested_example(self[key], item) for item in cast_to_python_objects(column)]
        for key, column in batch.items()
    }
+
def decode_example(self, example: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None):
    """Decode a dataset row, applying custom feature decoding where required.

    Args:
        example (`dict[str, Any]`):
            Dataset row data.
        token_per_repo_id (`dict`, *optional*):
            To access and decode audio or image files from private repositories on the Hub, you can pass
            a dictionary `repo_id (str) -> token (bool or str)`.

    Returns:
        `dict[str, Any]`
    """
    decoded = {}
    # only consider features that actually appear in the example
    present_features = {key: value for key, value in self.items() if key in example}
    for column_name, (feature, value) in zip_dict(present_features, example):
        if self._column_requires_decoding[column_name]:
            decoded[column_name] = decode_nested_example(feature, value, token_per_repo_id=token_per_repo_id)
        else:
            decoded[column_name] = value
    return decoded
+
def decode_column(self, column: list, column_name: str):
    """Decode a dataset column, applying custom feature decoding where required.

    Args:
        column (`list[Any]`):
            Dataset column data.
        column_name (`str`):
            Dataset column name.

    Returns:
        `list[Any]`
    """
    if not self._column_requires_decoding[column_name]:
        return column
    feature = self[column_name]
    # None entries are passed through untouched
    return [None if value is None else decode_nested_example(feature, value) for value in column]
+
def decode_batch(self, batch: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None):
    """Decode a whole batch, applying custom feature decoding where required.

    Args:
        batch (`dict[str, list[Any]]`):
            Dataset batch data.
        token_per_repo_id (`dict`, *optional*):
            To access and decode audio or image files from private repositories on the Hub, you can pass
            a dictionary repo_id (str) -> token (bool or str)

    Returns:
        `dict[str, list[Any]]`
    """
    decoded_batch = {}
    for column_name, column in batch.items():
        if self._column_requires_decoding[column_name]:
            # None entries are passed through untouched
            decoded_batch[column_name] = [
                None
                if value is None
                else decode_nested_example(self[column_name], value, token_per_repo_id=token_per_repo_id)
                for value in column
            ]
        else:
            decoded_batch[column_name] = column
    return decoded_batch
+
def copy(self) -> "Features":
    """
    Make a deep copy of [`Features`].

    Returns:
        [`Features`]

    Example:

    ```py
    >>> from datasets import load_dataset
    >>> ds = load_dataset("rotten_tomatoes", split="train")
    >>> copy_of_features = ds.features.copy()
    >>> copy_of_features
    {'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None),
     'text': Value(dtype='string', id=None)}
    ```
    """
    # local import avoids relying on the module-level `copy` binding
    from copy import deepcopy

    return deepcopy(self)
+
def reorder_fields_as(self, other: "Features") -> "Features":
    """
    Reorder Features fields to match the field order of other [`Features`].

    The order of the fields is important since it matters for the underlying arrow data.
    Re-ordering the fields allows to make the underlying arrow data type match.

    Args:
        other ([`Features`]):
            The other [`Features`] to align with.

    Returns:
        [`Features`]

    Raises:
        ValueError: If the two structures have mismatching types, keys, or list lengths.

    Example::

        >>> from datasets import Features, Sequence, Value
        >>> # let's say we have to features with a different order of nested fields (for a and b for example)
        >>> f1 = Features({"root": Sequence({"a": Value("string"), "b": Value("string")})})
        >>> f2 = Features({"root": {"b": Sequence(Value("string")), "a": Sequence(Value("string"))}})
        >>> assert f1.type != f2.type
        >>> # re-ordering keeps the base structure (here Sequence is defined at the root level), but make the fields order match
        >>> f1.reorder_fields_as(f2)
        {'root': Sequence(feature={'b': Value(dtype='string', id=None), 'a': Value(dtype='string', id=None)}, length=-1, id=None)}
        >>> assert f1.reorder_fields_as(f2).type == f2.type
    """

    def recursive_reorder(source, target, stack=""):
        # `stack` accumulates the nested field path; only used to build error messages.
        stack_position = " at " + stack[1:] if stack else ""
        # Normalize a Sequence target to the same shape as a Sequence source below:
        # a Sequence of dicts is compared as a dict of single-element lists.
        if isinstance(target, Sequence):
            target = target.feature
            if isinstance(target, dict):
                target = {k: [v] for k, v in target.items()}
            else:
                target = [target]
        if isinstance(source, Sequence):
            # keep id/length so the rebuilt Sequence is otherwise identical
            source, id_, length = source.feature, source.id, source.length
            if isinstance(source, dict):
                # wrap values in single-element lists, reorder, then unwrap
                source = {k: [v] for k, v in source.items()}
                reordered = recursive_reorder(source, target, stack)
                return Sequence({k: v[0] for k, v in reordered.items()}, id=id_, length=length)
            else:
                source = [source]
                reordered = recursive_reorder(source, target, stack)
                return Sequence(reordered[0], id=id_, length=length)
        elif isinstance(source, dict):
            if not isinstance(target, dict):
                raise ValueError(f"Type mismatch: between {source} and {target}" + stack_position)
            if sorted(source) != sorted(target):
                message = (
                    f"Keys mismatch: between {source} (source) and {target} (target).\n"
                    f"{source.keys()-target.keys()} are missing from target "
                    f"and {target.keys()-source.keys()} are missing from source" + stack_position
                )
                raise ValueError(message)
            # iterate over `target` so the result follows the target's key order
            return {key: recursive_reorder(source[key], target[key], stack + f".{key}") for key in target}
        elif isinstance(source, list):
            if not isinstance(target, list):
                raise ValueError(f"Type mismatch: between {source} and {target}" + stack_position)
            if len(source) != len(target):
                raise ValueError(f"Length mismatch: between {source} and {target}" + stack_position)
            return [recursive_reorder(source[i], target[i], stack + ".") for i in range(len(target))]
        else:
            # leaf feature: nothing to reorder
            return source

    return Features(recursive_reorder(self, other))
+
def flatten(self, max_depth=16) -> "Features":
    """Flatten the features. Every dictionary column is removed and is replaced by
    all the subfields it contains. The new fields are named by concatenating the
    name of the original column and the subfield name like this: `<original>.<subfield>`.

    If a column contains nested dictionaries, then all the lower-level subfields names are
    also concatenated to form new columns: `<original>.<subfield>.<subsubfield>`, etc.

    Args:
        max_depth (`int`, defaults to 16): Maximum number of nesting levels to flatten.

    Returns:
        [`Features`]:
            The flattened features.

    Example:

    ```py
    >>> from datasets import load_dataset
    >>> ds = load_dataset("squad", split="train")
    >>> ds.features.flatten()
    {'answers.answer_start': Sequence(feature=Value(dtype='int32', id=None), length=-1, id=None),
     'answers.text': Sequence(feature=Value(dtype='string', id=None), length=-1, id=None),
     'context': Value(dtype='string', id=None),
     'id': Value(dtype='string', id=None),
     'question': Value(dtype='string', id=None),
     'title': Value(dtype='string', id=None)}
    ```
    """
    # Each pass flattens exactly one level of nesting; loop until a fixpoint or max_depth.
    for depth in range(1, max_depth):
        no_change = True
        flattened = self.copy()
        for column_name, subfeature in self.items():
            if isinstance(subfeature, dict):
                # plain dict column -> one new column per sub-field
                no_change = False
                flattened.update({f"{column_name}.{k}": v for k, v in subfeature.items()})
                del flattened[column_name]
            elif isinstance(subfeature, Sequence) and isinstance(subfeature.feature, dict):
                # Sequence of dicts -> one Sequence column per sub-field
                # (dict sub-fields stay as [dict] so the next pass can flatten them)
                no_change = False
                flattened.update(
                    {
                        f"{column_name}.{k}": Sequence(v) if not isinstance(v, dict) else [v]
                        for k, v in subfeature.feature.items()
                    }
                )
                del flattened[column_name]
            elif hasattr(subfeature, "flatten") and subfeature.flatten() != subfeature:
                # feature types providing their own flatten logic (e.g. decodable features)
                no_change = False
                flattened.update({f"{column_name}.{k}": v for k, v in subfeature.flatten().items()})
                del flattened[column_name]
        # rebind `self` so the next pass works on the partially-flattened result
        self = flattened
        if no_change:
            break
    return self
+
+
def _align_features(features_list: List[Features]) -> List[Features]:
    """Align dictionaries of features so that keys found in multiple dictionaries share the same feature.

    A `Value("null")` placeholder is replaced by the first non-null feature seen for the same key;
    nested dict features are aligned recursively.
    """
    aligned = {}
    for features in features_list:
        for key, feature in features.items():
            if key in aligned and isinstance(feature, dict):
                # Recursively align features.
                aligned[key] = _align_features([aligned[key], feature])[0]
            else:
                is_null_placeholder = isinstance(aligned.get(key), Value) and aligned[key].dtype == "null"
                if key not in aligned or is_null_placeholder:
                    aligned[key] = feature

    return [Features({key: aligned[key] for key in features}) for features in features_list]
+
+
def _check_if_features_can_be_aligned(features_list: List[Features]):
    """Check if the dictionaries of features can be aligned.

    Two dictionaries of features can be aligned if every key they share has the same feature type,
    or one of them is of type `Value("null")`.

    Raises:
        ValueError: If a shared key has two incompatible feature types.
    """
    # First pass: pick a reference feature per key (the first non-null one wins).
    name2feature = {}
    for features in features_list:
        for k, v in features.items():
            if k not in name2feature or (isinstance(name2feature[k], Value) and name2feature[k].dtype == "null"):
                name2feature[k] = v

    # Second pass: every occurrence must match the reference (or be a null placeholder).
    for features in features_list:
        for k, v in features.items():
            if isinstance(v, dict) and isinstance(name2feature[k], dict):
                # Deep checks for structure.
                _check_if_features_can_be_aligned([name2feature[k], v])
            elif not (isinstance(v, Value) and v.dtype == "null") and name2feature[k] != v:
                # note: closing parenthesis added to the message (it was previously unbalanced)
                raise ValueError(
                    f'The features can\'t be aligned because the key {k} of features {features} has unexpected type - {v} (expected either {name2feature[k]} or Value("null")).'
                )
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/features/image.py b/evalkit_tf437/lib/python3.10/site-packages/datasets/features/image.py
new file mode 100644
index 0000000000000000000000000000000000000000..8bd72c8efaff2ffa0a057f6ce1d3fb0977dbc61f
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/datasets/features/image.py
@@ -0,0 +1,376 @@
+import os
+import sys
+import warnings
+from dataclasses import dataclass, field
+from io import BytesIO
+from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
+
+import numpy as np
+import pyarrow as pa
+
+from .. import config
+from ..download.download_config import DownloadConfig
+from ..download.streaming_download_manager import xopen
+from ..table import array_cast
+from ..utils.file_utils import is_local_path
+from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
+
+
+if TYPE_CHECKING:
+ import PIL.Image
+
+ from .features import FeatureType
+
+
# Cache of formats Pillow can both open and save; populated lazily by `list_image_compression_formats`.
_IMAGE_COMPRESSION_FORMATS: Optional[List[str]] = None
# "<" (little-endian) or ">" (big-endian): byteorder prefix for numpy dtype strings on this platform.
_NATIVE_BYTEORDER = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
# NOTE(review): "DTPYES" is a typo for "DTYPES"; kept as-is since other code in this module references the name.
_VALID_IMAGE_ARRAY_DTPYES = [
    np.dtype("|b1"),
    np.dtype("|u1"),
    np.dtype("u2"),
    np.dtype("i2"),
    np.dtype("u4"),
    np.dtype("i4"),
    np.dtype("f4"),
    np.dtype("f8"),
]
+
+
@dataclass
class Image:
    """Image [`Feature`] to read image data from an image file.

    Input: The Image feature accepts as input:
    - A `str`: Absolute path to the image file (i.e. random access is allowed).
    - A `dict` with the keys:

        - `path`: String with relative path of the image file to the archive file.
        - `bytes`: Bytes of the image file.

      This is useful for archived files with sequential access.

    - An `np.ndarray`: NumPy array representing an image.
    - A `PIL.Image.Image`: PIL image object.

    Args:
        decode (`bool`, defaults to `True`):
            Whether to decode the image data. If `False`,
            returns the underlying dictionary in the format `{"path": image_path, "bytes": image_bytes}`.

    Examples:

    ```py
    >>> from datasets import load_dataset, Image
    >>> ds = load_dataset("beans", split="train")
    >>> ds.features["image"]
    Image(decode=True, id=None)
    >>> ds[0]["image"]
    <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=500x500>
    >>> ds = ds.cast_column('image', Image(decode=False))
    >>> ds[0]["image"]
    {'bytes': None,
     'path': '/root/.cache/huggingface/datasets/downloads/extracted/b0a21163f78769a2cf11f58dfc767fb458fc7cea5c05dccc0144a2c0f0bc1292/train/healthy/healthy_train.85.jpg'}
    ```
    """

    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "PIL.Image.Image"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Image", init=False, repr=False)

    def __call__(self):
        # The feature's pyarrow storage type.
        return self.pa_type

    def encode_example(self, value: Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"]) -> dict:
        """Encode example into a format for Arrow.

        Args:
            value (`str`, `np.ndarray`, `PIL.Image.Image` or `dict`):
                Data passed as input to Image feature.

        Returns:
            `dict` with "path" and "bytes" fields
        """
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support encoding images, please install 'Pillow'.")

        # a plain Python list is interpreted as raw pixel data
        if isinstance(value, list):
            value = np.array(value)

        if isinstance(value, str):
            return {"path": value, "bytes": None}
        elif isinstance(value, bytes):
            return {"path": None, "bytes": value}
        elif isinstance(value, np.ndarray):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(value)
        elif isinstance(value, PIL.Image.Image):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(value)
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the image bytes, and path is used to infer the image format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
            )

    def decode_example(self, value: dict, token_per_repo_id=None) -> "PIL.Image.Image":
        """Decode example image file into image data.

        Args:
            value (`str` or `dict`):
                A string with the absolute image file path, a dictionary with
                keys:

                - `path`: String with absolute or relative image file path.
                - `bytes`: The bytes of the image file.
            token_per_repo_id (`dict`, *optional*):
                To access and decode
                image files from private repositories on the Hub, you can pass
                a dictionary repo_id (`str`) -> token (`bool` or `str`).

        Returns:
            `PIL.Image.Image`
        """
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead.")

        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support decoding images, please install 'Pillow'.")

        if token_per_repo_id is None:
            token_per_repo_id = {}

        path, bytes_ = value["path"], value["bytes"]
        if bytes_ is None:
            if path is None:
                raise ValueError(f"An image should have one of 'path' or 'bytes' but both are None in {value}.")
            else:
                if is_local_path(path):
                    image = PIL.Image.open(path)
                else:
                    # remote file: try to resolve the repo token from the URL, then stream the bytes
                    source_url = path.split("::")[-1]
                    pattern = (
                        config.HUB_DATASETS_URL
                        if source_url.startswith(config.HF_ENDPOINT)
                        else config.HUB_DATASETS_HFFS_URL
                    )
                    try:
                        repo_id = string_to_dict(source_url, pattern)["repo_id"]
                        token = token_per_repo_id.get(repo_id)
                    except ValueError:
                        # URL doesn't match a Hub dataset pattern -> no token
                        token = None
                    download_config = DownloadConfig(token=token)
                    with xopen(path, "rb", download_config=download_config) as f:
                        bytes_ = BytesIO(f.read())
                    image = PIL.Image.open(bytes_)
        else:
            image = PIL.Image.open(BytesIO(bytes_))
        image.load()  # to avoid "Too many open files" errors
        return image

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """If in the decodable state, return the feature itself, otherwise flatten the feature into a dictionary."""
        from .features import Value

        return (
            self
            if self.decode
            else {
                "bytes": Value("binary"),
                "path": Value("string"),
            }
        )

    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray, pa.ListArray]) -> pa.StructArray:
        """Cast an Arrow array to the Image arrow storage type.
        The Arrow types that can be converted to the Image pyarrow storage type are:

        - `pa.string()` - it must contain the "path" data
        - `pa.binary()` - it must contain the image bytes
        - `pa.struct({"bytes": pa.binary()})`
        - `pa.struct({"path": pa.string()})`
        - `pa.struct({"bytes": pa.binary(), "path": pa.string()})` - order doesn't matter
        - `pa.list(*)` - it must contain the image array data

        Args:
            storage (`Union[pa.StringArray, pa.StructArray, pa.ListArray]`):
                PyArrow array to cast.

        Returns:
            `pa.StructArray`: Array in the Image arrow storage type, that is
            `pa.struct({"bytes": pa.binary(), "path": pa.string()})`.
        """
        if pa.types.is_string(storage.type):
            # string array -> paths only; bytes column is all-null
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            # binary array -> bytes only; path column is all-null
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type):
            # struct -> reuse whichever of "bytes"/"path" is present, null-fill the other
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_list(storage.type):
            # list of pixel values -> encode each array into compressed image bytes
            bytes_array = pa.array(
                [encode_np_array(np.array(arr))["bytes"] if arr is not None else None for arr in storage.to_pylist()],
                type=pa.binary(),
            )
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays(
                [bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null()
            )
        return array_cast(storage, self.pa_type)

    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        """Embed image files into the Arrow array.

        Args:
            storage (`pa.StructArray`):
                PyArrow array to embed.

        Returns:
            `pa.StructArray`: Array in the Image arrow storage type, that is
            `pa.struct({"bytes": pa.binary(), "path": pa.string()})`.
        """

        @no_op_if_value_is_null
        def path_to_bytes(path):
            # read the file contents; xopen supports both local and remote paths
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            # keep only the basename: the bytes are now embedded, so full paths are no longer meaningful
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
+
+
def list_image_compression_formats() -> List[str]:
    """Return the image formats that Pillow can both open and save (computed once, then cached)."""
    if not config.PIL_AVAILABLE:
        raise ImportError("To support encoding images, please install 'Pillow'.")
    import PIL.Image

    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        readable = set(PIL.Image.OPEN)
        writable = set(PIL.Image.SAVE)
        _IMAGE_COMPRESSION_FORMATS = list(readable & writable)
    return _IMAGE_COMPRESSION_FORMATS
+
+
def image_to_bytes(image: "PIL.Image.Image") -> bytes:
    """Serialize a PIL image to bytes, reusing its native format when Pillow can write it,
    otherwise falling back to PNG (for common modes) or TIFF."""
    if image.format in list_image_compression_formats():
        save_format = image.format
    elif image.mode in ["1", "L", "LA", "RGB", "RGBA"]:
        save_format = "PNG"
    else:
        save_format = "TIFF"
    buffer = BytesIO()
    image.save(buffer, format=save_format)
    return buffer.getvalue()
+
+
def encode_pil_image(image: "PIL.Image.Image") -> dict:
    """Encode a PIL image as an Arrow-compatible dict, preferring a file path reference over raw bytes."""
    has_source_file = hasattr(image, "filename") and image.filename != ""
    if has_source_file:
        return {"path": image.filename, "bytes": None}
    return {"path": None, "bytes": image_to_bytes(image)}
+
+
def encode_np_array(array: np.ndarray) -> dict:
    """Encode a NumPy array as an Arrow-compatible image dict with compressed image bytes.

    Args:
        array (`np.ndarray`): Image data; multi-channel arrays (ndim > 2) must have an
            integer dtype and are downcast to uint8.

    Returns:
        `dict` with "path" (always `None`) and "bytes" fields.

    Raises:
        ImportError: If Pillow is not installed.
        TypeError: If the array dtype cannot be (down)cast to a dtype Pillow supports.
    """
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize

    dest_dtype = None

    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        # fix: set dest_dtype before the check so the error message no longer reads "Only None is supported"
        dest_dtype = np.dtype("|u1")
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f"Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays."
            )
        if dtype != dest_dtype:
            warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        dest_dtype = dtype
    else:
        # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind")
        # doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize)
            if np.dtype(dtype_str) in _VALID_IMAGE_ARRAY_DTPYES:
                dest_dtype = np.dtype(dtype_str)
                warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
                break
            else:
                dtype_itemsize //= 2
        if dest_dtype is None:
            raise TypeError(
                f"Cannot downcast dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}"
            )

    image = PIL.Image.fromarray(array.astype(dest_dtype))
    return {"path": None, "bytes": image_to_bytes(image)}
+
+
def objects_to_list_of_image_dicts(
    objs: Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]],
) -> List[dict]:
    """Encode a list of objects into a format suitable for creating an extension array of type `ImageExtensionType`."""
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    if not objs:
        return objs
    # the first non-null element decides how the whole list is encoded
    _, sample = first_non_null_value(objs)
    if isinstance(sample, str):
        return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
    if isinstance(sample, np.ndarray):
        encoder = no_op_if_value_is_null(encode_np_array)
        return [encoder(obj) for obj in objs]
    if isinstance(sample, PIL.Image.Image):
        encoder = no_op_if_value_is_null(encode_pil_image)
        return [encoder(obj) for obj in objs]
    return objs
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/filesystems/__pycache__/compression.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/datasets/filesystems/__pycache__/compression.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c773fe2c03eee57985cf085d18aaa881446b4134
Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/datasets/filesystems/__pycache__/compression.cpython-310.pyc differ
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/formatting/__pycache__/torch_formatter.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/datasets/formatting/__pycache__/torch_formatter.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c0f555107610ebbd949183d90ab520b6e5717d9f
Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/datasets/formatting/__pycache__/torch_formatter.cpython-310.pyc differ
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/formatting/np_formatter.py b/evalkit_tf437/lib/python3.10/site-packages/datasets/formatting/np_formatter.py
new file mode 100644
index 0000000000000000000000000000000000000000..95bcff2b51728fdd9647dad382639724df163ce2
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/datasets/formatting/np_formatter.py
@@ -0,0 +1,106 @@
+# Copyright 2020 The HuggingFace Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+from collections.abc import Mapping
+
+import numpy as np
+import pyarrow as pa
+
+from .. import config
+from ..utils.py_utils import map_nested
+from .formatting import TensorFormatter
+
+
class NumpyFormatter(TensorFormatter[Mapping, np.ndarray, Mapping]):
    """Formatter that converts Arrow-extracted data into NumPy arrays (rows and batches as mappings)."""

    def __init__(self, features=None, **np_array_kwargs):
        super().__init__(features=features)
        # extra kwargs forwarded to `np.asarray` when tensorizing values
        self.np_array_kwargs = np_array_kwargs

    def _consolidate(self, column):
        # Stack a list of equal-shape/equal-dtype arrays into a single array;
        # otherwise keep the elements in a 1-D object array.
        if isinstance(column, list):
            if column and all(
                isinstance(x, np.ndarray) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column
            ):
                return np.stack(column)
            else:
                # don't use np.array(column, dtype=object)
                # since it fails in certain cases
                # see https://stackoverflow.com/q/51005699
                out = np.empty(len(column), dtype=object)
                out[:] = column
                return out
        return column

    def _tensorize(self, value):
        # Leave strings/bytes/None and already-numeric numpy scalars untouched.
        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value
        elif isinstance(value, np.number):
            return value

        default_dtype = {}

        # default to int64 / float32 unless overridden via np_array_kwargs
        if isinstance(value, np.ndarray) and np.issubdtype(value.dtype, np.integer):
            default_dtype = {"dtype": np.int64}
        elif isinstance(value, np.ndarray) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": np.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                return np.asarray(value, **self.np_array_kwargs)

        return np.asarray(value, **{**default_dtype, **self.np_array_kwargs})

    def _recursive_tensorize(self, data_struct):
        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch

            if isinstance(data_struct, torch.Tensor):
                return self._tensorize(data_struct.detach().cpu().numpy()[()])
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, (np.ndarray, np.character, np.number)):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        if isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        # map over nested dicts but not lists (lists are handled in _recursive_tensorize)
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        # extract -> decode features -> convert to numpy
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> np.ndarray:
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/formatting/tf_formatter.py b/evalkit_tf437/lib/python3.10/site-packages/datasets/formatting/tf_formatter.py
new file mode 100644
index 0000000000000000000000000000000000000000..adb15cda3815d77fa0272562e83fda029d1babee
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/datasets/formatting/tf_formatter.py
@@ -0,0 +1,115 @@
+# Copyright 2020 The HuggingFace Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Lint as: python3
+import sys
+from collections.abc import Mapping
+from typing import TYPE_CHECKING
+
+import numpy as np
+import pyarrow as pa
+
+from .. import config
+from ..utils.py_utils import map_nested
+from .formatting import TensorFormatter
+
+
+if TYPE_CHECKING:
+ import tensorflow as tf
+
+
class TFFormatter(TensorFormatter[Mapping, "tf.Tensor", Mapping]):
    """Formatter that converts Arrow-extracted data into TensorFlow tensors (rows and batches as mappings)."""

    def __init__(self, features=None, **tf_tensor_kwargs):
        super().__init__(features=features)
        # extra kwargs forwarded to `tf.convert_to_tensor`
        self.tf_tensor_kwargs = tf_tensor_kwargs
        import tensorflow as tf  # noqa: F401 - import tf at initialization

    def _consolidate(self, column):
        # Stack a list of tensors into one tensor when shapes/dtypes agree,
        # or into a ragged tensor for 1-D tensors of the same dtype.
        import tensorflow as tf

        if isinstance(column, list) and column:
            if all(
                isinstance(x, tf.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column
            ):
                return tf.stack(column)
            elif all(
                isinstance(x, (tf.Tensor, tf.RaggedTensor)) and x.ndim == 1 and x.dtype == column[0].dtype
                for x in column
            ):
                # only rag 1-D tensors, otherwise some dimensions become ragged even though they were consolidated
                return tf.ragged.stack(column)

        return column

    def _tensorize(self, value):
        import tensorflow as tf

        if value is None:
            return value

        default_dtype = {}

        # default to int64 / float32 unless overridden via tf_tensor_kwargs
        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            default_dtype = {"dtype": tf.int64}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": tf.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        return tf.convert_to_tensor(value, **{**default_dtype, **self.tf_tensor_kwargs})

    def _recursive_tensorize(self, data_struct):
        import tensorflow as tf

        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch

            if isinstance(data_struct, torch.Tensor):
                return self._tensorize(data_struct.detach().cpu().numpy()[()])
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, tf.Tensor):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # tf tensors cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        # map over nested dicts but not lists (lists are handled in _recursive_tensorize)
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        # extract -> decode features -> convert to tf tensors
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "tf.Tensor":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/formatting/torch_formatter.py b/evalkit_tf437/lib/python3.10/site-packages/datasets/formatting/torch_formatter.py
new file mode 100644
index 0000000000000000000000000000000000000000..cf287e67eb806cbc2be0f0b4febdcd334854ec05
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/datasets/formatting/torch_formatter.py
@@ -0,0 +1,111 @@
+# Copyright 2020 The HuggingFace Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Lint as: python3
+import sys
+from collections.abc import Mapping
+from typing import TYPE_CHECKING
+
+import numpy as np
+import pyarrow as pa
+
+from .. import config
+from ..utils.py_utils import map_nested
+from .formatting import TensorFormatter
+
+
+if TYPE_CHECKING:
+ import torch
+
+
class TorchFormatter(TensorFormatter[Mapping, "torch.Tensor", Mapping]):
    """Formatter that presents rows, columns and batches of an Arrow table as PyTorch tensors."""

    def __init__(self, features=None, **torch_tensor_kwargs):
        # torch_tensor_kwargs are forwarded verbatim to `torch.tensor` (e.g. dtype/device overrides)
        super().__init__(features=features)
        self.torch_tensor_kwargs = torch_tensor_kwargs
        import torch  # noqa import torch at initialization

    def _consolidate(self, column):
        # Stack a list of tensors into one tensor only when all elements share
        # the same shape and dtype; otherwise return the list unchanged.
        import torch

        if isinstance(column, list) and column:
            if all(
                isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column
            ):
                return torch.stack(column)
        return column

    def _tensorize(self, value):
        """Convert a leaf value (numpy scalar/array, PIL image, plain python object) to a tensor.

        Strings, bytes and None are returned unchanged: torch has no dtype for them.
        """
        import torch

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            # numpy string arrays cannot become torch tensors; fall back to python lists
            return value.tolist()

        default_dtype = {}

        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            default_dtype = {"dtype": torch.int64}

            # Convert dtype to np.int64 if it's either np.uint16 or np.uint32 to ensure compatibility.
            # np.uint64 is excluded from this conversion as there is no compatible PyTorch dtype that can handle it without loss.
            if value.dtype in [np.uint16, np.uint32]:
                value = value.astype(np.int64)

        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            # Only reached for non-numeric values; decode PIL images through numpy first
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)
        # caller-supplied torch_tensor_kwargs take precedence over the inferred default dtype
        return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs})

    def _recursive_tensorize(self, data_struct):
        import torch

        # support for torch, tf, jax etc.
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, torch.Tensor):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # torch tensors cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        """Tensorize a nested dict structure; list handling is done by `_recursive_tensorize`."""
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        """Return the first row of `pa_table` as a dict of torch tensors."""
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "torch.Tensor":
        """Return the first column of `pa_table` as a (consolidated) torch tensor."""
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        """Return `pa_table` as a dict mapping column name to a consolidated torch tensor."""
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/io/__pycache__/__init__.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/datasets/io/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6b2cb8704cc114422d80304bfa9df14ab8e697de
Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/datasets/io/__pycache__/__init__.cpython-310.pyc differ
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/io/__pycache__/abc.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/datasets/io/__pycache__/abc.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4bd95ef0c08a72a38826735c0bf0819017dbdd02
Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/datasets/io/__pycache__/abc.cpython-310.pyc differ
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/io/__pycache__/spark.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/datasets/io/__pycache__/spark.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1c5221775d9f370607633e27220936f86d12d015
Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/datasets/io/__pycache__/spark.cpython-310.pyc differ
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/packaged_modules/audiofolder/__pycache__/audiofolder.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/datasets/packaged_modules/audiofolder/__pycache__/audiofolder.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..24a5d8527e355eb4ad9dd086dcc16c4903680f48
Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/datasets/packaged_modules/audiofolder/__pycache__/audiofolder.cpython-310.pyc differ
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/packaged_modules/cache/__init__.py b/evalkit_tf437/lib/python3.10/site-packages/datasets/packaged_modules/cache/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/packaged_modules/folder_based_builder/folder_based_builder.py b/evalkit_tf437/lib/python3.10/site-packages/datasets/packaged_modules/folder_based_builder/folder_based_builder.py
new file mode 100644
index 0000000000000000000000000000000000000000..146ef4e613b9d943b160c04b2286b2a2d331b80a
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/datasets/packaged_modules/folder_based_builder/folder_based_builder.py
@@ -0,0 +1,406 @@
+import collections
+import itertools
+import os
+from dataclasses import dataclass
+from typing import List, Optional, Tuple, Type
+
+import pandas as pd
+import pyarrow as pa
+import pyarrow.json as paj
+
+import datasets
+from datasets.features.features import FeatureType
+from datasets.tasks.base import TaskTemplate
+
+
+logger = datasets.utils.logging.get_logger(__name__)
+
+
def count_path_segments(path):
    """Return the number of path separators in *path*, treating ``\\`` and ``/`` alike."""
    normalized = path.replace("\\", "/")
    return normalized.count("/")
+
+
@dataclass
class FolderBasedBuilderConfig(datasets.BuilderConfig):
    """BuilderConfig for AutoFolder."""

    # Explicit features; when None they are inferred in `_split_generators`
    features: Optional[datasets.Features] = None
    # Tri-state flags: None means "decide automatically" in `_split_generators`
    drop_labels: Optional[bool] = None
    drop_metadata: Optional[bool] = None
+
+
class FolderBasedBuilder(datasets.GeneratorBasedBuilder):
    """
    Base class for generic data loaders for vision and image data.


    Abstract class attributes to be overridden by a child class:
        BASE_FEATURE: feature object to decode data (i.e. datasets.Image, datasets.Audio, ...)
        BASE_COLUMN_NAME: string key name of a base feature (i.e. "image", "audio", ...)
        BUILDER_CONFIG_CLASS: builder config inherited from `folder_based_builder.FolderBasedBuilderConfig`
        EXTENSIONS: list of allowed extensions (only files with these extensions and METADATA_FILENAME files
            will be included in a dataset)
        CLASSIFICATION_TASK: classification task to use if labels are obtained from the folder structure
    """

    BASE_FEATURE: Type[FeatureType]
    BASE_COLUMN_NAME: str
    BUILDER_CONFIG_CLASS: FolderBasedBuilderConfig
    EXTENSIONS: List[str]
    CLASSIFICATION_TASK: TaskTemplate

    # Metadata files are recognized by exact basename; a dataset must use a single format
    METADATA_FILENAMES: List[str] = ["metadata.csv", "metadata.jsonl"]

    def _info(self):
        # Features may still be None here; they are finalized in `_split_generators`
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """Download the data files, infer labels/metadata, and build one SplitGenerator per split.

        Side effect: sets `self.info.features` (and possibly `self.info.task_templates`)
        when `self.config.features` is None.
        """
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")

        # Do an early pass if:
        # * `drop_labels` is None (default) or False, to infer the class labels
        # * `drop_metadata` is None (default) or False, to find the metadata files
        do_analyze = not self.config.drop_labels or not self.config.drop_metadata
        labels, path_depths = set(), set()
        metadata_files = collections.defaultdict(set)

        def analyze(files_or_archives, downloaded_files_or_dirs, split):
            # Collects labels, path depths and metadata files for one split by
            # mutating the `labels`/`path_depths`/`metadata_files` closures above.
            if len(downloaded_files_or_dirs) == 0:
                return
            # The files are separated from the archives at this point, so check the first sample
            # to see if it's a file or a directory and iterate accordingly
            if os.path.isfile(downloaded_files_or_dirs[0]):
                original_files, downloaded_files = files_or_archives, downloaded_files_or_dirs
                for original_file, downloaded_file in zip(original_files, downloaded_files):
                    original_file, downloaded_file = str(original_file), str(downloaded_file)
                    _, original_file_ext = os.path.splitext(original_file)
                    if original_file_ext.lower() in self.EXTENSIONS:
                        if not self.config.drop_labels:
                            labels.add(os.path.basename(os.path.dirname(original_file)))
                            path_depths.add(count_path_segments(original_file))
                    elif os.path.basename(original_file) in self.METADATA_FILENAMES:
                        metadata_files[split].add((original_file, downloaded_file))
                    else:
                        original_file_name = os.path.basename(original_file)
                        logger.debug(
                            f"The file '{original_file_name}' was ignored: it is not an image, and is not {self.METADATA_FILENAMES} either."
                        )
            else:
                archives, downloaded_dirs = files_or_archives, downloaded_files_or_dirs
                for archive, downloaded_dir in zip(archives, downloaded_dirs):
                    archive, downloaded_dir = str(archive), str(downloaded_dir)
                    for downloaded_dir_file in dl_manager.iter_files(downloaded_dir):
                        _, downloaded_dir_file_ext = os.path.splitext(downloaded_dir_file)
                        # NOTE(review): unlike the plain-file branch above, the extension is
                        # not lowercased before the membership test here — confirm intended.
                        if downloaded_dir_file_ext in self.EXTENSIONS:
                            if not self.config.drop_labels:
                                labels.add(os.path.basename(os.path.dirname(downloaded_dir_file)))
                                path_depths.add(count_path_segments(downloaded_dir_file))
                        elif os.path.basename(downloaded_dir_file) in self.METADATA_FILENAMES:
                            # `None` as the first tuple item marks a metadata file found inside an archive
                            metadata_files[split].add((None, downloaded_dir_file))
                        else:
                            archive_file_name = os.path.basename(archive)
                            original_file_name = os.path.basename(downloaded_dir_file)
                            logger.debug(
                                f"The file '{original_file_name}' from the archive '{archive_file_name}' was ignored: it is not an {self.BASE_COLUMN_NAME}, and is not {self.METADATA_FILENAMES} either."
                            )

        data_files = self.config.data_files
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files, archives = self._split_files_and_archives(files)
            downloaded_files = dl_manager.download(files)
            downloaded_dirs = dl_manager.download_and_extract(archives)
            if do_analyze:  # drop_metadata is None or False, drop_labels is None or False
                logger.info(f"Searching for labels and/or metadata files in {split_name} data files...")
                analyze(files, downloaded_files, split_name)
                analyze(archives, downloaded_dirs, split_name)

                if metadata_files:
                    # add metadata if `metadata_files` are found and `drop_metadata` is None (default) or False
                    add_metadata = not self.config.drop_metadata
                    # if `metadata_files` are found, add labels only if
                    # `drop_labels` is set up to False explicitly (not-default behavior)
                    add_labels = self.config.drop_labels is False
                else:
                    # if `metadata_files` are not found, don't add metadata
                    add_metadata = False
                    # if `metadata_files` are not found and `drop_labels` is None (default) -
                    # add labels if files are on the same level in directory hierarchy and there is more than one label
                    add_labels = (
                        (len(labels) > 1 and len(path_depths) == 1)
                        if self.config.drop_labels is None
                        else not self.config.drop_labels
                    )

                if add_labels:
                    logger.info("Adding the labels inferred from data directories to the dataset's features...")
                if add_metadata:
                    logger.info("Adding metadata to the dataset...")
            else:
                add_labels, add_metadata, metadata_files = False, False, {}

            splits.append(
                datasets.SplitGenerator(
                    name=split_name,
                    gen_kwargs={
                        "files": list(zip(files, downloaded_files))
                        + [(None, dl_manager.iter_files(downloaded_dir)) for downloaded_dir in downloaded_dirs],
                        "metadata_files": metadata_files,
                        "split_name": split_name,
                        "add_labels": add_labels,
                        "add_metadata": add_metadata,
                    },
                )
            )

        if add_metadata:
            # Verify that:
            # * all metadata files have the same set of features
            # * the `file_name` key is one of the metadata keys and is of type string
            features_per_metadata_file: List[Tuple[str, datasets.Features]] = []

            # Check that all metadata files share the same format
            metadata_ext = {
                os.path.splitext(original_metadata_file)[-1]
                for original_metadata_file, _ in itertools.chain.from_iterable(metadata_files.values())
            }
            if len(metadata_ext) > 1:
                raise ValueError(f"Found metadata files with different extensions: {list(metadata_ext)}")
            metadata_ext = metadata_ext.pop()

            for _, downloaded_metadata_file in itertools.chain.from_iterable(metadata_files.values()):
                pa_metadata_table = self._read_metadata(downloaded_metadata_file, metadata_ext=metadata_ext)
                features_per_metadata_file.append(
                    (downloaded_metadata_file, datasets.Features.from_arrow_schema(pa_metadata_table.schema))
                )
            for downloaded_metadata_file, metadata_features in features_per_metadata_file:
                if metadata_features != features_per_metadata_file[0][1]:
                    raise ValueError(
                        f"Metadata files {downloaded_metadata_file} and {features_per_metadata_file[0][0]} have different features: {features_per_metadata_file[0]} != {metadata_features}"
                    )
            metadata_features = features_per_metadata_file[0][1]
            if "file_name" not in metadata_features:
                raise ValueError("`file_name` must be present as dictionary key in metadata files")
            if metadata_features["file_name"] != datasets.Value("string"):
                raise ValueError("`file_name` key must be a string")
            del metadata_features["file_name"]
        else:
            metadata_features = None

        # Normally, we would do this in _info, but we need to know the labels and/or metadata
        # before building the features
        if self.config.features is None:
            if add_labels:
                self.info.features = datasets.Features(
                    {
                        self.BASE_COLUMN_NAME: self.BASE_FEATURE(),
                        "label": datasets.ClassLabel(names=sorted(labels)),
                    }
                )
                self.info.task_templates = [self.CLASSIFICATION_TASK.align_with_features(self.info.features)]
            else:
                self.info.features = datasets.Features({self.BASE_COLUMN_NAME: self.BASE_FEATURE()})

            if add_metadata:
                # Warn if there are duplicated keys in metadata compared to the existing features
                # (`BASE_COLUMN_NAME`, optionally "label")
                duplicated_keys = set(self.info.features) & set(metadata_features)
                if duplicated_keys:
                    logger.warning(
                        f"Ignoring metadata columns {list(duplicated_keys)} as they are already present in "
                        f"the features dictionary."
                    )
                # skip metadata duplicated keys
                self.info.features.update(
                    {
                        feature: metadata_features[feature]
                        for feature in metadata_features
                        if feature not in duplicated_keys
                    }
                )

        return splits

    def _split_files_and_archives(self, data_files):
        """Partition `data_files` into plain files (known extensions or metadata files) and archives."""
        files, archives = [], []
        for data_file in data_files:
            _, data_file_ext = os.path.splitext(data_file)
            if data_file_ext.lower() in self.EXTENSIONS:
                files.append(data_file)
            elif os.path.basename(data_file) in self.METADATA_FILENAMES:
                files.append(data_file)
            else:
                archives.append(data_file)
        return files, archives

    def _read_metadata(self, metadata_file, metadata_ext: str = ""):
        """Read a metadata file (.csv or JSON-lines) into a pyarrow Table."""
        if metadata_ext == ".csv":
            # Use `pd.read_csv` (although slower) instead of `pyarrow.csv.read_csv` for reading CSV files for consistency with the CSV packaged module
            return pa.Table.from_pandas(pd.read_csv(metadata_file))
        else:
            with open(metadata_file, "rb") as f:
                return paj.read_json(f)

    def _generate_examples(self, files, metadata_files, split_name, add_metadata, add_labels):
        """Yield (index, example) pairs for both plain files and files extracted from archives.

        Metadata lookups are cached per-directory (`last_checked_dir`) and the nearest
        metadata file is chosen by minimal relative-path depth.
        """
        split_metadata_files = metadata_files.get(split_name, [])
        # Template with None for every metadata feature so examples always share the same keys
        sample_empty_metadata = (
            {k: None for k in self.info.features if k != self.BASE_COLUMN_NAME} if self.info.features else {}
        )
        last_checked_dir = None
        metadata_dir = None
        metadata_dict = None
        downloaded_metadata_file = None

        metadata_ext = ""
        if split_metadata_files:
            # All metadata files share one extension (validated in `_split_generators`)
            metadata_ext = {
                os.path.splitext(original_metadata_file)[-1] for original_metadata_file, _ in split_metadata_files
            }
            metadata_ext = metadata_ext.pop()

        file_idx = 0
        for original_file, downloaded_file_or_dir in files:
            if original_file is not None:
                # Plain (non-archive) file: metadata files outside archives apply
                _, original_file_ext = os.path.splitext(original_file)
                if original_file_ext.lower() in self.EXTENSIONS:
                    if add_metadata:
                        # If the file is a file of a needed type, and we've just entered a new directory,
                        # find the nereast metadata file (by counting path segments) for the directory
                        current_dir = os.path.dirname(original_file)
                        if last_checked_dir is None or last_checked_dir != current_dir:
                            last_checked_dir = current_dir
                            metadata_file_candidates = [
                                (
                                    os.path.relpath(original_file, os.path.dirname(metadata_file_candidate)),
                                    metadata_file_candidate,
                                    downloaded_metadata_file,
                                )
                                for metadata_file_candidate, downloaded_metadata_file in split_metadata_files
                                if metadata_file_candidate
                                is not None  # ignore metadata_files that are inside archives
                                and not os.path.relpath(
                                    original_file, os.path.dirname(metadata_file_candidate)
                                ).startswith("..")
                            ]
                            if metadata_file_candidates:
                                _, metadata_file, downloaded_metadata_file = min(
                                    metadata_file_candidates, key=lambda x: count_path_segments(x[0])
                                )
                                pa_metadata_table = self._read_metadata(
                                    downloaded_metadata_file, metadata_ext=metadata_ext
                                )
                                pa_file_name_array = pa_metadata_table["file_name"]
                                pa_metadata_table = pa_metadata_table.drop(["file_name"])
                                metadata_dir = os.path.dirname(metadata_file)
                                # Normalize to forward slashes so lookups work across platforms
                                metadata_dict = {
                                    os.path.normpath(file_name).replace("\\", "/"): sample_metadata
                                    for file_name, sample_metadata in zip(
                                        pa_file_name_array.to_pylist(), pa_metadata_table.to_pylist()
                                    )
                                }
                            else:
                                raise ValueError(
                                    f"One or several metadata{metadata_ext} were found, but not in the same directory or in a parent directory of {downloaded_file_or_dir}."
                                )
                        if metadata_dir is not None and downloaded_metadata_file is not None:
                            file_relpath = os.path.relpath(original_file, metadata_dir)
                            file_relpath = file_relpath.replace("\\", "/")
                            if file_relpath not in metadata_dict:
                                raise ValueError(
                                    f"{self.BASE_COLUMN_NAME} at {file_relpath} doesn't have metadata in {downloaded_metadata_file}."
                                )
                            sample_metadata = metadata_dict[file_relpath]
                        else:
                            raise ValueError(
                                f"One or several metadata{metadata_ext} were found, but not in the same directory or in a parent directory of {downloaded_file_or_dir}."
                            )
                    else:
                        sample_metadata = {}
                    if add_labels:
                        sample_label = {"label": os.path.basename(os.path.dirname(original_file))}
                    else:
                        sample_label = {}
                    yield (
                        file_idx,
                        {
                            **sample_empty_metadata,
                            self.BASE_COLUMN_NAME: downloaded_file_or_dir,
                            **sample_metadata,
                            **sample_label,
                        },
                    )
                    file_idx += 1
            else:
                # Archive case: `downloaded_file_or_dir` is an iterator over extracted files,
                # and only metadata files found inside archives (original path None) apply
                for downloaded_dir_file in downloaded_file_or_dir:
                    _, downloaded_dir_file_ext = os.path.splitext(downloaded_dir_file)
                    if downloaded_dir_file_ext.lower() in self.EXTENSIONS:
                        if add_metadata:
                            current_dir = os.path.dirname(downloaded_dir_file)
                            if last_checked_dir is None or last_checked_dir != current_dir:
                                last_checked_dir = current_dir
                                metadata_file_candidates = [
                                    (
                                        os.path.relpath(
                                            downloaded_dir_file, os.path.dirname(downloaded_metadata_file)
                                        ),
                                        metadata_file_candidate,
                                        downloaded_metadata_file,
                                    )
                                    for metadata_file_candidate, downloaded_metadata_file in split_metadata_files
                                    if metadata_file_candidate
                                    is None  # ignore metadata_files that are not inside archives
                                    and not os.path.relpath(
                                        downloaded_dir_file, os.path.dirname(downloaded_metadata_file)
                                    ).startswith("..")
                                ]
                                if metadata_file_candidates:
                                    _, metadata_file, downloaded_metadata_file = min(
                                        metadata_file_candidates, key=lambda x: count_path_segments(x[0])
                                    )
                                    pa_metadata_table = self._read_metadata(
                                        downloaded_metadata_file, metadata_ext=metadata_ext
                                    )
                                    pa_file_name_array = pa_metadata_table["file_name"]
                                    pa_metadata_table = pa_metadata_table.drop(["file_name"])
                                    metadata_dir = os.path.dirname(downloaded_metadata_file)
                                    metadata_dict = {
                                        os.path.normpath(file_name).replace("\\", "/"): sample_metadata
                                        for file_name, sample_metadata in zip(
                                            pa_file_name_array.to_pylist(), pa_metadata_table.to_pylist()
                                        )
                                    }
                                else:
                                    raise ValueError(
                                        f"One or several metadata{metadata_ext} were found, but not in the same directory or in a parent directory of {downloaded_dir_file}."
                                    )
                            if metadata_dir is not None and downloaded_metadata_file is not None:
                                downloaded_dir_file_relpath = os.path.relpath(downloaded_dir_file, metadata_dir)
                                downloaded_dir_file_relpath = downloaded_dir_file_relpath.replace("\\", "/")
                                if downloaded_dir_file_relpath not in metadata_dict:
                                    raise ValueError(
                                        f"{self.BASE_COLUMN_NAME} at {downloaded_dir_file_relpath} doesn't have metadata in {downloaded_metadata_file}."
                                    )
                                sample_metadata = metadata_dict[downloaded_dir_file_relpath]
                            else:
                                raise ValueError(
                                    f"One or several metadata{metadata_ext} were found, but not in the same directory or in a parent directory of {downloaded_dir_file}."
                                )
                        else:
                            sample_metadata = {}
                        if add_labels:
                            sample_label = {"label": os.path.basename(os.path.dirname(downloaded_dir_file))}
                        else:
                            sample_label = {}
                        yield (
                            file_idx,
                            {
                                **sample_empty_metadata,
                                self.BASE_COLUMN_NAME: downloaded_dir_file,
                                **sample_metadata,
                                **sample_label,
                            },
                        )
                        file_idx += 1
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/packaged_modules/generator/__pycache__/generator.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/datasets/packaged_modules/generator/__pycache__/generator.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c695d7595108377f0e86527e1736044d1a5debf4
Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/datasets/packaged_modules/generator/__pycache__/generator.cpython-310.pyc differ
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/packaged_modules/json/__pycache__/__init__.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/datasets/packaged_modules/json/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5f3c04c8891ee9f270836c70385d91837ec5d833
Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/datasets/packaged_modules/json/__pycache__/__init__.cpython-310.pyc differ
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/packaged_modules/json/__pycache__/json.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/datasets/packaged_modules/json/__pycache__/json.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..203c4f5346a1bb3c59909c60faab78859e222ca6
Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/datasets/packaged_modules/json/__pycache__/json.cpython-310.pyc differ
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/packaged_modules/pandas/__init__.py b/evalkit_tf437/lib/python3.10/site-packages/datasets/packaged_modules/pandas/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/packaged_modules/pandas/__pycache__/__init__.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/datasets/packaged_modules/pandas/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..087bc3e52fb16ed321d328d2e0b58175d1275ffe
Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/datasets/packaged_modules/pandas/__pycache__/__init__.cpython-310.pyc differ
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/packaged_modules/parquet/__init__.py b/evalkit_tf437/lib/python3.10/site-packages/datasets/packaged_modules/parquet/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/packaged_modules/parquet/__pycache__/__init__.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/datasets/packaged_modules/parquet/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..deb270298c1ca74418e425a937beb62be5cb44d9
Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/datasets/packaged_modules/parquet/__pycache__/__init__.cpython-310.pyc differ
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/packaged_modules/parquet/parquet.py b/evalkit_tf437/lib/python3.10/site-packages/datasets/packaged_modules/parquet/parquet.py
new file mode 100644
index 0000000000000000000000000000000000000000..d0a66ac906e35e166ceff9987b2eff1bb3982d3f
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/datasets/packaged_modules/parquet/parquet.py
@@ -0,0 +1,99 @@
+import itertools
+from dataclasses import dataclass
+from typing import List, Optional
+
+import pyarrow as pa
+import pyarrow.parquet as pq
+
+import datasets
+from datasets.table import table_cast
+
+
+logger = datasets.utils.logging.get_logger(__name__)
+
+
@dataclass
class ParquetConfig(datasets.BuilderConfig):
    """BuilderConfig for Parquet."""

    # Rows per yielded Arrow batch; when None, the first row group's size is used
    batch_size: Optional[int] = None
    # Optional subset of columns to read; must match `features` keys when both are given
    columns: Optional[List[str]] = None
    # Explicit features to cast each table to; inferred from the file schema when None
    features: Optional[datasets.Features] = None
+
+
class Parquet(datasets.ArrowBasedBuilder):
    """Arrow-based builder that streams record batches out of Parquet data files."""

    BUILDER_CONFIG_CLASS = ParquetConfig

    def _info(self):
        # `columns` and `features` must describe the same set of columns when both are set
        if (
            self.config.columns is not None
            and self.config.features is not None
            and set(self.config.columns) != set(self.config.features)
        ):
            # NOTE(review): ValueError receives two positional arguments here (trailing comma),
            # so the message renders as a tuple — confirm whether string concatenation was intended
            raise ValueError(
                "The columns and features argument must contain the same columns, but got ",
                f"{self.config.columns} and {self.config.features}",
            )
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles"""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            # No split mapping provided: everything goes into a single "train" split
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(files):
                    with open(file, "rb") as f:
                        self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f))
                    break
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        # Restrict the inferred features to the requested columns, if any
        if self.config.columns is not None and set(self.config.columns) != set(self.info.features):
            self.info.features = datasets.Features(
                {col: feat for col, feat in self.info.features.items() if col in self.config.columns}
            )
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        """Cast `pa_table` to the builder's feature schema, if one is known."""
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.info.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        """Yield ("{file_idx}_{batch_idx}", table) pairs, one per Parquet record batch."""
        if self.config.features is not None and self.config.columns is not None:
            if sorted(field.name for field in self.info.features.arrow_schema) != sorted(self.config.columns):
                raise ValueError(
                    f"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'"
                )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                parquet_file = pq.ParquetFile(f)
                if parquet_file.metadata.num_row_groups > 0:
                    # Default the batch size to the first row group's row count
                    batch_size = self.config.batch_size or parquet_file.metadata.row_group(0).num_rows
                    try:
                        for batch_idx, record_batch in enumerate(
                            parquet_file.iter_batches(batch_size=batch_size, columns=self.config.columns)
                        ):
                            pa_table = pa.Table.from_batches([record_batch])
                            # Uncomment for debugging (will print the Arrow table size and elements)
                            # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                            # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                            yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table)
                    except ValueError as e:
                        logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                        raise
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/packaged_modules/spark/__pycache__/spark.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/datasets/packaged_modules/spark/__pycache__/spark.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ed4129e1b86a4fd18a5c232fb669750234ec4c15
Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/datasets/packaged_modules/spark/__pycache__/spark.cpython-310.pyc differ
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/packaged_modules/sql/__pycache__/sql.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/datasets/packaged_modules/sql/__pycache__/sql.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d1c9a6040383e349d2567decd046dccb1218a14d
Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/datasets/packaged_modules/sql/__pycache__/sql.cpython-310.pyc differ
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/packaged_modules/sql/sql.py b/evalkit_tf437/lib/python3.10/site-packages/datasets/packaged_modules/sql/sql.py
new file mode 100644
index 0000000000000000000000000000000000000000..b0791ba88594fb8e76c957a11cca9936cf321bb4
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/datasets/packaged_modules/sql/sql.py
@@ -0,0 +1,118 @@
+import sys
+from dataclasses import dataclass
+from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Union
+
+import pandas as pd
+import pyarrow as pa
+
+import datasets
+import datasets.config
+from datasets.features.features import require_storage_cast
+from datasets.table import table_cast
+
+
+if TYPE_CHECKING:
+ import sqlite3
+
+ import sqlalchemy
+
+
+logger = datasets.utils.logging.get_logger(__name__)
+
+
@dataclass
class SqlConfig(datasets.BuilderConfig):
    """BuilderConfig for SQL.

    Wraps the arguments of `pandas.read_sql` plus datasets-specific options
    (`features` for the target schema, `chunksize` for the size of the
    generated Arrow tables).
    """

    # Query string or sqlalchemy Selectable to execute.
    sql: Union[str, "sqlalchemy.sql.Selectable"] = None
    # URI string or a live DB connection/engine object.
    con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] = None
    index_col: Optional[Union[str, List[str]]] = None
    coerce_float: bool = True
    params: Optional[Union[List, Tuple, Dict]] = None
    parse_dates: Optional[Union[List, Dict]] = None
    columns: Optional[List[str]] = None
    # Number of rows per generated Arrow table; None reads everything in one go.
    chunksize: Optional[int] = 10_000
    features: Optional[datasets.Features] = None

    def __post_init__(self):
        # `sql` and `con` default to None only to satisfy the dataclass field
        # ordering rules; they are in fact required.
        if self.sql is None:
            raise ValueError("sql must be specified")
        if self.con is None:
            raise ValueError("con must be specified")

    def create_config_id(
        self,
        config_kwargs: dict,
        custom_features: Optional[datasets.Features] = None,
    ) -> str:
        """Build a deterministic config id from the (possibly unhashable) kwargs."""
        config_kwargs = config_kwargs.copy()
        # We need to stringify the Selectable object to make its hash deterministic

        # The process of stringifying is explained here: http://docs.sqlalchemy.org/en/latest/faq/sqlexpressions.html
        sql = config_kwargs["sql"]
        if not isinstance(sql, str):
            if datasets.config.SQLALCHEMY_AVAILABLE and "sqlalchemy" in sys.modules:
                import sqlalchemy

                if isinstance(sql, sqlalchemy.sql.Selectable):
                    # Compile against a throwaway engine of the same dialect as `con`
                    # so the rendered SQL text is stable.
                    engine = sqlalchemy.create_engine(config_kwargs["con"].split("://")[0] + "://")
                    sql_str = str(sql.compile(dialect=engine.dialect))
                    config_kwargs["sql"] = sql_str
                else:
                    raise TypeError(
                        f"Supported types for 'sql' are string and sqlalchemy.sql.Selectable but got {type(sql)}: {sql}"
                    )
            else:
                raise TypeError(
                    f"Supported types for 'sql' are string and sqlalchemy.sql.Selectable but got {type(sql)}: {sql}"
                )
        con = config_kwargs["con"]
        if not isinstance(con, str):
            # Connection objects can't be hashed deterministically; fall back to id()
            # (the cache therefore won't be reused across sessions).
            config_kwargs["con"] = id(con)
            logger.info(
                f"SQL connection 'con' of type {type(con)} couldn't be hashed properly. To enable hashing, specify 'con' as URI string instead."
            )

        return super().create_config_id(config_kwargs, custom_features=custom_features)

    @property
    def pd_read_sql_kwargs(self):
        """Keyword arguments forwarded verbatim to `pandas.read_sql`."""
        pd_read_sql_kwargs = {
            "index_col": self.index_col,
            "columns": self.columns,
            "params": self.params,
            "coerce_float": self.coerce_float,
            "parse_dates": self.parse_dates,
        }
        return pd_read_sql_kwargs
+
+
class Sql(datasets.ArrowBasedBuilder):
    """Arrow-based builder that reads examples from a SQL query/table via pandas."""

    BUILDER_CONFIG_CLASS = SqlConfig

    def _info(self):
        # `features` may be None; the library then infers them from the first table.
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        # A SQL source has no file splits: everything lands in a single train split.
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={})]

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        features = self.config.features
        if features is None:
            return pa_table
        schema = features.arrow_schema
        if any(require_storage_cast(feature) for feature in features.values()):
            # more expensive cast; allows str <-> int/float or str to Audio for example
            return table_cast(pa_table, schema)
        # cheaper cast: select/reorder columns under the target schema
        return pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)

    def _generate_tables(self):
        chunk_size = self.config.chunksize
        reader = pd.read_sql(
            self.config.sql, self.config.con, chunksize=chunk_size, **self.config.pd_read_sql_kwargs
        )
        # Without a chunksize pandas returns one DataFrame; normalize to an iterable.
        chunks = [reader] if chunk_size is None else reader
        for chunk_idx, dataframe in enumerate(chunks):
            yield chunk_idx, self._cast_table(pa.Table.from_pandas(dataframe))
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/packaged_modules/webdataset/__pycache__/webdataset.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/datasets/packaged_modules/webdataset/__pycache__/webdataset.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a5777bfb43385c8713c934473be5e525881d2836
Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/datasets/packaged_modules/webdataset/__pycache__/webdataset.cpython-310.pyc differ
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/parallel/__init__.py b/evalkit_tf437/lib/python3.10/site-packages/datasets/parallel/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..d83093588514bec18b3536f4287a699939af499e
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/datasets/parallel/__init__.py
@@ -0,0 +1 @@
+from .parallel import parallel_backend, parallel_map, ParallelBackendConfig # noqa F401
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/parallel/__pycache__/parallel.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/datasets/parallel/__pycache__/parallel.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..93543ac54f491719f61198e6e615873515a49e05
Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/datasets/parallel/__pycache__/parallel.cpython-310.pyc differ
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/parallel/parallel.py b/evalkit_tf437/lib/python3.10/site-packages/datasets/parallel/parallel.py
new file mode 100644
index 0000000000000000000000000000000000000000..4e1a8546c586b94094f915e64268c58155c99fba
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/datasets/parallel/parallel.py
@@ -0,0 +1,113 @@
+import contextlib
+from multiprocessing import Pool, RLock
+
+from tqdm.auto import tqdm
+
+from ..utils import experimental, logging
+
+
+logger = logging.get_logger(__name__)
+
+
class ParallelBackendConfig:
    """Process-wide holder for the joblib backend name set by `parallel_backend`.

    When `backend_name` is None, `parallel_map` uses multiprocessing.Pool instead
    of joblib.
    """

    # Name of the joblib backend (e.g. "spark"), or None for multiprocessing.
    backend_name = None
+
+
@experimental
def parallel_map(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    """
    **Experimental.** Apply a function to iterable elements in parallel.

    Dispatches to joblib when a backend was configured via `parallel_backend`,
    otherwise to a plain multiprocessing.Pool.

    Args:
        function (`Callable[[Any], Any]`): Function to be applied to `iterable`.
        iterable (`list`, `tuple` or `np.ndarray`): Iterable elements to apply function to.
        num_proc (`int`): Number of processes (if no backend specified) or jobs (using joblib).
        types (`tuple`): Additional types (besides `dict` values) to apply `function` recursively to their elements.
        disable_tqdm (`bool`): Whether to disable the tqdm progressbar.
        desc (`str`): Prefix for the tqdm progressbar.
        single_map_nested_func (`Callable`): Map function that applies `function` to an element from `iterable`.
            Takes a tuple of function, data_struct, types, rank, disable_tqdm, desc as input, where data_struct is an
            element of `iterable`, and `rank` is used for progress bar.
    """
    use_joblib = ParallelBackendConfig.backend_name is not None
    mapper = _map_with_joblib if use_joblib else _map_with_multiprocessing_pool
    return mapper(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func)
+
+
def _map_with_multiprocessing_pool(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    """Split `iterable` into contiguous slices and map `single_map_nested_func` over them in a Pool.

    Args:
        function: Function forwarded (inside each task tuple) to `single_map_nested_func`.
        iterable: Sequence to split among worker processes; must support len() and slicing.
        num_proc (int): Requested number of processes; capped at len(iterable).
        types (tuple): Additional container types, forwarded per slice.
        disable_tqdm (bool): Whether to disable the tqdm progress bars in workers.
        desc (str): Prefix for the tqdm progress bars.
        single_map_nested_func: Callable applied to each
            (function, slice, types, rank, disable_tqdm, desc) tuple; returns a list.

    Returns:
        list: The per-slice results flattened back into a single list.
    """
    num_proc = min(num_proc, len(iterable))
    split_kwds = []  # We organize the splits ourselves (contiguous splits)
    if num_proc > 0:
        # Loop-invariant slice arithmetic hoisted out of the loop: each worker gets
        # `div` items and the first `mod` workers get one extra. (The guard keeps an
        # empty iterable on the original path: Pool(0) below raises ValueError.)
        div, mod = divmod(len(iterable), num_proc)
        for index in range(num_proc):
            start = div * index + min(index, mod)
            end = start + div + (1 if index < mod else 0)
            split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc))

    if len(iterable) != sum(len(i[1]) for i in split_kwds):
        raise ValueError(
            f"Error dividing inputs iterable among processes. "
            f"Total number of objects {len(iterable)}, "
            f"length: {sum(len(i[1]) for i in split_kwds)}"
        )

    logger.info(
        f"Spawning {num_proc} processes for {len(iterable)} objects in slices of {[len(i[1]) for i in split_kwds]}"
    )
    initargs, initializer = None, None
    if not disable_tqdm:
        # Share one lock across workers so their progress bars don't garble each other.
        initargs, initializer = (RLock(),), tqdm.set_lock
    with Pool(num_proc, initargs=initargs, initializer=initializer) as pool:
        mapped = pool.map(single_map_nested_func, split_kwds)
        logger.info(f"Finished {num_proc} processes")
        mapped = [obj for proc_res in mapped for obj in proc_res]
        logger.info(f"Unpacked {len(mapped)} objects")

    return mapped
+
+
def _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    # No progress bar here: tqdm can't be applied to joblib accurately without
    # monkey-patching joblib internals, which are subject to change.
    import joblib

    tasks = (
        joblib.delayed(single_map_nested_func)((function, obj, types, None, True, None)) for obj in iterable
    )
    with joblib.parallel_backend(ParallelBackendConfig.backend_name, n_jobs=num_proc):
        return joblib.Parallel()(tasks)
+
+
@experimental
@contextlib.contextmanager
def parallel_backend(backend_name: str):
    """
    **Experimental.** Configures the parallel backend for parallelized dataset loading, which uses the parallelization
    implemented by joblib.

    Args:
        backend_name (str): Name of backend for parallelization implementation, has to be supported by joblib.

    Example usage:
    ```py
    with parallel_backend('spark'):
      dataset = load_dataset(..., num_proc=2)
    ```
    """
    ParallelBackendConfig.backend_name = backend_name

    if backend_name == "spark":
        # joblibspark registers the "spark" backend with joblib on import.
        from joblibspark import register_spark

        register_spark()

    # TODO: call create_cache_and_write_probe if "download" in steps
    # TODO: raise NotImplementedError when Dataset.map etc is called

    try:
        yield
    finally:
        # Always reset the global backend, even if the body raised.
        ParallelBackendConfig.backend_name = None
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/tasks/__init__.py b/evalkit_tf437/lib/python3.10/site-packages/datasets/tasks/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..9a18a1e79349cfb32a743aeca4c3e9a809645a75
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/datasets/tasks/__init__.py
@@ -0,0 +1,46 @@
+from typing import Optional
+
+from ..utils.logging import get_logger
+from .audio_classification import AudioClassification
+from .automatic_speech_recognition import AutomaticSpeechRecognition
+from .base import TaskTemplate
+from .image_classification import ImageClassification
+from .language_modeling import LanguageModeling
+from .question_answering import QuestionAnsweringExtractive
+from .summarization import Summarization
+from .text_classification import TextClassification
+
+
# Public names re-exported by `datasets.tasks`.
__all__ = [
    "AutomaticSpeechRecognition",
    "AudioClassification",
    "ImageClassification",
    "LanguageModeling",
    "QuestionAnsweringExtractive",
    "Summarization",
    "TaskTemplate",
    "TextClassification",
]

logger = get_logger(__name__)


# Maps each template class's default `task` name to the class itself; used by
# `task_template_from_dict` to resolve serialized task templates.
NAME2TEMPLATE = {
    AutomaticSpeechRecognition.task: AutomaticSpeechRecognition,
    AudioClassification.task: AudioClassification,
    ImageClassification.task: ImageClassification,
    LanguageModeling.task: LanguageModeling,
    QuestionAnsweringExtractive.task: QuestionAnsweringExtractive,
    Summarization.task: Summarization,
    TextClassification.task: TextClassification,
}
+
+
def task_template_from_dict(task_template_dict: dict) -> Optional[TaskTemplate]:
    """Create one of the supported task templates in :py:mod:`datasets.tasks` from a dictionary.

    Returns None (after logging a warning) when the dict has no "task" key or
    names a task that is not in `NAME2TEMPLATE`.
    """
    task_name = task_template_dict.get("task")
    template = NAME2TEMPLATE.get(task_name) if task_name is not None else None
    if template is None:
        # Previously only a missing "task" key was handled; an unknown task name
        # fell through to `None.from_dict(...)` and crashed with AttributeError.
        logger.warning(f"Couldn't find template for task '{task_name}'. Available templates: {list(NAME2TEMPLATE)}")
        return None
    return template.from_dict(task_template_dict)
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/tasks/__pycache__/question_answering.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/datasets/tasks/__pycache__/question_answering.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7d174408553f7992af2239130c8bd6f43f1d03cf
Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/datasets/tasks/__pycache__/question_answering.cpython-310.pyc differ
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/tasks/base.py b/evalkit_tf437/lib/python3.10/site-packages/datasets/tasks/base.py
new file mode 100644
index 0000000000000000000000000000000000000000..21a5337ffc0784a1ed12f4617a9a0ef6ba7253e5
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/datasets/tasks/base.py
@@ -0,0 +1,39 @@
+import abc
+import copy
+import dataclasses
+from dataclasses import dataclass
+from typing import ClassVar, Dict, Type, TypeVar
+
+from ..features import Features
+
+
+T = TypeVar("T", bound="TaskTemplate")
+
+
@dataclass(frozen=True)
class TaskTemplate(abc.ABC):
    """Abstract base for task templates: pairs an input schema with a label schema.

    Subclasses set `task`, `input_schema` and `label_schema`, and implement
    `column_mapping` to rename dataset columns to the template's canonical ones.
    """

    # `task` is a regular field (not a ClassVar) so it survives `asdict` output
    # for JSON serialization.
    task: str
    input_schema: ClassVar[Features]
    label_schema: ClassVar[Features]

    def align_with_features(self: T, features: Features) -> T:
        """Return a copy of the template; the base implementation ignores `features`."""
        return copy.deepcopy(self)

    @property
    def features(self) -> Features:
        """Combined input and label schema."""
        return Features(**self.input_schema, **self.label_schema)

    @property
    @abc.abstractmethod
    def column_mapping(self) -> Dict[str, str]:
        raise NotImplementedError

    @classmethod
    def from_dict(cls: Type[T], template_dict: dict) -> T:
        """Instantiate from a dict, silently dropping keys that are not dataclass fields."""
        allowed = {f.name for f in dataclasses.fields(cls)}
        kwargs = {key: value for key, value in template_dict.items() if key in allowed}
        return cls(**kwargs)
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/tasks/language_modeling.py b/evalkit_tf437/lib/python3.10/site-packages/datasets/tasks/language_modeling.py
new file mode 100644
index 0000000000000000000000000000000000000000..b2837744fa1718e57ffbeeca1a6e9a60c9468d8f
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/datasets/tasks/language_modeling.py
@@ -0,0 +1,18 @@
+from dataclasses import dataclass, field
+from typing import ClassVar, Dict
+
+from ..features import Features, Value
+from .base import TaskTemplate
+
+
@dataclass(frozen=True)
class LanguageModeling(TaskTemplate):
    """Task template for language modeling over a single text column."""

    # `task` is not a ClassVar so it is included in `asdict` output for JSON serialization.
    task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True})

    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    # Language modeling has no separate labels: the text is its own target.
    label_schema: ClassVar[Features] = Features({})
    # Name of the dataset column holding the text.
    text_column: str = "text"

    @property
    def column_mapping(self) -> Dict[str, str]:
        # Rename the dataset's text column to the canonical "text".
        return {self.text_column: "text"}
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/tasks/summarization.py b/evalkit_tf437/lib/python3.10/site-packages/datasets/tasks/summarization.py
new file mode 100644
index 0000000000000000000000000000000000000000..a0057b07b4f62947c1bfde1962bf06be1427c363
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/datasets/tasks/summarization.py
@@ -0,0 +1,19 @@
+from dataclasses import dataclass, field
+from typing import ClassVar, Dict
+
+from ..features import Features, Value
+from .base import TaskTemplate
+
+
@dataclass(frozen=True)
class Summarization(TaskTemplate):
    """Task template mapping a text column and a summary column to canonical names."""

    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="summarization", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"summary": Value("string")})
    # Names of the dataset columns holding the source text and the target summary.
    text_column: str = "text"
    summary_column: str = "summary"

    @property
    def column_mapping(self) -> Dict[str, str]:
        # Rename dataset columns to the canonical "text" / "summary".
        return {self.text_column: "text", self.summary_column: "summary"}
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/utils/__pycache__/_datasets_server.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/datasets/utils/__pycache__/_datasets_server.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5b4b742318717bb35cadf30908b1df18e34ec92b
Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/datasets/utils/__pycache__/_datasets_server.cpython-310.pyc differ
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/utils/__pycache__/deprecation_utils.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/datasets/utils/__pycache__/deprecation_utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..869cb27ae0ccc6185943e60b272bc389e36b24fd
Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/datasets/utils/__pycache__/deprecation_utils.cpython-310.pyc differ
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/utils/__pycache__/extract.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/datasets/utils/__pycache__/extract.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..161f1f50482aa0def6a6b99cc6fbd889281cc9c9
Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/datasets/utils/__pycache__/extract.cpython-310.pyc differ
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/utils/__pycache__/filelock.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/datasets/utils/__pycache__/filelock.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..518ed8048c9d45c97deecb259ac9350b46e52ead
Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/datasets/utils/__pycache__/filelock.cpython-310.pyc differ
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/utils/__pycache__/metadata.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/datasets/utils/__pycache__/metadata.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..10f136d1cf35a601bfd326a490849484dcc59ff9
Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/datasets/utils/__pycache__/metadata.cpython-310.pyc differ
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/utils/__pycache__/py_utils.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/datasets/utils/__pycache__/py_utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..217e0e57fe61c765ddff41e334613d0dd17ad33e
Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/datasets/utils/__pycache__/py_utils.cpython-310.pyc differ
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/utils/__pycache__/readme.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/datasets/utils/__pycache__/readme.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c2ee7fc304a286dffec6b72844153f69bd9b913f
Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/datasets/utils/__pycache__/readme.cpython-310.pyc differ
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/utils/__pycache__/stratify.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/datasets/utils/__pycache__/stratify.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..444d0a4d170ede0395e6fb65ef17544b23b00c06
Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/datasets/utils/__pycache__/stratify.cpython-310.pyc differ
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/utils/__pycache__/tf_utils.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/datasets/utils/__pycache__/tf_utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f64f8d394e99072e03c893094f76326b284a1df0
Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/datasets/utils/__pycache__/tf_utils.cpython-310.pyc differ
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/utils/__pycache__/tqdm.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/datasets/utils/__pycache__/tqdm.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..21803c4e50b298c85a521bafe92b9d0f64e87944
Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/datasets/utils/__pycache__/tqdm.cpython-310.pyc differ
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/utils/__pycache__/track.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/datasets/utils/__pycache__/track.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ae7bfe02dfd5fed7ff95168ba7ccc2c204507d12
Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/datasets/utils/__pycache__/track.cpython-310.pyc differ
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/utils/__pycache__/typing.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/datasets/utils/__pycache__/typing.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fb0a984025b483b42f38f12d8419b672e44b6281
Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/datasets/utils/__pycache__/typing.cpython-310.pyc differ
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/utils/file_utils.py b/evalkit_tf437/lib/python3.10/site-packages/datasets/utils/file_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..b3532b9697684e1d2077dc4bdee3e2e88956c5ec
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/datasets/utils/file_utils.py
@@ -0,0 +1,690 @@
+"""
+Utilities for working with the local dataset cache.
+This file is adapted from the AllenNLP library at https://github.com/allenai/allennlp
+Copyright by the AllenNLP authors.
+"""
+
+import copy
+import io
+import json
+import multiprocessing
+import os
+import posixpath
+import re
+import shutil
+import sys
+import time
+import urllib
+import warnings
+from contextlib import closing, contextmanager
+from functools import partial
+from pathlib import Path
+from typing import Optional, TypeVar, Union
+from unittest.mock import patch
+from urllib.parse import urljoin, urlparse
+
+import fsspec
+import huggingface_hub
+import requests
+from fsspec.core import strip_protocol
+from fsspec.utils import can_be_local
+from huggingface_hub.utils import insecure_hashlib
+from packaging import version
+
+from .. import __version__, config
+from ..download.download_config import DownloadConfig
+from . import _tqdm, logging
+from . import tqdm as hf_tqdm
+from ._filelock import FileLock
+from .extract import ExtractManager
+
+
+logger = logging.get_logger(__name__) # pylint: disable=invalid-name
+
+INCOMPLETE_SUFFIX = ".incomplete"
+
+T = TypeVar("T", str, Path)
+
+
def init_hf_modules(hf_modules_cache: Optional[Union[Path, str]] = None) -> str:
    """Ensure the dynamic-modules cache directory exists, is a package, and is importable.

    Defaults to config.HF_MODULES_CACHE ('~/.cache/huggingface/modules', also settable
    via the HF_MODULES_CACHE environment variable). This is used to add modules such
    as `datasets_modules`. Returns the directory path as a string.
    """
    cache_path = str(hf_modules_cache if hf_modules_cache is not None else config.HF_MODULES_CACHE)
    if cache_path not in sys.path:
        sys.path.append(cache_path)

    os.makedirs(cache_path, exist_ok=True)
    init_file = os.path.join(cache_path, "__init__.py")
    if not os.path.exists(init_file):
        # Touch an empty __init__.py so the directory is importable as a package.
        with open(init_file, "w"):
            pass
    return cache_path
+
+
def is_remote_url(url_or_filename: str) -> bool:
    """Return True when the string has a URL scheme that is not a Windows drive letter."""
    scheme = urlparse(url_or_filename).scheme
    return scheme != "" and not os.path.ismount(scheme + ":/")
+
+
def is_local_path(url_or_filename: str) -> bool:
    """Return True for local filesystem paths.

    On unix the scheme of a local path is empty (absolute or relative); on Windows
    an absolute path parses with the drive letter as scheme (see
    https://bugs.python.org/issue42215), which `ismount` detects.
    """
    scheme = urlparse(url_or_filename).scheme
    if scheme == "":
        return True
    return os.path.ismount(scheme + ":/")
+
+
def is_relative_path(url_or_filename: str) -> bool:
    """Return True when the string has no URL scheme and is not an absolute path."""
    if urlparse(url_or_filename).scheme != "":
        return False
    return not os.path.isabs(url_or_filename)
+
+
def relative_to_absolute_path(path: T) -> T:
    """Convert relative path to absolute path, expanding env vars and "~".

    The input type (str or Path) is preserved in the return value.
    """
    expanded = os.path.expanduser(os.path.expandvars(str(path)))
    abs_path_str = os.path.abspath(expanded)
    if isinstance(path, Path):
        return Path(abs_path_str)
    return abs_path_str
+
+
def hf_bucket_url(identifier: str, filename: str, use_cdn=False, dataset=True) -> str:
    """Build the bucket URL for a dataset (or metric) file, via CDN when requested."""
    if dataset:
        endpoint = config.CLOUDFRONT_DATASETS_DISTRIB_PREFIX if use_cdn else config.S3_DATASETS_BUCKET_PREFIX
    else:
        endpoint = config.CLOUDFRONT_METRICS_DISTRIB_PREFIX if use_cdn else config.S3_METRICS_BUCKET_PREFIX
    return f"{endpoint}/{identifier}/{filename}"
+
+
def head_hf_s3(
    identifier: str, filename: str, use_cdn=False, dataset=True, max_retries=0
) -> Union[requests.Response, Exception]:
    """HEAD the bucket URL for `identifier`/`filename`, retrying up to `max_retries` times."""
    url = hf_bucket_url(identifier=identifier, filename=filename, use_cdn=use_cdn, dataset=dataset)
    return http_head(url, max_retries=max_retries)
+
+
def hf_github_url(path: str, name: str, dataset=True, revision: Optional[str] = None) -> str:
    """Build the GitHub URL for a dataset or metric script at `revision`."""
    if not revision:
        # Dev builds track `main`; releases pin their own version tag.
        revision = "main" if version.parse(__version__).is_devrelease else __version__
    template = config.REPO_DATASETS_URL if dataset else config.REPO_METRICS_URL
    return template.format(revision=revision, path=path, name=name)
+
+
def url_or_path_join(base_name: str, *pathnames: str) -> str:
    """Join path segments: posix rules for remote URLs, Path joining otherwise."""
    if not is_remote_url(base_name):
        return Path(base_name, *pathnames).as_posix()
    # Normalize each segment to forward slashes and strip leading "/" so that
    # posixpath.join doesn't discard the base.
    normalized = (str(pathname).replace(os.sep, "/").lstrip("/") for pathname in pathnames)
    return posixpath.join(base_name, *normalized)
+
+
def url_or_path_parent(url_or_path: str) -> str:
    """Return the parent of a URL (text before the last "/") or local path (dirname)."""
    if not is_remote_url(url_or_path):
        return os.path.dirname(url_or_path)
    cut = url_or_path.rindex("/")
    return url_or_path[:cut]
+
+
def hash_url_to_filename(url, etag=None):
    """
    Convert `url` into a hashed filename in a repeatable way.
    If `etag` is specified, append its hash to the url's, delimited
    by a period.
    If the url ends with .py, ".py" is appended to the filename so that
    cached dataset/metric scripts keep their Python extension and stay
    importable from the cache.
    """
    url_bytes = url.encode("utf-8")
    # insecure_hashlib: hashing for cache-key fingerprinting, not security.
    url_hash = insecure_hashlib.sha256(url_bytes)
    filename = url_hash.hexdigest()

    if etag:
        etag_bytes = etag.encode("utf-8")
        etag_hash = insecure_hashlib.sha256(etag_bytes)
        filename += "." + etag_hash.hexdigest()

    if url.endswith(".py"):
        filename += ".py"

    return filename
+
+
def cached_path(
    url_or_filename,
    download_config=None,
    **download_kwargs,
) -> str:
    """
    Given something that might be a URL (or might be a local path),
    determine which. If it's a URL, download the file and cache it, and
    return the path to the cached file. If it's already a local path,
    make sure the file exists and then return the path.

    Args:
        url_or_filename (`str` or `Path`): URL or local path to resolve.
        download_config (`DownloadConfig`, *optional*): Download options; built
            from `download_kwargs` when not provided.
        **download_kwargs: Keyword arguments used to build a `DownloadConfig`
            when `download_config` is None.

    Return:
        Local path (string)

    Raises:
        FileNotFoundError: in case of non-recoverable file
            (non-existent or no cache on disk)
        ConnectionError: in case of unreachable url
            and no cache on disk
        ValueError: if it couldn't parse the url or filename correctly
        requests.exceptions.ConnectionError: in case of internet connection issue
    """
    if download_config is None:
        download_config = DownloadConfig(**download_kwargs)

    cache_dir = download_config.cache_dir or config.DOWNLOADED_DATASETS_PATH
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    if isinstance(url_or_filename, Path):
        url_or_filename = str(url_or_filename)

    # Convert fsspec URL in the format "file://local/path" to "local/path"
    if can_be_local(url_or_filename):
        url_or_filename = strip_protocol(url_or_filename)

    if is_remote_url(url_or_filename):
        # URL, so get it from the cache (downloading if necessary)
        output_path = get_from_cache(
            url_or_filename,
            cache_dir=cache_dir,
            force_download=download_config.force_download,
            proxies=download_config.proxies,
            resume_download=download_config.resume_download,
            user_agent=download_config.user_agent,
            local_files_only=download_config.local_files_only,
            use_etag=download_config.use_etag,
            max_retries=download_config.max_retries,
            token=download_config.token,
            ignore_url_params=download_config.ignore_url_params,
            storage_options=download_config.storage_options,
            download_desc=download_config.download_desc,
        )
    elif os.path.exists(url_or_filename):
        # File, and it exists.
        output_path = url_or_filename
    elif is_local_path(url_or_filename):
        # File, but it doesn't exist.
        raise FileNotFoundError(f"Local file {url_or_filename} doesn't exist")
    else:
        # Something unknown
        raise ValueError(f"unable to parse {url_or_filename} as a URL or as a local path")

    # NOTE(review): presumably `get_from_cache` can return None in some modes — confirm;
    # this early return then propagates None despite the `-> str` annotation.
    if output_path is None:
        return output_path

    if download_config.extract_compressed_file:
        # Optionally extract archives; extraction results are cached by ExtractManager.
        output_path = ExtractManager(cache_dir=download_config.cache_dir).extract(
            output_path, force_extract=download_config.force_extract
        )

    return relative_to_absolute_path(output_path)
+
+
def get_datasets_user_agent(user_agent: Optional[Union[str, dict]] = None) -> str:
    """Compose the datasets HTTP user-agent string, appending optional caller info."""
    parts = [
        f"datasets/{__version__}",
        f"python/{config.PY_VERSION}",
        f"huggingface_hub/{huggingface_hub.__version__}",
        f"pyarrow/{config.PYARROW_VERSION}",
    ]
    # Only advertise optional frameworks that are actually installed.
    if config.TORCH_AVAILABLE:
        parts.append(f"torch/{config.TORCH_VERSION}")
    if config.TF_AVAILABLE:
        parts.append(f"tensorflow/{config.TF_VERSION}")
    if config.JAX_AVAILABLE:
        parts.append(f"jax/{config.JAX_VERSION}")
    if config.BEAM_AVAILABLE:
        parts.append(f"apache_beam/{config.BEAM_VERSION}")
    if isinstance(user_agent, dict):
        parts.extend(f"{k}/{v}" for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        parts.append(user_agent)
    return "; ".join(parts)
+
+
def get_authentication_headers_for_url(
    url: str, token: Optional[Union[str, bool]] = None, use_auth_token: Optional[Union[str, bool]] = "deprecated"
) -> dict:
    """Return HF authentication headers for `url`; empty dict for non-HF endpoints."""
    if use_auth_token != "deprecated":
        # Legacy parameter: honor it, but steer callers toward `token`.
        warnings.warn(
            "'use_auth_token' was deprecated in favor of 'token' in version 2.14.0 and will be removed in 3.0.0.\n"
            f"You can remove this warning by passing 'token={use_auth_token}' instead.",
            FutureWarning,
        )
        token = use_auth_token
    if not url.startswith(config.HF_ENDPOINT):
        # Never send credentials to third-party hosts.
        return {}
    return huggingface_hub.utils.build_hf_headers(
        token=token, library_name="datasets", library_version=__version__
    )
+
+
class OfflineModeIsEnabled(ConnectionError):
    """Raised when network access is attempted while HF_DATASETS_OFFLINE is enabled."""
+
+
def _raise_if_offline_mode_is_enabled(msg: Optional[str] = None):
    """Raise an OfflineModeIsEnabled error (subclass of ConnectionError) if HF_DATASETS_OFFLINE is True."""
    if not config.HF_DATASETS_OFFLINE:
        return
    message = "Offline mode is enabled."
    if msg is not None:
        message = message + " " + str(msg)
    raise OfflineModeIsEnabled(message)
+
+
def _request_with_retry(
    method: str,
    url: str,
    max_retries: int = 0,
    base_wait_time: float = 0.5,
    max_wait_time: float = 2,
    timeout: float = 10.0,
    **params,
) -> requests.Response:
    """Wrapper around requests to retry in case it fails with a ConnectTimeout, with exponential backoff.

    Note that if the environment variable HF_DATASETS_OFFLINE is set to 1, then a OfflineModeIsEnabled error is raised.

    Args:
        method (str): HTTP method, such as 'GET' or 'HEAD'.
        url (str): The URL of the resource to fetch.
        max_retries (int): Maximum number of retries, defaults to 0 (no retries).
        base_wait_time (float): Duration (in seconds) to wait before retrying the first time. Wait time between
            retries then grows exponentially, capped by max_wait_time.
        max_wait_time (float): Maximum amount of time between two retries, in seconds.
        timeout (float): Per-request timeout in seconds, forwarded to `requests.request`.
        **params (additional keyword arguments): Params to pass to :obj:`requests.request`.
    """
    _raise_if_offline_mode_is_enabled(f"Tried to reach {url}")
    tries, success = 0, False
    while not success:
        tries += 1
        try:
            response = requests.request(method=method.upper(), url=url, timeout=timeout, **params)
            success = True
        except (requests.exceptions.ConnectTimeout, requests.exceptions.ConnectionError) as err:
            if tries > max_retries:
                raise err
            else:
                # Bug fix: the message used `{tries/max_retries}` — a float division
                # (e.g. "[0.5]") — instead of the intended "attempt/total" counter.
                logger.info(f"{method} request to {url} timed out, retrying... [{tries}/{max_retries}]")
                sleep_time = min(max_wait_time, base_wait_time * 2 ** (tries - 1))  # Exponential backoff
                time.sleep(sleep_time)
    return response
+
+
def fsspec_head(url, storage_options=None):
    """HEAD-equivalent metadata lookup through fsspec for a single-path URL."""
    _raise_if_offline_mode_is_enabled(f"Tried to reach {url}")
    fs, _, paths = fsspec.get_fs_token_paths(url, storage_options=storage_options)
    if len(paths) > 1:
        raise ValueError(f"HEAD can be called with at most one path but was called with {paths}")
    # Single path guaranteed by the check above; return its info dict.
    return fs.info(paths[0])
+
+
def stack_multiprocessing_download_progress_bars():
    """Return a patcher that enables stacked per-subprocess download progress bars.

    The toggle is carried through an environment variable
    (HF_DATASETS_STACK_MULTIPROCESSING_DOWNLOAD_PROGRESS_BARS=1) because the
    actual download may happen in a subprocess that inherits the environment.
    """
    env_override = {"HF_DATASETS_STACK_MULTIPROCESSING_DOWNLOAD_PROGRESS_BARS": "1"}
    return patch.dict(os.environ, env_override)
+
+
class TqdmCallback(fsspec.callbacks.TqdmCallback):
    """fsspec progress callback wired to datasets' own tqdm wrapper.

    Swaps the tqdm implementation used by ``fsspec.callbacks.TqdmCallback`` so
    that progress bars honor datasets' tqdm configuration (e.g. globally
    disabled bars) instead of plain ``tqdm.tqdm``.
    """

    def __init__(self, tqdm_kwargs=None, *args, **kwargs):
        super().__init__(tqdm_kwargs, *args, **kwargs)
        self._tqdm = _tqdm  # replace tqdm.tqdm by datasets.tqdm.tqdm
+
+
def fsspec_get(url, temp_file, storage_options=None, desc=None):
    """Download the single path behind ``url`` into ``temp_file`` through fsspec."""
    _raise_if_offline_mode_is_enabled(f"Tried to reach {url}")
    fs, _, paths = fsspec.get_fs_token_paths(url, storage_options=storage_options)
    if len(paths) > 1:
        raise ValueError(f"GET can be called with at most one path but was called with {paths}")
    # Stack the progress bar per subprocess only when the env toggle is set and
    # we actually are inside a multiprocessing worker.
    bar_position = None
    if (
        os.environ.get("HF_DATASETS_STACK_MULTIPROCESSING_DOWNLOAD_PROGRESS_BARS") == "1"
        and multiprocessing.current_process()._identity
    ):
        bar_position = multiprocessing.current_process()._identity[-1]  # rank of the subprocess
    callback = TqdmCallback(
        tqdm_kwargs={
            "desc": desc or "Downloading",
            "unit": "B",
            "unit_scale": True,
            "position": bar_position,
        }
    )
    fs.get_file(paths[0], temp_file.name, callback=callback)
+
+
def ftp_head(url, timeout=10.0):
    """Probe an FTP URL: True if it can be opened and one byte read, False otherwise."""
    _raise_if_offline_mode_is_enabled(f"Tried to reach {url}")
    try:
        with closing(urllib.request.urlopen(url, timeout=timeout)) as r:
            r.read(1)
        return True
    except Exception:
        # Any failure (DNS, refused connection, auth, ...) means "not reachable".
        return False
+
+
def ftp_get(url, temp_file, timeout=10.0):
    """Stream an FTP URL into ``temp_file``; network failures surface as ConnectionError."""
    _raise_if_offline_mode_is_enabled(f"Tried to reach {url}")
    logger.info(f"Getting through FTP {url} into {temp_file.name}")
    try:
        with closing(urllib.request.urlopen(url, timeout=timeout)) as r:
            shutil.copyfileobj(r, temp_file)
    except urllib.error.URLError as e:
        # Hide the urllib traceback; the URLError is wrapped as the ConnectionError payload.
        raise ConnectionError(e) from None
+
+
def http_get(
    url, temp_file, proxies=None, resume_size=0, headers=None, cookies=None, timeout=100.0, max_retries=0, desc=None
) -> Optional[requests.Response]:
    """Stream ``url`` into ``temp_file`` over HTTP(S) with a progress bar.

    If ``temp_file`` is None the raw streaming response is returned instead of
    being consumed. ``resume_size`` > 0 requests a byte range so an interrupted
    download can be resumed; a 416 (range not satisfiable) yields None.
    """
    request_headers = dict(headers) if headers is not None else {}
    request_headers["user-agent"] = get_datasets_user_agent(user_agent=request_headers.get("user-agent"))
    if resume_size > 0:
        request_headers["Range"] = f"bytes={resume_size:d}-"
    response = _request_with_retry(
        method="GET",
        url=url,
        stream=True,
        proxies=proxies,
        headers=request_headers,
        cookies=cookies,
        max_retries=max_retries,
        timeout=timeout,
    )
    if temp_file is None:
        return response
    if response.status_code == 416:  # Range not satisfiable
        return
    content_length = response.headers.get("Content-Length")
    total = resume_size + int(content_length) if content_length is not None else None
    # Stack the progress bar per subprocess only when the env toggle is set and
    # we actually are inside a multiprocessing worker.
    bar_position = None
    if (
        os.environ.get("HF_DATASETS_STACK_MULTIPROCESSING_DOWNLOAD_PROGRESS_BARS") == "1"
        and multiprocessing.current_process()._identity
    ):
        bar_position = multiprocessing.current_process()._identity[-1]  # rank of the subprocess
    with hf_tqdm(
        unit="B",
        unit_scale=True,
        total=total,
        initial=resume_size,
        desc=desc or "Downloading",
        position=bar_position,
    ) as progress:
        for chunk in response.iter_content(chunk_size=1024):
            progress.update(len(chunk))
            temp_file.write(chunk)
+
+
def http_head(
    url, proxies=None, headers=None, cookies=None, allow_redirects=True, timeout=10.0, max_retries=0
) -> requests.Response:
    """Issue a HEAD request (with retries) and return the raw response."""
    # Deep-copy so the caller's headers dict is never mutated.
    request_headers = copy.deepcopy(headers) or {}
    request_headers["user-agent"] = get_datasets_user_agent(user_agent=request_headers.get("user-agent"))
    return _request_with_retry(
        method="HEAD",
        url=url,
        proxies=proxies,
        headers=request_headers,
        cookies=cookies,
        allow_redirects=allow_redirects,
        timeout=timeout,
        max_retries=max_retries,
    )
+
+
def request_etag(
    url: str, token: Optional[Union[str, bool]] = None, use_auth_token: Optional[Union[str, bool]] = "deprecated"
) -> Optional[str]:
    """Return the ETag header of ``url`` (HTTP/HTTPS only); None for other schemes."""
    if use_auth_token != "deprecated":
        warnings.warn(
            "'use_auth_token' was deprecated in favor of 'token' in version 2.14.0 and will be removed in 3.0.0.\n"
            f"You can remove this warning by passing 'token={use_auth_token}' instead.",
            FutureWarning,
        )
        token = use_auth_token
    if urlparse(url).scheme not in ("http", "https"):
        return None
    headers = get_authentication_headers_for_url(url, token=token)
    response = http_head(url, headers=headers, max_retries=3)
    response.raise_for_status()
    # raise_for_status() already rejected 4xx/5xx, so response.ok holds here.
    return response.headers.get("ETag") if response.ok else None
+
+
def get_from_cache(
    url,
    cache_dir=None,
    force_download=False,
    proxies=None,
    etag_timeout=100,
    resume_download=False,
    user_agent=None,
    local_files_only=False,
    use_etag=True,
    max_retries=0,
    token=None,
    use_auth_token="deprecated",
    ignore_url_params=False,
    storage_options=None,
    download_desc=None,
) -> str:
    """
    Given a URL, look for the corresponding file in the local cache.
    If it's not there, download it. Then return the path to the cached file.

    Args:
        url: URL of the file to cache (http(s), ftp, or any fsspec scheme).
        cache_dir: cache directory; defaults to ``config.HF_DATASETS_CACHE``.
        force_download: re-download even if a cached copy exists.
        proxies: proxies forwarded to the HTTP requests.
        etag_timeout: timeout (s) of the HEAD request used to fetch the ETag.
        resume_download: resume from an existing ".incomplete" file instead of restarting.
        user_agent: optional override for the "user-agent" request header.
        local_files_only: never hit the network; rely on the local cache only.
        use_etag: include the remote ETag in the cache key so that a changed
            remote file produces a new cache entry.
        max_retries: retries for the HTTP requests.
        token: HF Hub token for authenticated endpoints.
        use_auth_token: deprecated alias of ``token``.
        ignore_url_params: strip query parameters and #fragments before hashing
            the URL into a cache filename.
        storage_options: fsspec storage options for non-http(s) schemes.
        download_desc: description shown on the download progress bar.

    Return:
        Local path (string)

    Raises:
        FileNotFoundError: in case of non-recoverable file
        (non-existent or no cache on disk)
        ConnectionError: in case of unreachable url
        and no cache on disk
    """
    if use_auth_token != "deprecated":
        warnings.warn(
            "'use_auth_token' was deprecated in favor of 'token' in version 2.14.0 and will be removed in 3.0.0.\n"
            f"You can remove this warning by passing 'token={use_auth_token}' instead.",
            FutureWarning,
        )
        token = use_auth_token
    if cache_dir is None:
        cache_dir = config.HF_DATASETS_CACHE
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)

    os.makedirs(cache_dir, exist_ok=True)

    if ignore_url_params:
        # strip all query parameters and #fragments from the URL
        cached_url = urljoin(url, urlparse(url).path)
    else:
        cached_url = url  # additional parameters may be added to the given URL

    connected = False
    response = None
    cookies = None
    etag = None
    head_error = None
    scheme = None

    # Try a first time to file the file on the local file system without eTag (None)
    # if we don't ask for 'force_download' then we spare a request
    filename = hash_url_to_filename(cached_url, etag=None)
    cache_path = os.path.join(cache_dir, filename)

    if os.path.exists(cache_path) and not force_download and not use_etag:
        return cache_path

    # Prepare headers for authentication
    headers = get_authentication_headers_for_url(url, token=token)
    if user_agent is not None:
        headers["user-agent"] = user_agent

    # We don't have the file locally or we need an eTag
    if not local_files_only:
        scheme = urlparse(url).scheme
        if scheme == "ftp":
            connected = ftp_head(url)
        elif scheme not in ("http", "https"):
            response = fsspec_head(url, storage_options=storage_options)
            # s3fs uses "ETag", gcsfs uses "etag"
            etag = (response.get("ETag", None) or response.get("etag", None)) if use_etag else None
            connected = True
        # NOTE(review): this HEAD request is attempted for every scheme, including the
        # ftp/fsspec ones handled above; for those it is expected to fail and the
        # OSError is swallowed below — confirm this is intentional upstream.
        try:
            response = http_head(
                url,
                allow_redirects=True,
                proxies=proxies,
                timeout=etag_timeout,
                max_retries=max_retries,
                headers=headers,
            )
            if response.status_code == 200:  # ok
                etag = response.headers.get("ETag") if use_etag else None
                for k, v in response.cookies.items():
                    # In some edge cases, we need to get a confirmation token
                    if k.startswith("download_warning") and "drive.google.com" in url:
                        url += "&confirm=" + v
                cookies = response.cookies
                connected = True
                # Fix Google Drive URL to avoid Virus scan warning
                if "drive.google.com" in url and "confirm=" not in url:
                    url += "&confirm=t"
            # In some edge cases, head request returns 400 but the connection is actually ok
            elif (
                (response.status_code == 400 and "firebasestorage.googleapis.com" in url)
                or (response.status_code == 405 and "drive.google.com" in url)
                or (
                    response.status_code == 403
                    and (
                        re.match(r"^https?://github.com/.*?/.*?/releases/download/.*?/.*?$", url)
                        or re.match(r"^https://.*?s3.*?amazonaws.com/.*?$", response.url)
                    )
                )
                or (response.status_code == 403 and "ndownloader.figstatic.com" in url)
            ):
                connected = True
                logger.info(f"Couldn't get ETag version for url {url}")
            elif response.status_code == 401 and config.HF_ENDPOINT in url and token is None:
                raise ConnectionError(
                    f"Unauthorized for URL {url}. Please use the parameter `token=True` after logging in with `huggingface-cli login`"
                )
        except (OSError, requests.exceptions.Timeout) as e:
            # not connected
            head_error = e
            pass

    # connected == False = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
    # try to get the last downloaded one
    if not connected:
        if os.path.exists(cache_path) and not force_download:
            return cache_path
        if local_files_only:
            raise FileNotFoundError(
                f"Cannot find the requested files in the cached path at {cache_path} and outgoing traffic has been"
                " disabled. To enable file online look-ups, set 'local_files_only' to False."
            )
        elif response is not None and response.status_code == 404:
            raise FileNotFoundError(f"Couldn't find file at {url}")
        _raise_if_offline_mode_is_enabled(f"Tried to reach {url}")
        if head_error is not None:
            raise ConnectionError(f"Couldn't reach {url} ({repr(head_error)})")
        elif response is not None:
            raise ConnectionError(f"Couldn't reach {url} (error {response.status_code})")
        else:
            raise ConnectionError(f"Couldn't reach {url}")

    # Try a second time
    filename = hash_url_to_filename(cached_url, etag)
    cache_path = os.path.join(cache_dir, filename)

    if os.path.exists(cache_path) and not force_download:
        return cache_path

    # From now on, connected is True.
    # Prevent parallel downloads of the same file with a lock.
    lock_path = cache_path + ".lock"
    with FileLock(lock_path):
        # Retry in case previously locked processes just enter after the precedent process releases the lock
        if os.path.exists(cache_path) and not force_download:
            return cache_path

        incomplete_path = cache_path + ".incomplete"

        @contextmanager
        def temp_file_manager(mode="w+b"):
            with open(incomplete_path, mode) as f:
                yield f

        resume_size = 0
        if resume_download:
            # Append mode keeps previously downloaded bytes; resume_size tells the
            # server (via a Range header) where to restart.
            temp_file_manager = partial(temp_file_manager, mode="a+b")
            if os.path.exists(incomplete_path):
                resume_size = os.stat(incomplete_path).st_size

        # Download to temporary file, then copy to cache path once finished.
        # Otherwise, you get corrupt cache entries if the download gets interrupted.
        with temp_file_manager() as temp_file:
            logger.info(f"{url} not found in cache or force_download set to True, downloading to {temp_file.name}")

            # GET file object
            if scheme == "ftp":
                ftp_get(url, temp_file)
            elif scheme not in ("http", "https"):
                fsspec_get(url, temp_file, storage_options=storage_options, desc=download_desc)
            else:
                http_get(
                    url,
                    temp_file=temp_file,
                    proxies=proxies,
                    resume_size=resume_size,
                    headers=headers,
                    cookies=cookies,
                    max_retries=max_retries,
                    desc=download_desc,
                )

        logger.info(f"storing {url} in cache at {cache_path}")
        shutil.move(temp_file.name, cache_path)
        # Read the process umask without changing it (set-and-restore), then grant
        # the broadest permissions the umask allows on the cached file.
        umask = os.umask(0o666)
        os.umask(umask)
        os.chmod(cache_path, 0o666 & ~umask)

        logger.info(f"creating metadata file for {cache_path}")
        meta = {"url": url, "etag": etag}
        meta_path = cache_path + ".json"
        with open(meta_path, "w", encoding="utf-8") as meta_file:
            json.dump(meta, meta_file)

    return cache_path
+
+
def add_start_docstrings(*docstr):
    """Decorator factory that prepends the given strings to a function's docstring."""

    def docstring_decorator(fn):
        existing = fn.__doc__ if fn.__doc__ is not None else ""
        fn.__doc__ = "".join(docstr) + "\n\n" + existing
        return fn

    return docstring_decorator
+
+
def add_end_docstrings(*docstr):
    """Decorator factory that appends the given strings to a function's docstring."""

    def docstring_decorator(fn):
        existing = fn.__doc__ if fn.__doc__ is not None else ""
        fn.__doc__ = existing + "\n\n" + "".join(docstr)
        return fn

    return docstring_decorator
+
+
def estimate_dataset_size(paths):
    """Total on-disk size in bytes of the files at ``paths`` (via ``stat``)."""
    total_bytes = 0
    for path in paths:
        total_bytes += path.stat().st_size
    return total_bytes
+
+
def readline(f: io.RawIOBase):
    """Read one line (up to and including the newline byte) from a raw binary stream.

    Byte-at-a-time fallback for streams that only support ``read(1)``.
    Adapted from CPython's _pyio readline:
    https://github.com/python/cpython/blob/d27e2f4d118e7a9909b6a3e5da06c5ff95806a85/Lib/_pyio.py#L525
    """
    buf = bytearray()
    while byte := f.read(1):
        buf += byte
        if buf.endswith(b"\n"):
            break
    return bytes(buf)
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/utils/resources/creators.json b/evalkit_tf437/lib/python3.10/site-packages/datasets/utils/resources/creators.json
new file mode 100644
index 0000000000000000000000000000000000000000..d9e15f0039cc27ed8abd9fdf394423a3fada2c95
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/datasets/utils/resources/creators.json
@@ -0,0 +1,17 @@
+{
+ "language": [
+ "found",
+ "crowdsourced",
+ "expert-generated",
+ "machine-generated",
+ "other"
+ ],
+ "annotations": [
+ "found",
+ "crowdsourced",
+ "expert-generated",
+ "machine-generated",
+ "no-annotation",
+ "other"
+ ]
+}
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/utils/resources/readme_structure.yaml b/evalkit_tf437/lib/python3.10/site-packages/datasets/utils/resources/readme_structure.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..5b781e11e8258a446874ebf96104f642d0c190cf
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/datasets/utils/resources/readme_structure.yaml
@@ -0,0 +1,116 @@
+name: "" # Filename comes here
+allow_empty: false
+allow_empty_text: true
+subsections:
+ - name: "Dataset Card for X" # First-level markdown heading
+ allow_empty: false
+ allow_empty_text: true
+ subsections:
+ - name: "Table of Contents"
+ allow_empty: false
+ allow_empty_text: false
+ subsections: null # meaning it should not be checked.
+ - name: "Dataset Description"
+ allow_empty: false
+ allow_empty_text: false
+ subsections:
+ - name: "Dataset Summary"
+ allow_empty: false
+ allow_empty_text: false
+ subsections: null
+ - name: "Supported Tasks and Leaderboards"
+ allow_empty: true
+ allow_empty_text: true
+ subsections: null
+ - name: Languages
+ allow_empty: true
+ allow_empty_text: true
+ subsections: null
+ - name: "Dataset Structure"
+ allow_empty: false
+ allow_empty_text: true
+ subsections:
+ - name: "Data Instances"
+ allow_empty: false
+ allow_empty_text: true
+ subsections: null
+ - name: "Data Fields"
+ allow_empty: false
+ allow_empty_text: true
+ subsections: null
+ - name: "Data Splits"
+ allow_empty: false
+ allow_empty_text: true
+ subsections: null
+ - name: "Dataset Creation"
+ allow_empty: false
+ allow_empty_text: true
+ subsections:
+ - name: "Curation Rationale"
+ allow_empty: true
+ allow_empty_text: true
+ subsections: null
+ - name: "Source Data"
+ allow_empty: false
+ allow_empty_text: true
+ subsections:
+ - name: "Initial Data Collection and Normalization"
+ allow_empty: true
+ allow_empty_text: true
+ subsections: null
+ - name: "Who are the source language producers?"
+ allow_empty: true
+ allow_empty_text: true
+ subsections: null
+ - name: "Annotations"
+ allow_empty: false
+ allow_empty_text: true
+ subsections:
+ - name: "Annotation process"
+ allow_empty: true
+ allow_empty_text: true
+ subsections: null
+ - name: "Who are the annotators?"
+ allow_empty: true
+ allow_empty_text: true
+ subsections: null
+ - name: "Personal and Sensitive Information"
+ allow_empty: true
+ allow_empty_text: true
+ subsections: null
+ - name: "Considerations for Using the Data"
+ allow_empty: true
+ allow_empty_text: true
+ subsections:
+ - name: "Social Impact of Dataset"
+ allow_empty: true
+ allow_empty_text: true
+ subsections: null
+ - name: "Discussion of Biases"
+ allow_empty: true
+ allow_empty_text: true
+ subsections: null
+ - name: "Other Known Limitations"
+ allow_empty: true
+ allow_empty_text: true
+ subsections: null
+ - name: "Additional Information"
+ allow_empty: true
+ allow_empty_text: true
+ subsections:
+ - name: "Dataset Curators"
+ allow_empty: true
+ allow_empty_text: true
+ subsections: null
+ - name: "Licensing Information"
+ allow_empty: true
+ allow_empty_text: true
+ subsections: null
+ - name: "Citation Information"
+ allow_empty: false
+ allow_empty_text: true
+ subsections: null
+ - name: "Contributions"
+ allow_empty: false
+ allow_empty_text: false
+ subsections: null
diff --git a/evalkit_tf437/lib/python3.10/site-packages/gcsfs/__pycache__/checkers.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/gcsfs/__pycache__/checkers.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7f9d1416cff15b245656991e44d48c4aebf2d6a2
Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/gcsfs/__pycache__/checkers.cpython-310.pyc differ
diff --git a/evalkit_tf437/lib/python3.10/site-packages/gcsfs/__pycache__/mapping.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/gcsfs/__pycache__/mapping.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3596b94b02355b26aaea4e65fe79e516c4074ad9
Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/gcsfs/__pycache__/mapping.cpython-310.pyc differ
diff --git a/evalkit_tf437/lib/python3.10/site-packages/gcsfs/cli/gcsfuse.py b/evalkit_tf437/lib/python3.10/site-packages/gcsfs/cli/gcsfuse.py
new file mode 100644
index 0000000000000000000000000000000000000000..a23f09ddeaf6ee5e45b15bdb9521230441fdcb3b
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/gcsfs/cli/gcsfuse.py
@@ -0,0 +1,69 @@
+import logging
+
+import click
+from fuse import FUSE
+
+from gcsfs.gcsfuse import GCSFS
+
+
@click.command()
@click.argument("bucket", type=str, required=True)
@click.argument("mount_point", type=str, required=True)
@click.option(
    "--token",
    type=str,
    required=False,
    default=None,
    help="Token to use for authentication",
)
@click.option(
    "--project-id", type=str, required=False, default="", help="Billing Project ID"
)
@click.option(
    "--foreground/--background",
    default=True,
    help="Run in the foreground or as a background process",
)
@click.option(
    "--threads/--no-threads", default=True, help="Whether to run with threads"
)
@click.option(
    "--cache_files", type=int, default=10, help="Number of open files to cache"
)
@click.option(
    "-v",
    "--verbose",
    count=True,
    help="Set logging level. '-v' for 'gcsfuse' logging. "
    "'-v -v' for complete debug logging.",
)
def main(
    bucket, mount_point, token, project_id, foreground, threads, cache_files, verbose
):
    """Mount a Google Cloud Storage (GCS) bucket to a local directory"""

    # Configure logging exactly once. logging.basicConfig is a no-op when the root
    # logger is already configured, so the previously duplicated (format-less)
    # calls prevented the formatted configuration from ever taking effect.
    fmt = "%(asctime)s %(name)-12s %(levelname)-8s %(message)s"
    if verbose == 1:
        logging.basicConfig(level=logging.INFO, format=fmt)
        logging.getLogger("gcsfs.gcsfuse").setLevel(logging.DEBUG)
    if verbose > 1:
        logging.basicConfig(level=logging.DEBUG, format=fmt)

    print(f"Mounting bucket {bucket} to directory {mount_point}")
    print("foreground:", foreground, ", nothreads:", not threads)
    # FUSE blocks while mounted in the foreground.
    FUSE(
        GCSFS(bucket, token=token, project=project_id, nfiles=cache_files),
        mount_point,
        nothreads=not threads,
        foreground=foreground,
    )


if __name__ == "__main__":
    main()
diff --git a/evalkit_tf437/lib/python3.10/site-packages/gcsfs/credentials.py b/evalkit_tf437/lib/python3.10/site-packages/gcsfs/credentials.py
new file mode 100644
index 0000000000000000000000000000000000000000..a6e8d3711e41dba59f3e1e179661e27dcbf28683
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/gcsfs/credentials.py
@@ -0,0 +1,250 @@
+import json
+import logging
+import os
+import pickle
+import textwrap
+import threading
+import warnings
+
+import google.auth as gauth
+import google.auth.compute_engine
+import google.auth.credentials
+import google.auth.exceptions
+import requests
+from google.auth.transport.requests import Request
+from google.oauth2 import service_account
+from google.oauth2.credentials import Credentials
+from google_auth_oauthlib.flow import InstalledAppFlow
+
logger = logging.getLogger("gcsfs.credentials")

# On-disk cache file for "browser"-flow OAuth tokens, shared per user.
tfile = os.path.join(os.path.expanduser("~"), ".gcs_tokens")

# OAuth client identifiers for the installed-app flow. Despite the field name,
# the "client_secret" of an installed app is not actually confidential.
not_secret = {
    "client_id": "586241054156-9kst7ltfj66svc342pcn43vp6ta3idin"
    ".apps.googleusercontent.com",
    "client_secret": "xto0LIFYX35mmHF9T1R2QBqT",
}

# Config dict in the shape expected by google_auth_oauthlib's InstalledAppFlow.
client_config = {
    "installed": {
        "client_id": not_secret["client_id"],
        "client_secret": not_secret["client_secret"],
        "auth_uri": "https://accounts.google.com/o/oauth2/auth",
        "token_uri": "https://accounts.google.com/o/oauth2/token",
    }
}
+
+
class GoogleCredentials:
    """Manage Google Cloud credentials for gcsfs.

    Wraps the different authentication routes (``google_default``, ``cache``,
    ``cloud``, ``token``, ``anon``, ``browser``) behind a single ``connect()``
    entry point and keeps the resulting ``google.auth`` credentials refreshed.
    """

    def __init__(self, project, access, token, check_credentials=None):
        # OAuth scope is derived from the requested access level (e.g. "read_only").
        self.scope = "https://www.googleapis.com/auth/devstorage." + access
        self.project = project
        self.access = access
        # Header dict that credentials are applied to (Authorization header etc.).
        self.heads = {}

        self.credentials = None
        self.method = None
        # Serializes token refreshes across threads.
        self.lock = threading.Lock()
        self.token = token
        self.connect(method=token)

        if check_credentials:
            warnings.warn(
                "The `check_credentials` argument is deprecated and will be removed in a future release.",
                DeprecationWarning,
            )

    @classmethod
    def load_tokens(cls):
        """Get "browser" tokens from disc"""
        try:
            with open(tfile, "rb") as f:
                tokens = pickle.load(f)
        except Exception:
            # A missing or unreadable token cache is not fatal; start empty.
            tokens = {}
        GoogleCredentials.tokens = tokens

    @staticmethod
    def _save_tokens():
        # Persist the class-level token cache; a failed save is only a warning.
        try:
            with open(tfile, "wb") as f:
                pickle.dump(GoogleCredentials.tokens, f, 2)
        except Exception as e:
            warnings.warn("Saving token cache failed: " + str(e))

    def _connect_google_default(self):
        # Application Default Credentials: env var, gcloud config, or metadata server.
        credentials, project = gauth.default(scopes=[self.scope])
        msg = textwrap.dedent(
            """\
            User-provided project '{}' does not match the google default project '{}'. Either

              1. Accept the google-default project by not passing a `project` to GCSFileSystem
              2. Configure the default project to match the user-provided project (gcloud config set project)
              3. Use an authorization method other than 'google_default' by providing 'token=...'
            """
        )
        if self.project and self.project != project:
            raise ValueError(msg.format(self.project, project))
        self.project = project
        self.credentials = credentials

    def _connect_cloud(self):
        # Credentials from the GCE/GKE metadata server; refresh once to validate them.
        self.credentials = gauth.compute_engine.Credentials()
        try:
            with requests.Session() as session:
                req = Request(session)
                self.credentials.refresh(req)
        except gauth.exceptions.RefreshError as error:
            raise ValueError("Invalid gcloud credentials") from error

    def _connect_cache(self):
        # Reuse a previously cached credential for this (project, access) pair, if any.

        if len(self.tokens) == 0:
            raise ValueError("No cached tokens")

        project, access = self.project, self.access
        if (project, access) in self.tokens:
            credentials = self.tokens[(project, access)]
            self.credentials = credentials

    def _dict_to_credentials(self, token):
        """
        Convert old dict-style token.

        Does not preserve access token itself, assumes refresh required.
        """
        try:
            token = service_account.Credentials.from_service_account_info(
                token, scopes=[self.scope]
            )
        except:  # noqa: E722
            # TODO: catch specific exceptions
            # According to https://github.com/googleapis/python-cloud-core/blob/master/google/cloud/client.py
            # Scopes required for authenticating with a service. User authentication fails
            # with invalid_scope if scope is specified.
            token = Credentials(
                None,
                refresh_token=token["refresh_token"],
                client_secret=token["client_secret"],
                client_id=token["client_id"],
                token_uri="https://oauth2.googleapis.com/token",
            )
        return token

    def _connect_token(self, token):
        """
        Connect using a concrete token

        Parameters
        ----------
        token: str, dict or Credentials
            If a str and a valid file name, try to load as a Service file, or next as a JSON;
            if not a valid file name, assume it's a valid raw (non-renewable/session) token, and pass to Credentials. If
            dict, try to interpret as credentials; if Credentials, use directly.
        """
        if isinstance(token, str):
            if os.path.exists(token):
                try:
                    # is this a "service" token?
                    self._connect_service(token)
                    return
                except:  # noqa: E722
                    # TODO: catch specific exceptions
                    # some other kind of token file
                    # will raise exception if is not json
                    with open(token) as data:
                        token = json.load(data)
            else:
                # Not a file path: treat the string as a raw session token.
                token = Credentials(token)
        if isinstance(token, dict):
            credentials = self._dict_to_credentials(token)
        elif isinstance(token, google.auth.credentials.Credentials):
            credentials = token
        else:
            raise ValueError("Token format not understood")
        self.credentials = credentials
        if self.credentials.valid:
            self.credentials.apply(self.heads)

    def maybe_refresh(self):
        """Refresh the access token if it is missing or expired."""
        # this uses requests and is blocking
        if self.credentials is None:
            return  # anon
        if self.credentials.valid:
            return  # still good
        with requests.Session() as session:
            req = Request(session)
            with self.lock:
                if self.credentials.valid:
                    return  # repeat to avoid race (but don't want lock in common case)
                logger.debug("GCS refresh")
                self.credentials.refresh(req)
                self.apply(self.heads)

    def apply(self, out):
        """Insert credential headers in-place to a dictionary"""
        self.maybe_refresh()
        if self.credentials is not None:
            self.credentials.apply(out)

    def _connect_service(self, fn):
        # raises exception if file does not match expectation
        credentials = service_account.Credentials.from_service_account_file(
            fn, scopes=[self.scope]
        )
        self.credentials = credentials

    def _connect_anon(self):
        # Anonymous access: no credentials at all.
        self.credentials = None

    def _connect_browser(self):
        # Interactive OAuth flow; the obtained credential is cached on disk.
        flow = InstalledAppFlow.from_client_config(client_config, [self.scope])
        credentials = flow.run_local_server()
        self.tokens[(self.project, self.access)] = credentials
        self._save_tokens()
        self.credentials = credentials

    def connect(self, method=None):
        """
        Establish session token. A new token will be requested if the current
        one is within 100s of expiry.

        Parameters
        ----------
        method: str (google_default|cache|cloud|token|anon|browser) or None
            Type of authorisation to implement - calls `_connect_*` methods.
            If None, will try sequence of methods.
        """
        if method not in [
            "google_default",
            "cache",
            "cloud",
            "token",
            "anon",
            "browser",
            None,
        ]:
            # Anything else is treated as a concrete token value.
            self._connect_token(method)
        elif method is None:
            for meth in ["google_default", "cache", "cloud", "anon"]:
                try:
                    self.connect(method=meth)
                    logger.debug("Connected with method %s", meth)
                    break
                except (google.auth.exceptions.GoogleAuthError, ValueError) as e:
                    # GoogleAuthError is the base class for all authentication
                    # errors
                    logger.debug(
                        'Connection with method "%s" failed' % meth, exc_info=e
                    )
                    # Reset credentials if they were set but the authentication failed
                    # (reverts to 'anon' behavior)
                    self.credentials = None
            else:
                # Since the 'anon' connection method should always succeed,
                # getting here means something has gone terribly wrong.
                raise RuntimeError("All connection methods have failed!")
        else:
            self.__getattribute__("_connect_" + method)()
            self.method = method
diff --git a/evalkit_tf437/lib/python3.10/site-packages/gcsfs/mapping.py b/evalkit_tf437/lib/python3.10/site-packages/gcsfs/mapping.py
new file mode 100644
index 0000000000000000000000000000000000000000..cf6b1824d55ce9aad2ab9fae980d0b98d9eeb0fc
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/gcsfs/mapping.py
@@ -0,0 +1,7 @@
+from .core import GCSFileSystem
+
+
def GCSMap(root, gcs=None, check=False, create=False):
    """For backward compatibility"""
    # Fall back to the most recently instantiated filesystem when none is given.
    filesystem = gcs or GCSFileSystem.current()
    return filesystem.get_mapper(root, check=check, create=create)
diff --git a/evalkit_tf437/lib/python3.10/site-packages/gcsfs/retry.py b/evalkit_tf437/lib/python3.10/site-packages/gcsfs/retry.py
new file mode 100644
index 0000000000000000000000000000000000000000..4bb860bafe8e171879c0ce84a116bcba4195bd6e
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/gcsfs/retry.py
@@ -0,0 +1,158 @@
+import asyncio
+import json
+import logging
+import random
+
+import aiohttp.client_exceptions
+import google.auth.exceptions
+import requests.exceptions
+from decorator import decorator
+
+logger = logging.getLogger("gcsfs")
+
+
class HttpError(Exception):
    """Holds the message and code from cloud errors."""

    def __init__(self, error_response=None):
        # Keep the raw response so __reduce__ can rebuild the exception when pickled.
        self._error_response = error_response
        if not error_response:
            self.message = ""
            self.code = None
        else:
            self.code = error_response.get("code", None)
            self.message = error_response.get("message", "")
            if self.code:
                # Append the code, matching the message's str/bytes type.
                suffix = ", %s" % self.code
                if isinstance(self.message, bytes):
                    self.message += suffix.encode()
                else:
                    self.message += suffix
        # Call the base class constructor with the parameters it needs
        super().__init__(self.message)

    def __reduce__(self):
        """This makes the Exception pickleable."""
        return HttpError, (self._error_response,)
+
+
class ChecksumError(Exception):
    """Raised when the md5 hash of the content does not match the header."""
+
+
# Exception types considered transient; `is_retriable` treats any instance of
# these as safe to retry with backoff (see `retry_request`).
RETRIABLE_EXCEPTIONS = (
    requests.exceptions.ChunkedEncodingError,
    requests.exceptions.ConnectionError,
    requests.exceptions.ReadTimeout,
    requests.exceptions.Timeout,
    requests.exceptions.ProxyError,
    requests.exceptions.SSLError,
    requests.exceptions.ContentDecodingError,
    google.auth.exceptions.RefreshError,
    aiohttp.client_exceptions.ClientError,
    ChecksumError,
)
+
+
def is_retriable(exception):
    """Returns True if this exception is retriable."""
    # Retriable HTTP statuses: the 5xx server errors plus 408 (request timeout)
    # and 429 (too many requests). Codes may arrive as ints or strings, so
    # accept both representations.
    retriable_codes = list(range(500, 505)) + [408, 429]
    retriable_codes += [str(code) for code in retriable_codes]
    if isinstance(exception, HttpError):
        return exception.code in retriable_codes
    return isinstance(exception, RETRIABLE_EXCEPTIONS)
+
+
def validate_response(status, content, path, args=None):
    """
    Check a finished request and raise the appropriate error if it failed.

    Parameters
    ----------
    status: int, HTTP status code
    content: response body (bytes or str)
    path: associated URL path, for error messages
    args: optional values interpolated (URL-quoted) into ``path``
    """
    if status < 400 or status == 499:
        # 499 is special "upload was cancelled" status
        return
    if args:
        from .core import quote

        path = path.format(*[quote(p) for p in args])
    if status == 404:
        raise FileNotFoundError(path)

    if hasattr(content, "decode"):
        content = content.decode()
    error = None
    try:
        error = json.loads(content)["error"]
        msg = error["message"]
    except json.decoder.JSONDecodeError:
        # Non-JSON body: use the raw content as the message.
        msg = content

    if status == 403:
        raise OSError(f"Forbidden: {path}\n{msg}")
    if status == 502:
        raise requests.exceptions.ProxyError()
    if "invalid" in str(msg):
        raise ValueError(f"Bad Request: {path}\n{msg}")
    if error:
        raise HttpError(error)
    if status:
        raise HttpError({"code": status, "message": msg})  # text-like
    raise RuntimeError(msg)
+
+
@decorator
async def retry_request(func, retries=6, *args, **kwargs):
    """Await ``func(*args, **kwargs)``, retrying transient failures.

    Sleeps a jittered, exponentially growing delay (capped at 32s) before each
    retry. Requester-pays misconfiguration (HTTP 400) and 404s are re-raised
    immediately; the last exception propagates once ``retries`` attempts are
    exhausted.
    """
    for retry in range(retries):
        try:
            if retry > 0:
                # Jittered exponential backoff, capped at 32 seconds.
                await asyncio.sleep(min(random.random() + 2 ** (retry - 1), 32))
            return await func(*args, **kwargs)
        except (
            HttpError,
            requests.exceptions.RequestException,
            google.auth.exceptions.GoogleAuthError,
            ChecksumError,
            aiohttp.client_exceptions.ClientError,
        ) as e:
            if (
                isinstance(e, HttpError)
                and e.code == 400
                and "requester pays" in e.message
            ):
                msg = (
                    "Bucket is requester pays. "
                    "Set `requester_pays=True` when creating the GCSFileSystem."
                )
                raise ValueError(msg) from e
            # Special test for 404 to avoid retrying the request
            if (
                isinstance(e, aiohttp.client_exceptions.ClientResponseError)
                and e.status == 404
            ):
                logger.debug("Request returned 404, no retries.")
                raise e
            if isinstance(e, HttpError) and e.code == 404:
                logger.debug("Request returned 404, no retries.")
                raise e
            if retry == retries - 1:
                logger.exception(f"{func.__name__} out of retries on exception: {e}")
                raise e
            if is_retriable(e):
                logger.debug(f"{func.__name__} retrying after exception: {e}")
                continue
            logger.exception(f"{func.__name__} non-retriable exception: {e}")
            raise e
diff --git a/evalkit_tf437/lib/python3.10/site-packages/pandas/io/formats/__pycache__/style.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/pandas/io/formats/__pycache__/style.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..287eab9490a7a46465d9c9d28d13b0f82055d397
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/pandas/io/formats/__pycache__/style.cpython-310.pyc
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3c2e63ab59f1e4455a0b6c855038435e18d8a6d342eb05082b11cbd70865dd66
+size 137390
diff --git a/evalkit_tf437/lib/python3.10/site-packages/pydantic-2.9.2.dist-info/REQUESTED b/evalkit_tf437/lib/python3.10/site-packages/pydantic-2.9.2.dist-info/REQUESTED
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391