diff --git a/.gitattributes b/.gitattributes
index e8ecbbc1a83689916eac2e03d6886551e1c44e04..8be5cf21d63118bf7352987d202d8809b8a22c7d 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -330,3 +330,4 @@ evalkit_tf437/lib/python3.10/site-packages/opencv_python.libs/libxcb-xkb-9ba31ab
evalkit_tf437/lib/python3.10/site-packages/opencv_python.libs/libxkbcommon-71ae2972.so.0.0.0 filter=lfs diff=lfs merge=lfs -text
evalkit_tf437/lib/python3.10/site-packages/regex/_regex.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
evalkit_tf437/lib/python3.10/site-packages/scikit_learn.libs/libgomp-a34b3233.so.1.0.0 filter=lfs diff=lfs merge=lfs -text
+evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/otData.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/__init__.py b/evalkit_tf437/lib/python3.10/site-packages/datasets/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..39058314bdc320499e08a46d0246b54293f7365b
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/datasets/__init__.py
@@ -0,0 +1,70 @@
+# ruff: noqa
+# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+__version__ = "2.18.0"
+
+from .arrow_dataset import Dataset
+from .arrow_reader import ReadInstruction
+from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
+from .combine import concatenate_datasets, interleave_datasets
+from .dataset_dict import DatasetDict, IterableDatasetDict
+from .download import *
+from .features import *
+from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
+from .info import DatasetInfo, MetricInfo
+from .inspect import (
+ get_dataset_config_info,
+ get_dataset_config_names,
+ get_dataset_default_config_name,
+ get_dataset_infos,
+ get_dataset_split_names,
+ inspect_dataset,
+ inspect_metric,
+ list_datasets,
+ list_metrics,
+)
+from .iterable_dataset import IterableDataset
+from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
+from .metric import Metric
+from .splits import (
+ NamedSplit,
+ NamedSplitAll,
+ Split,
+ SplitBase,
+ SplitDict,
+ SplitGenerator,
+ SplitInfo,
+ SubSplitInfo,
+ percent,
+)
+from .tasks import *
+from .utils import *
+from .utils import logging
+
+
+# deprecated modules
+from datasets import arrow_dataset as _arrow_dataset # isort:skip
+from datasets import utils as _utils # isort:skip
+from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
+
+_arrow_dataset.concatenate_datasets = concatenate_datasets
+_utils.DownloadConfig = DownloadConfig
+_utils.DownloadManager = DownloadManager
+_utils.DownloadMode = DownloadMode
+_deprecated_download_manager.DownloadConfig = DownloadConfig
+_deprecated_download_manager.DownloadMode = DownloadMode
+_deprecated_download_manager.DownloadManager = DownloadManager
+
+del _arrow_dataset, _utils, _deprecated_download_manager
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/arrow_dataset.py b/evalkit_tf437/lib/python3.10/site-packages/datasets/arrow_dataset.py
new file mode 100644
index 0000000000000000000000000000000000000000..95126664561437aec7f066a75f4ca0470c5c0a17
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/datasets/arrow_dataset.py
@@ -0,0 +1,6277 @@
+# Copyright 2020 The HuggingFace Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Lint as: python3
+"""Simple Dataset wrapping an Arrow Table."""
+
+import contextlib
+import copy
+import fnmatch
+import itertools
+import json
+import math
+import os
+import posixpath
+import re
+import shutil
+import sys
+import tempfile
+import time
+import warnings
+import weakref
+from collections import Counter
+from collections.abc import Mapping
+from copy import deepcopy
+from functools import partial, wraps
+from io import BytesIO
+from math import ceil, floor
+from pathlib import Path
+from random import sample
+from typing import (
+ TYPE_CHECKING,
+ Any,
+ BinaryIO,
+ Callable,
+ Dict,
+ Iterable,
+ Iterator,
+ List,
+ Optional,
+ Tuple,
+ Union,
+ overload,
+)
+from typing import Sequence as Sequence_
+
+import fsspec
+import numpy as np
+import pandas as pd
+import pyarrow as pa
+import pyarrow.compute as pc
+from huggingface_hub import CommitInfo, CommitOperationAdd, CommitOperationDelete, DatasetCard, DatasetCardData, HfApi
+from multiprocess import Pool
+from tqdm.contrib.concurrent import thread_map
+
+from . import config
+from .arrow_reader import ArrowReader
+from .arrow_writer import ArrowWriter, OptimizedTypedSequence
+from .data_files import sanitize_patterns
+from .download.streaming_download_manager import xgetsize
+from .features import Audio, ClassLabel, Features, Image, Sequence, Value
+from .features.features import (
+ FeatureType,
+ _align_features,
+ _check_if_features_can_be_aligned,
+ generate_from_arrow_type,
+ pandas_types_mapper,
+ require_decoding,
+)
+from .filesystems import is_remote_filesystem
+from .fingerprint import (
+ fingerprint_transform,
+ format_kwargs_for_fingerprint,
+ format_transform_for_fingerprint,
+ generate_fingerprint,
+ generate_random_fingerprint,
+ get_temporary_cache_files_directory,
+ is_caching_enabled,
+ maybe_register_dataset_for_temp_dir_deletion,
+ update_fingerprint,
+ validate_fingerprint,
+)
+from .formatting import format_table, get_format_type_from_alias, get_formatter, query_table
+from .formatting.formatting import LazyDict, _is_range_contiguous
+from .info import DatasetInfo, DatasetInfosDict
+from .naming import _split_re
+from .search import IndexableMixin
+from .splits import NamedSplit, Split, SplitDict, SplitInfo
+from .table import (
+ InMemoryTable,
+ MemoryMappedTable,
+ Table,
+ _memory_mapped_record_batch_reader_from_file,
+ cast_array_to_feature,
+ concat_tables,
+ embed_table_storage,
+ list_table_cache_files,
+ table_cast,
+ table_iter,
+ table_visitor,
+)
+from .tasks import TaskTemplate
+from .utils import logging
+from .utils import tqdm as hf_tqdm
+from .utils.deprecation_utils import deprecated
+from .utils.file_utils import estimate_dataset_size
+from .utils.hub import list_files_info, preupload_lfs_files
+from .utils.info_utils import is_small_dataset
+from .utils.metadata import MetadataConfigs
+from .utils.py_utils import (
+ Literal,
+ asdict,
+ convert_file_size_to_int,
+ glob_pattern_to_regex,
+ iflatmap_unordered,
+ string_to_dict,
+ unique_values,
+)
+from .utils.stratify import stratified_shuffle_split_generate_indices
+from .utils.tf_utils import dataset_to_tf, minimal_tf_collate_fn, multiprocess_dataset_to_tf
+from .utils.typing import ListLike, PathLike
+
+
+if TYPE_CHECKING:
+ import sqlite3
+
+ import pyspark
+ import sqlalchemy
+
+ from .dataset_dict import DatasetDict
+ from .iterable_dataset import IterableDataset
+
+logger = logging.get_logger(__name__)
+
+PUSH_TO_HUB_WITHOUT_METADATA_CONFIGS_SPLIT_PATTERN_SHARDED = (
+ "data/{split}-[0-9][0-9][0-9][0-9][0-9]-of-[0-9][0-9][0-9][0-9][0-9]*.parquet"
+)
+
+
+class DatasetInfoMixin:
+ """This base class exposes some attributes of DatasetInfo
+ at the base level of the Dataset for easy access.
+ """
+
+ def __init__(self, info: DatasetInfo, split: Optional[NamedSplit]):
+ self._info = info
+ self._split = split
+
+ @property
+ def info(self):
+ """[`~datasets.DatasetInfo`] object containing all the metadata in the dataset."""
+ return self._info
+
+ @property
+ def split(self):
+ """[`~datasets.NamedSplit`] object corresponding to a named dataset split."""
+ return self._split
+
+ @property
+ def builder_name(self) -> str:
+ return self._info.builder_name
+
+ @property
+ def citation(self) -> str:
+ return self._info.citation
+
+ @property
+ def config_name(self) -> str:
+ return self._info.config_name
+
+ @property
+ def dataset_size(self) -> Optional[int]:
+ return self._info.dataset_size
+
+ @property
+ def description(self) -> str:
+ return self._info.description
+
+ @property
+ def download_checksums(self) -> Optional[dict]:
+ return self._info.download_checksums
+
+ @property
+ def download_size(self) -> Optional[int]:
+ return self._info.download_size
+
+ @property
+ def features(self) -> Optional[Features]:
+ return self._info.features.copy() if self._info.features is not None else None
+
+ @property
+ def homepage(self) -> Optional[str]:
+ return self._info.homepage
+
+ @property
+ def license(self) -> Optional[str]:
+ return self._info.license
+
+ @property
+ def size_in_bytes(self) -> Optional[int]:
+ return self._info.size_in_bytes
+
+ @property
+ def supervised_keys(self):
+ return self._info.supervised_keys
+
+ @property
+ def task_templates(self):
+ return self._info.task_templates
+
+ @property
+ def version(self):
+ return self._info.version
+
+
+class TensorflowDatasetMixin:
+ _TF_DATASET_REFS = set()
+
+ @staticmethod
+ def _get_output_signature(
+ dataset: "Dataset",
+ collate_fn: Callable,
+ collate_fn_args: dict,
+ cols_to_retain: Optional[List[str]] = None,
+ batch_size: Optional[int] = None,
+ num_test_batches: int = 20,
+ ):
+ """Private method used by `to_tf_dataset()` to find the shapes and dtypes of samples from this dataset
+ after being passed through the collate_fn. Tensorflow needs an exact signature for tf.numpy_function, so
+ the only way to do this is to run test batches - the collator may add or rename columns, so we can't figure
+ it out just by inspecting the dataset.
+
+ Args:
+ dataset (`Dataset`): Dataset to load samples from.
+ collate_fn(`Callable`): A function or callable object (such as a `DataCollator`) that will collate
+ lists of samples into a batch.
+ collate_fn_args (`Dict`): A `dict` of keyword arguments to be passed to the
+ `collate_fn`.
+ batch_size (`int`, optional): The size of batches loaded from the dataset. Used for shape inference.
+ Can be None, which indicates that batch sizes can be variable.
+ num_test_batches (`int`): The number of batches to load from the dataset for shape inference.
+
+ Returns:
+            `dict`: Dict mapping column names to tf.TensorSpec objects
+ `dict`: Dict mapping column names to np.dtype objects
+ """
+ if config.TF_AVAILABLE:
+ import tensorflow as tf
+ else:
+ raise ImportError("Called a Tensorflow-specific function but Tensorflow is not installed.")
+
+ if len(dataset) == 0:
+ raise ValueError("Unable to get the output signature because the dataset is empty.")
+ if batch_size is not None:
+ batch_size = min(len(dataset), batch_size)
+ test_batch_size = 1
+
+ if cols_to_retain is not None:
+ cols_to_retain = list(set(cols_to_retain + ["label_ids", "label", "labels"]))
+
+ test_batches = []
+ for _ in range(num_test_batches):
+ indices = sample(range(len(dataset)), test_batch_size)
+ test_batch = dataset[indices]
+ if cols_to_retain is not None:
+ test_batch = {key: value for key, value in test_batch.items() if key in cols_to_retain}
+ test_batch = [{key: value[i] for key, value in test_batch.items()} for i in range(test_batch_size)]
+ test_batch = collate_fn(test_batch, **collate_fn_args)
+ test_batches.append(test_batch)
+
+ tf_columns_to_signatures = {}
+ np_columns_to_dtypes = {}
+ for column in test_batches[0].keys():
+ raw_arrays = [batch[column] for batch in test_batches]
+ # In case the collate_fn returns something strange
+ np_arrays = []
+ for array in raw_arrays:
+ if isinstance(array, np.ndarray):
+ np_arrays.append(array)
+ elif isinstance(array, tf.Tensor):
+ np_arrays.append(array.numpy())
+ else:
+ np_arrays.append(np.array(array))
+
+ if np.issubdtype(np_arrays[0].dtype, np.integer) or np_arrays[0].dtype == bool:
+ tf_dtype = tf.int64
+ np_dtype = np.int64
+ elif np.issubdtype(np_arrays[0].dtype, np.number):
+ tf_dtype = tf.float32
+ np_dtype = np.float32
+ elif np_arrays[0].dtype.kind == "U": # Unicode strings
+ np_dtype = np.unicode_
+ tf_dtype = tf.string
+ else:
+ raise RuntimeError(
+ f"Unrecognized array dtype {np_arrays[0].dtype}. \n"
+ "Nested types and image/audio types are not supported yet."
+ )
+ shapes = [array.shape for array in np_arrays]
+ static_shape = []
+ for dim in range(len(shapes[0])):
+ sizes = {shape[dim] for shape in shapes}
+ if dim == 0:
+ static_shape.append(batch_size)
+ continue
+ if len(sizes) == 1: # This dimension looks constant
+ static_shape.append(sizes.pop())
+ else: # Use None for variable dimensions
+ static_shape.append(None)
+ tf_columns_to_signatures[column] = tf.TensorSpec(shape=static_shape, dtype=tf_dtype)
+ np_columns_to_dtypes[column] = np_dtype
+
+ return tf_columns_to_signatures, np_columns_to_dtypes
+
+ def to_tf_dataset(
+ self,
+ batch_size: Optional[int] = None,
+ columns: Optional[Union[str, List[str]]] = None,
+ shuffle: bool = False,
+ collate_fn: Optional[Callable] = None,
+ drop_remainder: bool = False,
+ collate_fn_args: Optional[Dict[str, Any]] = None,
+ label_cols: Optional[Union[str, List[str]]] = None,
+ prefetch: bool = True,
+ num_workers: int = 0,
+ num_test_batches: int = 20,
+ ):
+ """Create a `tf.data.Dataset` from the underlying Dataset. This `tf.data.Dataset` will load and collate batches from
+ the Dataset, and is suitable for passing to methods like `model.fit()` or `model.predict()`. The dataset will yield
+ `dicts` for both inputs and labels unless the `dict` would contain only a single key, in which case a raw
+ `tf.Tensor` is yielded instead.
+
+ Args:
+ batch_size (`int`, *optional*):
+ Size of batches to load from the dataset. Defaults to `None`, which implies that the dataset won't be
+ batched, but the returned dataset can be batched later with `tf_dataset.batch(batch_size)`.
+ columns (`List[str]` or `str`, *optional*):
+ Dataset column(s) to load in the `tf.data.Dataset`.
+ Column names that are created by the `collate_fn` and that do not exist in the original dataset can be used.
+ shuffle(`bool`, defaults to `False`):
+ Shuffle the dataset order when loading. Recommended `True` for training, `False` for
+ validation/evaluation.
+ drop_remainder(`bool`, defaults to `False`):
+ Drop the last incomplete batch when loading. Ensures
+ that all batches yielded by the dataset will have the same length on the batch dimension.
+ collate_fn(`Callable`, *optional*):
+ A function or callable object (such as a `DataCollator`) that will collate
+ lists of samples into a batch.
+ collate_fn_args (`Dict`, *optional*):
+ An optional `dict` of keyword arguments to be passed to the
+ `collate_fn`.
+ label_cols (`List[str]` or `str`, defaults to `None`):
+ Dataset column(s) to load as labels.
+ Note that many models compute loss internally rather than letting Keras do it, in which case
+ passing the labels here is optional, as long as they're in the input `columns`.
+ prefetch (`bool`, defaults to `True`):
+ Whether to run the dataloader in a separate thread and maintain
+ a small buffer of batches for training. Improves performance by allowing data to be loaded in the
+ background while the model is training.
+ num_workers (`int`, defaults to `0`):
+ Number of workers to use for loading the dataset. Only supported on Python versions >= 3.8.
+ num_test_batches (`int`, defaults to `20`):
+ Number of batches to use to infer the output signature of the dataset.
+ The higher this number, the more accurate the signature will be, but the longer it will take to
+ create the dataset.
+
+ Returns:
+ `tf.data.Dataset`
+
+ Example:
+
+ ```py
+ >>> ds_train = ds["train"].to_tf_dataset(
+ ... columns=['input_ids', 'token_type_ids', 'attention_mask', 'label'],
+ ... shuffle=True,
+ ... batch_size=16,
+ ... collate_fn=data_collator,
+ ... )
+ ```
+ """
+ if config.TF_AVAILABLE:
+ import tensorflow as tf
+ else:
+ raise ImportError("Called a Tensorflow-specific function but Tensorflow is not installed.")
+
+ if (isinstance(columns, list) and len(columns) == 1) or (
+ isinstance(label_cols, list) and len(label_cols) == 1
+ ):
+ warnings.warn(
+                "The output of `to_tf_dataset` will change when passing a single element list for `labels` or "
+ "`columns` in the next datasets version. To return a tuple structure rather than dict, pass a "
+ "single string.\n"
+ "Old behaviour: columns=['a'], labels=['labels'] -> (tf.Tensor, tf.Tensor) \n"
+ " : columns='a', labels='labels' -> (tf.Tensor, tf.Tensor) \n"
+ "New behaviour: columns=['a'],labels=['labels'] -> ({'a': tf.Tensor}, {'labels': tf.Tensor}) \n"
+ " : columns='a', labels='labels' -> (tf.Tensor, tf.Tensor) ",
+ FutureWarning,
+ )
+
+ if isinstance(tf.distribute.get_strategy(), tf.distribute.TPUStrategy):
+ logger.warning(
+ "Note that to_tf_dataset() loads the data with a generator rather than a full tf.data "
+ "pipeline and is not compatible with remote TPU connections. If you encounter errors, please "
+ "try using a TPU VM or, if your data can fit in memory, loading it into memory as a dict of "
+ "Tensors instead of streaming with to_tf_dataset()."
+ )
+
+ if collate_fn is None:
+ # Set a very simple default collator that just stacks things together
+ collate_fn = minimal_tf_collate_fn
+ if collate_fn_args is None:
+ collate_fn_args = {}
+ if label_cols and not columns:
+ raise ValueError("Cannot specify label_cols without specifying columns!")
+ if label_cols is None:
+ label_cols = []
+ elif isinstance(label_cols, str):
+ label_cols = [label_cols]
+ if len(set(label_cols)) < len(label_cols):
+ raise ValueError("List of label_cols contains duplicates.")
+ if columns:
+ if isinstance(columns, str):
+ columns = [columns]
+ if len(set(columns)) < len(columns):
+ raise ValueError("List of columns contains duplicates.")
+ cols_to_retain = list(set(columns + label_cols))
+ else:
+ cols_to_retain = None # Indicates keeping all valid columns
+ columns = []
+
+ if self.format["type"] not in ["custom", "numpy"]:
+ dataset = self.with_format("numpy")
+ else:
+ dataset = self
+
+ # TODO(Matt, QL): deprecate the retention of label_ids and label
+
+ output_signature, columns_to_np_types = dataset._get_output_signature(
+ dataset,
+ collate_fn=collate_fn,
+ collate_fn_args=collate_fn_args,
+ cols_to_retain=cols_to_retain,
+ batch_size=batch_size if drop_remainder else None,
+ num_test_batches=num_test_batches,
+ )
+
+ if "labels" in output_signature:
+ if ("label_ids" in columns or "label" in columns) and "labels" not in columns:
+ columns = [col for col in columns if col not in ["label_ids", "label"]] + ["labels"]
+ if ("label_ids" in label_cols or "label" in label_cols) and "labels" not in label_cols:
+ label_cols = [col for col in label_cols if col not in ["label_ids", "label"]] + ["labels"]
+
+ for col in columns:
+ if col not in output_signature:
+ raise ValueError(f"Column {col} not found in dataset!")
+
+ for col in label_cols:
+ if col not in output_signature:
+ raise ValueError(f"Label column {col} not found in dataset!")
+
+ if num_workers == 0:
+ tf_dataset = dataset_to_tf(
+ dataset=dataset,
+ cols_to_retain=cols_to_retain,
+ collate_fn=collate_fn,
+ collate_fn_args=collate_fn_args,
+ columns_to_np_types=columns_to_np_types,
+ output_signature=output_signature,
+ shuffle=shuffle,
+ batch_size=batch_size,
+ drop_remainder=drop_remainder,
+ )
+ elif num_workers > 0:
+ if batch_size is None:
+ raise NotImplementedError(
+ "`batch_size` must be specified when using multiple workers, as unbatched multiprocessing "
+ "is not supported yet. Please provide a `batch_size` if `num_workers` is greater than 0."
+ )
+ tf_dataset = multiprocess_dataset_to_tf(
+ dataset=dataset,
+ cols_to_retain=cols_to_retain,
+ collate_fn=collate_fn,
+ collate_fn_args=collate_fn_args,
+ columns_to_np_types=columns_to_np_types,
+ output_signature=output_signature,
+ shuffle=shuffle,
+ batch_size=batch_size,
+ drop_remainder=drop_remainder,
+ num_workers=num_workers,
+ )
+ else:
+ raise ValueError("num_workers must be >= 0")
+
+ def split_features_and_labels(input_batch):
+ # TODO(Matt, QL): deprecate returning the dict content when there's only one key
+ features = {key: tensor for key, tensor in input_batch.items() if key in columns}
+ labels = {key: tensor for key, tensor in input_batch.items() if key in label_cols}
+ if len(features) == 1:
+ features = list(features.values())[0]
+ if len(labels) == 1:
+ labels = list(labels.values())[0]
+ if isinstance(labels, dict) and len(labels) == 0:
+ return features
+ else:
+ return features, labels
+
+ if cols_to_retain is not None:
+ tf_dataset = tf_dataset.map(split_features_and_labels)
+
+ if prefetch:
+ tf_dataset = tf_dataset.prefetch(tf.data.experimental.AUTOTUNE)
+
+ # Remove a reference to the open Arrow file on delete
+ def cleanup_callback(ref):
+ dataset.__del__()
+ self._TF_DATASET_REFS.remove(ref)
+
+ self._TF_DATASET_REFS.add(weakref.ref(tf_dataset, cleanup_callback))
+
+ return tf_dataset
+
+
+class DatasetTransformationNotAllowedError(Exception):
+ pass
+
+
+def transmit_format(func):
+ """Wrapper for dataset transforms that recreate a new Dataset to transmit the format of the original dataset to the new dataset"""
+
+ @wraps(func)
+ def wrapper(*args, **kwargs):
+ if args:
+ self: "Dataset" = args[0]
+ args = args[1:]
+ else:
+ self: "Dataset" = kwargs.pop("self")
+ # don't use self.format since it returns a list of columns for 'columns' even if self_format_columns is None
+ unformatted_columns = set(self.column_names) - set(self._format_columns or [])
+ self_format = {
+ "type": self._format_type,
+ "format_kwargs": self._format_kwargs,
+ "columns": self._format_columns,
+ "output_all_columns": self._output_all_columns,
+ }
+ # apply actual function
+ out: Union["Dataset", "DatasetDict"] = func(self, *args, **kwargs)
+ datasets: List["Dataset"] = list(out.values()) if isinstance(out, dict) else [out]
+ # re-apply format to the output
+ for dataset in datasets:
+ new_format = self_format.copy()
+ if new_format["columns"] is not None: # new formatted columns = (columns - previously unformatted columns)
+ # sort the columns to have a deterministic list of columns that we can compare with `out_format`
+ new_format["columns"] = sorted(set(dataset.column_names) - unformatted_columns)
+ out_format = {
+ "type": dataset._format_type,
+ "format_kwargs": dataset._format_kwargs,
+ "columns": sorted(dataset._format_columns) if dataset._format_columns is not None else None,
+ "output_all_columns": dataset._output_all_columns,
+ }
+ if out_format != new_format:
+ fingerprint = dataset._fingerprint
+ dataset.set_format(**new_format)
+ dataset._fingerprint = fingerprint
+ return out
+
+ wrapper._decorator_name_ = "transmit_format"
+ return wrapper
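+
+# Illustrative sketch (assuming `Dataset.select` is wrapped with `@transmit_format`, as it is
+# further down in this module): the decorator re-applies the caller's format to the output, e.g.
+#
+#   ds = ds.with_format("numpy", columns=["a"])
+#   subset = ds.select(range(10))
+#   assert subset.format["type"] == "numpy"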
+
+
+def transmit_tasks(func):
+ """Wrapper for dataset transforms that recreate a new Dataset to transmit the task templates of the original dataset to the new dataset"""
+
+ @wraps(func)
+ def wrapper(*args, **kwargs):
+ if args:
+ self: "Dataset" = args[0]
+ args = args[1:]
+ else:
+ self: "Dataset" = kwargs.pop("self")
+ # apply actual function
+ out: Union["Dataset", "DatasetDict"] = func(self, *args, **kwargs)
+ datasets: List["Dataset"] = list(out.values()) if isinstance(out, dict) else [out]
+ for dataset in datasets:
+ # Remove task templates if a column mapping of the template is no longer valid
+ if self.info.task_templates is not None:
+ dataset.info.task_templates = [
+ template
+ for template in self.info.task_templates
+ if all(
+ dataset._info.features.get(k) == self._info.features.get(k)
+ for k in template.column_mapping.keys()
+ )
+ ]
+ return out
+
+ wrapper._decorator_name_ = "transmit_tasks"
+ return wrapper
+
+
+def update_metadata_with_features(table: Table, features: Features):
+ """To be used in dataset transforms that modify the features of the dataset, in order to update the features stored in the metadata of its schema."""
+ features = Features({col_name: features[col_name] for col_name in table.column_names})
+ if table.schema.metadata is None or b"huggingface" not in table.schema.metadata:
+ pa_metadata = ArrowWriter._build_metadata(DatasetInfo(features=features))
+ else:
+ metadata = json.loads(table.schema.metadata[b"huggingface"].decode())
+ if "info" not in metadata:
+ metadata["info"] = asdict(DatasetInfo(features=features))
+ else:
+ metadata["info"]["features"] = asdict(DatasetInfo(features=features))["features"]
+ pa_metadata = {"huggingface": json.dumps(metadata)}
+ table = table.replace_schema_metadata(pa_metadata)
+ return table
+
+
+def _check_table(table) -> Table:
+ """We check the table type to make sure it's an instance of :class:`datasets.table.Table`"""
+ if isinstance(table, pa.Table):
+ # for a pyarrow table, we can just consider it as a in-memory table
+ # this is here for backward compatibility
+ return InMemoryTable(table)
+ elif isinstance(table, Table):
+ return table
+ else:
+ raise TypeError(f"Expected a pyarrow.Table or a datasets.table.Table object, but got {table}.")
+
+
+def _check_column_names(column_names: List[str]):
+ """Check the column names to make sure they don't contain duplicates."""
+ counter = Counter(column_names)
+ if not all(count == 1 for count in counter.values()):
+ duplicated_columns = [col for col in counter if counter[col] > 1]
+ raise ValueError(f"The table can't have duplicated columns but columns {duplicated_columns} are duplicated.")
+
+
+def _check_valid_indices_value(index, size):
+ if (index < 0 and index + size < 0) or (index >= size):
+ raise IndexError(f"Index {index} out of range for dataset of size {size}.")
+
+
+class NonExistentDatasetError(Exception):
+ """Used when we expect the existence of a dataset"""
+
+ pass
+
+
+class Dataset(DatasetInfoMixin, IndexableMixin, TensorflowDatasetMixin):
+ """A Dataset backed by an Arrow table."""
+
+ def __init__(
+ self,
+ arrow_table: Table,
+ info: Optional[DatasetInfo] = None,
+ split: Optional[NamedSplit] = None,
+ indices_table: Optional[Table] = None,
+ fingerprint: Optional[str] = None,
+ ):
+ info = info.copy() if info is not None else DatasetInfo()
+ DatasetInfoMixin.__init__(self, info=info, split=split)
+ IndexableMixin.__init__(self)
+
+ self._data: Table = _check_table(arrow_table)
+ self._indices: Optional[Table] = _check_table(indices_table) if indices_table is not None else None
+ maybe_register_dataset_for_temp_dir_deletion(self)
+
+ self._format_type: Optional[str] = None
+ self._format_kwargs: dict = {}
+ self._format_columns: Optional[list] = None
+ self._output_all_columns: bool = False
+ self._fingerprint: str = fingerprint
+
+ # Read metadata
+
+ if self._data.schema.metadata is not None and b"huggingface" in self._data.schema.metadata:
+ metadata = json.loads(self._data.schema.metadata[b"huggingface"].decode())
+ if (
+ "fingerprint" in metadata and self._fingerprint is None
+ ): # try to load fingerprint from the arrow file metadata
+ self._fingerprint = metadata["fingerprint"]
+
+ # Infer features if None
+ inferred_features = Features.from_arrow_schema(arrow_table.schema)
+ if self.info.features is None:
+ self.info.features = inferred_features
+ else: # make sure the nested columns are in the right order
+ try:
+ self.info.features = self.info.features.reorder_fields_as(inferred_features)
+ except ValueError as e:
+ raise ValueError(
+ f"{e}\nThe 'source' features come from dataset_info.json, and the 'target' ones are those of the dataset arrow file."
+ )
+
+ # Infer fingerprint if None
+
+ if self._fingerprint is None:
+ self._fingerprint = generate_fingerprint(self)
+
+ # Sanity checks
+
+ if self._info.features is None:
+ raise ValueError("Features can't be None in a Dataset object")
+ if self._fingerprint is None:
+ raise ValueError("Fingerprint can't be None in a Dataset object")
+ if self.info.features.type != inferred_features.type:
+ raise ValueError(
+ f"External features info don't match the dataset:\nGot\n{self.info.features}\nwith type\n{self.info.features.type}\n\nbut expected something like\n{inferred_features}\nwith type\n{inferred_features.type}"
+ )
+
+ if self._indices is not None:
+ if not pa.types.is_unsigned_integer(self._indices.column(0).type):
+ raise ValueError(
+ f"indices must be an Arrow table of unsigned integers, current type is {self._indices.column(0).type}"
+ )
+ _check_column_names(self._data.column_names)
+
+ self._data = update_metadata_with_features(self._data, self._info.features)
+
+ @property
+ def features(self) -> Features:
+ features = super().features
+ if features is None: # this is already checked in __init__
+ raise ValueError("Features can't be None in a Dataset object")
+ return features
+
+ @classmethod
+ def from_file(
+ cls,
+ filename: str,
+ info: Optional[DatasetInfo] = None,
+ split: Optional[NamedSplit] = None,
+ indices_filename: Optional[str] = None,
+ in_memory: bool = False,
+ ) -> "Dataset":
+ """Instantiate a Dataset backed by an Arrow table at filename.
+
+ Args:
+ filename (`str`):
+ File name of the dataset.
+ info (`DatasetInfo`, *optional*):
+ Dataset information, like description, citation, etc.
+ split (`NamedSplit`, *optional*):
+ Name of the dataset split.
+ indices_filename (`str`, *optional*):
+                File name of the indices.
+ in_memory (`bool`, defaults to `False`):
+ Whether to copy the data in-memory.
+
+ Returns:
+ [`Dataset`]
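+
+        Example:
+
+        A minimal sketch, assuming `path/to/dataset.arrow` is an Arrow file previously written by `datasets` (the path is illustrative):
+
+        ```py
+        >>> ds = Dataset.from_file('path/to/dataset.arrow')
+        ```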
+ """
+ table = ArrowReader.read_table(filename, in_memory=in_memory)
+
+ if indices_filename is not None:
+ indices_pa_table = ArrowReader.read_table(indices_filename, in_memory=in_memory)
+ else:
+ indices_pa_table = None
+
+ return cls(
+ arrow_table=table,
+ info=info,
+ split=split,
+ indices_table=indices_pa_table,
+ )
+
+ @classmethod
+ def from_buffer(
+ cls,
+ buffer: pa.Buffer,
+ info: Optional[DatasetInfo] = None,
+ split: Optional[NamedSplit] = None,
+ indices_buffer: Optional[pa.Buffer] = None,
+ ) -> "Dataset":
+ """Instantiate a Dataset backed by an Arrow buffer.
+
+ Args:
+ buffer (`pyarrow.Buffer`):
+ Arrow buffer.
+ info (`DatasetInfo`, *optional*):
+ Dataset information, like description, citation, etc.
+ split (`NamedSplit`, *optional*):
+ Name of the dataset split.
+ indices_buffer (`pyarrow.Buffer`, *optional*):
+ Indices Arrow buffer.
+
+ Returns:
+ [`Dataset`]
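+
+        Example:
+
+        A minimal sketch, assuming the buffer holds an Arrow IPC stream (column names are illustrative):
+
+        ```py
+        >>> import pyarrow as pa
+        >>> table = pa.table({"text": ["Good", "Bad"], "label": [0, 1]})
+        >>> sink = pa.BufferOutputStream()
+        >>> with pa.ipc.new_stream(sink, table.schema) as writer:
+        ...     writer.write_table(table)
+        >>> ds = Dataset.from_buffer(sink.getvalue())
+        ```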
+ """
+ table = InMemoryTable.from_buffer(buffer)
+
+ if indices_buffer is not None:
+            indices_table = InMemoryTable.from_buffer(indices_buffer)
+ else:
+ indices_table = None
+
+ return cls(table, info=info, split=split, indices_table=indices_table)
+
+ @classmethod
+ def from_pandas(
+ cls,
+ df: pd.DataFrame,
+ features: Optional[Features] = None,
+ info: Optional[DatasetInfo] = None,
+ split: Optional[NamedSplit] = None,
+ preserve_index: Optional[bool] = None,
+ ) -> "Dataset":
+ """
+ Convert `pandas.DataFrame` to a `pyarrow.Table` to create a [`Dataset`].
+
+ The column types in the resulting Arrow Table are inferred from the dtypes of the `pandas.Series` in the
+ DataFrame. In the case of non-object Series, the NumPy dtype is translated to its Arrow equivalent. In the
+ case of `object`, we need to guess the datatype by looking at the Python objects in this Series.
+
+ Be aware that Series of the `object` dtype don't carry enough information to always lead to a meaningful Arrow
+ type. In the case that we cannot infer a type, e.g. because the DataFrame is of length 0 or the Series only
+ contains `None/nan` objects, the type is set to `null`. This behavior can be avoided by constructing explicit
+ features and passing it to this function.
+
+ Args:
+ df (`pandas.DataFrame`):
+ Dataframe that contains the dataset.
+ features ([`Features`], *optional*):
+ Dataset features.
+ info (`DatasetInfo`, *optional*):
+ Dataset information, like description, citation, etc.
+ split (`NamedSplit`, *optional*):
+ Name of the dataset split.
+ preserve_index (`bool`, *optional*):
+ Whether to store the index as an additional column in the resulting Dataset.
+ The default of `None` will store the index as a column, except for `RangeIndex` which is stored as metadata only.
+ Use `preserve_index=True` to force it to be stored as a column.
+
+ Returns:
+ [`Dataset`]
+
+ Example:
+
+ ```py
+ >>> ds = Dataset.from_pandas(df)
+ ```
+ """
+ if info is not None and features is not None and info.features != features:
+ raise ValueError(
+ f"Features specified in `features` and `info.features` can't be different:\n{features}\n{info.features}"
+ )
+ features = features if features is not None else info.features if info is not None else None
+ if info is None:
+ info = DatasetInfo()
+ info.features = features
+ table = InMemoryTable.from_pandas(
+ df=df,
+ preserve_index=preserve_index,
+ )
+ if features is not None:
+ # more expensive cast than InMemoryTable.from_pandas(..., schema=features.arrow_schema)
+ # needed to support the str to Audio conversion for instance
+ table = table.cast(features.arrow_schema)
+ return cls(table, info=info, split=split)
+
+ @classmethod
+ def from_dict(
+ cls,
+ mapping: dict,
+ features: Optional[Features] = None,
+ info: Optional[DatasetInfo] = None,
+ split: Optional[NamedSplit] = None,
+ ) -> "Dataset":
+ """
+ Convert `dict` to a `pyarrow.Table` to create a [`Dataset`].
+
+ Args:
+ mapping (`Mapping`):
+ Mapping of strings to Arrays or Python lists.
+ features ([`Features`], *optional*):
+ Dataset features.
+ info (`DatasetInfo`, *optional*):
+ Dataset information, like description, citation, etc.
+ split (`NamedSplit`, *optional*):
+ Name of the dataset split.
+
+ Returns:
+ [`Dataset`]
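+
+        Example:
+
+        A minimal sketch with in-memory Python lists (column names are illustrative):
+
+        ```py
+        >>> ds = Dataset.from_dict({"text": ["Good", "Bad"], "label": [0, 1]})
+        ```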
+ """
+ if info is not None and features is not None and info.features != features:
+ raise ValueError(
+ f"Features specified in `features` and `info.features` can't be different:\n{features}\n{info.features}"
+ )
+ features = features if features is not None else info.features if info is not None else None
+ arrow_typed_mapping = {}
+ for col, data in mapping.items():
+ if isinstance(data, (pa.Array, pa.ChunkedArray)):
+ data = cast_array_to_feature(data, features[col]) if features is not None else data
+ else:
+ data = OptimizedTypedSequence(
+ features.encode_column(data, col) if features is not None else data,
+ type=features[col] if features is not None else None,
+ col=col,
+ )
+ arrow_typed_mapping[col] = data
+ mapping = arrow_typed_mapping
+ pa_table = InMemoryTable.from_pydict(mapping=mapping)
+ if info is None:
+ info = DatasetInfo()
+ info.features = features
+ if info.features is None:
+ info.features = Features(
+ {
+ col: generate_from_arrow_type(data.type)
+ if isinstance(data, (pa.Array, pa.ChunkedArray))
+ else data.get_inferred_type()
+ for col, data in mapping.items()
+ }
+ )
+ return cls(pa_table, info=info, split=split)
+
+ @classmethod
+ def from_list(
+ cls,
+ mapping: List[dict],
+ features: Optional[Features] = None,
+ info: Optional[DatasetInfo] = None,
+ split: Optional[NamedSplit] = None,
+ ) -> "Dataset":
+ """
+        Convert a list of dicts to a `pyarrow.Table` to create a [`Dataset`].
+
+ Note that the keys of the first entry will be used to determine the dataset columns,
+ regardless of what is passed to features.
+
+ Args:
+ mapping (`List[dict]`): A list of mappings of strings to row values.
+ features (`Features`, optional): Dataset features.
+ info (`DatasetInfo`, optional): Dataset information, like description, citation, etc.
+ split (`NamedSplit`, optional): Name of the dataset split.
+
+ Returns:
+ [`Dataset`]
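+
+        Example:
+
+        A minimal sketch with illustrative row dicts; the keys of the first dict determine the columns:
+
+        ```py
+        >>> ds = Dataset.from_list([{"text": "Good", "label": 0}, {"text": "Bad", "label": 1}])
+        ```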
+ """
+ # for simplicity and consistency wrt OptimizedTypedSequence we do not use InMemoryTable.from_pylist here
+ mapping = {k: [r.get(k) for r in mapping] for k in mapping[0]} if mapping else {}
+ return cls.from_dict(mapping, features, info, split)
+
+ @staticmethod
+ def from_csv(
+ path_or_paths: Union[PathLike, List[PathLike]],
+ split: Optional[NamedSplit] = None,
+ features: Optional[Features] = None,
+ cache_dir: str = None,
+ keep_in_memory: bool = False,
+ num_proc: Optional[int] = None,
+ **kwargs,
+ ):
+ """Create Dataset from CSV file(s).
+
+ Args:
+ path_or_paths (`path-like` or list of `path-like`):
+ Path(s) of the CSV file(s).
+ split ([`NamedSplit`], *optional*):
+ Split name to be assigned to the dataset.
+ features ([`Features`], *optional*):
+ Dataset features.
+ cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`):
+ Directory to cache data.
+ keep_in_memory (`bool`, defaults to `False`):
+ Whether to copy the data in-memory.
+ num_proc (`int`, *optional*, defaults to `None`):
+ Number of processes when downloading and generating the dataset locally.
+ This is helpful if the dataset is made of multiple files. Multiprocessing is disabled by default.
+
+
+ **kwargs (additional keyword arguments):
+ Keyword arguments to be passed to [`pandas.read_csv`].
+
+ Returns:
+ [`Dataset`]
+
+ Example:
+
+ ```py
+ >>> ds = Dataset.from_csv('path/to/dataset.csv')
+ ```
+ """
+ # Dynamic import to avoid circular dependency
+ from .io.csv import CsvDatasetReader
+
+ return CsvDatasetReader(
+ path_or_paths,
+ split=split,
+ features=features,
+ cache_dir=cache_dir,
+ keep_in_memory=keep_in_memory,
+ num_proc=num_proc,
+ **kwargs,
+ ).read()
+
+ @staticmethod
+ def from_generator(
+ generator: Callable,
+ features: Optional[Features] = None,
+ cache_dir: str = None,
+ keep_in_memory: bool = False,
+ gen_kwargs: Optional[dict] = None,
+ num_proc: Optional[int] = None,
+ **kwargs,
+ ):
+ """Create a Dataset from a generator.
+
+ Args:
+            generator (`Callable`):
+ A generator function that `yields` examples.
+ features ([`Features`], *optional*):
+ Dataset features.
+ cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`):
+ Directory to cache data.
+ keep_in_memory (`bool`, defaults to `False`):
+ Whether to copy the data in-memory.
+ gen_kwargs(`dict`, *optional*):
+ Keyword arguments to be passed to the `generator` callable.
+ You can define a sharded dataset by passing the list of shards in `gen_kwargs` and setting `num_proc` greater than 1.
+ num_proc (`int`, *optional*, defaults to `None`):
+ Number of processes when downloading and generating the dataset locally.
+ This is helpful if the dataset is made of multiple files. Multiprocessing is disabled by default.
+ If `num_proc` is greater than one, then all list values in `gen_kwargs` must be the same length. These values will be split between calls to the generator. The number of shards will be the minimum of the shortest list in `gen_kwargs` and `num_proc`.
+
+
+ **kwargs (additional keyword arguments):
+                Keyword arguments to be passed to [`GeneratorConfig`].
+
+ Returns:
+ [`Dataset`]
+
+ Example:
+
+ ```py
+ >>> def gen():
+ ... yield {"text": "Good", "label": 0}
+ ... yield {"text": "Bad", "label": 1}
+ ...
+ >>> ds = Dataset.from_generator(gen)
+ ```
+
+ ```py
+ >>> def gen(shards):
+ ... for shard in shards:
+ ... with open(shard) as f:
+ ... for line in f:
+ ... yield {"line": line}
+ ...
+ >>> shards = [f"data{i}.txt" for i in range(32)]
+ >>> ds = Dataset.from_generator(gen, gen_kwargs={"shards": shards})
+ ```
+ """
+ from .io.generator import GeneratorDatasetInputStream
+
+ return GeneratorDatasetInputStream(
+ generator=generator,
+ features=features,
+ cache_dir=cache_dir,
+ keep_in_memory=keep_in_memory,
+ gen_kwargs=gen_kwargs,
+ num_proc=num_proc,
+ **kwargs,
+ ).read()
+
+ @staticmethod
+ def from_json(
+ path_or_paths: Union[PathLike, List[PathLike]],
+ split: Optional[NamedSplit] = None,
+ features: Optional[Features] = None,
+ cache_dir: str = None,
+ keep_in_memory: bool = False,
+ field: Optional[str] = None,
+ num_proc: Optional[int] = None,
+ **kwargs,
+ ):
+ """Create Dataset from JSON or JSON Lines file(s).
+
+ Args:
+ path_or_paths (`path-like` or list of `path-like`):
+ Path(s) of the JSON or JSON Lines file(s).
+ split ([`NamedSplit`], *optional*):
+ Split name to be assigned to the dataset.
+ features ([`Features`], *optional*):
+ Dataset features.
+ cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`):
+ Directory to cache data.
+ keep_in_memory (`bool`, defaults to `False`):
+ Whether to copy the data in-memory.
+ field (`str`, *optional*):
+                Field name of the JSON file that contains the dataset.
+            num_proc (`int`, *optional*, defaults to `None`):
+ Number of processes when downloading and generating the dataset locally.
+ This is helpful if the dataset is made of multiple files. Multiprocessing is disabled by default.
+
+
+ **kwargs (additional keyword arguments):
+ Keyword arguments to be passed to [`JsonConfig`].
+
+ Returns:
+ [`Dataset`]
+
+ Example:
+
+ ```py
+ >>> ds = Dataset.from_json('path/to/dataset.json')
+ ```
+ """
+ # Dynamic import to avoid circular dependency
+ from .io.json import JsonDatasetReader
+
+ return JsonDatasetReader(
+ path_or_paths,
+ split=split,
+ features=features,
+ cache_dir=cache_dir,
+ keep_in_memory=keep_in_memory,
+ field=field,
+ num_proc=num_proc,
+ **kwargs,
+ ).read()
+
+ @staticmethod
+ def from_parquet(
+ path_or_paths: Union[PathLike, List[PathLike]],
+ split: Optional[NamedSplit] = None,
+ features: Optional[Features] = None,
+ cache_dir: str = None,
+ keep_in_memory: bool = False,
+ columns: Optional[List[str]] = None,
+ num_proc: Optional[int] = None,
+ **kwargs,
+ ):
+ """Create Dataset from Parquet file(s).
+
+ Args:
+ path_or_paths (`path-like` or list of `path-like`):
+ Path(s) of the Parquet file(s).
+ split (`NamedSplit`, *optional*):
+ Split name to be assigned to the dataset.
+ features (`Features`, *optional*):
+ Dataset features.
+ cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`):
+ Directory to cache data.
+ keep_in_memory (`bool`, defaults to `False`):
+ Whether to copy the data in-memory.
+ columns (`List[str]`, *optional*):
+ If not `None`, only these columns will be read from the file.
+ A column name may be a prefix of a nested field, e.g. 'a' will select
+ 'a.b', 'a.c', and 'a.d.e'.
+ num_proc (`int`, *optional*, defaults to `None`):
+ Number of processes when downloading and generating the dataset locally.
+ This is helpful if the dataset is made of multiple files. Multiprocessing is disabled by default.
+
+
+ **kwargs (additional keyword arguments):
+ Keyword arguments to be passed to [`ParquetConfig`].
+
+ Returns:
+ [`Dataset`]
+
+ Example:
+
+ ```py
+ >>> ds = Dataset.from_parquet('path/to/dataset.parquet')
+ ```
+ """
+ # Dynamic import to avoid circular dependency
+ from .io.parquet import ParquetDatasetReader
+
+ return ParquetDatasetReader(
+ path_or_paths,
+ split=split,
+ features=features,
+ cache_dir=cache_dir,
+ keep_in_memory=keep_in_memory,
+ columns=columns,
+ num_proc=num_proc,
+ **kwargs,
+ ).read()
+
+ @staticmethod
+ def from_text(
+ path_or_paths: Union[PathLike, List[PathLike]],
+ split: Optional[NamedSplit] = None,
+ features: Optional[Features] = None,
+ cache_dir: str = None,
+ keep_in_memory: bool = False,
+ num_proc: Optional[int] = None,
+ **kwargs,
+ ):
+ """Create Dataset from text file(s).
+
+ Args:
+ path_or_paths (`path-like` or list of `path-like`):
+ Path(s) of the text file(s).
+ split (`NamedSplit`, *optional*):
+ Split name to be assigned to the dataset.
+ features (`Features`, *optional*):
+ Dataset features.
+ cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`):
+ Directory to cache data.
+ keep_in_memory (`bool`, defaults to `False`):
+ Whether to copy the data in-memory.
+ num_proc (`int`, *optional*, defaults to `None`):
+ Number of processes when downloading and generating the dataset locally.
+ This is helpful if the dataset is made of multiple files. Multiprocessing is disabled by default.
+
+
+ **kwargs (additional keyword arguments):
+ Keyword arguments to be passed to [`TextConfig`].
+
+ Returns:
+ [`Dataset`]
+
+ Example:
+
+ ```py
+ >>> ds = Dataset.from_text('path/to/dataset.txt')
+ ```
+ """
+ # Dynamic import to avoid circular dependency
+ from .io.text import TextDatasetReader
+
+ return TextDatasetReader(
+ path_or_paths,
+ split=split,
+ features=features,
+ cache_dir=cache_dir,
+ keep_in_memory=keep_in_memory,
+ num_proc=num_proc,
+ **kwargs,
+ ).read()
+
+ @staticmethod
+ def from_spark(
+ df: "pyspark.sql.DataFrame",
+ split: Optional[NamedSplit] = None,
+ features: Optional[Features] = None,
+ keep_in_memory: bool = False,
+ cache_dir: str = None,
+ working_dir: str = None,
+ load_from_cache_file: bool = True,
+ **kwargs,
+ ):
+ """Create a Dataset from Spark DataFrame. Dataset downloading is distributed over Spark workers.
+
+ Args:
+ df (`pyspark.sql.DataFrame`):
+ The DataFrame containing the desired data.
+ split (`NamedSplit`, *optional*):
+ Split name to be assigned to the dataset.
+ features (`Features`, *optional*):
+ Dataset features.
+ cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`):
+ Directory to cache data. When using a multi-node Spark cluster, the cache_dir must be accessible to both
+ workers and the driver.
+ keep_in_memory (`bool`):
+ Whether to copy the data in-memory.
+            working_dir (`str`, *optional*):
+ Intermediate directory for each Spark worker to write data to before moving it to `cache_dir`. Setting
+ a non-NFS intermediate directory may improve performance.
+ load_from_cache_file (`bool`):
+ Whether to load the dataset from the cache if possible.
+
+ Returns:
+ [`Dataset`]
+
+ Example:
+
+ ```py
+ >>> df = spark.createDataFrame(
+ >>> data=[[1, "Elia"], [2, "Teo"], [3, "Fang"]],
+        >>>     schema=["id", "name"],
+ >>> )
+ >>> ds = Dataset.from_spark(df)
+ ```
+ """
+ # Dynamic import to avoid circular dependency
+ from .io.spark import SparkDatasetReader
+
+ if sys.platform == "win32":
+ raise EnvironmentError("Dataset.from_spark is not currently supported on Windows")
+
+ return SparkDatasetReader(
+ df,
+ split=split,
+ features=features,
+ streaming=False,
+ cache_dir=cache_dir,
+ keep_in_memory=keep_in_memory,
+ working_dir=working_dir,
+ load_from_cache_file=load_from_cache_file,
+ **kwargs,
+ ).read()
+
+ @staticmethod
+ def from_sql(
+ sql: Union[str, "sqlalchemy.sql.Selectable"],
+ con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
+ features: Optional[Features] = None,
+ cache_dir: str = None,
+ keep_in_memory: bool = False,
+ **kwargs,
+ ):
+ """Create Dataset from SQL query or database table.
+
+ Args:
+ sql (`str` or `sqlalchemy.sql.Selectable`):
+ SQL query to be executed or a table name.
+            con (`str` or `sqlite3.Connection` or `sqlalchemy.engine.Connection` or `sqlalchemy.engine.Engine`):
+ A [URI string](https://docs.sqlalchemy.org/en/13/core/engines.html#database-urls) used to instantiate a database connection or a SQLite3/SQLAlchemy connection object.
+ features ([`Features`], *optional*):
+ Dataset features.
+ cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`):
+ Directory to cache data.
+ keep_in_memory (`bool`, defaults to `False`):
+ Whether to copy the data in-memory.
+ **kwargs (additional keyword arguments):
+ Keyword arguments to be passed to [`SqlConfig`].
+
+ Returns:
+ [`Dataset`]
+
+ Example:
+
+ ```py
+ >>> # Fetch a database table
+ >>> ds = Dataset.from_sql("test_data", "postgres:///db_name")
+ >>> # Execute a SQL query on the table
+ >>> ds = Dataset.from_sql("SELECT sentence FROM test_data", "postgres:///db_name")
+ >>> # Use a Selectable object to specify the query
+ >>> from sqlalchemy import select, text
+ >>> stmt = select([text("sentence")]).select_from(text("test_data"))
+ >>> ds = Dataset.from_sql(stmt, "postgres:///db_name")
+ ```
+
+        <Tip>
+
+        The returned dataset can only be cached if `con` is specified as a URI string.
+
+        </Tip>
+ """
+ from .io.sql import SqlDatasetReader
+
+ return SqlDatasetReader(
+ sql,
+ con,
+ features=features,
+ cache_dir=cache_dir,
+ keep_in_memory=keep_in_memory,
+ **kwargs,
+ ).read()
+
+ def __setstate__(self, state):
+ self.__dict__.update(state)
+ maybe_register_dataset_for_temp_dir_deletion(self)
+ return self
+
+ def __del__(self):
+ if hasattr(self, "_data"):
+ del self._data
+ if hasattr(self, "_indices"):
+ del self._indices
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ # Here `del` is used to del the pyarrow tables. This properly closes the files used for memory mapped tables
+ self.__del__()
+
+ def save_to_disk(
+ self,
+ dataset_path: PathLike,
+ fs="deprecated",
+ max_shard_size: Optional[Union[str, int]] = None,
+ num_shards: Optional[int] = None,
+ num_proc: Optional[int] = None,
+ storage_options: Optional[dict] = None,
+ ):
+ """
+ Saves a dataset to a dataset directory, or in a filesystem using any implementation of `fsspec.spec.AbstractFileSystem`.
+
+ For [`Image`] and [`Audio`] data:
+
+ All the Image() and Audio() data are stored in the arrow files.
+ If you want to store paths or urls, please use the Value("string") type.
+
+ Args:
+ dataset_path (`str`):
+ Path (e.g. `dataset/train`) or remote URI (e.g. `s3://my-bucket/dataset/train`)
+ of the dataset directory where the dataset will be saved to.
+ fs (`fsspec.spec.AbstractFileSystem`, *optional*):
+ Instance of the remote filesystem where the dataset will be saved to.
+
+                <Deprecated version="2.8.0">
+
+                `fs` was deprecated in version 2.8.0 and will be removed in 3.0.0.
+                Please use `storage_options` instead, e.g. `storage_options=fs.storage_options`
+
+                </Deprecated>
+
+ max_shard_size (`int` or `str`, *optional*, defaults to `"500MB"`):
+                The maximum size of the dataset shards to be saved to the filesystem. If expressed as a string, needs to be digits followed by a unit
+ (like `"50MB"`).
+ num_shards (`int`, *optional*):
+ Number of shards to write. By default the number of shards depends on `max_shard_size` and `num_proc`.
+
+
+ num_proc (`int`, *optional*):
+ Number of processes when downloading and generating the dataset locally.
+ Multiprocessing is disabled by default.
+
+
+ storage_options (`dict`, *optional*):
+ Key/value pairs to be passed on to the file-system backend, if any.
+
+
+
+ Example:
+
+ ```py
+ >>> ds.save_to_disk("path/to/dataset/directory")
+ >>> ds.save_to_disk("path/to/dataset/directory", max_shard_size="1GB")
+ >>> ds.save_to_disk("path/to/dataset/directory", num_shards=1024)
+ ```
+ """
+ if max_shard_size is not None and num_shards is not None:
+ raise ValueError(
+                "Failed to save_to_disk: please specify either max_shard_size or num_shards, but not both."
+ )
+ if fs != "deprecated":
+ warnings.warn(
+ "'fs' was deprecated in favor of 'storage_options' in version 2.8.0 and will be removed in 3.0.0.\n"
+ "You can remove this warning by passing 'storage_options=fs.storage_options' instead.",
+ FutureWarning,
+ )
+ storage_options = fs.storage_options
+
+ if self.list_indexes():
+ raise ValueError("please remove all the indexes using `dataset.drop_index` before saving a dataset")
+
+ if num_shards is None:
+ dataset_nbytes = self._estimate_nbytes()
+ max_shard_size = convert_file_size_to_int(max_shard_size or config.MAX_SHARD_SIZE)
+ num_shards = int(dataset_nbytes / max_shard_size) + 1
+ num_shards = max(num_shards, num_proc or 1)
+
+ num_proc = num_proc if num_proc is not None else 1
+ num_shards = num_shards if num_shards is not None else num_proc
+
+ fs: fsspec.AbstractFileSystem
+ fs, _, _ = fsspec.get_fs_token_paths(dataset_path, storage_options=storage_options)
+
+ if not is_remote_filesystem(fs):
+ parent_cache_files_paths = {
+ Path(cache_filename["filename"]).resolve().parent for cache_filename in self.cache_files
+ }
+            # Check that the dataset doesn't overwrite itself. It can cause a permission error on Windows and a segfault on Linux.
+ if Path(dataset_path).expanduser().resolve() in parent_cache_files_paths:
+ raise PermissionError(
+ f"Tried to overwrite {Path(dataset_path).expanduser().resolve()} but a dataset can't overwrite itself."
+ )
+
+ fs.makedirs(dataset_path, exist_ok=True)
+
+ # Get json serializable state
+ state = {
+ key: self.__dict__[key]
+ for key in [
+ "_fingerprint",
+ "_format_columns",
+ "_format_kwargs",
+ "_format_type",
+ "_output_all_columns",
+ ]
+ }
+ state["_split"] = str(self.split) if self.split is not None else self.split
+ state["_data_files"] = [
+ {"filename": f"data-{shard_idx:05d}-of-{num_shards:05d}.arrow"} for shard_idx in range(num_shards)
+ ]
+ for k in state["_format_kwargs"].keys():
+ try:
+ json.dumps(state["_format_kwargs"][k])
+ except TypeError as e:
+ raise TypeError(
+ str(e) + f"\nThe format kwargs must be JSON serializable, but key '{k}' isn't."
+ ) from None
+ # Get json serializable dataset info
+ dataset_info = asdict(self._info)
+
+ shards_done = 0
+ pbar = hf_tqdm(
+ unit=" examples",
+ total=len(self),
+ desc=f"Saving the dataset ({shards_done}/{num_shards} shards)",
+ )
+ kwargs_per_job = (
+ {
+ "job_id": shard_idx,
+ "shard": self.shard(num_shards=num_shards, index=shard_idx, contiguous=True),
+ "fpath": posixpath.join(dataset_path, f"data-{shard_idx:05d}-of-{num_shards:05d}.arrow"),
+ "storage_options": storage_options,
+ }
+ for shard_idx in range(num_shards)
+ )
+ shard_lengths = [None] * num_shards
+ shard_sizes = [None] * num_shards
+ if num_proc > 1:
+ with Pool(num_proc) as pool:
+ with pbar:
+ for job_id, done, content in iflatmap_unordered(
+ pool, Dataset._save_to_disk_single, kwargs_iterable=kwargs_per_job
+ ):
+ if done:
+ shards_done += 1
+ pbar.set_description(f"Saving the dataset ({shards_done}/{num_shards} shards)")
+ logger.debug(f"Finished writing shard number {job_id} of {num_shards}.")
+ shard_lengths[job_id], shard_sizes[job_id] = content
+ else:
+ pbar.update(content)
+ else:
+ with pbar:
+ for kwargs in kwargs_per_job:
+ for job_id, done, content in Dataset._save_to_disk_single(**kwargs):
+ if done:
+ shards_done += 1
+ pbar.set_description(f"Saving the dataset ({shards_done}/{num_shards} shards)")
+ logger.debug(f"Finished writing shard number {job_id} of {num_shards}.")
+ shard_lengths[job_id], shard_sizes[job_id] = content
+ else:
+ pbar.update(content)
+ with fs.open(
+ posixpath.join(dataset_path, config.DATASET_STATE_JSON_FILENAME), "w", encoding="utf-8"
+ ) as state_file:
+ json.dump(state, state_file, indent=2, sort_keys=True)
+ with fs.open(
+ posixpath.join(dataset_path, config.DATASET_INFO_FILENAME), "w", encoding="utf-8"
+ ) as dataset_info_file:
+ # Sort only the first level of keys, or we might shuffle fields of nested features if we use sort_keys=True
+ sorted_keys_dataset_info = {key: dataset_info[key] for key in sorted(dataset_info)}
+ json.dump(sorted_keys_dataset_info, dataset_info_file, indent=2)
+
+ @staticmethod
+ def _save_to_disk_single(job_id: int, shard: "Dataset", fpath: str, storage_options: Optional[dict]):
+ batch_size = config.DEFAULT_MAX_BATCH_SIZE
+
+ num_examples_progress_update = 0
+ writer = ArrowWriter(
+ features=shard.features,
+ path=fpath,
+ storage_options=storage_options,
+ embed_local_files=True,
+ )
+ try:
+ _time = time.time()
+ for pa_table in shard.with_format("arrow").iter(batch_size):
+ writer.write_table(pa_table)
+ num_examples_progress_update += len(pa_table)
+ if time.time() > _time + config.PBAR_REFRESH_TIME_INTERVAL:
+ _time = time.time()
+ yield job_id, False, num_examples_progress_update
+ num_examples_progress_update = 0
+ finally:
+ yield job_id, False, num_examples_progress_update
+ num_examples, num_bytes = writer.finalize()
+ writer.close()
+
+ yield job_id, True, (num_examples, num_bytes)
+
+ @staticmethod
+ def _build_local_temp_path(uri_or_path: str) -> Path:
+ """
+ Builds and returns a Path concatenating a local temporary dir with the dir path (or absolute/relative
+ path extracted from the uri) passed.
+
+ Args:
+ uri_or_path (`str`): Path (e.g. `"dataset/train"`) or remote URI (e.g.
+ `"s3://my-bucket/dataset/train"`) to concatenate.
+
+ Returns:
+ :class:`Path`: the concatenated path (temp dir + path)
+ """
+ src_dataset_path = Path(uri_or_path)
+ tmp_dir = get_temporary_cache_files_directory()
+ return Path(tmp_dir, src_dataset_path.relative_to(src_dataset_path.anchor))
+
+ @staticmethod
+ def load_from_disk(
+ dataset_path: str,
+ fs="deprecated",
+ keep_in_memory: Optional[bool] = None,
+ storage_options: Optional[dict] = None,
+ ) -> "Dataset":
+ """
+ Loads a dataset that was previously saved using [`save_to_disk`] from a dataset directory, or from a
+ filesystem using any implementation of `fsspec.spec.AbstractFileSystem`.
+
+ Args:
+ dataset_path (`str`):
+ Path (e.g. `"dataset/train"`) or remote URI (e.g. `"s3://my-bucket/dataset/train"`)
+ of the dataset directory where the dataset will be loaded from.
+ fs (`fsspec.spec.AbstractFileSystem`, *optional*):
+ Instance of the remote filesystem where the dataset will be loaded from.
+
+ `fs` was deprecated in version 2.8.0 and will be removed in 3.0.0.
+ Please use `storage_options` instead, e.g. `storage_options=fs.storage_options`.
+
+ keep_in_memory (`bool`, defaults to `None`):
+ Whether to copy the dataset in-memory. If `None`, the
+ dataset will not be copied in-memory unless explicitly enabled by setting
+ `datasets.config.IN_MEMORY_MAX_SIZE` to nonzero. See more details in the
+ [improve performance](../cache#improve-performance) section.
+ storage_options (`dict`, *optional*):
+ Key/value pairs to be passed on to the file-system backend, if any.
+
+ Returns:
+ [`Dataset`] or [`DatasetDict`]:
+ - If `dataset_path` is a path of a dataset directory, the dataset requested.
+ - If `dataset_path` is a path of a dataset dict directory, a `datasets.DatasetDict` with each split.
+
+ Example:
+
+ ```py
+ >>> ds = load_from_disk("path/to/dataset/directory")
+ ```
+ """
+ if fs != "deprecated":
+ warnings.warn(
+ "'fs' was deprecated in favor of 'storage_options' in version 2.8.0 and will be removed in 3.0.0.\n"
+ "You can remove this warning by passing 'storage_options=fs.storage_options' instead.",
+ FutureWarning,
+ )
+ storage_options = fs.storage_options
+
+ fs: fsspec.AbstractFileSystem
+ fs, _, [dataset_path] = fsspec.get_fs_token_paths(dataset_path, storage_options=storage_options)
+
+ dest_dataset_path = dataset_path
+ dataset_dict_json_path = posixpath.join(dest_dataset_path, config.DATASETDICT_JSON_FILENAME)
+ dataset_state_json_path = posixpath.join(dest_dataset_path, config.DATASET_STATE_JSON_FILENAME)
+ dataset_info_path = posixpath.join(dest_dataset_path, config.DATASET_INFO_FILENAME)
+
+ dataset_dict_is_file = fs.isfile(dataset_dict_json_path)
+ dataset_info_is_file = fs.isfile(dataset_info_path)
+ dataset_state_is_file = fs.isfile(dataset_state_json_path)
+ if not dataset_info_is_file and not dataset_state_is_file:
+ if dataset_dict_is_file:
+ raise FileNotFoundError(
+ f"No such files: '{dataset_info_path}', nor '{dataset_state_json_path}' found. Expected to load a `Dataset` object, but got a `DatasetDict`. Please use either `datasets.load_from_disk` or `DatasetDict.load_from_disk` instead."
+ )
+ raise FileNotFoundError(
+ f"No such files: '{dataset_info_path}', nor '{dataset_state_json_path}' found. Expected to load a `Dataset` object but provided path is not a `Dataset`."
+ )
+ if not dataset_info_is_file:
+ if dataset_dict_is_file:
+ raise FileNotFoundError(
+ f"No such file: '{dataset_info_path}' found. Expected to load a `Dataset` object, but got a `DatasetDict`. Please use either `datasets.load_from_disk` or `DatasetDict.load_from_disk` instead."
+ )
+ raise FileNotFoundError(
+ f"No such file: '{dataset_info_path}'. Expected to load a `Dataset` object but provided path is not a `Dataset`."
+ )
+ if not dataset_state_is_file:
+ if dataset_dict_is_file:
+ raise FileNotFoundError(
+ f"No such file: '{dataset_state_json_path}' found. Expected to load a `Dataset` object, but got a `DatasetDict`. Please use either `datasets.load_from_disk` or `DatasetDict.load_from_disk` instead."
+ )
+ raise FileNotFoundError(
+ f"No such file: '{dataset_state_json_path}'. Expected to load a `Dataset` object but provided path is not a `Dataset`."
+ )
+
+ # if the filesystem is remote, copy the files to the local filesystem and point dataset_path to the temp directory containing the local copies
+ if is_remote_filesystem(fs):
+ src_dataset_path = dest_dataset_path
+ dest_dataset_path = Dataset._build_local_temp_path(src_dataset_path)
+ fs.download(src_dataset_path, dest_dataset_path.as_posix(), recursive=True)
+ dataset_state_json_path = posixpath.join(dest_dataset_path, config.DATASET_STATE_JSON_FILENAME)
+ dataset_info_path = posixpath.join(dest_dataset_path, config.DATASET_INFO_FILENAME)
+
+ with open(dataset_state_json_path, encoding="utf-8") as state_file:
+ state = json.load(state_file)
+ with open(dataset_info_path, encoding="utf-8") as dataset_info_file:
+ dataset_info = DatasetInfo.from_dict(json.load(dataset_info_file))
+
+ dataset_size = estimate_dataset_size(
+ Path(dest_dataset_path, data_file["filename"]) for data_file in state["_data_files"]
+ )
+ keep_in_memory = keep_in_memory if keep_in_memory is not None else is_small_dataset(dataset_size)
+ table_cls = InMemoryTable if keep_in_memory else MemoryMappedTable
+
+ arrow_table = concat_tables(
+ thread_map(
+ table_cls.from_file,
+ [posixpath.join(dest_dataset_path, data_file["filename"]) for data_file in state["_data_files"]],
+ tqdm_class=hf_tqdm,
+ desc="Loading dataset from disk",
+ # set `disable=None` rather than `disable=False` by default to disable progress bar when no TTY attached
+ disable=len(state["_data_files"]) <= 16 or None,
+ )
+ )
+
+ split = state["_split"]
+ split = Split(split) if split is not None else split
+
+ dataset = Dataset(
+ arrow_table=arrow_table,
+ info=dataset_info,
+ split=split,
+ fingerprint=state["_fingerprint"],
+ )
+
+ format = {
+ "type": state["_format_type"],
+ "format_kwargs": state["_format_kwargs"],
+ "columns": state["_format_columns"],
+ "output_all_columns": state["_output_all_columns"],
+ }
+ dataset = dataset.with_format(**format)
+
+ return dataset
+
+ @property
+ def data(self) -> Table:
+ """The Apache Arrow table backing the dataset.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> ds.data
+ MemoryMappedTable
+ text: string
+ label: int64
+ ----
+ text: [["compassionately explores the seemingly irreconcilable situation between conservative christian parents and their estranged gay and lesbian children .","the soundtrack alone is worth the price of admission .","rodriguez does a splendid job of racial profiling hollywood style--casting excellent latin actors of all ages--a trend long overdue .","beneath the film's obvious determination to shock at any cost lies considerable skill and determination , backed by sheer nerve .","bielinsky is a filmmaker of impressive talent .","so beautifully acted and directed , it's clear that washington most certainly has a new career ahead of him if he so chooses .","a visual spectacle full of stunning images and effects .","a gentle and engrossing character study .","it's enough to watch huppert scheming , with her small , intelligent eyes as steady as any noir villain , and to enjoy the perfectly pitched web of tension that chabrol spins .","an engrossing portrait of uncompromising artists trying to create something original against the backdrop of a corporate music industry that only seems to care about the bottom line .",...,"ultimately , jane learns her place as a girl , softens up and loses some of the intensity that made her an interesting character to begin with .","ah-nuld's action hero days might be over .","it's clear why deuces wild , which was shot two years ago , has been gathering dust on mgm's shelf .","feels like nothing quite so much as a middle-aged moviemaker's attempt to surround himself with beautiful , half-naked women .","when the precise nature of matthew's predicament finally comes into sharp focus , the revelation fails to justify the build-up .","this picture is murder by numbers , and as easy to be bored by as your abc's , despite a few whopping shootouts .","hilarious musical comedy though stymied by accents thick as mud .","if you are into splatter movies , then you will probably have a reasonably good time with the salton sea .","a dull , simple-minded and stereotypical tale of drugs , death and mind-numbing indifference on the inner-city streets .","the feature-length stretch . . . strains the show's concept ."]]
+ label: [[1,1,1,1,1,1,1,1,1,1,...,0,0,0,0,0,0,0,0,0,0]]
+ ```
+ """
+ return self._data
+
+ @property
+ def cache_files(self) -> List[dict]:
+ """The cache files containing the Apache Arrow table backing the dataset.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> ds.cache_files
+ [{'filename': '/root/.cache/huggingface/datasets/rotten_tomatoes_movie_review/default/1.0.0/40d411e45a6ce3484deed7cc15b82a53dad9a72aafd9f86f8f227134bec5ca46/rotten_tomatoes_movie_review-validation.arrow'}]
+ ```
+ """
+ cache_files = list_table_cache_files(self._data)
+ if self._indices is not None:
+ cache_files += list_table_cache_files(self._indices)
+ return [{"filename": cache_filename} for cache_filename in cache_files]
+
+ @property
+ def num_columns(self) -> int:
+ """Number of columns in the dataset.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> ds.num_columns
+ 2
+ ```
+ """
+ return self._data.num_columns
+
+ @property
+ def num_rows(self) -> int:
+ """Number of rows in the dataset (same as [`Dataset.__len__`]).
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> ds.num_rows
+ 1066
+ ```
+ """
+ if self._indices is not None:
+ return self._indices.num_rows
+ return self._data.num_rows
+
+ @property
+ def column_names(self) -> List[str]:
+ """Names of the columns in the dataset.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> ds.column_names
+ ['text', 'label']
+ ```
+ """
+ return self._data.column_names
+
+ @property
+ def shape(self) -> Tuple[int, int]:
+ """Shape of the dataset (number of columns, number of rows).
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> ds.shape
+ (1066, 2)
+ ```
+ """
+ if self._indices is not None:
+ return (self._indices.num_rows, self._data.num_columns)
+ return self._data.shape
+
+ def unique(self, column: str) -> List:
+ """Return a list of the unique elements in a column.
+
+ This is implemented in the low-level backend and is therefore very fast.
+
+ Args:
+ column (`str`):
+ Column name (list all the column names with [`~datasets.Dataset.column_names`]).
+
+ Returns:
+ `list`: List of unique elements in the given column.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> ds.unique('label')
+ [1, 0]
+ ```
+ """
+ if column not in self._data.column_names:
+ raise ValueError(f"Column ({column}) not in table columns ({self._data.column_names}).")
+
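+ # an indices mapping (e.g. after `select`/`shuffle`) means the table may contain rows that are not part of
+ # the dataset, so materialize the mapping first to deduplicate only over the selected rows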
+ if self._indices is not None and self._indices.num_rows != self._data.num_rows:
+ dataset = self.flatten_indices()
+ else:
+ dataset = self
+
+ return dataset._data.column(column).unique().to_pylist()
+
+ def class_encode_column(self, column: str, include_nulls: bool = False) -> "Dataset":
+ """Casts the given column as [`~datasets.features.ClassLabel`] and updates the table.
+
+ Args:
+ column (`str`):
+ The name of the column to cast (list all the column names with [`~datasets.Dataset.column_names`])
+ include_nulls (`bool`, defaults to `False`):
+ Whether to include null values in the class labels. If `True`, the null values will be encoded as the `"None"` class label.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("boolq", split="validation")
+ >>> ds.features
+ {'answer': Value(dtype='bool', id=None),
+ 'passage': Value(dtype='string', id=None),
+ 'question': Value(dtype='string', id=None)}
+ >>> ds = ds.class_encode_column('answer')
+ >>> ds.features
+ {'answer': ClassLabel(num_classes=2, names=['False', 'True'], id=None),
+ 'passage': Value(dtype='string', id=None),
+ 'question': Value(dtype='string', id=None)}
+ ```
+ """
+ # Sanity checks
+ if column not in self._data.column_names:
+ raise ValueError(f"Column ({column}) not in table columns ({self._data.column_names}).")
+ src_feat = self._info.features[column]
+ if not isinstance(src_feat, Value):
+ raise ValueError(
+ f"Class encoding is only supported for {Value.__name__} column, and column {column} is {type(src_feat).__name__}."
+ )
+
+ if src_feat.dtype != "string" or (include_nulls and None in self.unique(column)):
+
+ def stringify_column(batch):
+ batch[column] = [
+ str(sample) if include_nulls or sample is not None else None for sample in batch[column]
+ ]
+ return batch
+
+ dset = self.map(
+ stringify_column,
+ batched=True,
+ desc="Stringifying the column",
+ )
+ else:
+ dset = self
+
+ # Create the new feature
+ class_names = sorted(str(sample) for sample in dset.unique(column) if include_nulls or sample is not None)
+ dst_feat = ClassLabel(names=class_names)
+
+ def cast_to_class_labels(batch):
+ batch[column] = [
+ dst_feat.str2int(str(sample)) if include_nulls or sample is not None else None
+ for sample in batch[column]
+ ]
+ return batch
+
+ new_features = dset.features.copy()
+ new_features[column] = dst_feat
+
+ dset = dset.map(
+ cast_to_class_labels,
+ batched=True,
+ features=new_features,
+ desc="Casting to class labels",
+ )
+
+ return dset
+
+ @fingerprint_transform(inplace=False)
+ def flatten(self, new_fingerprint: Optional[str] = None, max_depth=16) -> "Dataset":
+ """Flatten the table.
+ Each column with a struct type is flattened into one column per struct field.
+ Other columns are left unchanged.
+
+ Args:
+ new_fingerprint (`str`, *optional*):
+ The new fingerprint of the dataset after transform.
+ If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments.
+
+ Returns:
+ [`Dataset`]: A copy of the dataset with flattened columns.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("squad", split="train")
+ >>> ds.features
+ {'answers': Sequence(feature={'text': Value(dtype='string', id=None), 'answer_start': Value(dtype='int32', id=None)}, length=-1, id=None),
+ 'context': Value(dtype='string', id=None),
+ 'id': Value(dtype='string', id=None),
+ 'question': Value(dtype='string', id=None),
+ 'title': Value(dtype='string', id=None)}
+ >>> ds.flatten()
+ Dataset({
+ features: ['id', 'title', 'context', 'question', 'answers.text', 'answers.answer_start'],
+ num_rows: 87599
+ })
+ ```
+ """
+ dataset = copy.deepcopy(self)
+ for depth in range(1, max_depth):
+ if any(isinstance(field.type, pa.StructType) for field in dataset._data.schema):
+ dataset._data = dataset._data.flatten()
+ else:
+ break
+ dataset.info.features = self._info.features.flatten(max_depth=max_depth)
+ dataset.info.features = Features({col: dataset.info.features[col] for col in dataset.data.column_names})
+ dataset._data = update_metadata_with_features(dataset._data, dataset.features)
+ logger.info(f'Flattened dataset from depth {depth} to depth {1 if depth + 1 < max_depth else "unknown"}.')
+ dataset._fingerprint = new_fingerprint
+ return dataset
+
+ def cast(
+ self,
+ features: Features,
+ batch_size: Optional[int] = 1000,
+ keep_in_memory: bool = False,
+ load_from_cache_file: Optional[bool] = None,
+ cache_file_name: Optional[str] = None,
+ writer_batch_size: Optional[int] = 1000,
+ num_proc: Optional[int] = None,
+ ) -> "Dataset":
+ """
+ Cast the dataset to a new set of features.
+
+ Args:
+ features ([`Features`]):
+ New features to cast the dataset to.
+ The name of the fields in the features must match the current column names.
+ The type of the data must also be convertible from one type to the other.
+ For non-trivial conversion, e.g. `str` <-> `ClassLabel`, you should use [`~datasets.Dataset.map`] to update the Dataset.
+ batch_size (`int`, defaults to `1000`):
+ Number of examples per batch provided to cast.
+ If `batch_size <= 0` or `batch_size == None` then provide the full dataset as a single batch to cast.
+ keep_in_memory (`bool`, defaults to `False`):
+ Whether to copy the data in-memory.
+ load_from_cache_file (`bool`, defaults to `True` if caching is enabled):
+ If a cache file storing the current computation from `function`
+ can be identified, use it instead of recomputing.
+ cache_file_name (`str`, *optional*, defaults to `None`):
+ Provide the name of a path for the cache file. It is used to store the
+ results of the computation instead of the automatically generated cache file name.
+ writer_batch_size (`int`, defaults to `1000`):
+ Number of rows per write operation for the cache file writer.
+ This value is a good trade-off between memory usage during the processing, and processing speed.
+ A higher value makes the processing do fewer lookups, a lower value consumes less temporary memory while running [`~datasets.Dataset.map`].
+ num_proc (`int`, *optional*, defaults to `None`):
+ Number of processes for multiprocessing. By default it doesn't
+ use multiprocessing.
+
+ Returns:
+ [`Dataset`]: A copy of the dataset with casted features.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset, ClassLabel, Value
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> ds.features
+ {'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None),
+ 'text': Value(dtype='string', id=None)}
+ >>> new_features = ds.features.copy()
+ >>> new_features['label'] = ClassLabel(names=['bad', 'good'])
+ >>> new_features['text'] = Value('large_string')
+ >>> ds = ds.cast(new_features)
+ >>> ds.features
+ {'label': ClassLabel(num_classes=2, names=['bad', 'good'], id=None),
+ 'text': Value(dtype='large_string', id=None)}
+ ```
+ """
+ if sorted(features) != sorted(self._data.column_names):
+ raise ValueError(
+ f"The columns in features ({list(features)}) must be identical "
+ f"as the columns in the dataset: {self._data.column_names}"
+ )
+
+ schema = features.arrow_schema
+ format = self.format
+ dataset = self.with_format("arrow")
+ # use a picklable `partial` rather than a lambda so the cast function can be sent to worker processes (e.g. on Windows)
+ dataset = dataset.map(
+ partial(table_cast, schema=schema),
+ batched=True,
+ batch_size=batch_size,
+ keep_in_memory=keep_in_memory,
+ load_from_cache_file=load_from_cache_file,
+ cache_file_name=cache_file_name,
+ writer_batch_size=writer_batch_size,
+ num_proc=num_proc,
+ features=features,
+ desc="Casting the dataset",
+ )
+ dataset = dataset.with_format(**format)
+ return dataset
+
+ @fingerprint_transform(inplace=False)
+ def cast_column(self, column: str, feature: FeatureType, new_fingerprint: Optional[str] = None) -> "Dataset":
+ """Cast column to feature for decoding.
+
+ Args:
+ column (`str`):
+ Column name.
+ feature (`FeatureType`):
+ Target feature.
+ new_fingerprint (`str`, *optional*):
+ The new fingerprint of the dataset after transform.
+ If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments.
+
+ Returns:
+ [`Dataset`]
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> ds.features
+ {'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None),
+ 'text': Value(dtype='string', id=None)}
+ >>> ds = ds.cast_column('label', ClassLabel(names=['bad', 'good']))
+ >>> ds.features
+ {'label': ClassLabel(num_classes=2, names=['bad', 'good'], id=None),
+ 'text': Value(dtype='string', id=None)}
+ ```
+ """
+ if hasattr(feature, "decode_example"):
+ dataset = copy.deepcopy(self)
+ dataset._info.features[column] = feature
+ dataset._fingerprint = new_fingerprint
+ dataset._data = dataset._data.cast(dataset.features.arrow_schema)
+ dataset._data = update_metadata_with_features(dataset._data, dataset.features)
+ return dataset
+ else:
+ features = self.features
+ features[column] = feature
+ return self.cast(features)
+
+ @transmit_tasks
+ @transmit_format
+ @fingerprint_transform(inplace=False)
+ def remove_columns(self, column_names: Union[str, List[str]], new_fingerprint: Optional[str] = None) -> "Dataset":
+ """
+ Remove one or several column(s) in the dataset and the features associated to them.
+
+ You can also remove a column using [`~datasets.Dataset.map`] with `remove_columns` but the present method
+ is in-place (doesn't copy the data to a new dataset) and is thus faster.
+
+ Args:
+ column_names (`Union[str, List[str]]`):
+ Name of the column(s) to remove.
+ new_fingerprint (`str`, *optional*):
+ The new fingerprint of the dataset after transform.
+ If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments.
+
+ Returns:
+ [`Dataset`]: A copy of the dataset object without the columns to remove.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> ds.remove_columns('label')
+ Dataset({
+ features: ['text'],
+ num_rows: 1066
+ })
+ >>> ds.remove_columns(column_names=ds.column_names) # Removing all the columns returns an empty dataset with the `num_rows` property set to 0
+ Dataset({
+ features: [],
+ num_rows: 0
+ })
+ ```
+ """
+ dataset = copy.deepcopy(self)
+ if isinstance(column_names, str):
+ column_names = [column_names]
+
+ missing_columns = set(column_names) - set(self._data.column_names)
+ if missing_columns:
+ raise ValueError(
+ f"Column name {list(missing_columns)} not in the dataset. "
+ f"Current columns in the dataset: {dataset._data.column_names}"
+ )
+
+ for column_name in column_names:
+ del dataset._info.features[column_name]
+
+ dataset._data = dataset._data.drop(column_names)
+ dataset._data = update_metadata_with_features(dataset._data, dataset.features)
+ dataset._fingerprint = new_fingerprint
+ return dataset
+
+ @transmit_tasks
+ @fingerprint_transform(inplace=False)
+ def rename_column(
+ self, original_column_name: str, new_column_name: str, new_fingerprint: Optional[str] = None
+ ) -> "Dataset":
+ """
+ Rename a column in the dataset, and move the features associated to the original column under the new column
+ name.
+
+ Args:
+ original_column_name (`str`):
+ Name of the column to rename.
+ new_column_name (`str`):
+ New name for the column.
+ new_fingerprint (`str`, *optional*):
+ The new fingerprint of the dataset after transform.
+ If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments.
+
+ Returns:
+ [`Dataset`]: A copy of the dataset with a renamed column.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> ds.rename_column('label', 'label_new')
+ Dataset({
+ features: ['text', 'label_new'],
+ num_rows: 1066
+ })
+ ```
+ """
+ dataset = copy.deepcopy(self)
+ if original_column_name not in dataset._data.column_names:
+ raise ValueError(
+ f"Original column name {original_column_name} not in the dataset. "
+ f"Current columns in the dataset: {dataset._data.column_names}"
+ )
+ if new_column_name in dataset._data.column_names:
+ raise ValueError(
+ f"New column name {new_column_name} already in the dataset. "
+ f"Please choose a column name which is not already in the dataset. "
+ f"Current columns in the dataset: {dataset._data.column_names}"
+ )
+ if not new_column_name:
+ raise ValueError("New column name is empty.")
+
+ def rename(columns):
+ return [new_column_name if col == original_column_name else col for col in columns]
+
+ new_column_names = rename(self._data.column_names)
+ if self._format_columns is not None:
+ dataset._format_columns = rename(self._format_columns)
+
+ dataset._info.features = Features(
+ {
+ new_column_name if col == original_column_name else col: feature
+ for col, feature in self._info.features.items()
+ }
+ )
+
+ dataset._data = dataset._data.rename_columns(new_column_names)
+ dataset._data = update_metadata_with_features(dataset._data, dataset.features)
+ dataset._fingerprint = new_fingerprint
+ return dataset
+
+ @transmit_tasks
+ @fingerprint_transform(inplace=False)
+ def rename_columns(self, column_mapping: Dict[str, str], new_fingerprint: Optional[str] = None) -> "Dataset":
+ """
+ Rename several columns in the dataset, and move the features associated to the original columns under
+ the new column names.
+
+ Args:
+ column_mapping (`Dict[str, str]`):
+ A mapping of columns to rename to their new names
+ new_fingerprint (`str`, *optional*):
+ The new fingerprint of the dataset after transform.
+ If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments.
+
+ Returns:
+ [`Dataset`]: A copy of the dataset with renamed columns
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> ds.rename_columns({'text': 'text_new', 'label': 'label_new'})
+ Dataset({
+ features: ['text_new', 'label_new'],
+ num_rows: 1066
+ })
+ ```
+ """
+ dataset = copy.deepcopy(self)
+
+ extra_columns = set(column_mapping.keys()) - set(dataset.column_names)
+ if extra_columns:
+ raise ValueError(
+ f"Original column names {extra_columns} not in the dataset. "
+ f"Current columns in the dataset: {dataset._data.column_names}"
+ )
+
+ number_of_duplicates_in_new_columns = len(column_mapping.values()) - len(set(column_mapping.values()))
+ if number_of_duplicates_in_new_columns != 0:
+ raise ValueError(
+ "New column names must all be different, but this column mapping "
+ f"has {number_of_duplicates_in_new_columns} duplicates"
+ )
+
+ empty_new_columns = [new_col for new_col in column_mapping.values() if not new_col]
+ if empty_new_columns:
+ raise ValueError(f"New column names {empty_new_columns} are empty.")
+
+ def rename(columns):
+ return [column_mapping[col] if col in column_mapping else col for col in columns]
+
+ new_column_names = rename(self._data.column_names)
+ if self._format_columns is not None:
+ dataset._format_columns = rename(self._format_columns)
+
+ dataset._info.features = Features(
+ {
+ column_mapping[col] if col in column_mapping else col: feature
+ for col, feature in (self._info.features or {}).items()
+ }
+ )
+
+ dataset._data = dataset._data.rename_columns(new_column_names)
+ dataset._data = update_metadata_with_features(dataset._data, dataset.features)
+ dataset._fingerprint = new_fingerprint
+ return dataset
+
+ @transmit_tasks
+ @transmit_format
+ @fingerprint_transform(inplace=False)
+ def select_columns(self, column_names: Union[str, List[str]], new_fingerprint: Optional[str] = None) -> "Dataset":
+ """Select one or several column(s) in the dataset and the features
+ associated to them.
+
+ Args:
+ column_names (`Union[str, List[str]]`):
+ Name of the column(s) to keep.
+ new_fingerprint (`str`, *optional*):
+ The new fingerprint of the dataset after transform. If `None`,
+ the new fingerprint is computed using a hash of the previous
+ fingerprint, and the transform arguments.
+
+ Returns:
+ [`Dataset`]: A copy of the dataset object which only consists of
+ selected columns.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> ds.select_columns(['text'])
+ Dataset({
+ features: ['text'],
+ num_rows: 1066
+ })
+ ```
+ """
+ if isinstance(column_names, str):
+ column_names = [column_names]
+
+ missing_columns = set(column_names) - set(self._data.column_names)
+ if missing_columns:
+ raise ValueError(
+ f"Column name {list(missing_columns)} not in the "
+ "dataset. Current columns in the dataset: "
+ f"{self._data.column_names}."
+ )
+
+ dataset = copy.deepcopy(self)
+ dataset._data = dataset._data.select(column_names)
+ dataset._info.features = Features({col: self._info.features[col] for col in dataset._data.column_names})
+ dataset._data = update_metadata_with_features(dataset._data, dataset.features)
+ dataset._fingerprint = new_fingerprint
+ return dataset
+
+ def __len__(self):
+ """Number of rows in the dataset.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> ds.__len__()
+ 1066
+ ```
+ """
+ return self.num_rows
+
+ def __iter__(self):
+ """Iterate through the examples.
+
+ If a formatting is set with :meth:`Dataset.set_format` rows will be returned with the
+ selected format.
+ """
+ if self._indices is None:
+ # Fast iteration
+ # Benchmark: https://gist.github.com/mariosasko/0248288a2e3a7556873969717c1fe52b (fast_iter_batch)
+ format_kwargs = self._format_kwargs if self._format_kwargs is not None else {}
+ formatter = get_formatter(self._format_type, features=self._info.features, **format_kwargs)
+ batch_size = config.ARROW_READER_BATCH_SIZE_IN_DATASET_ITER
+ for pa_subtable in table_iter(self.data, batch_size=batch_size):
+ for i in range(pa_subtable.num_rows):
+ pa_subtable_ex = pa_subtable.slice(i, 1)
+ formatted_output = format_table(
+ pa_subtable_ex,
+ 0,
+ formatter=formatter,
+ format_columns=self._format_columns,
+ output_all_columns=self._output_all_columns,
+ )
+ yield formatted_output
+ else:
+ for i in range(self.num_rows):
+ yield self._getitem(
+ i,
+ )
+
+ def iter(self, batch_size: int, drop_last_batch: bool = False):
+ """Iterate through the batches of size `batch_size`.
+
+ If a formatting is set with [`~datasets.Dataset.set_format`] rows will be returned with the
+ selected format.
+
+ Args:
+ batch_size (:obj:`int`): size of each batch to yield.
+ drop_last_batch (:obj:`bool`, default `False`): Whether a last batch smaller than the batch_size should be
+ dropped
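+
+ Example (a minimal illustration; with the default format, each batch is a `dict` mapping column names to lists):
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> for batch in ds.iter(batch_size=4):
+ ...     print(len(batch["text"]))
+ ...     break
+ 4
+ ```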
+ """
+ if self._indices is None:
+ # Fast iteration
+ # Benchmark: https://gist.github.com/mariosasko/0248288a2e3a7556873969717c1fe52b (fast_iter_batch)
+ format_kwargs = self._format_kwargs if self._format_kwargs is not None else {}
+ formatter = get_formatter(self._format_type, features=self._info.features, **format_kwargs)
+ for pa_subtable in table_iter(self.data, batch_size=batch_size, drop_last_batch=drop_last_batch):
+ formatted_batch = format_table(
+ pa_subtable,
+ range(pa_subtable.num_rows),
+ formatter=formatter,
+ format_columns=self._format_columns,
+ output_all_columns=self._output_all_columns,
+ )
+ yield formatted_batch
+ else:
+ num_rows = self.num_rows if not drop_last_batch else self.num_rows // batch_size * batch_size
+ for i in range(0, num_rows, batch_size):
+ yield self._getitem(
+ slice(i, i + batch_size),
+ )
+
+ def __repr__(self):
+ return f"Dataset({{\n features: {list(self._info.features.keys())},\n num_rows: {self.num_rows}\n}})"
+
+ @property
+ def format(self):
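+ """The current output format of the dataset, as a `dict` with the keys `type`, `format_kwargs`, `columns`
+ and `output_all_columns`. The returned `dict` can be passed back to [`~datasets.Dataset.set_format`] or
+ [`~datasets.Dataset.with_format`], e.g. `ds.set_format(**ds.format)`.
+ """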
+ return {
+ "type": self._format_type,
+ "format_kwargs": self._format_kwargs,
+ "columns": self.column_names if self._format_columns is None else self._format_columns,
+ "output_all_columns": self._output_all_columns,
+ }
+
+ @contextlib.contextmanager
+ def formatted_as(
+ self,
+ type: Optional[str] = None,
+ columns: Optional[List] = None,
+ output_all_columns: bool = False,
+ **format_kwargs,
+ ):
+ """To be used in a `with` statement. Set `__getitem__` return format (type and columns).
+
+ Args:
+ type (`str`, *optional*):
+ Output type selected in `[None, 'numpy', 'torch', 'tensorflow', 'pandas', 'arrow', 'jax']`.
+ `None` means `__getitem__` returns python objects (default).
+ columns (`List[str]`, *optional*):
+ Columns to format in the output.
+ `None` means `__getitem__` returns all columns (default).
+ output_all_columns (`bool`, defaults to `False`):
+ Keep un-formatted columns as well in the output (as python objects).
+ **format_kwargs (additional keyword arguments):
+ Keywords arguments passed to the convert function like `np.array`, `torch.tensor` or `tensorflow.ragged.constant`.
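+
+ Example (a minimal sketch; the original format is restored when the `with` block exits):
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> with ds.formatted_as(type="numpy", columns=["label"]):
+ ...     mean_label = ds["label"].mean()  # inside the block, columns are returned as NumPy arrays
+ ```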
+ """
+ old_format_type = self._format_type
+ old_format_kwargs = self._format_kwargs
+ old_format_columns = self._format_columns
+ old_output_all_columns = self._output_all_columns
+ try:
+ self.set_format(type, columns, output_all_columns, **format_kwargs)
+ yield
+ finally:
+ self.set_format(old_format_type, old_format_columns, old_output_all_columns, **old_format_kwargs)
+
+ @fingerprint_transform(inplace=True)
+ def set_format(
+ self,
+ type: Optional[str] = None,
+ columns: Optional[List] = None,
+ output_all_columns: bool = False,
+ **format_kwargs,
+ ):
+ """Set `__getitem__` return format (type and columns). The data formatting is applied on-the-fly.
+ The format `type` (for example "numpy") is used to format batches when using `__getitem__`.
+ It's also possible to use custom transforms for formatting using [`~datasets.Dataset.set_transform`].
+
+ Args:
+ type (`str`, *optional*):
+ Either output type selected in `[None, 'numpy', 'torch', 'tensorflow', 'pandas', 'arrow', 'jax']`.
+ `None` means `__getitem__` returns python objects (default).
+ columns (`List[str]`, *optional*):
+ Columns to format in the output.
+ `None` means `__getitem__` returns all columns (default).
+ output_all_columns (`bool`, defaults to `False`):
+ Keep un-formatted columns as well in the output (as python objects).
+ **format_kwargs (additional keyword arguments):
+ Keywords arguments passed to the convert function like `np.array`, `torch.tensor` or `tensorflow.ragged.constant`.
+
+ It is possible to call [`~datasets.Dataset.map`] after calling `set_format`. Since `map` may add new columns, the list of formatted columns
+ gets updated. In this case, if you apply `map` on a dataset to add a new column, then this column will be formatted as:
+
+ ```
+ new formatted columns = (all columns - previously unformatted columns)
+ ```
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> from transformers import AutoTokenizer
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
+ >>> ds = ds.map(lambda x: tokenizer(x['text'], truncation=True, padding=True), batched=True)
+ >>> ds.set_format(type='numpy', columns=['text', 'label'])
+ >>> ds.format
+ {'type': 'numpy',
+ 'format_kwargs': {},
+ 'columns': ['text', 'label'],
+ 'output_all_columns': False}
+ ```
+ """
+ format_kwargs.update(format_kwargs.pop("format_kwargs", {})) # allow to use self.set_format(**self.format)
+
+ # Check that the format_type and format_kwargs are valid and make it possible to have a Formatter
+ type = get_format_type_from_alias(type)
+ get_formatter(type, features=self._info.features, **format_kwargs)
+
+ # Check filter column
+ if isinstance(columns, str):
+ columns = [columns]
+ if isinstance(columns, tuple):
+ columns = list(columns)
+ if columns is not None:
+ missing_columns = set(columns) - set(self._data.column_names)
+ if missing_columns:
+ raise ValueError(
+ f"Columns {list(missing_columns)} not in the dataset. Current columns in the dataset: {self._data.column_names}"
+ )
+ if columns is not None:
+ columns = columns.copy() # Ensures modifications made to the list after this call don't cause bugs
+
+ self._format_type = type
+ self._format_kwargs = format_kwargs
+ self._format_columns = columns
+ self._output_all_columns = output_all_columns
+ logger.debug(
+ "Set __getitem__(key) output type to %s for %s columns "
+ " (when key is int or slice) and %s output other (un-formatted) columns.",
+ "python objects" if type is None else type,
+ "no" if columns is None else str(columns),
+ "do" if output_all_columns else "don't",
+ )
+
+ def reset_format(self):
+ """Reset `__getitem__` return format to python objects and all columns.
+
+ Same as `self.set_format()`
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> from transformers import AutoTokenizer
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
+ >>> ds = ds.map(lambda x: tokenizer(x['text'], truncation=True, padding=True), batched=True)
+ >>> ds.set_format(type='numpy', columns=['input_ids', 'token_type_ids', 'attention_mask', 'label'])
+ >>> ds.format
+ {'columns': ['input_ids', 'token_type_ids', 'attention_mask', 'label'],
+ 'format_kwargs': {},
+ 'output_all_columns': False,
+ 'type': 'numpy'}
+ >>> ds.reset_format()
+ >>> ds.format
+ {'columns': ['text', 'label', 'input_ids', 'token_type_ids', 'attention_mask'],
+ 'format_kwargs': {},
+ 'output_all_columns': False,
+ 'type': None}
+ ```
+ """
+ self.set_format()
+
+ def set_transform(
+ self,
+ transform: Optional[Callable],
+ columns: Optional[List] = None,
+ output_all_columns: bool = False,
+ ):
+ """Set `__getitem__` return format using this transform. The transform is applied on-the-fly on batches when `__getitem__` is called.
+ As [`~datasets.Dataset.set_format`], this can be reset using [`~datasets.Dataset.reset_format`].
+
+ Args:
+ transform (`Callable`, *optional*):
+ User-defined formatting transform, replaces the format defined by [`~datasets.Dataset.set_format`].
+ A formatting function is a callable that takes a batch (as a `dict`) as input and returns a batch.
+ This function is applied right before returning the objects in `__getitem__`.
+ columns (`List[str]`, *optional*):
+ Columns to format in the output.
+ If specified, then the input batch of the transform only contains those columns.
+ output_all_columns (`bool`, defaults to `False`):
+ Keep un-formatted columns as well in the output (as python objects).
+ If set to True, then the other un-formatted columns are kept with the output of the transform.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> from transformers import AutoTokenizer
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')
+ >>> def encode(batch):
+ ... return tokenizer(batch['text'], padding=True, truncation=True, return_tensors='pt')
+ >>> ds.set_transform(encode)
+ >>> ds[0]
+ {'attention_mask': tensor([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1]),
+ 'input_ids': tensor([ 101, 29353, 2135, 15102, 1996, 9428, 20868, 2890, 8663, 6895,
+ 20470, 2571, 3663, 2090, 4603, 3017, 3008, 1998, 2037, 24211,
+ 5637, 1998, 11690, 2336, 1012, 102]),
+ 'token_type_ids': tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0])}
+ ```
+ """
+ self.set_format("custom", columns=columns, output_all_columns=output_all_columns, transform=transform)
+
+ def with_format(
+ self,
+ type: Optional[str] = None,
+ columns: Optional[List] = None,
+ output_all_columns: bool = False,
+ **format_kwargs,
+ ):
+ """Set `__getitem__` return format (type and columns). The data formatting is applied on-the-fly.
+ The format `type` (for example "numpy") is used to format batches when using `__getitem__`.
+
+ It's also possible to use custom transforms for formatting using [`~datasets.Dataset.with_transform`].
+
+ Contrary to [`~datasets.Dataset.set_format`], `with_format` returns a new [`Dataset`] object.
+
+ Args:
+ type (`str`, *optional*):
+ Either output type selected in `[None, 'numpy', 'torch', 'tensorflow', 'pandas', 'arrow', 'jax']`.
+ `None` means `__getitem__` returns python objects (default).
+ columns (`List[str]`, *optional*):
+ Columns to format in the output.
+ `None` means `__getitem__` returns all columns (default).
+ output_all_columns (`bool`, defaults to `False`):
+ Keep un-formatted columns as well in the output (as python objects).
+ **format_kwargs (additional keyword arguments):
+ Keywords arguments passed to the convert function like `np.array`, `torch.tensor` or `tensorflow.ragged.constant`.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> from transformers import AutoTokenizer
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
+ >>> ds = ds.map(lambda x: tokenizer(x['text'], truncation=True, padding=True), batched=True)
+ >>> ds.format
+ {'columns': ['text', 'label', 'input_ids', 'token_type_ids', 'attention_mask'],
+ 'format_kwargs': {},
+ 'output_all_columns': False,
+ 'type': None}
+ >>> ds = ds.with_format(type='tensorflow', columns=['input_ids', 'token_type_ids', 'attention_mask', 'label'])
+ >>> ds.format
+ {'columns': ['input_ids', 'token_type_ids', 'attention_mask', 'label'],
+ 'format_kwargs': {},
+ 'output_all_columns': False,
+ 'type': 'tensorflow'}
+ ```
+ """
+ dataset = copy.deepcopy(self)
+ dataset.set_format(type=type, columns=columns, output_all_columns=output_all_columns, **format_kwargs)
+ return dataset
+
+ def with_transform(
+ self,
+ transform: Optional[Callable],
+ columns: Optional[List] = None,
+ output_all_columns: bool = False,
+ ):
+ """Set `__getitem__` return format using this transform. The transform is applied on-the-fly on batches when `__getitem__` is called.
+
+ As [`~datasets.Dataset.set_format`], this can be reset using [`~datasets.Dataset.reset_format`].
+
+ Contrary to [`~datasets.Dataset.set_transform`], `with_transform` returns a new [`Dataset`] object.
+
+ Args:
+ transform (`Callable`, `optional`):
+ User-defined formatting transform, replaces the format defined by [`~datasets.Dataset.set_format`].
+ A formatting function is a callable that takes a batch (as a `dict`) as input and returns a batch.
+ This function is applied right before returning the objects in `__getitem__`.
+ columns (`List[str]`, `optional`):
+ Columns to format in the output.
+ If specified, then the input batch of the transform only contains those columns.
+ output_all_columns (`bool`, defaults to `False`):
+ Keep un-formatted columns as well in the output (as python objects).
+ If set to `True`, then the other un-formatted columns are kept with the output of the transform.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> from transformers import AutoTokenizer
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
+ >>> def encode(example):
+ ... return tokenizer(example["text"], padding=True, truncation=True, return_tensors='pt')
+ >>> ds = ds.with_transform(encode)
+ >>> ds[0]
+ {'attention_mask': tensor([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1]),
+ 'input_ids': tensor([ 101, 18027, 16310, 16001, 1103, 9321, 178, 11604, 7235, 6617,
+ 1742, 2165, 2820, 1206, 6588, 22572, 12937, 1811, 2153, 1105,
+ 1147, 12890, 19587, 6463, 1105, 15026, 1482, 119, 102]),
+ 'token_type_ids': tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0])}
+ ```
+ """
+ dataset = copy.deepcopy(self)
+ dataset.set_transform(transform=transform, columns=columns, output_all_columns=output_all_columns)
+ return dataset
+
+ @deprecated()
+ def prepare_for_task(self, task: Union[str, TaskTemplate], id: int = 0) -> "Dataset":
+ """
+ Prepare a dataset for the given task by casting the dataset's [`Features`] to standardized column names and types as detailed in [`datasets.tasks`](./task_templates).
+
+ Casts [`datasets.DatasetInfo.features`] according to a task-specific schema. Intended for single-use only, so all task templates are removed from [`datasets.DatasetInfo.task_templates`] after casting.
+
+ Args:
+ task (`Union[str, TaskTemplate]`):
+ The task to prepare the dataset for during training and evaluation. If `str`, supported tasks include:
+
+ - `"text-classification"`
+ - `"question-answering"`
+
+ If [`TaskTemplate`], must be one of the task templates in [`datasets.tasks`](./task_templates).
+ id (`int`, defaults to `0`):
+ The id required to unambiguously identify the task template when multiple task templates of the same type are supported.
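+
+ Example (illustrative; requires a dataset whose `info.task_templates` contains a matching template):
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> ds = ds.prepare_for_task("text-classification")  # doctest: +SKIP
+ ```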
+ """
+ # TODO(lewtun): Add support for casting nested features like answers.text and answers.answer_start in SQuAD
+ if isinstance(task, str):
+ tasks = [template.task for template in (self.info.task_templates or [])]
+ compatible_templates = [template for template in (self.info.task_templates or []) if template.task == task]
+ if not compatible_templates:
+ raise ValueError(
+ f"Task {task} is not compatible with this dataset! Available tasks: {list(unique_values(tasks))}"
+ )
+
+ if not 0 <= id < len(compatible_templates):
+ templates_list_str = "\n".join(
+ f"- `{idx}` for task {template}" for idx, template in enumerate(compatible_templates)
+ )
+ raise ValueError(
+ f"Id {id} for task {task} is not in a valid range. Supported ids:\n{templates_list_str}"
+ )
+ template = compatible_templates[id]
+ elif isinstance(task, TaskTemplate):
+ template = task
+ else:
+ raise ValueError(
+ f"Expected a `str` or `datasets.TaskTemplate` object but got task {task} with type {type(task)}."
+ )
+ template = template.align_with_features(self.info.features)
+ column_mapping = template.column_mapping
+ columns_to_drop = [column for column in self.column_names if column not in column_mapping]
+ dataset = self.remove_columns(columns_to_drop)
+ dataset = dataset.rename_columns(column_mapping)
+ # We found a template so now flush `DatasetInfo` to skip the template update in `DatasetInfo.__post_init__`
+ dataset.info.task_templates = None
+ dataset = dataset.cast(features=template.features)
+ return dataset
+
+ def _getitem(self, key: Union[int, slice, str, ListLike[int]], **kwargs) -> Union[Dict, List]:
+ """
+ Can be used to index columns (by string names) or rows (by integer, slice, or list-like of integer indices)
+ """
+ if isinstance(key, bool):
+ raise TypeError("dataset index must be int, str, slice or collection of int, not bool")
+ format_type = kwargs["format_type"] if "format_type" in kwargs else self._format_type
+ format_columns = kwargs["format_columns"] if "format_columns" in kwargs else self._format_columns
+ output_all_columns = (
+ kwargs["output_all_columns"] if "output_all_columns" in kwargs else self._output_all_columns
+ )
+ format_kwargs = kwargs["format_kwargs"] if "format_kwargs" in kwargs else self._format_kwargs
+ format_kwargs = format_kwargs if format_kwargs is not None else {}
+ formatter = get_formatter(format_type, features=self._info.features, **format_kwargs)
+ pa_subtable = query_table(self._data, key, indices=self._indices)
+ formatted_output = format_table(
+ pa_subtable, key, formatter=formatter, format_columns=format_columns, output_all_columns=output_all_columns
+ )
+ return formatted_output
+
+ @overload
+ def __getitem__(self, key: Union[int, slice, Iterable[int]]) -> Dict: # noqa: F811
+ ...
+
+ @overload
+ def __getitem__(self, key: str) -> List: # noqa: F811
+ ...
+
+ def __getitem__(self, key): # noqa: F811
+ """Can be used to index columns (by string names) or rows (by integer index or iterable of indices or bools)."""
+ return self._getitem(key)
+
+ def __getitems__(self, keys: List) -> List:
+ """Can be used to get a batch using a list of integers indices."""
+ batch = self.__getitem__(keys)
+ n_examples = len(batch[next(iter(batch))])
+ return [{col: array[i] for col, array in batch.items()} for i in range(n_examples)]
+
+ def cleanup_cache_files(self) -> int:
+ """Clean up all cache files in the dataset cache directory, excepted the currently used cache file if there is
+ one.
+
+ Be careful when running this command that no other process is currently using other cache files.
+
+ Returns:
+ `int`: Number of removed files.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> ds.cleanup_cache_files()
+ 10
+ ```
+ """
+ current_cache_files = [os.path.abspath(cache_file["filename"]) for cache_file in self.cache_files]
+ if not current_cache_files:
+ return 0
+ cache_directory = os.path.dirname(current_cache_files[0])
+ logger.info(f"Listing files in {cache_directory}")
+ files: List[str] = os.listdir(cache_directory)
+ files_to_remove = []
+ for f_name in files:
+ full_name = os.path.abspath(os.path.join(cache_directory, f_name))
+ if f_name.startswith("cache-") and f_name.endswith(".arrow"):
+ if full_name in current_cache_files:
+ logger.info(f"Keeping currently used cache file at {full_name}")
+ continue
+ files_to_remove.append(full_name)
+ for file_path in files_to_remove:
+ logger.info(f"Removing {file_path}")
+ os.remove(file_path)
+ return len(files_to_remove)
+
+ def _get_cache_file_path(self, fingerprint):
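+ # Reuse the directory of the existing cache files and derive the file name from the fingerprint so that the
+ # same transform maps to the same cache file; fall back to a random name in the temporary cache directory
+ # when caching is disabled or the dataset has no cache files (e.g. it lives only in memory).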
+ if is_caching_enabled() and self.cache_files:
+ cache_file_name = "cache-" + fingerprint + ".arrow"
+ cache_directory = os.path.dirname(self.cache_files[0]["filename"])
+ else:
+ cache_file_name = "cache-" + generate_random_fingerprint() + ".arrow"
+ cache_directory = get_temporary_cache_files_directory()
+ cache_file_path = os.path.join(cache_directory, cache_file_name)
+ return cache_file_path
+
+ @transmit_tasks
+ @transmit_format
+ def map(
+ self,
+ function: Optional[Callable] = None,
+ with_indices: bool = False,
+ with_rank: bool = False,
+ input_columns: Optional[Union[str, List[str]]] = None,
+ batched: bool = False,
+ batch_size: Optional[int] = 1000,
+ drop_last_batch: bool = False,
+ remove_columns: Optional[Union[str, List[str]]] = None,
+ keep_in_memory: bool = False,
+ load_from_cache_file: Optional[bool] = None,
+ cache_file_name: Optional[str] = None,
+ writer_batch_size: Optional[int] = 1000,
+ features: Optional[Features] = None,
+ disable_nullable: bool = False,
+ fn_kwargs: Optional[dict] = None,
+ num_proc: Optional[int] = None,
+ suffix_template: str = "_{rank:05d}_of_{num_proc:05d}",
+ new_fingerprint: Optional[str] = None,
+ desc: Optional[str] = None,
+ ) -> "Dataset":
+ """
+ Apply a function to all the examples in the table (individually or in batches) and update the table.
+ If your function returns a column that already exists, then it overwrites it.
+
+ You can specify whether the function should be batched or not with the `batched` parameter:
+
+ - If batched is `False`, then the function takes 1 example in and should return 1 example.
+ An example is a dictionary, e.g. `{"text": "Hello there !"}`.
+ - If batched is `True` and `batch_size` is 1, then the function takes a batch of 1 example as input and can return a batch with 1 or more examples.
+ A batch is a dictionary, e.g. a batch of 1 example is `{"text": ["Hello there !"]}`.
+ - If batched is `True` and `batch_size` is `n > 1`, then the function takes a batch of `n` examples as input and can return a batch with `n` examples, or with an arbitrary number of examples.
+ Note that the last batch may have less than `n` examples.
+ A batch is a dictionary, e.g. a batch of `n` examples is `{"text": ["Hello there !"] * n}`.
+
+ Args:
+ function (`Callable`): Function with one of the following signatures:
+
+ - `function(example: Dict[str, Any]) -> Dict[str, Any]` if `batched=False` and `with_indices=False` and `with_rank=False`
+ - `function(example: Dict[str, Any], *extra_args) -> Dict[str, Any]` if `batched=False` and `with_indices=True` and/or `with_rank=True` (one extra arg for each)
+ - `function(batch: Dict[str, List]) -> Dict[str, List]` if `batched=True` and `with_indices=False` and `with_rank=False`
+ - `function(batch: Dict[str, List], *extra_args) -> Dict[str, List]` if `batched=True` and `with_indices=True` and/or `with_rank=True` (one extra arg for each)
+
+ For advanced usage, the function can also return a `pyarrow.Table`.
+ Moreover if your function returns nothing (`None`), then `map` will run your function and return the dataset unchanged.
+ If no function is provided, default to identity function: `lambda x: x`.
+ with_indices (`bool`, defaults to `False`):
+ Provide example indices to `function`. Note that in this case the
+ signature of `function` should be `def function(example, idx[, rank]): ...`.
+ with_rank (`bool`, defaults to `False`):
+ Provide process rank to `function`. Note that in this case the
+ signature of `function` should be `def function(example[, idx], rank): ...`.
+ input_columns (`Optional[Union[str, List[str]]]`, defaults to `None`):
+ The columns to be passed into `function`
+ as positional arguments. If `None`, a `dict` mapping to all formatted columns is passed as one argument.
+ batched (`bool`, defaults to `False`):
+ Provide batch of examples to `function`.
+ batch_size (`int`, *optional*, defaults to `1000`):
+ Number of examples per batch provided to `function` if `batched=True`.
+ If `batch_size <= 0` or `batch_size == None`, provide the full dataset as a single batch to `function`.
+ drop_last_batch (`bool`, defaults to `False`):
+ Whether a last batch smaller than the batch_size should be
+ dropped instead of being processed by the function.
+ remove_columns (`Optional[Union[str, List[str]]]`, defaults to `None`):
+ Remove a selection of columns while doing the mapping.
+ Columns will be removed before updating the examples with the output of `function`, i.e. if `function` is adding
+ columns with names in `remove_columns`, these columns will be kept.
+ keep_in_memory (`bool`, defaults to `False`):
+ Keep the dataset in memory instead of writing it to a cache file.
+ load_from_cache_file (`Optional[bool]`, defaults to `True` if caching is enabled):
+ If a cache file storing the current computation from `function`
+ can be identified, use it instead of recomputing.
+ cache_file_name (`str`, *optional*, defaults to `None`):
+ Provide the name of a path for the cache file. It is used to store the
+ results of the computation instead of the automatically generated cache file name.
+ writer_batch_size (`int`, defaults to `1000`):
+ Number of rows per write operation for the cache file writer.
+ This value is a good trade-off between memory usage during the processing, and processing speed.
+ A higher value makes the processing do fewer lookups, a lower value consumes less temporary memory while running `map`.
+ features (`Optional[datasets.Features]`, defaults to `None`):
+ Use a specific Features to store the cache file
+ instead of the automatically generated one.
+ disable_nullable (`bool`, defaults to `False`):
+ Disallow null values in the table.
+ fn_kwargs (`Dict`, *optional*, defaults to `None`):
+ Keyword arguments to be passed to `function`.
+ num_proc (`int`, *optional*, defaults to `None`):
+ Max number of processes when generating cache. Already cached shards are loaded sequentially.
+ suffix_template (`str`):
+ If `cache_file_name` is specified, then this suffix
+ will be added at the end of the base name of each shard's cache file. Defaults to `"_{rank:05d}_of_{num_proc:05d}"`. For example, if `cache_file_name` is "processed.arrow", then for
+ `rank=1` and `num_proc=4`, the resulting file would be `"processed_00001_of_00004.arrow"` for the default suffix.
+ new_fingerprint (`str`, *optional*, defaults to `None`):
+ The new fingerprint of the dataset after transform.
+ If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments.
+ desc (`str`, *optional*, defaults to `None`):
+ Meaningful description to be displayed alongside with the progress bar while mapping examples.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> def add_prefix(example):
+ ... example["text"] = "Review: " + example["text"]
+ ... return example
+ >>> ds = ds.map(add_prefix)
+ >>> ds[0:3]["text"]
+ ['Review: compassionately explores the seemingly irreconcilable situation between conservative christian parents and their estranged gay and lesbian children .',
+ 'Review: the soundtrack alone is worth the price of admission .',
+ 'Review: rodriguez does a splendid job of racial profiling hollywood style--casting excellent latin actors of all ages--a trend long overdue .']
+
+ # process a batch of examples
+ >>> ds = ds.map(lambda example: tokenizer(example["text"]), batched=True)
+ # set number of processors
+ >>> ds = ds.map(add_prefix, num_proc=4)
+ ```
+ """
+ if keep_in_memory and cache_file_name is not None:
+ raise ValueError("Please use either `keep_in_memory` or `cache_file_name` but not both.")
+
+ if num_proc is not None and num_proc <= 0:
+ raise ValueError("num_proc must be an integer > 0.")
+
+ # If the array is empty we do nothing (but we make sure to handle an empty indices mapping and remove the requested columns anyway)
+ if len(self) == 0:
+ if self._indices is not None: # empty indices mapping
+ self = Dataset(
+ self.data.slice(0, 0),
+ info=self.info.copy(),
+ split=self.split,
+ fingerprint=new_fingerprint,
+ )
+ if remove_columns:
+ return self.remove_columns(remove_columns)
+ else:
+ return self
+
+ if function is None:
+ function = lambda x: x # noqa: E731
+
+ if isinstance(input_columns, str):
+ input_columns = [input_columns]
+
+ if input_columns is not None:
+ missing_columns = set(input_columns) - set(self._data.column_names)
+ if missing_columns:
+ raise ValueError(
+ f"Input column {list(missing_columns)} not in the dataset. Current columns in the dataset: {self._data.column_names}"
+ )
+
+ if isinstance(remove_columns, str):
+ remove_columns = [remove_columns]
+
+ if remove_columns is not None:
+ missing_columns = set(remove_columns) - set(self._data.column_names)
+ if missing_columns:
+ raise ValueError(
+ f"Column to remove {list(missing_columns)} not in the dataset. Current columns in the dataset: {self._data.column_names}"
+ )
+
+ load_from_cache_file = load_from_cache_file if load_from_cache_file is not None else is_caching_enabled()
+
+ if fn_kwargs is None:
+ fn_kwargs = {}
+
+ if num_proc is not None and num_proc > len(self):
+ num_proc = len(self)
+ logger.warning(
+ f"num_proc must be <= {len(self)}. Reducing num_proc to {num_proc} for dataset of size {len(self)}."
+ )
+
+ dataset_kwargs = {
+ "shard": self,
+ "function": function,
+ "with_indices": with_indices,
+ "with_rank": with_rank,
+ "input_columns": input_columns,
+ "batched": batched,
+ "batch_size": batch_size,
+ "drop_last_batch": drop_last_batch,
+ "remove_columns": remove_columns,
+ "keep_in_memory": keep_in_memory,
+ "writer_batch_size": writer_batch_size,
+ "features": features,
+ "disable_nullable": disable_nullable,
+ "fn_kwargs": fn_kwargs,
+ }
+
+ if new_fingerprint is None:
+ # we create a unique hash from the function,
+ # current dataset file and the mapping args
+ transform = format_transform_for_fingerprint(Dataset._map_single)
+ kwargs_for_fingerprint = format_kwargs_for_fingerprint(Dataset._map_single, (), dataset_kwargs)
+ kwargs_for_fingerprint["fingerprint_name"] = "new_fingerprint"
+ new_fingerprint = update_fingerprint(self._fingerprint, transform, kwargs_for_fingerprint)
+ else:
+ validate_fingerprint(new_fingerprint)
+ dataset_kwargs["new_fingerprint"] = new_fingerprint
+
+ if self.cache_files:
+ if cache_file_name is None:
+ cache_file_name = self._get_cache_file_path(new_fingerprint)
+ dataset_kwargs["cache_file_name"] = cache_file_name
+
+ def load_processed_shard_from_cache(shard_kwargs):
+ """Load a processed shard from cache if it exists, otherwise throw an error."""
+ shard = shard_kwargs["shard"]
+ # Check if we've already cached this computation (indexed by a hash)
+ if shard_kwargs["cache_file_name"] is not None:
+ if os.path.exists(shard_kwargs["cache_file_name"]) and load_from_cache_file:
+ info = shard.info.copy()
+ info.features = features
+ info.task_templates = None
+ return Dataset.from_file(shard_kwargs["cache_file_name"], info=info, split=shard.split)
+ raise NonExistentDatasetError
+
+ num_shards = num_proc if num_proc is not None else 1
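+ # With `drop_last_batch`, each shard skips its last incomplete batch, so the progress bar total excludes those examples.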
+ if batched and drop_last_batch:
+ pbar_total = len(self) // num_shards // batch_size * num_shards * batch_size
+ else:
+ pbar_total = len(self)
+
+ shards_done = 0
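+ # Single-process path: map the whole dataset as one shard, reusing the cached result when available.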
+ if num_proc is None or num_proc == 1:
+ transformed_dataset = None
+ try:
+ transformed_dataset = load_processed_shard_from_cache(dataset_kwargs)
+ logger.info(f"Loading cached processed dataset at {dataset_kwargs['cache_file_name']}")
+ except NonExistentDatasetError:
+ pass
+ if transformed_dataset is None:
+ with hf_tqdm(
+ unit=" examples",
+ total=pbar_total,
+ desc=desc or "Map",
+ ) as pbar:
+ for rank, done, content in Dataset._map_single(**dataset_kwargs):
+ if done:
+ shards_done += 1
+ logger.debug(f"Finished processing shard number {rank} of {num_shards}.")
+ transformed_dataset = content
+ else:
+ pbar.update(content)
+ assert transformed_dataset is not None, "Failed to retrieve the result from map"
+ # update fingerprint if the dataset changed
+ if transformed_dataset._fingerprint != self._fingerprint:
+ transformed_dataset._fingerprint = new_fingerprint
+ return transformed_dataset
+ else:
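+ # Multiprocessing path: split the dataset into `num_proc` contiguous shards, map each shard in its own process, then concatenate the results.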
+
+ def format_cache_file_name(
+ cache_file_name: Optional[str],
+ rank: Union[int, Literal["*"]], # noqa: F722
+ ) -> Optional[str]:
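+ # A rank of "*" produces a wildcard version of the cache file name (used to log which shard caches were loaded).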
+ if not cache_file_name:
+ return cache_file_name
+ sep = cache_file_name.rindex(".")
+ base_name, extension = cache_file_name[:sep], cache_file_name[sep:]
+ if isinstance(rank, int):
+ cache_file_name = base_name + suffix_template.format(rank=rank, num_proc=num_proc) + extension
+ logger.info(f"Process #{rank} will write at {cache_file_name}")
+ else:
+ cache_file_name = (
+ base_name
+ + suffix_template.replace("{rank:05d}", "{rank}").format(rank=rank, num_proc=num_proc)
+ + extension
+ )
+ return cache_file_name
+
+ def format_new_fingerprint(new_fingerprint: str, rank: int) -> str:
+ new_fingerprint = new_fingerprint + suffix_template.format(rank=rank, num_proc=num_proc)
+ validate_fingerprint(new_fingerprint)
+ return new_fingerprint
+
+ prev_env = deepcopy(os.environ)
+ # check if parallelism is off
+ # from https://github.com/huggingface/tokenizers/blob/bb668bc439dc34389b71dbb8ce0c597f15707b53/tokenizers/src/utils/parallelism.rs#L22
+ if prev_env.get("TOKENIZERS_PARALLELISM", "false").lower() not in (
+ "",
+ "off",
+ "false",
+ "f",
+ "no",
+ "n",
+ "0",
+ ):
+ logger.warning("Setting TOKENIZERS_PARALLELISM=false for forked processes.")
+ os.environ["TOKENIZERS_PARALLELISM"] = "false"
+ shards = [
+ self.shard(num_shards=num_proc, index=rank, contiguous=True, keep_in_memory=keep_in_memory)
+ for rank in range(num_proc)
+ ]
+ kwargs_per_job = [
+ {
+ **dataset_kwargs,
+ "shard": shards[rank],
+ "cache_file_name": format_cache_file_name(cache_file_name, rank),
+ "rank": rank,
+ "offset": sum(len(s) for s in shards[:rank]),
+ "new_fingerprint": format_new_fingerprint(new_fingerprint, rank),
+ }
+ for rank in range(num_shards)
+ ]
+
+ transformed_shards = [None] * num_shards
+ for rank in range(num_shards):
+ try:
+ transformed_shards[rank] = load_processed_shard_from_cache(kwargs_per_job[rank])
+ kwargs_per_job[rank] = None
+ except NonExistentDatasetError:
+ pass
+
+ kwargs_per_job = [kwargs for kwargs in kwargs_per_job if kwargs is not None]
+
+ # We try to create a pool with as many workers as there are shards not yet cached.
+ if kwargs_per_job:
+ if len(kwargs_per_job) < num_shards:
+ logger.info(
+ f"Reprocessing {len(kwargs_per_job)}/{num_shards} shards because some of them were missing from the cache."
+ )
+ with Pool(len(kwargs_per_job)) as pool:
+ os.environ = prev_env
+ logger.info(f"Spawning {num_proc} processes")
+ with hf_tqdm(
+ unit=" examples",
+ total=pbar_total,
+ desc=(desc or "Map") + f" (num_proc={num_proc})",
+ ) as pbar:
+ for rank, done, content in iflatmap_unordered(
+ pool, Dataset._map_single, kwargs_iterable=kwargs_per_job
+ ):
+ if done:
+ shards_done += 1
+ logger.debug(f"Finished processing shard number {rank} of {num_shards}.")
+ transformed_shards[rank] = content
+ else:
+ pbar.update(content)
+ # Avoids PermissionError on Windows (the error: https://github.com/huggingface/datasets/actions/runs/4026734820/jobs/6921621805)
+ for kwargs in kwargs_per_job:
+ del kwargs["shard"]
+ else:
+ logger.info(f"Loading cached processed dataset at {format_cache_file_name(cache_file_name, '*')}")
+ assert (
+ None not in transformed_shards
+ ), f"Failed to retrieve results from map: result list {transformed_shards} still contains None - at least one worker failed to return its results"
+ logger.info(f"Concatenating {num_proc} shards")
+ result = _concatenate_map_style_datasets(transformed_shards)
+ # update fingerprint if the dataset changed
+ if any(
+ transformed_shard._fingerprint != shard._fingerprint
+ for transformed_shard, shard in zip(transformed_shards, shards)
+ ):
+ result._fingerprint = new_fingerprint
+ else:
+ result._fingerprint = self._fingerprint
+ return result
+
+ @staticmethod
+ def _map_single(
+ shard: "Dataset",
+ function: Optional[Callable] = None,
+ with_indices: bool = False,
+ with_rank: bool = False,
+ input_columns: Optional[List[str]] = None,
+ batched: bool = False,
+ batch_size: Optional[int] = 1000,
+ drop_last_batch: bool = False,
+ remove_columns: Optional[List[str]] = None,
+ keep_in_memory: bool = False,
+ cache_file_name: Optional[str] = None,
+ writer_batch_size: Optional[int] = 1000,
+ features: Optional[Features] = None,
+ disable_nullable: bool = False,
+ fn_kwargs: Optional[dict] = None,
+ new_fingerprint: Optional[str] = None,
+ rank: Optional[int] = None,
+ offset: int = 0,
+ ) -> Iterable[Tuple[int, bool, Union[int, "Dataset"]]]:
+ """Apply a function to all the elements in the table (individually or in batches)
+ and update the table (if function does update examples).
+
+ Args:
+ shard (`datasets.Dataset`): Dataset to map the transform on.
+ function (`Callable`): with one of the following signature:
+ - `function(example: Dict[str, Any]) -> Dict[str, Any]` if `batched=False` and `with_indices=False` and `with_rank=False`
+ - `function(example: Dict[str, Any], *extra_args) -> Dict[str, Any]` if `batched=False` and `with_indices=True` and/or `with_rank=True` (one extra arg for each)
+ - `function(batch: Dict[str, List]) -> Dict[str, List]` if `batched=True` and `with_indices=False` and `with_rank=False`
+ - `function(batch: Dict[str, List], *extra_args) -> Dict[str, List]` if `batched=True` and `with_indices=True` and/or `with_rank=True` (one extra arg for each)
+
+ For advanced usage, the function can also return a `pyarrow.Table`.
+ Moreover if your function returns nothing (`None`), then `map` will run your function and return the dataset unchanged.
+ If no function is provided, default to identity function: lambda x: x
+ with_indices (`bool`, defaults to `False`): Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx[, rank]): ...`.
+ with_rank (`bool`, default `False`): Provide process rank to `function`. Note that in this case the signature of `function` should be `def function(example[, idx], rank): ...`.
+ input_columns (`Optional[List[str]]`, defaults to `None`): The columns to be passed into `function` as
+ positional arguments. If `None`, a dict mapping to all formatted columns is passed as one argument.
+ batched (`bool`, defaults to `False`): Provide batch of examples to `function`
+ batch_size (`int`, optional, defaults to `1000`): Number of examples per batch provided to `function` if `batched=True`
+ `batch_size <= 0` or `batch_size == None`: Provide the full dataset as a single batch to `function`
+ drop_last_batch (`bool`, default: `False`): Whether a last batch smaller than the batch_size should be
+ dropped instead of being processed by the function.
+ remove_columns (`Optional[List[str]]`, defaults to `None`): Remove a selection of columns while doing the mapping.
+ Columns will be removed before updating the examples with the output of `function`, i.e. if `function` is adding
+ columns with names in `remove_columns`, these columns will be kept.
+ keep_in_memory (`bool`, defaults to `False`): Keep the dataset in memory instead of writing it to a cache file.
+ cache_file_name (`str`, optional, defaults to `None`): Provide the name of a path for the cache file. It is used to store the
+ results of the computation instead of the automatically generated cache file name.
+ writer_batch_size (`int`, default `1000`): Number of rows per write operation for the cache file writer.
+ This value is a good trade-off between memory usage during the processing, and processing speed.
+ A higher value makes the processing do fewer lookups, a lower value consumes less temporary memory while running `.map()`.
+ features (`Optional[datasets.Features]`, defaults to `None`): Use a specific Features to store the cache file
+ instead of the automatically generated one.
+ disable_nullable (`bool`, defaults to `False`): Disallow null values in the table.
+ fn_kwargs (`Dict`, optional, defaults to `None`): Keyword arguments to be passed to `function`
+ new_fingerprint (`str`, optional, defaults to `None`): the new fingerprint of the dataset after transform.
+ If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments
+ rank: (`int`, optional, defaults to `None`): If specified, this is the process rank when doing multiprocessing
+ offset: (`int`, defaults to 0): If specified, this is an offset applied to the indices passed to `function` if `with_indices=True`.
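+
+ Yields:
+ `Tuple[int, bool, Union[int, Dataset]]`: tuples of `(rank, done, content)`. While a shard is being
+ processed, `done` is `False` and `content` is the number of examples processed since the last progress
+ update; once the shard is finished, `done` is `True` and `content` is the resulting `Dataset`
+ (or the unchanged input `shard` if `function` did not return anything to update).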
+ """
+ if fn_kwargs is None:
+ fn_kwargs = {}
+
+ # If we do batch computation but no batch size is provided, default to the full dataset
+ if batched and (batch_size is None or batch_size <= 0):
+ batch_size = shard.num_rows
+
+ # We set this variable to True after processing the first example/batch in
+ # `apply_function_on_filtered_inputs` if the map function returns a dict.
+ # If set to False, no new arrow table will be created
+
+ update_data = None
+
+ format_kwargs = shard._format_kwargs.copy()
+ # Lazy formatting is only available for the default format (None/python)
+ if not input_columns and shard._format_type is None:
+ format_kwargs["lazy"] = True
+ input_formatter = get_formatter(
+ shard._format_type,
+ features=shard.features,
+ **format_kwargs,
+ )
+
+ class NumExamplesMismatchError(Exception):
+ pass
+
+ def validate_function_output(processed_inputs, indices):
+ """Validate output of the map function."""
+ if processed_inputs is not None and not isinstance(processed_inputs, (Mapping, pa.Table, pd.DataFrame)):
+ raise TypeError(
+ f"Provided `function` which is applied to all elements of table returns a variable of type {type(processed_inputs)}. Make sure provided `function` returns a variable of type `dict` (or a pyarrow table) to update the dataset or `None` if you are only interested in side effects."
+ )
+ elif isinstance(indices, list) and isinstance(processed_inputs, Mapping):
+ allowed_batch_return_types = (list, np.ndarray, pd.Series)
+ if config.TF_AVAILABLE and "tensorflow" in sys.modules:
+ import tensorflow as tf
+
+ allowed_batch_return_types += (tf.Tensor,)
+ if config.TORCH_AVAILABLE and "torch" in sys.modules:
+ import torch
+
+ allowed_batch_return_types += (torch.Tensor,)
+ if config.JAX_AVAILABLE and "jax" in sys.modules:
+ import jax.numpy as jnp
+
+ allowed_batch_return_types += (jnp.ndarray,)
+ all_dict_values_are_lists = all(
+ isinstance(value, allowed_batch_return_types) for value in processed_inputs.values()
+ )
+ if all_dict_values_are_lists is False:
+ raise TypeError(
+ f"Provided `function` which is applied to all elements of table returns a `dict` of types {[type(x) for x in processed_inputs.values()]}. When using `batched=True`, make sure provided `function` returns a `dict` of types like `{allowed_batch_return_types}`."
+ )
+
+ def apply_function_on_filtered_inputs(pa_inputs, indices, check_same_num_examples=False, offset=0):
+ """Utility to apply the function on a selection of columns."""
+ nonlocal update_data
+ inputs = format_table(
+ pa_inputs,
+ 0 if not batched else range(pa_inputs.num_rows),
+ format_columns=input_columns,
+ formatter=input_formatter,
+ )
+ fn_args = [inputs] if input_columns is None else [inputs[col] for col in input_columns]
+ if offset == 0:
+ effective_indices = indices
+ else:
+ effective_indices = [i + offset for i in indices] if isinstance(indices, list) else indices + offset
+ additional_args = ()
+ if with_indices:
+ additional_args += (effective_indices,)
+ if with_rank:
+ additional_args += (rank,)
+ processed_inputs = function(*fn_args, *additional_args, **fn_kwargs)
+ if isinstance(processed_inputs, LazyDict):
+ processed_inputs = {
+ k: v for k, v in processed_inputs.data.items() if k not in processed_inputs.keys_to_format
+ }
+ returned_lazy_dict = True
+ else:
+ returned_lazy_dict = False
+ if update_data is None:
+ # Check if the function returns updated examples
+ update_data = isinstance(processed_inputs, (Mapping, pa.Table, pd.DataFrame))
+ validate_function_output(processed_inputs, indices)
+ if not update_data:
+ return None # Nothing to update, let's move on
+ if shard._format_type or input_columns:
+ # TODO(QL, MS): ideally the behavior should be the same even if the dataset is formatted (may require major release)
+ inputs_to_merge = dict(zip(pa_inputs.column_names, pa_inputs.itercolumns()))
+ elif isinstance(inputs, LazyDict):
+ inputs_to_merge = {
+ k: (v if k not in inputs.keys_to_format else pa_inputs[k]) for k, v in inputs.data.items()
+ }
+ else:
+ inputs_to_merge = inputs
+ if remove_columns is not None:
+ for column in remove_columns:
+ # `function` can modify input in-place causing column to be already removed.
+ if column in inputs_to_merge:
+ inputs_to_merge.pop(column)
+ if returned_lazy_dict and column in processed_inputs:
+ processed_inputs.pop(column)
+ if check_same_num_examples:
+ input_num_examples = len(pa_inputs)
+ processed_inputs_num_examples = len(processed_inputs[next(iter(processed_inputs.keys()))])
+ if input_num_examples != processed_inputs_num_examples:
+ raise NumExamplesMismatchError()
+ if isinstance(inputs, Mapping) and isinstance(processed_inputs, Mapping):
+ # The .map() transform *updates* the dataset:
+ # the output dictionary contains both the input data and the output data.
+ # The output dictionary may contain Arrow values from `inputs_to_merge` so that we can re-write them efficiently.
+ return {**inputs_to_merge, **processed_inputs}
+ else:
+ return processed_inputs
+
+ def init_buffer_and_writer():
+ # Prepare output buffer and batched writer in memory or on file if we update the table
+ writer_features = features
+ if writer_features is None:
+ writer_features = shard.features
+ update_features = True
+ else:
+ update_features = False
+ if keep_in_memory or cache_file_name is None:
+ buf_writer = pa.BufferOutputStream()
+ tmp_file = None
+ writer = ArrowWriter(
+ features=writer_features,
+ stream=buf_writer,
+ writer_batch_size=writer_batch_size,
+ update_features=update_features,
+ fingerprint=new_fingerprint,
+ disable_nullable=disable_nullable,
+ )
+ else:
+ buf_writer = None
+ logger.info(f"Caching processed dataset at {cache_file_name}")
+ tmp_file = tempfile.NamedTemporaryFile("wb", dir=os.path.dirname(cache_file_name), delete=False)
+ writer = ArrowWriter(
+ features=writer_features,
+ path=tmp_file.name,
+ writer_batch_size=writer_batch_size,
+ update_features=update_features,
+ fingerprint=new_fingerprint,
+ disable_nullable=disable_nullable,
+ )
+ return buf_writer, writer, tmp_file
+
+ num_examples_progress_update = 0
+ # If `update_data` is True after processing the first example/batch, initialize these resources with `init_buffer_and_writer`
+ buf_writer, writer, tmp_file = None, None, None
+
+ # Optionally initialize the writer as a context manager
+ with contextlib.ExitStack() as stack:
+ try:
+ arrow_formatted_shard = shard.with_format("arrow")
+
+ # Loop over single examples or batches and write to buffer/file if examples are to be updated
+ if not batched:
+ shard_iterable = enumerate(arrow_formatted_shard)
+ else:
+ num_rows = len(shard) if not drop_last_batch else len(shard) // batch_size * batch_size
+ shard_iterable = zip(
+ range(0, num_rows, batch_size),
+ arrow_formatted_shard.iter(batch_size, drop_last_batch=drop_last_batch),
+ )
+ if not batched:
+ _time = time.time()
+ for i, example in shard_iterable:
+ example = apply_function_on_filtered_inputs(example, i, offset=offset)
+ if update_data:
+ if i == 0:
+ buf_writer, writer, tmp_file = init_buffer_and_writer()
+ stack.enter_context(writer)
+ if isinstance(example, pa.Table):
+ writer.write_row(example)
+ elif isinstance(example, pd.DataFrame):
+ writer.write_row(pa.Table.from_pandas(example))
+ else:
+ writer.write(example)
+ num_examples_progress_update += 1
+ if time.time() > _time + config.PBAR_REFRESH_TIME_INTERVAL:
+ _time = time.time()
+ yield rank, False, num_examples_progress_update
+ num_examples_progress_update = 0
+ else:
+ _time = time.time()
+ for i, batch in shard_iterable:
+ num_examples_in_batch = len(batch)
+ indices = list(
+ range(*(slice(i, i + batch_size).indices(shard.num_rows)))
+ ) # Something simpler?
+ try:
+ batch = apply_function_on_filtered_inputs(
+ batch,
+ indices,
+ check_same_num_examples=len(shard.list_indexes()) > 0,
+ offset=offset,
+ )
+ except NumExamplesMismatchError:
+ raise DatasetTransformationNotAllowedError(
+ "Using `.map` in batched mode on a dataset with attached indexes is allowed only if it doesn't create or remove existing examples. You can first run `.drop_index() to remove your index and then re-add it."
+ ) from None
+ if update_data:
+ if i == 0:
+ buf_writer, writer, tmp_file = init_buffer_and_writer()
+ stack.enter_context(writer)
+ if isinstance(batch, pa.Table):
+ writer.write_table(batch)
+ elif isinstance(batch, pd.DataFrame):
+ writer.write_table(pa.Table.from_pandas(batch))
+ else:
+ writer.write_batch(batch)
+ num_examples_progress_update += num_examples_in_batch
+ if time.time() > _time + config.PBAR_REFRESH_TIME_INTERVAL:
+ _time = time.time()
+ yield rank, False, num_examples_progress_update
+ num_examples_progress_update = 0
+ if update_data and writer is not None:
+ writer.finalize() # close_stream=bool(buf_writer is None)) # We only close if we are writing in a file
+ except (Exception, KeyboardInterrupt):
+ yield rank, False, num_examples_progress_update
+ if update_data:
+ if writer is not None:
+ writer.finalize()
+ if tmp_file is not None:
+ tmp_file.close()
+ if os.path.exists(tmp_file.name):
+ os.remove(tmp_file.name)
+ raise
+
+ yield rank, False, num_examples_progress_update
+ if update_data and tmp_file is not None:
+ tmp_file.close()
+ shutil.move(tmp_file.name, cache_file_name)
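+ # os.umask both sets and returns the mask: read the current umask, restore it, then grant the cache file the default permissions minus that umask.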
+ umask = os.umask(0o666)
+ os.umask(umask)
+ os.chmod(cache_file_name, 0o666 & ~umask)
+
+ if update_data:
+ # Create new Dataset from buffer or file
+ info = shard.info.copy()
+ info.features = writer._features
+ info.task_templates = None
+ if buf_writer is None:
+ yield rank, True, Dataset.from_file(cache_file_name, info=info, split=shard.split)
+ else:
+ yield rank, True, Dataset.from_buffer(buf_writer.getvalue(), info=info, split=shard.split)
+ else:
+ yield rank, True, shard
+
+ @transmit_format
+ @fingerprint_transform(
+ inplace=False, ignore_kwargs=["load_from_cache_file", "cache_file_name", "desc"], version="2.0.1"
+ )
+ def filter(
+ self,
+ function: Optional[Callable] = None,
+ with_indices: bool = False,
+ with_rank: bool = False,
+ input_columns: Optional[Union[str, List[str]]] = None,
+ batched: bool = False,
+ batch_size: Optional[int] = 1000,
+ keep_in_memory: bool = False,
+ load_from_cache_file: Optional[bool] = None,
+ cache_file_name: Optional[str] = None,
+ writer_batch_size: Optional[int] = 1000,
+ fn_kwargs: Optional[dict] = None,
+ num_proc: Optional[int] = None,
+ suffix_template: str = "_{rank:05d}_of_{num_proc:05d}",
+ new_fingerprint: Optional[str] = None,
+ desc: Optional[str] = None,
+ ) -> "Dataset":
+ """Apply a filter function to all the elements in the table in batches
+ and update the table so that the dataset only includes examples according to the filter function.
+
+ Args:
+ function (`Callable`): Callable with one of the following signatures:
+
+ - `function(example: Dict[str, Any]) -> bool` if `batched=False` and `with_indices=False` and `with_rank=False`
+ - `function(example: Dict[str, Any], *extra_args) -> bool` if `batched=False` and `with_indices=True` and/or `with_rank=True` (one extra arg for each)
+ - `function(batch: Dict[str, List]) -> List[bool]` if `batched=True` and `with_indices=False` and `with_rank=False`
+ - `function(batch: Dict[str, List], *extra_args) -> List[bool]` if `batched=True` and `with_indices=True` and/or `with_rank=True` (one extra arg for each)
+
+ If no function is provided, defaults to an always `True` function: `lambda x: True`.
+ with_indices (`bool`, defaults to `False`):
+ Provide example indices to `function`. Note that in this case the
+ signature of `function` should be `def function(example, idx[, rank]): ...`.
+ with_rank (`bool`, defaults to `False`):
+ Provide process rank to `function`. Note that in this case the
+ signature of `function` should be `def function(example[, idx], rank): ...`.
+ input_columns (`str` or `List[str]`, *optional*):
+ The columns to be passed into `function` as
+ positional arguments. If `None`, a `dict` mapping to all formatted columns is passed as one argument.
+ batched (`bool`, defaults to `False`):
+ Provide batch of examples to `function`.
+ batch_size (`int`, *optional*, defaults to `1000`):
+ Number of examples per batch provided to `function` if
+ `batched = True`. If `batched = False`, one example per batch is passed to `function`.
+ If `batch_size <= 0` or `batch_size == None`, provide the full dataset as a single batch to `function`.
+ keep_in_memory (`bool`, defaults to `False`):
+ Keep the dataset in memory instead of writing it to a cache file.
+ load_from_cache_file (`Optional[bool]`, defaults to `True` if caching is enabled):
+ If a cache file storing the current computation from `function`
+ can be identified, use it instead of recomputing.
+ cache_file_name (`str`, *optional*):
+ Provide the name of a path for the cache file. It is used to store the
+ results of the computation instead of the automatically generated cache file name.
+ writer_batch_size (`int`, defaults to `1000`):
+ Number of rows per write operation for the cache file writer.
+ This value is a good trade-off between memory usage during the processing, and processing speed.
+ A higher value makes the processing do fewer lookups, a lower value consumes less temporary memory while running `map`.
+ fn_kwargs (`dict`, *optional*):
+ Keyword arguments to be passed to `function`.
+ num_proc (`int`, *optional*):
+ Number of processes for multiprocessing. By default it doesn't
+ use multiprocessing.
+ suffix_template (`str`):
+ If `cache_file_name` is specified, then this suffix will be added at the end of the base name of each shard's cache file.
+ For example, if `cache_file_name` is `"processed.arrow"`, then for `rank = 1` and `num_proc = 4`,
+ the resulting file would be `"processed_00001_of_00004.arrow"` for the default suffix (default
+ `_{rank:05d}_of_{num_proc:05d}`).
+ new_fingerprint (`str`, *optional*):
+ The new fingerprint of the dataset after transform.
+ If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments.
+ desc (`str`, *optional*, defaults to `None`):
+ Meaningful description to be displayed alongside the progress bar while filtering examples.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> ds.filter(lambda x: x["label"] == 1)
+ Dataset({
+ features: ['text', 'label'],
+ num_rows: 533
+ })
+ ```
+ """
+ if len(self.list_indexes()) > 0:
+ raise DatasetTransformationNotAllowedError(
+ "Using `.filter` on a dataset with attached indexes is not allowed. You can first run `.drop_index() to remove your index and then re-add it.`"
+ )
+
+ if function is None:
+ function = lambda x: True # noqa: E731
+
+ if len(self) == 0:
+ return self
+
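+ # Filtering is implemented as a batched map that records the indices of the examples kept by the mask function; the resulting indices table is then attached to a copy of the dataset.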
+ indices = self.map(
+ function=partial(
+ get_indices_from_mask_function,
+ function,
+ batched,
+ with_indices,
+ with_rank,
+ input_columns,
+ self._indices,
+ ),
+ with_indices=True,
+ with_rank=True,
+ features=Features({"indices": Value("uint64")}),
+ batched=True,
+ batch_size=batch_size,
+ remove_columns=self.column_names,
+ keep_in_memory=keep_in_memory,
+ load_from_cache_file=load_from_cache_file,
+ cache_file_name=cache_file_name,
+ writer_batch_size=writer_batch_size,
+ fn_kwargs=fn_kwargs,
+ num_proc=num_proc,
+ suffix_template=suffix_template,
+ new_fingerprint=new_fingerprint,
+ input_columns=input_columns,
+ desc=desc or "Filter",
+ )
+ new_dataset = copy.deepcopy(self)
+ new_dataset._indices = indices.data
+ new_dataset._fingerprint = new_fingerprint
+ return new_dataset
+
+ @transmit_format
+ @fingerprint_transform(inplace=False, ignore_kwargs=["cache_file_name"])
+ def flatten_indices(
+ self,
+ keep_in_memory: bool = False,
+ cache_file_name: Optional[str] = None,
+ writer_batch_size: Optional[int] = 1000,
+ features: Optional[Features] = None,
+ disable_nullable: bool = False,
+ num_proc: Optional[int] = None,
+ new_fingerprint: Optional[str] = None,
+ ) -> "Dataset":
+ """Create and cache a new Dataset by flattening the indices mapping.
+
+ Args:
+ keep_in_memory (`bool`, defaults to `False`):
+ Keep the dataset in memory instead of writing it to a cache file.
+ cache_file_name (`str`, *optional*, default `None`):
+ Provide the name of a path for the cache file. It is used to store the
+ results of the computation instead of the automatically generated cache file name.
+ writer_batch_size (`int`, defaults to `1000`):
+ Number of rows per write operation for the cache file writer.
+ This value is a good trade-off between memory usage during the processing, and processing speed.
+ A higher value makes the processing do fewer lookups, a lower value consumes less temporary memory while running `map`.
+ features (`Optional[datasets.Features]`, defaults to `None`):
+ Use a specific [`Features`] to store the cache file
+ instead of the automatically generated one.
+ disable_nullable (`bool`, defaults to `False`):
+ Disallow null values in the table.
+ num_proc (`int`, optional, default `None`):
+ Max number of processes when generating cache. Already cached shards are loaded sequentially
+ new_fingerprint (`str`, *optional*, defaults to `None`):
+ The new fingerprint of the dataset after transform.
+ If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments
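+
+ Example (an illustrative sketch, using the same dataset as the other examples in this file):
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> ds = ds.shuffle(seed=42) # creates an indices mapping
+ >>> ds = ds.flatten_indices() # rewrites the shuffled rows as contiguous data and drops the mapping
+ ```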
+ """
+
+ return self.map(
+ batched=True, # for speed
+ keep_in_memory=keep_in_memory,
+ cache_file_name=cache_file_name,
+ writer_batch_size=writer_batch_size,
+ features=features,
+ disable_nullable=disable_nullable,
+ new_fingerprint=new_fingerprint,
+ desc="Flattening the indices",
+ num_proc=num_proc,
+ )
+
+ def _new_dataset_with_indices(
+ self,
+ indices_cache_file_name: Optional[str] = None,
+ indices_buffer: Optional[pa.Buffer] = None,
+ fingerprint: Optional[str] = None,
+ ) -> "Dataset":
+ """Return a new Dataset obtained by adding indices (provided in indices_cache_file_name or in a buffer) to the
+ current Dataset.
+ """
+
+ if indices_cache_file_name is None and indices_buffer is None:
+ raise ValueError("At least one of indices_cache_file_name or indices_buffer must be provided.")
+
+ if fingerprint is None:
+ raise ValueError("please specify a fingerprint for the dataset with indices")
+
+ if indices_cache_file_name is not None:
+ indices_table = MemoryMappedTable.from_file(indices_cache_file_name)
+ else:
+ indices_table = InMemoryTable.from_buffer(indices_buffer)
+
+ # Return new Dataset object
+ # don't forget to copy the objects
+ return Dataset(
+ self._data,
+ info=self.info.copy(),
+ split=self.split,
+ indices_table=indices_table,
+ fingerprint=fingerprint,
+ )
+
+ @transmit_format
+ @fingerprint_transform(inplace=False, ignore_kwargs=["indices_cache_file_name"])
+ def select(
+ self,
+ indices: Iterable,
+ keep_in_memory: bool = False,
+ indices_cache_file_name: Optional[str] = None,
+ writer_batch_size: Optional[int] = 1000,
+ new_fingerprint: Optional[str] = None,
+ ) -> "Dataset":
+ """Create a new dataset with rows selected following the list/array of indices.
+
+ Args:
+ indices (`range`, `list`, `iterable`, `ndarray` or `Series`):
+ Range, list or 1D-array of integer indices for indexing.
+ If the indices correspond to a contiguous range, the Arrow table is simply sliced.
+ However, passing a list of indices that are not contiguous creates an indices mapping, which is much less efficient,
+ but still faster than recreating an Arrow table made of the requested rows.
+ keep_in_memory (`bool`, defaults to `False`):
+ Keep the indices mapping in memory instead of writing it to a cache file.
+ indices_cache_file_name (`str`, *optional*, defaults to `None`):
+ Provide the name of a path for the cache file. It is used to store the
+ indices mapping instead of the automatically generated cache file name.
+ writer_batch_size (`int`, defaults to `1000`):
+ Number of rows per write operation for the cache file writer.
+ This value is a good trade-off between memory usage during the processing, and processing speed.
+ A higher value makes the processing do fewer lookups, a lower value consumes less temporary memory while running `map`.
+ new_fingerprint (`str`, *optional*, defaults to `None`):
+ The new fingerprint of the dataset after transform.
+ If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> ds.select(range(4))
+ Dataset({
+ features: ['text', 'label'],
+ num_rows: 4
+ })
+ ```
+ """
+ if keep_in_memory and indices_cache_file_name is not None:
+ raise ValueError("Please use either `keep_in_memory` or `indices_cache_file_name` but not both.")
+
+ if len(self.list_indexes()) > 0:
+ raise DatasetTransformationNotAllowedError(
+ "Using `.select` on a dataset with attached indexes is not allowed. You can first run `.drop_index() to remove your index and then re-add it."
+ )
+
+ # If the array is empty we do nothing
+ if len(self) == 0:
+ return self
+
+ # If indices is a PyArrow array, we convert to NumPy
+ if isinstance(indices, (pa.Array, pa.ChunkedArray)):
+ indices = indices.to_numpy().astype(np.int64)
+
+ # Convert generator objects to lists
+ if isinstance(indices, Iterator):
+ indices = list(indices)
+
+ # If the indices are contiguous, simply slice the arrow table
+ if isinstance(indices, range):
+ if _is_range_contiguous(indices) and indices.start >= 0:
+ start, length = indices.start, indices.stop - indices.start
+ return self._select_contiguous(start, length, new_fingerprint=new_fingerprint)
+ else:
+ try:
+ start = next(iter(indices))
+ except StopIteration:
+ # if `indices` is an empty iterable, we return an empty dataset
+ return self._select_contiguous(0, 0, new_fingerprint=new_fingerprint)
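+ # If the indices increase by exactly 1 from `start`, they still form a contiguous range and can be handled with a simple slice.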
+ if start >= 0:
+ counter_from_start = itertools.count(start=start)
+ if all(i == j for i, j in zip(indices, counter_from_start)):
+ length = next(counter_from_start) - start
+ return self._select_contiguous(start, length, new_fingerprint=new_fingerprint)
+
+ # If not contiguous, we need to create a new indices mapping
+ return self._select_with_indices_mapping(
+ indices,
+ keep_in_memory=keep_in_memory,
+ indices_cache_file_name=indices_cache_file_name,
+ writer_batch_size=writer_batch_size,
+ new_fingerprint=new_fingerprint,
+ )
+
+ @transmit_format
+ @fingerprint_transform(inplace=False)
+ def _select_contiguous(
+ self,
+ start: int,
+ length: int,
+ new_fingerprint: Optional[str] = None,
+ ) -> "Dataset":
+ """Create a new dataset with rows from a contiguous slice of data.
+ The slice is defined by its start index and its length.
+
+ Args:
+ start (`int`): start index.
+ length (`int`): length of the slice to select.
+ new_fingerprint (`str`, optional, default `None`): the new fingerprint of the dataset after transform.
+ If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> ds._select_contiguous(0, 4)
+ Dataset({
+ features: ['text', 'label'],
+ num_rows: 4
+ })
+ ```
+ """
+ if len(self.list_indexes()) > 0:
+ raise DatasetTransformationNotAllowedError(
+ "Using `.select` on a dataset with attached indexes is not allowed. You can first run `.drop_index() to remove your index and then re-add it."
+ )
+
+ # If the array is empty we do nothing
+ if len(self) == 0:
+ return self
+
+ _check_valid_indices_value(start, len(self))
+ _check_valid_indices_value(start + length - 1, len(self))
+ if self._indices is None or length == 0:
+ return Dataset(
+ self.data.slice(start, length),
+ info=self.info.copy(),
+ split=self.split,
+ fingerprint=new_fingerprint,
+ )
+ else:
+ return Dataset(
+ self.data,
+ info=self.info.copy(),
+ split=self.split,
+ indices_table=self._indices.slice(start, length),
+ fingerprint=new_fingerprint,
+ )
+
+ @transmit_format
+ @fingerprint_transform(inplace=False, ignore_kwargs=["indices_cache_file_name"])
+ def _select_with_indices_mapping(
+ self,
+ indices: Iterable,
+ keep_in_memory: bool = False,
+ indices_cache_file_name: Optional[str] = None,
+ writer_batch_size: Optional[int] = 1000,
+ new_fingerprint: Optional[str] = None,
+ ) -> "Dataset":
+ """Create a new dataset with rows selected following the list/array of indices.
+ The new dataset is made by creating a new indices mapping on top of the main arrow table.
+
+ Args:
+ indices (sequence, iterable, range, ndarray or Series): List or 1D-array of integer indices for indexing.
+ keep_in_memory (`bool`, default `False`): Keep the indices mapping in memory instead of writing it to a cache file.
+ indices_cache_file_name (`str`, optional, default `None`): Provide the name of a path for the cache file. It is used to store the
+ indices mapping instead of the automatically generated cache file name.
+ writer_batch_size (`int`, default `1000`): Number of rows per write operation for the cache file writer.
+ This value is a good trade-off between memory usage during the processing, and processing speed.
+ A higher value makes the processing do fewer lookups, a lower value consumes less temporary memory while running `.map()`.
+ new_fingerprint (`str`, optional, default `None`): the new fingerprint of the dataset after transform.
+ If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> ds._select_with_indices_mapping(range(4))
+ Dataset({
+ features: ['text', 'label'],
+ num_rows: 4
+ })
+ ```
+ """
+ if keep_in_memory and indices_cache_file_name is not None:
+ raise ValueError("Please use either `keep_in_memory` or `indices_cache_file_name` but not both.")
+
+ if len(self.list_indexes()) > 0:
+ raise DatasetTransformationNotAllowedError(
+ "Using `.select` on a dataset with attached indexes is not allowed. You can first run `.drop_index() to remove your index and then re-add it."
+ )
+
+ # If the array is empty we do nothing
+ if len(self) == 0:
+ return self
+
+ # Prepare the writer for our indices arrow table
+ if keep_in_memory or indices_cache_file_name is None:
+ buf_writer = pa.BufferOutputStream()
+ tmp_file = None
+ writer = ArrowWriter(
+ stream=buf_writer, writer_batch_size=writer_batch_size, fingerprint=new_fingerprint, unit="indices"
+ )
+ else:
+ buf_writer = None
+ logger.info(f"Caching indices mapping at {indices_cache_file_name}")
+ tmp_file = tempfile.NamedTemporaryFile("wb", dir=os.path.dirname(indices_cache_file_name), delete=False)
+ writer = ArrowWriter(
+ path=tmp_file.name, writer_batch_size=writer_batch_size, fingerprint=new_fingerprint, unit="indices"
+ )
+
+ indices = indices if isinstance(indices, list) else list(indices)
+
+ size = len(self)
+ if indices:
+ _check_valid_indices_value(int(max(indices)), size=size)
+ _check_valid_indices_value(int(min(indices)), size=size)
+ else:
+ return self._select_contiguous(0, 0, new_fingerprint=new_fingerprint)
+
+ indices_array = pa.array(indices, type=pa.uint64())
+ # Check if we need to convert indices
+ if self._indices is not None:
+ indices_array = self._indices.column(0).take(indices_array)
+
+ indices_table = pa.Table.from_arrays([indices_array], names=["indices"])
+
+ with writer:
+ try:
+ writer.write_table(indices_table)
+ writer.finalize() # close_stream=bool(buf_writer is None)) We only close if we are writing in a file
+ except (Exception, KeyboardInterrupt):
+ if tmp_file is not None:
+ tmp_file.close()
+ if os.path.exists(tmp_file.name):
+ os.remove(tmp_file.name)
+ raise
+
+ if tmp_file is not None:
+ tmp_file.close()
+ shutil.move(tmp_file.name, indices_cache_file_name)
+ umask = os.umask(0o666)
+ os.umask(umask)
+ os.chmod(indices_cache_file_name, 0o666 & ~umask)
+
+ # Return new Dataset object
+ if buf_writer is None:
+ return self._new_dataset_with_indices(
+ indices_cache_file_name=indices_cache_file_name, fingerprint=new_fingerprint
+ )
+ else:
+ return self._new_dataset_with_indices(indices_buffer=buf_writer.getvalue(), fingerprint=new_fingerprint)
+
+ @transmit_format
+ @fingerprint_transform(inplace=False, ignore_kwargs=["load_from_cache_file", "indices_cache_file_name"])
+ def sort(
+ self,
+ column_names: Union[str, Sequence_[str]],
+ reverse: Union[bool, Sequence_[bool]] = False,
+ kind="deprecated",
+ null_placement: str = "at_end",
+ keep_in_memory: bool = False,
+ load_from_cache_file: Optional[bool] = None,
+ indices_cache_file_name: Optional[str] = None,
+ writer_batch_size: Optional[int] = 1000,
+ new_fingerprint: Optional[str] = None,
+ ) -> "Dataset":
+ """Create a new dataset sorted according to a single or multiple columns.
+
+ Args:
+ column_names (`Union[str, Sequence[str]]`):
+ Column name(s) to sort by.
+ reverse (`Union[bool, Sequence[bool]]`, defaults to `False`):
+ If `True`, sort by descending order rather than ascending. If a single bool is provided,
+ the value is applied to the sorting of all column names. Otherwise a list of bools with the
+ same length and order as column_names must be provided.
+ kind (`str`, *optional*):
+ Pandas algorithm for sorting, selected from `{quicksort, mergesort, heapsort, stable}`.
+ The default is `quicksort`. Note that both `stable` and `mergesort` use `timsort` under the covers and, in general,
+ the actual implementation will vary with data type. The `mergesort` option is retained for backwards compatibility.
+
+
+ `kind` was deprecated in version 2.10.0 and will be removed in 3.0.0.
+
+
+ null_placement (`str`, defaults to `at_end`):
+ Put `None` values at the beginning if `at_start` or `first`, or at the end if `at_end` or `last`.
+
+
+ keep_in_memory (`bool`, defaults to `False`):
+ Keep the sorted indices in memory instead of writing them to a cache file.
+ load_from_cache_file (`Optional[bool]`, defaults to `True` if caching is enabled):
+ If a cache file storing the sorted indices
+ can be identified, use it instead of recomputing.
+ indices_cache_file_name (`str`, *optional*, defaults to `None`):
+ Provide the name of a path for the cache file. It is used to store the
+ sorted indices instead of the automatically generated cache file name.
+ writer_batch_size (`int`, defaults to `1000`):
+ Number of rows per write operation for the cache file writer.
+ A higher value gives smaller cache files, a lower value consumes less temporary memory.
+ new_fingerprint (`str`, *optional*, defaults to `None`):
+ The new fingerprint of the dataset after transform.
+ If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset('rotten_tomatoes', split='validation')
+ >>> ds['label'][:10]
+ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
+ >>> sorted_ds = ds.sort('label')
+ >>> sorted_ds['label'][:10]
+ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
+ >>> another_sorted_ds = ds.sort(['label', 'text'], reverse=[True, False])
+ >>> another_sorted_ds['label'][:10]
+ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
+ ```
+ """
+ if len(self.list_indexes()) > 0:
+ raise DatasetTransformationNotAllowedError(
+ "Using `.sort` on a dataset with attached indexes is not allowed. You can first run `.drop_index() to remove your index and then re-add it."
+ )
+ # If the array is empty we do nothing
+ if len(self) == 0:
+ return self
+
+ # Deprecation warning
+ if kind != "deprecated":
+ warnings.warn(
+ "'kind' was deprecated in version 2.10.0 and will be removed in 3.0.0.",
+ category=FutureWarning,
+ )
+
+ # Check proper format of and for duplicates in column_names
+ if isinstance(column_names, str):
+ column_names = [column_names]
+
+ # Check proper format and length of reverse
+ if not isinstance(reverse, bool):
+ if len(reverse) != len(column_names):
+ raise ValueError(
+ "Parameter 'reverse' should be either a boolean or a list of booleans with the same length as 'column_names'."
+ )
+ else:
+ reverse = [reverse] * len(column_names)
+
+ # Check whether column name(s) exist in dataset
+ for column in column_names:
+ if not isinstance(column, str) or column not in self._data.column_names:
+ raise ValueError(
+ f"Column '{column}' not found in the dataset. Please provide a column selected in: {self._data.column_names}"
+ )
+
+ # Change null_placement to conform to pyarrow's sort_indices() while ensuring backwards compatibility
+ if null_placement not in ["at_start", "at_end"]:
+ if null_placement == "first":
+ null_placement = "at_start"
+ elif null_placement == "last":
+ null_placement = "at_end"
+ else:
+ raise ValueError(
+ f"null_placement '{null_placement}' is an invalid parameter value. Must be either 'last', 'at_end', 'first' or 'at_start'."
+ )
+
+ load_from_cache_file = load_from_cache_file if load_from_cache_file is not None else is_caching_enabled()
+
+ # Check if we've already cached this computation (indexed by a hash)
+ if self.cache_files:
+ if indices_cache_file_name is None:
+ # we create a unique hash from the function, current dataset file and the mapping args
+ indices_cache_file_name = self._get_cache_file_path(new_fingerprint)
+ if os.path.exists(indices_cache_file_name) and load_from_cache_file:
+ logger.info(f"Loading cached sorted indices for dataset at {indices_cache_file_name}")
+ return self._new_dataset_with_indices(
+ fingerprint=new_fingerprint, indices_cache_file_name=indices_cache_file_name
+ )
+
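+ # Materialize the (possibly indices-mapped) rows, compute the sort order with PyArrow, then select the rows in that order.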
+ sort_table = query_table(
+ table=self._data,
+ key=slice(0, len(self)),
+ indices=self._indices,
+ )
+
+ sort_keys = [
+ (col, "ascending" if not col_reverse else "descending") for col, col_reverse in zip(column_names, reverse)
+ ]
+
+ indices = pc.sort_indices(sort_table, sort_keys=sort_keys, null_placement=null_placement)
+
+ return self.select(
+ indices=indices,
+ keep_in_memory=keep_in_memory,
+ indices_cache_file_name=indices_cache_file_name,
+ writer_batch_size=writer_batch_size,
+ new_fingerprint=new_fingerprint,
+ )
+
+ @transmit_format
+ @fingerprint_transform(
+ inplace=False, randomized_function=True, ignore_kwargs=["load_from_cache_file", "indices_cache_file_name"]
+ )
+ def shuffle(
+ self,
+ seed: Optional[int] = None,
+ generator: Optional[np.random.Generator] = None,
+ keep_in_memory: bool = False,
+ load_from_cache_file: Optional[bool] = None,
+ indices_cache_file_name: Optional[str] = None,
+ writer_batch_size: Optional[int] = 1000,
+ new_fingerprint: Optional[str] = None,
+ ) -> "Dataset":
+ """Create a new Dataset where the rows are shuffled.
+
+ Currently shuffling uses numpy random generators.
+ You can either supply a NumPy BitGenerator to use, or a seed to initiate NumPy's default random generator (PCG64).
+
+ Shuffling takes the list of indices `[0:len(my_dataset)]` and shuffles it to create an indices mapping.
+ However as soon as your [`Dataset`] has an indices mapping, the speed can become 10x slower.
+ This is because there is an extra step to get the row index to read using the indices mapping, and most importantly, you aren't reading contiguous chunks of data anymore.
+ To restore the speed, you'd need to rewrite the entire dataset on your disk again using [`Dataset.flatten_indices`], which removes the indices mapping.
+ This may take a lot of time depending on the size of your dataset though:
+
+ ```python
+ my_dataset[0] # fast
+ my_dataset = my_dataset.shuffle(seed=42)
+ my_dataset[0] # up to 10x slower
+ my_dataset = my_dataset.flatten_indices() # rewrite the shuffled dataset on disk as contiguous chunks of data
+ my_dataset[0] # fast again
+ ```
+
+ In this case, we recommend switching to an [`IterableDataset`] and leveraging its fast approximate shuffling method [`IterableDataset.shuffle`].
+ It only shuffles the shards order and adds a shuffle buffer to your dataset, which keeps the speed of your dataset optimal:
+
+ ```python
+ my_iterable_dataset = my_dataset.to_iterable_dataset(num_shards=128)
+ for example in my_iterable_dataset: # fast
+ pass
+
+ shuffled_iterable_dataset = my_iterable_dataset.shuffle(seed=42, buffer_size=100)
+
+ for example in shuffled_iterable_dataset: # as fast as before
+ pass
+ ```
+
+ Args:
+ seed (`int`, *optional*):
+ A seed to initialize the default BitGenerator if `generator=None`.
+ If `None`, then fresh, unpredictable entropy will be pulled from the OS.
+ If an `int` or `array_like[ints]` is passed, then it will be passed to SeedSequence to derive the initial BitGenerator state.
+ generator (`numpy.random.Generator`, *optional*):
+ Numpy random Generator to use to compute the permutation of the dataset rows.
+ If `generator=None` (default), uses `np.random.default_rng` (the default BitGenerator (PCG64) of NumPy).
+ keep_in_memory (`bool`, default `False`):
+ Keep the shuffled indices in memory instead of writing them to a cache file.
+ load_from_cache_file (`Optional[bool]`, defaults to `True` if caching is enabled):
+ If a cache file storing the shuffled indices
+ can be identified, use it instead of recomputing.
+ indices_cache_file_name (`str`, *optional*):
+ Provide the name of a path for the cache file. It is used to store the
+ shuffled indices instead of the automatically generated cache file name.
+ writer_batch_size (`int`, defaults to `1000`):
+ Number of rows per write operation for the cache file writer.
+ This value is a good trade-off between memory usage during the processing, and processing speed.
+ A higher value makes the processing do fewer lookups, a lower value consumes less temporary memory while running `map`.
+ new_fingerprint (`str`, *optional*, defaults to `None`):
+ The new fingerprint of the dataset after transform.
+ If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> ds['label'][:10]
+ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
+
+ # set a seed
+ >>> shuffled_ds = ds.shuffle(seed=42)
+ >>> shuffled_ds['label'][:10]
+ [1, 0, 1, 1, 0, 0, 0, 0, 0, 0]
+ ```
+ """
+ if len(self.list_indexes()) > 0:
+ raise DatasetTransformationNotAllowedError(
+ "Using `.shuffle` on a dataset with attached indexes is not allowed. You can first run `.drop_index() to remove your index and then re-add it."
+ )
+ # If the array is empty we do nothing
+ if len(self) == 0:
+ return self
+
+ if keep_in_memory and indices_cache_file_name is not None:
+ raise ValueError("Please use either `keep_in_memory` or `indices_cache_file_name` but not both.")
+
+ if seed is not None and generator is not None:
+ raise ValueError("Both `seed` and `generator` were provided. Please specify just one of them.")
+
+ if generator is not None and not isinstance(generator, np.random.Generator):
+ raise ValueError("The provided generator must be an instance of numpy.random.Generator")
+
+ load_from_cache_file = load_from_cache_file if load_from_cache_file is not None else is_caching_enabled()
+
+ if generator is None:
+ if seed is None:
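+ # No seed or generator given: derive a seed from the global NumPy RNG state so that `np.random.seed()` still controls the shuffle, and advance the global RNG so successive calls differ.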
+ _, seed, pos, *_ = np.random.get_state()
+ seed = seed[pos] if pos < 624 else seed[0]
+ _ = np.random.random() # do 1 step of rng
+ generator = np.random.default_rng(seed)
+
+ # Check if we've already cached this computation (indexed by a hash)
+ if self.cache_files:
+ if indices_cache_file_name is None:
+ # we create a unique hash from the function, current dataset file and the mapping args
+ indices_cache_file_name = self._get_cache_file_path(new_fingerprint)
+ if os.path.exists(indices_cache_file_name) and load_from_cache_file:
+ logger.info(f"Loading cached shuffled indices for dataset at {indices_cache_file_name}")
+ return self._new_dataset_with_indices(
+ fingerprint=new_fingerprint, indices_cache_file_name=indices_cache_file_name
+ )
+
+ permutation = generator.permutation(len(self))
+
+ return self.select(
+ indices=permutation,
+ keep_in_memory=keep_in_memory,
+ indices_cache_file_name=indices_cache_file_name if not keep_in_memory else None,
+ writer_batch_size=writer_batch_size,
+ new_fingerprint=new_fingerprint,
+ )
+
+ @transmit_format
+ @fingerprint_transform(
+ inplace=False,
+ randomized_function=True,
+ fingerprint_names=["train_new_fingerprint", "test_new_fingerprint"],
+ ignore_kwargs=["load_from_cache_file", "train_indices_cache_file_name", "test_indices_cache_file_name"],
+ )
+ def train_test_split(
+ self,
+ test_size: Union[float, int, None] = None,
+ train_size: Union[float, int, None] = None,
+ shuffle: bool = True,
+ stratify_by_column: Optional[str] = None,
+ seed: Optional[int] = None,
+ generator: Optional[np.random.Generator] = None,
+ keep_in_memory: bool = False,
+ load_from_cache_file: Optional[bool] = None,
+ train_indices_cache_file_name: Optional[str] = None,
+ test_indices_cache_file_name: Optional[str] = None,
+ writer_batch_size: Optional[int] = 1000,
+ train_new_fingerprint: Optional[str] = None,
+ test_new_fingerprint: Optional[str] = None,
+ ) -> "DatasetDict":
+ """Return a dictionary ([`datasets.DatasetDict`]) with two random train and test subsets (`train` and `test` `Dataset` splits).
+ Splits are created from the dataset according to `test_size`, `train_size` and `shuffle`.
+
+ This method is similar to scikit-learn `train_test_split`.
+
+ Args:
+ test_size (`float` or `int`, *optional*):
+ Size of the test split.
+ If `float`, should be between `0.0` and `1.0` and represent the proportion of the dataset to include in the test split.
+ If `int`, represents the absolute number of test samples.
+ If `None`, the value is set to the complement of the train size.
+ If `train_size` is also `None`, it will be set to `0.25`.
+ train_size (`float` or `int`, *optional*):
+ Size of the train split.
+ If `float`, should be between `0.0` and `1.0` and represent the proportion of the dataset to include in the train split.
+ If `int`, represents the absolute number of train samples.
+ If `None`, the value is automatically set to the complement of the test size.
+ shuffle (`bool`, *optional*, defaults to `True`):
+ Whether or not to shuffle the data before splitting.
+ stratify_by_column (`str`, *optional*, defaults to `None`):
+ The column name of labels to be used to perform stratified split of data.
+ seed (`int`, *optional*):
+ A seed to initialize the default BitGenerator if `generator=None`.
+ If `None`, then fresh, unpredictable entropy will be pulled from the OS.
+ If an `int` or `array_like[ints]` is passed, then it will be passed to SeedSequence to derive the initial BitGenerator state.
+ generator (`numpy.random.Generator`, *optional*):
+ Numpy random Generator to use to compute the permutation of the dataset rows.
+ If `generator=None` (default), uses `np.random.default_rng` (the default BitGenerator (PCG64) of NumPy).
+ keep_in_memory (`bool`, defaults to `False`):
+ Keep the split indices in memory instead of writing them to a cache file.
+ load_from_cache_file (`Optional[bool]`, defaults to `True` if caching is enabled):
+ If a cache file storing the splits indices
+ can be identified, use it instead of recomputing.
+ train_indices_cache_file_name (`str`, *optional*):
+ Provide the name of a path for the cache file. It is used to store the
+ train split indices instead of the automatically generated cache file name.
+ test_indices_cache_file_name (`str`, *optional*):
+ Provide the name of a path for the cache file. It is used to store the
+ test split indices instead of the automatically generated cache file name.
+ writer_batch_size (`int`, defaults to `1000`):
+ Number of rows per write operation for the cache file writer.
+ This value is a good trade-off between memory usage during the processing, and processing speed.
+ A higher value makes the processing do fewer lookups, a lower value consumes less temporary memory while running `map`.
+ train_new_fingerprint (`str`, *optional*, defaults to `None`):
+ The new fingerprint of the train set after transform.
+ If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments
+ test_new_fingerprint (`str`, *optional*, defaults to `None`):
+ The new fingerprint of the test set after transform.
+ If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> ds = ds.train_test_split(test_size=0.2, shuffle=True)
+ DatasetDict({
+ train: Dataset({
+ features: ['text', 'label'],
+ num_rows: 852
+ })
+ test: Dataset({
+ features: ['text', 'label'],
+ num_rows: 214
+ })
+ })
+
+ # set a seed
+ >>> ds = ds.train_test_split(test_size=0.2, seed=42)
+
+ # stratified split
+ >>> ds = load_dataset("imdb",split="train")
+ Dataset({
+ features: ['text', 'label'],
+ num_rows: 25000
+ })
+ >>> ds = ds.train_test_split(test_size=0.2, stratify_by_column="label")
+ DatasetDict({
+ train: Dataset({
+ features: ['text', 'label'],
+ num_rows: 20000
+ })
+ test: Dataset({
+ features: ['text', 'label'],
+ num_rows: 5000
+ })
+ })
+ ```
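+
+ A reproducible split can also be obtained by passing an explicit NumPy generator
+ (illustrative sketch; any `numpy.random.Generator` works here):
+
+ ```py
+ >>> import numpy as np
+ >>> rng = np.random.default_rng(42)
+ >>> ds = ds.train_test_split(test_size=0.2, generator=rng)
+ ```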
+ """
+ from .dataset_dict import DatasetDict # import here because of circular dependency
+
+ if len(self.list_indexes()) > 0:
+ raise DatasetTransformationNotAllowedError(
+ "Using `.train_test_split` on a dataset with attached indexes is not allowed. You can first run `.drop_index() to remove your index and then re-add it."
+ )
+ # If the array is empty we do nothing
+ if len(self) == 0:
+ return DatasetDict({"train": self, "test": self})
+
+ if test_size is None and train_size is None:
+ test_size = 0.25
+
+ # Safety checks similar to scikit-learn's ones.
+ # (adapted from https://github.com/scikit-learn/scikit-learn/blob/fd237278e895b42abe8d8d09105cbb82dc2cbba7/sklearn/model_selection/_split.py#L1750)
+ n_samples = len(self)
+ if (
+ isinstance(test_size, int)
+ and (test_size >= n_samples or test_size <= 0)
+ or isinstance(test_size, float)
+ and (test_size <= 0 or test_size >= 1)
+ ):
+ raise ValueError(
+ f"test_size={test_size} should be either positive and smaller "
+ f"than the number of samples {n_samples} or a float in the (0, 1) range"
+ )
+
+ if (
+ isinstance(train_size, int)
+ and (train_size >= n_samples or train_size <= 0)
+ or isinstance(train_size, float)
+ and (train_size <= 0 or train_size >= 1)
+ ):
+ raise ValueError(
+ f"train_size={train_size} should be either positive and smaller "
+ f"than the number of samples {n_samples} or a float in the (0, 1) range"
+ )
+
+ if train_size is not None and not isinstance(train_size, (int, float)):
+ raise ValueError(f"Invalid value for train_size: {train_size} of type {type(train_size)}")
+ if test_size is not None and not isinstance(test_size, (int, float)):
+ raise ValueError(f"Invalid value for test_size: {test_size} of type {type(test_size)}")
+
+ if isinstance(train_size, float) and isinstance(test_size, float) and train_size + test_size > 1:
+ raise ValueError(
+ f"The sum of test_size and train_size = {train_size + test_size}, should be in the (0, 1)"
+ " range. Reduce test_size and/or train_size."
+ )
+
+ if isinstance(test_size, float):
+ n_test = ceil(test_size * n_samples)
+ elif isinstance(test_size, int):
+ n_test = float(test_size)
+
+ if isinstance(train_size, float):
+ n_train = floor(train_size * n_samples)
+ elif isinstance(train_size, int):
+ n_train = float(train_size)
+
+ if train_size is None:
+ n_train = n_samples - n_test
+ elif test_size is None:
+ n_test = n_samples - n_train
+
+ if n_train + n_test > n_samples:
+ raise ValueError(
+ f"The sum of train_size and test_size = {n_train + n_test}, "
+ "should be smaller than the number of "
+ f"samples {n_samples}. Reduce test_size and/or "
+ "train_size."
+ )
+
+ n_train, n_test = int(n_train), int(n_test)
+
+ if n_train == 0:
+ raise ValueError(
+ f"With n_samples={n_samples}, test_size={test_size} and train_size={train_size}, the "
+ "resulting train set will be empty. Adjust any of the "
+ "aforementioned parameters."
+ )
+
+ load_from_cache_file = load_from_cache_file if load_from_cache_file is not None else is_caching_enabled()
+
+ if generator is None and shuffle is True:
+ if seed is None:
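+ # No explicit seed: derive one from the global NumPy RNG state (MT19937 key and position),
+ # then advance the global RNG by one step so repeated un-seeded calls yield different splits.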
+ _, seed, pos, *_ = np.random.get_state()
+ seed = seed[pos] if pos < 624 else seed[0]
+ _ = np.random.random() # do 1 step of rng
+ generator = np.random.default_rng(seed)
+
+ # Check if we've already cached this computation (indexed by a hash)
+ if self.cache_files:
+ if train_indices_cache_file_name is None or test_indices_cache_file_name is None:
+ # we create a unique hash from the function, current dataset file and the mapping args
+
+ if train_indices_cache_file_name is None:
+ train_indices_cache_file_name = self._get_cache_file_path(train_new_fingerprint)
+ if test_indices_cache_file_name is None:
+ test_indices_cache_file_name = self._get_cache_file_path(test_new_fingerprint)
+ if (
+ os.path.exists(train_indices_cache_file_name)
+ and os.path.exists(test_indices_cache_file_name)
+ and load_from_cache_file
+ ):
+ logger.info(
+ f"Loading cached split indices for dataset at {train_indices_cache_file_name} and {test_indices_cache_file_name}"
+ )
+ return DatasetDict(
+ {
+ "train": self._new_dataset_with_indices(
+ fingerprint=train_new_fingerprint, indices_cache_file_name=train_indices_cache_file_name
+ ),
+ "test": self._new_dataset_with_indices(
+ fingerprint=test_new_fingerprint, indices_cache_file_name=test_indices_cache_file_name
+ ),
+ }
+ )
+ if not shuffle:
+ if stratify_by_column is not None:
+ raise ValueError("Stratified train/test split is not implemented for `shuffle=False`")
+ train_indices = np.arange(n_train)
+ test_indices = np.arange(n_train, n_train + n_test)
+ else:
+ # stratified partition
+ if stratify_by_column is not None:
+ if stratify_by_column not in self._info.features.keys():
+ raise ValueError(f"Key {stratify_by_column} not found in {self._info.features.keys()}")
+ if not isinstance(self._info.features[stratify_by_column], ClassLabel):
+ raise ValueError(
+ f"Stratifying by column is only supported for {ClassLabel.__name__} column, and column {stratify_by_column} is {type(self._info.features[stratify_by_column]).__name__}."
+ )
+ try:
+ train_indices, test_indices = next(
+ stratified_shuffle_split_generate_indices(
+ self.with_format("numpy")[stratify_by_column], n_train, n_test, rng=generator
+ )
+ )
+ except Exception as error:
+ if str(error) == "Minimum class count error":
+ raise ValueError(
+ f"The least populated class in {stratify_by_column} column has only 1"
+ " member, which is too few. The minimum"
+ " number of groups for any class cannot"
+ " be less than 2."
+ )
+ else:
+ raise error
+
+ # random partition
+ else:
+ permutation = generator.permutation(len(self))
+ test_indices = permutation[:n_test]
+ train_indices = permutation[n_test : (n_test + n_train)]
+
+ train_split = self.select(
+ indices=train_indices,
+ keep_in_memory=keep_in_memory,
+ indices_cache_file_name=train_indices_cache_file_name,
+ writer_batch_size=writer_batch_size,
+ new_fingerprint=train_new_fingerprint,
+ )
+ test_split = self.select(
+ indices=test_indices,
+ keep_in_memory=keep_in_memory,
+ indices_cache_file_name=test_indices_cache_file_name,
+ writer_batch_size=writer_batch_size,
+ new_fingerprint=test_new_fingerprint,
+ )
+
+ return DatasetDict({"train": train_split, "test": test_split})
+
+ def shard(
+ self,
+ num_shards: int,
+ index: int,
+ contiguous: bool = False,
+ keep_in_memory: bool = False,
+ indices_cache_file_name: Optional[str] = None,
+ writer_batch_size: Optional[int] = 1000,
+ ) -> "Dataset":
+ """Return the `index`-nth shard from dataset split into `num_shards` pieces.
+
+ This shards deterministically. `dset.shard(n, i)` will contain all elements of dset whose
+ index mod `n = i`.
+
+ `dset.shard(n, i, contiguous=True)` will instead split dset into contiguous chunks,
+ so it can be easily concatenated back together after processing. If `len(dset) % n == l`, then the
+ first `l` shards will have length `(len(dset) // n) + 1`, and the remaining shards will have length `(len(dset) // n)`.
+ `datasets.concatenate_datasets([dset.shard(n, i, contiguous=True) for i in range(n)])` will return
+ a dataset with the same order as the original.
+
+ Be sure to shard before using any randomizing operator (such as `shuffle`).
+ It is best if the shard operator is used early in the dataset pipeline.
+
+
+ Args:
+ num_shards (`int`):
+ How many shards to split the dataset into.
+ index (`int`):
+ Which shard to select and return.
+ contiguous (`bool`, defaults to `False`):
+ Whether to select contiguous blocks of indices for shards.
+ keep_in_memory (`bool`, defaults to `False`):
+ Keep the dataset in memory instead of writing it to a cache file.
+ indices_cache_file_name (`str`, *optional*):
+ Provide the name of a path for the cache file. It is used to store the
+ indices of each shard instead of the automatically generated cache file name.
+ writer_batch_size (`int`, defaults to `1000`):
+ Number of rows per write operation for the cache file writer.
+ This value is a good trade-off between memory usage during the processing, and processing speed.
+ A higher value makes the processing do fewer lookups, a lower value consumes less temporary memory while running `map`.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> ds
+ Dataset({
+ features: ['text', 'label'],
+ num_rows: 1066
+ })
+ >>> ds.shard(num_shards=2, index=0)
+ Dataset({
+ features: ['text', 'label'],
+ num_rows: 533
+ })
+ ```
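+
+ As a minimal sketch of the contiguous mode described above, the shards can be
+ recombined in the original order with `concatenate_datasets`:
+
+ ```py
+ >>> from datasets import concatenate_datasets
+ >>> shards = [ds.shard(num_shards=4, index=i, contiguous=True) for i in range(4)]
+ >>> reassembled = concatenate_datasets(shards)
+ >>> len(reassembled) == len(ds)
+ True
+ ```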
+ """
+ if not 0 <= index < num_shards:
+ raise ValueError("index should be in [0, num_shards-1]")
+ if contiguous:
+ div = len(self) // num_shards
+ mod = len(self) % num_shards
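+ # Spread the remainder over the first `mod` shards so shard sizes differ by at most one row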
+ start = div * index + min(index, mod)
+ end = start + div + (1 if index < mod else 0)
+ indices = range(start, end)
+ else:
+ indices = np.arange(index, len(self), num_shards)
+
+ return self.select(
+ indices=indices,
+ keep_in_memory=keep_in_memory,
+ indices_cache_file_name=indices_cache_file_name,
+ writer_batch_size=writer_batch_size,
+ )
+
+ @deprecated()
+ def export(
+ self,
+ filename: str,
+ format: str = "tfrecord",
+ ):
+ """Writes the Arrow dataset to a TFRecord file.
+
+ The dataset must already be in numpy format. The records will be written with
+ keys from `dataset._format_columns`.
+
+ Args:
+ filename (`str`): The filename, including the `.tfrecord` extension, to write to.
+ format (`str`, *optional*, defaults to `"tfrecord"`): The type of output file. Currently this is a no-op, as
+ TFRecords are the only option. This enables a more flexible function signature later.
+ """
+ try:
+ import tensorflow as tf # noqa: F401
+ except ImportError:
+ logger.error("Tensorflow needs to be installed to be able to return Tensorflow tensors.")
+
+ # From https://www.tensorflow.org/tutorials/load_data/tfrecord
+ def _bytes_feature(values):
+ """Returns a bytes_list from a list of string / byte."""
+ return tf.train.Feature(bytes_list=tf.train.BytesList(value=values))
+
+ def _float_feature(values):
+ """Returns a float_list from a list of float / double."""
+ return tf.train.Feature(float_list=tf.train.FloatList(value=values))
+
+ def _int64_feature(values):
+ """Returns an int64_list from a list of bool / enum / int / uint."""
+ return tf.train.Feature(int64_list=tf.train.Int64List(value=values))
+
+ def _feature(values: Union[float, int, str, np.ndarray, list]) -> "tf.train.Feature":
+ """Typechecks `values` and returns the corresponding tf.train.Feature."""
+ if isinstance(values, list):
+ if values and isinstance(values[0], str):
+ return _bytes_feature([v.encode() for v in values])
+ else:
+ raise ValueError(f"values={values} is empty or contains items that cannot be serialized")
+ elif isinstance(values, np.ndarray):
+ if values.dtype == np.dtype(float):
+ return _float_feature(values)
+ elif values.dtype == np.int64:
+ return _int64_feature(values)
+ elif values.dtype == np.dtype(str) or (
+ values.dtype == np.dtype(object) and len(values) > 0 and isinstance(values[0], str)
+ ):
+ return _bytes_feature([v.encode() for v in values])
+ else:
+ raise ValueError(
+ f"values={values} is empty or is an np.ndarray with items of dtype {values[0].dtype}, which cannot be serialized"
+ )
+ elif hasattr(values, "dtype"):
+ if np.issubdtype(values.dtype, np.floating):
+ return _float_feature([values.item()])
+ elif np.issubdtype(values.dtype, np.integer):
+ return _int64_feature([values.item()])
+ elif np.issubdtype(values.dtype, str):
+ return _bytes_feature([values.item().encode()])
+ else:
+ raise ValueError(f"values={values} has dtype {values.dtype}, which cannot be serialized")
+ else:
+ raise ValueError(f"values={values} are not numpy objects or strings, and so cannot be serialized")
+
+ def serialize_example(ex):
+ feature = {key: _feature(value) for key, value in ex.items()}
+ example_proto = tf.train.Example(features=tf.train.Features(feature=feature))
+ return example_proto.SerializeToString()
+
+ def tf_serialize_example(ex):
+ tf_string = tf.py_function(serialize_example, (ex,), tf.string)
+ return tf.reshape(tf_string, ())
+
+ def generator():
+ for ex in self:
+ yield serialize_example(ex)
+
+ if self._format_type != "numpy":
+ raise ValueError("Dataset format must be numpy before exporting")
+ if not filename.endswith(".tfrecord"):
+ raise ValueError("filename {filename} must end with .tfrecord")
+ tf_dataset = tf.data.Dataset.from_generator(generator, output_types=tf.string, output_shapes=())
+ writer = tf.data.experimental.TFRecordWriter(filename)
+ logger.info(f"Writing TFRecord to {filename}")
+ writer.write(tf_dataset)
+ logger.info(f"Finished writing TFRecord to {filename}")
+ self = None # delete the dataset reference used by tf_dataset
+
+ def to_csv(
+ self,
+ path_or_buf: Union[PathLike, BinaryIO],
+ batch_size: Optional[int] = None,
+ num_proc: Optional[int] = None,
+ **to_csv_kwargs,
+ ) -> int:
+ """Exports the dataset to csv
+
+ Args:
+ path_or_buf (`PathLike` or `FileOrBuffer`):
+ Either a path to a file or a BinaryIO.
+ batch_size (`int`, *optional*):
+ Size of the batch to load in memory and write at once.
+ Defaults to `datasets.config.DEFAULT_MAX_BATCH_SIZE`.
+ num_proc (`int`, *optional*):
+ Number of processes for multiprocessing. By default it doesn't
+ use multiprocessing. `batch_size` in this case defaults to
+ `datasets.config.DEFAULT_MAX_BATCH_SIZE` but feel free to make it 5x or 10x of the default
+ value if you have sufficient compute power.
+ **to_csv_kwargs (additional keyword arguments):
+ Parameters to pass to pandas's [`pandas.DataFrame.to_csv`](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.to_csv.html).
+
+
+
+ Now, `index` defaults to `False` if not specified.
+
+ If you would like to write the index, pass `index=True` and also set a name for the index column by
+ passing `index_label`.
+
+
+
+ Returns:
+ `int`: The number of characters or bytes written.
+
+ Example:
+
+ ```py
+ >>> ds.to_csv("path/to/dataset/directory")
+ ```
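+
+ A sketch combining multiprocessing with a pandas keyword argument
+ (the `;` separator below is only an illustration):
+
+ ```py
+ >>> ds.to_csv("path/to/file.csv", num_proc=4, sep=";")
+ ```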
+ """
+ # Dynamic import to avoid circular dependency
+ from .io.csv import CsvDatasetWriter
+
+ return CsvDatasetWriter(self, path_or_buf, batch_size=batch_size, num_proc=num_proc, **to_csv_kwargs).write()
+
+ def to_dict(self, batch_size: Optional[int] = None, batched="deprecated") -> Union[dict, Iterator[dict]]:
+ """Returns the dataset as a Python dict. Can also return a generator for large datasets.
+
+ Args:
+ batched (`bool`):
+ Set to `True` to return a generator that yields the dataset as batches
+ of `batch_size` rows. Defaults to `False` (returns the whole dataset at once).
+
+
+
+ Deprecated in 2.11.0: use `.iter(batch_size=batch_size)` followed by `.to_dict()` on the individual batches instead.
+
+
+
+ batch_size (`int`, *optional*): The size (number of rows) of the batches if `batched` is `True`.
+ Defaults to `datasets.config.DEFAULT_MAX_BATCH_SIZE`.
+
+ Returns:
+ `dict` or `Iterator[dict]`
+
+ Example:
+
+ ```py
+ >>> ds.to_dict()
+ ```
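+
+ A minimal sketch of the batched alternative mentioned above, assuming the whole
+ dataset does not fit comfortably in memory:
+
+ ```py
+ >>> for batch in ds.iter(batch_size=1000):
+ ...     pass  # each `batch` is a dict mapping column names to up to 1000 values
+ ```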
+ """
+ if batched != "deprecated":
+ warnings.warn(
+ "'batched' was deprecated in version 2.11.0 and will be removed in version 3.0.0. Use `.iter(batch_size=batch_size)` followed by `.to_dict()` on the individual batches instead.",
+ FutureWarning,
+ )
+ else:
+ batched = False
+
+ if not batched:
+ return query_table(
+ table=self._data,
+ key=slice(0, len(self)),
+ indices=self._indices,
+ ).to_pydict()
+ else:
+ batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
+ return (
+ query_table(
+ table=self._data,
+ key=slice(offset, offset + batch_size),
+ indices=self._indices,
+ ).to_pydict()
+ for offset in range(0, len(self), batch_size)
+ )
+
+ def to_list(self) -> list:
+ """Returns the dataset as a Python list.
+
+ Returns:
+ `list`
+
+ Example:
+
+ ```py
+ >>> ds.to_list()
+ ```
+ """
+ return query_table(
+ table=self._data,
+ key=slice(0, len(self)),
+ indices=self._indices,
+ ).to_pylist()
+
+ def to_json(
+ self,
+ path_or_buf: Union[PathLike, BinaryIO],
+ batch_size: Optional[int] = None,
+ num_proc: Optional[int] = None,
+ **to_json_kwargs,
+ ) -> int:
+ """Export the dataset to JSON Lines or JSON.
+
+ Args:
+ path_or_buf (`PathLike` or `FileOrBuffer`):
+ Either a path to a file or a BinaryIO.
+ batch_size (`int`, *optional*):
+ Size of the batch to load in memory and write at once.
+ Defaults to `datasets.config.DEFAULT_MAX_BATCH_SIZE`.
+ num_proc (`int`, *optional*):
+ Number of processes for multiprocessing. By default it doesn't
+ use multiprocessing. `batch_size` in this case defaults to
+ `datasets.config.DEFAULT_MAX_BATCH_SIZE` but feel free to make it 5x or 10x of the default
+ value if you have sufficient compute power.
+ **to_json_kwargs (additional keyword arguments):
+ Parameters to pass to pandas's [`pandas.DataFrame.to_json`](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.to_json.html).
+
+
+
+ Now, `index` defaults to `False` if `orient` is `"split"` or `"table"`.
+
+ If you would like to write the index, pass `index=True`.
+
+
+
+ Returns:
+ `int`: The number of characters or bytes written.
+
+ Example:
+
+ ```py
+ >>> ds.to_json("path/to/dataset/directory")
+ ```
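+
+ A sketch using multiprocessing; additional keyword arguments are forwarded to
+ `pandas.DataFrame.to_json` as documented above:
+
+ ```py
+ >>> ds.to_json("path/to/file.jsonl", num_proc=2)
+ ```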
+ """
+ # Dynamic import to avoid circular dependency
+ from .io.json import JsonDatasetWriter
+
+ return JsonDatasetWriter(self, path_or_buf, batch_size=batch_size, num_proc=num_proc, **to_json_kwargs).write()
+
+ def to_pandas(
+ self, batch_size: Optional[int] = None, batched: bool = False
+ ) -> Union[pd.DataFrame, Iterator[pd.DataFrame]]:
+ """Returns the dataset as a `pandas.DataFrame`. Can also return a generator for large datasets.
+
+ Args:
+ batched (`bool`):
+ Set to `True` to return a generator that yields the dataset as batches
+ of `batch_size` rows. Defaults to `False` (returns the whole dataset at once).
+ batch_size (`int`, *optional*):
+ The size (number of rows) of the batches if `batched` is `True`.
+ Defaults to `datasets.config.DEFAULT_MAX_BATCH_SIZE`.
+
+ Returns:
+ `pandas.DataFrame` or `Iterator[pandas.DataFrame]`
+
+ Example:
+
+ ```py
+ >>> ds.to_pandas()
+ ```
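+
+ For large datasets, a generator of DataFrames can be requested instead
+ (sketch using the `batched` and `batch_size` arguments documented above):
+
+ ```py
+ >>> for df in ds.to_pandas(batch_size=1000, batched=True):
+ ...     pass  # each `df` is a pandas.DataFrame with up to 1000 rows
+ ```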
+ """
+ if not batched:
+ return query_table(
+ table=self._data,
+ key=slice(0, len(self)),
+ indices=self._indices,
+ ).to_pandas(types_mapper=pandas_types_mapper)
+ else:
+ batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
+ return (
+ query_table(
+ table=self._data,
+ key=slice(offset, offset + batch_size),
+ indices=self._indices,
+ ).to_pandas(types_mapper=pandas_types_mapper)
+ for offset in range(0, len(self), batch_size)
+ )
+
+ def to_parquet(
+ self,
+ path_or_buf: Union[PathLike, BinaryIO],
+ batch_size: Optional[int] = None,
+ **parquet_writer_kwargs,
+ ) -> int:
+ """Exports the dataset to parquet
+
+ Args:
+ path_or_buf (`PathLike` or `FileOrBuffer`):
+ Either a path to a file or a BinaryIO.
+ batch_size (`int`, *optional*):
+ Size of the batch to load in memory and write at once.
+ Defaults to `datasets.config.DEFAULT_MAX_BATCH_SIZE`.
+ **parquet_writer_kwargs (additional keyword arguments):
+ Parameters to pass to PyArrow's `pyarrow.parquet.ParquetWriter`.
+
+ Returns:
+ `int`: The number of characters or bytes written.
+
+ Example:
+
+ ```py
+ >>> ds.to_parquet("path/to/dataset/directory")
+ ```
+ """
+ # Dynamic import to avoid circular dependency
+ from .io.parquet import ParquetDatasetWriter
+
+ return ParquetDatasetWriter(self, path_or_buf, batch_size=batch_size, **parquet_writer_kwargs).write()
+
+ def to_sql(
+ self,
+ name: str,
+ con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
+ batch_size: Optional[int] = None,
+ **sql_writer_kwargs,
+ ) -> int:
+ """Exports the dataset to a SQL database.
+
+ Args:
+ name (`str`):
+ Name of SQL table.
+ con (`str` or `sqlite3.Connection` or `sqlalchemy.engine.Connection` or `sqlalchemy.engine.Engine`):
+ A [URI string](https://docs.sqlalchemy.org/en/13/core/engines.html#database-urls) or a SQLite3/SQLAlchemy connection object used to write to a database.
+ batch_size (`int`, *optional*):
+ Size of the batch to load in memory and write at once.
+ Defaults to `datasets.config.DEFAULT_MAX_BATCH_SIZE`.
+ **sql_writer_kwargs (additional keyword arguments):
+ Parameters to pass to pandas's [`pandas.DataFrame.to_sql`](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.to_sql.html).
+
+
+
+ Now, `index` defaults to `False` if not specified.
+
+ If you would like to write the index, pass `index=True` and also set a name for the index column by
+ passing `index_label`.
+
+
+
+ Returns:
+ `int`: The number of records written.
+
+ Example:
+
+ ```py
+ >>> # con provided as a connection URI string
+ >>> ds.to_sql("data", "sqlite:///my_own_db.sql")
+ >>> # con provided as a sqlite3 connection object
+ >>> import sqlite3
+ >>> con = sqlite3.connect("my_own_db.sql")
+ >>> with con:
+ ... ds.to_sql("data", con)
+ ```
+ """
+ # Dynamic import to avoid circular dependency
+ from .io.sql import SqlDatasetWriter
+
+ return SqlDatasetWriter(self, name, con, batch_size=batch_size, **sql_writer_kwargs).write()
+
+ def _estimate_nbytes(self) -> int:
+ dataset_nbytes = self.data.nbytes
+
+ # Find decodable columns, because if there are any, we need to
+ # adjust the dataset size computation (needed for sharding) to account for possible external files
+ decodable_columns = [
+ k for k, v in self._info.features.items() if require_decoding(v, ignore_decode_attribute=True)
+ ]
+
+ if decodable_columns:
+ # Approximate the space needed to store the bytes from the external files by analyzing the first 1000 examples
+ extra_nbytes = 0
+
+ def extra_nbytes_visitor(array, feature):
+ nonlocal extra_nbytes
+ if isinstance(feature, (Audio, Image)):
+ for x in array.to_pylist():
+ if x is not None and x["bytes"] is None and x["path"] is not None:
+ size = xgetsize(x["path"])
+ extra_nbytes += size
+ extra_nbytes -= array.field("path").nbytes
+
+ table = self.with_format("arrow")[:1000]
+ table_visitor(table, extra_nbytes_visitor)
+
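+ # Extrapolate the external-file overhead measured on the first 1000 examples to the whole table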
+ extra_nbytes = extra_nbytes * len(self.data) / len(table)
+ dataset_nbytes = dataset_nbytes + extra_nbytes
+
+ if self._indices is not None:
+ dataset_nbytes = dataset_nbytes * len(self._indices) / len(self.data)
+ return dataset_nbytes
+
+ @staticmethod
+ def _generate_tables_from_shards(shards: List["Dataset"], batch_size: int):
+ for shard_idx, shard in enumerate(shards):
+ for pa_table in shard.with_format("arrow").iter(batch_size):
+ yield shard_idx, pa_table
+
+ @staticmethod
+ def _generate_tables_from_cache_file(filename: str):
+ for batch_idx, batch in enumerate(_memory_mapped_record_batch_reader_from_file(filename)):
+ yield batch_idx, pa.Table.from_batches([batch])
+
+ def to_iterable_dataset(self, num_shards: Optional[int] = 1) -> "IterableDataset":
+ """Get an [`datasets.IterableDataset`] from a map-style [`datasets.Dataset`].
+ This is equivalent to loading a dataset in streaming mode with [`datasets.load_dataset`], but much faster since the data is streamed from local files.
+
+ Contrary to map-style datasets, iterable datasets are lazy and can only be iterated over (e.g. using a for loop).
+ Since they are read sequentially in training loops, iterable datasets are much faster than map-style datasets.
+ All the transformations applied to iterable datasets like filtering or processing are done on-the-fly when you start iterating over the dataset.
+
+ Still, it is possible to shuffle an iterable dataset using [`datasets.IterableDataset.shuffle`].
+ This is a fast approximate shuffling that works best if you have multiple shards and if you specify a buffer size that is big enough.
+
+ To get the best speed performance, make sure your dataset doesn't have an indices mapping.
+ If this is the case, the data are not read contiguously, which can be slow sometimes.
+ You can use `ds = ds.flatten_indices()` to write your dataset in contiguous chunks of data and have optimal speed before switching to an iterable dataset.
+
+ Args:
+ num_shards (`int`, defaults to `1`):
+ Number of shards to define when instantiating the iterable dataset. This is especially useful for big datasets to be able to shuffle properly,
+ and also to enable fast parallel loading using a PyTorch DataLoader or in distributed setups for example.
+ Shards are defined using [`datasets.Dataset.shard`]: it simply slices the data without writing anything on disk.
+
+ Returns:
+ [`datasets.IterableDataset`]
+
+ Example:
+
+ Basic usage:
+ ```python
+ >>> ids = ds.to_iterable_dataset()
+ >>> for example in ids:
+ ... pass
+ ```
+
+ With lazy filtering and processing:
+ ```python
+ >>> ids = ds.to_iterable_dataset()
+ >>> ids = ids.filter(filter_fn).map(process_fn) # will filter and process on-the-fly when you start iterating over the iterable dataset
+ >>> for example in ids:
+ ... pass
+ ```
+
+ With sharding to enable efficient shuffling:
+ ```python
+ >>> ids = ds.to_iterable_dataset(num_shards=64) # the dataset is split into 64 shards to be iterated over
+ >>> ids = ids.shuffle(buffer_size=10_000) # will shuffle the shards order and use a shuffle buffer for fast approximate shuffling when you start iterating
+ >>> for example in ids:
+ ... pass
+ ```
+
+ With a PyTorch DataLoader:
+ ```python
+ >>> import torch
+ >>> ids = ds.to_iterable_dataset(num_shards=64)
+ >>> ids = ids.filter(filter_fn).map(process_fn)
+ >>> dataloader = torch.utils.data.DataLoader(ids, num_workers=4) # will assign 64 / 4 = 16 shards to each worker to load, filter and process when you start iterating
+ >>> for example in ids:
+ ... pass
+ ```
+
+ With a PyTorch DataLoader and shuffling:
+ ```python
+ >>> import torch
+ >>> ids = ds.to_iterable_dataset(num_shards=64)
+ >>> ids = ids.shuffle(buffer_size=10_000) # will shuffle the shards order and use a shuffle buffer when you start iterating
+ >>> dataloader = torch.utils.data.DataLoader(ids, num_workers=4) # will assign 64 / 4 = 16 shards from the shuffled list of shards to each worker when you start iterating
+ >>> for example in ids:
+ ... pass
+ ```
+
+ In a distributed setup like PyTorch DDP with a PyTorch DataLoader and shuffling
+ ```python
+ >>> from datasets.distributed import split_dataset_by_node
+ >>> ids = ds.to_iterable_dataset(num_shards=512)
+ >>> ids = ids.shuffle(buffer_size=10_000) # will shuffle the shards order and use a shuffle buffer when you start iterating
+ >>> ids = split_dataset_by_node(ids, world_size=8, rank=0) # will keep only 512 / 8 = 64 shards from the shuffled lists of shards when you start iterating
+ >>> dataloader = torch.utils.data.DataLoader(ids, num_workers=4) # will assign 64 / 4 = 16 shards from this node's list of shards to each worker when you start iterating
+ >>> for example in ids:
+ ... pass
+ ```
+
+ With shuffling and multiple epochs:
+ ```python
+ >>> ids = ds.to_iterable_dataset(num_shards=64)
+ >>> ids = ids.shuffle(buffer_size=10_000, seed=42) # will shuffle the shards order and use a shuffle buffer when you start iterating
+ >>> for epoch in range(n_epochs):
+ ... ids.set_epoch(epoch) # will use effective_seed = seed + epoch to shuffle the shards and for the shuffle buffer when you start iterating
+ ... for example in ids:
+ ... pass
+ ```
+ Feel free to also use [`IterableDataset.set_epoch`] when using a PyTorch DataLoader or in distributed setups.
+ """
+ from .iterable_dataset import ArrowExamplesIterable, IterableDataset
+
+ if self._format_type is not None:
+ raise NotImplementedError(
+ "Converting a formatted dataset to a formatted iterable dataset is not implemented yet. Please run `my_dataset = my_dataset.with_format(None)` before calling to_iterable_dataset"
+ )
+ if num_shards > len(self):
+ raise ValueError(
+ f"Unable to shard a dataset of size {len(self)} into {num_shards} shards (the number of shards exceeds the number of samples)."
+ )
+ if self._indices is not None:
+ logger.info(
+ "Converting an Arrow dataset to iterable but it has an indices mapping that can make it slower. "
+ "You can use `ds = ds.flatten_indices()` to write your dataset in contiguous chunks of data and have optimal speed."
+ )
+ shards = (
+ [copy.deepcopy(self)]
+ if num_shards == 1
+ else [
+ self.shard(num_shards=num_shards, index=shard_idx, contiguous=True) for shard_idx in range(num_shards)
+ ]
+ )
+ ex_iterable = ArrowExamplesIterable(
+ Dataset._generate_tables_from_shards,
+ kwargs={"shards": shards, "batch_size": config.DEFAULT_MAX_BATCH_SIZE},
+ )
+ return IterableDataset(ex_iterable, info=DatasetInfo(features=self.features))
+
+ def _push_parquet_shards_to_hub(
+ self,
+ repo_id: str,
+ data_dir: str = "data",
+ split: Optional[str] = None,
+ token: Optional[str] = None,
+ revision: Optional[str] = None,
+ create_pr: Optional[bool] = False,
+ max_shard_size: Optional[Union[int, str]] = None,
+ num_shards: Optional[int] = None,
+ embed_external_files: bool = True,
+ ) -> Tuple[List[CommitOperationAdd], int, int]:
+ """Pushes the dataset shards as Parquet files to the hub.
+
+ Returns:
+ additions (`List[CommitOperation]`): list of the `CommitOperationAdd` of the uploaded shards
+ uploaded_size (`int`): number of uploaded bytes to the repository
+ dataset_nbytes (`int`): approximate size in bytes of the uploaded dataset after uncompression
+ """
+ # Find decodable columns, because if there are any, we need to:
+ # embed the bytes from the files in the shards
+ decodable_columns = (
+ [k for k, v in self._info.features.items() if require_decoding(v, ignore_decode_attribute=True)]
+ if embed_external_files
+ else []
+ )
+
+ dataset_nbytes = self._estimate_nbytes()
+
+ if num_shards is None:
+ max_shard_size = convert_file_size_to_int(max_shard_size or config.MAX_SHARD_SIZE)
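+ # Pick enough shards so that each one stays below ~max_shard_size (and always at least one shard)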
+ num_shards = int(dataset_nbytes / max_shard_size) + 1
+ num_shards = max(num_shards, 1)
+
+ shards = (self.shard(num_shards=num_shards, index=i, contiguous=True) for i in range(num_shards))
+
+ if decodable_columns:
+
+ def shards_with_embedded_external_files(shards):
+ for shard in shards:
+ format = shard.format
+ shard = shard.with_format("arrow")
+ shard = shard.map(
+ embed_table_storage,
+ batched=True,
+ batch_size=1000,
+ keep_in_memory=True,
+ )
+ shard = shard.with_format(**format)
+ yield shard
+
+ shards = shards_with_embedded_external_files(shards)
+
+ api = HfApi(endpoint=config.HF_ENDPOINT, token=token)
+
+ uploaded_size = 0
+ additions = []
+ for index, shard in hf_tqdm(
+ enumerate(shards),
+ desc="Uploading the dataset shards",
+ total=num_shards,
+ ):
+ shard_path_in_repo = f"{data_dir}/{split}-{index:05d}-of-{num_shards:05d}.parquet"
+ buffer = BytesIO()
+ shard.to_parquet(buffer)
+ uploaded_size += buffer.tell()
+ shard_addition = CommitOperationAdd(path_in_repo=shard_path_in_repo, path_or_fileobj=buffer)
+ preupload_lfs_files(
+ api,
+ repo_id=repo_id,
+ additions=[shard_addition],
+ token=token,
+ repo_type="dataset",
+ revision=revision,
+ create_pr=create_pr,
+ )
+ additions.append(shard_addition)
+
+ return additions, uploaded_size, dataset_nbytes
+
+ def push_to_hub(
+ self,
+ repo_id: str,
+ config_name: str = "default",
+ set_default: Optional[bool] = None,
+ split: Optional[str] = None,
+ data_dir: Optional[str] = None,
+ commit_message: Optional[str] = None,
+ commit_description: Optional[str] = None,
+ private: Optional[bool] = False,
+ token: Optional[str] = None,
+ revision: Optional[str] = None,
+ branch="deprecated",
+ create_pr: Optional[bool] = False,
+ max_shard_size: Optional[Union[int, str]] = None,
+ num_shards: Optional[int] = None,
+ embed_external_files: bool = True,
+ ) -> CommitInfo:
+ """Pushes the dataset to the hub as a Parquet dataset.
+ The dataset is pushed using HTTP requests and does not require git or git-lfs to be installed.
+
+ The resulting Parquet files are self-contained by default. If your dataset contains [`Image`] or [`Audio`]
+ data, the Parquet files will store the bytes of your images or audio files.
+ You can disable this by setting `embed_external_files` to `False`.
+
+ Args:
+ repo_id (`str`):
+ The ID of the repository to push to in the following format: `<user>/<dataset_name>` or
+ `<org>/<dataset_name>`. Also accepts `<dataset_name>`, which will default to the namespace
+ of the logged-in user.
+ config_name (`str`, defaults to "default"):
+ The configuration name (or subset) of a dataset. Defaults to "default".
+ set_default (`bool`, *optional*):
+ Whether to set this configuration as the default one. Otherwise, the default configuration is the one
+ named "default".
+ split (`str`, *optional*):
+ The name of the split that will be given to that dataset. Defaults to `self.split`.
+ data_dir (`str`, *optional*):
+ Directory name that will contain the uploaded data files. Defaults to the `config_name` if different
+ from "default", else "data".
+
+
+ commit_message (`str`, *optional*):
+ Message to commit while pushing. Will default to `"Upload dataset"`.
+ commit_description (`str`, *optional*):
+ Description of the commit that will be created.
+ Additionally, description of the PR if a PR is created (`create_pr` is True).
+
+
+ private (`bool`, *optional*, defaults to `False`):
+ Whether the dataset repository should be set to private or not. Only affects repository creation:
+ a repository that already exists will not be affected by that parameter.
+ token (`str`, *optional*):
+ An optional authentication token for the Hugging Face Hub. If no token is passed, will default
+ to the token saved locally when logging in with `huggingface-cli login`. Will raise an error
+ if no token is passed and the user is not logged-in.
+ revision (`str`, *optional*):
+ Branch to push the uploaded files to. Defaults to the `"main"` branch.
+
+
+ branch (`str`, *optional*):
+ The git branch on which to push the dataset. This defaults to the default branch as specified
+ in your repository, which defaults to `"main"`.
+
+
+
+ `branch` was deprecated in favor of `revision` in version 2.15.0 and will be removed in 3.0.0.
+
+
+ create_pr (`bool`, *optional*, defaults to `False`):
+ Whether to create a PR with the uploaded files or directly commit.
+
+
+ max_shard_size (`int` or `str`, *optional*, defaults to `"500MB"`):
+ The maximum size of the dataset shards to be uploaded to the hub. If expressed as a string, needs to be digits followed by
+ a unit (like `"5MB"`).
+ num_shards (`int`, *optional*):
+ Number of shards to write. By default, the number of shards depends on `max_shard_size`.
+
+
+ embed_external_files (`bool`, defaults to `True`):
+ Whether to embed file bytes in the shards.
+ In particular, this will do the following before the push for the fields of type:
+
+ - [`Audio`] and [`Image`]: remove local path information and embed file content in the Parquet files.
+
+ Return:
+ huggingface_hub.CommitInfo
+
+ Example:
+
+ ```python
+ >>> dataset.push_to_hub("/")
+ >>> dataset_dict.push_to_hub("/", private=True)
+ >>> dataset.push_to_hub("/", max_shard_size="1GB")
+ >>> dataset.push_to_hub("/", num_shards=1024)
+ ```
+
+ If your dataset has multiple splits (e.g. train/validation/test):
+
+ ```python
+ >>> train_dataset.push_to_hub("/", split="train")
+ >>> val_dataset.push_to_hub("/", split="validation")
+ >>> # later
+ >>> dataset = load_dataset("/")
+ >>> train_dataset = dataset["train"]
+ >>> val_dataset = dataset["validation"]
+ ```
+
+ If you want to add a new configuration (or subset) to a dataset (e.g. if the dataset has multiple tasks/versions/languages):
+
+ ```python
+ >>> english_dataset.push_to_hub("/", "en")
+ >>> french_dataset.push_to_hub("/", "fr")
+ >>> # later
+ >>> english_dataset = load_dataset("/", "en")
+ >>> french_dataset = load_dataset("/", "fr")
+ ```
+ """
+ if config_name == "data":
+ raise ValueError("`config_name` cannot be 'data'. Please, choose another name for configuration.")
+
+ if max_shard_size is not None and num_shards is not None:
+ raise ValueError(
+ "Failed to push_to_hub: please specify either max_shard_size or num_shards, but not both."
+ )
+
+ if split is None:
+ split = str(self.split) if self.split is not None else "train"
+
+ if not re.match(_split_re, split):
+ raise ValueError(f"Split name should match '{_split_re}' but got '{split}'.")
+
+ if branch != "deprecated":
+ warnings.warn(
+ "'branch' was deprecated in favor of 'revision' in version 2.15.0 and will be removed in 3.0.0.\n"
+ f"You can remove this warning by passing 'revision={branch}' instead.",
+ FutureWarning,
+ )
+ revision = branch
+
+ api = HfApi(endpoint=config.HF_ENDPOINT, token=token)
+
+ repo_url = api.create_repo(
+ repo_id,
+ token=token,
+ repo_type="dataset",
+ private=private,
+ exist_ok=True,
+ )
+ repo_id = repo_url.repo_id
+
+ if revision is not None:
+ api.create_branch(repo_id, branch=revision, token=token, repo_type="dataset", exist_ok=True)
+
+ if not data_dir:
+ data_dir = config_name if config_name != "default" else "data" # for backward compatibility
+
+ additions, uploaded_size, dataset_nbytes = self._push_parquet_shards_to_hub(
+ repo_id=repo_id,
+ data_dir=data_dir,
+ split=split,
+ token=token,
+ revision=revision,
+ max_shard_size=max_shard_size,
+ num_shards=num_shards,
+ create_pr=create_pr,
+ embed_external_files=embed_external_files,
+ )
+
+ # Check if the repo already has a README.md and/or a dataset_infos.json to update them with the new split info (size and pattern)
+ # and delete old split shards (if they exist)
+ repo_with_dataset_card, repo_with_dataset_infos = False, False
+ deletions, deleted_size = [], 0
+ repo_splits = [] # use a list to keep the order of the splits
+ repo_files_to_add = [addition.path_in_repo for addition in additions]
+ for repo_file in list_files_info(api, repo_id=repo_id, revision=revision, repo_type="dataset", token=token):
+ if repo_file.rfilename == config.REPOCARD_FILENAME:
+ repo_with_dataset_card = True
+ elif repo_file.rfilename == config.DATASETDICT_INFOS_FILENAME:
+ repo_with_dataset_infos = True
+ elif (
+ repo_file.rfilename.startswith(f"{data_dir}/{split}-") and repo_file.rfilename not in repo_files_to_add
+ ):
+ deletions.append(CommitOperationDelete(path_in_repo=repo_file.rfilename))
+ deleted_size += repo_file.size
+ elif fnmatch.fnmatch(
+ repo_file.rfilename, PUSH_TO_HUB_WITHOUT_METADATA_CONFIGS_SPLIT_PATTERN_SHARDED.replace("{split}", "*")
+ ):
+ repo_split = string_to_dict(
+ repo_file.rfilename,
+ glob_pattern_to_regex(PUSH_TO_HUB_WITHOUT_METADATA_CONFIGS_SPLIT_PATTERN_SHARDED),
+ )["split"]
+ if repo_split not in repo_splits:
+ repo_splits.append(repo_split)
+
+ organization, dataset_name = repo_id.split("/") if "/" in repo_id else (None, repo_id)
+ info_to_dump = self.info.copy()
+ info_to_dump.download_checksums = None
+ info_to_dump.download_size = uploaded_size
+ info_to_dump.dataset_size = dataset_nbytes
+ info_to_dump.size_in_bytes = uploaded_size + dataset_nbytes
+ info_to_dump.config_name = config_name
+ info_to_dump.splits = SplitDict(
+ {split: SplitInfo(split, num_bytes=dataset_nbytes, num_examples=len(self), dataset_name=dataset_name)}
+ )
+ # get the info from the README to update them
+ if repo_with_dataset_card:
+ dataset_card_path = api.hf_hub_download(
+ repo_id, config.REPOCARD_FILENAME, repo_type="dataset", revision=revision
+ )
+ dataset_card = DatasetCard.load(Path(dataset_card_path))
+ dataset_card_data = dataset_card.data
+ metadata_configs = MetadataConfigs.from_dataset_card_data(dataset_card_data)
+ dataset_infos: DatasetInfosDict = DatasetInfosDict.from_dataset_card_data(dataset_card_data)
+ if dataset_infos and config_name in dataset_infos:
+ repo_info = dataset_infos[config_name]
+ else:
+ repo_info = None
+ # get the deprecated dataset_infos.json to update them
+ elif repo_with_dataset_infos:
+ dataset_card = None
+ dataset_card_data = DatasetCardData()
+ metadata_configs = MetadataConfigs()
+ dataset_infos_path = api.hf_hub_download(
+ repo_id, config.DATASETDICT_INFOS_FILENAME, repo_type="dataset", revision=revision
+ )
+ with open(dataset_infos_path, encoding="utf-8") as f:
+ dataset_infos: dict = json.load(f)
+ dataset_info = dataset_infos.get(config_name, None) if dataset_infos else None
+ repo_info = DatasetInfo.from_dict(dataset_info) if dataset_info else None
+ else:
+ dataset_card = None
+ dataset_card_data = DatasetCardData()
+ metadata_configs = MetadataConfigs()
+ repo_info = None
+ # update the total info to dump from existing info
+ if repo_info is not None:
+ logger.info("Updating downloaded metadata with the new split.")
+ if repo_info.splits and list(repo_info.splits) != [split]:
+ if self._info.features != repo_info.features:
+ raise ValueError(
+ f"Features of the new split don't match the features of the existing splits on the hub: {self._info.features} != {repo_info.features}"
+ )
+
+ if split in repo_info.splits:
+ repo_info.download_size -= deleted_size
+ repo_info.dataset_size -= repo_info.splits.get(split, SplitInfo()).num_bytes or 0
+
+ repo_info.download_checksums = None
+ repo_info.download_size = (repo_info.download_size or 0) + uploaded_size
+ repo_info.dataset_size = (repo_info.dataset_size or 0) + dataset_nbytes
+ repo_info.size_in_bytes = repo_info.download_size + repo_info.dataset_size
+ repo_info.splits.pop(split, None)
+ repo_info.splits[split] = SplitInfo(
+ split, num_bytes=dataset_nbytes, num_examples=len(self), dataset_name=dataset_name
+ )
+ info_to_dump = repo_info
+ # create the metadata configs if it was uploaded with push_to_hub before metadata configs existed
+ if not metadata_configs and repo_splits:
+ default_metadata_configs_to_dump = {
+ "data_files": [{"split": split, "path": f"data/{split}-*"} for split in repo_splits]
+ }
+ MetadataConfigs({"default": default_metadata_configs_to_dump}).to_dataset_card_data(dataset_card_data)
+ # update the metadata configs
+ if config_name in metadata_configs:
+ metadata_config = metadata_configs[config_name]
+ if "data_files" in metadata_config:
+ data_files_to_dump = sanitize_patterns(metadata_config["data_files"])
+ else:
+ data_files_to_dump = {}
+ # add the new split
+ data_files_to_dump[split] = [f"{data_dir}/{split}-*"]
+ metadata_config_to_dump = {
+ "data_files": [
+ {
+ "split": _split,
+ "path": _pattern[0] if len(_pattern) == 1 else _pattern,
+ }
+ for _split, _pattern in data_files_to_dump.items()
+ ]
+ }
+ else:
+ metadata_config_to_dump = {"data_files": [{"split": split, "path": f"{data_dir}/{split}-*"}]}
+ if set_default and config_name != "default":
+ if metadata_configs:
+ default_config_name = metadata_configs.get_default_config_name()
+ if default_config_name == "default":
+ raise ValueError(
+ "There exists a configuration named 'default'. To set a different configuration as default, "
+ "rename the 'default' one first."
+ )
+ else:
+ _ = metadata_configs[default_config_name].pop("default")
+ metadata_config_to_dump["default"] = True
+ # push to the deprecated dataset_infos.json
+ if repo_with_dataset_infos:
+ dataset_infos_path = api.hf_hub_download(
+ repo_id, config.DATASETDICT_INFOS_FILENAME, repo_type="dataset", revision=revision
+ )
+ with open(dataset_infos_path, encoding="utf-8") as f:
+ dataset_infos: dict = json.load(f)
+ dataset_infos[config_name] = asdict(info_to_dump)
+ buffer = BytesIO()
+ buffer.write(json.dumps(dataset_infos, indent=4).encode("utf-8"))
+ additions.append(
+ CommitOperationAdd(path_in_repo=config.DATASETDICT_INFOS_FILENAME, path_or_fileobj=buffer)
+ )
+ # push to README
+ DatasetInfosDict({config_name: info_to_dump}).to_dataset_card_data(dataset_card_data)
+ MetadataConfigs({config_name: metadata_config_to_dump}).to_dataset_card_data(dataset_card_data)
+ dataset_card = DatasetCard(f"---\n{dataset_card_data}\n---\n") if dataset_card is None else dataset_card
+ additions.append(
+ CommitOperationAdd(path_in_repo=config.REPOCARD_FILENAME, path_or_fileobj=str(dataset_card).encode())
+ )
+
+ commit_message = commit_message if commit_message is not None else "Upload dataset"
+ if len(additions) <= config.UPLOADS_MAX_NUMBER_PER_COMMIT:
+ commit_info = api.create_commit(
+ repo_id,
+ operations=additions + deletions,
+ commit_message=commit_message,
+ commit_description=commit_description,
+ token=token,
+ repo_type="dataset",
+ revision=revision,
+ create_pr=create_pr,
+ )
+ else:
+ logger.info(
+ f"Number of files to upload is larger than {config.UPLOADS_MAX_NUMBER_PER_COMMIT}. Splitting the push into multiple commits."
+ )
+ num_commits = math.ceil(len(additions) / config.UPLOADS_MAX_NUMBER_PER_COMMIT)
+ for i in range(0, num_commits):
+ operations = additions[
+ i * config.UPLOADS_MAX_NUMBER_PER_COMMIT : (i + 1) * config.UPLOADS_MAX_NUMBER_PER_COMMIT
+ ] + (deletions if i == 0 else [])
+ commit_info = api.create_commit(
+ repo_id,
+ operations=operations,
+ commit_message=commit_message + f" (part {i:05d}-of-{num_commits:05d})",
+ commit_description=commit_description,
+ token=token,
+ repo_type="dataset",
+ revision=revision,
+ create_pr=create_pr,
+ )
+ logger.info(
+ f"Commit #{i+1} completed"
+ + (f" (still {num_commits - i - 1} to go)" if num_commits - i - 1 else "")
+ + "."
+ )
+ return commit_info
+
+ @transmit_format
+ @fingerprint_transform(inplace=False)
+ def add_column(self, name: str, column: Union[list, np.array], new_fingerprint: str):
+ """Add column to Dataset.
+
+
+
+ Args:
+ name (`str`):
+ Column name.
+ column (`list` or `np.array`):
+ Column data to be added.
+
+ Returns:
+ [`Dataset`]
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> more_text = ds["text"]
+ >>> ds.add_column(name="text_2", column=more_text)
+ Dataset({
+ features: ['text', 'label', 'text_2'],
+ num_rows: 1066
+ })
+ ```
+ """
+ column_table = InMemoryTable.from_pydict({name: column})
+ _check_column_names(self._data.column_names + column_table.column_names)
+ dataset = self.flatten_indices() if self._indices is not None else self
+ # Concatenate tables horizontally
+ table = concat_tables([dataset._data, column_table], axis=1)
+ # Update features
+ info = dataset.info.copy()
+ info.features.update(Features.from_arrow_schema(column_table.schema))
+ table = update_metadata_with_features(table, info.features)
+ return Dataset(table, info=info, split=self.split, indices_table=None, fingerprint=new_fingerprint)
+
+ def add_faiss_index(
+ self,
+ column: str,
+ index_name: Optional[str] = None,
+ device: Optional[int] = None,
+ string_factory: Optional[str] = None,
+ metric_type: Optional[int] = None,
+ custom_index: Optional["faiss.Index"] = None, # noqa: F821
+ batch_size: int = 1000,
+ train_size: Optional[int] = None,
+ faiss_verbose: bool = False,
+ dtype=np.float32,
+ ):
+ """Add a dense index using Faiss for fast retrieval.
+ By default the index is done over the vectors of the specified column.
+ You can specify `device` if you want to run it on GPU (`device` must be the GPU index).
+ You can find more information about Faiss here:
+
+ - For [string factory](https://github.com/facebookresearch/faiss/wiki/The-index-factory)
+
+ Args:
+ column (`str`):
+ The column of the vectors to add to the index.
+ index_name (`str`, *optional*):
+ The `index_name`/identifier of the index.
+ This is the `index_name` that is used to call [`~datasets.Dataset.get_nearest_examples`] or [`~datasets.Dataset.search`].
+ By default it corresponds to `column`.
+ device (`Union[int, List[int]]`, *optional*):
+ If positive integer, this is the index of the GPU to use. If negative integer, use all GPUs.
+ If a list of positive integers is passed in, run only on those GPUs. By default it uses the CPU.
+ string_factory (`str`, *optional*):
+ This is passed to the index factory of Faiss to create the index.
+ Default index class is `IndexFlat`.
+ metric_type (`int`, *optional*):
+ Type of metric. Ex: `faiss.METRIC_INNER_PRODUCT` or `faiss.METRIC_L2`.
+ custom_index (`faiss.Index`, *optional*):
+ Custom Faiss index that you already have instantiated and configured for your needs.
+ batch_size (`int`):
+ Size of the batch to use while adding vectors to the `FaissIndex`. Default value is `1000`.
+
+ train_size (`int`, *optional*):
+ If the index needs a training step, specifies how many vectors will be used to train the index.
+ faiss_verbose (`bool`, defaults to `False`):
+ Enable the verbosity of the Faiss index.
+ dtype (`data-type`):
+ The dtype of the numpy arrays that are indexed.
+ Default is `np.float32`.
+
+ Example:
+
+ ```python
+ >>> ds = datasets.load_dataset('crime_and_punish', split='train')
+ >>> ds_with_embeddings = ds.map(lambda example: {'embeddings': embed(example['line'])})
+ >>> ds_with_embeddings.add_faiss_index(column='embeddings')
+ >>> # query
+ >>> scores, retrieved_examples = ds_with_embeddings.get_nearest_examples('embeddings', embed('my new query'), k=10)
+ >>> # save index
+ >>> ds_with_embeddings.save_faiss_index('embeddings', 'my_index.faiss')
+
+ >>> ds = datasets.load_dataset('crime_and_punish', split='train')
+ >>> # load index
+ >>> ds.load_faiss_index('embeddings', 'my_index.faiss')
+ >>> # query
+ >>> scores, retrieved_examples = ds.get_nearest_examples('embeddings', embed('my new query'), k=10)
+ ```
+ """
+ with self.formatted_as(type="numpy", columns=[column], dtype=dtype):
+ super().add_faiss_index(
+ column=column,
+ index_name=index_name,
+ device=device,
+ string_factory=string_factory,
+ metric_type=metric_type,
+ custom_index=custom_index,
+ batch_size=batch_size,
+ train_size=train_size,
+ faiss_verbose=faiss_verbose,
+ )
+ return self
+
+ def add_faiss_index_from_external_arrays(
+ self,
+ external_arrays: np.array,
+ index_name: str,
+ device: Optional[int] = None,
+ string_factory: Optional[str] = None,
+ metric_type: Optional[int] = None,
+ custom_index: Optional["faiss.Index"] = None, # noqa: F821
+ batch_size: int = 1000,
+ train_size: Optional[int] = None,
+ faiss_verbose: bool = False,
+ dtype=np.float32,
+ ):
+ """Add a dense index using Faiss for fast retrieval.
+ The index is created using the vectors of `external_arrays`.
+ You can specify `device` if you want to run it on GPU (`device` must be the GPU index).
+ You can find more information about Faiss here:
+
+ - For [string factory](https://github.com/facebookresearch/faiss/wiki/The-index-factory)
+
+ Args:
+ external_arrays (`np.array`):
+ If you want to use arrays from outside the lib for the index, you can set `external_arrays`.
+ It will use `external_arrays` to create the Faiss index instead of the arrays in the given `column`.
+ index_name (`str`):
+ The `index_name`/identifier of the index.
+ This is the `index_name` that is used to call [`~datasets.Dataset.get_nearest_examples`] or [`~datasets.Dataset.search`].
+ device (Optional `Union[int, List[int]]`, *optional*):
+ If positive integer, this is the index of the GPU to use. If negative integer, use all GPUs.
+ If a list of positive integers is passed in, run only on those GPUs. By default it uses the CPU.
+ string_factory (`str`, *optional*):
+ This is passed to the index factory of Faiss to create the index.
+ Default index class is `IndexFlat`.
+ metric_type (`int`, *optional*):
+ Type of metric. Ex: `faiss.faiss.METRIC_INNER_PRODUCT` or `faiss.METRIC_L2`.
+ custom_index (`faiss.Index`, *optional*):
+ Custom Faiss index that you already have instantiated and configured for your needs.
+ batch_size (`int`, *optional*):
+ Size of the batch to use while adding vectors to the FaissIndex. Default value is 1000.
+
+ train_size (`int`, *optional*):
+ If the index needs a training step, specifies how many vectors will be used to train the index.
+ faiss_verbose (`bool`, defaults to False):
+ Enable the verbosity of the Faiss index.
+ dtype (`numpy.dtype`):
+ The dtype of the numpy arrays that are indexed. Default is np.float32.
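+
+ Example (illustrative sketch, assuming `ds` is an existing `Dataset`; the random arrays below stand in for real embeddings):
+
+ ```python
+ >>> import numpy as np
+ >>> external_arrays = np.random.random((len(ds), 128)).astype(np.float32)
+ >>> ds.add_faiss_index_from_external_arrays(external_arrays=external_arrays, index_name='embeddings')
+ >>> scores, retrieved_examples = ds.get_nearest_examples('embeddings', external_arrays[0], k=10)
+ ```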
+ """
+ super().add_faiss_index_from_external_arrays(
+ external_arrays=external_arrays.astype(dtype),
+ index_name=index_name,
+ device=device,
+ string_factory=string_factory,
+ metric_type=metric_type,
+ custom_index=custom_index,
+ batch_size=batch_size,
+ train_size=train_size,
+ faiss_verbose=faiss_verbose,
+ )
+
+ def add_elasticsearch_index(
+ self,
+ column: str,
+ index_name: Optional[str] = None,
+ host: Optional[str] = None,
+ port: Optional[int] = None,
+ es_client: Optional["elasticsearch.Elasticsearch"] = None, # noqa: F821
+ es_index_name: Optional[str] = None,
+ es_index_config: Optional[dict] = None,
+ ):
+ """Add a text index using ElasticSearch for fast retrieval. This is done in-place.
+
+ Args:
+ column (`str`):
+ The column of the documents to add to the index.
+ index_name (`str`, *optional*):
+ The `index_name`/identifier of the index.
+ This is the index name that is used to call [`~Dataset.get_nearest_examples`] or [`Dataset.search`].
+ By default it corresponds to `column`.
+ host (`str`, *optional*, defaults to `localhost`):
+ Host of where ElasticSearch is running.
+ port (`int`, *optional*, defaults to `9200`):
+ Port of where ElasticSearch is running.
+ es_client (`elasticsearch.Elasticsearch`, *optional*):
+ The elasticsearch client used to create the index if host and port are `None`.
+ es_index_name (`str`, *optional*):
+ The elasticsearch index name used to create the index.
+ es_index_config (`dict`, *optional*):
+ The configuration of the elasticsearch index.
+ Default config is:
+ ```
+ {
+ "settings": {
+ "number_of_shards": 1,
+ "analysis": {"analyzer": {"stop_standard": {"type": "standard", " stopwords": "_english_"}}},
+ },
+ "mappings": {
+ "properties": {
+ "text": {
+ "type": "text",
+ "analyzer": "standard",
+ "similarity": "BM25"
+ },
+ }
+ },
+ }
+ ```
+ Example:
+
+ ```python
+ >>> es_client = elasticsearch.Elasticsearch()
+ >>> ds = datasets.load_dataset('crime_and_punish', split='train')
+ >>> ds.add_elasticsearch_index(column='line', es_client=es_client, es_index_name="my_es_index")
+ >>> scores, retrieved_examples = ds.get_nearest_examples('line', 'my new query', k=10)
+ ```
+ """
+ with self.formatted_as(type=None, columns=[column]):
+ super().add_elasticsearch_index(
+ column=column,
+ index_name=index_name,
+ host=host,
+ port=port,
+ es_client=es_client,
+ es_index_name=es_index_name,
+ es_index_config=es_index_config,
+ )
+ return self
+
+ @transmit_format
+ @fingerprint_transform(inplace=False)
+ def add_item(self, item: dict, new_fingerprint: str):
+ """Add item to Dataset.
+
+
+
+ Args:
+ item (`dict`):
+ Item data to be added.
+
+ Returns:
+ [`Dataset`]
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> new_review = {'label': 0, 'text': 'this movie is the absolute worst thing I have ever seen'}
+ >>> ds = ds.add_item(new_review)
+ >>> ds[-1]
+ {'label': 0, 'text': 'this movie is the absolute worst thing I have ever seen'}
+ ```
+ """
+ item_table = InMemoryTable.from_pydict({k: [v] for k, v in item.items()})
+ # We don't call _check_if_features_can_be_aligned here so this cast is "unsafe"
+ dset_features, item_features = _align_features(
+ [self._info.features, Features.from_arrow_schema(item_table.schema)]
+ )
+ # Cast to align the schemas of the tables and concatenate the tables
+ table = concat_tables(
+ [
+ self._data.cast(dset_features.arrow_schema) if self._info.features != dset_features else self._data,
+ item_table.cast(item_features.arrow_schema),
+ ]
+ )
+ if self._indices is None:
+ indices_table = None
+ else:
+ item_indices_array = pa.array([len(self._data)], type=pa.uint64())
+ item_indices_table = InMemoryTable.from_arrays([item_indices_array], names=["indices"])
+ indices_table = concat_tables([self._indices, item_indices_table])
+ info = self.info.copy()
+ info.features.update(item_features)
+ table = update_metadata_with_features(table, info.features)
+ return Dataset(
+ table,
+ info=info,
+ split=self.split,
+ indices_table=indices_table,
+ fingerprint=new_fingerprint,
+ )
+
+ def align_labels_with_mapping(self, label2id: Dict, label_column: str) -> "Dataset":
+ """Align the dataset's label ID and label name mapping to match an input `label2id` mapping.
+ This is useful when you want to ensure that a model's predicted labels are aligned with the dataset.
+ The alignment is done using the lowercase label names.
+
+ Args:
+ label2id (`dict`):
+ The label name to ID mapping to align the dataset with.
+ label_column (`str`):
+ The column name of labels to align on.
+
+ Example:
+
+ ```python
+ >>> # dataset with mapping {'entailment': 0, 'neutral': 1, 'contradiction': 2}
+ >>> ds = load_dataset("glue", "mnli", split="train")
+ >>> # mapping to align with
+ >>> label2id = {'CONTRADICTION': 0, 'NEUTRAL': 1, 'ENTAILMENT': 2}
+ >>> ds_aligned = ds.align_labels_with_mapping(label2id, "label")
+ ```
+
+ """
+ # Sanity checks
+ if label_column not in self._data.column_names:
+ raise ValueError(f"Column ({label_column}) not in table columns ({self._data.column_names}).")
+
+ label_feature = self._info.features[label_column]
+ if not (
+ isinstance(label_feature, ClassLabel)
+ or (isinstance(label_feature, Sequence) and isinstance(label_feature.feature, ClassLabel))
+ ):
+ raise ValueError(
+ f"Aligning labels with a mapping is only supported for {ClassLabel.__name__} column or {Sequence.__name__} column with the inner type {ClassLabel.__name__}, and column {label_feature} is of type {type(label_feature).__name__}."
+ )
+
+ # Sort input mapping by ID value to ensure the label names are aligned
+ label2id = dict(sorted(label2id.items(), key=lambda item: item[1]))
+ label_names = list(label2id.keys())
+ # Some label mappings use uppercase label names so we lowercase them during alignment
+ label2id = {k.lower(): v for k, v in label2id.items()}
+ int2str_function = (
+ label_feature.int2str if isinstance(label_feature, ClassLabel) else label_feature.feature.int2str
+ )
+
+ if isinstance(label_feature, ClassLabel):
+
+ def process_label_ids(batch):
+ dset_label_names = [
+ int2str_function(label_id).lower() if label_id is not None else None
+ for label_id in batch[label_column]
+ ]
+ batch[label_column] = [
+ label2id[label_name] if label_name is not None else None for label_name in dset_label_names
+ ]
+ return batch
+
+ else:
+
+ def process_label_ids(batch):
+ dset_label_names = [
+ [int2str_function(label_id).lower() if label_id is not None else None for label_id in seq]
+ for seq in batch[label_column]
+ ]
+ batch[label_column] = [
+ [label2id[label_name] if label_name is not None else None for label_name in seq]
+ for seq in dset_label_names
+ ]
+ return batch
+
+ features = self.features
+ features[label_column] = (
+ ClassLabel(num_classes=len(label_names), names=label_names)
+ if isinstance(label_feature, ClassLabel)
+ else Sequence(ClassLabel(num_classes=len(label_names), names=label_names))
+ )
+ return self.map(process_label_ids, features=features, batched=True, desc="Aligning the labels")
+
+
+def _concatenate_map_style_datasets(
+ dsets: List[Dataset],
+ info: Optional[DatasetInfo] = None,
+ split: Optional[NamedSplit] = None,
+ axis: int = 0,
+):
+ """
+ Converts a list of :class:`Dataset` with the same schema into a single :class:`Dataset`.
+ When you concatenate on axis 0, missing data are filled with None values.
+
+ Args:
+ dsets (`List[datasets.Dataset]`): List of Datasets to concatenate.
+ info (:class:`DatasetInfo`, optional): Dataset information, like description, citation, etc.
+ split (:class:`NamedSplit`, optional): Name of the dataset split.
+ axis (``{0, 1}``, default ``0``, meaning over rows):
+ Axis to concatenate over, where ``0`` means over rows (vertically) and ``1`` means over columns
+ (horizontally).
+
+ *New in version 1.6.0*
+
+ Example:
+
+ ```py
+ >>> ds3 = _concatenate_map_style_datasets([ds1, ds2])
+ ```
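+
+    Illustrative axis=1 sketch (assumes `ds1` and `ds2` have the same number of rows and disjoint column names; not part of the original example):
+
+    ```py
+    >>> ds_wide = _concatenate_map_style_datasets([ds1, ds2], axis=1)
+    ```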
+ """
+ # Ignore datasets with no rows
+ if any(dset.num_rows > 0 for dset in dsets):
+ dsets = [dset for dset in dsets if dset.num_rows > 0]
+ else:
+ # Return first dataset if all datasets are empty
+ return dsets[0]
+
+    # Perform checks (and a potential cast if axis=0)
+ if axis == 0:
+ _check_if_features_can_be_aligned([dset.features for dset in dsets])
+ else:
+ if not all(dset.num_rows == dsets[0].num_rows for dset in dsets):
+ raise ValueError("Number of rows must match for all datasets")
+ _check_column_names([col_name for dset in dsets for col_name in dset._data.column_names])
+
+ # Find common format or reset format
+ format = dsets[0].format
+ if any(dset.format != format for dset in dsets):
+ format = {}
+ logger.info("Some of the datasets have disparate format. Resetting the format of the concatenated dataset.")
+
+ def apply_offset_to_indices_table(table, offset):
+ if offset == 0:
+ return table
+ else:
+ array = table["indices"]
+ new_array = pc.add(array, pa.scalar(offset, type=pa.uint64()))
+ return InMemoryTable.from_arrays([new_array], names=["indices"])
+
+ # Concatenate indices if they exist
+ if any(dset._indices is not None for dset in dsets):
+ if axis == 0:
+ # Datasets with no indices tables are replaced with a dataset with an indices table in memory.
+ # Applying an offset to an indices table also brings the table in memory.
+ indices_tables = []
+ for i in range(len(dsets)):
+ if dsets[i]._indices is None:
+ dsets[i] = dsets[i]._select_with_indices_mapping(range(len(dsets[i])))
+ indices_tables.append(dsets[i]._indices)
+
+ # An offset needs to be applied to the indices before concatenating
+ offset = 0
+ for i in range(len(dsets)):
+ indices_tables[i] = apply_offset_to_indices_table(indices_tables[i], offset)
+ offset += len(dsets[i]._data)
+
+ # Concatenate indices
+ indices_tables = [t for t in indices_tables if len(t) > 0]
+ if indices_tables:
+ indices_table = concat_tables(indices_tables)
+ else:
+ indices_table = InMemoryTable.from_batches([], schema=pa.schema({"indices": pa.int64()}))
+ else:
+ if len(dsets) == 1:
+ indices_table = dsets[0]._indices
+ else:
+ for i in range(len(dsets)):
+ dsets[i] = dsets[i].flatten_indices()
+ indices_table = None
+ else:
+ indices_table = None
+
+ table = concat_tables([dset._data for dset in dsets], axis=axis)
+ if axis == 0:
+ features_list = _align_features([dset.features for dset in dsets])
+ else:
+ features_list = [dset.features for dset in dsets]
+ table = update_metadata_with_features(table, {k: v for features in features_list for k, v in features.items()})
+
+ # Concatenate infos
+ if info is None:
+ info = DatasetInfo.from_merge([dset.info for dset in dsets])
+ fingerprint = update_fingerprint(
+ "".join(dset._fingerprint for dset in dsets), _concatenate_map_style_datasets, {"info": info, "split": split}
+ )
+
+ # Make final concatenated dataset
+ concatenated_dataset = Dataset(
+ table,
+ info=info,
+ split=split,
+ indices_table=indices_table,
+ fingerprint=fingerprint,
+ )
+ concatenated_dataset.set_format(**format)
+ return concatenated_dataset
+
+
+def _interleave_map_style_datasets(
+ datasets: List["Dataset"],
+ probabilities: Optional[List[float]] = None,
+ seed: Optional[int] = None,
+ info: Optional[DatasetInfo] = None,
+ split: Optional[NamedSplit] = None,
+ stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
+ **kwargs,
+) -> "Dataset":
+ """
+ Interleave several map-style datasets (sources) into a single map-style dataset.
+ The new dataset is constructed by alternating between the sources to get the examples.
+    If `probabilities = None` (default), the new dataset is constructed by cycling between each source to get the examples.
+    If `probabilities` is not `None`, the new dataset is constructed by getting examples from a random source at a time according to the provided probabilities.
+
+ Args:
+ datasets (`List[Dataset]`): list of datasets to interleave
+ probabilities (`List[float]`, optional, default None): If specified, the new dataset is constructed by sampling
+ examples from one source at a time according to these probabilities.
+ seed (`int`, optional, default None): The random seed used to choose a source for each example.
+ info (:class:`DatasetInfo`, optional): Dataset information, like description, citation, etc.
+ split (:class:`NamedSplit`, optional): Name of the dataset split.
+ stopping_strategy (`str`, defaults to `first_exhausted`):
+            Two strategies are proposed right now.
+            By default, `first_exhausted` is an undersampling strategy, i.e. the dataset construction is stopped as soon as one dataset has run out of samples.
+            If the strategy is `all_exhausted`, we use an oversampling strategy, i.e. the dataset construction is stopped as soon as every sample of every dataset has been added at least once.
+            Note that if the strategy is `all_exhausted`, the interleaved dataset size can get enormous:
+            - with no probabilities, the resulting dataset will have max_length_datasets*nb_dataset samples.
+            - with given probabilities, the resulting dataset will have more samples if some datasets have a very low probability of being visited.
+        **kwargs (additional keyword arguments): Keyword arguments to be passed to :meth:`datasets.Dataset.select` when selecting the indices used to interleave the datasets.
+
+ Output:
+ :class:`datasets.Dataset`
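+
+    Example (an illustrative sketch, not part of the original docstring; `ds1` and `ds2` are assumed map-style datasets):
+
+    ```py
+    >>> ds = _interleave_map_style_datasets([ds1, ds2], probabilities=[0.8, 0.2], seed=42)
+    ```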
+ """
+ if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
+ raise ValueError(
+ f"{stopping_strategy} stopping strategy in `interleave_datasets` is not implemented yet with a list of {type(datasets[0])}"
+ )
+
+ # To interleave the datasets, we concatenate them and then we re-order the indices
+ concatenated_datasets = _concatenate_map_style_datasets(datasets, info=info, split=split)
+
+ # Let's now build the indices to pass to .select()
+ lengths = [len(dset) for dset in datasets]
+ offsets = np.cumsum([0] + lengths[:-1])
+
+ # if stopping_strategy is "first_exhausted", it is an undersampling situation whereas it is an oversampling situation if it is "all_exhausted"
+ oversampling = stopping_strategy == "all_exhausted"
+
+ if probabilities is None and not oversampling:
+ # Undersampling situation with cycling between each sources
+ # Example:: If lengths of the datasets are [3, 4, 5]
+        # Then the resulting indices should be [0, 3, 7, 1, 4, 8, 2, 5, 9]
+ # Note that we only have 3 examples per dataset since the first dataset ran out of examples
+
+ # Reasoning behind the following operation: keeping the min_length first indices of each dataset
+ # while offsetting in order to correspond to the right indices of the concatenated dataset
+ # and flattening to effectively interleave the datasets
+ indices = (offsets.reshape(1, -1) + np.arange(min(lengths)).reshape(-1, 1)).flatten().tolist()
+ elif probabilities is None:
+ # Oversampling situation with cycling between each sources
+ # Then the resulting indices should be [0, 3, 7, 1, 4, 8, 2, 5, 9, 0, 6, 10, 1, 3, 11]
+ # Note that we have 5 examples per dataset with a rolling window since the longest dataset has 5 samples
+
+ # Reasoning behind the following operation: for each dataset indices (i.e column) repeat the indices to have max_length indices per dataset
+ # For example, if the max_length is 5 and the i-th dataset has 3 samples, the i-th column will be [0,1,2,0,1]
+ indices = np.mod(np.arange(max(lengths)).reshape(-1, 1), np.array(lengths).reshape(1, -1))
+
+ # We have to keep the indices to their respective dataset offsets and to flatten to effectively interleave the datasets
+ indices = (indices + offsets).flatten().tolist()
+
+ else:
+        # boolean array indicating, at index i, whether dataset_i has been fully exhausted
+ is_exhausted = np.full(len(lengths), False)
+
+ # if undersampling ("first_exhausted"), we stop as soon as one dataset is exhausted
+        # if oversampling ("all_exhausted"), we stop as soon as every dataset is exhausted, i.e. as soon as every sample of every dataset has been visited at least once
+ bool_strategy_func = np.all if oversampling else np.any
+
+ def iter_random_indices():
+ """Get an infinite iterator that randomly samples the index of the source to pick examples from."""
+ rng = np.random.default_rng(seed)
+ while True:
+ yield from (int(i) for i in rng.choice(len(datasets), size=1000, p=probabilities))
+
+ current_index = [0] * len(datasets)
+ indices = []
+ for source_idx in iter_random_indices():
+            # If no oversampling, we stop as soon as a dataset has run out of examples (np.any)
+            # Otherwise, we stop as soon as every dataset has run out of examples (np.all)
+ if bool_strategy_func(is_exhausted):
+ # the stopping condition was reached, let's stop
+ break
+
+ # let's add the example at the current index of the `source_idx`-th dataset
+ indices.append(current_index[source_idx] + offsets[source_idx])
+ current_index[source_idx] += 1
+
+            # we've run out of examples for the current dataset, let's update our boolean array and bring the current_index back to 0
+ if current_index[source_idx] >= lengths[source_idx]:
+ is_exhausted[source_idx] = True
+ current_index[source_idx] = 0
+
+ return concatenated_datasets.select(indices, **kwargs)
+
+
+def _split_by_node_map_style_dataset(dataset: Dataset, rank: int, world_size: int) -> Dataset:
+ """
+ Split a dataset for the node at rank `rank` in a pool of nodes of size `world_size`.
+ Each node is assigned a chunk of data, e.g. rank 0 is given the first chunk of the dataset.
+ To maximize data loading throughput, chunks are made of contiguous data on disk if possible.
+
+ Args:
+ dataset ([`Dataset`]):
+ The dataset to split by node.
+ rank (`int`):
+ Rank of the current node.
+ world_size (`int`):
+ Total number of nodes.
+
+ Returns:
+ [`Dataset`]: The dataset to be used on the node at rank `rank`.
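+
+    Example (illustrative; `ds` is any map-style dataset and 4 nodes are assumed):
+
+    ```py
+    >>> ds_rank0 = _split_by_node_map_style_dataset(ds, rank=0, world_size=4)
+    ```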
+ """
+ return dataset.shard(num_shards=world_size, index=rank, contiguous=True)
+
+
+# This is outside Dataset.filter as it needs to be picklable for multiprocessing
+
+
+def get_indices_from_mask_function(
+ function: Callable,
+ batched: bool,
+ with_indices: bool,
+ with_rank: bool,
+ input_columns: Optional[Union[str, List[str]]],
+ indices_mapping: Optional[Table] = None,
+ *args,
+ **fn_kwargs,
+):
+ if batched:
+ # we extract indices and rank from args
+ *inputs, indices, rank = args
+ additional_args = ()
+ if with_indices:
+ additional_args += (indices,)
+ if with_rank:
+ additional_args += (rank,)
+ mask = function(*inputs, *additional_args, **fn_kwargs)
+ else:
+ # we get batched data (to do less look-ups) but `function` only accepts one example
+ # therefore we need to call `function` on each example of the batch to get the mask
+ *inputs, indices, rank = args
+ mask = []
+ if input_columns is None:
+ # inputs only contains a batch of examples
+ batch: dict = inputs[0]
+ num_examples = len(batch[next(iter(batch.keys()))])
+ for i in range(num_examples):
+ example = {key: batch[key][i] for key in batch}
+ additional_args = ()
+ if with_indices:
+ additional_args += (indices[i],)
+ if with_rank:
+ additional_args += (rank,)
+ mask.append(function(example, *additional_args, **fn_kwargs))
+ else:
+ # inputs is a list of columns
+ columns: List[List] = inputs
+ num_examples = len(columns[0])
+ for i in range(num_examples):
+ input = [column[i] for column in columns]
+ additional_args = ()
+ if with_indices:
+ additional_args += (indices[i],)
+ if with_rank:
+ additional_args += (rank,)
+ mask.append(function(*input, *additional_args, **fn_kwargs))
+ indices_array = [i for i, to_keep in zip(indices, mask) if to_keep]
+ if indices_mapping is not None:
+ indices_array = pa.array(indices_array, type=pa.uint64())
+ indices_array = indices_mapping.column(0).take(indices_array)
+ indices_array = indices_array.to_pylist()
+ return {"indices": indices_array}
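+
+
+# Illustrative note (not part of the original module): for a batch {"text": ["a", "bb", "ccc"]} with
+# indices [0, 1, 2] and the mask function `lambda example: len(example["text"]) > 1` (batched=False,
+# no input_columns), the helper above returns {"indices": [1, 2]}; if an `indices_mapping` table is
+# given, those positions are first translated through its "indices" column.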
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/arrow_reader.py b/evalkit_tf437/lib/python3.10/site-packages/datasets/arrow_reader.py
new file mode 100644
index 0000000000000000000000000000000000000000..9ac14e28ce64bded311dc0f88a83edabc75c12bd
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/datasets/arrow_reader.py
@@ -0,0 +1,661 @@
+# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Lint as: python3
+"""Arrow ArrowReader."""
+
+import copy
+import math
+import os
+import re
+import shutil
+from dataclasses import dataclass
+from functools import partial
+from pathlib import Path
+from typing import TYPE_CHECKING, List, Optional, Union
+
+import pyarrow as pa
+import pyarrow.parquet as pq
+from tqdm.contrib.concurrent import thread_map
+
+from .download.download_config import DownloadConfig
+from .naming import _split_re, filenames_for_dataset_split
+from .table import InMemoryTable, MemoryMappedTable, Table, concat_tables
+from .utils import logging
+from .utils import tqdm as hf_tqdm
+from .utils.file_utils import cached_path
+
+
+if TYPE_CHECKING:
+ from .info import DatasetInfo # noqa: F401
+ from .splits import Split, SplitInfo # noqa: F401
+
+
+logger = logging.get_logger(__name__)
+
+HF_GCP_BASE_URL = "https://storage.googleapis.com/huggingface-nlp/cache/datasets"
+
+_SUB_SPEC_RE = re.compile(
+ rf"""
+^
+ (?P<split>{_split_re[1:-1]})
+ (\[
+    ((?P<from>-?\d+)
+     (?P<from_pct>%)?)?
+    :
+    ((?P<to>-?\d+)
+     (?P<to_pct>%)?)?
+ \])?(\((?P<rounding>[^\)]*)\))?
+$
+""", # remove ^ and $
+ re.X,
+)
+
+_ADDITION_SEP_RE = re.compile(r"\s*\+\s*")
+
+
+class DatasetNotOnHfGcsError(ConnectionError):
+ """When you can't get the dataset from the Hf google cloud storage"""
+
+ pass
+
+
+class MissingFilesOnHfGcsError(ConnectionError):
+    """When some files are missing on the Hf google cloud storage"""
+
+ pass
+
+
+@dataclass(frozen=True)
+class FileInstructions:
+ """The file instructions associated with a split ReadInstruction.
+
+ Attributes:
+ num_examples: `int`, The total number of examples
+ file_instructions: List[dict(filename, skip, take)], the files information.
+            The filenames contain the relative path, not the absolute one.
+            skip/take indicates which examples to read in the file: `ds.slice(skip, take)`
+ """
+
+ num_examples: int
+ file_instructions: List[dict]
+
+
+def make_file_instructions(
+ name: str,
+ split_infos: List["SplitInfo"],
+ instruction: Union[str, "ReadInstruction"],
+ filetype_suffix: Optional[str] = None,
+ prefix_path: Optional[str] = None,
+) -> FileInstructions:
+ """Returns instructions of the split dict.
+
+ Args:
+ name (`str`): Name of the dataset.
+ split_infos (`list` of `[SplitInfo]`): Dataset splits information.
+ instruction ([`ReadInstruction`] or `str`): Reading instruction for a dataset.
+ filetype_suffix (`str`, *optional*): Suffix of dataset files, e.g. 'arrow' or 'parquet'.
+ prefix_path (`str`, *optional*): Prefix of dataset files, e.g. directory name.
+
+ Returns:
+ [`FileInstructions`]
+ """
+ if not isinstance(name, str):
+ raise TypeError(f"Expected str 'name', but got: {type(name).__name__}")
+ elif not name:
+ raise ValueError("Expected non-empty str 'name'")
+ name2len = {info.name: info.num_examples for info in split_infos}
+ name2shard_lengths = {info.name: info.shard_lengths for info in split_infos}
+ name2filenames = {
+ info.name: filenames_for_dataset_split(
+ path=prefix_path,
+ dataset_name=name,
+ split=info.name,
+ filetype_suffix=filetype_suffix,
+ shard_lengths=name2shard_lengths[info.name],
+ )
+ for info in split_infos
+ }
+ if not isinstance(instruction, ReadInstruction):
+ instruction = ReadInstruction.from_spec(instruction)
+ # Create the absolute instruction (per split)
+ absolute_instructions = instruction.to_absolute(name2len)
+
+ # For each split, return the files instruction (skip/take)
+ file_instructions = []
+ num_examples = 0
+ for abs_instr in absolute_instructions:
+ split_length = name2len[abs_instr.splitname]
+ filenames = name2filenames[abs_instr.splitname]
+ shard_lengths = name2shard_lengths[abs_instr.splitname]
+ from_ = 0 if abs_instr.from_ is None else abs_instr.from_
+ to = split_length if abs_instr.to is None else abs_instr.to
+ if shard_lengths is None: # not sharded
+ for filename in filenames:
+ take = to - from_
+ if take == 0:
+ continue
+ num_examples += take
+ file_instructions.append({"filename": filename, "skip": from_, "take": take})
+ else: # sharded
+ index_start = 0 # Beginning (included) of moving window.
+ index_end = 0 # End (excluded) of moving window.
+ for filename, shard_length in zip(filenames, shard_lengths):
+ index_end += shard_length
+ if from_ < index_end and to > index_start: # There is something to take.
+ skip = from_ - index_start if from_ > index_start else 0
+ take = to - index_start - skip if to < index_end else -1
+ if take == 0:
+ continue
+ file_instructions.append({"filename": filename, "skip": skip, "take": take})
+ num_examples += shard_length - skip if take == -1 else take
+ index_start += shard_length
+ return FileInstructions(
+ num_examples=num_examples,
+ file_instructions=file_instructions,
+ )
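+
+
+# Illustrative example (hypothetical values, not part of the original module): for a split stored in
+# three shards with shard_lengths [10, 10, 10] and an absolute instruction covering examples 5..25,
+# make_file_instructions produces skip/take pairs (5, -1), (0, -1) and (0, 5) for the three shard
+# files, where take=-1 means "read the shard to its end", and num_examples is 20.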
+
+
+class BaseReader:
+ """
+ Build a Dataset object out of Instruction instance(s).
+ """
+
+ def __init__(self, path: str, info: Optional["DatasetInfo"]):
+        """Initializes BaseReader.
+
+        Args:
+            path (str): path where the dataset files are stored.
+ info (DatasetInfo): info about the dataset.
+ """
+ self._path: str = path
+ self._info: Optional["DatasetInfo"] = info
+ self._filetype_suffix: Optional[str] = None
+
+ def _get_table_from_filename(self, filename_skip_take, in_memory=False) -> Table:
+ """Returns a Dataset instance from given (filename, skip, take)."""
+ raise NotImplementedError
+
+ def _read_files(self, files, in_memory=False) -> Table:
+ """Returns Dataset for given file instructions.
+
+ Args:
+ files: List[dict(filename, skip, take)], the files information.
+ The filenames contain the absolute path, not relative.
+                skip/take indicates which examples to read in the file: `ds.slice(skip, take)`
+ in_memory (bool, default False): Whether to copy the data in-memory.
+ """
+ if len(files) == 0 or not all(isinstance(f, dict) for f in files):
+            raise ValueError("Please provide valid file information")
+ files = copy.deepcopy(files)
+ for f in files:
+ f["filename"] = os.path.join(self._path, f["filename"])
+
+ pa_tables = thread_map(
+ partial(self._get_table_from_filename, in_memory=in_memory),
+ files,
+ tqdm_class=hf_tqdm,
+ desc="Loading dataset shards",
+ # set `disable=None` rather than `disable=False` by default to disable progress bar when no TTY attached
+ disable=len(files) <= 16 or None,
+ )
+ pa_tables = [t for t in pa_tables if len(t) > 0]
+ if not pa_tables and (self._info is None or self._info.features is None):
+ raise ValueError(
+ "Tried to read an empty table. Please specify at least info.features to create an empty table with the right type."
+ )
+ pa_tables = pa_tables or [InMemoryTable.from_batches([], schema=pa.schema(self._info.features.type))]
+ pa_table = concat_tables(pa_tables) if len(pa_tables) != 1 else pa_tables[0]
+ return pa_table
+
+ def get_file_instructions(self, name, instruction, split_infos):
+ """Return list of dict {'filename': str, 'skip': int, 'take': int}"""
+ file_instructions = make_file_instructions(
+ name, split_infos, instruction, filetype_suffix=self._filetype_suffix, prefix_path=self._path
+ )
+ files = file_instructions.file_instructions
+ return files
+
+ def read(
+ self,
+ name,
+ instructions,
+ split_infos,
+ in_memory=False,
+ ):
+ """Returns Dataset instance(s).
+
+ Args:
+ name (str): name of the dataset.
+ instructions (ReadInstruction): instructions to read.
+                The instruction can be a string, in which case it is passed to the
+                ReadInstruction constructor as-is.
+ split_infos (list of SplitInfo proto): the available splits for dataset.
+ in_memory (bool, default False): Whether to copy the data in-memory.
+
+ Returns:
+ kwargs to build a single Dataset instance.
+ """
+
+ files = self.get_file_instructions(name, instructions, split_infos)
+ if not files:
+ msg = f'Instruction "{instructions}" corresponds to no data!'
+ raise ValueError(msg)
+ return self.read_files(files=files, original_instructions=instructions, in_memory=in_memory)
+
+ def read_files(
+ self,
+ files: List[dict],
+ original_instructions: Union[None, "ReadInstruction", "Split"] = None,
+ in_memory=False,
+ ):
+ """Returns single Dataset instance for the set of file instructions.
+
+ Args:
+ files: List[dict(filename, skip, take)], the files information.
+                The filenames contain the relative path, not the absolute one.
+                skip/take indicates which examples to read in the file: `ds.skip().take()`
+ original_instructions: store the original instructions used to build the dataset split in the dataset.
+ in_memory (bool, default False): Whether to copy the data in-memory.
+
+ Returns:
+ kwargs to build a Dataset instance.
+ """
+ # Prepend path to filename
+ pa_table = self._read_files(files, in_memory=in_memory)
+ # If original_instructions is not None, convert it to a human-readable NamedSplit
+ if original_instructions is not None:
+ from .splits import Split # noqa
+
+ split = Split(str(original_instructions))
+ else:
+ split = None
+ dataset_kwargs = {"arrow_table": pa_table, "info": self._info, "split": split}
+ return dataset_kwargs
+
+ def download_from_hf_gcs(self, download_config: DownloadConfig, relative_data_dir):
+ """
+ Download the dataset files from the Hf GCS
+
+ Args:
+            download_config: `DownloadConfig`, the download configuration used to download the files
+ relative_data_dir: `str`, the relative directory of the remote files from
+ the `datasets` directory on GCS.
+
+ """
+ remote_cache_dir = HF_GCP_BASE_URL + "/" + relative_data_dir.replace(os.sep, "/")
+ try:
+ remote_dataset_info = os.path.join(remote_cache_dir, "dataset_info.json")
+ downloaded_dataset_info = cached_path(
+ remote_dataset_info.replace(os.sep, "/"), download_config=download_config
+ )
+ shutil.move(downloaded_dataset_info, os.path.join(self._path, "dataset_info.json"))
+ if self._info is not None:
+ self._info.update(self._info.from_directory(self._path))
+ except FileNotFoundError as err:
+ raise DatasetNotOnHfGcsError(err) from None
+ try:
+ for split in self._info.splits:
+ file_instructions = self.get_file_instructions(
+ name=self._info.builder_name,
+ instruction=split,
+ split_infos=self._info.splits.values(),
+ )
+ for file_instruction in file_instructions:
+ file_to_download = str(Path(file_instruction["filename"]).relative_to(self._path))
+ remote_prepared_filename = os.path.join(remote_cache_dir, file_to_download)
+ downloaded_prepared_filename = cached_path(
+ remote_prepared_filename.replace(os.sep, "/"), download_config=download_config
+ )
+ shutil.move(downloaded_prepared_filename, file_instruction["filename"])
+ except FileNotFoundError as err:
+ raise MissingFilesOnHfGcsError(err) from None
+
+
+class ArrowReader(BaseReader):
+ """
+ Build a Dataset object out of Instruction instance(s).
+ This Reader uses either memory mapping or file descriptors (in-memory) on arrow files.
+ """
+
+ def __init__(self, path: str, info: Optional["DatasetInfo"]):
+ """Initializes ArrowReader.
+
+ Args:
+ path (str): path where Arrow files are stored.
+ info (DatasetInfo): info about the dataset.
+ """
+ super().__init__(path, info)
+ self._filetype_suffix = "arrow"
+
+ def _get_table_from_filename(self, filename_skip_take, in_memory=False) -> Table:
+ """Returns a Dataset instance from given (filename, skip, take)."""
+ filename, skip, take = (
+ filename_skip_take["filename"],
+ filename_skip_take["skip"] if "skip" in filename_skip_take else None,
+ filename_skip_take["take"] if "take" in filename_skip_take else None,
+ )
+ table = ArrowReader.read_table(filename, in_memory=in_memory)
+ if take == -1:
+ take = len(table) - skip
+ # here we don't want to slice an empty table, or it may segfault
+ if skip is not None and take is not None and not (skip == 0 and take == len(table)):
+ table = table.slice(skip, take)
+ return table
+
+ @staticmethod
+ def read_table(filename, in_memory=False) -> Table:
+ """
+ Read table from file.
+
+ Args:
+ filename (str): File name of the table.
+ in_memory (bool, default=False): Whether to copy the data in-memory.
+
+ Returns:
+ pyarrow.Table
+ """
+ table_cls = InMemoryTable if in_memory else MemoryMappedTable
+ return table_cls.from_file(filename)
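+
+
+# Illustrative note (paths are hypothetical, not part of the original module):
+# ArrowReader.read_table("data.arrow") memory-maps the file via MemoryMappedTable, while
+# ArrowReader.read_table("data.arrow", in_memory=True) copies it into RAM via InMemoryTable.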
+
+
+class ParquetReader(BaseReader):
+ """
+ Build a Dataset object out of Instruction instance(s).
+ This Reader uses memory mapping on parquet files.
+ """
+
+ def __init__(self, path: str, info: Optional["DatasetInfo"]):
+ """Initializes ParquetReader.
+
+ Args:
+            path (str): path where parquet files are stored.
+ info (DatasetInfo): info about the dataset.
+ """
+ super().__init__(path, info)
+ self._filetype_suffix = "parquet"
+
+ def _get_table_from_filename(self, filename_skip_take, **kwargs):
+ """Returns a Dataset instance from given (filename, skip, take)."""
+ filename, skip, take = (
+ filename_skip_take["filename"],
+ filename_skip_take["skip"] if "skip" in filename_skip_take else None,
+ filename_skip_take["take"] if "take" in filename_skip_take else None,
+ )
+ # Parquet read_table always loads data in memory, independently of memory_map
+ pa_table = pq.read_table(filename, memory_map=True)
+ # here we don't want to slice an empty table, or it may segfault
+ if skip is not None and take is not None and not (skip == 0 and take == len(pa_table)):
+ pa_table = pa_table.slice(skip, take)
+ return pa_table
+
+
+@dataclass(frozen=True)
+class _AbsoluteInstruction:
+ """A machine friendly slice: defined absolute positive boundaries."""
+
+ splitname: str
+ from_: int # uint (starting index).
+ to: int # uint (ending index).
+
+
+@dataclass(frozen=True)
+class _RelativeInstruction:
+ """Represents a single parsed slicing instruction, can use % and negatives."""
+
+ splitname: str
+ from_: Optional[int] = None # int (starting index) or None if no lower boundary.
+ to: Optional[int] = None # int (ending index) or None if no upper boundary.
+ unit: Optional[str] = None
+ rounding: Optional[str] = None
+
+ def __post_init__(self):
+ if self.unit is not None and self.unit not in ["%", "abs"]:
+ raise ValueError("unit must be either % or abs")
+ if self.rounding is not None and self.rounding not in ["closest", "pct1_dropremainder"]:
+ raise ValueError("rounding must be either closest or pct1_dropremainder")
+ if self.unit != "%" and self.rounding is not None:
+ raise ValueError("It is forbidden to specify rounding if not using percent slicing.")
+ if self.unit == "%" and self.from_ is not None and abs(self.from_) > 100:
+ raise ValueError("Percent slice boundaries must be > -100 and < 100.")
+ if self.unit == "%" and self.to is not None and abs(self.to) > 100:
+ raise ValueError("Percent slice boundaries must be > -100 and < 100.")
+ # Update via __dict__ due to instance being "frozen"
+ self.__dict__["rounding"] = "closest" if self.rounding is None and self.unit == "%" else self.rounding
+
+
+def _str_to_read_instruction(spec):
+ """Returns ReadInstruction for given string."""
+ res = _SUB_SPEC_RE.match(spec)
+ if not res:
+ raise ValueError(f"Unrecognized instruction format: {spec}")
+ unit = "%" if res.group("from_pct") or res.group("to_pct") else "abs"
+ return ReadInstruction(
+ split_name=res.group("split"),
+ rounding=res.group("rounding"),
+ from_=int(res.group("from")) if res.group("from") else None,
+ to=int(res.group("to")) if res.group("to") else None,
+ unit=unit,
+ )
+
+
+def _pct_to_abs_pct1(boundary, num_examples):
+ # Using math.trunc here, since -99.5% should give -99%, not -100%.
+ if num_examples < 100:
+ msg = (
+ 'Using "pct1_dropremainder" rounding on a split with less than 100 '
+ "elements is forbidden: it always results in an empty dataset."
+ )
+ raise ValueError(msg)
+ return boundary * math.trunc(num_examples / 100.0)
+
+
+def _pct_to_abs_closest(boundary, num_examples):
+ return int(round(boundary * num_examples / 100.0))
+
+
+def _rel_to_abs_instr(rel_instr, name2len):
+ """Returns _AbsoluteInstruction instance for given RelativeInstruction.
+
+ Args:
+ rel_instr: RelativeInstruction instance.
+ name2len: dict {split_name: num_examples}.
+ """
+ pct_to_abs = _pct_to_abs_closest if rel_instr.rounding == "closest" else _pct_to_abs_pct1
+ split = rel_instr.splitname
+ if split not in name2len:
+ raise ValueError(f'Unknown split "{split}". Should be one of {list(name2len)}.')
+ num_examples = name2len[split]
+ from_ = rel_instr.from_
+ to = rel_instr.to
+ if rel_instr.unit == "%":
+ from_ = 0 if from_ is None else pct_to_abs(from_, num_examples)
+ to = num_examples if to is None else pct_to_abs(to, num_examples)
+ else:
+ from_ = 0 if from_ is None else from_
+ to = num_examples if to is None else to
+ if from_ < 0:
+ from_ = max(num_examples + from_, 0)
+ if to < 0:
+ to = max(num_examples + to, 0)
+ from_ = min(from_, num_examples)
+ to = min(to, num_examples)
+ return _AbsoluteInstruction(split, from_, to)
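+
+
+# Illustrative example (hypothetical values, not part of the original module): for a split "train"
+# with 1000 examples, the relative instruction train[-10%:] (unit="%", default rounding="closest")
+# is translated to _AbsoluteInstruction("train", 900, 1000).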
+
+
+class ReadInstruction:
+ """Reading instruction for a dataset.
+
+ Examples::
+
+ # The following lines are equivalent:
+ ds = datasets.load_dataset('mnist', split='test[:33%]')
+ ds = datasets.load_dataset('mnist', split=datasets.ReadInstruction.from_spec('test[:33%]'))
+ ds = datasets.load_dataset('mnist', split=datasets.ReadInstruction('test', to=33, unit='%'))
+ ds = datasets.load_dataset('mnist', split=datasets.ReadInstruction(
+ 'test', from_=0, to=33, unit='%'))
+
+ # The following lines are equivalent:
+ ds = datasets.load_dataset('mnist', split='test[:33%]+train[1:-1]')
+ ds = datasets.load_dataset('mnist', split=datasets.ReadInstruction.from_spec(
+ 'test[:33%]+train[1:-1]'))
+ ds = datasets.load_dataset('mnist', split=(
+ datasets.ReadInstruction('test', to=33, unit='%') +
+ datasets.ReadInstruction('train', from_=1, to=-1, unit='abs')))
+
+ # The following lines are equivalent:
+ ds = datasets.load_dataset('mnist', split='test[:33%](pct1_dropremainder)')
+ ds = datasets.load_dataset('mnist', split=datasets.ReadInstruction.from_spec(
+ 'test[:33%](pct1_dropremainder)'))
+ ds = datasets.load_dataset('mnist', split=datasets.ReadInstruction(
+ 'test', from_=0, to=33, unit='%', rounding="pct1_dropremainder"))
+
+ # 10-fold validation:
+ tests = datasets.load_dataset(
+ 'mnist',
+ [datasets.ReadInstruction('train', from_=k, to=k+10, unit='%')
+ for k in range(0, 100, 10)])
+ trains = datasets.load_dataset(
+ 'mnist',
+ [datasets.ReadInstruction('train', to=k, unit='%') + datasets.ReadInstruction('train', from_=k+10, unit='%')
+ for k in range(0, 100, 10)])
+
+ """
+
+ def _init(self, relative_instructions):
+ # Private initializer.
+ self._relative_instructions = relative_instructions
+
+ @classmethod
+ def _read_instruction_from_relative_instructions(cls, relative_instructions):
+ """Returns ReadInstruction obj initialized with relative_instructions."""
+        # Use __new__ to bypass __init__ used by public API and not convenient here.
+ result = cls.__new__(cls)
+ result._init(relative_instructions) # pylint: disable=protected-access
+ return result
+
+ def __init__(self, split_name, rounding=None, from_=None, to=None, unit=None):
+ """Initialize ReadInstruction.
+
+ Args:
+ split_name (str): name of the split to read. Eg: 'train'.
+ rounding (str, optional): The rounding behaviour to use when percent slicing is
+ used. Ignored when slicing with absolute indices.
+ Possible values:
+ - 'closest' (default): The specified percentages are rounded to the
+                    closest value. Use this if you want the specified percents to be as
+                    exact as possible.
+ - 'pct1_dropremainder': the specified percentages are treated as
+ multiple of 1%. Use this option if you want consistency. Eg:
+ len(5%) == 5 * len(1%).
+ Using this option, one might not be able to use the full set of
+ examples, if the number of those is not a multiple of 100.
+ from_ (int):
+ to (int): alternative way of specifying slicing boundaries. If any of
+                the {from_, to, unit} arguments is used, slicing cannot be specified as
+                a string.
+ unit (str): optional, one of:
+ '%': to set the slicing unit as percents of the split size.
+ 'abs': to set the slicing unit as absolute numbers.
+ """
+ # This constructor is not always called. See factory method
+ # `_read_instruction_from_relative_instructions`. Common init instructions
+ # MUST be placed in the _init method.
+ self._init([_RelativeInstruction(split_name, from_, to, unit, rounding)])
+
+ @classmethod
+ def from_spec(cls, spec):
+ """Creates a `ReadInstruction` instance out of a string spec.
+
+ Args:
+ spec (`str`):
+ Split(s) + optional slice(s) to read + optional rounding
+ if percents are used as the slicing unit. A slice can be specified,
+ using absolute numbers (`int`) or percentages (`int`).
+
+ Examples:
+
+ ```
+ test: test split.
+ test + validation: test split + validation split.
+ test[10:]: test split, minus its first 10 records.
+ test[:10%]: first 10% records of test split.
+            test[:20%](pct1_dropremainder): first 20% records, rounded with the pct1_dropremainder rounding.
+ test[:-5%]+train[40%:60%]: first 95% of test + middle 20% of train.
+ ```
+
+ Returns:
+ ReadInstruction instance.
+ """
+ spec = str(spec) # Need to convert to str in case of NamedSplit instance.
+ subs = _ADDITION_SEP_RE.split(spec)
+ if not subs:
+ raise ValueError(f"No instructions could be built out of {spec}")
+ instruction = _str_to_read_instruction(subs[0])
+ return sum((_str_to_read_instruction(sub) for sub in subs[1:]), instruction)
+
+ def to_spec(self):
+ rel_instr_specs = []
+ for rel_instr in self._relative_instructions:
+ rel_instr_spec = rel_instr.splitname
+ if rel_instr.from_ is not None or rel_instr.to is not None:
+ from_ = rel_instr.from_
+ to = rel_instr.to
+ unit = rel_instr.unit
+ rounding = rel_instr.rounding
+ unit = unit if unit == "%" else ""
+ from_ = str(from_) + unit if from_ is not None else ""
+ to = str(to) + unit if to is not None else ""
+ slice_str = f"[{from_}:{to}]"
+ rounding_str = (
+ f"({rounding})" if unit == "%" and rounding is not None and rounding != "closest" else ""
+ )
+ rel_instr_spec += slice_str + rounding_str
+ rel_instr_specs.append(rel_instr_spec)
+ return "+".join(rel_instr_specs)
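+
+    # Illustrative round-trip (not part of the original module):
+    #   ReadInstruction.from_spec("test[:33%]+train[1:-1]").to_spec() == "test[:33%]+train[1:-1]"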
+
+ def __add__(self, other):
+ """Returns a new ReadInstruction obj, result of appending other to self."""
+ if not isinstance(other, ReadInstruction):
+ msg = "ReadInstruction can only be added to another ReadInstruction obj."
+ raise TypeError(msg)
+ self_ris = self._relative_instructions
+ other_ris = other._relative_instructions # pylint: disable=protected-access
+ if (
+ self_ris[0].unit != "abs"
+ and other_ris[0].unit != "abs"
+ and self._relative_instructions[0].rounding != other_ris[0].rounding
+ ):
+ raise ValueError("It is forbidden to sum ReadInstruction instances with different rounding values.")
+ return self._read_instruction_from_relative_instructions(self_ris + other_ris)
+
+ def __str__(self):
+ return self.to_spec()
+
+ def __repr__(self):
+ return f"ReadInstruction({self._relative_instructions})"
+
+ def to_absolute(self, name2len):
+ """Translate instruction into a list of absolute instructions.
+
+ Those absolute instructions are then to be added together.
+
+ Args:
+ name2len (`dict`):
+ Associating split names to number of examples.
+
+ Returns:
+ list of _AbsoluteInstruction instances (corresponds to the + in spec).
+ """
+ return [_rel_to_abs_instr(rel_instr, name2len) for rel_instr in self._relative_instructions]
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/arrow_writer.py b/evalkit_tf437/lib/python3.10/site-packages/datasets/arrow_writer.py
new file mode 100644
index 0000000000000000000000000000000000000000..3fed7e14e389174657b68c74c6043177e46f7302
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/datasets/arrow_writer.py
@@ -0,0 +1,745 @@
+# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Lint as: python3
+"""To write records into Parquet files."""
+
+import errno
+import json
+import os
+import sys
+from pathlib import Path
+from typing import Any, Dict, Iterable, List, Optional, Tuple, Union
+
+import fsspec
+import numpy as np
+import pyarrow as pa
+import pyarrow.parquet as pq
+
+from . import config
+from .features import Features, Image, Value
+from .features.features import (
+ FeatureType,
+ _ArrayXDExtensionType,
+ cast_to_python_objects,
+ generate_from_arrow_type,
+ get_nested_type,
+ list_of_np_array_to_pyarrow_listarray,
+ numpy_to_pyarrow_listarray,
+ to_pyarrow_listarray,
+)
+from .filesystems import is_remote_filesystem
+from .info import DatasetInfo
+from .keyhash import DuplicatedKeysError, KeyHasher
+from .table import array_cast, cast_array_to_feature, embed_table_storage, table_cast
+from .utils import logging
+from .utils import tqdm as hf_tqdm
+from .utils.file_utils import hash_url_to_filename
+from .utils.py_utils import asdict, first_non_null_value
+
+
+logger = logging.get_logger(__name__)
+
+type_ = type # keep python's type function
+
+
+class SchemaInferenceError(ValueError):
+ pass
+
+
+class TypedSequence:
+ """
+ This data container generalizes the typing when instantiating pyarrow arrays, tables or batches.
+
+ More specifically it adds several features:
+ - Support extension types like ``datasets.features.Array2DExtensionType``:
+ By default pyarrow arrays don't return extension arrays. One has to call
+ ``pa.ExtensionArray.from_storage(type, pa.array(data, type.storage_type))``
+ in order to get an extension array.
+ - Support for ``try_type`` parameter that can be used instead of ``type``:
+ When an array is transformed, we like to keep the same type as before if possible.
+ For example when calling :func:`datasets.Dataset.map`, we don't want to change the type
+ of each column by default.
+ - Better error message when a pyarrow array overflows.
+
+ Example::
+
+ from datasets.features import Array2D, Array2DExtensionType, Value
+ from datasets.arrow_writer import TypedSequence
+ import pyarrow as pa
+
+ arr = pa.array(TypedSequence([1, 2, 3], type=Value("int32")))
+ assert arr.type == pa.int32()
+
+ arr = pa.array(TypedSequence([1, 2, 3], try_type=Value("int32")))
+ assert arr.type == pa.int32()
+
+ arr = pa.array(TypedSequence(["foo", "bar"], try_type=Value("int32")))
+ assert arr.type == pa.string()
+
+ arr = pa.array(TypedSequence([[[1, 2, 3]]], type=Array2D((1, 3), "int64")))
+ assert arr.type == Array2DExtensionType((1, 3), "int64")
+
+ table = pa.Table.from_pydict({
+ "image": TypedSequence([[[1, 2, 3]]], type=Array2D((1, 3), "int64"))
+ })
+ assert table["image"].type == Array2DExtensionType((1, 3), "int64")
+
+ """
+
+ def __init__(
+ self,
+ data: Iterable,
+ type: Optional[FeatureType] = None,
+ try_type: Optional[FeatureType] = None,
+ optimized_int_type: Optional[FeatureType] = None,
+ ):
+ # assert type is None or try_type is None,
+ if type is not None and try_type is not None:
+ raise ValueError("You cannot specify both type and try_type")
+ # set attributes
+ self.data = data
+ self.type = type
+ self.try_type = try_type # is ignored if it doesn't match the data
+ self.optimized_int_type = optimized_int_type
+ # when trying a type (is ignored if data is not compatible)
+ self.trying_type = self.try_type is not None
+ self.trying_int_optimization = optimized_int_type is not None and type is None and try_type is None
+ # used to get back the inferred type after __arrow_array__() is called once
+ self._inferred_type = None
+
+ def get_inferred_type(self) -> FeatureType:
+ """Return the inferred feature type.
+ This is done by converting the sequence to an Arrow array, and getting the corresponding
+ feature type.
+
+ Since building the Arrow array can be expensive, the value of the inferred type is cached
+ as soon as pa.array is called on the typed sequence.
+
+ Returns:
+ FeatureType: inferred feature type of the sequence.
+ """
+ if self._inferred_type is None:
+ self._inferred_type = generate_from_arrow_type(pa.array(self).type)
+ return self._inferred_type
+
+ @staticmethod
+ def _infer_custom_type_and_encode(data: Iterable) -> Tuple[Iterable, Optional[FeatureType]]:
+ """Implement type inference for custom objects like PIL.Image.Image -> Image type.
+
+        This function is only used for custom python objects that can't be directly passed to build
+        an Arrow array. In such cases it infers the feature type to use, and it encodes the data so
+ that they can be passed to an Arrow array.
+
+ Args:
+ data (Iterable): array of data to infer the type, e.g. a list of PIL images.
+
+ Returns:
+ Tuple[Iterable, Optional[FeatureType]]: a tuple with:
+ - the (possibly encoded) array, if the inferred feature type requires encoding
+ - the inferred feature type if the array is made of supported custom objects like
+ PIL images, else None.
+ """
+ if config.PIL_AVAILABLE and "PIL" in sys.modules:
+ import PIL.Image
+
+ non_null_idx, non_null_value = first_non_null_value(data)
+ if isinstance(non_null_value, PIL.Image.Image):
+ return [Image().encode_example(value) if value is not None else None for value in data], Image()
+ return data, None
+
+ def __arrow_array__(self, type: Optional[pa.DataType] = None):
+ """This function is called when calling pa.array(typed_sequence)"""
+
+ if type is not None:
+ raise ValueError("TypedSequence is supposed to be used with pa.array(typed_sequence, type=None)")
+ del type # make sure we don't use it
+ data = self.data
+ # automatic type inference for custom objects
+ if self.type is None and self.try_type is None:
+ data, self._inferred_type = self._infer_custom_type_and_encode(data)
+ if self._inferred_type is None:
+ type = self.try_type if self.trying_type else self.type
+ else:
+ type = self._inferred_type
+ pa_type = get_nested_type(type) if type is not None else None
+ optimized_int_pa_type = (
+ get_nested_type(self.optimized_int_type) if self.optimized_int_type is not None else None
+ )
+ trying_cast_to_python_objects = False
+ try:
+ # custom pyarrow types
+ if isinstance(pa_type, _ArrayXDExtensionType):
+ storage = to_pyarrow_listarray(data, pa_type)
+ return pa.ExtensionArray.from_storage(pa_type, storage)
+
+ # efficient np array to pyarrow array
+ if isinstance(data, np.ndarray):
+ out = numpy_to_pyarrow_listarray(data)
+ elif isinstance(data, list) and data and isinstance(first_non_null_value(data)[1], np.ndarray):
+ out = list_of_np_array_to_pyarrow_listarray(data)
+ else:
+ trying_cast_to_python_objects = True
+ out = pa.array(cast_to_python_objects(data, only_1d_for_numpy=True))
+ # use smaller integer precisions if possible
+ if self.trying_int_optimization:
+ if pa.types.is_int64(out.type):
+ out = out.cast(optimized_int_pa_type)
+ elif pa.types.is_list(out.type):
+ if pa.types.is_int64(out.type.value_type):
+ out = array_cast(out, pa.list_(optimized_int_pa_type))
+ elif pa.types.is_list(out.type.value_type) and pa.types.is_int64(out.type.value_type.value_type):
+ out = array_cast(out, pa.list_(pa.list_(optimized_int_pa_type)))
+ # otherwise we can finally use the user's type
+ elif type is not None:
+ # We use cast_array_to_feature to support casting to custom types like Audio and Image
+ # Also, when trying type "string", we don't want to convert integers or floats to "string".
+ # We only do it if trying_type is False - since this is what the user asks for.
+ out = cast_array_to_feature(out, type, allow_number_to_str=not self.trying_type)
+ return out
+ except (
+ TypeError,
+ pa.lib.ArrowInvalid,
+ pa.lib.ArrowNotImplementedError,
+ ) as e: # handle type errors and overflows
+ # Ignore ArrowNotImplementedError caused by trying type, otherwise re-raise
+ if not self.trying_type and isinstance(e, pa.lib.ArrowNotImplementedError):
+ raise
+
+ if self.trying_type:
+ try: # second chance
+ if isinstance(data, np.ndarray):
+ return numpy_to_pyarrow_listarray(data)
+ elif isinstance(data, list) and data and any(isinstance(value, np.ndarray) for value in data):
+ return list_of_np_array_to_pyarrow_listarray(data)
+ else:
+ trying_cast_to_python_objects = True
+ return pa.array(cast_to_python_objects(data, only_1d_for_numpy=True))
+ except pa.lib.ArrowInvalid as e:
+ if "overflow" in str(e):
+ raise OverflowError(
+ f"There was an overflow with type {type_(data)}. Try to reduce writer_batch_size to have batches smaller than 2GB.\n({e})"
+ ) from None
+ elif self.trying_int_optimization and "not in range" in str(e):
+ optimized_int_pa_type_str = np.dtype(optimized_int_pa_type.to_pandas_dtype()).name
+ logger.info(
+ f"Failed to cast a sequence to {optimized_int_pa_type_str}. Falling back to int64."
+ )
+ return out
+ elif trying_cast_to_python_objects and "Could not convert" in str(e):
+ out = pa.array(
+ cast_to_python_objects(data, only_1d_for_numpy=True, optimize_list_casting=False)
+ )
+ if type is not None:
+ out = cast_array_to_feature(out, type, allow_number_to_str=True)
+ return out
+ else:
+ raise
+ elif "overflow" in str(e):
+ raise OverflowError(
+ f"There was an overflow with type {type_(data)}. Try to reduce writer_batch_size to have batches smaller than 2GB.\n({e})"
+ ) from None
+ elif self.trying_int_optimization and "not in range" in str(e):
+ optimized_int_pa_type_str = np.dtype(optimized_int_pa_type.to_pandas_dtype()).name
+ logger.info(f"Failed to cast a sequence to {optimized_int_pa_type_str}. Falling back to int64.")
+ return out
+ elif trying_cast_to_python_objects and "Could not convert" in str(e):
+ out = pa.array(cast_to_python_objects(data, only_1d_for_numpy=True, optimize_list_casting=False))
+ if type is not None:
+ out = cast_array_to_feature(out, type, allow_number_to_str=True)
+ return out
+ else:
+ raise
+
+
+class OptimizedTypedSequence(TypedSequence):
+ def __init__(
+ self,
+ data,
+ type: Optional[FeatureType] = None,
+ try_type: Optional[FeatureType] = None,
+ col: Optional[str] = None,
+ optimized_int_type: Optional[FeatureType] = None,
+ ):
+ optimized_int_type_by_col = {
+ "attention_mask": Value("int8"), # binary tensor
+ "special_tokens_mask": Value("int8"),
+ "input_ids": Value("int32"), # typical vocab size: 0-50k (max ~500k, never > 1M)
+ "token_type_ids": Value(
+ "int8"
+ ), # binary mask; some (XLNetModel) use an additional token represented by a 2
+ }
+ if type is None and try_type is None:
+ optimized_int_type = optimized_int_type_by_col.get(col, None)
+ super().__init__(data, type=type, try_type=try_type, optimized_int_type=optimized_int_type)
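+
+
+# Illustrative sketch (not part of the original module): without an explicit type, a column named
+# "input_ids" is downcast from int64 to int32 by the optimized typed sequence, e.g.
+#   arr = pa.array(OptimizedTypedSequence([1, 2, 3], col="input_ids"))
+#   assert arr.type == pa.int32()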
+
+
+class ArrowWriter:
+ """Shuffles and writes Examples to Arrow files."""
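+
+    # Typical usage (an illustrative sketch, not part of the original docstring; the output path is assumed):
+    #
+    #   with ArrowWriter(path="out.arrow", features=Features({"text": Value("string")})) as writer:
+    #       writer.write({"text": "hello"})
+    #       writer.write_examples_on_file()  # flush any staged examples to the Arrow stream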
+
+ _WRITER_CLASS = pa.RecordBatchStreamWriter
+
+ def __init__(
+ self,
+ schema: Optional[pa.Schema] = None,
+ features: Optional[Features] = None,
+ path: Optional[str] = None,
+ stream: Optional[pa.NativeFile] = None,
+ fingerprint: Optional[str] = None,
+ writer_batch_size: Optional[int] = None,
+ hash_salt: Optional[str] = None,
+ check_duplicates: Optional[bool] = False,
+ disable_nullable: bool = False,
+ update_features: bool = False,
+ with_metadata: bool = True,
+ unit: str = "examples",
+ embed_local_files: bool = False,
+ storage_options: Optional[dict] = None,
+ ):
+ if path is None and stream is None:
+ raise ValueError("At least one of path and stream must be provided.")
+ if features is not None:
+ self._features = features
+ self._schema = None
+ elif schema is not None:
+ self._schema: pa.Schema = schema
+ self._features = Features.from_arrow_schema(self._schema)
+ else:
+ self._features = None
+ self._schema = None
+
+ if hash_salt is not None:
+ # Create KeyHasher instance using split name as hash salt
+ self._hasher = KeyHasher(hash_salt)
+ else:
+ self._hasher = KeyHasher("")
+
+ self._check_duplicates = check_duplicates
+ self._disable_nullable = disable_nullable
+
+ if stream is None:
+ fs_token_paths = fsspec.get_fs_token_paths(path, storage_options=storage_options)
+ self._fs: fsspec.AbstractFileSystem = fs_token_paths[0]
+ self._path = (
+ fs_token_paths[2][0]
+ if not is_remote_filesystem(self._fs)
+ else self._fs.unstrip_protocol(fs_token_paths[2][0])
+ )
+ self.stream = self._fs.open(fs_token_paths[2][0], "wb")
+ self._closable_stream = True
+ else:
+ self._fs = None
+ self._path = None
+ self.stream = stream
+ self._closable_stream = False
+
+ self.fingerprint = fingerprint
+ self.disable_nullable = disable_nullable
+ self.writer_batch_size = writer_batch_size or config.DEFAULT_MAX_BATCH_SIZE
+ self.update_features = update_features
+ self.with_metadata = with_metadata
+ self.unit = unit
+ self.embed_local_files = embed_local_files
+
+ self._num_examples = 0
+ self._num_bytes = 0
+ self.current_examples: List[Tuple[Dict[str, Any], str]] = []
+ self.current_rows: List[pa.Table] = []
+ self.pa_writer: Optional[pa.RecordBatchStreamWriter] = None
+ self.hkey_record = []
+
+ def __len__(self):
+        """Return the number of written and staged examples"""
+ return self._num_examples + len(self.current_examples) + len(self.current_rows)
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ self.close()
+
+ def close(self):
+ # Try closing if opened; if closed: pyarrow.lib.ArrowInvalid: Invalid operation on closed file
+ if self.pa_writer: # it might be None
+ try:
+ self.pa_writer.close()
+ except Exception: # pyarrow.lib.ArrowInvalid, OSError
+ pass
+ if self._closable_stream and not self.stream.closed:
+ self.stream.close() # This also closes self.pa_writer if it is opened
+
+ def _build_writer(self, inferred_schema: pa.Schema):
+ schema = self.schema
+ inferred_features = Features.from_arrow_schema(inferred_schema)
+ if self._features is not None:
+            if self.update_features:  # keep original features if they match, or update them
+ fields = {field.name: field for field in self._features.type}
+ for inferred_field in inferred_features.type:
+ name = inferred_field.name
+ if name in fields:
+ if inferred_field == fields[name]:
+ inferred_features[name] = self._features[name]
+ self._features = inferred_features
+ schema: pa.Schema = inferred_schema
+ else:
+ self._features = inferred_features
+ schema: pa.Schema = inferred_features.arrow_schema
+ if self.disable_nullable:
+ schema = pa.schema(pa.field(field.name, field.type, nullable=False) for field in schema)
+ if self.with_metadata:
+ schema = schema.with_metadata(self._build_metadata(DatasetInfo(features=self._features), self.fingerprint))
+ else:
+ schema = schema.with_metadata({})
+ self._schema = schema
+ self.pa_writer = self._WRITER_CLASS(self.stream, schema)
+
+ @property
+ def schema(self):
+ _schema = (
+ self._schema
+ if self._schema is not None
+ else (pa.schema(self._features.type) if self._features is not None else None)
+ )
+ if self._disable_nullable and _schema is not None:
+ _schema = pa.schema(pa.field(field.name, field.type, nullable=False) for field in _schema)
+ return _schema if _schema is not None else []
+
+ @staticmethod
+ def _build_metadata(info: DatasetInfo, fingerprint: Optional[str] = None) -> Dict[str, str]:
+ info_keys = ["features"] # we can add support for more DatasetInfo keys in the future
+ info_as_dict = asdict(info)
+ metadata = {}
+ metadata["info"] = {key: info_as_dict[key] for key in info_keys}
+ if fingerprint is not None:
+ metadata["fingerprint"] = fingerprint
+ return {"huggingface": json.dumps(metadata)}
+
+ def write_examples_on_file(self):
+        """Write stored examples from the write-pool of examples. It makes a table out of the examples and writes it."""
+ if not self.current_examples:
+ return
+        # preserve the order of the columns
+ if self.schema:
+ schema_cols = set(self.schema.names)
+ examples_cols = self.current_examples[0][0].keys() # .keys() preserves the order (unlike set)
+ common_cols = [col for col in self.schema.names if col in examples_cols]
+ extra_cols = [col for col in examples_cols if col not in schema_cols]
+ cols = common_cols + extra_cols
+ else:
+ cols = list(self.current_examples[0][0])
+ batch_examples = {}
+ for col in cols:
+ # We use row[0][col] since current_examples contains (example, key) tuples.
+            # Moreover, examples could be Arrow arrays of 1 element.
+ # This can happen in `.map()` when we want to re-write the same Arrow data
+ if all(isinstance(row[0][col], (pa.Array, pa.ChunkedArray)) for row in self.current_examples):
+ arrays = [row[0][col] for row in self.current_examples]
+ arrays = [
+ chunk
+ for array in arrays
+ for chunk in (array.chunks if isinstance(array, pa.ChunkedArray) else [array])
+ ]
+ batch_examples[col] = pa.concat_arrays(arrays)
+ else:
+ batch_examples[col] = [
+ row[0][col].to_pylist()[0] if isinstance(row[0][col], (pa.Array, pa.ChunkedArray)) else row[0][col]
+ for row in self.current_examples
+ ]
+ self.write_batch(batch_examples=batch_examples)
+ self.current_examples = []
+
+ def write_rows_on_file(self):
+ """Write stored rows from the write-pool of rows. It concatenates the single-row tables and it writes the resulting table."""
+ if not self.current_rows:
+ return
+ table = pa.concat_tables(self.current_rows)
+ self.write_table(table)
+ self.current_rows = []
+
+ def write(
+ self,
+ example: Dict[str, Any],
+ key: Optional[Union[str, int, bytes]] = None,
+ writer_batch_size: Optional[int] = None,
+ ):
+ """Add a given (Example,Key) pair to the write-pool of examples which is written to file.
+
+ Args:
+ example: the Example to add.
+ key: Optional, a unique identifier (str, int or bytes) associated with each example.
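+
+ Example (an illustrative sketch; ``data.arrow`` is a placeholder output path):
+
+ ```py
+ >>> with ArrowWriter(path="data.arrow") as writer:
+ ...     writer.write({"text": "hello"}, key=0)
+ ...     writer.write({"text": "world"}, key=1)
+ ...     num_examples, num_bytes = writer.finalize()
+ ```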
+ """
+ # Utilize the keys and duplicate checking when `self._check_duplicates` is passed True
+ if self._check_duplicates:
+ # Create unique hash from key and store as (key, example) pairs
+ hash = self._hasher.hash(key)
+ self.current_examples.append((example, hash))
+ # Maintain record of keys and their respective hashes for checking duplicates
+ self.hkey_record.append((hash, key))
+ else:
+ # Store example as a tuple so as to keep the structure of `self.current_examples` uniform
+ self.current_examples.append((example, ""))
+
+ if writer_batch_size is None:
+ writer_batch_size = self.writer_batch_size
+ if writer_batch_size is not None and len(self.current_examples) >= writer_batch_size:
+ if self._check_duplicates:
+ self.check_duplicate_keys()
+ # Re-initializing to an empty list for the next batch
+ self.hkey_record = []
+
+ self.write_examples_on_file()
+
+ def check_duplicate_keys(self):
+ """Raises error if duplicates found in a batch"""
+ tmp_record = set()
+ for hash, key in self.hkey_record:
+ if hash in tmp_record:
+ duplicate_key_indices = [
+ str(self._num_examples + index)
+ for index, (duplicate_hash, _) in enumerate(self.hkey_record)
+ if duplicate_hash == hash
+ ]
+
+ raise DuplicatedKeysError(key, duplicate_key_indices)
+ else:
+ tmp_record.add(hash)
+
+ def write_row(self, row: pa.Table, writer_batch_size: Optional[int] = None):
+ """Add a given single-row Table to the write-pool of rows which is written to file.
+
+ Args:
+ row: the row to add.
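+
+ Example (an illustrative sketch using a single-row ``pyarrow`` table):
+
+ ```py
+ >>> writer.write_row(pa.table({"text": ["hello"]}))
+ ```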
+ """
+ if len(row) != 1:
+ raise ValueError(f"Only single-row pyarrow tables are allowed but got table with {len(row)} rows.")
+ self.current_rows.append(row)
+ if writer_batch_size is None:
+ writer_batch_size = self.writer_batch_size
+ if writer_batch_size is not None and len(self.current_rows) >= writer_batch_size:
+ self.write_rows_on_file()
+
+ def write_batch(
+ self,
+ batch_examples: Dict[str, List],
+ writer_batch_size: Optional[int] = None,
+ ):
+ """Write a batch of Example to file.
+ Ignores the batch if it appears to be empty,
+ preventing a potential schema update of unknown types.
+
+ Args:
+ batch_examples: the batch of examples to add.
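+
+ Example (an illustrative sketch; columns map to lists of equal length):
+
+ ```py
+ >>> writer.write_batch({"text": ["hello", "world"], "label": [0, 1]})
+ ```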
+ """
+ if batch_examples and len(next(iter(batch_examples.values()))) == 0:
+ return
+ features = None if self.pa_writer is None and self.update_features else self._features
+ try_features = self._features if self.pa_writer is None and self.update_features else None
+ arrays = []
+ inferred_features = Features()
+ # preserve the order of the columns
+ if self.schema:
+ schema_cols = set(self.schema.names)
+ batch_cols = batch_examples.keys() # .keys() preserves the order (unlike set)
+ common_cols = [col for col in self.schema.names if col in batch_cols]
+ extra_cols = [col for col in batch_cols if col not in schema_cols]
+ cols = common_cols + extra_cols
+ else:
+ cols = list(batch_examples)
+ for col in cols:
+ col_values = batch_examples[col]
+ col_type = features[col] if features else None
+ if isinstance(col_values, (pa.Array, pa.ChunkedArray)):
+ array = cast_array_to_feature(col_values, col_type) if col_type is not None else col_values
+ arrays.append(array)
+ inferred_features[col] = generate_from_arrow_type(col_values.type)
+ else:
+ col_try_type = try_features[col] if try_features is not None and col in try_features else None
+ typed_sequence = OptimizedTypedSequence(col_values, type=col_type, try_type=col_try_type, col=col)
+ arrays.append(pa.array(typed_sequence))
+ inferred_features[col] = typed_sequence.get_inferred_type()
+ schema = inferred_features.arrow_schema if self.pa_writer is None else self.schema
+ pa_table = pa.Table.from_arrays(arrays, schema=schema)
+ self.write_table(pa_table, writer_batch_size)
+
+ def write_table(self, pa_table: pa.Table, writer_batch_size: Optional[int] = None):
+ """Write a Table to file.
+
+ Args:
+ pa_table: the Table to add.
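+
+ Example (an illustrative sketch):
+
+ ```py
+ >>> writer.write_table(pa.table({"text": ["hello", "world"], "label": [0, 1]}))
+ ```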
+ """
+ if writer_batch_size is None:
+ writer_batch_size = self.writer_batch_size
+ if self.pa_writer is None:
+ self._build_writer(inferred_schema=pa_table.schema)
+ pa_table = pa_table.combine_chunks()
+ pa_table = table_cast(pa_table, self._schema)
+ if self.embed_local_files:
+ pa_table = embed_table_storage(pa_table)
+ self._num_bytes += pa_table.nbytes
+ self._num_examples += pa_table.num_rows
+ self.pa_writer.write_table(pa_table, writer_batch_size)
+
+ def finalize(self, close_stream=True):
+ self.write_rows_on_file()
+ # Flush the remaining examples in case len(current_examples) < writer_batch_size when the user calls finalize()
+ if self._check_duplicates:
+ self.check_duplicate_keys()
+ # Re-initializing to an empty list for the next batch
+ self.hkey_record = []
+ self.write_examples_on_file()
+ # If the schema is known, build the writer (inferring features from the schema) even if no examples were written
+ if self.pa_writer is None and self.schema:
+ self._build_writer(self.schema)
+ if self.pa_writer is not None:
+ self.pa_writer.close()
+ self.pa_writer = None
+ if close_stream:
+ self.stream.close()
+ else:
+ if close_stream:
+ self.stream.close()
+ raise SchemaInferenceError("Please pass `features` or at least one example when writing data")
+ logger.debug(
+ f"Done writing {self._num_examples} {self.unit} in {self._num_bytes} bytes {self._path if self._path else ''}."
+ )
+ return self._num_examples, self._num_bytes
+
+
+class ParquetWriter(ArrowWriter):
+ _WRITER_CLASS = pq.ParquetWriter
+
+
+class BeamWriter:
+ """
+ Shuffles and writes Examples to Arrow files.
+ The Arrow files are converted from Parquet files that are the output of Apache Beam pipelines.
+ """
+
+ def __init__(
+ self,
+ features: Optional[Features] = None,
+ schema: Optional[pa.Schema] = None,
+ path: Optional[str] = None,
+ namespace: Optional[str] = None,
+ cache_dir: Optional[str] = None,
+ ):
+ if features is None and schema is None:
+ raise ValueError("At least one of features and schema must be provided.")
+ if path is None:
+ raise ValueError("Path must be provided.")
+
+ if features is not None:
+ self._features: Features = features
+ self._schema: pa.Schema = features.arrow_schema
+ else:
+ self._schema: pa.Schema = schema
+ self._features: Features = Features.from_arrow_schema(schema)
+
+ self._path = path
+ self._parquet_path = os.path.splitext(path)[0] # remove extension
+ self._namespace = namespace or "default"
+ self._num_examples = None
+ self._cache_dir = cache_dir or config.HF_DATASETS_CACHE
+
+ def write_from_pcollection(self, pcoll_examples):
+ """Add the final steps of the beam pipeline: write to parquet files."""
+ import apache_beam as beam
+
+ def inc_num_examples(example):
+ beam.metrics.Metrics.counter(self._namespace, "num_examples").inc()
+
+ # count examples
+ _ = pcoll_examples | "Count N. Examples" >> beam.Map(inc_num_examples)
+
+ # save dataset
+ return (
+ pcoll_examples
+ | "Get values" >> beam.Values()
+ | "Save to parquet"
+ >> beam.io.parquetio.WriteToParquet(
+ self._parquet_path, self._schema, shard_name_template="-SSSSS-of-NNNNN.parquet"
+ )
+ )
+
+ def finalize(self, metrics_query_result: dict):
+ """
+ Run after the pipeline has finished.
+ It converts the resulting parquet files to arrow and it completes the info from the pipeline metrics.
+
+ Args:
+ metrics_query_result: `dict` obtained from pipeline_results.metrics().query(m_filter). Make sure
+ that the filter keeps only the metrics for the considered split, under the namespace `split_name`.
+ """
+
+ # Beam FileSystems require the system's path separator in the older versions
+ fs, _, [parquet_path] = fsspec.get_fs_token_paths(self._parquet_path)
+ parquet_path = str(Path(parquet_path)) if not is_remote_filesystem(fs) else fs.unstrip_protocol(parquet_path)
+
+ shards = fs.glob(parquet_path + "*.parquet")
+ num_bytes = sum(fs.sizes(shards))
+ shard_lengths = get_parquet_lengths(shards)
+
+ # Convert to arrow
+ if self._path.endswith(".arrow"):
+ logger.info(f"Converting parquet files {self._parquet_path} to arrow {self._path}")
+ try: # stream conversion
+ num_bytes = 0
+ for shard in hf_tqdm(shards, unit="shards"):
+ with fs.open(shard, "rb") as source:
+ with fs.open(shard.replace(".parquet", ".arrow"), "wb") as destination:
+ shard_num_bytes, _ = parquet_to_arrow(source, destination)
+ num_bytes += shard_num_bytes
+ except OSError as e: # broken pipe can happen if the connection is unstable, do local conversion instead
+ if e.errno != errno.EPIPE: # not a broken pipe
+ raise
+ logger.warning(
+ "Broken Pipe during stream conversion from parquet to arrow. Using local convert instead"
+ )
+ local_convert_dir = os.path.join(self._cache_dir, "beam_convert")
+ os.makedirs(local_convert_dir, exist_ok=True)
+ num_bytes = 0
+ for shard in hf_tqdm(shards, unit="shards"):
+ local_parquet_path = os.path.join(local_convert_dir, hash_url_to_filename(shard) + ".parquet")
+ fs.download(shard, local_parquet_path)
+ local_arrow_path = local_parquet_path.replace(".parquet", ".arrow")
+ shard_num_bytes, _ = parquet_to_arrow(local_parquet_path, local_arrow_path)
+ num_bytes += shard_num_bytes
+ remote_arrow_path = shard.replace(".parquet", ".arrow")
+ fs.upload(local_arrow_path, remote_arrow_path)
+
+ # Save metrics
+ counters_dict = {metric.key.metric.name: metric.result for metric in metrics_query_result["counters"]}
+ self._num_examples = counters_dict["num_examples"]
+ self._num_bytes = num_bytes
+ self._shard_lengths = shard_lengths
+ return self._num_examples, self._num_bytes
+
+
+def get_parquet_lengths(sources) -> List[int]:
+ shard_lengths = []
+ for source in hf_tqdm(sources, unit="parquet files"):
+ parquet_file = pa.parquet.ParquetFile(source)
+ shard_lengths.append(parquet_file.metadata.num_rows)
+ return shard_lengths
+
+
+def parquet_to_arrow(source, destination) -> List[int]:
+ """Convert parquet file to arrow file. Inputs can be str paths or file-like objects"""
+ stream = None if isinstance(destination, str) else destination
+ parquet_file = pa.parquet.ParquetFile(source)
+ # Beam can create empty Parquet files, so we need to pass the source Parquet file's schema
+ with ArrowWriter(schema=parquet_file.schema_arrow, path=destination, stream=stream) as writer:
+ for record_batch in parquet_file.iter_batches():
+ pa_table = pa.Table.from_batches([record_batch])
+ writer.write_table(pa_table)
+ # finalize() returns (num_examples, num_bytes); unpack accordingly and return bytes first, as callers expect
+ num_examples, num_bytes = writer.finalize()
+ return num_bytes, num_examples
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/builder.py b/evalkit_tf437/lib/python3.10/site-packages/datasets/builder.py
new file mode 100644
index 0000000000000000000000000000000000000000..8f4bc1962b60e9080542e29ed402212865cb3b1e
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/datasets/builder.py
@@ -0,0 +1,2270 @@
+# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Lint as: python3
+"""DatasetBuilder base class."""
+
+import abc
+import contextlib
+import copy
+import inspect
+import os
+import posixpath
+import shutil
+import textwrap
+import time
+import urllib
+import warnings
+from dataclasses import dataclass
+from functools import partial
+from pathlib import Path
+from typing import TYPE_CHECKING, Dict, Iterable, Mapping, Optional, Tuple, Union
+from unittest.mock import patch
+
+import fsspec
+import pyarrow as pa
+from multiprocess import Pool
+from tqdm.contrib.concurrent import thread_map
+
+from . import config, utils
+from .arrow_dataset import Dataset
+from .arrow_reader import (
+ HF_GCP_BASE_URL,
+ ArrowReader,
+ DatasetNotOnHfGcsError,
+ MissingFilesOnHfGcsError,
+ ReadInstruction,
+)
+from .arrow_writer import ArrowWriter, BeamWriter, ParquetWriter, SchemaInferenceError
+from .data_files import DataFilesDict, DataFilesPatternsDict, sanitize_patterns
+from .dataset_dict import DatasetDict, IterableDatasetDict
+from .download.download_config import DownloadConfig
+from .download.download_manager import DownloadManager, DownloadMode
+from .download.mock_download_manager import MockDownloadManager
+from .download.streaming_download_manager import StreamingDownloadManager, xjoin, xopen
+from .exceptions import DatasetGenerationCastError, DatasetGenerationError, FileFormatError, ManualDownloadError
+from .features import Features
+from .filesystems import (
+ is_remote_filesystem,
+ rename,
+)
+from .fingerprint import Hasher
+from .info import DatasetInfo, DatasetInfosDict, PostProcessedInfo
+from .iterable_dataset import ArrowExamplesIterable, ExamplesIterable, IterableDataset
+from .keyhash import DuplicatedKeysError
+from .naming import INVALID_WINDOWS_CHARACTERS_IN_PATH, camelcase_to_snakecase
+from .splits import Split, SplitDict, SplitGenerator, SplitInfo
+from .streaming import extend_dataset_builder_for_streaming
+from .table import CastError
+from .utils import logging
+from .utils import tqdm as hf_tqdm
+from .utils._filelock import FileLock
+from .utils.file_utils import cached_path, is_remote_url
+from .utils.info_utils import VerificationMode, get_size_checksum_dict, verify_checksums, verify_splits
+from .utils.py_utils import (
+ classproperty,
+ convert_file_size_to_int,
+ has_sufficient_disk_space,
+ iflatmap_unordered,
+ map_nested,
+ memoize,
+ size_str,
+ temporary_assignment,
+)
+from .utils.sharding import _number_of_shards_in_gen_kwargs, _split_gen_kwargs
+from .utils.track import tracked_list
+
+
+if TYPE_CHECKING:
+ from .load import DatasetModule
+
+
+logger = logging.get_logger(__name__)
+
+
+class InvalidConfigName(ValueError):
+ pass
+
+
+@dataclass
+class BuilderConfig:
+ """Base class for `DatasetBuilder` data configuration.
+
+ `DatasetBuilder` subclasses with data configuration options should subclass
+ `BuilderConfig` and add their own properties.
+
+ Attributes:
+ name (`str`, defaults to `default`):
+ The name of the configuration.
+ version (`Version` or `str`, defaults to `0.0.0`):
+ The version of the configuration.
+ data_dir (`str`, *optional*):
+ Path to the directory containing the source data.
+ data_files (`str` or `Sequence` or `Mapping`, *optional*):
+ Path(s) to source data file(s).
+ description (`str`, *optional*):
+ A human description of the configuration.
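+
+ Example (an illustrative sketch of a custom config; the ``answer_column`` option is hypothetical):
+
+ ```py
+ >>> from dataclasses import dataclass
+ >>> from datasets import BuilderConfig
+ >>> @dataclass
+ ... class MyDatasetConfig(BuilderConfig):
+ ...     answer_column: str = "answer"
+ ```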
+ """
+
+ name: str = "default"
+ version: Optional[Union[utils.Version, str]] = utils.Version("0.0.0")
+ data_dir: Optional[str] = None
+ data_files: Optional[Union[DataFilesDict, DataFilesPatternsDict]] = None
+ description: Optional[str] = None
+
+ def __post_init__(self):
+ # The config name is used to name the cache directory.
+ for invalid_char in INVALID_WINDOWS_CHARACTERS_IN_PATH:
+ if invalid_char in self.name:
+ raise InvalidConfigName(
+ f"Bad characters from black list '{INVALID_WINDOWS_CHARACTERS_IN_PATH}' found in '{self.name}'. "
+ f"They could create issues when creating a directory for this config on Windows filesystem."
+ )
+ if self.data_files is not None and not isinstance(self.data_files, (DataFilesDict, DataFilesPatternsDict)):
+ raise ValueError(f"Expected a DataFilesDict in data_files but got {self.data_files}")
+
+ def __eq__(self, o):
+ # we need to override the default dataclass __eq__ since it doesn't check for
+ # other attributes than the ones of the signature.
+ if set(self.__dict__.keys()) != set(o.__dict__.keys()):
+ return False
+ return all((k, getattr(self, k)) == (k, getattr(o, k)) for k in self.__dict__.keys())
+
+ def create_config_id(
+ self,
+ config_kwargs: dict,
+ custom_features: Optional[Features] = None,
+ ) -> str:
+ """
+ The config id is used to build the cache directory.
+ By default it is equal to the config name.
+ However the name of a config is not sufficient to have a unique identifier for the dataset being generated
+ since it doesn't take into account:
+ - the config kwargs that can be used to overwrite attributes
+ - the custom features used to write the dataset
+ - the data_files for json/text/csv/pandas datasets
+
+ Therefore the config id is just the config name with an optional suffix based on these.
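+
+ For example, a primitive kwarg (say, a hypothetical ``sep=","``) would produce a config id such as
+ ``default-sep=%2C``, while suffixes that are too long or contain non-primitive values are replaced by a hash.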
+ """
+ # Possibly add a suffix to the name to handle custom features/data_files/config_kwargs
+ suffix: Optional[str] = None
+ config_kwargs_to_add_to_suffix = config_kwargs.copy()
+ # name and version are already used to build the cache directory
+ config_kwargs_to_add_to_suffix.pop("name", None)
+ config_kwargs_to_add_to_suffix.pop("version", None)
+ # data dir handling (when specified it points to the manually downloaded data):
+ # it was previously ignored before the introduction of config id because we didn't want
+ # to change the config name. Now it's fine to take it into account for the config id.
+ # config_kwargs_to_add_to_suffix.pop("data_dir", None)
+ if "data_dir" in config_kwargs_to_add_to_suffix:
+ if config_kwargs_to_add_to_suffix["data_dir"] is None:
+ config_kwargs_to_add_to_suffix.pop("data_dir", None)
+ else:
+ # canonicalize the data dir to avoid two paths to the same location having different
+ # hashes
+ data_dir = config_kwargs_to_add_to_suffix["data_dir"]
+ data_dir = os.path.normpath(data_dir)
+ config_kwargs_to_add_to_suffix["data_dir"] = data_dir
+ if config_kwargs_to_add_to_suffix:
+ # we don't care about the order of the kwargs
+ config_kwargs_to_add_to_suffix = {
+ k: config_kwargs_to_add_to_suffix[k] for k in sorted(config_kwargs_to_add_to_suffix)
+ }
+ if all(isinstance(v, (str, bool, int, float)) for v in config_kwargs_to_add_to_suffix.values()):
+ suffix = ",".join(
+ str(k) + "=" + urllib.parse.quote_plus(str(v)) for k, v in config_kwargs_to_add_to_suffix.items()
+ )
+ if len(suffix) > 32: # hash if too long
+ suffix = Hasher.hash(config_kwargs_to_add_to_suffix)
+ else:
+ suffix = Hasher.hash(config_kwargs_to_add_to_suffix)
+
+ if custom_features is not None:
+ m = Hasher()
+ if suffix:
+ m.update(suffix)
+ m.update(custom_features)
+ suffix = m.hexdigest()
+
+ if suffix:
+ config_id = self.name + "-" + suffix
+ if len(config_id) > config.MAX_DATASET_CONFIG_ID_READABLE_LENGTH:
+ config_id = self.name + "-" + Hasher.hash(suffix)
+ return config_id
+ else:
+ return self.name
+
+ def _resolve_data_files(self, base_path: str, download_config: DownloadConfig) -> None:
+ if isinstance(self.data_files, DataFilesPatternsDict):
+ base_path = xjoin(base_path, self.data_dir) if self.data_dir else base_path
+ self.data_files = self.data_files.resolve(base_path, download_config)
+
+
+class DatasetBuilder:
+ """Abstract base class for all datasets.
+
+ `DatasetBuilder` has 3 key methods:
+
+ - [`DatasetBuilder.info`]: Documents the dataset, including feature
+ names, types, shapes, version, splits, citation, etc.
+ - [`DatasetBuilder.download_and_prepare`]: Downloads the source data
+ and writes it to disk.
+ - [`DatasetBuilder.as_dataset`]: Generates a [`Dataset`].
+
+ Some `DatasetBuilder`s expose multiple variants of the
+ dataset by defining a [`BuilderConfig`] subclass and accepting a
+ config object (or name) on construction. Configurable datasets expose a
+ pre-defined set of configurations in [`DatasetBuilder.builder_configs`].
+
+ Args:
+ cache_dir (`str`, *optional*):
+ Directory to cache data. Defaults to `"~/.cache/huggingface/datasets"`.
+ dataset_name (`str`, *optional*):
+ Name of the dataset, if different from the builder name. Useful for packaged builders
+ like csv, imagefolder, audiofolder, etc. to reflect the difference between datasets
+ that use the same packaged builder.
+ config_name (`str`, *optional*):
+ Name of the dataset configuration.
+ It affects the data generated on disk. Different configurations will have their own subdirectories and
+ versions.
+ If not provided, the default configuration is used (if it exists).
+
+
+
+ Parameter `name` was renamed to `config_name`.
+
+
+ hash (`str`, *optional*):
+ Hash specific to the dataset code. Used to update the caching directory when the
+ dataset loading script code is updated (to avoid reusing old data).
+ The typical caching directory (defined in `self._relative_data_dir`) is `name/version/hash/`.
+ base_path (`str`, *optional*):
+ Base path for relative paths that are used to download files.
+ This can be a remote URL.
+ features ([`Features`], *optional*):
+ Features types to use with this dataset.
+ It can be used to change the [`Features`] types of a dataset, for example.
+ token (`str` or `bool`, *optional*):
+ String or boolean to use as Bearer token for remote files on the
+ Datasets Hub. If `True`, will get token from `"~/.huggingface"`.
+ repo_id (`str`, *optional*):
+ ID of the dataset repository.
+ Used to distinguish builders with the same name but not coming from the same namespace, for example "squad"
+ and "lhoestq/squad" repo IDs. In the latter, the builder name would be "lhoestq___squad".
+ data_files (`str` or `Sequence` or `Mapping`, *optional*):
+ Path(s) to source data file(s).
+ For builders like "csv" or "json" that need the user to specify data files. They can be either
+ local or remote files. For convenience, you can use a `DataFilesDict`.
+ data_dir (`str`, *optional*):
+ Path to directory containing source data file(s).
+ Use only if `data_files` is not passed, in which case it is equivalent to passing
+ `os.path.join(data_dir, "**")` as `data_files`.
+ For builders that require manual download, it must be the path to the local directory containing the
+ manually downloaded data.
+ storage_options (`dict`, *optional*):
+ Key/value pairs to be passed on to the dataset file-system backend, if any.
+ writer_batch_size (`int`, *optional*):
+ Batch size used by the ArrowWriter.
+ It defines the number of samples that are kept in memory before writing them
+ and also the length of the arrow chunks.
+ None means that the ArrowWriter will use its default value.
+ name (`str`): Configuration name for the dataset.
+
+
+
+ Use `config_name` instead.
+
+
+
+ **config_kwargs (additional keyword arguments): Keyword arguments to be passed to the corresponding builder
+ configuration class, set on the class attribute [`DatasetBuilder.BUILDER_CONFIG_CLASS`]. The builder
+ configuration class is [`BuilderConfig`] or a subclass of it.
+ """
+
+ # Default version
+ VERSION = None # Default version set in BuilderConfig
+
+ # Class for the builder config.
+ BUILDER_CONFIG_CLASS = BuilderConfig
+
+ # Named configurations that modify the data generated by download_and_prepare.
+ BUILDER_CONFIGS = []
+
+ # Optional default config name to be used when name is None
+ DEFAULT_CONFIG_NAME = None
+
+ # Default batch size used by the ArrowWriter
+ # It defines the number of samples that are kept in memory before writing them
+ # and also the length of the arrow chunks
+ # None means that the ArrowWriter will use its default value
+ DEFAULT_WRITER_BATCH_SIZE = None
+
+ def __init__(
+ self,
+ cache_dir: Optional[str] = None,
+ dataset_name: Optional[str] = None,
+ config_name: Optional[str] = None,
+ hash: Optional[str] = None,
+ base_path: Optional[str] = None,
+ info: Optional[DatasetInfo] = None,
+ features: Optional[Features] = None,
+ token: Optional[Union[bool, str]] = None,
+ use_auth_token="deprecated",
+ repo_id: Optional[str] = None,
+ data_files: Optional[Union[str, list, dict, DataFilesDict]] = None,
+ data_dir: Optional[str] = None,
+ storage_options: Optional[dict] = None,
+ writer_batch_size: Optional[int] = None,
+ name="deprecated",
+ **config_kwargs,
+ ):
+ if use_auth_token != "deprecated":
+ warnings.warn(
+ "'use_auth_token' was deprecated in favor of 'token' in version 2.14.0 and will be removed in 3.0.0.\n"
+ f"You can remove this warning by passing 'token={use_auth_token}' instead.",
+ FutureWarning,
+ )
+ token = use_auth_token
+ if name != "deprecated":
+ warnings.warn(
+ "Parameter 'name' was renamed to 'config_name' in version 2.3.0 and will be removed in 3.0.0.",
+ category=FutureWarning,
+ )
+ config_name = name
+ # DatasetBuilder name
+ self.name: str = camelcase_to_snakecase(self.__module__.split(".")[-1])
+ self.hash: Optional[str] = hash
+ self.base_path = base_path
+ self.token = token
+ # For backwards compatibility (e.g. if accessed in a dataset script)
+ self.use_auth_token = token
+ self.repo_id = repo_id
+ self.storage_options = storage_options or {}
+ self.dataset_name = camelcase_to_snakecase(dataset_name) if dataset_name else self.name
+ self._writer_batch_size = writer_batch_size or self.DEFAULT_WRITER_BATCH_SIZE
+
+ if data_files is not None and not isinstance(data_files, DataFilesDict):
+ data_files = DataFilesDict.from_patterns(
+ sanitize_patterns(data_files),
+ base_path=base_path,
+ download_config=DownloadConfig(token=token, storage_options=self.storage_options),
+ )
+
+ # Prepare config: DatasetConfig contains name, version and description but can be extended by each dataset
+ if "features" in inspect.signature(self.BUILDER_CONFIG_CLASS.__init__).parameters and features is not None:
+ config_kwargs["features"] = features
+ if data_files is not None:
+ config_kwargs["data_files"] = data_files
+ if data_dir is not None:
+ config_kwargs["data_dir"] = data_dir
+ self.config, self.config_id = self._create_builder_config(
+ config_name=config_name,
+ custom_features=features,
+ **config_kwargs,
+ )
+
+ # prepare info: DatasetInfo are a standardized dataclass across all datasets
+ # Prefill datasetinfo
+ if info is None:
+ # TODO FOR PACKAGED MODULES IT IMPORTS DATA FROM src/packaged_modules which doesn't make sense
+ info = self.get_exported_dataset_info()
+ info.update(self._info())
+ info.builder_name = self.name
+ info.dataset_name = self.dataset_name
+ info.config_name = self.config.name
+ info.version = self.config.version
+ self.info = info
+ # update info with user specified infos
+ if features is not None:
+ self.info.features = features
+
+ # Prepare data dirs:
+ # cache_dir can be a remote bucket on GCS or S3 (when using BeamBasedBuilder for distributed data processing)
+ self._cache_dir_root = str(cache_dir or config.HF_DATASETS_CACHE)
+ self._cache_dir_root = (
+ self._cache_dir_root if is_remote_url(self._cache_dir_root) else os.path.expanduser(self._cache_dir_root)
+ )
+ self._cache_downloaded_dir = (
+ posixpath.join(self._cache_dir_root, config.DOWNLOADED_DATASETS_DIR)
+ if cache_dir
+ else str(config.DOWNLOADED_DATASETS_PATH)
+ )
+ self._cache_downloaded_dir = (
+ self._cache_downloaded_dir
+ if is_remote_url(self._cache_downloaded_dir)
+ else os.path.expanduser(self._cache_downloaded_dir)
+ )
+
+ # In case there exists a legacy cache directory
+ self._legacy_relative_data_dir = None
+
+ self._cache_dir = self._build_cache_dir()
+ if not is_remote_url(self._cache_dir_root):
+ os.makedirs(self._cache_dir_root, exist_ok=True)
+ lock_path = os.path.join(
+ self._cache_dir_root, Path(self._cache_dir).as_posix().replace("/", "_") + ".lock"
+ )
+ with FileLock(lock_path):
+ if os.path.exists(self._cache_dir): # check if data exist
+ if len(os.listdir(self._cache_dir)) > 0:
+ if os.path.exists(os.path.join(self._cache_dir, config.DATASET_INFO_FILENAME)):
+ logger.info("Overwrite dataset info from restored data version if exists.")
+ self.info = DatasetInfo.from_directory(self._cache_dir)
+ else: # dir exists but no data, remove the empty dir as data aren't available anymore
+ logger.warning(
+ f"Old caching folder {self._cache_dir} for dataset {self.dataset_name} exists but no data were found. Removing it. "
+ )
+ os.rmdir(self._cache_dir)
+
+ # Store in the cache by default unless the user specifies a custom output_dir to download_and_prepare
+ self._output_dir = self._cache_dir
+ self._fs: fsspec.AbstractFileSystem = fsspec.filesystem("file")
+
+ # Set download manager
+ self.dl_manager = None
+
+ # Set to True by "datasets-cli test" to generate file checksums for (deprecated) dataset_infos.json independently of verification_mode value.
+ self._record_infos = False
+
+ # Set in `.download_and_prepare` once the format of the generated dataset is known
+ self._file_format = None
+
+ # Enable streaming (e.g. it patches "open" to work with remote files)
+ extend_dataset_builder_for_streaming(self)
+
+ def __getstate__(self):
+ return self.__dict__
+
+ def __setstate__(self, d):
+ self.__dict__ = d
+ # Re-enable streaming, since patched functions are not kept when pickling
+ extend_dataset_builder_for_streaming(self)
+
+ # Must be set for datasets that use 'data_dir' functionality - the ones
+ # that require users to do additional steps to download the data
+ # (this is usually due to some external regulations / rules).
+ # This field should contain a string with user instructions, including
+ # the list of files that should be present. It will be
+ # displayed in the dataset documentation.
+ @property
+ def manual_download_instructions(self) -> Optional[str]:
+ return None
+
+ def _check_legacy_cache(self) -> Optional[str]:
+ """Check for the old cache directory template {cache_dir}/{namespace}___{builder_name} from 2.13"""
+ if (
+ self.__module__.startswith("datasets.")
+ and not is_remote_url(self._cache_dir_root)
+ and self.config.name == "default"
+ ):
+ from .packaged_modules import _PACKAGED_DATASETS_MODULES
+
+ namespace = self.repo_id.split("/")[0] if self.repo_id and self.repo_id.count("/") > 0 else None
+ config_name = self.repo_id.replace("/", "--") if self.repo_id is not None else self.dataset_name
+ config_id = config_name + self.config_id[len(self.config.name) :]
+ hash = _PACKAGED_DATASETS_MODULES.get(self.name, "missing")[1]
+ legacy_relative_data_dir = posixpath.join(
+ self.dataset_name if namespace is None else f"{namespace}___{self.dataset_name}",
+ config_id,
+ "0.0.0",
+ hash,
+ )
+ legacy_cache_dir = posixpath.join(self._cache_dir_root, legacy_relative_data_dir)
+ if os.path.isdir(legacy_cache_dir):
+ return legacy_relative_data_dir
+
+ def _check_legacy_cache2(self, dataset_module: "DatasetModule") -> Optional[str]:
+ """Check for the old cache directory template {cache_dir}/{namespace}___{dataset_name}/{config_name}-xxx from 2.14 and 2.15"""
+ if self.__module__.startswith("datasets.") and not is_remote_url(self._cache_dir_root):
+ from .packaged_modules import _PACKAGED_DATASETS_MODULES
+ from .utils._dill import Pickler
+
+ def update_hash_with_config_parameters(hash: str, config_parameters: dict) -> str:
+ """
+ Used to update hash of packaged modules which is used for creating unique cache directories to reflect
+ different config parameters which are passed in metadata from readme.
+ """
+ params_to_exclude = {"config_name", "version", "description"}
+ params_to_add_to_hash = {
+ param: value
+ for param, value in sorted(config_parameters.items())
+ if param not in params_to_exclude
+ }
+ m = Hasher()
+ m.update(hash)
+ m.update(params_to_add_to_hash)
+ return m.hexdigest()
+
+ namespace = self.repo_id.split("/")[0] if self.repo_id and self.repo_id.count("/") > 0 else None
+ with patch.object(Pickler, "_legacy_no_dict_keys_sorting", True):
+ config_id = self.config.name + "-" + Hasher.hash({"data_files": self.config.data_files})
+ hash = _PACKAGED_DATASETS_MODULES.get(self.name, "missing")[1]
+ if (
+ dataset_module.builder_configs_parameters.metadata_configs
+ and self.config.name in dataset_module.builder_configs_parameters.metadata_configs
+ ):
+ hash = update_hash_with_config_parameters(
+ hash, dataset_module.builder_configs_parameters.metadata_configs[self.config.name]
+ )
+ legacy_relative_data_dir = posixpath.join(
+ self.dataset_name if namespace is None else f"{namespace}___{self.dataset_name}",
+ config_id,
+ "0.0.0",
+ hash,
+ )
+ legacy_cache_dir = posixpath.join(self._cache_dir_root, legacy_relative_data_dir)
+ if os.path.isdir(legacy_cache_dir):
+ return legacy_relative_data_dir
+
+ @classmethod
+ def get_all_exported_dataset_infos(cls) -> DatasetInfosDict:
+ """Empty dict if doesn't exist
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset_builder
+ >>> ds_builder = load_dataset_builder('rotten_tomatoes')
+ >>> ds_builder.get_all_exported_dataset_infos()
+ {'default': DatasetInfo(description="Movie Review Dataset.\nThis is a dataset of containing 5,331 positive and 5,331 negative processed\nsentences from Rotten Tomatoes movie reviews. This data was first used in Bo\nPang and Lillian Lee, ``Seeing stars: Exploiting class relationships for\nsentiment categorization with respect to rating scales.'', Proceedings of the\nACL, 2005.\n", citation='@InProceedings{Pang+Lee:05a,\n author = {Bo Pang and Lillian Lee},\n title = {Seeing stars: Exploiting class relationships for sentiment\n categorization with respect to rating scales},\n booktitle = {Proceedings of the ACL},\n year = 2005\n}\n', homepage='http://www.cs.cornell.edu/people/pabo/movie-review-data/', license='', features={'text': Value(dtype='string', id=None), 'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None)}, post_processed=None, supervised_keys=SupervisedKeysData(input='', output=''), task_templates=[TextClassification(task='text-classification', text_column='text', label_column='label')], builder_name='rotten_tomatoes_movie_review', config_name='default', version=1.0.0, splits={'train': SplitInfo(name='train', num_bytes=1074810, num_examples=8530, dataset_name='rotten_tomatoes_movie_review'), 'validation': SplitInfo(name='validation', num_bytes=134679, num_examples=1066, dataset_name='rotten_tomatoes_movie_review'), 'test': SplitInfo(name='test', num_bytes=135972, num_examples=1066, dataset_name='rotten_tomatoes_movie_review')}, download_checksums={'https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz': {'num_bytes': 487770, 'checksum': 'a05befe52aafda71d458d188a1c54506a998b1308613ba76bbda2e5029409ce9'}}, download_size=487770, post_processing_size=None, dataset_size=1345461, size_in_bytes=1833231)}
+ ```
+ """
+ return DatasetInfosDict.from_directory(cls.get_imported_module_dir())
+
+ def get_exported_dataset_info(self) -> DatasetInfo:
+ """Empty `DatasetInfo` if doesn't exist
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset_builder
+ >>> ds_builder = load_dataset_builder('rotten_tomatoes')
+ >>> ds_builder.get_exported_dataset_info()
+ DatasetInfo(description="Movie Review Dataset.\nThis is a dataset of containing 5,331 positive and 5,331 negative processed\nsentences from Rotten Tomatoes movie reviews. This data was first used in Bo\nPang and Lillian Lee, ``Seeing stars: Exploiting class relationships for\nsentiment categorization with respect to rating scales.'', Proceedings of the\nACL, 2005.\n", citation='@InProceedings{Pang+Lee:05a,\n author = {Bo Pang and Lillian Lee},\n title = {Seeing stars: Exploiting class relationships for sentiment\n categorization with respect to rating scales},\n booktitle = {Proceedings of the ACL},\n year = 2005\n}\n', homepage='http://www.cs.cornell.edu/people/pabo/movie-review-data/', license='', features={'text': Value(dtype='string', id=None), 'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None)}, post_processed=None, supervised_keys=SupervisedKeysData(input='', output=''), task_templates=[TextClassification(task='text-classification', text_column='text', label_column='label')], builder_name='rotten_tomatoes_movie_review', config_name='default', version=1.0.0, splits={'train': SplitInfo(name='train', num_bytes=1074810, num_examples=8530, dataset_name='rotten_tomatoes_movie_review'), 'validation': SplitInfo(name='validation', num_bytes=134679, num_examples=1066, dataset_name='rotten_tomatoes_movie_review'), 'test': SplitInfo(name='test', num_bytes=135972, num_examples=1066, dataset_name='rotten_tomatoes_movie_review')}, download_checksums={'https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz': {'num_bytes': 487770, 'checksum': 'a05befe52aafda71d458d188a1c54506a998b1308613ba76bbda2e5029409ce9'}}, download_size=487770, post_processing_size=None, dataset_size=1345461, size_in_bytes=1833231)
+ ```
+ """
+ return self.get_all_exported_dataset_infos().get(self.config.name, DatasetInfo())
+
+ def _create_builder_config(
+ self, config_name=None, custom_features=None, **config_kwargs
+ ) -> Tuple[BuilderConfig, str]:
+ """Create and validate BuilderConfig object as well as a unique config id for this config.
+ Raises ValueError if there are multiple builder configs and config_name and DEFAULT_CONFIG_NAME are None.
+ config_kwargs override the default kwargs of the config
+ """
+ builder_config = None
+
+ # try default config
+ if config_name is None and self.BUILDER_CONFIGS:
+ if self.DEFAULT_CONFIG_NAME is not None:
+ builder_config = self.builder_configs.get(self.DEFAULT_CONFIG_NAME)
+ logger.info(f"No config specified, defaulting to: {self.dataset_name}/{builder_config.name}")
+ else:
+ if len(self.BUILDER_CONFIGS) > 1:
+ if not config_kwargs:
+ example_of_usage = f"load_dataset('{self.dataset_name}', '{self.BUILDER_CONFIGS[0].name}')"
+ raise ValueError(
+ "Config name is missing."
+ f"\nPlease pick one among the available configs: {list(self.builder_configs.keys())}"
+ + f"\nExample of usage:\n\t`{example_of_usage}`"
+ )
+ else:
+ builder_config = self.BUILDER_CONFIGS[0]
+ logger.info(
+ f"No config specified, defaulting to the single config: {self.dataset_name}/{builder_config.name}"
+ )
+
+ # try to get config by name
+ if isinstance(config_name, str):
+ builder_config = self.builder_configs.get(config_name)
+ if builder_config is None and self.BUILDER_CONFIGS:
+ raise ValueError(
+ f"BuilderConfig '{config_name}' not found. Available: {list(self.builder_configs.keys())}"
+ )
+
+ # if not using an existing config, then create a new config on the fly
+ if not builder_config:
+ if config_name is not None:
+ config_kwargs["name"] = config_name
+ elif self.DEFAULT_CONFIG_NAME and not config_kwargs:
+ # Use DEFAULT_CONFIG_NAME only if no config_kwargs are passed
+ config_kwargs["name"] = self.DEFAULT_CONFIG_NAME
+ if "version" not in config_kwargs and hasattr(self, "VERSION") and self.VERSION:
+ config_kwargs["version"] = self.VERSION
+ builder_config = self.BUILDER_CONFIG_CLASS(**config_kwargs)
+
+ # otherwise use the config_kwargs to overwrite the attributes
+ else:
+ builder_config = copy.deepcopy(builder_config) if config_kwargs else builder_config
+ for key, value in config_kwargs.items():
+ if value is not None:
+ if not hasattr(builder_config, key):
+ raise ValueError(f"BuilderConfig {builder_config} doesn't have a '{key}' key.")
+ setattr(builder_config, key, value)
+
+ if not builder_config.name:
+ raise ValueError(f"BuilderConfig must have a name, got {builder_config.name}")
+
+ # resolve data files if needed
+ builder_config._resolve_data_files(
+ base_path=self.base_path,
+ download_config=DownloadConfig(token=self.token, storage_options=self.storage_options),
+ )
+
+ # compute the config id that is going to be used for caching
+ config_id = builder_config.create_config_id(
+ config_kwargs,
+ custom_features=custom_features,
+ )
+ is_custom = (config_id not in self.builder_configs) and config_id != "default"
+ if is_custom:
+ logger.info(f"Using custom data configuration {config_id}")
+ else:
+ if (
+ builder_config.name in self.builder_configs
+ and builder_config != self.builder_configs[builder_config.name]
+ ):
+ raise ValueError(
+ "Cannot name a custom BuilderConfig the same as an available "
+ f"BuilderConfig. Change the name. Available BuilderConfigs: {list(self.builder_configs.keys())}"
+ )
+ if not builder_config.version:
+ raise ValueError(f"BuilderConfig {builder_config.name} must have a version")
+
+ return builder_config, config_id
+
+ @classproperty
+ @classmethod
+ @memoize()
+ def builder_configs(cls) -> Dict[str, BuilderConfig]:
+ """Dictionary of pre-defined configurations for this builder class."""
+ configs = {config.name: config for config in cls.BUILDER_CONFIGS}
+ if len(configs) != len(cls.BUILDER_CONFIGS):
+ names = [config.name for config in cls.BUILDER_CONFIGS]
+ raise ValueError(f"Names in BUILDER_CONFIGS must not be duplicated. Got {names}")
+ return configs
+
+ @property
+ def cache_dir(self):
+ return self._cache_dir
+
+ def _use_legacy_cache_dir_if_possible(self, dataset_module: "DatasetModule"):
+ # Check for the legacy cache directory template (datasets<3.0.0)
+ self._legacy_relative_data_dir = (
+ self._check_legacy_cache2(dataset_module) or self._check_legacy_cache() or None
+ )
+ self._cache_dir = self._build_cache_dir()
+ self._output_dir = self._cache_dir
+
+ def _relative_data_dir(self, with_version=True, with_hash=True) -> str:
+ """Relative path of this dataset in cache_dir:
+ Will be:
+ self.dataset_name/self.config.version/self.hash/
+ or if a repo_id with a namespace has been specified:
+ self.namespace___self.dataset_name/self.config.version/self.hash/
+ If any of these element is missing or if ``with_version=False`` the corresponding subfolders are dropped.
+ """
+ if self._legacy_relative_data_dir is not None and with_version and with_hash:
+ return self._legacy_relative_data_dir
+
+ namespace = self.repo_id.split("/")[0] if self.repo_id and self.repo_id.count("/") > 0 else None
+ builder_data_dir = self.dataset_name if namespace is None else f"{namespace}___{self.dataset_name}"
+ builder_data_dir = posixpath.join(builder_data_dir, self.config_id)
+ if with_version:
+ builder_data_dir = posixpath.join(builder_data_dir, str(self.config.version))
+ if with_hash and self.hash and isinstance(self.hash, str):
+ builder_data_dir = posixpath.join(builder_data_dir, self.hash)
+ return builder_data_dir
+
+ def _build_cache_dir(self):
+ """Return the data directory for the current version."""
+ builder_data_dir = posixpath.join(self._cache_dir_root, self._relative_data_dir(with_version=False))
+ version_data_dir = posixpath.join(self._cache_dir_root, self._relative_data_dir(with_version=True))
+
+ def _other_versions_on_disk():
+ """Returns previous versions on disk."""
+ if not os.path.exists(builder_data_dir):
+ return []
+
+ version_dirnames = []
+ for dir_name in os.listdir(builder_data_dir):
+ try:
+ version_dirnames.append((utils.Version(dir_name), dir_name))
+ except ValueError: # Invalid version (ex: incomplete data dir)
+ pass
+ version_dirnames.sort(reverse=True)
+ return version_dirnames
+
+ # Check and warn if other versions exist
+ if not is_remote_url(builder_data_dir):
+ version_dirs = _other_versions_on_disk()
+ if version_dirs:
+ other_version = version_dirs[0][0]
+ if other_version != self.config.version:
+ warn_msg = (
+ f"Found a different version {str(other_version)} of dataset {self.dataset_name} in "
+ f"cache_dir {self._cache_dir_root}. Using currently defined version "
+ f"{str(self.config.version)}."
+ )
+ logger.warning(warn_msg)
+
+ return version_data_dir
+
+ @abc.abstractmethod
+ def _info(self) -> DatasetInfo:
+ """Construct the DatasetInfo object. See `DatasetInfo` for details.
+
+ Warning: This function is only called once and the result is cached for all
+ following .info() calls.
+
+ Returns:
+ info: (DatasetInfo) The dataset information
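+
+ Example (an illustrative sketch of a minimal implementation):
+
+ ```py
+ >>> def _info(self):
+ ...     return datasets.DatasetInfo(
+ ...         features=datasets.Features({"text": datasets.Value("string")}),
+ ...     )
+ ```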
+ """
+ raise NotImplementedError
+
+ @classmethod
+ def get_imported_module_dir(cls):
+ """Return the path of the module of this class or subclass."""
+ return os.path.dirname(inspect.getfile(inspect.getmodule(cls)))
+
+ def _rename(self, src: str, dst: str):
+ rename(self._fs, src, dst)
+
+ def download_and_prepare(
+ self,
+ output_dir: Optional[str] = None,
+ download_config: Optional[DownloadConfig] = None,
+ download_mode: Optional[Union[DownloadMode, str]] = None,
+ verification_mode: Optional[Union[VerificationMode, str]] = None,
+ ignore_verifications="deprecated",
+ try_from_hf_gcs: bool = True,
+ dl_manager: Optional[DownloadManager] = None,
+ base_path: Optional[str] = None,
+ use_auth_token="deprecated",
+ file_format: str = "arrow",
+ max_shard_size: Optional[Union[int, str]] = None,
+ num_proc: Optional[int] = None,
+ storage_options: Optional[dict] = None,
+ **download_and_prepare_kwargs,
+ ):
+ """Downloads and prepares dataset for reading.
+
+ Args:
+ output_dir (`str`, *optional*):
+ Output directory for the dataset.
+ Defaults to this builder's `cache_dir`, which is inside `~/.cache/huggingface/datasets` by default.
+
+
+ download_config (`DownloadConfig`, *optional*):
+ Specific download configuration parameters.
+ download_mode ([`DownloadMode`] or `str`, *optional*):
+ Select the download/generate mode; defaults to `REUSE_DATASET_IF_EXISTS`.
+ verification_mode ([`VerificationMode`] or `str`, defaults to `BASIC_CHECKS`):
+ Verification mode determining the checks to run on the downloaded/processed dataset information (checksums/size/splits/...).
+
+
+ ignore_verifications (`bool`, defaults to `False`):
+ Ignore the verifications of the downloaded/processed dataset information (checksums/size/splits/...).
+
+
+
+ `ignore_verifications` was deprecated in version 2.9.1 and will be removed in 3.0.0.
+ Please use `verification_mode` instead.
+
+
+ try_from_hf_gcs (`bool`):
+ If `True`, it will try to download the already prepared dataset from the HF Google cloud storage.
+ dl_manager (`DownloadManager`, *optional*):
+ Specific `DownloadManger` to use.
+ base_path (`str`, *optional*):
+ Base path for relative paths that are used to download files. This can be a remote url.
+ If not specified, the value of the `base_path` attribute (`self.base_path`) will be used instead.
+ use_auth_token (`Union[str, bool]`, *optional*):
+ Optional string or boolean to use as Bearer token for remote files on the Datasets Hub.
+ If True, or not specified, will get token from ~/.huggingface.
+
+
+
+ Pass `use_auth_token` to `load_dataset_builder` instead.
+
+
+ file_format (`str`, *optional*):
+ Format of the data files in which the dataset will be written.
+ Supported formats: "arrow", "parquet". Defaults to the "arrow" format.
+ If the format is "parquet", then image and audio data are embedded into the Parquet files instead of pointing to local files.
+
+
+ max_shard_size (`Union[str, int]`, *optional*):
+ Maximum number of bytes written per shard, default is "500MB".
+ The size is based on uncompressed data size, so in practice your shard files may be smaller than
+ `max_shard_size` thanks to Parquet compression for example.
+
+
+ num_proc (`int`, *optional*, defaults to `None`):
+ Number of processes when downloading and generating the dataset locally.
+ Multiprocessing is disabled by default.
+
+
+ storage_options (`dict`, *optional*):
+ Key/value pairs to be passed on to the caching file-system backend, if any.
+
+
+ **download_and_prepare_kwargs (additional keyword arguments): Keyword arguments.
+
+ Example:
+
+ Download and prepare the dataset as Arrow files that can be loaded as a Dataset using `builder.as_dataset()`:
+
+ ```py
+ >>> from datasets import load_dataset_builder
+ >>> builder = load_dataset_builder("rotten_tomatoes")
+ >>> builder.download_and_prepare()
+ ```
+
+ Download and prepare the dataset as sharded Parquet files locally:
+
+ ```py
+ >>> from datasets import load_dataset_builder
+ >>> builder = load_dataset_builder("rotten_tomatoes")
+ >>> builder.download_and_prepare("./output_dir", file_format="parquet")
+ ```
+
+ Download and prepare the dataset as sharded Parquet files in a cloud storage:
+
+ ```py
+ >>> from datasets import load_dataset_builder
+ >>> storage_options = {"key": aws_access_key_id, "secret": aws_secret_access_key}
+ >>> builder = load_dataset_builder("rotten_tomatoes")
+ >>> builder.download_and_prepare("s3://my-bucket/my_rotten_tomatoes", storage_options=storage_options, file_format="parquet")
+ ```
+ """
+ if ignore_verifications != "deprecated":
+ verification_mode = VerificationMode.NO_CHECKS if ignore_verifications else VerificationMode.ALL_CHECKS
+ warnings.warn(
+ "'ignore_verifications' was deprecated in favor of 'verification_mode' in version 2.9.1 and will be removed in 3.0.0.\n"
+ f"You can remove this warning by passing 'verification_mode={verification_mode.value}' instead.",
+ FutureWarning,
+ )
+ if use_auth_token != "deprecated":
+ warnings.warn(
+ "'use_auth_token' was deprecated in version 2.7.1 and will be removed in 3.0.0. Pass `token` to `load_dataset_builder` instead.",
+ FutureWarning,
+ )
+ token = use_auth_token
+ else:
+ token = self.token
+
+ output_dir = output_dir if output_dir is not None else self._cache_dir
+ # output_dir can be a remote bucket on GCS or S3 (when using BeamBasedBuilder for distributed data processing)
+ fs, _, [output_dir] = fsspec.get_fs_token_paths(output_dir, storage_options=storage_options)
+ self._fs = fs
+ self._output_dir = output_dir if not is_remote_filesystem(self._fs) else self._fs.unstrip_protocol(output_dir)
+
+ download_mode = DownloadMode(download_mode or DownloadMode.REUSE_DATASET_IF_EXISTS)
+ verification_mode = VerificationMode(verification_mode or VerificationMode.BASIC_CHECKS)
+ base_path = base_path if base_path is not None else self.base_path
+
+ if file_format is not None and file_format not in ["arrow", "parquet"]:
+ raise ValueError(f"Unsupported file_format: {file_format}. Expected 'arrow' or 'parquet'")
+ self._file_format = file_format
+
+ if self._fs._strip_protocol(self._output_dir) == "":
+ # We don't support the root directory, because it has no dirname,
+ # and we need a dirname to use a .incomplete directory
+ # when the dataset is being written
+ raise RuntimeError(
+ f"Unable to download and prepare the dataset at the root {self._output_dir}. "
+ f"Please specify a subdirectory, e.g. '{self._output_dir + self.dataset_name}'"
+ )
+
+ if dl_manager is None:
+ if download_config is None:
+ download_config = DownloadConfig(
+ cache_dir=self._cache_downloaded_dir,
+ force_download=download_mode == DownloadMode.FORCE_REDOWNLOAD,
+ force_extract=download_mode == DownloadMode.FORCE_REDOWNLOAD,
+ use_etag=False,
+ num_proc=num_proc,
+ token=token,
+ storage_options=self.storage_options,
+ ) # We don't use etag for data files to speed up the process
+
+ dl_manager = DownloadManager(
+ dataset_name=self.dataset_name,
+ download_config=download_config,
+ data_dir=self.config.data_dir,
+ base_path=base_path,
+ record_checksums=(self._record_infos or verification_mode == VerificationMode.ALL_CHECKS),
+ )
+
+ is_local = not is_remote_filesystem(self._fs)
+
+ if (
+ isinstance(dl_manager, MockDownloadManager)
+ or not is_local
+ or file_format != "arrow"
+ or max_shard_size is not None
+ ):
+ try_from_hf_gcs = False
+ self.dl_manager = dl_manager
+
+ # Prevent parallel local disk operations
+ if is_local:
+ # Create parent directory of the output_dir to put the lock file in there
+ Path(self._output_dir).parent.mkdir(parents=True, exist_ok=True)
+ lock_path = self._output_dir + "_builder.lock"
+
+ # File locking only with local paths; no file locking on GCS or S3
+ with FileLock(lock_path) if is_local else contextlib.nullcontext():
+ # Check if the data already exists
+ data_exists = self._fs.exists(posixpath.join(self._output_dir, config.DATASET_INFO_FILENAME))
+ if data_exists and download_mode == DownloadMode.REUSE_DATASET_IF_EXISTS:
+ logger.info(f"Found cached dataset {self.dataset_name} ({self._output_dir})")
+ # We need to update the info in case some splits were added in the meantime
+ # for example when calling load_dataset from multiple workers.
+ self.info = self._load_info()
+ self.download_post_processing_resources(dl_manager)
+ return
+
+ logger.info(f"Generating dataset {self.dataset_name} ({self._output_dir})")
+ if is_local: # if cache dir is local, check for available space
+ if not has_sufficient_disk_space(
+ self.info.size_in_bytes or 0, directory=Path(self._output_dir).parent
+ ):
+ raise OSError(
+ f"Not enough disk space. Needed: {size_str(self.info.size_in_bytes or 0)} (download: {size_str(self.info.download_size or 0)}, generated: {size_str(self.info.dataset_size or 0)}, post-processed: {size_str(self.info.post_processing_size or 0)})"
+ )
+
+ @contextlib.contextmanager
+ def incomplete_dir(dirname):
+ """Create temporary dir for dirname and rename on exit."""
+ if not is_local:
+ self._fs.makedirs(dirname, exist_ok=True)
+ yield dirname
+ else:
+ tmp_dir = dirname + ".incomplete"
+ os.makedirs(tmp_dir, exist_ok=True)
+ try:
+ yield tmp_dir
+ if os.path.isdir(dirname):
+ shutil.rmtree(dirname)
+ # LocalFileSystem.mv does copy + rm, it is more efficient to simply rename a local directory
+ shutil.move(tmp_dir, dirname)
+ finally:
+ if os.path.exists(tmp_dir):
+ shutil.rmtree(tmp_dir)
+
+ # Print is intentional: we want this to always go to stdout so user has
+ # information needed to cancel download/preparation if needed.
+ # This comes right before the progress bar.
+ if self.info.size_in_bytes:
+ logger.info(
+ f"Downloading and preparing dataset {self.dataset_name}/{self.config.name} "
+ f"(download: {size_str(self.info.download_size)}, generated: {size_str(self.info.dataset_size)}, "
+ f"post-processed: {size_str(self.info.post_processing_size)}, "
+ f"total: {size_str(self.info.size_in_bytes)}) to {self._output_dir}..."
+ )
+ else:
+ _dest = self._fs._strip_protocol(self._output_dir) if is_local else self._output_dir
+ logger.info(f"Downloading and preparing dataset {self.dataset_name}/{self.config.name} to {_dest}...")
+
+ self._check_manual_download(dl_manager)
+
+ # Create a tmp dir and rename to self._output_dir on successful exit.
+ with incomplete_dir(self._output_dir) as tmp_output_dir:
+ # Temporarily assign _output_dir to tmp_data_dir to avoid having to forward
+ # it to every sub function.
+ with temporary_assignment(self, "_output_dir", tmp_output_dir):
+ # Try to download the already prepared dataset files
+ downloaded_from_gcs = False
+ if try_from_hf_gcs:
+ try:
+ self._download_prepared_from_hf_gcs(dl_manager.download_config)
+ downloaded_from_gcs = True
+ except (DatasetNotOnHfGcsError, MissingFilesOnHfGcsError):
+ logger.info("Dataset not on Hf google storage. Downloading and preparing it from source")
+ except ConnectionError:
+ logger.warning("HF google storage unreachable. Downloading and preparing it from source")
+ if not downloaded_from_gcs:
+ prepare_split_kwargs = {"file_format": file_format}
+ if max_shard_size is not None:
+ prepare_split_kwargs["max_shard_size"] = max_shard_size
+ if num_proc is not None:
+ prepare_split_kwargs["num_proc"] = num_proc
+ self._download_and_prepare(
+ dl_manager=dl_manager,
+ verification_mode=verification_mode,
+ **prepare_split_kwargs,
+ **download_and_prepare_kwargs,
+ )
+ # Sync info
+ self.info.dataset_size = sum(split.num_bytes for split in self.info.splits.values())
+ self.info.download_checksums = dl_manager.get_recorded_sizes_checksums()
+ self.info.size_in_bytes = self.info.dataset_size + self.info.download_size
+ # Save info
+ self._save_info()
+
+ # Download post processing resources
+ self.download_post_processing_resources(dl_manager)
+
+ logger.info(
+ f"Dataset {self.dataset_name} downloaded and prepared to {self._output_dir}. "
+ f"Subsequent calls will reuse this data."
+ )
+
+ def _check_manual_download(self, dl_manager):
+ if self.manual_download_instructions is not None and dl_manager.manual_dir is None:
+ raise ManualDownloadError(
+ textwrap.dedent(
+ f"""\
+ The dataset {self.dataset_name} with config {self.config.name} requires manual data.
+ Please follow the manual download instructions:
+ {self.manual_download_instructions}
+ Manual data can be loaded with:
+ datasets.load_dataset("{self.dataset_name}", data_dir="")"""
+ )
+ )
+
+ def _download_prepared_from_hf_gcs(self, download_config: DownloadConfig):
+ relative_data_dir = self._relative_data_dir(with_version=True, with_hash=False)
+ reader = ArrowReader(self._output_dir, self.info)
+ # use reader instructions to download the right files
+ reader.download_from_hf_gcs(download_config, relative_data_dir)
+ downloaded_info = DatasetInfo.from_directory(self._output_dir)
+ self.info.update(downloaded_info)
+ # download post processing resources
+ remote_cache_dir = HF_GCP_BASE_URL + "/" + relative_data_dir.replace(os.sep, "/")
+ for split in self.info.splits:
+ for resource_file_name in self._post_processing_resources(split).values():
+ if os.sep in resource_file_name:
+ raise ValueError(f"Resources shouldn't be in a sub-directory: {resource_file_name}")
+ try:
+ resource_path = cached_path(remote_cache_dir + "/" + resource_file_name)
+ shutil.move(resource_path, os.path.join(self._output_dir, resource_file_name))
+ except ConnectionError:
+ logger.info(f"Couldn't download resourse file {resource_file_name} from Hf google storage.")
+ logger.info("Dataset downloaded from Hf google storage.")
+
+ def _download_and_prepare(self, dl_manager, verification_mode, **prepare_split_kwargs):
+ """Downloads and prepares dataset for reading.
+
+        This is the internal implementation to overwrite, called when the user calls
+        `download_and_prepare`. It should download all required data and generate
+        the pre-processed dataset files.
+
+ Args:
+ dl_manager ([`DownloadManager`]):
+ `DownloadManager` used to download and cache data.
+ verification_mode ([`VerificationMode`]):
+ if `ALL_CHECKS`, perform all the verifications including checksums.
+ if `BASIC_CHECKS`, do not perform checksums, only perform split tests.
+ if `NO_CHECKS`, do not perform any verification.
+ prepare_split_kwargs: Additional options, such as `file_format`, `max_shard_size`
+ """
+ # Generating data for all splits
+ split_dict = SplitDict(dataset_name=self.dataset_name)
+ split_generators_kwargs = self._make_split_generators_kwargs(prepare_split_kwargs)
+ split_generators = self._split_generators(dl_manager, **split_generators_kwargs)
+
+ # Checksums verification
+ if verification_mode == VerificationMode.ALL_CHECKS and dl_manager.record_checksums:
+ verify_checksums(
+ self.info.download_checksums, dl_manager.get_recorded_sizes_checksums(), "dataset source files"
+ )
+
+ # Build splits
+ for split_generator in split_generators:
+ if str(split_generator.split_info.name).lower() == "all":
+ raise ValueError(
+ "`all` is a special split keyword corresponding to the "
+ "union of all splits, so cannot be used as key in "
+                    "._split_generators()."
+ )
+
+ logger.info(f"Generating {split_generator.split_info.name} split")
+ split_dict.add(split_generator.split_info)
+
+ try:
+ # Prepare split will record examples associated to the split
+ self._prepare_split(split_generator, **prepare_split_kwargs)
+ except OSError as e:
+ raise OSError(
+ "Cannot find data file. "
+ + (self.manual_download_instructions or "")
+ + "\nOriginal error:\n"
+ + str(e)
+ ) from None
+            # If check_duplicates is set to True, catch DuplicatedKeysError
+ except DuplicatedKeysError as e:
+ raise DuplicatedKeysError(
+ e.key,
+ e.duplicate_key_indices,
+ fix_msg=f"To avoid duplicate keys, please fix the dataset script {self.name}.py",
+ ) from None
+ dl_manager.manage_extracted_files()
+
+ if verification_mode == VerificationMode.BASIC_CHECKS or verification_mode == VerificationMode.ALL_CHECKS:
+ verify_splits(self.info.splits, split_dict)
+
+ # Update the info object with the splits.
+ self.info.splits = split_dict
+ self.info.download_size = dl_manager.downloaded_size
+
+ def download_post_processing_resources(self, dl_manager):
+ for split in self.info.splits or []:
+ for resource_name, resource_file_name in self._post_processing_resources(split).items():
+                if is_remote_filesystem(self._fs):
+ raise NotImplementedError(f"Post processing is not supported on filesystem {self._fs}")
+ if os.sep in resource_file_name:
+ raise ValueError(f"Resources shouldn't be in a sub-directory: {resource_file_name}")
+ resource_path = os.path.join(self._output_dir, resource_file_name)
+ if not os.path.exists(resource_path):
+ downloaded_resource_path = self._download_post_processing_resources(
+ split, resource_name, dl_manager
+ )
+ if downloaded_resource_path:
+ logger.info(f"Downloaded post-processing resource {resource_name} as {resource_file_name}")
+ shutil.move(downloaded_resource_path, resource_path)
+
+ def _load_info(self) -> DatasetInfo:
+ return DatasetInfo.from_directory(self._output_dir, storage_options=self._fs.storage_options)
+
+ def _save_info(self):
+ file_lock = (
+ FileLock(self._output_dir + "_info.lock")
+ if not is_remote_filesystem(self._fs)
+ else contextlib.nullcontext()
+ )
+ with file_lock:
+ self.info.write_to_directory(self._output_dir, storage_options=self._fs.storage_options)
+
+ def _save_infos(self):
+ file_lock = (
+ FileLock(self._output_dir + "_infos.lock")
+ if not is_remote_filesystem(self._fs)
+ else contextlib.nullcontext()
+ )
+ with file_lock:
+ DatasetInfosDict(**{self.config.name: self.info}).write_to_directory(self.get_imported_module_dir())
+
+ def _make_split_generators_kwargs(self, prepare_split_kwargs):
+ """Get kwargs for `self._split_generators()` from `prepare_split_kwargs`."""
+ del prepare_split_kwargs
+ return {}
+
+ def as_dataset(
+ self,
+ split: Optional[Split] = None,
+ run_post_process=True,
+ verification_mode: Optional[Union[VerificationMode, str]] = None,
+ ignore_verifications="deprecated",
+ in_memory=False,
+ ) -> Union[Dataset, DatasetDict]:
+ """Return a Dataset for the specified split.
+
+ Args:
+ split (`datasets.Split`):
+ Which subset of the data to return.
+ run_post_process (`bool`, defaults to `True`):
+ Whether to run post-processing dataset transforms and/or add
+ indexes.
+ verification_mode ([`VerificationMode`] or `str`, defaults to `BASIC_CHECKS`):
+ Verification mode determining the checks to run on the
+ downloaded/processed dataset information (checksums/size/splits/...).
+
+            ignore_verifications (`bool`, defaults to `False`):
+                Whether to ignore the verifications of the
+                downloaded/processed dataset information (checksums/size/splits/...).
+
+                `ignore_verifications` was deprecated in version 2.9.1 and will be removed in 3.0.0.
+                Please use `verification_mode` instead.
+
+ in_memory (`bool`, defaults to `False`):
+ Whether to copy the data in-memory.
+
+ Returns:
+ datasets.Dataset
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset_builder
+ >>> builder = load_dataset_builder('rotten_tomatoes')
+ >>> builder.download_and_prepare()
+ >>> ds = builder.as_dataset(split='train')
+ >>> ds
+ Dataset({
+ features: ['text', 'label'],
+ num_rows: 8530
+ })
+ ```
+ """
+ if ignore_verifications != "deprecated":
+            verification_mode = VerificationMode.NO_CHECKS if ignore_verifications else VerificationMode.ALL_CHECKS
+ warnings.warn(
+                "'ignore_verifications' was deprecated in favor of 'verification_mode' in version 2.9.1 and will be removed in 3.0.0.\n"
+ f"You can remove this warning by passing 'verification_mode={verification_mode.value}' instead.",
+ FutureWarning,
+ )
+ if self._file_format is not None and self._file_format != "arrow":
+ raise FileFormatError('Loading a dataset not written in the "arrow" format is not supported.')
+ if is_remote_filesystem(self._fs):
+ raise NotImplementedError(f"Loading a dataset cached in a {type(self._fs).__name__} is not supported.")
+ if not os.path.exists(self._output_dir):
+ raise FileNotFoundError(
+ f"Dataset {self.dataset_name}: could not find data in {self._output_dir}. Please make sure to call "
+ "builder.download_and_prepare(), or use "
+ "datasets.load_dataset() before trying to access the Dataset object."
+ )
+
+ logger.debug(f'Constructing Dataset for split {split or ", ".join(self.info.splits)}, from {self._output_dir}')
+
+ # By default, return all splits
+ if split is None:
+ split = {s: s for s in self.info.splits}
+
+ verification_mode = VerificationMode(verification_mode or VerificationMode.BASIC_CHECKS)
+
+ # Create a dataset for each of the given splits
+ datasets = map_nested(
+ partial(
+ self._build_single_dataset,
+ run_post_process=run_post_process,
+ verification_mode=verification_mode,
+ in_memory=in_memory,
+ ),
+ split,
+ map_tuple=True,
+ disable_tqdm=True,
+ )
+ if isinstance(datasets, dict):
+ datasets = DatasetDict(datasets)
+ return datasets
+
+ def _build_single_dataset(
+ self,
+ split: Union[str, ReadInstruction, Split],
+ run_post_process: bool,
+ verification_mode: VerificationMode,
+ in_memory: bool = False,
+ ):
+ """as_dataset for a single split."""
+ if not isinstance(split, ReadInstruction):
+ split = str(split)
+ if split == "all":
+ split = "+".join(self.info.splits.keys())
+ split = Split(split)
+
+ # Build base dataset
+ ds = self._as_dataset(
+ split=split,
+ in_memory=in_memory,
+ )
+ if run_post_process:
+ for resource_file_name in self._post_processing_resources(split).values():
+ if os.sep in resource_file_name:
+ raise ValueError(f"Resources shouldn't be in a sub-directory: {resource_file_name}")
+ resources_paths = {
+ resource_name: os.path.join(self._output_dir, resource_file_name)
+ for resource_name, resource_file_name in self._post_processing_resources(split).items()
+ }
+ post_processed = self._post_process(ds, resources_paths)
+ if post_processed is not None:
+ ds = post_processed
+ recorded_checksums = {}
+ record_checksums = False
+ for resource_name, resource_path in resources_paths.items():
+ size_checksum = get_size_checksum_dict(resource_path)
+ recorded_checksums[resource_name] = size_checksum
+ if verification_mode == VerificationMode.ALL_CHECKS and record_checksums:
+ if self.info.post_processed is None or self.info.post_processed.resources_checksums is None:
+ expected_checksums = None
+ else:
+ expected_checksums = self.info.post_processed.resources_checksums.get(split)
+ verify_checksums(expected_checksums, recorded_checksums, "post processing resources")
+ if self.info.post_processed is None:
+ self.info.post_processed = PostProcessedInfo()
+ if self.info.post_processed.resources_checksums is None:
+ self.info.post_processed.resources_checksums = {}
+ self.info.post_processed.resources_checksums[str(split)] = recorded_checksums
+ self.info.post_processing_size = sum(
+ checksums_dict["num_bytes"]
+ for split_checksums_dicts in self.info.post_processed.resources_checksums.values()
+ for checksums_dict in split_checksums_dicts.values()
+ )
+ if self.info.dataset_size is not None and self.info.download_size is not None:
+ self.info.size_in_bytes = (
+ self.info.dataset_size + self.info.download_size + self.info.post_processing_size
+ )
+ self._save_info()
+ ds._info.post_processed = self.info.post_processed
+ ds._info.post_processing_size = self.info.post_processing_size
+ ds._info.size_in_bytes = self.info.size_in_bytes
+ if self.info.post_processed.features is not None:
+ if self.info.post_processed.features.type != ds.features.type:
+ raise ValueError(
+ f"Post-processed features info don't match the dataset:\nGot\n{self.info.post_processed.features}\nbut expected something like\n{ds.features}"
+ )
+ else:
+ ds.info.features = self.info.post_processed.features
+
+ return ds
+
+ def _as_dataset(self, split: Union[ReadInstruction, Split] = Split.TRAIN, in_memory: bool = False) -> Dataset:
+ """Constructs a `Dataset`.
+
+ This is the internal implementation to overwrite called when user calls
+ `as_dataset`. It should read the pre-processed datasets files and generate
+ the `Dataset` object.
+
+ Args:
+ split (`datasets.Split`):
+ which subset of the data to read.
+ in_memory (`bool`, defaults to `False`):
+ Whether to copy the data in-memory.
+
+ Returns:
+ `Dataset`
+ """
+ cache_dir = self._fs._strip_protocol(self._output_dir)
+ dataset_name = self.dataset_name
+ if self._check_legacy_cache():
+ dataset_name = self.name
+ dataset_kwargs = ArrowReader(cache_dir, self.info).read(
+ name=dataset_name,
+ instructions=split,
+ split_infos=self.info.splits.values(),
+ in_memory=in_memory,
+ )
+ fingerprint = self._get_dataset_fingerprint(split)
+ return Dataset(fingerprint=fingerprint, **dataset_kwargs)
+
+ def _get_dataset_fingerprint(self, split: Union[ReadInstruction, Split]) -> str:
+ """The dataset fingerprint is the hash of the relative directory dataset_name/config_name/version/hash, as well as the split specs."""
+ hasher = Hasher()
+ hasher.update(Path(self._relative_data_dir()).as_posix())
+ hasher.update(str(split)) # for example: train, train+test, train[:10%], test[:33%](pct1_dropremainder)
+ fingerprint = hasher.hexdigest()
+ return fingerprint
+
+ def as_streaming_dataset(
+ self,
+ split: Optional[str] = None,
+ base_path: Optional[str] = None,
+ ) -> Union[Dict[str, IterableDataset], IterableDataset]:
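+        """Create the dataset splits as lazy [`IterableDataset`] objects that stream from the source
+        data, without downloading and preparing the full dataset to disk.
+
+        Example (illustrative; assumes a streamable dataset such as the one used in `as_dataset` above):
+
+        ```py
+        >>> from datasets import load_dataset_builder
+        >>> builder = load_dataset_builder('rotten_tomatoes')
+        >>> ds = builder.as_streaming_dataset(split='train')
+        >>> next(iter(ds))
+        ```
+        """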
+ if is_remote_filesystem(self._fs):
+ raise NotImplementedError(
+ f"Loading a streaming dataset cached in a {type(self._fs).__name__} is not supported yet."
+ )
+
+ dl_manager = StreamingDownloadManager(
+ base_path=base_path or self.base_path,
+ download_config=DownloadConfig(token=self.token, storage_options=self.storage_options),
+ dataset_name=self.dataset_name,
+ data_dir=self.config.data_dir,
+ )
+ self._check_manual_download(dl_manager)
+ splits_generators = {sg.name: sg for sg in self._split_generators(dl_manager)}
+ # By default, return all splits
+ if split is None:
+ splits_generator = splits_generators
+ elif split in splits_generators:
+ splits_generator = splits_generators[split]
+ else:
+ raise ValueError(f"Bad split: {split}. Available splits: {list(splits_generators)}")
+
+ # Create a dataset for each of the given splits
+ datasets = map_nested(
+ self._as_streaming_dataset_single,
+ splits_generator,
+ map_tuple=True,
+ )
+ if isinstance(datasets, dict):
+ datasets = IterableDatasetDict(datasets)
+ return datasets
+
+ def _as_streaming_dataset_single(
+ self,
+ splits_generator,
+ ) -> IterableDataset:
+ ex_iterable = self._get_examples_iterable_for_split(splits_generator)
+ # add auth to be able to access and decode audio/image files from private repositories.
+ token_per_repo_id = {self.repo_id: self.token} if self.repo_id else {}
+ return IterableDataset(
+ ex_iterable, info=self.info, split=splits_generator.name, token_per_repo_id=token_per_repo_id
+ )
+
+ def _post_process(self, dataset: Dataset, resources_paths: Mapping[str, str]) -> Optional[Dataset]:
+ """Run dataset transforms or add indexes"""
+ return None
+
+ def _post_processing_resources(self, split: str) -> Dict[str, str]:
+ """Mapping resource_name -> resource_file_name"""
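+        # Illustrative (hypothetical) example: a builder that adds a FAISS search index during
+        # post-processing could return something like {"embeddings_index": f"{split}_index.faiss"}.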
+ return {}
+
+ def _download_post_processing_resources(
+ self, split: str, resource_name: str, dl_manager: DownloadManager
+ ) -> Optional[str]:
+ """Download the resource using the download manager and return the downloaded path."""
+ return None
+
+ @abc.abstractmethod
+ def _split_generators(self, dl_manager: DownloadManager):
+ """Specify feature dictionary generators and dataset splits.
+
+ This function returns a list of `SplitGenerator`s defining how to generate
+ data and what splits to use.
+
+ Example:
+
+ return [
+ datasets.SplitGenerator(
+ name=datasets.Split.TRAIN,
+ gen_kwargs={'file': 'train_data.zip'},
+ ),
+ datasets.SplitGenerator(
+ name=datasets.Split.TEST,
+ gen_kwargs={'file': 'test_data.zip'},
+ ),
+ ]
+
+ The above code will first call `_generate_examples(file='train_data.zip')`
+ to write the train data, then `_generate_examples(file='test_data.zip')` to
+ write the test data.
+
+ Datasets are typically split into different subsets to be used at various
+ stages of training and evaluation.
+
+ Note that for datasets without a `VALIDATION` split, you can use a
+ fraction of the `TRAIN` data for evaluation as you iterate on your model
+ so as not to overfit to the `TEST` data.
+
+ For downloads and extractions, use the given `download_manager`.
+ Note that the `DownloadManager` caches downloads, so it is fine to have each
+ generator attempt to download the source data.
+
+ A good practice is to download all data in this function, and then
+ distribute the relevant parts to each split with the `gen_kwargs` argument
+
+ Args:
+ dl_manager (`DownloadManager`):
+ Download manager to download the data
+
+ Returns:
+ `list`.
+ """
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def _prepare_split(
+ self,
+ split_generator: SplitGenerator,
+ file_format: str = "arrow",
+ max_shard_size: Optional[Union[str, int]] = None,
+ num_proc: Optional[int] = None,
+ **kwargs,
+ ):
+ """Generate the examples and record them on disk.
+
+ Args:
+ split_generator (`SplitGenerator`):
+ Split generator to process
+ file_format (`str`, *optional*):
+ format of the data files in which the dataset will be written.
+ Supported formats: "arrow", "parquet". Default to "arrow" format.
+ max_shard_size (`Union[str, int]`, *optional*):
+ Maximum number of bytes written per shard, default is "500MB".
+ The size is based on uncompressed data size, so in practice your shard files may be smaller than
+ `max_shard_size` thanks to Parquet compression for example.
+ num_proc (`int`, *optional*, defaults to `None`):
+ Number of processes when downloading and generating the dataset locally.
+ Multiprocessing is disabled by default.
+
+ **kwargs: Additional kwargs forwarded from _download_and_prepare (ex:
+ beam pipeline)
+ """
+ raise NotImplementedError()
+
+ def _get_examples_iterable_for_split(self, split_generator: SplitGenerator) -> ExamplesIterable:
+ """Generate the examples on the fly.
+
+ Args:
+ split_generator (`SplitGenerator`):
+ Split generator to process
+ """
+ raise NotImplementedError()
+
+
+class GeneratorBasedBuilder(DatasetBuilder):
+ """Base class for datasets with data generation based on dict generators.
+
+ `GeneratorBasedBuilder` is a convenience class that abstracts away much
+ of the data writing and reading of `DatasetBuilder`. It expects subclasses to
+ implement generators of feature dictionaries across the dataset splits
+ (`_split_generators`). See the method docstrings for details.
+ """
+
+ @abc.abstractmethod
+ def _generate_examples(self, **kwargs):
+ """Default function generating examples for each `SplitGenerator`.
+
+        This function preprocesses the examples from the raw data into the preprocessed
+        dataset files.
+        It is called once for each `SplitGenerator` defined in
+        `_split_generators`. The examples yielded here will be written to
+        disk.
+
+ Args:
+ **kwargs (additional keyword arguments):
+ Arguments forwarded from the SplitGenerator.gen_kwargs
+
+ Yields:
+ key: `str` or `int`, a unique deterministic example identification key.
+                * Unique: An error will be raised if two examples are yielded with the
+                  same key.
+                * Deterministic: When generating the dataset twice, the same example
+                  should have the same key.
+                Good keys can be the image id, or the line number if examples are extracted
+                from a text file.
+                The key will be hashed and sorted to shuffle examples deterministically,
+                so that generating the dataset multiple times keeps the examples in the
+                same order.
+ example: `dict`, a feature dictionary
+ ready to be encoded and written to disk. The example will be
+ encoded with `self.info.features.encode_example({...})`.
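+
+        Example:
+
+        A minimal illustrative sketch; the `filepath` keyword argument and the "text" field are
+        hypothetical, real builders receive whatever was passed in `SplitGenerator.gen_kwargs`:
+
+        ```py
+        def _generate_examples(self, filepath):
+            with open(filepath, encoding="utf-8") as f:
+                for line_id, line in enumerate(f):
+                    # the line number is a unique and deterministic key
+                    yield line_id, {"text": line.strip()}
+        ```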
+ """
+ raise NotImplementedError()
+
+ def _prepare_split(
+ self,
+ split_generator: SplitGenerator,
+ check_duplicate_keys: bool,
+ file_format="arrow",
+ num_proc: Optional[int] = None,
+ max_shard_size: Optional[Union[int, str]] = None,
+ ):
+ max_shard_size = convert_file_size_to_int(max_shard_size or config.MAX_SHARD_SIZE)
+
+ if self.info.splits is not None:
+ split_info = self.info.splits[split_generator.name]
+ else:
+ split_info = split_generator.split_info
+
+ SUFFIX = "-JJJJJ-SSSSS-of-NNNNN"
+ fname = f"{self.dataset_name}-{split_generator.name}{SUFFIX}.{file_format}"
+ fpath = posixpath.join(self._output_dir, fname)
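+        # "JJJJJ" and "SSSSS" are placeholders replaced below with the writing job id and the shard id
+        # within that job; after writing, shards are renamed to the final "{global_shard_id}-of-{total_shards}"
+        # pattern, or the whole suffix is dropped when there is a single shard.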
+
+ if num_proc and num_proc > 1:
+ num_input_shards = _number_of_shards_in_gen_kwargs(split_generator.gen_kwargs)
+ if num_input_shards <= 1:
+ logger.warning(
+ f"Setting num_proc from {num_proc} back to 1 for the {split_info.name} split to disable multiprocessing as it only contains one shard."
+ )
+ num_proc = 1
+ elif num_input_shards < num_proc:
+ logger.warning(
+ f"Setting num_proc from {num_proc} to {num_input_shards} for the {split_info.name} split as it only contains {num_input_shards} shards."
+ )
+ num_proc = num_input_shards
+
+ pbar = hf_tqdm(
+ unit=" examples",
+ total=split_info.num_examples,
+ desc=f"Generating {split_info.name} split",
+ )
+
+ _prepare_split_args = {
+ "fpath": fpath,
+ "file_format": file_format,
+ "max_shard_size": max_shard_size,
+ "split_info": split_info,
+ "check_duplicate_keys": check_duplicate_keys,
+ }
+
+ if num_proc is None or num_proc == 1:
+ result = None
+ gen_kwargs = split_generator.gen_kwargs
+ job_id = 0
+ with pbar:
+ for job_id, done, content in self._prepare_split_single(
+ gen_kwargs=gen_kwargs, job_id=job_id, **_prepare_split_args
+ ):
+ if done:
+ result = content
+ else:
+ pbar.update(content)
+ # wrapping everything into lists for consistency with the multiprocessed code path
+ assert result is not None, "Failed to retrieve results from prepare_split"
+ examples_per_job, bytes_per_job, features_per_job, shards_per_job, shard_lengths_per_job = [
+ [item] for item in result
+ ]
+ else:
+ kwargs_per_job = [
+ {"gen_kwargs": gen_kwargs, "job_id": job_id, **_prepare_split_args}
+ for job_id, gen_kwargs in enumerate(
+ _split_gen_kwargs(split_generator.gen_kwargs, max_num_jobs=num_proc)
+ )
+ ]
+ num_jobs = len(kwargs_per_job)
+
+ examples_per_job = [None] * num_jobs
+ bytes_per_job = [None] * num_jobs
+ features_per_job = [None] * num_jobs
+ shards_per_job = [None] * num_jobs
+ shard_lengths_per_job = [None] * num_jobs
+
+ with Pool(num_proc) as pool:
+ with pbar:
+ for job_id, done, content in iflatmap_unordered(
+ pool, self._prepare_split_single, kwargs_iterable=kwargs_per_job
+ ):
+ if done:
+ # the content is the result of the job
+ (
+ examples_per_job[job_id],
+ bytes_per_job[job_id],
+ features_per_job[job_id],
+ shards_per_job[job_id],
+ shard_lengths_per_job[job_id],
+ ) = content
+ else:
+ # the content is the number of examples progress update
+ pbar.update(content)
+
+ assert (
+ None not in examples_per_job
+ ), f"Failed to retrieve results from prepare_split: result list {examples_per_job} still contains None - at least one worker failed to return its results"
+
+ total_shards = sum(shards_per_job)
+ total_num_examples = sum(examples_per_job)
+ total_num_bytes = sum(bytes_per_job)
+ features = features_per_job[0]
+
+ split_generator.split_info.num_examples = total_num_examples
+ split_generator.split_info.num_bytes = total_num_bytes
+
+ # should rename everything at the end
+ logger.debug(f"Renaming {total_shards} shards.")
+ if total_shards > 1:
+ # use the -SSSSS-of-NNNNN pattern
+
+            def _rename_shard(shard_and_job: Tuple[int, int]):
+ shard_id, job_id = shard_and_job
+ global_shard_id = sum(shards_per_job[:job_id]) + shard_id
+ self._rename(
+ fpath.replace("SSSSS", f"{shard_id:05d}").replace("JJJJJ", f"{job_id:05d}"),
+ fpath.replace("JJJJJ-SSSSS", f"{global_shard_id:05d}").replace("NNNNN", f"{total_shards:05d}"),
+ )
+
+ shards_and_jobs = [
+ (shard_id, job_id)
+ for job_id, num_shards in enumerate(shards_per_job)
+ for shard_id in range(num_shards)
+ ]
+ thread_map(_rename_shard, shards_and_jobs, disable=True, max_workers=64)
+
+ split_generator.split_info.shard_lengths = [
+ shard_length for shard_lengths in shard_lengths_per_job for shard_length in shard_lengths
+ ]
+ else:
+ # don't use any pattern
+ shard_id, job_id = 0, 0
+ self._rename(
+ fpath.replace("SSSSS", f"{shard_id:05d}").replace("JJJJJ", f"{job_id:05d}"),
+ fpath.replace(SUFFIX, ""),
+ )
+
+ if self.info.features is None:
+ self.info.features = features
+
+ def _prepare_split_single(
+ self,
+ gen_kwargs: dict,
+ fpath: str,
+ file_format: str,
+ max_shard_size: int,
+ split_info: SplitInfo,
+ check_duplicate_keys: bool,
+ job_id: int,
+ ) -> Iterable[Tuple[int, bool, Union[int, tuple]]]:
+ generator = self._generate_examples(**gen_kwargs)
+ writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter
+ embed_local_files = file_format == "parquet"
+ shard_lengths = []
+ total_num_examples, total_num_bytes = 0, 0
+
+ shard_id = 0
+ num_examples_progress_update = 0
+ try:
+ writer = writer_class(
+ features=self.info.features,
+ path=fpath.replace("SSSSS", f"{shard_id:05d}").replace("JJJJJ", f"{job_id:05d}"),
+ writer_batch_size=self._writer_batch_size,
+ hash_salt=split_info.name,
+ check_duplicates=check_duplicate_keys,
+ storage_options=self._fs.storage_options,
+ embed_local_files=embed_local_files,
+ )
+ try:
+ _time = time.time()
+ for key, record in generator:
+ if max_shard_size is not None and writer._num_bytes > max_shard_size:
+ num_examples, num_bytes = writer.finalize()
+ writer.close()
+ shard_lengths.append(num_examples)
+ total_num_examples += num_examples
+ total_num_bytes += num_bytes
+ shard_id += 1
+ writer = writer_class(
+ features=writer._features,
+ path=fpath.replace("SSSSS", f"{shard_id:05d}").replace("JJJJJ", f"{job_id:05d}"),
+ writer_batch_size=self._writer_batch_size,
+ hash_salt=split_info.name,
+ check_duplicates=check_duplicate_keys,
+ storage_options=self._fs.storage_options,
+ embed_local_files=embed_local_files,
+ )
+ example = self.info.features.encode_example(record) if self.info.features is not None else record
+ writer.write(example, key)
+ num_examples_progress_update += 1
+ if time.time() > _time + config.PBAR_REFRESH_TIME_INTERVAL:
+ _time = time.time()
+ yield job_id, False, num_examples_progress_update
+ num_examples_progress_update = 0
+ finally:
+ yield job_id, False, num_examples_progress_update
+ num_shards = shard_id + 1
+ num_examples, num_bytes = writer.finalize()
+ writer.close()
+ shard_lengths.append(num_examples)
+ total_num_examples += num_examples
+ total_num_bytes += num_bytes
+ except Exception as e:
+ # Ignore the writer's error for no examples written to the file if this error was caused by the error in _generate_examples before the first example was yielded
+ if isinstance(e, SchemaInferenceError) and e.__context__ is not None:
+ e = e.__context__
+ raise DatasetGenerationError("An error occurred while generating the dataset") from e
+
+ yield job_id, True, (total_num_examples, total_num_bytes, writer._features, num_shards, shard_lengths)
+
+ def _download_and_prepare(self, dl_manager, verification_mode, **prepare_splits_kwargs):
+ super()._download_and_prepare(
+ dl_manager,
+ verification_mode,
+ check_duplicate_keys=verification_mode == VerificationMode.BASIC_CHECKS
+ or verification_mode == VerificationMode.ALL_CHECKS,
+ **prepare_splits_kwargs,
+ )
+
+ def _get_examples_iterable_for_split(self, split_generator: SplitGenerator) -> ExamplesIterable:
+ return ExamplesIterable(self._generate_examples, split_generator.gen_kwargs)
+
+
+class ArrowBasedBuilder(DatasetBuilder):
+ """Base class for datasets with data generation based on Arrow loading functions (CSV/JSON/Parquet)."""
+
+ @abc.abstractmethod
+ def _generate_tables(self, **kwargs):
+ """Default function generating examples for each `SplitGenerator`.
+
+        This function preprocesses the examples from the raw data into the preprocessed
+        dataset files.
+        It is called once for each `SplitGenerator` defined in
+        `_split_generators`. The examples yielded here will be written to
+        disk.
+
+ Args:
+ **kwargs (additional keyword arguments):
+ Arguments forwarded from the SplitGenerator.gen_kwargs
+
+ Yields:
+ key: `str` or `int`, a unique deterministic example identification key.
+                * Unique: An error will be raised if two examples are yielded with the
+                  same key.
+                * Deterministic: When generating the dataset twice, the same example
+                  should have the same key.
+                Good keys can be the image id, or the line number if examples are extracted
+                from a text file.
+                The key will be hashed and sorted to shuffle examples deterministically,
+                so that generating the dataset multiple times keeps the examples in the
+                same order.
+ example: `pyarrow.Table`, a feature table
+ ready to be encoded and written to disk.
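+
+        Example:
+
+        A minimal illustrative sketch; the `files` keyword argument is hypothetical and would come
+        from `SplitGenerator.gen_kwargs`, and CSV input files are assumed:
+
+        ```py
+        import pyarrow.csv
+
+        def _generate_tables(self, files):
+            for file_idx, file in enumerate(files):
+                # one pyarrow.Table per input file; the key only needs to be unique and deterministic
+                yield file_idx, pyarrow.csv.read_csv(file)
+        ```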
+ """
+ raise NotImplementedError()
+
+ def _prepare_split(
+ self,
+ split_generator: SplitGenerator,
+ file_format: str = "arrow",
+ num_proc: Optional[int] = None,
+ max_shard_size: Optional[Union[str, int]] = None,
+ ):
+ max_shard_size = convert_file_size_to_int(max_shard_size or config.MAX_SHARD_SIZE)
+
+ try:
+ split_info = self.info.splits[split_generator.name]
+ except Exception:
+ split_info = split_generator.split_info
+
+ SUFFIX = "-JJJJJ-SSSSS-of-NNNNN"
+ fname = f"{self.dataset_name}-{split_generator.name}{SUFFIX}.{file_format}"
+ fpath = posixpath.join(self._output_dir, fname)
+
+ if num_proc and num_proc > 1:
+ num_input_shards = _number_of_shards_in_gen_kwargs(split_generator.gen_kwargs)
+ if num_input_shards <= 1:
+ logger.warning(
+ f"Setting num_proc from {num_proc} back to 1 for the {split_info.name} split to disable multiprocessing as it only contains one shard."
+ )
+ num_proc = 1
+ elif num_input_shards < num_proc:
+ logger.warning(
+ f"Setting num_proc from {num_proc} to {num_input_shards} for the {split_info.name} split as it only contains {num_input_shards} shards."
+ )
+ num_proc = num_input_shards
+
+ pbar = hf_tqdm(
+ unit=" examples",
+ total=split_info.num_examples,
+ desc=f"Generating {split_info.name} split",
+ )
+
+ _prepare_split_args = {
+ "fpath": fpath,
+ "file_format": file_format,
+ "max_shard_size": max_shard_size,
+ }
+
+ if num_proc is None or num_proc == 1:
+ result = None
+ gen_kwargs = split_generator.gen_kwargs
+ job_id = 0
+ with pbar:
+ for job_id, done, content in self._prepare_split_single(
+ gen_kwargs=gen_kwargs, job_id=job_id, **_prepare_split_args
+ ):
+ if done:
+ result = content
+ else:
+ pbar.update(content)
+ # wrapping everything into lists for consistency with the multiprocessed code path
+ assert result is not None, "Failed to retrieve results from prepare_split"
+ examples_per_job, bytes_per_job, features_per_job, shards_per_job, shard_lengths_per_job = [
+ [item] for item in result
+ ]
+ else:
+ kwargs_per_job = [
+ {"gen_kwargs": gen_kwargs, "job_id": job_id, **_prepare_split_args}
+ for job_id, gen_kwargs in enumerate(
+ _split_gen_kwargs(split_generator.gen_kwargs, max_num_jobs=num_proc)
+ )
+ ]
+ num_jobs = len(kwargs_per_job)
+
+ examples_per_job = [None] * num_jobs
+ bytes_per_job = [None] * num_jobs
+ features_per_job = [None] * num_jobs
+ shards_per_job = [None] * num_jobs
+ shard_lengths_per_job = [None] * num_jobs
+
+ with Pool(num_proc) as pool:
+ with pbar:
+ for job_id, done, content in iflatmap_unordered(
+ pool, self._prepare_split_single, kwargs_iterable=kwargs_per_job
+ ):
+ if done:
+ # the content is the result of the job
+ (
+ examples_per_job[job_id],
+ bytes_per_job[job_id],
+ features_per_job[job_id],
+ shards_per_job[job_id],
+ shard_lengths_per_job[job_id],
+ ) = content
+ else:
+ # the content is the number of examples progress update
+ pbar.update(content)
+
+ assert (
+ None not in examples_per_job
+ ), f"Failed to retrieve results from prepare_split: result list {examples_per_job} still contains None - at least one worker failed to return its results"
+
+ total_shards = sum(shards_per_job)
+ total_num_examples = sum(examples_per_job)
+ total_num_bytes = sum(bytes_per_job)
+ features = features_per_job[0]
+
+ split_generator.split_info.num_examples = total_num_examples
+ split_generator.split_info.num_bytes = total_num_bytes
+
+ # should rename everything at the end
+ logger.debug(f"Renaming {total_shards} shards.")
+ if total_shards > 1:
+ # use the -SSSSS-of-NNNNN pattern
+
+            def _rename_shard(shard_id_and_job: Tuple[int, int]):
+ shard_id, job_id = shard_id_and_job
+ global_shard_id = sum(shards_per_job[:job_id]) + shard_id
+ self._rename(
+ fpath.replace("SSSSS", f"{shard_id:05d}").replace("JJJJJ", f"{job_id:05d}"),
+ fpath.replace("JJJJJ-SSSSS", f"{global_shard_id:05d}").replace("NNNNN", f"{total_shards:05d}"),
+ )
+
+ shard_ids_and_jobs = [
+ (shard_id, job_id)
+ for job_id, num_shards in enumerate(shards_per_job)
+ for shard_id in range(num_shards)
+ ]
+ thread_map(_rename_shard, shard_ids_and_jobs, disable=True, max_workers=64)
+
+ split_generator.split_info.shard_lengths = [
+ shard_length for shard_lengths in shard_lengths_per_job for shard_length in shard_lengths
+ ]
+ else:
+ # don't use any pattern
+ shard_id, job_id = 0, 0
+ self._rename(
+ fpath.replace("SSSSS", f"{shard_id:05d}").replace("JJJJJ", f"{job_id:05d}"),
+ fpath.replace(SUFFIX, ""),
+ )
+
+ if self.info.features is None:
+ self.info.features = features
+
+ def _prepare_split_single(
+ self, gen_kwargs: dict, fpath: str, file_format: str, max_shard_size: int, job_id: int
+ ) -> Iterable[Tuple[int, bool, Union[int, tuple]]]:
+ gen_kwargs = {k: tracked_list(v) if isinstance(v, list) else v for k, v in gen_kwargs.items()}
+ generator = self._generate_tables(**gen_kwargs)
+ writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter
+ embed_local_files = file_format == "parquet"
+ shard_lengths = []
+ total_num_examples, total_num_bytes = 0, 0
+
+ shard_id = 0
+ num_examples_progress_update = 0
+ try:
+ writer = writer_class(
+ features=self.info.features,
+ path=fpath.replace("SSSSS", f"{shard_id:05d}").replace("JJJJJ", f"{job_id:05d}"),
+ writer_batch_size=self._writer_batch_size,
+ storage_options=self._fs.storage_options,
+ embed_local_files=embed_local_files,
+ )
+ try:
+ _time = time.time()
+ for _, table in generator:
+ if max_shard_size is not None and writer._num_bytes > max_shard_size:
+ num_examples, num_bytes = writer.finalize()
+ writer.close()
+ shard_lengths.append(num_examples)
+ total_num_examples += num_examples
+ total_num_bytes += num_bytes
+ shard_id += 1
+ writer = writer_class(
+ features=writer._features,
+ path=fpath.replace("SSSSS", f"{shard_id:05d}").replace("JJJJJ", f"{job_id:05d}"),
+ writer_batch_size=self._writer_batch_size,
+ storage_options=self._fs.storage_options,
+ embed_local_files=embed_local_files,
+ )
+ try:
+ writer.write_table(table)
+ except CastError as cast_error:
+ raise DatasetGenerationCastError.from_cast_error(
+ cast_error=cast_error,
+ builder_name=self.info.builder_name,
+ gen_kwargs=gen_kwargs,
+ token=self.token,
+ )
+ num_examples_progress_update += len(table)
+ if time.time() > _time + config.PBAR_REFRESH_TIME_INTERVAL:
+ _time = time.time()
+ yield job_id, False, num_examples_progress_update
+ num_examples_progress_update = 0
+ finally:
+ yield job_id, False, num_examples_progress_update
+ num_shards = shard_id + 1
+ num_examples, num_bytes = writer.finalize()
+ writer.close()
+ shard_lengths.append(num_examples)
+ total_num_examples += num_examples
+ total_num_bytes += num_bytes
+ except Exception as e:
+ # Ignore the writer's error for no examples written to the file if this error was caused by the error in _generate_examples before the first example was yielded
+ if isinstance(e, SchemaInferenceError) and e.__context__ is not None:
+ e = e.__context__
+ if isinstance(e, DatasetGenerationError):
+ raise
+ raise DatasetGenerationError("An error occurred while generating the dataset") from e
+
+ yield job_id, True, (total_num_examples, total_num_bytes, writer._features, num_shards, shard_lengths)
+
+ def _get_examples_iterable_for_split(self, split_generator: SplitGenerator) -> ExamplesIterable:
+ return ArrowExamplesIterable(self._generate_tables, kwargs=split_generator.gen_kwargs)
+
+
+class MissingBeamOptions(ValueError):
+ pass
+
+
+class BeamBasedBuilder(DatasetBuilder):
+ """Beam-based Builder."""
+
+ def __init__(self, *args, beam_runner=None, beam_options=None, **kwargs):
+ self._beam_runner = beam_runner
+ self._beam_options = beam_options
+ self._beam_writers = {} # {split: beam_writer} mapping.
+ super().__init__(*args, **kwargs)
+
+ def _make_split_generators_kwargs(self, prepare_split_kwargs):
+ # Pass `pipeline` into `_split_generators()` from `prepare_split_kwargs` if
+ # it's in the call signature of `_split_generators()`.
+ # This allows for global preprocessing in beam.
+ split_generators_kwargs = {}
+ split_generators_arg_names = inspect.signature(self._split_generators).parameters.keys()
+ if "pipeline" in split_generators_arg_names:
+ split_generators_kwargs["pipeline"] = prepare_split_kwargs["pipeline"]
+ return split_generators_kwargs
+
+ @abc.abstractmethod
+ def _build_pcollection(self, pipeline, **kwargs):
+ """Build the beam pipeline examples for each `SplitGenerator`.
+
+ This function extracts examples from the raw data with parallel transforms
+ in a Beam pipeline. It is called once for each `SplitGenerator` defined in
+ `_split_generators`. The examples from the PCollection will be
+ encoded and written to disk.
+
+ Warning: When running in a distributed setup, make sure that the data
+ which will be read (download_dir, manual_dir,...) and written (cache_dir)
+ can be accessed by the workers jobs. The data should be located in a
+ shared filesystem, like GCS.
+
+ Args:
+ pipeline ([`utils.beam_utils.BeamPipeline`]):
+ Apache Beam pipeline.
+ **kwargs (additional keyword arguments):
+ Arguments forwarded from the SplitGenerator.gen_kwargs.
+
+ Returns:
+ `beam.PCollection`: Apache Beam PCollection containing the
+ example to send to `self.info.features.encode_example(...)`.
+
+ Example:
+
+ ```
+ def _build_pcollection(pipeline, extracted_dir=None):
+ return (
+ pipeline
+ | beam.Create(gfile.io.listdir(extracted_dir))
+ | beam.Map(_process_file)
+ )
+ ```
+ """
+ raise NotImplementedError()
+
+ def _download_and_prepare(self, dl_manager, verification_mode, **prepare_splits_kwargs):
+ # Create the Beam pipeline and forward it to `_prepare_split`
+ import apache_beam as beam
+
+ import datasets.utils.beam_utils as beam_utils
+
+ beam_runner = self._beam_runner
+ beam_options = self._beam_options
+
+ if not beam_runner and not beam_options:
+ usage_example = f"load_dataset('{self.name}', '{self.config.name}', beam_runner='DirectRunner')"
+ raise MissingBeamOptions(
+ "Trying to generate a dataset using Apache Beam, yet no Beam Runner "
+ "or PipelineOptions() has been provided in `load_dataset` or in the "
+ "builder arguments. For big datasets it has to run on large-scale data "
+ "processing tools like Dataflow, Spark, etc. More information about "
+ "Apache Beam runners at "
+ "https://beam.apache.org/documentation/runners/capability-matrix/"
+ "\nIf you really want to run it locally because you feel like the "
+ "Dataset is small enough, you can use the local beam runner called "
+ "`DirectRunner` (you may run out of memory). \nExample of usage: "
+ f"\n\t`{usage_example}`"
+ )
+ if self._writer_batch_size is not None:
+ logger.warning(
+ "`writer_batch_size` is not supported for beam pipelines yet. Using the default chunk size for writing."
+ )
+
+        # Beam type checking assumes that a transform's multiple outputs are of the same type,
+        # which is not our case. Plus it doesn't handle all types correctly, so we
+        # are better off without it.
+ pipeline_options = {"pipeline_type_check": False}
+ if "num_proc" in prepare_splits_kwargs:
+ num_workers = prepare_splits_kwargs.pop("num_proc")
+ pipeline_options["direct_num_workers"] = num_workers
+ pipeline_options["num_workers"] = num_workers
+ pipeline_options["direct_running_mode"] = "multi_processing"
+ # TODO: Fix ModuleNotFoundError: No module named 'datasets_modules' when running multiprocessed DirectRunner
+            raise NotImplementedError("Using a DirectRunner with `num_proc` for multiprocessing is not supported yet.")
+ beam_options = beam_options or beam.options.pipeline_options.PipelineOptions.from_dictionary(pipeline_options)
+ # Use a single pipeline for all splits
+ pipeline = beam_utils.BeamPipeline(
+ runner=beam_runner,
+ options=beam_options,
+ )
+ super()._download_and_prepare(
+ dl_manager, verification_mode=VerificationMode.NO_CHECKS, pipeline=pipeline, **prepare_splits_kwargs
+ ) # TODO handle verification_mode in beam datasets
+ # Run pipeline
+ pipeline_results = pipeline.run()
+ pipeline_results.wait_until_finish()
+ metrics = pipeline_results.metrics()
+ # Update `info.splits`.
+ split_dict = self.info.splits
+ for split_name, beam_writer in self._beam_writers.items():
+ m_filter = beam.metrics.MetricsFilter().with_namespace(namespace=split_name)
+ num_examples, num_bytes = beam_writer.finalize(metrics.query(m_filter))
+ split_info = split_dict[split_name]
+ split_info.num_examples = num_examples
+ split_info.num_bytes = num_bytes
+ if hasattr(beam_writer, "_shard_lengths") and len(beam_writer._shard_lengths) > 1:
+ # keep the -SSSSS-of-NNNNN pattern
+ split_info.shard_lengths = beam_writer._shard_lengths
+ else:
+ # don't use any pattern
+ file_format = prepare_splits_kwargs.get("file_format", "arrow")
+ src_fname = f"{self.dataset_name}-{split_name}-00000-of-00001.{file_format}"
+ dst_fname = f"{self.dataset_name}-{split_name}.{file_format}"
+ src_fpath = posixpath.join(self._output_dir, src_fname)
+ dst_fpath = posixpath.join(self._output_dir, dst_fname)
+ self._rename(src_fpath, dst_fpath)
+
+ def _save_info(self):
+ download_config = (
+ self.dl_manager.download_config
+ if self.dl_manager
+ else DownloadConfig(token=self.token, storage_options=self._fs.storage_options)
+ )
+ with xopen(f"{self._output_dir}/{config.DATASET_INFO_FILENAME}", "wb", download_config=download_config) as f:
+ self.info._dump_info(f)
+ if self.info.license:
+ with xopen(f"{self._output_dir}/{config.LICENSE_FILENAME}", "wb", download_config=download_config) as f:
+ self.info._dump_license(f)
+
+ def _prepare_split(
+ self, split_generator, pipeline, file_format="arrow", max_shard_size: Optional[Union[str, int]] = None
+ ):
+ import apache_beam as beam
+
+ if max_shard_size is not None:
+ raise NotImplementedError(
+ "max_shard_size is not supported for Beam datasets."
+ "Please set it to None to use the default Apache Beam sharding and get the best performance."
+ )
+
+ # To write examples in filesystem:
+ split_name = split_generator.split_info.name
+ fname = f"{self.dataset_name}-{split_name}.{file_format}"
+ fpath = posixpath.join(self._output_dir, fname)
+ beam_writer = BeamWriter(
+ features=self.info.features, path=fpath, namespace=split_name, cache_dir=self._output_dir
+ )
+ self._beam_writers[split_name] = beam_writer
+
+ encode_example = self.info.features.encode_example
+
+ # Note: We need to wrap the pipeline in a PTransform to avoid re-using the
+ # same label names for each split
+ @beam.ptransform_fn
+ def _build_pcollection(pipeline):
+ """PTransformation which build a single split."""
+ # Encode the PCollection
+ pcoll_examples = self._build_pcollection(pipeline, **split_generator.gen_kwargs)
+ pcoll_examples |= "Encode" >> beam.Map(lambda key_ex: (key_ex[0], encode_example(key_ex[1])))
+ return beam_writer.write_from_pcollection(pcoll_examples)
+
+ # Add the PCollection to the pipeline
+        _ = pipeline | split_name >> _build_pcollection()  # pylint: disable=no-value-for-parameter
+
+ def as_streaming_dataset(
+ self,
+ split: Optional[str] = None,
+ ) -> Union[Dict[str, IterableDataset], IterableDataset]:
+ self._request_info_from_hf_gcs()
+ datasets = {
+ split.name: IterableDataset(self._get_examples_iterable_for_split(split), info=self.info, split=split.name)
+ for split in self.info.splits.values()
+ }
+ if split:
+ try:
+ datasets = datasets[split]
+ except KeyError:
+ raise ValueError(f"Bad split: {split}. Available splits: {list(datasets)}")
+ if isinstance(datasets, dict):
+ datasets = IterableDatasetDict(datasets)
+ return datasets
+
+ def _get_examples_iterable_for_split(self, split: SplitInfo) -> ExamplesIterable:
+ return ExamplesIterable(self._generate_examples_from_hf_gcs, {"split": split})
+
+ def _generate_examples_from_hf_gcs(self, split: SplitInfo):
+ if split.shard_lengths:
+ num_shards = len(split.shard_lengths)
+ remote_prepared_urls = [
+ f"{self._remote_cache_dir_from_hf_gcs}/{self.name}-{split.name}-{shard_id:05d}-of-{num_shards:05d}.arrow"
+ for shard_id in range(num_shards)
+ ]
+ else:
+ remote_prepared_urls = [f"{self._remote_cache_dir_from_hf_gcs}/{self.name}-{split.name}.arrow"]
+ key = 0
+ download_config = (
+ self.dl_manager.download_config
+ if self.dl_manager
+ else DownloadConfig(token=self.token, storage_options=self._fs.storage_options)
+ )
+ for remote_prepared_url in remote_prepared_urls:
+ with xopen(remote_prepared_url, "rb", download_config=download_config) as f:
+ with pa.ipc.open_stream(f) as reader:
+ for record_batch in reader:
+ for record in record_batch.to_pylist():
+ yield key, record
+ key += 1
+
+ def _request_info_from_hf_gcs(self):
+ from .download.streaming_download_manager import xopen
+
+ remote_dataset_info = f"{self._remote_cache_dir_from_hf_gcs}/{config.DATASET_INFO_FILENAME}"
+ try:
+            download_config = (
+ self.dl_manager.download_config
+ if self.dl_manager
+ else DownloadConfig(token=self.token, storage_options=self._fs.storage_options)
+ )
+ with xopen(remote_dataset_info, download_config=download_config) as f:
+ import json
+
+ _info = json.load(f)
+ except FileNotFoundError as err:
+ raise DatasetNotOnHfGcsError(err) from None
+ self.info.update(DatasetInfo.from_dict(_info))
+
+ @property
+ def _remote_cache_dir_from_hf_gcs(self):
+ relative_data_dir = self._relative_data_dir(with_hash=False)
+ return HF_GCP_BASE_URL + "/" + Path(relative_data_dir).as_posix()
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/config.py b/evalkit_tf437/lib/python3.10/site-packages/datasets/config.py
new file mode 100644
index 0000000000000000000000000000000000000000..32127bea7dcfc5f4bebce12f298ee8c8e3370f70
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/datasets/config.py
@@ -0,0 +1,259 @@
+import importlib
+import importlib.metadata
+import logging
+import os
+import platform
+from pathlib import Path
+from typing import Optional
+
+from packaging import version
+
+
+logger = logging.getLogger(__name__.split(".", 1)[0]) # to avoid circular import from .utils.logging
+
+# Datasets
+S3_DATASETS_BUCKET_PREFIX = "https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets"
+CLOUDFRONT_DATASETS_DISTRIB_PREFIX = "https://cdn-datasets.huggingface.co/datasets/datasets"
+REPO_DATASETS_URL = "https://raw.githubusercontent.com/huggingface/datasets/{revision}/datasets/{path}/{name}"
+
+# Metrics
+S3_METRICS_BUCKET_PREFIX = "https://s3.amazonaws.com/datasets.huggingface.co/datasets/metrics"
+CLOUDFRONT_METRICS_DISTRIB_PREFIX = "https://cdn-datasets.huggingface.co/datasets/metric"
+REPO_METRICS_URL = "https://raw.githubusercontent.com/huggingface/datasets/{revision}/metrics/{path}/{name}"
+
+# Hub
+HF_ENDPOINT = os.environ.get("HF_ENDPOINT", "https://huggingface.co")
+HUB_DATASETS_URL = HF_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}"
+HUB_DATASETS_HFFS_URL = "hf://datasets/{repo_id}@{revision}/{path}"
+HUB_DEFAULT_VERSION = "main"
+
+PY_VERSION = version.parse(platform.python_version())
+
+# General environment variables accepted values for booleans
+ENV_VARS_TRUE_VALUES = {"1", "ON", "YES", "TRUE"}
+ENV_VARS_FALSE_VALUES = {"0", "OFF", "NO", "FALSE"}
+ENV_VARS_TRUE_AND_AUTO_VALUES = ENV_VARS_TRUE_VALUES.union({"AUTO"})
+ENV_VARS_FALSE_AND_AUTO_VALUES = ENV_VARS_FALSE_VALUES.union({"AUTO"})
+
+
+# Imports
+DILL_VERSION = version.parse(importlib.metadata.version("dill"))
+FSSPEC_VERSION = version.parse(importlib.metadata.version("fsspec"))
+PANDAS_VERSION = version.parse(importlib.metadata.version("pandas"))
+PYARROW_VERSION = version.parse(importlib.metadata.version("pyarrow"))
+HF_HUB_VERSION = version.parse(importlib.metadata.version("huggingface_hub"))
+
+USE_TF = os.environ.get("USE_TF", "AUTO").upper()
+USE_TORCH = os.environ.get("USE_TORCH", "AUTO").upper()
+USE_JAX = os.environ.get("USE_JAX", "AUTO").upper()
+
+TORCH_VERSION = "N/A"
+TORCH_AVAILABLE = False
+
+if USE_TORCH in ENV_VARS_TRUE_AND_AUTO_VALUES and USE_TF not in ENV_VARS_TRUE_VALUES:
+ TORCH_AVAILABLE = importlib.util.find_spec("torch") is not None
+ if TORCH_AVAILABLE:
+ try:
+ TORCH_VERSION = version.parse(importlib.metadata.version("torch"))
+ logger.info(f"PyTorch version {TORCH_VERSION} available.")
+ except importlib.metadata.PackageNotFoundError:
+ pass
+else:
+ logger.info("Disabling PyTorch because USE_TF is set")
+
+TF_VERSION = "N/A"
+TF_AVAILABLE = False
+
+if USE_TF in ENV_VARS_TRUE_AND_AUTO_VALUES and USE_TORCH not in ENV_VARS_TRUE_VALUES:
+ TF_AVAILABLE = importlib.util.find_spec("tensorflow") is not None
+ if TF_AVAILABLE:
+ # For the metadata, we have to look for both tensorflow and tensorflow-cpu
+ for package in [
+ "tensorflow",
+ "tensorflow-cpu",
+ "tensorflow-gpu",
+ "tf-nightly",
+ "tf-nightly-cpu",
+ "tf-nightly-gpu",
+ "intel-tensorflow",
+ "tensorflow-rocm",
+ "tensorflow-macos",
+ ]:
+ try:
+ TF_VERSION = version.parse(importlib.metadata.version(package))
+ except importlib.metadata.PackageNotFoundError:
+ continue
+ else:
+ break
+ else:
+ TF_AVAILABLE = False
+ if TF_AVAILABLE:
+ if TF_VERSION.major < 2:
+ logger.info(f"TensorFlow found but with version {TF_VERSION}. `datasets` requires version 2 minimum.")
+ TF_AVAILABLE = False
+ else:
+ logger.info(f"TensorFlow version {TF_VERSION} available.")
+else:
+ logger.info("Disabling Tensorflow because USE_TORCH is set")
+
+
+JAX_VERSION = "N/A"
+JAX_AVAILABLE = False
+
+if USE_JAX in ENV_VARS_TRUE_AND_AUTO_VALUES:
+ JAX_AVAILABLE = importlib.util.find_spec("jax") is not None and importlib.util.find_spec("jaxlib") is not None
+ if JAX_AVAILABLE:
+ try:
+ JAX_VERSION = version.parse(importlib.metadata.version("jax"))
+ logger.info(f"JAX version {JAX_VERSION} available.")
+ except importlib.metadata.PackageNotFoundError:
+ pass
+else:
+ logger.info("Disabling JAX because USE_JAX is set to False")
+
+
+USE_BEAM = os.environ.get("USE_BEAM", "AUTO").upper()
+BEAM_VERSION = "N/A"
+BEAM_AVAILABLE = False
+if USE_BEAM in ENV_VARS_TRUE_AND_AUTO_VALUES:
+ try:
+ BEAM_VERSION = version.parse(importlib.metadata.version("apache_beam"))
+ BEAM_AVAILABLE = True
+ logger.info(f"Apache Beam version {BEAM_VERSION} available.")
+ except importlib.metadata.PackageNotFoundError:
+ pass
+else:
+ logger.info("Disabling Apache Beam because USE_BEAM is set to False")
+
+
+# Optional tools for data loading
+SQLALCHEMY_AVAILABLE = importlib.util.find_spec("sqlalchemy") is not None
+
+# Optional tools for feature decoding
+PIL_AVAILABLE = importlib.util.find_spec("PIL") is not None
+IS_OPUS_SUPPORTED = importlib.util.find_spec("soundfile") is not None and version.parse(
+ importlib.import_module("soundfile").__libsndfile_version__
+) >= version.parse("1.0.31")
+IS_MP3_SUPPORTED = importlib.util.find_spec("soundfile") is not None and version.parse(
+ importlib.import_module("soundfile").__libsndfile_version__
+) >= version.parse("1.1.0")
+
+# Optional compression tools
+RARFILE_AVAILABLE = importlib.util.find_spec("rarfile") is not None
+ZSTANDARD_AVAILABLE = importlib.util.find_spec("zstandard") is not None
+LZ4_AVAILABLE = importlib.util.find_spec("lz4") is not None
+PY7ZR_AVAILABLE = importlib.util.find_spec("py7zr") is not None
+
+# Cache location
+DEFAULT_XDG_CACHE_HOME = "~/.cache"
+XDG_CACHE_HOME = os.getenv("XDG_CACHE_HOME", DEFAULT_XDG_CACHE_HOME)
+DEFAULT_HF_CACHE_HOME = os.path.join(XDG_CACHE_HOME, "huggingface")
+HF_CACHE_HOME = os.path.expanduser(os.getenv("HF_HOME", DEFAULT_HF_CACHE_HOME))
+
+DEFAULT_HF_DATASETS_CACHE = os.path.join(HF_CACHE_HOME, "datasets")
+HF_DATASETS_CACHE = Path(os.getenv("HF_DATASETS_CACHE", DEFAULT_HF_DATASETS_CACHE))
+
+DEFAULT_HF_METRICS_CACHE = os.path.join(HF_CACHE_HOME, "metrics")
+HF_METRICS_CACHE = Path(os.getenv("HF_METRICS_CACHE", DEFAULT_HF_METRICS_CACHE))
+
+DEFAULT_HF_MODULES_CACHE = os.path.join(HF_CACHE_HOME, "modules")
+HF_MODULES_CACHE = Path(os.getenv("HF_MODULES_CACHE", DEFAULT_HF_MODULES_CACHE))
+
+DOWNLOADED_DATASETS_DIR = "downloads"
+DEFAULT_DOWNLOADED_DATASETS_PATH = os.path.join(HF_DATASETS_CACHE, DOWNLOADED_DATASETS_DIR)
+DOWNLOADED_DATASETS_PATH = Path(os.getenv("HF_DATASETS_DOWNLOADED_DATASETS_PATH", DEFAULT_DOWNLOADED_DATASETS_PATH))
+
+EXTRACTED_DATASETS_DIR = "extracted"
+DEFAULT_EXTRACTED_DATASETS_PATH = os.path.join(DEFAULT_DOWNLOADED_DATASETS_PATH, EXTRACTED_DATASETS_DIR)
+EXTRACTED_DATASETS_PATH = Path(os.getenv("HF_DATASETS_EXTRACTED_DATASETS_PATH", DEFAULT_EXTRACTED_DATASETS_PATH))
+
+# Download count for the website
+HF_UPDATE_DOWNLOAD_COUNTS = (
+ os.environ.get("HF_UPDATE_DOWNLOAD_COUNTS", "AUTO").upper() in ENV_VARS_TRUE_AND_AUTO_VALUES
+)
+
+# Remote dataset scripts support
+__HF_DATASETS_TRUST_REMOTE_CODE = os.environ.get("HF_DATASETS_TRUST_REMOTE_CODE", "1")
+HF_DATASETS_TRUST_REMOTE_CODE: Optional[bool] = (
+ True
+ if __HF_DATASETS_TRUST_REMOTE_CODE.upper() in ENV_VARS_TRUE_VALUES
+ else False
+ if __HF_DATASETS_TRUST_REMOTE_CODE.upper() in ENV_VARS_FALSE_VALUES
+ else None
+)
+TIME_OUT_REMOTE_CODE = 15
+
+# Datasets-server
+USE_PARQUET_EXPORT = True
+
+# Batch size constants. For more info, see:
+# https://github.com/apache/arrow/blob/master/docs/source/cpp/arrays.rst#size-limitations-and-recommendations)
+DEFAULT_MAX_BATCH_SIZE = 1000
+
+# Size of the preloaded record batch in `Dataset.__iter__`
+ARROW_READER_BATCH_SIZE_IN_DATASET_ITER = 10
+
+# Max shard size in bytes (e.g. to shard parquet datasets in push_to_hub or download_and_prepare)
+MAX_SHARD_SIZE = "500MB"
+
+# Parquet configuration
+PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS = 100
+PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS = 100
+PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS = 100
+
+# Offline mode
+HF_DATASETS_OFFLINE = os.environ.get("HF_DATASETS_OFFLINE", "AUTO").upper() in ENV_VARS_TRUE_VALUES
+
+# Here, `True` will disable progress bars globally without possibility of enabling it
+# programmatically. `False` will enable them without possibility of disabling them.
+# If environment variable is not set (None), then the user is free to enable/disable
+# them programmatically.
+# TL;DR: env variable has priority over code
+__HF_DATASETS_DISABLE_PROGRESS_BARS = os.environ.get("HF_DATASETS_DISABLE_PROGRESS_BARS")
+HF_DATASETS_DISABLE_PROGRESS_BARS: Optional[bool] = (
+ __HF_DATASETS_DISABLE_PROGRESS_BARS.upper() in ENV_VARS_TRUE_VALUES
+ if __HF_DATASETS_DISABLE_PROGRESS_BARS is not None
+ else None
+)
+
+# In-memory
+DEFAULT_IN_MEMORY_MAX_SIZE = 0 # Disabled
+IN_MEMORY_MAX_SIZE = float(os.environ.get("HF_DATASETS_IN_MEMORY_MAX_SIZE", DEFAULT_IN_MEMORY_MAX_SIZE))
+
+# File names
+DATASET_ARROW_FILENAME = "dataset.arrow"
+DATASET_INDICES_FILENAME = "indices.arrow"
+DATASET_STATE_JSON_FILENAME = "state.json"
+DATASET_INFO_FILENAME = "dataset_info.json"
+DATASETDICT_INFOS_FILENAME = "dataset_infos.json"
+LICENSE_FILENAME = "LICENSE"
+METRIC_INFO_FILENAME = "metric_info.json"
+DATASETDICT_JSON_FILENAME = "dataset_dict.json"
+METADATA_CONFIGS_FIELD = "configs"
+REPOCARD_FILENAME = "README.md"
+REPOYAML_FILENAME = ".huggingface.yaml"
+
+MODULE_NAME_FOR_DYNAMIC_MODULES = "datasets_modules"
+
+MAX_DATASET_CONFIG_ID_READABLE_LENGTH = 255
+
+# Temporary cache directory prefix
+TEMP_CACHE_DIR_PREFIX = "hf_datasets-"
+
+# Streaming
+STREAMING_READ_MAX_RETRIES = 20
+STREAMING_READ_RETRY_INTERVAL = 5
+
+# Datasets without script
+DATA_FILES_MAX_NUMBER_FOR_MODULE_INFERENCE = 200
+GLOBBED_DATA_FILES_MAX_NUMBER_FOR_MODULE_INFERENCE = 10
+ARCHIVED_DATA_FILES_MAX_NUMBER_FOR_MODULE_INFERENCE = 200
+
+# Progress bars
+PBAR_REFRESH_TIME_INTERVAL = 0.05 # 20 progress updates per sec
+
+# Maximum number of uploaded files per commit
+UPLOADS_MAX_NUMBER_PER_COMMIT = 50
+
+# Backward compatibility
+MAX_TABLE_NBYTES_FOR_PICKLING = 4 << 30
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/data_files.py b/evalkit_tf437/lib/python3.10/site-packages/datasets/data_files.py
new file mode 100644
index 0000000000000000000000000000000000000000..752145413db8e770e921b99fd2b6bab00fcf1b4b
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/datasets/data_files.py
@@ -0,0 +1,806 @@
+import os
+import re
+from functools import partial
+from glob import has_magic
+from pathlib import Path, PurePath
+from typing import Callable, Dict, List, Optional, Set, Tuple, Union
+
+import huggingface_hub
+from fsspec import get_fs_token_paths
+from fsspec.implementations.http import HTTPFileSystem
+from huggingface_hub import HfFileSystem
+from packaging import version
+from tqdm.contrib.concurrent import thread_map
+
+from . import config
+from .download import DownloadConfig
+from .download.streaming_download_manager import _prepare_path_and_storage_options, xbasename, xjoin
+from .naming import _split_re
+from .splits import Split
+from .utils import logging
+from .utils import tqdm as hf_tqdm
+from .utils.file_utils import is_local_path, is_relative_path
+from .utils.py_utils import glob_pattern_to_regex, string_to_dict
+
+
+SANITIZED_DEFAULT_SPLIT = str(Split.TRAIN)
+
+
+logger = logging.get_logger(__name__)
+
+
+class Url(str):
+ pass
+
+
+class EmptyDatasetError(FileNotFoundError):
+ pass
+
+
+SPLIT_PATTERN_SHARDED = "data/{split}-[0-9][0-9][0-9][0-9][0-9]-of-[0-9][0-9][0-9][0-9][0-9]*.*"
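+# e.g. "data/train-00000-of-00003.parquet" matches the sharded pattern above for the "train" split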
+
+SPLIT_KEYWORDS = {
+ Split.TRAIN: ["train", "training"],
+ Split.VALIDATION: ["validation", "valid", "dev", "val"],
+ Split.TEST: ["test", "testing", "eval", "evaluation"],
+}
+NON_WORDS_CHARS = "-._ 0-9"
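+# The patterns below are meant to match a split keyword inside a path: with the keyword "train", they
+# are intended to match names such as "train.csv" or "data/train-00000-of-00001.parquet" (illustrative
+# examples); the exact glob syntax used depends on the installed fsspec version.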
+if config.FSSPEC_VERSION < version.parse("2023.9.0"):
+ KEYWORDS_IN_PATH_NAME_BASE_PATTERNS = ["{keyword}[{sep}/]**", "**[{sep}/]{keyword}[{sep}/]**"]
+elif config.FSSPEC_VERSION < version.parse("2023.12.0"):
+ KEYWORDS_IN_PATH_NAME_BASE_PATTERNS = ["{keyword}[{sep}/]**", "**/*[{sep}/]{keyword}[{sep}/]**"]
+else:
+ KEYWORDS_IN_PATH_NAME_BASE_PATTERNS = [
+ "**/{keyword}[{sep}]*",
+ "**/{keyword}/**",
+ "**/*[{sep}]{keyword}[{sep}]*",
+ "**/*[{sep}]{keyword}[{sep}]*/**",
+ "**/{keyword}[{sep}]*/**",
+ "**/*[{sep}]{keyword}/**",
+ ]
+
+DEFAULT_SPLITS = [Split.TRAIN, Split.VALIDATION, Split.TEST]
+DEFAULT_PATTERNS_SPLIT_IN_PATH_NAME = {
+ split: [
+ pattern.format(keyword=keyword, sep=NON_WORDS_CHARS)
+ for keyword in SPLIT_KEYWORDS[split]
+ for pattern in KEYWORDS_IN_PATH_NAME_BASE_PATTERNS
+ ]
+ for split in DEFAULT_SPLITS
+}
+
+DEFAULT_PATTERNS_ALL = {
+ Split.TRAIN: ["**"],
+}
+
+ALL_SPLIT_PATTERNS = [SPLIT_PATTERN_SHARDED]
+ALL_DEFAULT_PATTERNS = [
+ DEFAULT_PATTERNS_SPLIT_IN_PATH_NAME,
+ DEFAULT_PATTERNS_ALL,
+]
+if config.FSSPEC_VERSION < version.parse("2023.9.0"):
+ METADATA_PATTERNS = [
+ "metadata.csv",
+ "**/metadata.csv",
+ "metadata.jsonl",
+ "**/metadata.jsonl",
+ ] # metadata file for ImageFolder and AudioFolder
+else:
+ METADATA_PATTERNS = [
+ "**/metadata.csv",
+ "**/metadata.jsonl",
+ ] # metadata file for ImageFolder and AudioFolder
+WILDCARD_CHARACTERS = "*[]"
+FILES_TO_IGNORE = [
+ "README.md",
+ "config.json",
+ "dataset_info.json",
+ "dataset_infos.json",
+ "dummy_data.zip",
+ "dataset_dict.json",
+]
+
+
+def contains_wildcards(pattern: str) -> bool:
+    return any(wildcard_character in pattern for wildcard_character in WILDCARD_CHARACTERS)
+
+
+def sanitize_patterns(patterns: Union[Dict, List, str]) -> Dict[str, Union[List[str], "DataFilesList"]]:
+ """
+ Take the data_files patterns from the user, and format them into a dictionary.
+ Each key is the name of the split, and each value is a list of data files patterns (paths or urls).
+ The default split is "train".
+
+ Returns:
+ patterns: dictionary of split_name -> list of patterns
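+
+    Example (illustrative sketch with hypothetical paths)::
+
+        >>> sanitize_patterns("path/to/*.csv")
+        {'train': ['path/to/*.csv']}
+        >>> sanitize_patterns(["train.csv", "test.csv"])
+        {'train': ['train.csv', 'test.csv']}
+        >>> sanitize_patterns({"train": "train.csv", "test": "test.csv"})
+        {'train': ['train.csv'], 'test': ['test.csv']}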
+ """
+ if isinstance(patterns, dict):
+ return {str(key): value if isinstance(value, list) else [value] for key, value in patterns.items()}
+ elif isinstance(patterns, str):
+ return {SANITIZED_DEFAULT_SPLIT: [patterns]}
+ elif isinstance(patterns, list):
+ if any(isinstance(pattern, dict) for pattern in patterns):
+ for pattern in patterns:
+ if not (
+ isinstance(pattern, dict)
+ and len(pattern) == 2
+ and "split" in pattern
+ and isinstance(pattern.get("path"), (str, list))
+ ):
+ raise ValueError(
+ f"Expected each split to have a 'path' key which can be a string or a list of strings, but got {pattern}"
+ )
+ splits = [pattern["split"] for pattern in patterns]
+ if len(set(splits)) != len(splits):
+ raise ValueError(f"Some splits are duplicated in data_files: {splits}")
+ return {
+ str(pattern["split"]): pattern["path"] if isinstance(pattern["path"], list) else [pattern["path"]]
+ for pattern in patterns
+ }
+ else:
+ return {SANITIZED_DEFAULT_SPLIT: patterns}
+ else:
+ return sanitize_patterns(list(patterns))
+
+
+def _is_inside_unrequested_special_dir(matched_rel_path: str, pattern: str) -> bool:
+ """
+    When a path matches a pattern, we additionally check if it's inside a special directory
+ we ignore by default (if it starts with a double underscore).
+
+    Users can still request a filepath inside such a directory if the special directory name
+    (e.g. "__pycache__") is mentioned explicitly in the requested pattern.
+
+ Some examples:
+
+ base directory:
+
+ ./
+ └── __pycache__
+ └── b.txt
+
+ >>> _is_inside_unrequested_special_dir("__pycache__/b.txt", "**")
+ True
+ >>> _is_inside_unrequested_special_dir("__pycache__/b.txt", "*/b.txt")
+ True
+ >>> _is_inside_unrequested_special_dir("__pycache__/b.txt", "__pycache__/*")
+ False
+ >>> _is_inside_unrequested_special_dir("__pycache__/b.txt", "__*/*")
+ False
+ """
+    # We just need to check that every special directory from the path is explicitly present in the pattern.
+    # Since we assume that the path matches the pattern, this is equivalent to checking that both
+    # the parent path and the parent pattern have the same number of special directories.
+ data_dirs_to_ignore_in_path = [part for part in PurePath(matched_rel_path).parent.parts if part.startswith("__")]
+ data_dirs_to_ignore_in_pattern = [part for part in PurePath(pattern).parent.parts if part.startswith("__")]
+ return len(data_dirs_to_ignore_in_path) != len(data_dirs_to_ignore_in_pattern)
+
+
+def _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(matched_rel_path: str, pattern: str) -> bool:
+ """
+    When a path matches a pattern, we additionally check if it's a hidden file or if it's inside
+ a hidden directory we ignore by default, i.e. if the file name or a parent directory name starts with a dot.
+
+ Users can still explicitly request a filepath that is hidden or is inside a hidden directory
+ if the hidden part is mentioned explicitly in the requested pattern.
+
+ Some examples:
+
+ base directory:
+
+ ./
+ └── .hidden_file.txt
+
+ >>> _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(".hidden_file.txt", "**")
+ True
+ >>> _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(".hidden_file.txt", ".*")
+ False
+
+ base directory:
+
+ ./
+ └── .hidden_dir
+ └── a.txt
+
+ >>> _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(".hidden_dir/a.txt", "**")
+ True
+ >>> _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(".hidden_dir/a.txt", ".*/*")
+ False
+ >>> _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(".hidden_dir/a.txt", ".hidden_dir/*")
+ False
+
+ base directory:
+
+ ./
+ └── .hidden_dir
+ └── .hidden_file.txt
+
+ >>> _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(".hidden_dir/.hidden_file.txt", "**")
+ True
+ >>> _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(".hidden_dir/.hidden_file.txt", ".*/*")
+ True
+ >>> _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(".hidden_dir/.hidden_file.txt", ".*/.*")
+ False
+ >>> _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(".hidden_dir/.hidden_file.txt", ".hidden_dir/*")
+ True
+ >>> _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(".hidden_dir/.hidden_file.txt", ".hidden_dir/.*")
+ False
+ """
+    # We just need to check that every hidden part of the path is explicitly present in the pattern.
+    # Since we assume that the path matches the pattern, this is equivalent to checking that both
+    # the path and the pattern have the same number of hidden parts.
+ hidden_directories_in_path = [
+ part for part in PurePath(matched_rel_path).parts if part.startswith(".") and not set(part) == {"."}
+ ]
+ hidden_directories_in_pattern = [
+ part for part in PurePath(pattern).parts if part.startswith(".") and not set(part) == {"."}
+ ]
+ return len(hidden_directories_in_path) != len(hidden_directories_in_pattern)
+
+
+def _get_data_files_patterns(pattern_resolver: Callable[[str], List[str]]) -> Dict[str, List[str]]:
+ """
+ Get the default pattern from a directory or repository by testing all the supported patterns.
+    The first pattern to return a non-empty list of data files is returned.
+
+ In order, it first tests if SPLIT_PATTERN_SHARDED works, otherwise it tests the patterns in ALL_DEFAULT_PATTERNS.
+ """
+ # first check the split patterns like data/{split}-00000-of-00001.parquet
+ for split_pattern in ALL_SPLIT_PATTERNS:
+ pattern = split_pattern.replace("{split}", "*")
+ try:
+ data_files = pattern_resolver(pattern)
+ except FileNotFoundError:
+ continue
+ if len(data_files) > 0:
+ splits: Set[str] = {
+ string_to_dict(xbasename(p), glob_pattern_to_regex(xbasename(split_pattern)))["split"]
+ for p in data_files
+ }
+ if any(not re.match(_split_re, split) for split in splits):
+                raise ValueError(f"Split name should match '{_split_re}' but got '{splits}'.")
+ sorted_splits = [str(split) for split in DEFAULT_SPLITS if split in splits] + sorted(
+ splits - set(DEFAULT_SPLITS)
+ )
+ return {split: [split_pattern.format(split=split)] for split in sorted_splits}
+ # then check the default patterns based on train/valid/test splits
+ for patterns_dict in ALL_DEFAULT_PATTERNS:
+ non_empty_splits = []
+ for split, patterns in patterns_dict.items():
+ for pattern in patterns:
+ try:
+ data_files = pattern_resolver(pattern)
+ except FileNotFoundError:
+ continue
+ if len(data_files) > 0:
+ non_empty_splits.append(split)
+ break
+ if non_empty_splits:
+ return {split: patterns_dict[split] for split in non_empty_splits}
+ raise FileNotFoundError(f"Couldn't resolve pattern {pattern} with resolver {pattern_resolver}")
+
+
+def _get_metadata_files_patterns(pattern_resolver: Callable[[str], List[str]]) -> List[str]:
+ """
+ Get the supported metadata patterns from a directory or repository.
+ """
+ non_empty_patterns = []
+ for pattern in METADATA_PATTERNS:
+ try:
+ metadata_files = pattern_resolver(pattern)
+ if len(metadata_files) > 0:
+ non_empty_patterns.append(pattern)
+ except FileNotFoundError:
+ pass
+ if non_empty_patterns:
+ return non_empty_patterns
+ raise FileNotFoundError(f"Couldn't resolve pattern {pattern} with resolver {pattern_resolver}")
+
+
+def resolve_pattern(
+ pattern: str,
+ base_path: str,
+ allowed_extensions: Optional[List[str]] = None,
+ download_config: Optional[DownloadConfig] = None,
+) -> List[str]:
+ """
+ Resolve the paths and URLs of the data files from the pattern passed by the user.
+
+ You can use patterns to resolve multiple local files. Here are a few examples:
+ - *.csv to match all the CSV files at the first level
+ - **.csv to match all the CSV files at any level
+ - data/* to match all the files inside "data"
+ - data/** to match all the files inside "data" and its subdirectories
+
+ The patterns are resolved using the fsspec glob. In fsspec>=2023.12.0 this is equivalent to
+ Python's glob.glob, Path.glob, Path.match and fnmatch where ** is unsupported with a prefix/suffix
+ other than a forward slash /.
+
+ More generally:
+ - '*' matches any character except a forward-slash (to match just the file or directory name)
+ - '**' matches any character including a forward-slash /
+
+ Hidden files and directories (i.e. whose names start with a dot) are ignored, unless they are explicitly requested.
+ The same applies to special directories that start with a double underscore like "__pycache__".
+    You can still include one if the pattern explicitly mentions it:
+ - to include a hidden file: "*/.hidden.txt" or "*/.*"
+ - to include a hidden directory: ".hidden/*" or ".*/*"
+ - to include a special directory: "__special__/*" or "__*/*"
+
+ Example::
+
+ >>> from datasets.data_files import resolve_pattern
+ >>> base_path = "."
+ >>> resolve_pattern("docs/**/*.py", base_path)
+        ['/Users/mariosasko/Desktop/projects/datasets/docs/source/_config.py']
+
+ Args:
+ pattern (str): Unix pattern or paths or URLs of the data files to resolve.
+ The paths can be absolute or relative to base_path.
+ Remote filesystems using fsspec are supported, e.g. with the hf:// protocol.
+ base_path (str): Base path to use when resolving relative paths.
+ allowed_extensions (Optional[list], optional): White-list of file extensions to use. Defaults to None (all extensions).
+ For example: allowed_extensions=[".csv", ".json", ".txt", ".parquet"]
+ Returns:
+ List[str]: List of paths or URLs to the local or remote files that match the patterns.
+ """
+ if is_relative_path(pattern):
+ pattern = xjoin(base_path, pattern)
+ elif is_local_path(pattern):
+ base_path = os.path.splitdrive(pattern)[0] + os.sep
+ else:
+ base_path = ""
+ pattern, storage_options = _prepare_path_and_storage_options(pattern, download_config=download_config)
+ fs, _, _ = get_fs_token_paths(pattern, storage_options=storage_options)
+ fs_base_path = base_path.split("::")[0].split("://")[-1] or fs.root_marker
+ fs_pattern = pattern.split("::")[0].split("://")[-1]
+ files_to_ignore = set(FILES_TO_IGNORE) - {xbasename(pattern)}
+ protocol = fs.protocol if isinstance(fs.protocol, str) else fs.protocol[0]
+ protocol_prefix = protocol + "://" if protocol != "file" else ""
+ glob_kwargs = {}
+ if protocol == "hf" and config.HF_HUB_VERSION >= version.parse("0.20.0"):
+ # 10 times faster glob with detail=True (ignores costly info like lastCommit)
+ glob_kwargs["expand_info"] = False
+ matched_paths = [
+ filepath if filepath.startswith(protocol_prefix) else protocol_prefix + filepath
+ for filepath, info in fs.glob(pattern, detail=True, **glob_kwargs).items()
+ if info["type"] == "file"
+ and (xbasename(filepath) not in files_to_ignore)
+ and not _is_inside_unrequested_special_dir(
+ os.path.relpath(filepath, fs_base_path), os.path.relpath(fs_pattern, fs_base_path)
+ )
+ and not _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(
+ os.path.relpath(filepath, fs_base_path), os.path.relpath(fs_pattern, fs_base_path)
+ )
+    ]  # ignore hidden files (e.g. ".ipynb_checkpoints") and special dirs (e.g. "__pycache__"), but keep ".." path parts
+ if allowed_extensions is not None:
+ out = [
+ filepath
+ for filepath in matched_paths
+ if any("." + suffix in allowed_extensions for suffix in xbasename(filepath).split(".")[1:])
+ ]
+ if len(out) < len(matched_paths):
+ invalid_matched_files = list(set(matched_paths) - set(out))
+ logger.info(
+ f"Some files matched the pattern '{pattern}' but don't have valid data file extensions: {invalid_matched_files}"
+ )
+ else:
+ out = matched_paths
+ if not out:
+ error_msg = f"Unable to find '{pattern}'"
+ if allowed_extensions is not None:
+ error_msg += f" with any supported extension {list(allowed_extensions)}"
+ raise FileNotFoundError(error_msg)
+ return out
+
+
+def get_data_patterns(base_path: str, download_config: Optional[DownloadConfig] = None) -> Dict[str, List[str]]:
+ """
+    Get the default pattern from a directory by testing all the supported patterns.
+    The first pattern to return a non-empty list of data files is returned.
+
+ Some examples of supported patterns:
+
+ Input:
+
+ my_dataset_repository/
+ ├── README.md
+ └── dataset.csv
+
+ Output:
+
+ {"train": ["**"]}
+
+ Input:
+
+ my_dataset_repository/
+ ├── README.md
+ ├── train.csv
+ └── test.csv
+
+ my_dataset_repository/
+ ├── README.md
+ └── data/
+ ├── train.csv
+ └── test.csv
+
+ my_dataset_repository/
+ ├── README.md
+ ├── train_0.csv
+ ├── train_1.csv
+ ├── train_2.csv
+ ├── train_3.csv
+ ├── test_0.csv
+ └── test_1.csv
+
+ Output:
+
+ {'train': ['train[-._ 0-9/]**', '**/*[-._ 0-9/]train[-._ 0-9/]**', 'training[-._ 0-9/]**', '**/*[-._ 0-9/]training[-._ 0-9/]**'],
+ 'test': ['test[-._ 0-9/]**', '**/*[-._ 0-9/]test[-._ 0-9/]**', 'testing[-._ 0-9/]**', '**/*[-._ 0-9/]testing[-._ 0-9/]**', ...]}
+
+ Input:
+
+ my_dataset_repository/
+ ├── README.md
+ └── data/
+ ├── train/
+ │ ├── shard_0.csv
+ │ ├── shard_1.csv
+ │ ├── shard_2.csv
+ │ └── shard_3.csv
+ └── test/
+ ├── shard_0.csv
+ └── shard_1.csv
+
+ Output:
+
+ {'train': ['train[-._ 0-9/]**', '**/*[-._ 0-9/]train[-._ 0-9/]**', 'training[-._ 0-9/]**', '**/*[-._ 0-9/]training[-._ 0-9/]**'],
+ 'test': ['test[-._ 0-9/]**', '**/*[-._ 0-9/]test[-._ 0-9/]**', 'testing[-._ 0-9/]**', '**/*[-._ 0-9/]testing[-._ 0-9/]**', ...]}
+
+ Input:
+
+ my_dataset_repository/
+ ├── README.md
+ └── data/
+ ├── train-00000-of-00003.csv
+ ├── train-00001-of-00003.csv
+ ├── train-00002-of-00003.csv
+ ├── test-00000-of-00001.csv
+ ├── random-00000-of-00003.csv
+ ├── random-00001-of-00003.csv
+ └── random-00002-of-00003.csv
+
+ Output:
+
+ {'train': ['data/train-[0-9][0-9][0-9][0-9][0-9]-of-[0-9][0-9][0-9][0-9][0-9]*.*'],
+ 'test': ['data/test-[0-9][0-9][0-9][0-9][0-9]-of-[0-9][0-9][0-9][0-9][0-9]*.*'],
+ 'random': ['data/random-[0-9][0-9][0-9][0-9][0-9]-of-[0-9][0-9][0-9][0-9][0-9]*.*']}
+
+ In order, it first tests if SPLIT_PATTERN_SHARDED works, otherwise it tests the patterns in ALL_DEFAULT_PATTERNS.
+ """
+ resolver = partial(resolve_pattern, base_path=base_path, download_config=download_config)
+ try:
+ return _get_data_files_patterns(resolver)
+ except FileNotFoundError:
+ raise EmptyDatasetError(f"The directory at {base_path} doesn't contain any data files") from None
+
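+# Illustrative composition of the helpers in this module (the directory name is hypothetical,
+# and this snippet is not part of the upstream file):
+#
+#   from datasets.data_files import get_data_patterns, DataFilesDict
+#   patterns = get_data_patterns("./my_dataset")  # e.g. {"train": [...], "test": [...]}
+#   data_files = DataFilesDict.from_patterns(patterns, base_path="./my_dataset")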
+
+def get_metadata_patterns(
+ base_path: str,
+ download_config: Optional[DownloadConfig] = None,
+) -> List[str]:
+ """
+ Get the supported metadata patterns from a local directory.
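+
+    Example (illustrative; assumes an ImageFolder/AudioFolder-style directory that contains a metadata.csv)::
+
+        >>> get_metadata_patterns("./my_image_folder")  # e.g. ['**/metadata.csv']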
+ """
+ resolver = partial(resolve_pattern, base_path=base_path, download_config=download_config)
+ try:
+ return _get_metadata_files_patterns(resolver)
+ except FileNotFoundError:
+ raise FileNotFoundError(f"The directory at {base_path} doesn't contain any metadata file") from None
+
+
+def _get_single_origin_metadata(
+ data_file: str,
+ download_config: Optional[DownloadConfig] = None,
+) -> Tuple[str]:
+ data_file, storage_options = _prepare_path_and_storage_options(data_file, download_config=download_config)
+ fs, _, _ = get_fs_token_paths(data_file, storage_options=storage_options)
+ if isinstance(fs, HfFileSystem):
+ resolved_path = fs.resolve_path(data_file)
+ return (resolved_path.repo_id, resolved_path.revision)
+ elif isinstance(fs, HTTPFileSystem) and data_file.startswith(config.HF_ENDPOINT):
+ hffs = HfFileSystem(endpoint=config.HF_ENDPOINT, token=download_config.token)
+ data_file = "hf://" + data_file[len(config.HF_ENDPOINT) + 1 :].replace("/resolve/", "@", 1)
+ resolved_path = hffs.resolve_path(data_file)
+ return (resolved_path.repo_id, resolved_path.revision)
+ info = fs.info(data_file)
+ # s3fs uses "ETag", gcsfs uses "etag", and for local we simply check mtime
+ for key in ["ETag", "etag", "mtime"]:
+ if key in info:
+ return (str(info[key]),)
+ return ()
+
+
+def _get_origin_metadata(
+ data_files: List[str],
+ max_workers=64,
+ download_config: Optional[DownloadConfig] = None,
+) -> List[Tuple[str]]:
+ return thread_map(
+ partial(_get_single_origin_metadata, download_config=download_config),
+ data_files,
+ max_workers=max_workers,
+ tqdm_class=hf_tqdm,
+ desc="Resolving data files",
+ # set `disable=None` rather than `disable=False` by default to disable progress bar when no TTY attached
+ disable=len(data_files) <= 16 or None,
+ )
+
+
+class DataFilesList(List[str]):
+ """
+ List of data files (absolute local paths or URLs).
+    It has two construction methods given the user's data files patterns:
+ - ``from_hf_repo``: resolve patterns inside a dataset repository
+ - ``from_local_or_remote``: resolve patterns from a local path
+
+ Moreover DataFilesList has an additional attribute ``origin_metadata``.
+ It can store:
+ - the last modified time of local files
+ - ETag of remote files
+ - commit sha of a dataset repository
+
+ Thanks to this additional attribute, it is possible to hash the list
+ and get a different hash if and only if at least one file changed.
+ This is useful for caching Dataset objects that are obtained from a list of data files.
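+
+    Example (illustrative sketch; the CSV files are hypothetical)::
+
+        >>> data_files = DataFilesList.from_patterns(["data/*.csv"])
+        >>> data_files.origin_metadata  # e.g. one mtime/ETag-based tuple per resolved file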
+ """
+
+ def __init__(self, data_files: List[str], origin_metadata: List[Tuple[str]]):
+ super().__init__(data_files)
+ self.origin_metadata = origin_metadata
+
+ def __add__(self, other):
+ return DataFilesList([*self, *other], self.origin_metadata + other.origin_metadata)
+
+ @classmethod
+ def from_hf_repo(
+ cls,
+ patterns: List[str],
+ dataset_info: huggingface_hub.hf_api.DatasetInfo,
+ base_path: Optional[str] = None,
+ allowed_extensions: Optional[List[str]] = None,
+ download_config: Optional[DownloadConfig] = None,
+ ) -> "DataFilesList":
+ base_path = f"hf://datasets/{dataset_info.id}@{dataset_info.sha}/{base_path or ''}".rstrip("/")
+ return cls.from_patterns(
+ patterns, base_path=base_path, allowed_extensions=allowed_extensions, download_config=download_config
+ )
+
+ @classmethod
+ def from_local_or_remote(
+ cls,
+ patterns: List[str],
+ base_path: Optional[str] = None,
+ allowed_extensions: Optional[List[str]] = None,
+ download_config: Optional[DownloadConfig] = None,
+ ) -> "DataFilesList":
+ base_path = base_path if base_path is not None else Path().resolve().as_posix()
+ return cls.from_patterns(
+ patterns, base_path=base_path, allowed_extensions=allowed_extensions, download_config=download_config
+ )
+
+ @classmethod
+ def from_patterns(
+ cls,
+ patterns: List[str],
+ base_path: Optional[str] = None,
+ allowed_extensions: Optional[List[str]] = None,
+ download_config: Optional[DownloadConfig] = None,
+ ) -> "DataFilesList":
+ base_path = base_path if base_path is not None else Path().resolve().as_posix()
+ data_files = []
+ for pattern in patterns:
+ try:
+ data_files.extend(
+ resolve_pattern(
+ pattern,
+ base_path=base_path,
+ allowed_extensions=allowed_extensions,
+ download_config=download_config,
+ )
+ )
+ except FileNotFoundError:
+ if not has_magic(pattern):
+ raise
+ origin_metadata = _get_origin_metadata(data_files, download_config=download_config)
+ return cls(data_files, origin_metadata)
+
+ def filter_extensions(self, extensions: List[str]) -> "DataFilesList":
+ pattern = "|".join("\\" + ext for ext in extensions)
+ pattern = re.compile(f".*({pattern})(\\..+)?$")
+ return DataFilesList(
+ [data_file for data_file in self if pattern.match(data_file)],
+ origin_metadata=self.origin_metadata,
+ )
+
+
+class DataFilesDict(Dict[str, DataFilesList]):
+ """
+ Dict of split_name -> list of data files (absolute local paths or URLs).
+    It has two construction methods given the user's data files patterns:
+ - ``from_hf_repo``: resolve patterns inside a dataset repository
+ - ``from_local_or_remote``: resolve patterns from a local path
+
+ Moreover each list is a DataFilesList. It is possible to hash the dictionary
+ and get a different hash if and only if at least one file changed.
+ For more info, see ``DataFilesList``.
+
+ This is useful for caching Dataset objects that are obtained from a list of data files.
+
+ Changing the order of the keys of this dictionary also doesn't change its hash.
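+
+    Example (illustrative sketch; patterns are hypothetical)::
+
+        >>> data_files = DataFilesDict.from_patterns({"train": ["data/train-*.csv"], "test": ["data/test-*.csv"]})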
+ """
+
+ @classmethod
+ def from_local_or_remote(
+ cls,
+ patterns: Dict[str, Union[List[str], DataFilesList]],
+ base_path: Optional[str] = None,
+ allowed_extensions: Optional[List[str]] = None,
+ download_config: Optional[DownloadConfig] = None,
+ ) -> "DataFilesDict":
+ out = cls()
+ for key, patterns_for_key in patterns.items():
+ out[key] = (
+ DataFilesList.from_local_or_remote(
+ patterns_for_key,
+ base_path=base_path,
+ allowed_extensions=allowed_extensions,
+ download_config=download_config,
+ )
+ if not isinstance(patterns_for_key, DataFilesList)
+ else patterns_for_key
+ )
+ return out
+
+ @classmethod
+ def from_hf_repo(
+ cls,
+ patterns: Dict[str, Union[List[str], DataFilesList]],
+ dataset_info: huggingface_hub.hf_api.DatasetInfo,
+ base_path: Optional[str] = None,
+ allowed_extensions: Optional[List[str]] = None,
+ download_config: Optional[DownloadConfig] = None,
+ ) -> "DataFilesDict":
+ out = cls()
+ for key, patterns_for_key in patterns.items():
+ out[key] = (
+ DataFilesList.from_hf_repo(
+ patterns_for_key,
+ dataset_info=dataset_info,
+ base_path=base_path,
+ allowed_extensions=allowed_extensions,
+ download_config=download_config,
+ )
+ if not isinstance(patterns_for_key, DataFilesList)
+ else patterns_for_key
+ )
+ return out
+
+ @classmethod
+ def from_patterns(
+ cls,
+ patterns: Dict[str, Union[List[str], DataFilesList]],
+ base_path: Optional[str] = None,
+ allowed_extensions: Optional[List[str]] = None,
+ download_config: Optional[DownloadConfig] = None,
+ ) -> "DataFilesDict":
+ out = cls()
+ for key, patterns_for_key in patterns.items():
+ out[key] = (
+ DataFilesList.from_patterns(
+ patterns_for_key,
+ base_path=base_path,
+ allowed_extensions=allowed_extensions,
+ download_config=download_config,
+ )
+ if not isinstance(patterns_for_key, DataFilesList)
+ else patterns_for_key
+ )
+ return out
+
+ def filter_extensions(self, extensions: List[str]) -> "DataFilesDict":
+ out = type(self)()
+ for key, data_files_list in self.items():
+ out[key] = data_files_list.filter_extensions(extensions)
+ return out
+
+
+class DataFilesPatternsList(List[str]):
+ """
+ List of data files patterns (absolute local paths or URLs).
+ For each pattern there should also be a list of allowed extensions
+    to keep, or None to keep all the files for the pattern.
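+
+    Example (illustrative sketch; patterns and extensions are hypothetical)::
+
+        >>> patterns = DataFilesPatternsList.from_patterns(["data/*.csv"], allowed_extensions=[".csv"])
+        >>> data_files = patterns.resolve(base_path=".")  # lazily resolves into a DataFilesList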
+ """
+
+ def __init__(
+ self,
+ patterns: List[str],
+ allowed_extensions: List[Optional[List[str]]],
+ ):
+ super().__init__(patterns)
+ self.allowed_extensions = allowed_extensions
+
+ def __add__(self, other):
+        return DataFilesPatternsList([*self, *other], self.allowed_extensions + other.allowed_extensions)
+
+ @classmethod
+ def from_patterns(
+ cls, patterns: List[str], allowed_extensions: Optional[List[str]] = None
+ ) -> "DataFilesPatternsDict":
+ return cls(patterns, [allowed_extensions] * len(patterns))
+
+ def resolve(
+ self,
+ base_path: str,
+ download_config: Optional[DownloadConfig] = None,
+ ) -> "DataFilesList":
+ base_path = base_path if base_path is not None else Path().resolve().as_posix()
+ data_files = []
+ for pattern, allowed_extensions in zip(self, self.allowed_extensions):
+ try:
+ data_files.extend(
+ resolve_pattern(
+ pattern,
+ base_path=base_path,
+ allowed_extensions=allowed_extensions,
+ download_config=download_config,
+ )
+ )
+ except FileNotFoundError:
+ if not has_magic(pattern):
+ raise
+ origin_metadata = _get_origin_metadata(data_files, download_config=download_config)
+ return DataFilesList(data_files, origin_metadata)
+
+ def filter_extensions(self, extensions: List[str]) -> "DataFilesList":
+ return DataFilesPatternsList(
+            self, [(allowed_extensions or []) + extensions for allowed_extensions in self.allowed_extensions]
+ )
+
+
+class DataFilesPatternsDict(Dict[str, DataFilesPatternsList]):
+ """
+ Dict of split_name -> list of data files patterns (absolute local paths or URLs).
+ """
+
+ @classmethod
+ def from_patterns(
+ cls, patterns: Dict[str, List[str]], allowed_extensions: Optional[List[str]] = None
+ ) -> "DataFilesPatternsDict":
+ out = cls()
+ for key, patterns_for_key in patterns.items():
+ out[key] = (
+ DataFilesPatternsList.from_patterns(
+ patterns_for_key,
+ allowed_extensions=allowed_extensions,
+ )
+ if not isinstance(patterns_for_key, DataFilesPatternsList)
+ else patterns_for_key
+ )
+ return out
+
+ def resolve(
+ self,
+ base_path: str,
+ download_config: Optional[DownloadConfig] = None,
+ ) -> "DataFilesDict":
+ out = DataFilesDict()
+ for key, data_files_patterns_list in self.items():
+ out[key] = data_files_patterns_list.resolve(base_path, download_config)
+ return out
+
+ def filter_extensions(self, extensions: List[str]) -> "DataFilesPatternsDict":
+ out = type(self)()
+ for key, data_files_patterns_list in self.items():
+ out[key] = data_files_patterns_list.filter_extensions(extensions)
+ return out
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/dataset_dict.py b/evalkit_tf437/lib/python3.10/site-packages/datasets/dataset_dict.py
new file mode 100644
index 0000000000000000000000000000000000000000..ab26dbbb83d599cf62e6485d4d647871ea7f3a0d
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/datasets/dataset_dict.py
@@ -0,0 +1,2288 @@
+import contextlib
+import copy
+import fnmatch
+import json
+import math
+import posixpath
+import re
+import warnings
+from io import BytesIO
+from pathlib import Path
+from typing import Callable, Dict, List, Optional, Sequence, Tuple, Union
+
+import fsspec
+import numpy as np
+from huggingface_hub import (
+ CommitInfo,
+ CommitOperationAdd,
+ CommitOperationDelete,
+ DatasetCard,
+ DatasetCardData,
+ HfApi,
+)
+
+from . import config
+from .arrow_dataset import PUSH_TO_HUB_WITHOUT_METADATA_CONFIGS_SPLIT_PATTERN_SHARDED, Dataset
+from .features import Features
+from .features.features import FeatureType
+from .info import DatasetInfo, DatasetInfosDict
+from .naming import _split_re
+from .splits import NamedSplit, Split, SplitDict, SplitInfo
+from .table import Table
+from .tasks import TaskTemplate
+from .utils import logging
+from .utils.deprecation_utils import deprecated
+from .utils.doc_utils import is_documented_by
+from .utils.hub import list_files_info
+from .utils.metadata import MetadataConfigs
+from .utils.py_utils import asdict, glob_pattern_to_regex, string_to_dict
+from .utils.typing import PathLike
+
+
+logger = logging.get_logger(__name__)
+
+
+class DatasetDict(dict):
+ """A dictionary (dict of str: datasets.Dataset) with dataset transforms methods (map, filter, etc.)"""
+
+ def _check_values_type(self):
+ for dataset in self.values():
+ if not isinstance(dataset, Dataset):
+ raise TypeError(f"Values in `DatasetDict` should be of type `Dataset` but got type '{type(dataset)}'")
+
+ def _check_values_features(self):
+ items = list(self.items())
+ for item_a, item_b in zip(items[:-1], items[1:]):
+ if item_a[1].features != item_b[1].features:
+ raise ValueError(
+ f"All datasets in `DatasetDict` should have the same features but features for '{item_a[0]}' and '{item_b[0]}' don't match: {item_a[1].features} != {item_b[1].features}"
+ )
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+        # Here `del` is used to delete the references to the pyarrow tables. This properly closes the files used for memory-mapped tables
+ for dataset in self.values():
+ if hasattr(dataset, "_data"):
+ del dataset._data
+ if hasattr(dataset, "_indices"):
+ del dataset._indices
+
+ def __getitem__(self, k) -> Dataset:
+ if isinstance(k, (str, NamedSplit)) or len(self) == 0:
+ return super().__getitem__(k)
+ else:
+ available_suggested_splits = [
+ split for split in (Split.TRAIN, Split.TEST, Split.VALIDATION) if split in self
+ ]
+ suggested_split = available_suggested_splits[0] if available_suggested_splits else list(self)[0]
+ raise KeyError(
+ f"Invalid key: {k}. Please first select a split. For example: "
+ f"`my_dataset_dictionary['{suggested_split}'][{k}]`. "
+ f"Available splits: {sorted(self)}"
+ )
+
+ @property
+ def data(self) -> Dict[str, Table]:
+ """The Apache Arrow tables backing each split.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes")
+ >>> ds.data
+ ```
+ """
+ self._check_values_type()
+ return {k: dataset.data for k, dataset in self.items()}
+
+ @property
+ def cache_files(self) -> Dict[str, Dict]:
+ """The cache files containing the Apache Arrow table backing each split.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes")
+ >>> ds.cache_files
+ {'test': [{'filename': '/root/.cache/huggingface/datasets/rotten_tomatoes_movie_review/default/1.0.0/40d411e45a6ce3484deed7cc15b82a53dad9a72aafd9f86f8f227134bec5ca46/rotten_tomatoes_movie_review-test.arrow'}],
+ 'train': [{'filename': '/root/.cache/huggingface/datasets/rotten_tomatoes_movie_review/default/1.0.0/40d411e45a6ce3484deed7cc15b82a53dad9a72aafd9f86f8f227134bec5ca46/rotten_tomatoes_movie_review-train.arrow'}],
+ 'validation': [{'filename': '/root/.cache/huggingface/datasets/rotten_tomatoes_movie_review/default/1.0.0/40d411e45a6ce3484deed7cc15b82a53dad9a72aafd9f86f8f227134bec5ca46/rotten_tomatoes_movie_review-validation.arrow'}]}
+ ```
+ """
+ self._check_values_type()
+ return {k: dataset.cache_files for k, dataset in self.items()}
+
+ @property
+ def num_columns(self) -> Dict[str, int]:
+ """Number of columns in each split of the dataset.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes")
+ >>> ds.num_columns
+ {'test': 2, 'train': 2, 'validation': 2}
+ ```
+ """
+ self._check_values_type()
+ return {k: dataset.num_columns for k, dataset in self.items()}
+
+ @property
+ def num_rows(self) -> Dict[str, int]:
+ """Number of rows in each split of the dataset (same as :func:`datasets.Dataset.__len__`).
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes")
+ >>> ds.num_rows
+ {'test': 1066, 'train': 8530, 'validation': 1066}
+ ```
+ """
+ self._check_values_type()
+ return {k: dataset.num_rows for k, dataset in self.items()}
+
+ @property
+ def column_names(self) -> Dict[str, List[str]]:
+ """Names of the columns in each split of the dataset.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes")
+ >>> ds.column_names
+ {'test': ['text', 'label'],
+ 'train': ['text', 'label'],
+ 'validation': ['text', 'label']}
+ ```
+ """
+ self._check_values_type()
+ return {k: dataset.column_names for k, dataset in self.items()}
+
+ @property
+ def shape(self) -> Dict[str, Tuple[int]]:
+ """Shape of each split of the dataset (number of columns, number of rows).
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes")
+ >>> ds.shape
+ {'test': (1066, 2), 'train': (8530, 2), 'validation': (1066, 2)}
+ ```
+ """
+ self._check_values_type()
+ return {k: dataset.shape for k, dataset in self.items()}
+
+ def flatten(self, max_depth=16) -> "DatasetDict":
+ """Flatten the Apache Arrow Table of each split (nested features are flatten).
+ Each column with a struct type is flattened into one column per struct field.
+ Other columns are left unchanged.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("squad")
+ >>> ds["train"].features
+ {'answers': Sequence(feature={'text': Value(dtype='string', id=None), 'answer_start': Value(dtype='int32', id=None)}, length=-1, id=None),
+ 'context': Value(dtype='string', id=None),
+ 'id': Value(dtype='string', id=None),
+ 'question': Value(dtype='string', id=None),
+ 'title': Value(dtype='string', id=None)}
+ >>> ds.flatten()
+ DatasetDict({
+ train: Dataset({
+ features: ['id', 'title', 'context', 'question', 'answers.text', 'answers.answer_start'],
+ num_rows: 87599
+ })
+ validation: Dataset({
+ features: ['id', 'title', 'context', 'question', 'answers.text', 'answers.answer_start'],
+ num_rows: 10570
+ })
+ })
+ ```
+ """
+ self._check_values_type()
+ return DatasetDict({k: dataset.flatten(max_depth=max_depth) for k, dataset in self.items()})
+
+ def unique(self, column: str) -> Dict[str, List]:
+ """Return a list of the unique elements in a column for each split.
+
+        This is implemented in the low-level backend and is therefore very fast.
+
+ Args:
+ column (`str`):
+ column name (list all the column names with [`~datasets.Dataset.column_names`])
+
+ Returns:
+ Dict[`str`, `list`]: Dictionary of unique elements in the given column.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes")
+ >>> ds.unique("label")
+ {'test': [1, 0], 'train': [1, 0], 'validation': [1, 0]}
+ ```
+ """
+ self._check_values_type()
+ return {k: dataset.unique(column) for k, dataset in self.items()}
+
+ def cleanup_cache_files(self) -> Dict[str, int]:
+ """Clean up all cache files in the dataset cache directory, excepted the currently used cache file if there is one.
+ Be careful when running this command that no other process is currently using other cache files.
+
+ Return:
+ `Dict` with the number of removed files for each split
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes")
+ >>> ds.cleanup_cache_files()
+ {'test': 0, 'train': 0, 'validation': 0}
+ ```
+ """
+ self._check_values_type()
+ return {k: dataset.cleanup_cache_files() for k, dataset in self.items()}
+
+ def __repr__(self):
+ repr = "\n".join([f"{k}: {v}" for k, v in self.items()])
+ repr = re.sub(r"^", " " * 4, repr, 0, re.M)
+ return f"DatasetDict({{\n{repr}\n}})"
+
+ def cast(self, features: Features) -> "DatasetDict":
+ """
+ Cast the dataset to a new set of features.
+ The transformation is applied to all the datasets of the dataset dictionary.
+
+        You can also change the feature types using [`Dataset.map`] with `features`, but `cast`
+        doesn't copy the data to a new dataset and is thus faster.
+
+ Args:
+ features ([`Features`]):
+ New features to cast the dataset to.
+ The name and order of the fields in the features must match the current column names.
+ The type of the data must also be convertible from one type to the other.
+ For non-trivial conversion, e.g. `string` <-> `ClassLabel` you should use [`~Dataset.map`] to update the Dataset.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes")
+ >>> ds["train"].features
+ {'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None),
+ 'text': Value(dtype='string', id=None)}
+ >>> new_features = ds["train"].features.copy()
+ >>> new_features['label'] = ClassLabel(names=['bad', 'good'])
+ >>> new_features['text'] = Value('large_string')
+ >>> ds = ds.cast(new_features)
+ >>> ds["train"].features
+ {'label': ClassLabel(num_classes=2, names=['bad', 'good'], id=None),
+ 'text': Value(dtype='large_string', id=None)}
+ ```
+ """
+ self._check_values_type()
+ return DatasetDict({k: dataset.cast(features=features) for k, dataset in self.items()})
+
+ def cast_column(self, column: str, feature) -> "DatasetDict":
+ """Cast column to feature for decoding.
+
+ Args:
+ column (`str`):
+ Column name.
+ feature ([`Feature`]):
+ Target feature.
+
+ Returns:
+ [`DatasetDict`]
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes")
+ >>> ds["train"].features
+ {'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None),
+ 'text': Value(dtype='string', id=None)}
+ >>> ds = ds.cast_column('label', ClassLabel(names=['bad', 'good']))
+ >>> ds["train"].features
+ {'label': ClassLabel(num_classes=2, names=['bad', 'good'], id=None),
+ 'text': Value(dtype='string', id=None)}
+ ```
+ """
+ self._check_values_type()
+ return DatasetDict({k: dataset.cast_column(column=column, feature=feature) for k, dataset in self.items()})
+
+ def remove_columns(self, column_names: Union[str, List[str]]) -> "DatasetDict":
+ """
+ Remove one or several column(s) from each split in the dataset
+ and the features associated to the column(s).
+
+ The transformation is applied to all the splits of the dataset dictionary.
+
+ You can also remove a column using [`Dataset.map`] with `remove_columns` but the present method
+ is in-place (doesn't copy the data to a new dataset) and is thus faster.
+
+ Args:
+ column_names (`Union[str, List[str]]`):
+ Name of the column(s) to remove.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes")
+ >>> ds.remove_columns("label")
+ DatasetDict({
+ train: Dataset({
+ features: ['text'],
+ num_rows: 8530
+ })
+ validation: Dataset({
+ features: ['text'],
+ num_rows: 1066
+ })
+ test: Dataset({
+ features: ['text'],
+ num_rows: 1066
+ })
+ })
+ ```
+ """
+ self._check_values_type()
+ return DatasetDict({k: dataset.remove_columns(column_names=column_names) for k, dataset in self.items()})
+
+ def rename_column(self, original_column_name: str, new_column_name: str) -> "DatasetDict":
+ """
+ Rename a column in the dataset and move the features associated to the original column under the new column name.
+ The transformation is applied to all the datasets of the dataset dictionary.
+
+ You can also rename a column using [`~Dataset.map`] with `remove_columns` but the present method:
+ - takes care of moving the original features under the new column name.
+ - doesn't copy the data to a new dataset and is thus much faster.
+
+ Args:
+ original_column_name (`str`):
+ Name of the column to rename.
+ new_column_name (`str`):
+ New name for the column.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes")
+ >>> ds.rename_column("label", "label_new")
+ DatasetDict({
+ train: Dataset({
+ features: ['text', 'label_new'],
+ num_rows: 8530
+ })
+ validation: Dataset({
+ features: ['text', 'label_new'],
+ num_rows: 1066
+ })
+ test: Dataset({
+ features: ['text', 'label_new'],
+ num_rows: 1066
+ })
+ })
+ ```
+ """
+ self._check_values_type()
+ return DatasetDict(
+ {
+ k: dataset.rename_column(original_column_name=original_column_name, new_column_name=new_column_name)
+ for k, dataset in self.items()
+ }
+ )
+
+ def rename_columns(self, column_mapping: Dict[str, str]) -> "DatasetDict":
+ """
+ Rename several columns in the dataset, and move the features associated to the original columns under
+ the new column names.
+ The transformation is applied to all the datasets of the dataset dictionary.
+
+ Args:
+ column_mapping (`Dict[str, str]`):
+ A mapping of columns to rename to their new names.
+
+ Returns:
+ [`DatasetDict`]: A copy of the dataset with renamed columns.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes")
+ >>> ds.rename_columns({'text': 'text_new', 'label': 'label_new'})
+ DatasetDict({
+ train: Dataset({
+ features: ['text_new', 'label_new'],
+ num_rows: 8530
+ })
+ validation: Dataset({
+ features: ['text_new', 'label_new'],
+ num_rows: 1066
+ })
+ test: Dataset({
+ features: ['text_new', 'label_new'],
+ num_rows: 1066
+ })
+ })
+ ```
+ """
+ self._check_values_type()
+ return DatasetDict({k: dataset.rename_columns(column_mapping=column_mapping) for k, dataset in self.items()})
+
+ def select_columns(self, column_names: Union[str, List[str]]) -> "DatasetDict":
+ """Select one or several column(s) from each split in the dataset and
+ the features associated to the column(s).
+
+ The transformation is applied to all the splits of the dataset
+ dictionary.
+
+ Args:
+ column_names (`Union[str, List[str]]`):
+ Name of the column(s) to keep.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes")
+ >>> ds.select_columns("text")
+ DatasetDict({
+ train: Dataset({
+ features: ['text'],
+ num_rows: 8530
+ })
+ validation: Dataset({
+ features: ['text'],
+ num_rows: 1066
+ })
+ test: Dataset({
+ features: ['text'],
+ num_rows: 1066
+ })
+ })
+ ```
+ """
+ self._check_values_type()
+ return DatasetDict({k: dataset.select_columns(column_names=column_names) for k, dataset in self.items()})
+
+ def class_encode_column(self, column: str, include_nulls: bool = False) -> "DatasetDict":
+ """Casts the given column as [`~datasets.features.ClassLabel`] and updates the tables.
+
+ Args:
+ column (`str`):
+ The name of the column to cast.
+ include_nulls (`bool`, defaults to `False`):
+ Whether to include null values in the class labels. If `True`, the null values will be encoded as the `"None"` class label.
+
+
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("boolq")
+ >>> ds["train"].features
+ {'answer': Value(dtype='bool', id=None),
+ 'passage': Value(dtype='string', id=None),
+ 'question': Value(dtype='string', id=None)}
+ >>> ds = ds.class_encode_column("answer")
+ >>> ds["train"].features
+ {'answer': ClassLabel(num_classes=2, names=['False', 'True'], id=None),
+ 'passage': Value(dtype='string', id=None),
+ 'question': Value(dtype='string', id=None)}
+ ```
+ """
+ self._check_values_type()
+ return DatasetDict(
+ {k: dataset.class_encode_column(column=column, include_nulls=include_nulls) for k, dataset in self.items()}
+ )
+
+ @contextlib.contextmanager
+ def formatted_as(
+ self,
+ type: Optional[str] = None,
+ columns: Optional[List] = None,
+ output_all_columns: bool = False,
+ **format_kwargs,
+ ):
+ """To be used in a `with` statement. Set `__getitem__` return format (type and columns).
+ The transformation is applied to all the datasets of the dataset dictionary.
+
+ Args:
+ type (`str`, *optional*):
+ Output type selected in `[None, 'numpy', 'torch', 'tensorflow', 'pandas', 'arrow', 'jax']`.
+ `None` means `__getitem__` returns python objects (default).
+ columns (`List[str]`, *optional*):
+ Columns to format in the output.
+ `None` means `__getitem__` returns all columns (default).
+ output_all_columns (`bool`, defaults to False):
+ Keep un-formatted columns as well in the output (as python objects).
+ **format_kwargs (additional keyword arguments):
+ Keywords arguments passed to the convert function like `np.array`, `torch.tensor` or `tensorflow.ragged.constant`.
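+
+        Example (illustrative sketch; assumes `ds` is a loaded `DatasetDict`)::
+
+            >>> with ds.formatted_as(type="numpy", columns=["label"]):
+            ...     labels = ds["train"]["label"]  # formatted as numpy inside the `with` block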
+ """
+ self._check_values_type()
+ old_format_type = {k: dataset._format_type for k, dataset in self.items()}
+ old_format_kwargs = {k: dataset._format_kwargs for k, dataset in self.items()}
+ old_format_columns = {k: dataset._format_columns for k, dataset in self.items()}
+ old_output_all_columns = {k: dataset._output_all_columns for k, dataset in self.items()}
+ try:
+ self.set_format(type, columns, output_all_columns, **format_kwargs)
+ yield
+ finally:
+ for k, dataset in self.items():
+ dataset.set_format(
+ old_format_type[k], old_format_columns[k], old_output_all_columns[k], **old_format_kwargs[k]
+ )
+
+ def set_format(
+ self,
+ type: Optional[str] = None,
+ columns: Optional[List] = None,
+ output_all_columns: bool = False,
+ **format_kwargs,
+ ):
+ """Set `__getitem__` return format (type and columns).
+ The format is set for every dataset in the dataset dictionary.
+
+ Args:
+ type (`str`, *optional*):
+ Output type selected in `[None, 'numpy', 'torch', 'tensorflow', 'pandas', 'arrow', 'jax']`.
+ `None` means `__getitem__` returns python objects (default).
+ columns (`List[str]`, *optional*):
+ Columns to format in the output.
+ `None` means `__getitem__` returns all columns (default).
+ output_all_columns (`bool`, defaults to False):
+ Keep un-formatted columns as well in the output (as python objects),
+ **format_kwargs (additional keyword arguments):
+ Keywords arguments passed to the convert function like `np.array`, `torch.tensor` or `tensorflow.ragged.constant`.
+
+        It is possible to call `map` after calling `set_format`. Since `map` may add new columns, the list of formatted columns
+        gets updated. In this case, if you apply `map` on a dataset to add a new column, then this column will be formatted:
+
+ `new formatted columns = (all columns - previously unformatted columns)`
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> from transformers import AutoTokenizer
+ >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
+ >>> ds = ds.map(lambda x: tokenizer(x["text"], truncation=True, padding=True), batched=True)
+ >>> ds.set_format(type="numpy", columns=['input_ids', 'token_type_ids', 'attention_mask', 'label'])
+ >>> ds["train"].format
+ {'columns': ['input_ids', 'token_type_ids', 'attention_mask', 'label'],
+ 'format_kwargs': {},
+ 'output_all_columns': False,
+ 'type': 'numpy'}
+ ```
+ """
+ self._check_values_type()
+ for dataset in self.values():
+ dataset.set_format(type=type, columns=columns, output_all_columns=output_all_columns, **format_kwargs)
+
+ def reset_format(self):
+ """Reset `__getitem__` return format to python objects and all columns.
+ The transformation is applied to all the datasets of the dataset dictionary.
+
+ Same as `self.set_format()`
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> from transformers import AutoTokenizer
+ >>> ds = load_dataset("rotten_tomatoes")
+ >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
+ >>> ds = ds.map(lambda x: tokenizer(x["text"], truncation=True, padding=True), batched=True)
+ >>> ds.set_format(type="numpy", columns=['input_ids', 'token_type_ids', 'attention_mask', 'label'])
+ >>> ds["train"].format
+ {'columns': ['input_ids', 'token_type_ids', 'attention_mask', 'label'],
+ 'format_kwargs': {},
+ 'output_all_columns': False,
+ 'type': 'numpy'}
+ >>> ds.reset_format()
+ >>> ds["train"].format
+ {'columns': ['text', 'label', 'input_ids', 'token_type_ids', 'attention_mask'],
+ 'format_kwargs': {},
+ 'output_all_columns': False,
+ 'type': None}
+ ```
+ """
+ self._check_values_type()
+ for dataset in self.values():
+ dataset.set_format()
+
+ def set_transform(
+ self,
+ transform: Optional[Callable],
+ columns: Optional[List] = None,
+ output_all_columns: bool = False,
+ ):
+ """Set ``__getitem__`` return format using this transform. The transform is applied on-the-fly on batches when ``__getitem__`` is called.
+        The transform is set for every dataset in the dataset dictionary.
+        As with :func:`datasets.Dataset.set_format`, this can be reset using :func:`datasets.Dataset.reset_format`.
+
+ Args:
+ transform (`Callable`, optional): user-defined formatting transform, replaces the format defined by :func:`datasets.Dataset.set_format`
+ A formatting function is a callable that takes a batch (as a dict) as input and returns a batch.
+ This function is applied right before returning the objects in ``__getitem__``.
+ columns (`List[str]`, optional): columns to format in the output
+ If specified, then the input batch of the transform only contains those columns.
+ output_all_columns (`bool`, default to False): keep un-formatted columns as well in the output (as python objects)
+ If set to True, then the other un-formatted columns are kept with the output of the transform.
+
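+        Example (illustrative sketch; assumes a `tokenizer` as in the `with_transform` example below)::
+
+            >>> def encode(batch):
+            ...     return tokenizer(batch["text"], truncation=True, padding=True, return_tensors="pt")
+            >>> ds.set_transform(encode)  # applied on-the-fly when `__getitem__` is called
+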
+ """
+ self._check_values_type()
+ for dataset in self.values():
+ dataset.set_format("custom", columns=columns, output_all_columns=output_all_columns, transform=transform)
+
+ def with_format(
+ self,
+ type: Optional[str] = None,
+ columns: Optional[List] = None,
+ output_all_columns: bool = False,
+ **format_kwargs,
+ ) -> "DatasetDict":
+ """Set `__getitem__` return format (type and columns). The data formatting is applied on-the-fly.
+ The format `type` (for example "numpy") is used to format batches when using `__getitem__`.
+ The format is set for every dataset in the dataset dictionary.
+
+ It's also possible to use custom transforms for formatting using [`~datasets.Dataset.with_transform`].
+
+ Contrary to [`~datasets.DatasetDict.set_format`], `with_format` returns a new [`DatasetDict`] object with new [`Dataset`] objects.
+
+ Args:
+ type (`str`, *optional*):
+ Output type selected in `[None, 'numpy', 'torch', 'tensorflow', 'pandas', 'arrow', 'jax']`.
+ `None` means `__getitem__` returns python objects (default).
+ columns (`List[str]`, *optional*):
+ Columns to format in the output.
+ `None` means `__getitem__` returns all columns (default).
+ output_all_columns (`bool`, defaults to `False`):
+ Keep un-formatted columns as well in the output (as python objects).
+ **format_kwargs (additional keyword arguments):
+ Keywords arguments passed to the convert function like `np.array`, `torch.tensor` or `tensorflow.ragged.constant`.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> from transformers import AutoTokenizer
+ >>> ds = load_dataset("rotten_tomatoes")
+ >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
+ >>> ds = ds.map(lambda x: tokenizer(x['text'], truncation=True, padding=True), batched=True)
+ >>> ds["train"].format
+ {'columns': ['text', 'label', 'input_ids', 'token_type_ids', 'attention_mask'],
+ 'format_kwargs': {},
+ 'output_all_columns': False,
+ 'type': None}
+ >>> ds = ds.with_format(type='tensorflow', columns=['input_ids', 'token_type_ids', 'attention_mask', 'label'])
+ >>> ds["train"].format
+ {'columns': ['input_ids', 'token_type_ids', 'attention_mask', 'label'],
+ 'format_kwargs': {},
+ 'output_all_columns': False,
+ 'type': 'tensorflow'}
+ ```
+ """
+ dataset = copy.deepcopy(self)
+ dataset.set_format(type=type, columns=columns, output_all_columns=output_all_columns, **format_kwargs)
+ return dataset
+
+ def with_transform(
+ self,
+ transform: Optional[Callable],
+ columns: Optional[List] = None,
+ output_all_columns: bool = False,
+ ) -> "DatasetDict":
+ """Set `__getitem__` return format using this transform. The transform is applied on-the-fly on batches when `__getitem__` is called.
+        The transform is set for every dataset in the dataset dictionary.
+
+ As [`~datasets.Dataset.set_format`], this can be reset using [`~datasets.Dataset.reset_format`].
+
+ Contrary to [`~datasets.DatasetDict.set_transform`], `with_transform` returns a new [`DatasetDict`] object with new [`Dataset`] objects.
+
+ Args:
+ transform (`Callable`, *optional*):
+ User-defined formatting transform, replaces the format defined by [`~datasets.Dataset.set_format`].
+ A formatting function is a callable that takes a batch (as a dict) as input and returns a batch.
+ This function is applied right before returning the objects in `__getitem__`.
+ columns (`List[str]`, *optional*):
+ Columns to format in the output.
+ If specified, then the input batch of the transform only contains those columns.
+ output_all_columns (`bool`, defaults to False):
+ Keep un-formatted columns as well in the output (as python objects).
+ If set to `True`, then the other un-formatted columns are kept with the output of the transform.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> from transformers import AutoTokenizer
+ >>> ds = load_dataset("rotten_tomatoes")
+ >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
+ >>> def encode(example):
+ ... return tokenizer(example['text'], truncation=True, padding=True, return_tensors="pt")
+ >>> ds = ds.with_transform(encode)
+ >>> ds["train"][0]
+ {'attention_mask': tensor([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1]),
+ 'input_ids': tensor([ 101, 1103, 2067, 1110, 17348, 1106, 1129, 1103, 6880, 1432,
+ 112, 188, 1207, 107, 14255, 1389, 107, 1105, 1115, 1119,
+ 112, 188, 1280, 1106, 1294, 170, 24194, 1256, 3407, 1190,
+ 170, 11791, 5253, 188, 1732, 7200, 10947, 12606, 2895, 117,
+ 179, 7766, 118, 172, 15554, 1181, 3498, 6961, 3263, 1137,
+ 188, 1566, 7912, 14516, 6997, 119, 102]),
+ 'token_type_ids': tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0])}
+ ```
+ """
+ dataset = copy.deepcopy(self)
+ dataset.set_transform(transform=transform, columns=columns, output_all_columns=output_all_columns)
+ return dataset
+
+ def map(
+ self,
+ function: Optional[Callable] = None,
+ with_indices: bool = False,
+ with_rank: bool = False,
+ input_columns: Optional[Union[str, List[str]]] = None,
+ batched: bool = False,
+ batch_size: Optional[int] = 1000,
+ drop_last_batch: bool = False,
+ remove_columns: Optional[Union[str, List[str]]] = None,
+ keep_in_memory: bool = False,
+ load_from_cache_file: Optional[bool] = None,
+ cache_file_names: Optional[Dict[str, Optional[str]]] = None,
+ writer_batch_size: Optional[int] = 1000,
+ features: Optional[Features] = None,
+ disable_nullable: bool = False,
+ fn_kwargs: Optional[dict] = None,
+ num_proc: Optional[int] = None,
+ desc: Optional[str] = None,
+ ) -> "DatasetDict":
+ """Apply a function to all the elements in the table (individually or in batches)
+        and update the table (if the function returns updated examples).
+ The transformation is applied to all the datasets of the dataset dictionary.
+
+ Args:
+            function (`callable`): Callable with one of the following signatures:
+ - `function(example: Dict[str, Any]) -> Dict[str, Any]` if `batched=False` and `with_indices=False`
+ - `function(example: Dict[str, Any], indices: int) -> Dict[str, Any]` if `batched=False` and `with_indices=True`
+ - `function(batch: Dict[str, List]) -> Dict[str, List]` if `batched=True` and `with_indices=False`
+ - `function(batch: Dict[str, List], indices: List[int]) -> Dict[str, List]` if `batched=True` and `with_indices=True`
+
+ For advanced usage, the function can also return a `pyarrow.Table`.
+ Moreover if your function returns nothing (`None`), then `map` will run your function and return the dataset unchanged.
+
+ with_indices (`bool`, defaults to `False`):
+ Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx): ...`.
+ with_rank (`bool`, defaults to `False`):
+ Provide process rank to `function`. Note that in this case the
+ signature of `function` should be `def function(example[, idx], rank): ...`.
+ input_columns (`[Union[str, List[str]]]`, *optional*, defaults to `None`):
+ The columns to be passed into `function` as
+ positional arguments. If `None`, a dict mapping to all formatted columns is passed as one argument.
+ batched (`bool`, defaults to `False`):
+ Provide batch of examples to `function`.
+ batch_size (`int`, *optional*, defaults to `1000`):
+                Number of examples per batch provided to `function` if `batched=True`.
+                If `batch_size <= 0` or `batch_size is None`, provide the full dataset as a single batch to `function`.
+ drop_last_batch (`bool`, defaults to `False`):
+ Whether a last batch smaller than the batch_size should be
+ dropped instead of being processed by the function.
+ remove_columns (`[Union[str, List[str]]]`, *optional*, defaults to `None`):
+ Remove a selection of columns while doing the mapping.
+ Columns will be removed before updating the examples with the output of `function`, i.e. if `function` is adding
+ columns with names in `remove_columns`, these columns will be kept.
+ keep_in_memory (`bool`, defaults to `False`):
+ Keep the dataset in memory instead of writing it to a cache file.
+ load_from_cache_file (`Optional[bool]`, defaults to `True` if caching is enabled):
+ If a cache file storing the current computation from `function`
+ can be identified, use it instead of recomputing.
+ cache_file_names (`[Dict[str, str]]`, *optional*, defaults to `None`):
+ Provide the name of a path for the cache file. It is used to store the
+ results of the computation instead of the automatically generated cache file name.
+ You have to provide one `cache_file_name` per dataset in the dataset dictionary.
+ writer_batch_size (`int`, defaults to `1000`):
+ Number of rows per write operation for the cache file writer.
+ This value is a good trade-off between memory usage during the processing, and processing speed.
+ A higher value makes the processing do fewer lookups, a lower value consumes less temporary memory while running `map`.
+ features (`[datasets.Features]`, *optional*, defaults to `None`):
+ Use a specific [`Features`] to store the cache file
+ instead of the automatically generated one.
+ disable_nullable (`bool`, defaults to `False`):
+ Disallow null values in the table.
+ fn_kwargs (`Dict`, *optional*, defaults to `None`):
+ Keyword arguments to be passed to `function`
+ num_proc (`int`, *optional*, defaults to `None`):
+ Number of processes for multiprocessing. By default it doesn't
+ use multiprocessing.
+ desc (`str`, *optional*, defaults to `None`):
+ Meaningful description to be displayed alongside with the progress bar while mapping examples.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes")
+ >>> def add_prefix(example):
+ ... example["text"] = "Review: " + example["text"]
+ ... return example
+ >>> ds = ds.map(add_prefix)
+ >>> ds["train"][0:3]["text"]
+ ['Review: the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .',
+ 'Review: the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .',
+ 'Review: effective but too-tepid biopic']
+
+ # process a batch of examples
+ >>> ds = ds.map(lambda example: tokenizer(example["text"]), batched=True)
+ # set number of processors
+ >>> ds = ds.map(add_prefix, num_proc=4)
+ ```
+ """
+ self._check_values_type()
+ if cache_file_names is None:
+ cache_file_names = {k: None for k in self}
+ return DatasetDict(
+ {
+ k: dataset.map(
+ function=function,
+ with_indices=with_indices,
+ with_rank=with_rank,
+ input_columns=input_columns,
+ batched=batched,
+ batch_size=batch_size,
+ drop_last_batch=drop_last_batch,
+ remove_columns=remove_columns,
+ keep_in_memory=keep_in_memory,
+ load_from_cache_file=load_from_cache_file,
+ cache_file_name=cache_file_names[k],
+ writer_batch_size=writer_batch_size,
+ features=features,
+ disable_nullable=disable_nullable,
+ fn_kwargs=fn_kwargs,
+ num_proc=num_proc,
+ desc=desc,
+ )
+ for k, dataset in self.items()
+ }
+ )
+
+ def filter(
+ self,
+ function: Optional[Callable] = None,
+ with_indices: bool = False,
+ with_rank: bool = False,
+ input_columns: Optional[Union[str, List[str]]] = None,
+ batched: bool = False,
+ batch_size: Optional[int] = 1000,
+ keep_in_memory: bool = False,
+ load_from_cache_file: Optional[bool] = None,
+ cache_file_names: Optional[Dict[str, Optional[str]]] = None,
+ writer_batch_size: Optional[int] = 1000,
+ fn_kwargs: Optional[dict] = None,
+ num_proc: Optional[int] = None,
+ desc: Optional[str] = None,
+ ) -> "DatasetDict":
+ """Apply a filter function to all the elements in the table in batches
+ and update the table so that the dataset only includes examples according to the filter function.
+ The transformation is applied to all the datasets of the dataset dictionary.
+
+ Args:
+ function (`Callable`): Callable with one of the following signatures:
+
+ - `function(example: Dict[str, Any]) -> bool` if `batched=False` and `with_indices=False` and `with_rank=False`
+ - `function(example: Dict[str, Any], *extra_args) -> bool` if `batched=False` and `with_indices=True` and/or `with_rank=True` (one extra arg for each)
+ - `function(batch: Dict[str, List]) -> List[bool]` if `batched=True` and `with_indices=False` and `with_rank=False`
+ - `function(batch: Dict[str, List], *extra_args) -> List[bool]` if `batched=True` and `with_indices=True` and/or `with_rank=True` (one extra arg for each)
+
+ If no function is provided, defaults to an always `True` function: `lambda x: True`.
+ with_indices (`bool`, defaults to `False`):
+ Provide example indices to `function`. Note that in this case the
+ signature of `function` should be `def function(example, idx[, rank]): ...`.
+ with_rank (`bool`, defaults to `False`):
+ Provide process rank to `function`. Note that in this case the
+ signature of `function` should be `def function(example[, idx], rank): ...`.
+ input_columns (`[Union[str, List[str]]]`, *optional*, defaults to `None`):
+ The columns to be passed into `function` as
+ positional arguments. If `None`, a dict mapping to all formatted columns is passed as one argument.
+ batched (`bool`, defaults to `False`):
+ Provide batch of examples to `function`.
+ batch_size (`int`, *optional*, defaults to `1000`):
+ Number of examples per batch provided to `function` if `batched=True`.
+ If `batch_size <= 0` or `batch_size is None`, provide the full dataset as a single batch to `function`.
+ keep_in_memory (`bool`, defaults to `False`):
+ Keep the dataset in memory instead of writing it to a cache file.
+ load_from_cache_file (`Optional[bool]`, defaults to `True` if caching is enabled):
+ If a cache file storing the current computation from `function`
+ can be identified, use it instead of recomputing.
+ cache_file_names (`[Dict[str, str]]`, *optional*, defaults to `None`):
+ Provide the name of a path for the cache file. It is used to store the
+ results of the computation instead of the automatically generated cache file name.
+ You have to provide one `cache_file_name` per dataset in the dataset dictionary.
+ writer_batch_size (`int`, defaults to `1000`):
+ Number of rows per write operation for the cache file writer.
+ This value is a good trade-off between memory usage during the processing, and processing speed.
+ A higher value makes the processing do fewer lookups, a lower value consumes less temporary memory while running `map`.
+ fn_kwargs (`Dict`, *optional*, defaults to `None`):
+ Keyword arguments to be passed to `function`
+ num_proc (`int`, *optional*, defaults to `None`):
+ Number of processes for multiprocessing. By default it doesn't
+ use multiprocessing.
+ desc (`str`, *optional*, defaults to `None`):
+ Meaningful description to be displayed alongside with the progress bar while filtering examples.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes")
+ >>> ds.filter(lambda x: x["label"] == 1)
+ DatasetDict({
+ train: Dataset({
+ features: ['text', 'label'],
+ num_rows: 4265
+ })
+ validation: Dataset({
+ features: ['text', 'label'],
+ num_rows: 533
+ })
+ test: Dataset({
+ features: ['text', 'label'],
+ num_rows: 533
+ })
+ })
+ ```
+ """
+ self._check_values_type()
+ if cache_file_names is None:
+ cache_file_names = {k: None for k in self}
+ return DatasetDict(
+ {
+ k: dataset.filter(
+ function=function,
+ with_indices=with_indices,
+ with_rank=with_rank,
+ input_columns=input_columns,
+ batched=batched,
+ batch_size=batch_size,
+ keep_in_memory=keep_in_memory,
+ load_from_cache_file=load_from_cache_file,
+ cache_file_name=cache_file_names[k],
+ writer_batch_size=writer_batch_size,
+ fn_kwargs=fn_kwargs,
+ num_proc=num_proc,
+ desc=desc,
+ )
+ for k, dataset in self.items()
+ }
+ )
+
+ def flatten_indices(
+ self,
+ keep_in_memory: bool = False,
+ cache_file_names: Optional[Dict[str, Optional[str]]] = None,
+ writer_batch_size: Optional[int] = 1000,
+ features: Optional[Features] = None,
+ disable_nullable: bool = False,
+ num_proc: Optional[int] = None,
+ new_fingerprint: Optional[str] = None,
+ ) -> "DatasetDict":
+ """Create and cache a new Dataset by flattening the indices mapping.
+
+ Args:
+ keep_in_memory (`bool`, defaults to `False`):
+ Keep the dataset in memory instead of writing it to a cache file.
+ cache_file_names (`Dict[str, str]`, *optional*, default `None`):
+ Provide the name of a path for the cache file. It is used to store the
+ results of the computation instead of the automatically generated cache file name.
+ You have to provide one `cache_file_name` per dataset in the dataset dictionary.
+ writer_batch_size (`int`, defaults to `1000`):
+ Number of rows per write operation for the cache file writer.
+ This value is a good trade-off between memory usage during the processing, and processing speed.
+ A higher value makes the processing do fewer lookups, a lower value consumes less temporary memory while running `map`.
+ features (`Optional[datasets.Features]`, defaults to `None`):
+ Use a specific [`Features`] to store the cache file
+ instead of the automatically generated one.
+ disable_nullable (`bool`, defaults to `False`):
+ Disallow null values in the table.
+ num_proc (`int`, *optional*, defaults to `None`):
+ Max number of processes when generating the cache. Already cached shards are loaded sequentially.
+ new_fingerprint (`str`, *optional*, defaults to `None`):
+ The new fingerprint of the dataset after transform.
+ If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments.
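+
+ Example:
+
+ A minimal sketch, reusing the `rotten_tomatoes` dataset from the other examples in this module:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes")
+ >>> shuffled_ds = ds.shuffle(seed=42)  # shuffling creates an indices mapping on top of the data
+ >>> flat_ds = shuffled_ds.flatten_indices()  # illustrative: rewrites the shuffled rows as contiguous Arrow data
+ ```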
+ """
+ self._check_values_type()
+ if cache_file_names is None:
+ cache_file_names = {k: None for k in self}
+ return DatasetDict(
+ {
+ k: dataset.flatten_indices(
+ keep_in_memory=keep_in_memory,
+ cache_file_name=cache_file_names[k],
+ writer_batch_size=writer_batch_size,
+ features=features,
+ disable_nullable=disable_nullable,
+ num_proc=num_proc,
+ new_fingerprint=new_fingerprint,
+ )
+ for k, dataset in self.items()
+ }
+ )
+
+ def sort(
+ self,
+ column_names: Union[str, Sequence[str]],
+ reverse: Union[bool, Sequence[bool]] = False,
+ kind="deprecated",
+ null_placement: str = "at_end",
+ keep_in_memory: bool = False,
+ load_from_cache_file: Optional[bool] = None,
+ indices_cache_file_names: Optional[Dict[str, Optional[str]]] = None,
+ writer_batch_size: Optional[int] = 1000,
+ ) -> "DatasetDict":
+ """Create a new dataset sorted according to a single or multiple columns.
+
+ Args:
+ column_names (`Union[str, Sequence[str]]`):
+ Column name(s) to sort by.
+ reverse (`Union[bool, Sequence[bool]]`, defaults to `False`):
+ If `True`, sort by descending order rather than ascending. If a single bool is provided,
+ the value is applied to the sorting of all column names. Otherwise a list of bools with the
+ same length and order as column_names must be provided.
+ kind (`str`, *optional*):
+ Pandas algorithm for sorting, selected from `{quicksort, mergesort, heapsort, stable}`.
+ The default is `quicksort`. Note that both `stable` and `mergesort` use timsort under the covers and, in general,
+ the actual implementation will vary with data type. The `mergesort` option is retained for backwards compatibility.
+
+
+ `kind` was deprecated in version 2.10.0 and will be removed in 3.0.0.
+
+
+ null_placement (`str`, defaults to `at_end`):
+ Put `None` values at the beginning if `at_start` or `first`, or at the end if `at_end` or `last`.
+ keep_in_memory (`bool`, defaults to `False`):
+ Keep the sorted indices in memory instead of writing it to a cache file.
+ load_from_cache_file (`Optional[bool]`, defaults to `True` if caching is enabled):
+ If a cache file storing the sorted indices
+ can be identified, use it instead of recomputing.
+ indices_cache_file_names (`[Dict[str, str]]`, *optional*, defaults to `None`):
+ Provide the name of a path for the cache file. It is used to store the
+ indices mapping instead of the automatically generated cache file name.
+ You have to provide one `cache_file_name` per dataset in the dataset dictionary.
+ writer_batch_size (`int`, defaults to `1000`):
+ Number of rows per write operation for the cache file writer.
+ A higher value gives smaller cache files, a lower value consumes less temporary memory.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset('rotten_tomatoes')
+ >>> ds['train']['label'][:10]
+ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
+ >>> sorted_ds = ds.sort('label')
+ >>> sorted_ds['train']['label'][:10]
+ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
+ >>> another_sorted_ds = ds.sort(['label', 'text'], reverse=[True, False])
+ >>> another_sorted_ds['train']['label'][:10]
+ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
+ ```
+ """
+ self._check_values_type()
+ if indices_cache_file_names is None:
+ indices_cache_file_names = {k: None for k in self}
+ return DatasetDict(
+ {
+ k: dataset.sort(
+ column_names=column_names,
+ reverse=reverse,
+ kind=kind,
+ null_placement=null_placement,
+ keep_in_memory=keep_in_memory,
+ load_from_cache_file=load_from_cache_file,
+ indices_cache_file_name=indices_cache_file_names[k],
+ writer_batch_size=writer_batch_size,
+ )
+ for k, dataset in self.items()
+ }
+ )
+
+ def shuffle(
+ self,
+ seeds: Optional[Union[int, Dict[str, Optional[int]]]] = None,
+ seed: Optional[int] = None,
+ generators: Optional[Dict[str, np.random.Generator]] = None,
+ keep_in_memory: bool = False,
+ load_from_cache_file: Optional[bool] = None,
+ indices_cache_file_names: Optional[Dict[str, Optional[str]]] = None,
+ writer_batch_size: Optional[int] = 1000,
+ ) -> "DatasetDict":
+ """Create a new Dataset where the rows are shuffled.
+
+ The transformation is applied to all the datasets of the dataset dictionary.
+
+ Currently shuffling uses numpy random generators.
+ You can either supply a NumPy BitGenerator to use, or a seed to initiate NumPy's default random generator (PCG64).
+
+ Args:
+ seeds (`Dict[str, int]` or `int`, *optional*):
+ A seed to initialize the default BitGenerator if `generator=None`.
+ If `None`, then fresh, unpredictable entropy will be pulled from the OS.
+ If an `int` or `array_like[ints]` is passed, then it will be passed to SeedSequence to derive the initial BitGenerator state.
+ You can provide one `seed` per dataset in the dataset dictionary.
+ seed (`int`, *optional*):
+ A seed to initialize the default BitGenerator if `generator=None`. Alias for seeds (a `ValueError` is raised if both are provided).
+ generators (`Dict[str, np.random.Generator]`, *optional*):
+ Numpy random Generator to use to compute the permutation of the dataset rows.
+ If `generator=None` (default), uses `np.random.default_rng` (the default BitGenerator (PCG64) of NumPy).
+ You have to provide one `generator` per dataset in the dataset dictionary.
+ keep_in_memory (`bool`, defaults to `False`):
+ Keep the dataset in memory instead of writing it to a cache file.
+ load_from_cache_file (`Optional[bool]`, defaults to `True` if caching is enabled):
+ If a cache file storing the shuffled indices
+ can be identified, use it instead of recomputing.
+ indices_cache_file_names (`Dict[str, str]`, *optional*):
+ Provide the name of a path for the cache file. It is used to store the
+ indices mappings instead of the automatically generated cache file name.
+ You have to provide one `cache_file_name` per dataset in the dataset dictionary.
+ writer_batch_size (`int`, defaults to `1000`):
+ Number of rows per write operation for the cache file writer.
+ This value is a good trade-off between memory usage during the processing, and processing speed.
+ A higher value makes the processing do fewer lookups, a lower value consumes less temporary memory while running `map`.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes")
+ >>> ds["train"]["label"][:10]
+ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
+
+ # set a seed
+ >>> shuffled_ds = ds.shuffle(seed=42)
+ >>> shuffled_ds["train"]["label"][:10]
+ [0, 1, 0, 1, 0, 0, 0, 0, 0, 0]
+ ```
+ """
+ self._check_values_type()
+ if seed is not None and seeds is not None:
+ raise ValueError("Please specify seed or seeds, but not both")
+ seeds = seed if seed is not None else seeds
+ if seeds is None:
+ seeds = {k: None for k in self}
+ elif not isinstance(seeds, dict):
+ seeds = {k: seeds for k in self}
+ if generators is None:
+ generators = {k: None for k in self}
+ if indices_cache_file_names is None:
+ indices_cache_file_names = {k: None for k in self}
+ return DatasetDict(
+ {
+ k: dataset.shuffle(
+ seed=seeds[k],
+ generator=generators[k],
+ keep_in_memory=keep_in_memory,
+ load_from_cache_file=load_from_cache_file,
+ indices_cache_file_name=indices_cache_file_names[k],
+ writer_batch_size=writer_batch_size,
+ )
+ for k, dataset in self.items()
+ }
+ )
+
+ def save_to_disk(
+ self,
+ dataset_dict_path: PathLike,
+ fs="deprecated",
+ max_shard_size: Optional[Union[str, int]] = None,
+ num_shards: Optional[Dict[str, int]] = None,
+ num_proc: Optional[int] = None,
+ storage_options: Optional[dict] = None,
+ ):
+ """
+ Saves a dataset dict to a filesystem using `fsspec.spec.AbstractFileSystem`.
+
+ For [`Image`] and [`Audio`] data:
+
+ All the Image() and Audio() data are stored in the arrow files.
+ If you want to store paths or urls, please use the Value("string") type.
+
+ Args:
+ dataset_dict_path (`str`):
+ Path (e.g. `dataset/train`) or remote URI
+ (e.g. `s3://my-bucket/dataset/train`) of the dataset dict directory where the dataset dict will be
+ saved to.
+ fs (`fsspec.spec.AbstractFileSystem`, *optional*):
+ Instance of the remote filesystem where the dataset will be saved to.
+
+
+
+ `fs` was deprecated in version 2.8.0 and will be removed in 3.0.0.
+ Please use `storage_options` instead, e.g. `storage_options=fs.storage_options`
+
+
+
+ max_shard_size (`int` or `str`, *optional*, defaults to `"500MB"`):
+ The maximum size of the dataset shards to be saved to the filesystem. If expressed as a string, needs to be digits followed by a unit
+ (like `"50MB"`).
+ num_shards (`Dict[str, int]`, *optional*):
+ Number of shards to write. By default the number of shards depends on `max_shard_size` and `num_proc`.
+ You need to provide the number of shards for each dataset in the dataset dictionary.
+ Use a dictionary to define a different num_shards for each split.
+
+
+ num_proc (`int`, *optional*, defaults to `None`):
+ Number of processes when saving the dataset shards locally.
+ Multiprocessing is disabled by default.
+
+
+ storage_options (`dict`, *optional*):
+ Key/value pairs to be passed on to the file-system backend, if any.
+
+
+
+ Example:
+
+ ```python
+ >>> dataset_dict.save_to_disk("path/to/dataset/directory")
+ >>> dataset_dict.save_to_disk("path/to/dataset/directory", max_shard_size="1GB")
+ >>> dataset_dict.save_to_disk("path/to/dataset/directory", num_shards={"train": 1024, "test": 8})
+ ```
+ """
+ if fs != "deprecated":
+ warnings.warn(
+ "'fs' was deprecated in favor of 'storage_options' in version 2.8.0 and will be removed in 3.0.0.\n"
+ "You can remove this warning by passing 'storage_options=fs.storage_options' instead.",
+ FutureWarning,
+ )
+ storage_options = fs.storage_options
+
+ fs: fsspec.AbstractFileSystem
+ fs, _, _ = fsspec.get_fs_token_paths(dataset_dict_path, storage_options=storage_options)
+
+ if num_shards is None:
+ num_shards = {k: None for k in self}
+ elif not isinstance(num_shards, dict):
+ raise ValueError(
+ "Please provide one `num_shards` per dataset in the dataset dictionary, e.g. {{'train': 128, 'test': 4}}"
+ )
+
+ fs.makedirs(dataset_dict_path, exist_ok=True)
+
+ with fs.open(posixpath.join(dataset_dict_path, config.DATASETDICT_JSON_FILENAME), "w", encoding="utf-8") as f:
+ json.dump({"splits": list(self)}, f)
+ for k, dataset in self.items():
+ dataset.save_to_disk(
+ posixpath.join(dataset_dict_path, k),
+ num_shards=num_shards.get(k),
+ max_shard_size=max_shard_size,
+ num_proc=num_proc,
+ storage_options=storage_options,
+ )
+
+ @staticmethod
+ def load_from_disk(
+ dataset_dict_path: PathLike,
+ fs="deprecated",
+ keep_in_memory: Optional[bool] = None,
+ storage_options: Optional[dict] = None,
+ ) -> "DatasetDict":
+ """
+ Load a dataset that was previously saved using [`save_to_disk`] from a filesystem using `fsspec.spec.AbstractFileSystem`.
+
+ Args:
+ dataset_dict_path (`str`):
+ Path (e.g. `"dataset/train"`) or remote URI (e.g. `"s3://my-bucket/dataset/train"`)
+ of the dataset dict directory where the dataset dict will be loaded from.
+ fs (`fsspec.spec.AbstractFileSystem`, *optional*):
+ Instance of the remote filesystem where the dataset will be loaded from.
+
+
+
+ `fs` was deprecated in version 2.8.0 and will be removed in 3.0.0.
+ Please use `storage_options` instead, e.g. `storage_options=fs.storage_options`
+
+
+
+ keep_in_memory (`bool`, defaults to `None`):
+ Whether to copy the dataset in-memory. If `None`, the
+ dataset will not be copied in-memory unless explicitly enabled by setting
+ `datasets.config.IN_MEMORY_MAX_SIZE` to nonzero. See more details in the
+ [improve performance](../cache#improve-performance) section.
+ storage_options (`dict`, *optional*):
+ Key/value pairs to be passed on to the file-system backend, if any.
+
+
+
+ Returns:
+ [`DatasetDict`]
+
+ Example:
+
+ ```py
+ >>> ds = load_from_disk('path/to/dataset/directory')
+ ```
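+
+ A further sketch for a remote filesystem (assuming an S3 bucket reachable through `s3fs`):
+
+ ```py
+ >>> ds = DatasetDict.load_from_disk("s3://my-bucket/dataset/directory", storage_options={"anon": False})  # assumes s3fs is installed
+ ```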
+ """
+ if fs != "deprecated":
+ warnings.warn(
+ "'fs' was deprecated in favor of 'storage_options' in version 2.8.0 and will be removed in 3.0.0.\n"
+ "You can remove this warning by passing 'storage_options=fs.storage_options' instead.",
+ FutureWarning,
+ )
+ storage_options = fs.storage_options
+
+ fs: fsspec.AbstractFileSystem
+ fs, _, [dataset_dict_path] = fsspec.get_fs_token_paths(dataset_dict_path, storage_options=storage_options)
+
+ dataset_dict_json_path = posixpath.join(dataset_dict_path, config.DATASETDICT_JSON_FILENAME)
+ dataset_state_json_path = posixpath.join(dataset_dict_path, config.DATASET_STATE_JSON_FILENAME)
+ dataset_info_path = posixpath.join(dataset_dict_path, config.DATASET_INFO_FILENAME)
+ if not fs.isfile(dataset_dict_json_path):
+ if fs.isfile(dataset_info_path) and fs.isfile(dataset_state_json_path):
+ raise FileNotFoundError(
+ f"No such file: '{dataset_dict_json_path}'. Expected to load a `DatasetDict` object, but got a `Dataset`. Please use either `datasets.load_from_disk` or `Dataset.load_from_disk` instead."
+ )
+ raise FileNotFoundError(
+ f"No such file: '{dataset_dict_json_path}'. Expected to load a `DatasetDict` object, but provided path is not a `DatasetDict`."
+ )
+
+ with fs.open(dataset_dict_json_path, "r", encoding="utf-8") as f:
+ splits = json.load(f)["splits"]
+
+ dataset_dict = DatasetDict()
+ for k in splits:
+ dataset_dict_split_path = posixpath.join(fs.unstrip_protocol(dataset_dict_path), k)
+ dataset_dict[k] = Dataset.load_from_disk(
+ dataset_dict_split_path, keep_in_memory=keep_in_memory, storage_options=storage_options
+ )
+ return dataset_dict
+
+ @staticmethod
+ def from_csv(
+ path_or_paths: Dict[str, PathLike],
+ features: Optional[Features] = None,
+ cache_dir: str = None,
+ keep_in_memory: bool = False,
+ **kwargs,
+ ) -> "DatasetDict":
+ """Create [`DatasetDict`] from CSV file(s).
+
+ Args:
+ path_or_paths (`dict` of path-like):
+ Path(s) of the CSV file(s).
+ features ([`Features`], *optional*):
+ Dataset features.
+ cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`):
+ Directory to cache data.
+ keep_in_memory (`bool`, defaults to `False`):
+ Whether to copy the data in-memory.
+ **kwargs (additional keyword arguments):
+ Keyword arguments to be passed to [`pandas.read_csv`].
+
+ Returns:
+ [`DatasetDict`]
+
+ Example:
+
+ ```py
+ >>> from datasets import DatasetDict
+ >>> ds = DatasetDict.from_csv({'train': 'path/to/dataset.csv'})
+ ```
+ """
+ # Dynamic import to avoid circular dependency
+ from .io.csv import CsvDatasetReader
+
+ return CsvDatasetReader(
+ path_or_paths, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs
+ ).read()
+
+ @staticmethod
+ def from_json(
+ path_or_paths: Dict[str, PathLike],
+ features: Optional[Features] = None,
+ cache_dir: str = None,
+ keep_in_memory: bool = False,
+ **kwargs,
+ ) -> "DatasetDict":
+ """Create [`DatasetDict`] from JSON Lines file(s).
+
+ Args:
+ path_or_paths (`dict` of path-like):
+ Path(s) of the JSON Lines file(s).
+ features ([`Features`], *optional*):
+ Dataset features.
+ cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`):
+ Directory to cache data.
+ keep_in_memory (`bool`, defaults to `False`):
+ Whether to copy the data in-memory.
+ **kwargs (additional keyword arguments):
+ Keyword arguments to be passed to [`JsonConfig`].
+
+ Returns:
+ [`DatasetDict`]
+
+ Example:
+
+ ```py
+ >>> from datasets import DatasetDict
+ >>> ds = DatasetDict.from_json({'train': 'path/to/dataset.json'})
+ ```
+ """
+ # Dynamic import to avoid circular dependency
+ from .io.json import JsonDatasetReader
+
+ return JsonDatasetReader(
+ path_or_paths, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs
+ ).read()
+
+ @staticmethod
+ def from_parquet(
+ path_or_paths: Dict[str, PathLike],
+ features: Optional[Features] = None,
+ cache_dir: str = None,
+ keep_in_memory: bool = False,
+ columns: Optional[List[str]] = None,
+ **kwargs,
+ ) -> "DatasetDict":
+ """Create [`DatasetDict`] from Parquet file(s).
+
+ Args:
+ path_or_paths (`dict` of path-like):
+ Path(s) of the Parquet file(s).
+ features ([`Features`], *optional*):
+ Dataset features.
+ cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`):
+ Directory to cache data.
+ keep_in_memory (`bool`, defaults to `False`):
+ Whether to copy the data in-memory.
+ columns (`List[str]`, *optional*):
+ If not `None`, only these columns will be read from the file.
+ A column name may be a prefix of a nested field, e.g. 'a' will select
+ 'a.b', 'a.c', and 'a.d.e'.
+ **kwargs (additional keyword arguments):
+ Keyword arguments to be passed to [`ParquetConfig`].
+
+ Returns:
+ [`DatasetDict`]
+
+ Example:
+
+ ```py
+ >>> from datasets import DatasetDict
+ >>> ds = DatasetDict.from_parquet({'train': 'path/to/dataset.parquet'})
+ ```
+ """
+ # Dynamic import to avoid circular dependency
+ from .io.parquet import ParquetDatasetReader
+
+ return ParquetDatasetReader(
+ path_or_paths,
+ features=features,
+ cache_dir=cache_dir,
+ keep_in_memory=keep_in_memory,
+ columns=columns,
+ **kwargs,
+ ).read()
+
+ @staticmethod
+ def from_text(
+ path_or_paths: Dict[str, PathLike],
+ features: Optional[Features] = None,
+ cache_dir: str = None,
+ keep_in_memory: bool = False,
+ **kwargs,
+ ) -> "DatasetDict":
+ """Create [`DatasetDict`] from text file(s).
+
+ Args:
+ path_or_paths (`dict` of path-like):
+ Path(s) of the text file(s).
+ features ([`Features`], *optional*):
+ Dataset features.
+ cache_dir (`str`, *optional*, defaults to `"~/.cache/huggingface/datasets"`):
+ Directory to cache data.
+ keep_in_memory (`bool`, defaults to `False`):
+ Whether to copy the data in-memory.
+ **kwargs (additional keyword arguments):
+ Keyword arguments to be passed to [`TextConfig`].
+
+ Returns:
+ [`DatasetDict`]
+
+ Example:
+
+ ```py
+ >>> from datasets import DatasetDict
+ >>> ds = DatasetDict.from_text({'train': 'path/to/dataset.txt'})
+ ```
+ """
+ # Dynamic import to avoid circular dependency
+ from .io.text import TextDatasetReader
+
+ return TextDatasetReader(
+ path_or_paths, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs
+ ).read()
+
+ @deprecated()
+ @is_documented_by(Dataset.prepare_for_task)
+ def prepare_for_task(self, task: Union[str, TaskTemplate], id: int = 0) -> "DatasetDict":
+ self._check_values_type()
+ return DatasetDict({k: dataset.prepare_for_task(task=task, id=id) for k, dataset in self.items()})
+
+ @is_documented_by(Dataset.align_labels_with_mapping)
+ def align_labels_with_mapping(self, label2id: Dict, label_column: str) -> "DatasetDict":
+ self._check_values_type()
+ return DatasetDict(
+ {
+ k: dataset.align_labels_with_mapping(label2id=label2id, label_column=label_column)
+ for k, dataset in self.items()
+ }
+ )
+
+ def push_to_hub(
+ self,
+ repo_id,
+ config_name: str = "default",
+ set_default: Optional[bool] = None,
+ data_dir: Optional[str] = None,
+ commit_message: Optional[str] = None,
+ commit_description: Optional[str] = None,
+ private: Optional[bool] = False,
+ token: Optional[str] = None,
+ revision: Optional[str] = None,
+ branch="deprecated",
+ create_pr: Optional[bool] = False,
+ max_shard_size: Optional[Union[int, str]] = None,
+ num_shards: Optional[Dict[str, int]] = None,
+ embed_external_files: bool = True,
+ ) -> CommitInfo:
+ """Pushes the [`DatasetDict`] to the hub as a Parquet dataset.
+ The [`DatasetDict`] is pushed using HTTP requests and does not require git or git-lfs to be installed.
+
+ Each dataset split will be pushed independently. The pushed dataset will keep the original split names.
+
+ The resulting Parquet files are self-contained by default: if your dataset contains [`Image`] or [`Audio`]
+ data, the Parquet files will store the bytes of your images or audio files.
+ You can disable this by setting `embed_external_files` to False.
+
+ Args:
+ repo_id (`str`):
+ The ID of the repository to push to in the following format: `<user>/<dataset_name>` or
+ `<org>/<dataset_name>`. Also accepts `<dataset_name>`, which will default to the namespace
+ of the logged-in user.
+ config_name (`str`):
+ Configuration name of a dataset. Defaults to "default".
+ set_default (`bool`, *optional*):
+ Whether to set this configuration as the default one. Otherwise, the default configuration is the one
+ named "default".
+ data_dir (`str`, *optional*):
+ Directory name that will contain the uploaded data files. Defaults to the `config_name` if different
+ from "default", else "data".
+
+
+ commit_message (`str`, *optional*):
+ Message to commit while pushing. Will default to `"Upload dataset"`.
+ commit_description (`str`, *optional*):
+ Description of the commit that will be created.
+ Additionally, description of the PR if a PR is created (`create_pr` is True).
+
+
+ private (`bool`, *optional*):
+ Whether the dataset repository should be set to private or not. Only affects repository creation:
+ a repository that already exists will not be affected by that parameter.
+ token (`str`, *optional*):
+ An optional authentication token for the Hugging Face Hub. If no token is passed, will default
+ to the token saved locally when logging in with `huggingface-cli login`. Will raise an error
+ if no token is passed and the user is not logged-in.
+ revision (`str`, *optional*):
+ Branch to push the uploaded files to. Defaults to the `"main"` branch.
+
+
+ branch (`str`, *optional*):
+ The git branch on which to push the dataset. This defaults to the default branch as specified
+ in your repository, which defaults to `"main"`.
+
+
+
+ `branch` was deprecated in favor of `revision` in version 2.15.0 and will be removed in 3.0.0.
+
+
+ create_pr (`bool`, *optional*, defaults to `False`):
+ Whether to create a PR with the uploaded files or directly commit.
+
+
+ max_shard_size (`int` or `str`, *optional*, defaults to `"500MB"`):
+ The maximum size of the dataset shards to be uploaded to the hub. If expressed as a string, needs to be digits followed by a unit
+ (like `"500MB"` or `"1GB"`).
+ num_shards (`Dict[str, int]`, *optional*):
+ Number of shards to write. By default, the number of shards depends on `max_shard_size`.
+ Use a dictionary to define a different num_shards for each split.
+
+
+ embed_external_files (`bool`, defaults to `True`):
+ Whether to embed file bytes in the shards.
+ In particular, this will do the following before the push for the fields of type:
+
+ - [`Audio`] and [`Image`]: removes local path information and embeds file content in the Parquet files.
+
+ Return:
+ huggingface_hub.CommitInfo
+
+ Example:
+
+ ```python
+ >>> dataset_dict.push_to_hub("<organization>/<dataset_id>")
+ >>> dataset_dict.push_to_hub("<organization>/<dataset_id>", private=True)
+ >>> dataset_dict.push_to_hub("<organization>/<dataset_id>", max_shard_size="1GB")
+ >>> dataset_dict.push_to_hub("<organization>/<dataset_id>", num_shards={"train": 1024, "test": 8})
+ ```
+
+ If you want to add a new configuration (or subset) to a dataset (e.g. if the dataset has multiple tasks/versions/languages):
+
+ ```python
+ >>> english_dataset.push_to_hub("<organization>/<dataset_id>", "en")
+ >>> french_dataset.push_to_hub("<organization>/<dataset_id>", "fr")
+ >>> # later
+ >>> english_dataset = load_dataset("<organization>/<dataset_id>", "en")
+ >>> french_dataset = load_dataset("<organization>/<dataset_id>", "fr")
+ ```
+ """
+
+ if num_shards is None:
+ num_shards = {k: None for k in self}
+ elif not isinstance(num_shards, dict):
+ raise ValueError(
+ "Please provide one `num_shards` per dataset in the dataset dictionary, e.g. {{'train': 128, 'test': 4}}"
+ )
+
+ if branch != "deprecated":
+ warnings.warn(
+ "'branch' was deprecated in favor of 'revision' in version 2.15.0 and will be removed in 3.0.0.\n"
+ f"You can remove this warning by passing 'revision={branch}' instead.",
+ FutureWarning,
+ )
+ revision = branch
+
+ self._check_values_type()
+ self._check_values_features()
+ total_uploaded_size = 0
+ total_dataset_nbytes = 0
+ info_to_dump: DatasetInfo = next(iter(self.values())).info.copy()
+ info_to_dump.config_name = config_name
+ info_to_dump.splits = SplitDict()
+
+ for split in self.keys():
+ if not re.match(_split_re, split):
+ raise ValueError(f"Split name should match '{_split_re}' but got '{split}'.")
+
+ api = HfApi(endpoint=config.HF_ENDPOINT, token=token)
+
+ repo_url = api.create_repo(
+ repo_id,
+ token=token,
+ repo_type="dataset",
+ private=private,
+ exist_ok=True,
+ )
+ repo_id = repo_url.repo_id
+
+ if revision is not None:
+ api.create_branch(repo_id, branch=revision, token=token, repo_type="dataset", exist_ok=True)
+
+ if not data_dir:
+ data_dir = config_name if config_name != "default" else "data" # for backward compatibility
+
+ additions = []
+ for split in self.keys():
+ logger.info(f"Pushing split {split} to the Hub.")
+ # The split=key needs to be removed before merging
+ split_additions, uploaded_size, dataset_nbytes = self[split]._push_parquet_shards_to_hub(
+ repo_id,
+ data_dir=data_dir,
+ split=split,
+ token=token,
+ revision=revision,
+ create_pr=create_pr,
+ max_shard_size=max_shard_size,
+ num_shards=num_shards.get(split),
+ embed_external_files=embed_external_files,
+ )
+ additions += split_additions
+ total_uploaded_size += uploaded_size
+ total_dataset_nbytes += dataset_nbytes
+ info_to_dump.splits[split] = SplitInfo(str(split), num_bytes=dataset_nbytes, num_examples=len(self[split]))
+ info_to_dump.download_checksums = None
+ info_to_dump.download_size = total_uploaded_size
+ info_to_dump.dataset_size = total_dataset_nbytes
+ info_to_dump.size_in_bytes = total_uploaded_size + total_dataset_nbytes
+
+ # Check if the repo already has a README.md and/or a dataset_infos.json to update them with the new split info (size and pattern)
+ # and delete old split shards (if they exist)
+ repo_with_dataset_card, repo_with_dataset_infos = False, False
+ repo_splits = [] # use a list to keep the order of the splits
+ deletions = []
+ repo_files_to_add = [addition.path_in_repo for addition in additions]
+ for repo_file in list_files_info(api, repo_id=repo_id, revision=revision, repo_type="dataset", token=token):
+ if repo_file.rfilename == config.REPOCARD_FILENAME:
+ repo_with_dataset_card = True
+ elif repo_file.rfilename == config.DATASETDICT_INFOS_FILENAME:
+ repo_with_dataset_infos = True
+ elif (
+ repo_file.rfilename.startswith(tuple(f"{data_dir}/{split}-" for split in self.keys()))
+ and repo_file.rfilename not in repo_files_to_add
+ ):
+ deletions.append(CommitOperationDelete(path_in_repo=repo_file.rfilename))
+ elif fnmatch.fnmatch(
+ repo_file.rfilename, PUSH_TO_HUB_WITHOUT_METADATA_CONFIGS_SPLIT_PATTERN_SHARDED.replace("{split}", "*")
+ ):
+ repo_split = string_to_dict(
+ repo_file.rfilename,
+ glob_pattern_to_regex(PUSH_TO_HUB_WITHOUT_METADATA_CONFIGS_SPLIT_PATTERN_SHARDED),
+ )["split"]
+ if repo_split not in repo_splits:
+ repo_splits.append(repo_split)
+
+ # get the info from the README to update them
+ if repo_with_dataset_card:
+ dataset_card_path = api.hf_hub_download(
+ repo_id, config.REPOCARD_FILENAME, repo_type="dataset", revision=revision
+ )
+ dataset_card = DatasetCard.load(Path(dataset_card_path))
+ dataset_card_data = dataset_card.data
+ metadata_configs = MetadataConfigs.from_dataset_card_data(dataset_card_data)
+ # get the deprecated dataset_infos.json to update them
+ elif repo_with_dataset_infos:
+ dataset_card = None
+ dataset_card_data = DatasetCardData()
+ metadata_configs = MetadataConfigs()
+ else:
+ dataset_card = None
+ dataset_card_data = DatasetCardData()
+ metadata_configs = MetadataConfigs()
+ # create the metadata configs if it was uploaded with push_to_hub before metadata configs existed
+ if not metadata_configs and repo_splits:
+ default_metadata_configs_to_dump = {
+ "data_files": [{"split": split, "path": f"data/{split}-*"} for split in repo_splits]
+ }
+ MetadataConfigs({"default": default_metadata_configs_to_dump}).to_dataset_card_data(dataset_card_data)
+ metadata_config_to_dump = {
+ "data_files": [{"split": split, "path": f"{data_dir}/{split}-*"} for split in self.keys()],
+ }
+ if set_default and config_name != "default":
+ if metadata_configs:
+ default_config_name = metadata_configs.get_default_config_name()
+ if default_config_name == "default":
+ raise ValueError(
+ "There exists a configuration named 'default'. To set a different configuration as default, "
+ "rename the 'default' one first."
+ )
+ else:
+ _ = metadata_configs[default_config_name].pop("default")
+ metadata_config_to_dump["default"] = True
+ # push to the deprecated dataset_infos.json
+ if repo_with_dataset_infos:
+ dataset_infos_path = api.hf_hub_download(
+ repo_id, config.DATASETDICT_INFOS_FILENAME, repo_type="dataset", revision=revision
+ )
+ with open(dataset_infos_path, encoding="utf-8") as f:
+ dataset_infos: dict = json.load(f)
+ dataset_infos[config_name] = asdict(info_to_dump)
+ buffer = BytesIO()
+ buffer.write(json.dumps(dataset_infos, indent=4).encode("utf-8"))
+ additions.append(
+ CommitOperationAdd(path_in_repo=config.DATASETDICT_INFOS_FILENAME, path_or_fileobj=buffer)
+ )
+ # push to README
+ DatasetInfosDict({config_name: info_to_dump}).to_dataset_card_data(dataset_card_data)
+ MetadataConfigs({config_name: metadata_config_to_dump}).to_dataset_card_data(dataset_card_data)
+ dataset_card = DatasetCard(f"---\n{dataset_card_data}\n---\n") if dataset_card is None else dataset_card
+ additions.append(
+ CommitOperationAdd(path_in_repo=config.REPOCARD_FILENAME, path_or_fileobj=str(dataset_card).encode())
+ )
+
+ commit_message = commit_message if commit_message is not None else "Upload dataset"
+ if len(additions) <= config.UPLOADS_MAX_NUMBER_PER_COMMIT:
+ commit_info = api.create_commit(
+ repo_id,
+ operations=additions + deletions,
+ commit_message=commit_message,
+ commit_description=commit_description,
+ token=token,
+ repo_type="dataset",
+ revision=revision,
+ create_pr=create_pr,
+ )
+ else:
+ logger.info(
+ f"Number of files to upload is larger than {config.UPLOADS_MAX_NUMBER_PER_COMMIT}. Splitting the push into multiple commits."
+ )
+ num_commits = math.ceil(len(additions) / config.UPLOADS_MAX_NUMBER_PER_COMMIT)
+ for i in range(0, num_commits):
+ operations = additions[
+ i * config.UPLOADS_MAX_NUMBER_PER_COMMIT : (i + 1) * config.UPLOADS_MAX_NUMBER_PER_COMMIT
+ ] + (deletions if i == 0 else [])
+ commit_info = api.create_commit(
+ repo_id,
+ operations=operations,
+ commit_message=commit_message + f" (part {i:05d}-of-{num_commits:05d})",
+ commit_description=commit_description,
+ token=token,
+ repo_type="dataset",
+ revision=revision,
+ create_pr=create_pr,
+ )
+ logger.info(
+ f"Commit #{i+1} completed"
+ + (f" (still {num_commits - i - 1} to go)" if num_commits - i - 1 else "")
+ + "."
+ )
+ return commit_info
+
+
+class IterableDatasetDict(dict):
+ def __repr__(self):
+ repr = "\n".join([f"{k}: {v}" for k, v in self.items()])
+ repr = re.sub(r"^", " " * 4, repr, 0, re.M)
+ return f"IterableDatasetDict({{\n{repr}\n}})"
+
+ def with_format(
+ self,
+ type: Optional[str] = None,
+ ) -> "IterableDatasetDict":
+ """
+ Return a dataset with the specified format.
+ This method only supports the "torch" format for now.
+ The format is set to all the datasets of the dataset dictionary.
+
+ Args:
+ type (`str`, *optional*, defaults to `None`):
+ If set to "torch", the returned dataset
+ will be a subclass of `torch.utils.data.IterableDataset` to be used in a `DataLoader`.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", streaming=True)
+ >>> from transformers import AutoTokenizer
+ >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
+ >>> def encode(examples):
+ ... return tokenizer(examples["text"], truncation=True, padding="max_length")
+ >>> ds = ds.map(encode, batched=True, remove_columns=["text"])
+ >>> ds = ds.with_format("torch")
+ ```
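+
+ A follow-up sketch (assuming `torch` is installed) that feeds the formatted dataset to a `DataLoader`:
+
+ ```py
+ >>> from torch.utils.data import DataLoader
+ >>> dataloader = DataLoader(ds["train"], batch_size=32)  # ds["train"] now behaves as a torch IterableDataset
+ ```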
+ """
+ return IterableDatasetDict({k: dataset.with_format(type=type) for k, dataset in self.items()})
+
+ def map(
+ self,
+ function: Optional[Callable] = None,
+ with_indices: bool = False,
+ input_columns: Optional[Union[str, List[str]]] = None,
+ batched: bool = False,
+ batch_size: int = 1000,
+ drop_last_batch: bool = False,
+ remove_columns: Optional[Union[str, List[str]]] = None,
+ fn_kwargs: Optional[dict] = None,
+ ) -> "IterableDatasetDict":
+ """
+ Apply a function to all the examples in the iterable dataset (individually or in batches) and update them.
+ If your function returns a column that already exists, then it overwrites it.
+ The function is applied on-the-fly on the examples when iterating over the dataset.
+ The transformation is applied to all the datasets of the dataset dictionary.
+
+ You can specify whether the function should be batched or not with the `batched` parameter:
+
+ - If batched is `False`, then the function takes 1 example in and should return 1 example.
+ An example is a dictionary, e.g. `{"text": "Hello there !"}`.
+ - If batched is `True` and `batch_size` is 1, then the function takes a batch of 1 example as input and can return a batch with 1 or more examples.
+ A batch is a dictionary, e.g. a batch of 1 example is `{"text": ["Hello there !"]}`.
+ - If batched is `True` and `batch_size` is `n` > 1, then the function takes a batch of `n` examples as input and can return a batch with `n` examples, or with an arbitrary number of examples.
+ Note that the last batch may have less than `n` examples.
+ A batch is a dictionary, e.g. a batch of `n` examples is `{"text": ["Hello there !"] * n}`.
+
+ Args:
+ function (`Callable`, *optional*, defaults to `None`):
+ Function applied on-the-fly on the examples when you iterate on the dataset.
+ It must have one of the following signatures:
+
+ - `function(example: Dict[str, Any]) -> Dict[str, Any]` if `batched=False` and `with_indices=False`
+ - `function(example: Dict[str, Any], idx: int) -> Dict[str, Any]` if `batched=False` and `with_indices=True`
+ - `function(batch: Dict[str, List]) -> Dict[str, List]` if `batched=True` and `with_indices=False`
+ - `function(batch: Dict[str, List], indices: List[int]) -> Dict[str, List]` if `batched=True` and `with_indices=True`
+
+ For advanced usage, the function can also return a `pyarrow.Table`.
+ Moreover if your function returns nothing (`None`), then `map` will run your function and return the dataset unchanged.
+ If no function is provided, default to identity function: `lambda x: x`.
+ with_indices (`bool`, defaults to `False`):
+ Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx): ...`.
+ input_columns (`[Union[str, List[str]]]`, *optional*, defaults to `None`):
+ The columns to be passed into `function`
+ as positional arguments. If `None`, a dict mapping to all formatted columns is passed as one argument.
+ batched (`bool`, defaults to `False`):
+ Provide batch of examples to `function`.
+ batch_size (`int`, *optional*, defaults to `1000`):
+ Number of examples per batch provided to `function` if `batched=True`.
+ drop_last_batch (`bool`, defaults to `False`):
+ Whether a last batch smaller than the `batch_size` should be
+ dropped instead of being processed by the function.
+ remove_columns (`[List[str]]`, *optional*, defaults to `None`):
+ Remove a selection of columns while doing the mapping.
+ Columns will be removed before updating the examples with the output of `function`, i.e. if `function` is adding
+ columns with names in `remove_columns`, these columns will be kept.
+ fn_kwargs (`Dict`, *optional*, defaults to `None`):
+ Keyword arguments to be passed to `function`
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", streaming=True)
+ >>> def add_prefix(example):
+ ... example["text"] = "Review: " + example["text"]
+ ... return example
+ >>> ds = ds.map(add_prefix)
+ >>> next(iter(ds["train"]))
+ {'label': 1,
+ 'text': 'Review: the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'}
+ ```
+ """
+ return IterableDatasetDict(
+ {
+ k: dataset.map(
+ function=function,
+ with_indices=with_indices,
+ input_columns=input_columns,
+ batched=batched,
+ batch_size=batch_size,
+ drop_last_batch=drop_last_batch,
+ remove_columns=remove_columns,
+ fn_kwargs=fn_kwargs,
+ )
+ for k, dataset in self.items()
+ }
+ )
+
+ def filter(
+ self,
+ function: Optional[Callable] = None,
+ with_indices=False,
+ input_columns: Optional[Union[str, List[str]]] = None,
+ batched: bool = False,
+ batch_size: Optional[int] = 1000,
+ fn_kwargs: Optional[dict] = None,
+ ) -> "IterableDatasetDict":
+ """Apply a filter function to all the elements so that the dataset only includes examples according to the filter function.
+ The filtering is done on-the-fly when iterating over the dataset.
+ The filtering is applied to all the datasets of the dataset dictionary.
+
+ Args:
+ function (`Callable`):
+ Callable with one of the following signatures:
+
+ - `function(example: Dict[str, Any]) -> bool` if `with_indices=False, batched=False`
+ - `function(example: Dict[str, Any], indices: int) -> bool` if `with_indices=True, batched=False`
+ - `function(example: Dict[str, List]) -> List[bool]` if `with_indices=False, batched=True`
+ - `function(example: Dict[str, List], indices: List[int]) -> List[bool]` if `with_indices=True, batched=True`
+
+ If no function is provided, defaults to an always True function: `lambda x: True`.
+ with_indices (`bool`, defaults to `False`):
+ Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx): ...`.
+ input_columns (`str` or `List[str]`, *optional*):
+ The columns to be passed into `function` as
+ positional arguments. If `None`, a dict mapping to all formatted columns is passed as one argument.
+ batched (`bool`, defaults to `False`):
+ Provide batch of examples to `function`
+ batch_size (`int`, *optional*, defaults to `1000`):
+ Number of examples per batch provided to `function` if `batched=True`.
+ fn_kwargs (`Dict`, *optional*, defaults to `None`):
+ Keyword arguments to be passed to `function`
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", streaming=True)
+ >>> ds = ds.filter(lambda x: x["label"] == 0)
+ >>> list(ds["train"].take(3))
+ [{'label': 0, 'text': 'Review: simplistic , silly and tedious .'},
+ {'label': 0,
+ 'text': "Review: it's so laddish and juvenile , only teenage boys could possibly find it funny ."},
+ {'label': 0,
+ 'text': 'Review: exploitative and largely devoid of the depth or sophistication that would make watching such a graphic treatment of the crimes bearable .'}]
+ ```
+ """
+ return IterableDatasetDict(
+ {
+ k: dataset.filter(
+ function=function,
+ with_indices=with_indices,
+ input_columns=input_columns,
+ batched=batched,
+ batch_size=batch_size,
+ fn_kwargs=fn_kwargs,
+ )
+ for k, dataset in self.items()
+ }
+ )
+
+ def shuffle(
+ self, seed=None, generator: Optional[np.random.Generator] = None, buffer_size: int = 1000
+ ) -> "IterableDatasetDict":
+ """
+ Randomly shuffles the elements of this dataset.
+ The shuffling is applied to all the datasets of the dataset dictionary.
+
+ This dataset fills a buffer with buffer_size elements, then randomly samples elements from this buffer,
+ replacing the selected elements with new elements. For perfect shuffling, a buffer size greater than or
+ equal to the full size of the dataset is required.
+
+ For instance, if your dataset contains 10,000 elements but `buffer_size` is set to 1000, then `shuffle` will
+ initially select a random element from only the first 1000 elements in the buffer. Once an element is
+ selected, its space in the buffer is replaced by the next (i.e. 1,001-st) element,
+ maintaining the 1000 element buffer.
+
+ If the dataset is made of several shards, it also shuffles the order of the shards.
+ However, if the order has been fixed by using [`~datasets.IterableDataset.skip`] or [`~datasets.IterableDataset.take`],
+ then the order of the shards is kept unchanged.
+
+ Args:
+ seed (`int`, *optional*, defaults to `None`):
+ Random seed that will be used to shuffle the dataset.
+ It is used to sample from the shuffle buffer and also to shuffle the data shards.
+ generator (`numpy.random.Generator`, *optional*):
+ Numpy random Generator to use to compute the permutation of the dataset rows.
+ If `generator=None` (default), uses `np.random.default_rng` (the default BitGenerator (PCG64) of NumPy).
+ buffer_size (`int`, defaults to `1000`):
+ Size of the buffer.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", streaming=True)
+ >>> list(ds["train"].take(3))
+ [{'label': 1,
+ 'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'},
+ {'label': 1,
+ 'text': 'the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .'},
+ {'label': 1, 'text': 'effective but too-tepid biopic'}]
+ >>> ds = ds.shuffle(seed=42)
+ >>> list(ds["train"].take(3))
+ [{'label': 1,
+ 'text': "a sports movie with action that's exciting on the field and a story you care about off it ."},
+ {'label': 1,
+ 'text': 'at its best , the good girl is a refreshingly adult take on adultery . . .'},
+ {'label': 1,
+ 'text': "sam jones became a very lucky filmmaker the day wilco got dropped from their record label , proving that one man's ruin may be another's fortune ."}]
+ ```
+ """
+ return IterableDatasetDict(
+ {
+ k: dataset.shuffle(seed=seed, generator=generator, buffer_size=buffer_size)
+ for k, dataset in self.items()
+ }
+ )
+
+ def rename_column(self, original_column_name: str, new_column_name: str) -> "IterableDatasetDict":
+ """
+ Rename a column in the dataset, and move the features associated to the original column under the new column
+ name.
+ The renaming is applied to all the datasets of the dataset dictionary.
+
+ Args:
+ original_column_name (`str`):
+ Name of the column to rename.
+ new_column_name (`str`):
+ New name for the column.
+
+ Returns:
+ [`IterableDatasetDict`]: A copy of the dataset with a renamed column.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", streaming=True)
+ >>> ds = ds.rename_column("text", "movie_review")
+ >>> next(iter(ds["train"]))
+ {'label': 1,
+ 'movie_review': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'}
+ ```
+ """
+ return IterableDatasetDict(
+ {
+ k: dataset.rename_column(original_column_name=original_column_name, new_column_name=new_column_name)
+ for k, dataset in self.items()
+ }
+ )
+
+ def rename_columns(self, column_mapping: Dict[str, str]) -> "IterableDatasetDict":
+ """
+ Rename several columns in the dataset, and move the features associated to the original columns under
+ the new column names.
+ The renaming is applied to all the datasets of the dataset dictionary.
+
+ Args:
+ column_mapping (`Dict[str, str]`):
+ A mapping of columns to rename to their new names.
+
+ Returns:
+ [`IterableDatasetDict`]: A copy of the dataset with renamed columns
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", streaming=True)
+ >>> ds = ds.rename_columns({"text": "movie_review", "label": "rating"})
+ >>> next(iter(ds["train"]))
+ {'movie_review': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .',
+ 'rating': 1}
+ ```
+ """
+ return IterableDatasetDict(
+ {k: dataset.rename_columns(column_mapping=column_mapping) for k, dataset in self.items()}
+ )
+
+ def remove_columns(self, column_names: Union[str, List[str]]) -> "IterableDatasetDict":
+ """
+ Remove one or several column(s) in the dataset and the features associated to them.
+ The removal is done on-the-fly on the examples when iterating over the dataset.
+ The removal is applied to all the datasets of the dataset dictionary.
+
+
+ Args:
+ column_names (`Union[str, List[str]]`):
+ Name of the column(s) to remove.
+
+ Returns:
+ [`IterableDatasetDict`]: A copy of the dataset object without the columns to remove.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", streaming=True)
+ >>> ds = ds.remove_columns("label")
+ >>> next(iter(ds["train"]))
+ {'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'}
+ ```
+ """
+ return IterableDatasetDict({k: dataset.remove_columns(column_names) for k, dataset in self.items()})
+
+ def select_columns(self, column_names: Union[str, List[str]]) -> "IterableDatasetDict":
+ """Select one or several column(s) in the dataset and the features
+ associated to them. The selection is done on-the-fly on the examples
+ when iterating over the dataset. The selection is applied to all the
+ datasets of the dataset dictionary.
+
+
+ Args:
+ column_names (`Union[str, List[str]]`):
+ Name of the column(s) to keep.
+
+ Returns:
+ [`IterableDatasetDict`]: A copy of the dataset object with only selected columns.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", streaming=True)
+ >>> ds = ds.select_columns("text")
+ >>> next(iter(ds["train"]))
+ {'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'}
+ ```
+ """
+ return IterableDatasetDict({k: dataset.select_columns(column_names) for k, dataset in self.items()})
+
+ def cast_column(self, column: str, feature: FeatureType) -> "IterableDatasetDict":
+ """Cast column to feature for decoding.
+ The type casting is applied to all the datasets of the dataset dictionary.
+
+ Args:
+ column (`str`):
+ Column name.
+ feature ([`Feature`]):
+ Target feature.
+
+ Returns:
+ [`IterableDatasetDict`]
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", streaming=True)
+ >>> ds["train"].features
+ {'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None),
+ 'text': Value(dtype='string', id=None)}
+ >>> ds = ds.cast_column('label', ClassLabel(names=['bad', 'good']))
+ >>> ds["train"].features
+ {'label': ClassLabel(num_classes=2, names=['bad', 'good'], id=None),
+ 'text': Value(dtype='string', id=None)}
+ ```
+ """
+ return IterableDatasetDict(
+ {k: dataset.cast_column(column=column, feature=feature) for k, dataset in self.items()}
+ )
+
+ def cast(
+ self,
+ features: Features,
+ ) -> "IterableDatasetDict":
+ """
+ Cast the dataset to a new set of features.
+ The type casting is applied to all the datasets of the dataset dictionary.
+
+ Args:
+ features (`Features`):
+ New features to cast the dataset to.
+ The name of the fields in the features must match the current column names.
+ The type of the data must also be convertible from one type to the other.
+ For non-trivial conversion, e.g. `string` <-> `ClassLabel` you should use [`map`] to update the Dataset.
+
+ Returns:
+ [`IterableDatasetDict`]: A copy of the dataset with casted features.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", streaming=True)
+ >>> ds["train"].features
+ {'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None),
+ 'text': Value(dtype='string', id=None)}
+ >>> new_features = ds["train"].features.copy()
+ >>> new_features['label'] = ClassLabel(names=['bad', 'good'])
+ >>> new_features['text'] = Value('large_string')
+ >>> ds = ds.cast(new_features)
+ >>> ds["train"].features
+ {'label': ClassLabel(num_classes=2, names=['bad', 'good'], id=None),
+ 'text': Value(dtype='large_string', id=None)}
+ ```
+ """
+ return IterableDatasetDict({k: dataset.cast(features=features) for k, dataset in self.items()})
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/distributed.py b/evalkit_tf437/lib/python3.10/site-packages/datasets/distributed.py
new file mode 100644
index 0000000000000000000000000000000000000000..e036fabaf2cf6231ae6a3ca2c443100ccbb0b4d5
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/datasets/distributed.py
@@ -0,0 +1,39 @@
+from typing import TypeVar
+
+from .arrow_dataset import Dataset, _split_by_node_map_style_dataset
+from .iterable_dataset import IterableDataset, _split_by_node_iterable_dataset
+
+
+DatasetType = TypeVar("DatasetType", Dataset, IterableDataset)
+
+
+def split_dataset_by_node(dataset: DatasetType, rank: int, world_size: int) -> DatasetType:
+ """
+ Split a dataset for the node at rank `rank` in a pool of nodes of size `world_size`.
+
+ For map-style datasets:
+
+ Each node is assigned a chunk of data, e.g. rank 0 is given the first chunk of the dataset.
+ To maximize data loading throughput, chunks are made of contiguous data on disk if possible.
+
+ For iterable datasets:
+
+ If the dataset has a number of shards that is a factor of `world_size` (i.e. if `dataset.n_shards % world_size == 0`),
+    then the shards are evenly assigned across the nodes, which is the most efficient.
+ Otherwise, each node keeps 1 example out of `world_size`, skipping the other examples.
+
+ Args:
+ dataset ([`Dataset`] or [`IterableDataset`]):
+ The dataset to split by node.
+ rank (`int`):
+ Rank of the current node.
+ world_size (`int`):
+ Total number of nodes.
+
+ Returns:
+ [`Dataset`] or [`IterableDataset`]: The dataset to be used on the node at rank `rank`.
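+
+    Example (an illustrative sketch; the dataset name and `world_size` value below are placeholders):
+
+    ```py
+    >>> from datasets import load_dataset
+    >>> from datasets.distributed import split_dataset_by_node
+    >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True)
+    >>> ds = split_dataset_by_node(ds, rank=0, world_size=8)
+    ```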
+ """
+ if isinstance(dataset, Dataset):
+ return _split_by_node_map_style_dataset(dataset, rank=rank, world_size=world_size)
+ else:
+ return _split_by_node_iterable_dataset(dataset, rank=rank, world_size=world_size)
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/exceptions.py b/evalkit_tf437/lib/python3.10/site-packages/datasets/exceptions.py
new file mode 100644
index 0000000000000000000000000000000000000000..619f2a10117dc16c20002b4cdcaf17a7f2350a8c
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/datasets/exceptions.py
@@ -0,0 +1,85 @@
+# SPDX-License-Identifier: Apache-2.0
+# Copyright 2023 The HuggingFace Authors.
+from typing import Any, Dict, List, Optional, Union
+
+from huggingface_hub import HfFileSystem
+
+from . import config
+from .table import CastError
+from .utils.track import TrackedIterable, tracked_list, tracked_str
+
+
+class DatasetsError(Exception):
+ """Base class for exceptions in this library."""
+
+
+class DefunctDatasetError(DatasetsError):
+ """The dataset has been defunct."""
+
+
+class FileNotFoundDatasetsError(DatasetsError, FileNotFoundError):
+ """FileNotFoundError raised by this library."""
+
+
+class DataFilesNotFoundError(FileNotFoundDatasetsError):
+ """No (supported) data files found."""
+
+
+class DatasetNotFoundError(FileNotFoundDatasetsError):
+ """Dataset not found.
+
+ Raised when trying to access:
+ - a missing dataset, or
+ - a private/gated dataset and the user is not authenticated.
+ """
+
+
+class DatasetBuildError(DatasetsError):
+ pass
+
+
+class ManualDownloadError(DatasetBuildError):
+ pass
+
+
+class FileFormatError(DatasetBuildError):
+ pass
+
+
+class DatasetGenerationError(DatasetBuildError):
+ pass
+
+
+class DatasetGenerationCastError(DatasetGenerationError):
+ @classmethod
+ def from_cast_error(
+ cls,
+ cast_error: CastError,
+ builder_name: str,
+ gen_kwargs: Dict[str, Any],
+ token: Optional[Union[bool, str]],
+ ) -> "DatasetGenerationCastError":
+ explanation_message = (
+ f"\n\nAll the data files must have the same columns, but at some point {cast_error.details()}"
+ )
+ formatted_tracked_gen_kwargs: List[str] = []
+ for gen_kwarg in gen_kwargs.values():
+ if not isinstance(gen_kwarg, (tracked_str, tracked_list, TrackedIterable)):
+ continue
+ while isinstance(gen_kwarg, (tracked_list, TrackedIterable)) and gen_kwarg.last_item is not None:
+ gen_kwarg = gen_kwarg.last_item
+ if isinstance(gen_kwarg, tracked_str):
+ gen_kwarg = gen_kwarg.get_origin()
+ if isinstance(gen_kwarg, str) and gen_kwarg.startswith("hf://"):
+ resolved_path = HfFileSystem(endpoint=config.HF_ENDPOINT, token=token).resolve_path(gen_kwarg)
+ gen_kwarg = "hf://" + resolved_path.unresolve()
+ if "@" + resolved_path.revision in gen_kwarg:
+ gen_kwarg = (
+ gen_kwarg.replace("@" + resolved_path.revision, "", 1)
+ + f" (at revision {resolved_path.revision})"
+ )
+ formatted_tracked_gen_kwargs.append(str(gen_kwarg))
+ if formatted_tracked_gen_kwargs:
+ explanation_message += f"\n\nThis happened while the {builder_name} dataset builder was generating data using\n\n{', '.join(formatted_tracked_gen_kwargs)}"
+ help_message = "\n\nPlease either edit the data files to have matching columns, or separate them into different configurations (see docs at https://hf.co/docs/hub/datasets-manual-configuration#multiple-configurations)"
+ return cls("An error occurred while generating the dataset" + explanation_message + help_message)
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/filesystems/__init__.py b/evalkit_tf437/lib/python3.10/site-packages/datasets/filesystems/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..15aefa5f42a3a2a3c8ca8ba282996d421d5d7d60
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/datasets/filesystems/__init__.py
@@ -0,0 +1,86 @@
+import importlib
+import shutil
+import threading
+import warnings
+from typing import List
+
+import fsspec
+import fsspec.asyn
+from fsspec.implementations.local import LocalFileSystem
+
+from ..utils.deprecation_utils import deprecated
+from . import compression
+
+
+_has_s3fs = importlib.util.find_spec("s3fs") is not None
+
+if _has_s3fs:
+ from .s3filesystem import S3FileSystem # noqa: F401
+
+COMPRESSION_FILESYSTEMS: List[compression.BaseCompressedFileFileSystem] = [
+ compression.Bz2FileSystem,
+ compression.GzipFileSystem,
+ compression.Lz4FileSystem,
+ compression.XzFileSystem,
+ compression.ZstdFileSystem,
+]
+
+# Register custom filesystems
+for fs_class in COMPRESSION_FILESYSTEMS:
+ if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
+ warnings.warn(f"A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.")
+ fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
+
+
+@deprecated(
+ "This function is deprecated and will be removed in a future version. Please use `fsspec.core.strip_protocol` instead."
+)
+def extract_path_from_uri(dataset_path: str) -> str:
+ """
+ Preprocesses `dataset_path` and removes remote filesystem (e.g. removing `s3://`).
+
+ Args:
+ dataset_path (`str`):
+ Path (e.g. `dataset/train`) or remote uri (e.g. `s3://my-bucket/dataset/train`) of the dataset directory.
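+
+    Example (a minimal sketch of the protocol stripping; the bucket name is a placeholder):
+
+    ```py
+    >>> extract_path_from_uri("s3://my-bucket/dataset/train")
+    'my-bucket/dataset/train'
+    >>> extract_path_from_uri("dataset/train")
+    'dataset/train'
+    ```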
+ """
+ if "://" in dataset_path:
+ dataset_path = dataset_path.split("://")[1]
+ return dataset_path
+
+
+def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
+ """
+ Checks if `fs` is a remote filesystem.
+
+ Args:
+ fs (`fsspec.spec.AbstractFileSystem`):
+ An abstract super-class for pythonic file-systems, e.g. `fsspec.filesystem(\'file\')` or [`datasets.filesystems.S3FileSystem`].
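+
+    Example (a minimal sketch; any filesystem other than a `LocalFileSystem` is considered remote here):
+
+    ```py
+    >>> import fsspec
+    >>> is_remote_filesystem(fsspec.filesystem("file"))
+    False
+    >>> is_remote_filesystem(fsspec.filesystem("memory"))
+    True
+    ```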
+ """
+ return not isinstance(fs, LocalFileSystem)
+
+
+def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str):
+ """
+ Renames the file `src` in `fs` to `dst`.
+ """
+ if not is_remote_filesystem(fs):
+ # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
+ shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
+ else:
+ fs.mv(src, dst, recursive=True)
+
+
+def _reset_fsspec_lock() -> None:
+ """
+ Clear reference to the loop and thread.
+ This is necessary otherwise HTTPFileSystem hangs in the ML training loop.
+ Only required for fsspec >= 0.9.0
+ See https://github.com/fsspec/gcsfs/issues/379
+ """
+ if hasattr(fsspec.asyn, "reset_lock"):
+ # for future fsspec>2022.05.0
+ fsspec.asyn.reset_lock()
+ else:
+ fsspec.asyn.iothread[0] = None
+ fsspec.asyn.loop[0] = None
+ fsspec.asyn.lock = threading.Lock()
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/filesystems/__pycache__/__init__.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/datasets/filesystems/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0f61eca79a6986289bc22b9a992dab8005494969
Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/datasets/filesystems/__pycache__/__init__.cpython-310.pyc differ
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/filesystems/__pycache__/s3filesystem.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/datasets/filesystems/__pycache__/s3filesystem.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d7ad801c2fae499e542190b6ee34046c38fd58de
Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/datasets/filesystems/__pycache__/s3filesystem.cpython-310.pyc differ
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/filesystems/compression.py b/evalkit_tf437/lib/python3.10/site-packages/datasets/filesystems/compression.py
new file mode 100644
index 0000000000000000000000000000000000000000..d64872040b0abe0cd0bcfdfe004c2279213edafd
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/datasets/filesystems/compression.py
@@ -0,0 +1,178 @@
+import os
+from typing import Optional
+
+import fsspec
+from fsspec.archive import AbstractArchiveFileSystem
+from fsspec.utils import DEFAULT_BLOCK_SIZE
+
+
+class BaseCompressedFileFileSystem(AbstractArchiveFileSystem):
+ """Read contents of compressed file as a filesystem with one file inside."""
+
+ root_marker = ""
+ protocol: str = (
+ None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
+ )
+ compression: str = None # compression type in fsspec. ex: "gzip"
+    extension: str = None  # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz
+
+ def __init__(
+ self, fo: str = "", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, **kwargs
+ ):
+ """
+ The compressed file system can be instantiated from any compressed file.
+        It reads the contents of the compressed file as a filesystem with one file inside, as if it were an archive.
+
+        The single file inside the filesystem is named after the compressed file,
+ without the compression extension at the end of the filename.
+
+ Args:
+ fo (:obj:``str``): Path to compressed file. Will fetch file using ``fsspec.open()``
+ mode (:obj:``str``): Currently, only 'rb' accepted
+ target_protocol(:obj:``str``, optional): To override the FS protocol inferred from a URL.
+ target_options (:obj:``dict``, optional): Kwargs passed when instantiating the target FS.
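+
+        Example (an illustrative sketch chaining the registered ``gzip`` protocol; the local path is a placeholder):
+
+        ```py
+        >>> import fsspec
+        >>> with fsspec.open("gzip://file.txt::./file.txt.gz") as f:  # doctest: +SKIP
+        ...     data = f.read()
+        ```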
+ """
+ super().__init__(self, **kwargs)
+ # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
+ self.file = fsspec.open(
+ fo,
+ mode="rb",
+ protocol=target_protocol,
+ compression=self.compression,
+ client_kwargs={
+ "requote_redirect_url": False, # see https://github.com/huggingface/datasets/pull/5459
+ "trust_env": True, # Enable reading proxy env variables.
+ **(target_options or {}).pop("client_kwargs", {}), # To avoid issues if it was already passed.
+ },
+ **(target_options or {}),
+ )
+ self.compressed_name = os.path.basename(self.file.path.split("::")[0])
+ self.uncompressed_name = (
+ self.compressed_name[: self.compressed_name.rindex(".")]
+ if "." in self.compressed_name
+ else self.compressed_name
+ )
+ self.dir_cache = None
+
+ @classmethod
+ def _strip_protocol(cls, path):
+ # compressed file paths are always relative to the archive root
+ return super()._strip_protocol(path).lstrip("/")
+
+ def _get_dirs(self):
+ if self.dir_cache is None:
+ f = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name}
+ self.dir_cache = {f["name"]: f}
+
+ def cat(self, path: str):
+ return self.file.open().read()
+
+ def _open(
+ self,
+ path: str,
+ mode: str = "rb",
+ block_size=None,
+ autocommit=True,
+ cache_options=None,
+ **kwargs,
+ ):
+ path = self._strip_protocol(path)
+ if mode != "rb":
+ raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'")
+ return self.file.open()
+
+
+class Bz2FileSystem(BaseCompressedFileFileSystem):
+ """Read contents of BZ2 file as a filesystem with one file inside."""
+
+ protocol = "bz2"
+ compression = "bz2"
+ extension = ".bz2"
+
+
+class GzipFileSystem(BaseCompressedFileFileSystem):
+ """Read contents of GZIP file as a filesystem with one file inside."""
+
+ protocol = "gzip"
+ compression = "gzip"
+ extension = ".gz"
+
+
+class Lz4FileSystem(BaseCompressedFileFileSystem):
+ """Read contents of LZ4 file as a filesystem with one file inside."""
+
+ protocol = "lz4"
+ compression = "lz4"
+ extension = ".lz4"
+
+
+class XzFileSystem(BaseCompressedFileFileSystem):
+ """Read contents of .xz (LZMA) file as a filesystem with one file inside."""
+
+ protocol = "xz"
+ compression = "xz"
+ extension = ".xz"
+
+
+class ZstdFileSystem(BaseCompressedFileFileSystem):
+ """
+ Read contents of zstd file as a filesystem with one file inside.
+
+ Note that reading in binary mode with fsspec isn't supported yet:
+ https://github.com/indygreg/python-zstandard/issues/136
+ """
+
+ protocol = "zstd"
+ compression = "zstd"
+ extension = ".zst"
+
+ def __init__(
+ self,
+ fo: str,
+ mode: str = "rb",
+ target_protocol: Optional[str] = None,
+ target_options: Optional[dict] = None,
+ block_size: int = DEFAULT_BLOCK_SIZE,
+ **kwargs,
+ ):
+ super().__init__(
+ fo=fo,
+ mode=mode,
+ target_protocol=target_protocol,
+ target_options=target_options,
+ block_size=block_size,
+ **kwargs,
+ )
+ # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
+ #
+ # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
+ # out.close = close
+ # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
+ #
+ # see https://github.com/intake/filesystem_spec/issues/725
+ _enter = self.file.__enter__
+
+ class WrappedFile:
+ def __init__(self, file_):
+ self._file = file_
+
+ def __enter__(self):
+ self._file.__enter__()
+ return self
+
+ def __exit__(self, *args, **kwargs):
+ self._file.__exit__(*args, **kwargs)
+
+ def __iter__(self):
+ return iter(self._file)
+
+ def __next__(self):
+ return next(self._file)
+
+ def __getattr__(self, attr):
+ return getattr(self._file, attr)
+
+ def fixed_enter(*args, **kwargs):
+ return WrappedFile(_enter(*args, **kwargs))
+
+ self.file.__enter__ = fixed_enter
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/filesystems/s3filesystem.py b/evalkit_tf437/lib/python3.10/site-packages/datasets/filesystems/s3filesystem.py
new file mode 100644
index 0000000000000000000000000000000000000000..4d204f1f8738e51411cacac0201fd67e5c185422
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/datasets/filesystems/s3filesystem.py
@@ -0,0 +1,116 @@
+import s3fs
+
+from ..utils.deprecation_utils import deprecated
+
+
+@deprecated("Use s3fs.S3FileSystem instead.")
+class S3FileSystem(s3fs.S3FileSystem):
+ """
+ `datasets.filesystems.S3FileSystem` is a subclass of [`s3fs.S3FileSystem`](https://s3fs.readthedocs.io/en/latest/api.html).
+
+ Users can use this class to access S3 as if it were a file system. It exposes a filesystem-like API (ls, cp, open, etc.) on top of S3 storage. Provide credentials either explicitly (`key=`, `secret=`) or with boto's credential methods. See botocore documentation for more information. If no credentials are available, use `anon=True`.
+
+ Args:
+ anon (`bool`, default to `False`):
+ Whether to use anonymous connection (public buckets only). If `False`, uses the key/secret given,
+            or boto's credential resolver (client_kwargs, environment variables, config files, EC2 IAM server, in that order).
+ key (`str`):
+ If not anonymous, use this access key ID, if specified.
+ secret (`str`):
+ If not anonymous, use this secret access key, if specified.
+ token (`str`):
+ If not anonymous, use this security token, if specified.
+ use_ssl (`bool`, defaults to `True`):
+ Whether to use SSL in connections to S3; may be faster without, but insecure. If `use_ssl` is
+ also set in `client_kwargs`, the value set in `client_kwargs` will take priority.
+ s3_additional_kwargs (`dict`):
+ Parameters that are used when calling S3 API methods. Typically used for things
+ like ServerSideEncryption.
+ client_kwargs (`dict`):
+ Parameters for the botocore client.
+ requester_pays (`bool`, defaults to `False`):
+ Whether `RequesterPays` buckets are supported.
+ default_block_size (`int`):
+            If given, the default block size value used for `open()` when no specific value is given at call time.
+ The built-in default is 5MB.
+ default_fill_cache (`bool`, defaults to `True`):
+ Whether to use cache filling with open by default. Refer to `S3File.open`.
+ default_cache_type (`str`, defaults to `bytes`):
+ If given, the default `cache_type` value used for `open()`. Set to `none` if no
+ caching is desired. See fsspec's documentation for other available `cache_type` values.
+ version_aware (`bool`, defaults to `False`):
+            Whether to support bucket versioning. If enabled, this will require the user to have
+ the necessary IAM permissions for dealing with versioned objects.
+ cache_regions (`bool`, defaults to `False`):
+ Whether to cache bucket regions. Whenever a new bucket is used, it will
+ first find out which region it belongs to and then use the client for that region.
+ asynchronous (`bool`, defaults to `False`):
+ Whether this instance is to be used from inside coroutines.
+ config_kwargs (`dict`):
+ Parameters passed to `botocore.client.Config`.
+ **kwargs:
+ Other parameters for core session.
+ session (`aiobotocore.session.AioSession`):
+            Session to be used for all connections. This session will be used in place of creating
+ a new session inside S3FileSystem. For example: `aiobotocore.session.AioSession(profile='test_user')`.
+ skip_instance_cache (`bool`):
+ Control reuse of instances. Passed on to `fsspec`.
+ use_listings_cache (`bool`):
+ Control reuse of directory listings. Passed on to `fsspec`.
+ listings_expiry_time (`int` or `float`):
+ Control reuse of directory listings. Passed on to `fsspec`.
+ max_paths (`int`): Control reuse of directory listings. Passed on to `fsspec`.
+
+ Examples:
+
+ Listing files from public S3 bucket.
+
+ ```py
+ >>> import datasets
+ >>> s3 = datasets.filesystems.S3FileSystem(anon=True) # doctest: +SKIP
+ >>> s3.ls('public-datasets/imdb/train') # doctest: +SKIP
+    ['dataset_info.json','dataset.arrow','state.json']
+ ```
+
+ Listing files from private S3 bucket using `aws_access_key_id` and `aws_secret_access_key`.
+
+ ```py
+ >>> import datasets
+ >>> s3 = datasets.filesystems.S3FileSystem(key=aws_access_key_id, secret=aws_secret_access_key) # doctest: +SKIP
+ >>> s3.ls('my-private-datasets/imdb/train') # doctest: +SKIP
+    ['dataset_info.json','dataset.arrow','state.json']
+ ```
+
+    Using `S3FileSystem` with `botocore.session.Session` and custom `aws_profile`.
+
+ ```py
+ >>> import botocore
+    >>> from datasets.filesystems import S3FileSystem
+
+ >>> s3_session = botocore.session.Session(profile_name='my_profile_name')
+ >>> s3 = S3FileSystem(session=s3_session) # doctest: +SKIP
+ ```
+
+    Loading dataset from S3 using `S3FileSystem` and [`load_from_disk`].
+
+ ```py
+ >>> from datasets import load_from_disk
+    >>> from datasets.filesystems import S3FileSystem
+
+ >>> s3 = S3FileSystem(key=aws_access_key_id, secret=aws_secret_access_key) # doctest: +SKIP
+ >>> dataset = load_from_disk('s3://my-private-datasets/imdb/train', storage_options=s3.storage_options) # doctest: +SKIP
+ >>> print(len(dataset))
+ 25000
+ ```
+
+    Saving dataset to S3 using `S3FileSystem` and [`Dataset.save_to_disk`].
+
+ ```py
+ >>> from datasets import load_dataset
+    >>> from datasets.filesystems import S3FileSystem
+
+ >>> dataset = load_dataset("imdb")
+ >>> s3 = S3FileSystem(key=aws_access_key_id, secret=aws_secret_access_key) # doctest: +SKIP
+ >>> dataset.save_to_disk('s3://my-private-datasets/imdb/train', storage_options=s3.storage_options) # doctest: +SKIP
+ ```
+ """
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/info.py b/evalkit_tf437/lib/python3.10/site-packages/datasets/info.py
new file mode 100644
index 0000000000000000000000000000000000000000..74e9a962a0cc4cf1d6b89728fa35c164a7caa93b
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/datasets/info.py
@@ -0,0 +1,592 @@
+# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Lint as: python3
+"""DatasetInfo and MetricInfo record information we know about a dataset and a metric.
+
+This includes things that we know about the dataset statically, i.e.:
+ - description
+ - canonical location
+ - does it have validation and test splits
+ - size
+ - etc.
+
+This also includes the things that can and should be computed once we've
+processed the dataset as well:
+ - number of examples (in each split)
+ - etc.
+"""
+
+import copy
+import dataclasses
+import json
+import os
+import posixpath
+import warnings
+from dataclasses import dataclass
+from pathlib import Path
+from typing import ClassVar, Dict, List, Optional, Union
+
+import fsspec
+from huggingface_hub import DatasetCard, DatasetCardData
+
+from . import config
+from .features import Features, Value
+from .splits import SplitDict
+from .tasks import TaskTemplate, task_template_from_dict
+from .utils import Version
+from .utils.logging import get_logger
+from .utils.py_utils import asdict, unique_values
+
+
+logger = get_logger(__name__)
+
+
+@dataclass
+class SupervisedKeysData:
+ input: str = ""
+ output: str = ""
+
+
+@dataclass
+class DownloadChecksumsEntryData:
+ key: str = ""
+ value: str = ""
+
+
+class MissingCachedSizesConfigError(Exception):
+ """The expected cached sizes of the download file are missing."""
+
+
+class NonMatchingCachedSizesError(Exception):
+ """The prepared split doesn't have expected sizes."""
+
+
+@dataclass
+class PostProcessedInfo:
+ features: Optional[Features] = None
+ resources_checksums: Optional[dict] = None
+
+ def __post_init__(self):
+ # Convert back to the correct classes when we reload from dict
+ if self.features is not None and not isinstance(self.features, Features):
+ self.features = Features.from_dict(self.features)
+
+ @classmethod
+ def from_dict(cls, post_processed_info_dict: dict) -> "PostProcessedInfo":
+ field_names = {f.name for f in dataclasses.fields(cls)}
+ return cls(**{k: v for k, v in post_processed_info_dict.items() if k in field_names})
+
+
+@dataclass
+class DatasetInfo:
+ """Information about a dataset.
+
+    `DatasetInfo` documents a dataset, including its name, version, and features.
+ See the constructor arguments and properties for a full list.
+
+ Not all fields are known on construction and may be updated later.
+
+ Attributes:
+ description (`str`):
+ A description of the dataset.
+ citation (`str`):
+ A BibTeX citation of the dataset.
+ homepage (`str`):
+ A URL to the official homepage for the dataset.
+ license (`str`):
+ The dataset's license. It can be the name of the license or a paragraph containing the terms of the license.
+ features ([`Features`], *optional*):
+ The features used to specify the dataset's column types.
+ post_processed (`PostProcessedInfo`, *optional*):
+ Information regarding the resources of a possible post-processing of a dataset. For example, it can contain the information of an index.
+ supervised_keys (`SupervisedKeysData`, *optional*):
+ Specifies the input feature and the label for supervised learning if applicable for the dataset (legacy from TFDS).
+ builder_name (`str`, *optional*):
+ The name of the `GeneratorBasedBuilder` subclass used to create the dataset. Usually matched to the corresponding script name. It is also the snake_case version of the dataset builder class name.
+ config_name (`str`, *optional*):
+ The name of the configuration derived from [`BuilderConfig`].
+ version (`str` or [`Version`], *optional*):
+ The version of the dataset.
+ splits (`dict`, *optional*):
+ The mapping between split name and metadata.
+ download_checksums (`dict`, *optional*):
+            The mapping between the URLs of the dataset's downloaded files and their checksums and sizes.
+ download_size (`int`, *optional*):
+ The size of the files to download to generate the dataset, in bytes.
+ post_processing_size (`int`, *optional*):
+ Size of the dataset in bytes after post-processing, if any.
+ dataset_size (`int`, *optional*):
+ The combined size in bytes of the Arrow tables for all splits.
+ size_in_bytes (`int`, *optional*):
+ The combined size in bytes of all files associated with the dataset (downloaded files + Arrow files).
+ task_templates (`List[TaskTemplate]`, *optional*):
+ The task templates to prepare the dataset for during training and evaluation. Each template casts the dataset's [`Features`] to standardized column names and types as detailed in `datasets.tasks`.
+ **config_kwargs (additional keyword arguments):
+ Keyword arguments to be passed to the [`BuilderConfig`] and used in the [`DatasetBuilder`].
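+
+    Example (an illustrative sketch; a `DatasetInfo` is usually accessed from a loaded dataset rather than built by hand):
+
+    ```py
+    >>> from datasets import load_dataset
+    >>> ds = load_dataset("rotten_tomatoes", split="validation")
+    >>> ds.info.features
+    {'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None),
+     'text': Value(dtype='string', id=None)}
+    ```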
+ """
+
+ # Set in the dataset scripts
+ description: str = dataclasses.field(default_factory=str)
+ citation: str = dataclasses.field(default_factory=str)
+ homepage: str = dataclasses.field(default_factory=str)
+ license: str = dataclasses.field(default_factory=str)
+ features: Optional[Features] = None
+ post_processed: Optional[PostProcessedInfo] = None
+ supervised_keys: Optional[SupervisedKeysData] = None
+ task_templates: Optional[List[TaskTemplate]] = None
+
+ # Set later by the builder
+ builder_name: Optional[str] = None
+ dataset_name: Optional[str] = None # for packaged builders, to be different from builder_name
+ config_name: Optional[str] = None
+ version: Optional[Union[str, Version]] = None
+ # Set later by `download_and_prepare`
+ splits: Optional[dict] = None
+ download_checksums: Optional[dict] = None
+ download_size: Optional[int] = None
+ post_processing_size: Optional[int] = None
+ dataset_size: Optional[int] = None
+ size_in_bytes: Optional[int] = None
+
+ _INCLUDED_INFO_IN_YAML: ClassVar[List[str]] = [
+ "config_name",
+ "download_size",
+ "dataset_size",
+ "features",
+ "splits",
+ ]
+
+ def __post_init__(self):
+ # Convert back to the correct classes when we reload from dict
+ if self.features is not None and not isinstance(self.features, Features):
+ self.features = Features.from_dict(self.features)
+ if self.post_processed is not None and not isinstance(self.post_processed, PostProcessedInfo):
+ self.post_processed = PostProcessedInfo.from_dict(self.post_processed)
+ if self.version is not None and not isinstance(self.version, Version):
+ if isinstance(self.version, str):
+ self.version = Version(self.version)
+ else:
+ self.version = Version.from_dict(self.version)
+ if self.splits is not None and not isinstance(self.splits, SplitDict):
+ self.splits = SplitDict.from_split_dict(self.splits)
+ if self.supervised_keys is not None and not isinstance(self.supervised_keys, SupervisedKeysData):
+ if isinstance(self.supervised_keys, (tuple, list)):
+ self.supervised_keys = SupervisedKeysData(*self.supervised_keys)
+ else:
+ self.supervised_keys = SupervisedKeysData(**self.supervised_keys)
+
+ # Parse and make a list of templates
+ if self.task_templates is not None:
+ if isinstance(self.task_templates, (list, tuple)):
+ templates = [
+ template if isinstance(template, TaskTemplate) else task_template_from_dict(template)
+ for template in self.task_templates
+ ]
+ self.task_templates = [template for template in templates if template is not None]
+ elif isinstance(self.task_templates, TaskTemplate):
+ self.task_templates = [self.task_templates]
+ else:
+ template = task_template_from_dict(self.task_templates)
+ self.task_templates = [template] if template is not None else []
+
+ # Align task templates with features
+ if self.task_templates is not None:
+ self.task_templates = list(self.task_templates)
+ if self.features is not None:
+ self.task_templates = [
+ template.align_with_features(self.features) for template in (self.task_templates)
+ ]
+
+ def write_to_directory(
+ self, dataset_info_dir, pretty_print=False, fs="deprecated", storage_options: Optional[dict] = None
+ ):
+ """Write `DatasetInfo` and license (if present) as JSON files to `dataset_info_dir`.
+
+ Args:
+ dataset_info_dir (`str`):
+ Destination directory.
+ pretty_print (`bool`, defaults to `False`):
+ If `True`, the JSON will be pretty-printed with the indent level of 4.
+ fs (`fsspec.spec.AbstractFileSystem`, *optional*):
+ Instance of the remote filesystem used to download the files from.
+
+                `fs` was deprecated in version 2.9.0 and will be removed in 3.0.0.
+                Please use `storage_options` instead, e.g. `storage_options=fs.storage_options`.
+
+ storage_options (`dict`, *optional*):
+ Key/value pairs to be passed on to the file-system backend, if any.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation")
+ >>> ds.info.write_to_directory("/path/to/directory/")
+ ```
+ """
+ if fs != "deprecated":
+ warnings.warn(
+ "'fs' was deprecated in favor of 'storage_options' in version 2.9.0 and will be removed in 3.0.0.\n"
+ "You can remove this warning by passing 'storage_options=fs.storage_options' instead.",
+ FutureWarning,
+ )
+ storage_options = fs.storage_options
+
+ fs: fsspec.AbstractFileSystem
+ fs, _, _ = fsspec.get_fs_token_paths(dataset_info_dir, storage_options=storage_options)
+ with fs.open(posixpath.join(dataset_info_dir, config.DATASET_INFO_FILENAME), "wb") as f:
+ self._dump_info(f, pretty_print=pretty_print)
+ if self.license:
+ with fs.open(posixpath.join(dataset_info_dir, config.LICENSE_FILENAME), "wb") as f:
+ self._dump_license(f)
+
+ def _dump_info(self, file, pretty_print=False):
+ """Dump info in `file` file-like object open in bytes mode (to support remote files)"""
+ file.write(json.dumps(asdict(self), indent=4 if pretty_print else None).encode("utf-8"))
+
+ def _dump_license(self, file):
+ """Dump license in `file` file-like object open in bytes mode (to support remote files)"""
+ file.write(self.license.encode("utf-8"))
+
+ @classmethod
+ def from_merge(cls, dataset_infos: List["DatasetInfo"]):
+ dataset_infos = [dset_info.copy() for dset_info in dataset_infos if dset_info is not None]
+
+ if len(dataset_infos) > 0 and all(dataset_infos[0] == dset_info for dset_info in dataset_infos):
+ # if all dataset_infos are equal we don't need to merge. Just return the first.
+ return dataset_infos[0]
+
+ description = "\n\n".join(unique_values(info.description for info in dataset_infos)).strip()
+ citation = "\n\n".join(unique_values(info.citation for info in dataset_infos)).strip()
+ homepage = "\n\n".join(unique_values(info.homepage for info in dataset_infos)).strip()
+ license = "\n\n".join(unique_values(info.license for info in dataset_infos)).strip()
+ features = None
+ supervised_keys = None
+ task_templates = None
+
+ # Find common task templates across all dataset infos
+ all_task_templates = [info.task_templates for info in dataset_infos if info.task_templates is not None]
+ if len(all_task_templates) > 1:
+ task_templates = list(set(all_task_templates[0]).intersection(*all_task_templates[1:]))
+ elif len(all_task_templates):
+ task_templates = list(set(all_task_templates[0]))
+ # If no common task templates found, replace empty list with None
+ task_templates = task_templates if task_templates else None
+
+ return cls(
+ description=description,
+ citation=citation,
+ homepage=homepage,
+ license=license,
+ features=features,
+ supervised_keys=supervised_keys,
+ task_templates=task_templates,
+ )
+
+ @classmethod
+ def from_directory(
+ cls, dataset_info_dir: str, fs="deprecated", storage_options: Optional[dict] = None
+ ) -> "DatasetInfo":
+ """Create [`DatasetInfo`] from the JSON file in `dataset_info_dir`.
+
+ This function updates all the dynamically generated fields (num_examples,
+ hash, time of creation,...) of the [`DatasetInfo`].
+
+ This will overwrite all previous metadata.
+
+ Args:
+ dataset_info_dir (`str`):
+ The directory containing the metadata file. This
+ should be the root directory of a specific dataset version.
+ fs (`fsspec.spec.AbstractFileSystem`, *optional*):
+ Instance of the remote filesystem used to download the files from.
+
+                `fs` was deprecated in version 2.9.0 and will be removed in 3.0.0.
+                Please use `storage_options` instead, e.g. `storage_options=fs.storage_options`.
+
+ storage_options (`dict`, *optional*):
+ Key/value pairs to be passed on to the file-system backend, if any.
+
+ Example:
+
+ ```py
+ >>> from datasets import DatasetInfo
+ >>> ds_info = DatasetInfo.from_directory("/path/to/directory/")
+ ```
+ """
+ if fs != "deprecated":
+ warnings.warn(
+ "'fs' was deprecated in favor of 'storage_options' in version 2.9.0 and will be removed in 3.0.0.\n"
+ "You can remove this warning by passing 'storage_options=fs.storage_options' instead.",
+ FutureWarning,
+ )
+ storage_options = fs.storage_options
+
+ fs: fsspec.AbstractFileSystem
+ fs, _, _ = fsspec.get_fs_token_paths(dataset_info_dir, storage_options=storage_options)
+ logger.info(f"Loading Dataset info from {dataset_info_dir}")
+ if not dataset_info_dir:
+ raise ValueError("Calling DatasetInfo.from_directory() with undefined dataset_info_dir.")
+ with fs.open(posixpath.join(dataset_info_dir, config.DATASET_INFO_FILENAME), "r", encoding="utf-8") as f:
+ dataset_info_dict = json.load(f)
+ return cls.from_dict(dataset_info_dict)
+
+ @classmethod
+ def from_dict(cls, dataset_info_dict: dict) -> "DatasetInfo":
+ field_names = {f.name for f in dataclasses.fields(cls)}
+ return cls(**{k: v for k, v in dataset_info_dict.items() if k in field_names})
+
+ def update(self, other_dataset_info: "DatasetInfo", ignore_none=True):
+ self_dict = self.__dict__
+ self_dict.update(
+ **{
+ k: copy.deepcopy(v)
+ for k, v in other_dataset_info.__dict__.items()
+ if (v is not None or not ignore_none)
+ }
+ )
+
+ def copy(self) -> "DatasetInfo":
+ return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
+
+ def _to_yaml_dict(self) -> dict:
+ yaml_dict = {}
+ dataset_info_dict = asdict(self)
+ for key in dataset_info_dict:
+ if key in self._INCLUDED_INFO_IN_YAML:
+ value = getattr(self, key)
+ if hasattr(value, "_to_yaml_list"): # Features, SplitDict
+ yaml_dict[key] = value._to_yaml_list()
+ elif hasattr(value, "_to_yaml_string"): # Version
+ yaml_dict[key] = value._to_yaml_string()
+ else:
+ yaml_dict[key] = value
+ return yaml_dict
+
+ @classmethod
+ def _from_yaml_dict(cls, yaml_data: dict) -> "DatasetInfo":
+ yaml_data = copy.deepcopy(yaml_data)
+ if yaml_data.get("features") is not None:
+ yaml_data["features"] = Features._from_yaml_list(yaml_data["features"])
+ if yaml_data.get("splits") is not None:
+ yaml_data["splits"] = SplitDict._from_yaml_list(yaml_data["splits"])
+ field_names = {f.name for f in dataclasses.fields(cls)}
+ return cls(**{k: v for k, v in yaml_data.items() if k in field_names})
+
+
+class DatasetInfosDict(Dict[str, DatasetInfo]):
+ def write_to_directory(self, dataset_infos_dir, overwrite=False, pretty_print=False) -> None:
+ total_dataset_infos = {}
+ dataset_infos_path = os.path.join(dataset_infos_dir, config.DATASETDICT_INFOS_FILENAME)
+ dataset_readme_path = os.path.join(dataset_infos_dir, config.REPOCARD_FILENAME)
+ if not overwrite:
+ total_dataset_infos = self.from_directory(dataset_infos_dir)
+ total_dataset_infos.update(self)
+ if os.path.exists(dataset_infos_path):
+ # for backward compatibility, let's update the JSON file if it exists
+ with open(dataset_infos_path, "w", encoding="utf-8") as f:
+ dataset_infos_dict = {
+ config_name: asdict(dset_info) for config_name, dset_info in total_dataset_infos.items()
+ }
+ json.dump(dataset_infos_dict, f, indent=4 if pretty_print else None)
+ # Dump the infos in the YAML part of the README.md file
+ if os.path.exists(dataset_readme_path):
+ dataset_card = DatasetCard.load(dataset_readme_path)
+ dataset_card_data = dataset_card.data
+ else:
+ dataset_card = None
+ dataset_card_data = DatasetCardData()
+ if total_dataset_infos:
+ total_dataset_infos.to_dataset_card_data(dataset_card_data)
+ dataset_card = (
+ DatasetCard("---\n" + str(dataset_card_data) + "\n---\n") if dataset_card is None else dataset_card
+ )
+ dataset_card.save(Path(dataset_readme_path))
+
+ @classmethod
+ def from_directory(cls, dataset_infos_dir) -> "DatasetInfosDict":
+ logger.info(f"Loading Dataset Infos from {dataset_infos_dir}")
+ # Load the info from the YAML part of README.md
+ if os.path.exists(os.path.join(dataset_infos_dir, config.REPOCARD_FILENAME)):
+ dataset_card_data = DatasetCard.load(Path(dataset_infos_dir) / config.REPOCARD_FILENAME).data
+ if "dataset_info" in dataset_card_data:
+ return cls.from_dataset_card_data(dataset_card_data)
+ if os.path.exists(os.path.join(dataset_infos_dir, config.DATASETDICT_INFOS_FILENAME)):
+ # this is just to have backward compatibility with dataset_infos.json files
+ with open(os.path.join(dataset_infos_dir, config.DATASETDICT_INFOS_FILENAME), encoding="utf-8") as f:
+ return cls(
+ {
+ config_name: DatasetInfo.from_dict(dataset_info_dict)
+ for config_name, dataset_info_dict in json.load(f).items()
+ }
+ )
+ else:
+ return cls()
+
+ @classmethod
+ def from_dataset_card_data(cls, dataset_card_data: DatasetCardData) -> "DatasetInfosDict":
+ if isinstance(dataset_card_data.get("dataset_info"), (list, dict)):
+ if isinstance(dataset_card_data["dataset_info"], list):
+ return cls(
+ {
+ dataset_info_yaml_dict.get("config_name", "default"): DatasetInfo._from_yaml_dict(
+ dataset_info_yaml_dict
+ )
+ for dataset_info_yaml_dict in dataset_card_data["dataset_info"]
+ }
+ )
+ else:
+ dataset_info = DatasetInfo._from_yaml_dict(dataset_card_data["dataset_info"])
+ dataset_info.config_name = dataset_card_data["dataset_info"].get("config_name", "default")
+ return cls({dataset_info.config_name: dataset_info})
+ else:
+ return cls()
+
+ def to_dataset_card_data(self, dataset_card_data: DatasetCardData) -> None:
+ if self:
+ # first get existing metadata info
+ if "dataset_info" in dataset_card_data and isinstance(dataset_card_data["dataset_info"], dict):
+ dataset_metadata_infos = {
+ dataset_card_data["dataset_info"].get("config_name", "default"): dataset_card_data["dataset_info"]
+ }
+ elif "dataset_info" in dataset_card_data and isinstance(dataset_card_data["dataset_info"], list):
+ dataset_metadata_infos = {
+ config_metadata["config_name"]: config_metadata
+ for config_metadata in dataset_card_data["dataset_info"]
+ }
+ else:
+ dataset_metadata_infos = {}
+ # update/rewrite existing metadata info with the one to dump
+ total_dataset_infos = {
+ **dataset_metadata_infos,
+ **{config_name: dset_info._to_yaml_dict() for config_name, dset_info in self.items()},
+ }
+ # the config_name from the dataset_infos_dict takes over the config_name of the DatasetInfo
+ for config_name, dset_info_yaml_dict in total_dataset_infos.items():
+ dset_info_yaml_dict["config_name"] = config_name
+ if len(total_dataset_infos) == 1:
+ # use a struct instead of a list of configurations, since there's only one
+ dataset_card_data["dataset_info"] = next(iter(total_dataset_infos.values()))
+ config_name = dataset_card_data["dataset_info"].pop("config_name", None)
+ if config_name != "default":
+ # if config_name is not "default" preserve it and put at the first position
+ dataset_card_data["dataset_info"] = {
+ "config_name": config_name,
+ **dataset_card_data["dataset_info"],
+ }
+ else:
+ dataset_card_data["dataset_info"] = []
+ for config_name, dataset_info_yaml_dict in sorted(total_dataset_infos.items()):
+ # add the config_name field in first position
+ dataset_info_yaml_dict.pop("config_name", None)
+ dataset_info_yaml_dict = {"config_name": config_name, **dataset_info_yaml_dict}
+ dataset_card_data["dataset_info"].append(dataset_info_yaml_dict)
+
+
+@dataclass
+class MetricInfo:
+ """Information about a metric.
+
+ `MetricInfo` documents a metric, including its name, version, and features.
+ See the constructor arguments and properties for a full list.
+
+ Note: Not all fields are known on construction and may be updated later.
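+
+    Example (an illustrative sketch; a `MetricInfo` is usually accessed from a loaded metric):
+
+    ```py
+    >>> from datasets import load_metric
+    >>> metric = load_metric("accuracy")
+    >>> info = metric.info
+    >>> description = info.description
+    ```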
+ """
+
+ # Set in the dataset scripts
+ description: str
+ citation: str
+ features: Features
+ inputs_description: str = dataclasses.field(default_factory=str)
+ homepage: str = dataclasses.field(default_factory=str)
+ license: str = dataclasses.field(default_factory=str)
+ codebase_urls: List[str] = dataclasses.field(default_factory=list)
+ reference_urls: List[str] = dataclasses.field(default_factory=list)
+ streamable: bool = False
+ format: Optional[str] = None
+
+ # Set later by the builder
+ metric_name: Optional[str] = None
+ config_name: Optional[str] = None
+ experiment_id: Optional[str] = None
+
+ def __post_init__(self):
+ if self.format is not None:
+ for key, value in self.features.items():
+ if not isinstance(value, Value):
+ raise ValueError(
+                        f"When using '{self.format}' format, all features should be a `datasets.Value` feature. "
+ f"Here {key} is an instance of {value.__class__.__name__}"
+ )
+
+ def write_to_directory(self, metric_info_dir, pretty_print=False):
+ """Write `MetricInfo` as JSON to `metric_info_dir`.
+        Also save the license separately in LICENSE.
+ If `pretty_print` is True, the JSON will be pretty-printed with the indent level of 4.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_metric
+ >>> metric = load_metric("accuracy")
+ >>> metric.info.write_to_directory("/path/to/directory/")
+ ```
+ """
+ with open(os.path.join(metric_info_dir, config.METRIC_INFO_FILENAME), "w", encoding="utf-8") as f:
+ json.dump(asdict(self), f, indent=4 if pretty_print else None)
+
+ if self.license:
+ with open(os.path.join(metric_info_dir, config.LICENSE_FILENAME), "w", encoding="utf-8") as f:
+ f.write(self.license)
+
+ @classmethod
+ def from_directory(cls, metric_info_dir) -> "MetricInfo":
+ """Create MetricInfo from the JSON file in `metric_info_dir`.
+
+ Args:
+ metric_info_dir: `str` The directory containing the metadata file. This
+ should be the root directory of a specific dataset version.
+
+ Example:
+
+ ```py
+ >>> from datasets import MetricInfo
+ >>> metric_info = MetricInfo.from_directory("/path/to/directory/")
+ ```
+ """
+ logger.info(f"Loading Metric info from {metric_info_dir}")
+ if not metric_info_dir:
+ raise ValueError("Calling MetricInfo.from_directory() with undefined metric_info_dir.")
+
+ with open(os.path.join(metric_info_dir, config.METRIC_INFO_FILENAME), encoding="utf-8") as f:
+ metric_info_dict = json.load(f)
+ return cls.from_dict(metric_info_dict)
+
+ @classmethod
+ def from_dict(cls, metric_info_dict: dict) -> "MetricInfo":
+ field_names = {f.name for f in dataclasses.fields(cls)}
+ return cls(**{k: v for k, v in metric_info_dict.items() if k in field_names})
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/inspect.py b/evalkit_tf437/lib/python3.10/site-packages/datasets/inspect.py
new file mode 100644
index 0000000000000000000000000000000000000000..f976073ac977f04c2023139685d245e8bda58b90
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/datasets/inspect.py
@@ -0,0 +1,581 @@
+# Copyright 2020 The HuggingFace Datasets Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Lint as: python3
+"""List and inspect datasets."""
+
+import inspect
+import os
+import shutil
+import warnings
+from pathlib import Path, PurePath
+from typing import Dict, List, Mapping, Optional, Sequence, Union
+
+import huggingface_hub
+
+from . import config
+from .download.download_config import DownloadConfig
+from .download.download_manager import DownloadMode
+from .download.streaming_download_manager import StreamingDownloadManager
+from .info import DatasetInfo
+from .load import (
+ dataset_module_factory,
+ get_dataset_builder_class,
+ import_main_class,
+ load_dataset_builder,
+ metric_module_factory,
+)
+from .utils.deprecation_utils import deprecated
+from .utils.file_utils import relative_to_absolute_path
+from .utils.logging import get_logger
+from .utils.version import Version
+
+
+logger = get_logger(__name__)
+
+
+class SplitsNotFoundError(ValueError):
+ pass
+
+
+@deprecated("Use 'huggingface_hub.list_datasets' instead.")
+def list_datasets(with_community_datasets=True, with_details=False):
+    """List all the dataset scripts available on the Hugging Face Hub.
+
+ Args:
+ with_community_datasets (`bool`, *optional*, defaults to `True`):
+ Include the community provided datasets.
+ with_details (`bool`, *optional*, defaults to `False`):
+ Return the full details on the datasets instead of only the short name.
+
+ Example:
+
+ ```py
+ >>> from datasets import list_datasets
+ >>> list_datasets()
+ ['acronym_identification',
+ 'ade_corpus_v2',
+ 'adversarial_qa',
+ 'aeslc',
+ 'afrikaans_ner_corpus',
+ 'ag_news',
+ ...
+ ]
+ ```
+ """
+ datasets = huggingface_hub.list_datasets(full=with_details)
+ if not with_community_datasets:
+ datasets = [dataset for dataset in datasets if "/" not in dataset.id]
+ if not with_details:
+ datasets = [dataset.id for dataset in datasets]
+ return list(datasets)
+
+
+@deprecated(
+ "Use 'evaluate.list_evaluation_modules' instead, from the new library 🤗 Evaluate: https://huggingface.co/docs/evaluate"
+)
+def list_metrics(with_community_metrics=True, with_details=False):
+    """List all the metric scripts available on the Hugging Face Hub.
+
+    Use `evaluate.list_evaluation_modules` instead, from the new library 🤗 Evaluate: https://huggingface.co/docs/evaluate
+
+ Args:
+ with_community_metrics (:obj:`bool`, optional, default ``True``): Include the community provided metrics.
+ with_details (:obj:`bool`, optional, default ``False``): Return the full details on the metrics instead of only the short name.
+
+ Example:
+
+ ```py
+ >>> from datasets import list_metrics
+ >>> list_metrics()
+ ['accuracy',
+ 'bertscore',
+ 'bleu',
+ 'bleurt',
+ 'cer',
+ 'chrf',
+ ...
+ ]
+ ```
+ """
+ metrics = huggingface_hub.list_metrics()
+ if not with_community_metrics:
+ metrics = [metric for metric in metrics if "/" not in metric.id]
+ if not with_details:
+ metrics = [metric.id for metric in metrics]
+ return metrics
+
+
+@deprecated("Clone the dataset repository from the Hugging Face Hub instead.")
+def inspect_dataset(path: str, local_path: str, download_config: Optional[DownloadConfig] = None, **download_kwargs):
+ """
+    Allow inspection/modification of a dataset script by copying it to the local drive at `local_path`.
+
+ Args:
+ path (`str`): Path to the dataset processing script with the dataset builder. Can be either:
+
+ - a local path to processing script or the directory containing the script (if the script has the same name
+ as the directory),
+ e.g. `'./dataset/squad'` or `'./dataset/squad/squad.py'`.
+ - a dataset identifier on the Hugging Face Hub (list all available datasets and ids with [`list_datasets`])
+ e.g. `'squad'`, `'glue'` or `'openai/webtext'`.
+ local_path (`str`):
+ Path to the local folder to copy the dataset script to.
+ download_config ([`DownloadConfig`], *optional*):
+ Specific download configuration parameters.
+ **download_kwargs (additional keyword arguments):
+ Optional arguments for [`DownloadConfig`] which will override
+ the attributes of `download_config` if supplied.
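+
+    Example (an illustrative sketch; the dataset name and destination folder are placeholders):
+
+    ```py
+    >>> from datasets import inspect_dataset
+    >>> inspect_dataset("rotten_tomatoes", "path/to/local/folder")  # doctest: +SKIP
+    ```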
+ """
+ if download_config is None:
+ download_config = DownloadConfig(**download_kwargs)
+ if os.path.isfile(path):
+ path = str(Path(path).parent)
+ if os.path.isdir(path):
+ shutil.copytree(path, local_path, dirs_exist_ok=True)
+ else:
+ huggingface_hub.HfApi(endpoint=config.HF_ENDPOINT, token=download_config.token).snapshot_download(
+ repo_id=path, repo_type="dataset", local_dir=local_path, force_download=download_config.force_download
+ )
+ print(
+ f"The dataset {path} can be inspected at {local_path}. "
+ f'You can modify this loading script if it has one and use it with `datasets.load_dataset("{PurePath(local_path).as_posix()}")`.'
+ )
+
+
+@deprecated(
+ "Use 'evaluate.inspect_evaluation_module' instead, from the new library 🤗 Evaluate: https://huggingface.co/docs/evaluate"
+)
+def inspect_metric(path: str, local_path: str, download_config: Optional[DownloadConfig] = None, **download_kwargs):
+ r"""
+    Allow inspection/modification of a metric script by copying it to the local drive at `local_path`.
+
+    Use `evaluate.inspect_evaluation_module` instead, from the new library 🤗 Evaluate: https://huggingface.co/docs/evaluate
+
+ Args:
+ path (``str``): path to the dataset processing script with the dataset builder. Can be either:
+
+ - a local path to processing script or the directory containing the script (if the script has the same name as the directory),
+ e.g. ``'./dataset/squad'`` or ``'./dataset/squad/squad.py'``
+ - a dataset identifier on the Hugging Face Hub (list all available datasets and ids with ``datasets.list_datasets()``)
+ e.g. ``'squad'``, ``'glue'`` or ``'openai/webtext'``
+        local_path (``str``): path to the local folder to copy the metric script to.
+ download_config (Optional ``datasets.DownloadConfig``): specific download configuration parameters.
+ **download_kwargs (additional keyword arguments): optional attributes for DownloadConfig() which will override the attributes in download_config if supplied.
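+
+    Example (an illustrative sketch; the metric name and destination folder are placeholders):
+
+    ```py
+    >>> from datasets import inspect_metric
+    >>> inspect_metric("accuracy", "path/to/local/folder")  # doctest: +SKIP
+    ```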
+ """
+ metric_module = metric_module_factory(path, download_config=download_config, **download_kwargs)
+ metric_cls = import_main_class(metric_module.module_path, dataset=False)
+ module_source_path = inspect.getsourcefile(metric_cls)
+ module_source_dirpath = os.path.dirname(module_source_path)
+ for dirpath, dirnames, filenames in os.walk(module_source_dirpath):
+ dst_dirpath = os.path.join(local_path, os.path.relpath(dirpath, module_source_dirpath))
+ os.makedirs(dst_dirpath, exist_ok=True)
+ # skipping hidden directories; prune the search
+ dirnames[:] = [dirname for dirname in dirnames if not dirname.startswith((".", "__"))]
+ for filename in filenames:
+ shutil.copy2(os.path.join(dirpath, filename), os.path.join(dst_dirpath, filename))
+ shutil.copystat(dirpath, dst_dirpath)
+ local_path = relative_to_absolute_path(local_path)
+ print(
+ f"The processing scripts for metric {path} can be inspected at {local_path}. "
+ f"The main class is in {module_source_dirpath}. "
+        f'You can modify this processing script and use it with `datasets.load_metric("{PurePath(local_path).as_posix()}")`.'
+ )
+
+
+def get_dataset_infos(
+ path: str,
+ data_files: Optional[Union[Dict, List, str]] = None,
+ download_config: Optional[DownloadConfig] = None,
+ download_mode: Optional[Union[DownloadMode, str]] = None,
+ revision: Optional[Union[str, Version]] = None,
+ token: Optional[Union[bool, str]] = None,
+ use_auth_token="deprecated",
+ **config_kwargs,
+):
+ """Get the meta information about a dataset, returned as a dict mapping config name to DatasetInfoDict.
+
+ Args:
+ path (`str`): path to the dataset processing script with the dataset builder. Can be either:
+
+ - a local path to processing script or the directory containing the script (if the script has the same name as the directory),
+ e.g. `'./dataset/squad'` or `'./dataset/squad/squad.py'`
+ - a dataset identifier on the Hugging Face Hub (list all available datasets and ids with [`datasets.list_datasets`])
+            e.g. `'squad'`, `'glue'` or `'openai/webtext'`
+ revision (`Union[str, datasets.Version]`, *optional*):
+ If specified, the dataset module will be loaded from the datasets repository at this version.
+ By default:
+ - it is set to the local version of the lib.
+ - it will also try to load it from the main branch if it's not available at the local version of the lib.
+ Specifying a version that is different from your local version of the lib might cause compatibility issues.
+ download_config ([`DownloadConfig`], *optional*):
+ Specific download configuration parameters.
+ download_mode ([`DownloadMode`] or `str`, defaults to `REUSE_DATASET_IF_EXISTS`):
+ Download/generate mode.
+ data_files (`Union[Dict, List, str]`, *optional*):
+ Defining the data_files of the dataset configuration.
+ token (`str` or `bool`, *optional*):
+ Optional string or boolean to use as Bearer token for remote files on the Datasets Hub.
+ If `True`, or not specified, will get token from `"~/.huggingface"`.
+ use_auth_token (`str` or `bool`, *optional*):
+ Optional string or boolean to use as Bearer token for remote files on the Datasets Hub.
+ If `True`, or not specified, will get token from `"~/.huggingface"`.
+
+            `use_auth_token` was deprecated in favor of `token` in version 2.14.0 and will be removed in 3.0.0.
+
+ **config_kwargs (additional keyword arguments):
+ Optional attributes for builder class which will override the attributes if supplied.
+
+ Example:
+
+ ```py
+ >>> from datasets import get_dataset_infos
+ >>> get_dataset_infos('rotten_tomatoes')
+ {'default': DatasetInfo(description="Movie Review Dataset.\nThis is a dataset of containing 5,331 positive and 5,331 negative processed\nsentences from Rotten Tomatoes movie reviews...), ...}
+ ```
+ """
+ if use_auth_token != "deprecated":
+ warnings.warn(
+ "'use_auth_token' was deprecated in favor of 'token' in version 2.14.0 and will be removed in 3.0.0.\n"
+ "You can remove this warning by passing 'token=' instead.",
+ FutureWarning,
+ )
+ token = use_auth_token
+
+ config_names = get_dataset_config_names(
+ path=path,
+ revision=revision,
+ download_config=download_config,
+ download_mode=download_mode,
+ data_files=data_files,
+ token=token,
+ )
+ return {
+ config_name: get_dataset_config_info(
+ path=path,
+ config_name=config_name,
+ data_files=data_files,
+ download_config=download_config,
+ download_mode=download_mode,
+ revision=revision,
+ token=token,
+ **config_kwargs,
+ )
+ for config_name in config_names
+ }
+
+
+def get_dataset_config_names(
+ path: str,
+ revision: Optional[Union[str, Version]] = None,
+ download_config: Optional[DownloadConfig] = None,
+ download_mode: Optional[Union[DownloadMode, str]] = None,
+ dynamic_modules_path: Optional[str] = None,
+ data_files: Optional[Union[Dict, List, str]] = None,
+ **download_kwargs,
+):
+ """Get the list of available config names for a particular dataset.
+
+ Args:
+ path (`str`): path to the dataset processing script with the dataset builder. Can be either:
+
+ - a local path to processing script or the directory containing the script (if the script has the same name as the directory),
+ e.g. `'./dataset/squad'` or `'./dataset/squad/squad.py'`
+ - a dataset identifier on the Hugging Face Hub (list all available datasets and ids with [`datasets.list_datasets`])
+ e.g. `'squad'`, `'glue'` or `'openai/webtext'`
+ revision (`Union[str, datasets.Version]`, *optional*):
+ If specified, the dataset module will be loaded from the datasets repository at this version.
+ By default:
+ - it is set to the local version of the lib.
+ - it will also try to load it from the main branch if it's not available at the local version of the lib.
+ Specifying a version that is different from your local version of the lib might cause compatibility issues.
+ download_config ([`DownloadConfig`], *optional*):
+ Specific download configuration parameters.
+ download_mode ([`DownloadMode`] or `str`, defaults to `REUSE_DATASET_IF_EXISTS`):
+ Download/generate mode.
+ dynamic_modules_path (`str`, defaults to `~/.cache/huggingface/modules/datasets_modules`):
+ Optional path to the directory in which the dynamic modules are saved. It must have been initialized with `init_dynamic_modules`.
+ By default the datasets and metrics are stored inside the `datasets_modules` module.
+ data_files (`Union[Dict, List, str]`, *optional*):
+ Defining the data_files of the dataset configuration.
+ **download_kwargs (additional keyword arguments):
+ Optional attributes for [`DownloadConfig`] which will override the attributes in `download_config` if supplied,
+ for example `token`.
+
+ Example:
+
+ ```py
+ >>> from datasets import get_dataset_config_names
+ >>> get_dataset_config_names("glue")
+ ['cola',
+ 'sst2',
+ 'mrpc',
+ 'qqp',
+ 'stsb',
+ 'mnli',
+ 'mnli_mismatched',
+ 'mnli_matched',
+ 'qnli',
+ 'rte',
+ 'wnli',
+ 'ax']
+ ```
+ """
+ dataset_module = dataset_module_factory(
+ path,
+ revision=revision,
+ download_config=download_config,
+ download_mode=download_mode,
+ dynamic_modules_path=dynamic_modules_path,
+ data_files=data_files,
+ **download_kwargs,
+ )
+ builder_cls = get_dataset_builder_class(dataset_module, dataset_name=os.path.basename(path))
+ return list(builder_cls.builder_configs.keys()) or [
+ dataset_module.builder_kwargs.get("config_name", builder_cls.DEFAULT_CONFIG_NAME or "default")
+ ]
+
+
+def get_dataset_default_config_name(
+ path: str,
+ revision: Optional[Union[str, Version]] = None,
+ download_config: Optional[DownloadConfig] = None,
+ download_mode: Optional[Union[DownloadMode, str]] = None,
+ dynamic_modules_path: Optional[str] = None,
+ data_files: Optional[Union[Dict, List, str]] = None,
+ **download_kwargs,
+) -> Optional[str]:
+ """Get the default config name for a particular dataset.
+
+ Args:
+ path (`str`): path to the dataset processing script with the dataset builder. Can be either:
+
+ - a local path to processing script or the directory containing the script (if the script has the same name as the directory),
+ e.g. `'./dataset/squad'` or `'./dataset/squad/squad.py'`
+ - a dataset identifier on the Hugging Face Hub (list all available datasets and ids with [`datasets.list_datasets`])
+ e.g. `'squad'`, `'glue'` or `'openai/webtext'`
+ revision (`Union[str, datasets.Version]`, *optional*):
+ If specified, the dataset module will be loaded from the datasets repository at this version.
+ By default:
+ - it is set to the local version of the lib.
+ - it will also try to load it from the main branch if it's not available at the local version of the lib.
+ Specifying a version that is different from your local version of the lib might cause compatibility issues.
+ download_config ([`DownloadConfig`], *optional*):
+ Specific download configuration parameters.
+ download_mode ([`DownloadMode`] or `str`, defaults to `REUSE_DATASET_IF_EXISTS`):
+ Download/generate mode.
+ dynamic_modules_path (`str`, defaults to `~/.cache/huggingface/modules/datasets_modules`):
+ Optional path to the directory in which the dynamic modules are saved. It must have been initialized with `init_dynamic_modules`.
+ By default the datasets and metrics are stored inside the `datasets_modules` module.
+ data_files (`Union[Dict, List, str]`, *optional*):
+ Defining the data_files of the dataset configuration.
+ **download_kwargs (additional keyword arguments):
+ Optional attributes for [`DownloadConfig`] which will override the attributes in `download_config` if supplied,
+ for example `token`.
+
+ Returns:
+ Optional[str]: the default config name if the dataset has one, otherwise `None`.
+
+ Example:
+
+ ```py
+ >>> from datasets import get_dataset_default_config_name
+ >>> get_dataset_default_config_name("openbookqa")
+ 'main'
+ ```
+ """
+ dataset_module = dataset_module_factory(
+ path,
+ revision=revision,
+ download_config=download_config,
+ download_mode=download_mode,
+ dynamic_modules_path=dynamic_modules_path,
+ data_files=data_files,
+ **download_kwargs,
+ )
+ builder_cls = get_dataset_builder_class(dataset_module, dataset_name=os.path.basename(path))
+ builder_configs = list(builder_cls.builder_configs.keys())
+ if builder_configs:
+ default_config_name = builder_configs[0] if len(builder_configs) == 1 else None
+ else:
+ default_config_name = "default"
+ return builder_cls.DEFAULT_CONFIG_NAME or default_config_name
+
+
+def get_dataset_config_info(
+ path: str,
+ config_name: Optional[str] = None,
+ data_files: Optional[Union[str, Sequence[str], Mapping[str, Union[str, Sequence[str]]]]] = None,
+ download_config: Optional[DownloadConfig] = None,
+ download_mode: Optional[Union[DownloadMode, str]] = None,
+ revision: Optional[Union[str, Version]] = None,
+ token: Optional[Union[bool, str]] = None,
+ use_auth_token="deprecated",
+ **config_kwargs,
+) -> DatasetInfo:
+ """Get the meta information (DatasetInfo) about a dataset for a particular config
+
+ Args:
+ path (`str`): path to the dataset processing script with the dataset builder. Can be either:
+
+ - a local path to processing script or the directory containing the script (if the script has the same name as the directory),
+ e.g. `'./dataset/squad'` or `'./dataset/squad/squad.py'`
+ - a dataset identifier on the Hugging Face Hub (list all available datasets and ids with [`datasets.list_datasets`])
+ e.g. `'squad'`, `'glue'` or `'openai/webtext'`
+ config_name (`str`, *optional*):
+ Defining the name of the dataset configuration.
+ data_files (`str` or `Sequence` or `Mapping`, *optional*):
+ Path(s) to source data file(s).
+ download_config ([`DownloadConfig`], *optional*):
+ Specific download configuration parameters.
+ download_mode ([`DownloadMode`] or `str`, defaults to `REUSE_DATASET_IF_EXISTS`):
+ Download/generate mode.
+ revision ([`Version`] or `str`, *optional*):
+ Version of the dataset script to load.
+ As datasets have their own git repository on the Datasets Hub, the default version "main" corresponds to their "main" branch.
+ You can specify a different version than the default "main" by using a commit SHA or a git tag of the dataset repository.
+ token (`str` or `bool`, *optional*):
+ Optional string or boolean to use as Bearer token for remote files on the Datasets Hub.
+ If `True`, or not specified, will get token from `"~/.huggingface"`.
+ use_auth_token (`str` or `bool`, *optional*):
+ Optional string or boolean to use as Bearer token for remote files on the Datasets Hub.
+ If `True`, or not specified, will get token from `"~/.huggingface"`.
+
+ <Deprecated version="2.14.0">
+
+ `use_auth_token` was deprecated in favor of `token` in version 2.14.0 and will be removed in 3.0.0.
+
+ </Deprecated>
+
+ **config_kwargs (additional keyword arguments):
+ Optional attributes for builder class which will override the attributes if supplied.
+
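+ Example (illustrative; the exact `DatasetInfo` contents depend on the dataset and are abbreviated here):
+
+ ```py
+ >>> from datasets import get_dataset_config_info
+ >>> get_dataset_config_info('rotten_tomatoes', config_name='default')
+ DatasetInfo(description="Movie Review Dataset.\nThis is a dataset of containing 5,331 positive and 5,331 negative processed\nsentences from Rotten Tomatoes movie reviews...", ...)
+ ```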
+ """
+ if use_auth_token != "deprecated":
+ warnings.warn(
+ "'use_auth_token' was deprecated in favor of 'token' in version 2.14.0 and will be removed in 3.0.0.\n"
+ "You can remove this warning by passing 'token=' instead.",
+ FutureWarning,
+ )
+ token = use_auth_token
+
+ builder = load_dataset_builder(
+ path,
+ name=config_name,
+ data_files=data_files,
+ download_config=download_config,
+ download_mode=download_mode,
+ revision=revision,
+ token=token,
+ **config_kwargs,
+ )
+ info = builder.info
+ if info.splits is None:
+ download_config = download_config.copy() if download_config else DownloadConfig()
+ if token is not None:
+ download_config.token = token
+ builder._check_manual_download(
+ StreamingDownloadManager(base_path=builder.base_path, download_config=download_config)
+ )
+ try:
+ info.splits = {
+ split_generator.name: {"name": split_generator.name, "dataset_name": path}
+ for split_generator in builder._split_generators(
+ StreamingDownloadManager(base_path=builder.base_path, download_config=download_config)
+ )
+ }
+ except Exception as err:
+ raise SplitsNotFoundError("The split names could not be parsed from the dataset config.") from err
+ return info
+
+
+def get_dataset_split_names(
+ path: str,
+ config_name: Optional[str] = None,
+ data_files: Optional[Union[str, Sequence[str], Mapping[str, Union[str, Sequence[str]]]]] = None,
+ download_config: Optional[DownloadConfig] = None,
+ download_mode: Optional[Union[DownloadMode, str]] = None,
+ revision: Optional[Union[str, Version]] = None,
+ token: Optional[Union[bool, str]] = None,
+ use_auth_token="deprecated",
+ **config_kwargs,
+):
+ """Get the list of available splits for a particular config and dataset.
+
+ Args:
+ path (`str`): path to the dataset processing script with the dataset builder. Can be either:
+
+ - a local path to processing script or the directory containing the script (if the script has the same name as the directory),
+ e.g. `'./dataset/squad'` or `'./dataset/squad/squad.py'`
+ - a dataset identifier on the Hugging Face Hub (list all available datasets and ids with [`datasets.list_datasets`])
+ e.g. `'squad'`, `'glue'` or `'openai/webtext'`
+ config_name (`str`, *optional*):
+ Defining the name of the dataset configuration.
+ data_files (`str` or `Sequence` or `Mapping`, *optional*):
+ Path(s) to source data file(s).
+ download_config ([`DownloadConfig`], *optional*):
+ Specific download configuration parameters.
+ download_mode ([`DownloadMode`] or `str`, defaults to `REUSE_DATASET_IF_EXISTS`):
+ Download/generate mode.
+ revision ([`Version`] or `str`, *optional*):
+ Version of the dataset script to load.
+ As datasets have their own git repository on the Datasets Hub, the default version "main" corresponds to their "main" branch.
+ You can specify a different version than the default "main" by using a commit SHA or a git tag of the dataset repository.
+ token (`str` or `bool`, *optional*):
+ Optional string or boolean to use as Bearer token for remote files on the Datasets Hub.
+ If `True`, or not specified, will get token from `"~/.huggingface"`.
+ use_auth_token (`str` or `bool`, *optional*):
+ Optional string or boolean to use as Bearer token for remote files on the Datasets Hub.
+ If `True`, or not specified, will get token from `"~/.huggingface"`.
+
+ <Deprecated version="2.14.0">
+
+ `use_auth_token` was deprecated in favor of `token` in version 2.14.0 and will be removed in 3.0.0.
+
+ </Deprecated>
+
+ **config_kwargs (additional keyword arguments):
+ Optional attributes for builder class which will override the attributes if supplied.
+
+ Example:
+
+ ```py
+ >>> from datasets import get_dataset_split_names
+ >>> get_dataset_split_names('rotten_tomatoes')
+ ['train', 'validation', 'test']
+ ```
+ """
+ if use_auth_token != "deprecated":
+ warnings.warn(
+ "'use_auth_token' was deprecated in favor of 'token' in version 2.14.0 and will be removed in 3.0.0.\n"
+ "You can remove this warning by passing 'token=' instead.",
+ FutureWarning,
+ )
+ token = use_auth_token
+
+ info = get_dataset_config_info(
+ path,
+ config_name=config_name,
+ data_files=data_files,
+ download_config=download_config,
+ download_mode=download_mode,
+ revision=revision,
+ token=token,
+ **config_kwargs,
+ )
+ return list(info.splits.keys())
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/io/__pycache__/csv.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/datasets/io/__pycache__/csv.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b4864000dd3d1446b7f29e6ff6f67a67ee82aea2
Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/datasets/io/__pycache__/csv.cpython-310.pyc differ
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/io/__pycache__/generator.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/datasets/io/__pycache__/generator.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5a4a917d4da90b02a32b3166676aa8ef8fa05c8b
Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/datasets/io/__pycache__/generator.cpython-310.pyc differ
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/io/__pycache__/json.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/datasets/io/__pycache__/json.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..84469f50b58d379f19e4774bb8ea19f95786a9f9
Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/datasets/io/__pycache__/json.cpython-310.pyc differ
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/io/__pycache__/parquet.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/datasets/io/__pycache__/parquet.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e16b7a7f416ca590212e9572f2fb555666b4503a
Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/datasets/io/__pycache__/parquet.cpython-310.pyc differ
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/io/__pycache__/sql.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/datasets/io/__pycache__/sql.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..943fdc0b392f3e4e664df36e43ed5935376eb0ad
Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/datasets/io/__pycache__/sql.cpython-310.pyc differ
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/io/__pycache__/text.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/datasets/io/__pycache__/text.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..dad06104fc9dbf4940e5717045af0df2a6aff3d0
Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/datasets/io/__pycache__/text.cpython-310.pyc differ
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/io/abc.py b/evalkit_tf437/lib/python3.10/site-packages/datasets/io/abc.py
new file mode 100644
index 0000000000000000000000000000000000000000..a1913cc20e3fd748ef912e2fb3d7c1e18f16ac8c
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/datasets/io/abc.py
@@ -0,0 +1,53 @@
+from abc import ABC, abstractmethod
+from typing import Optional, Union
+
+from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
+from ..utils.typing import NestedDataStructureLike, PathLike
+
+
+class AbstractDatasetReader(ABC):
+ def __init__(
+ self,
+ path_or_paths: Optional[NestedDataStructureLike[PathLike]] = None,
+ split: Optional[NamedSplit] = None,
+ features: Optional[Features] = None,
+ cache_dir: str = None,
+ keep_in_memory: bool = False,
+ streaming: bool = False,
+ num_proc: Optional[int] = None,
+ **kwargs,
+ ):
+ self.path_or_paths = path_or_paths
+ self.split = split if split or isinstance(path_or_paths, dict) else "train"
+ self.features = features
+ self.cache_dir = cache_dir
+ self.keep_in_memory = keep_in_memory
+ self.streaming = streaming
+ self.num_proc = num_proc
+ self.kwargs = kwargs
+
+ @abstractmethod
+ def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
+ pass
+
+
+class AbstractDatasetInputStream(ABC):
+ def __init__(
+ self,
+ features: Optional[Features] = None,
+ cache_dir: str = None,
+ keep_in_memory: bool = False,
+ streaming: bool = False,
+ num_proc: Optional[int] = None,
+ **kwargs,
+ ):
+ self.features = features
+ self.cache_dir = cache_dir
+ self.keep_in_memory = keep_in_memory
+ self.streaming = streaming
+ self.num_proc = num_proc
+ self.kwargs = kwargs
+
+ @abstractmethod
+ def read(self) -> Union[Dataset, IterableDataset]:
+ pass
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/io/generator.py b/evalkit_tf437/lib/python3.10/site-packages/datasets/io/generator.py
new file mode 100644
index 0000000000000000000000000000000000000000..3cb461769c5bec9b86c984a17bb4890bdc9fab7e
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/datasets/io/generator.py
@@ -0,0 +1,58 @@
+from typing import Callable, Optional
+
+from .. import Features
+from ..packaged_modules.generator.generator import Generator
+from .abc import AbstractDatasetInputStream
+
+
+class GeneratorDatasetInputStream(AbstractDatasetInputStream):
+ def __init__(
+ self,
+ generator: Callable,
+ features: Optional[Features] = None,
+ cache_dir: str = None,
+ keep_in_memory: bool = False,
+ streaming: bool = False,
+ gen_kwargs: Optional[dict] = None,
+ num_proc: Optional[int] = None,
+ **kwargs,
+ ):
+ super().__init__(
+ features=features,
+ cache_dir=cache_dir,
+ keep_in_memory=keep_in_memory,
+ streaming=streaming,
+ num_proc=num_proc,
+ **kwargs,
+ )
+ self.builder = Generator(
+ cache_dir=cache_dir,
+ features=features,
+ generator=generator,
+ gen_kwargs=gen_kwargs,
+ **kwargs,
+ )
+
+ def read(self):
+ # Build iterable dataset
+ if self.streaming:
+ dataset = self.builder.as_streaming_dataset(split="train")
+ # Build regular (map-style) dataset
+ else:
+ download_config = None
+ download_mode = None
+ verification_mode = None
+ base_path = None
+
+ self.builder.download_and_prepare(
+ download_config=download_config,
+ download_mode=download_mode,
+ verification_mode=verification_mode,
+ try_from_hf_gcs=False,
+ base_path=base_path,
+ num_proc=self.num_proc,
+ )
+ dataset = self.builder.as_dataset(
+ split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
+ )
+ return dataset
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/io/json.py b/evalkit_tf437/lib/python3.10/site-packages/datasets/io/json.py
new file mode 100644
index 0000000000000000000000000000000000000000..2d4698df96659e97820d814bf6b991a2c66ebb57
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/datasets/io/json.py
@@ -0,0 +1,166 @@
+import multiprocessing
+import os
+from typing import BinaryIO, Optional, Union
+
+import fsspec
+
+from .. import Dataset, Features, NamedSplit, config
+from ..formatting import query_table
+from ..packaged_modules.json.json import Json
+from ..utils import tqdm as hf_tqdm
+from ..utils.typing import NestedDataStructureLike, PathLike
+from .abc import AbstractDatasetReader
+
+
+class JsonDatasetReader(AbstractDatasetReader):
+ def __init__(
+ self,
+ path_or_paths: NestedDataStructureLike[PathLike],
+ split: Optional[NamedSplit] = None,
+ features: Optional[Features] = None,
+ cache_dir: str = None,
+ keep_in_memory: bool = False,
+ streaming: bool = False,
+ field: Optional[str] = None,
+ num_proc: Optional[int] = None,
+ **kwargs,
+ ):
+ super().__init__(
+ path_or_paths,
+ split=split,
+ features=features,
+ cache_dir=cache_dir,
+ keep_in_memory=keep_in_memory,
+ streaming=streaming,
+ num_proc=num_proc,
+ **kwargs,
+ )
+ self.field = field
+ path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
+ self.builder = Json(
+ cache_dir=cache_dir,
+ data_files=path_or_paths,
+ features=features,
+ field=field,
+ **kwargs,
+ )
+
+ def read(self):
+ # Build iterable dataset
+ if self.streaming:
+ dataset = self.builder.as_streaming_dataset(split=self.split)
+ # Build regular (map-style) dataset
+ else:
+ download_config = None
+ download_mode = None
+ verification_mode = None
+ base_path = None
+
+ self.builder.download_and_prepare(
+ download_config=download_config,
+ download_mode=download_mode,
+ verification_mode=verification_mode,
+ # try_from_hf_gcs=try_from_hf_gcs,
+ base_path=base_path,
+ num_proc=self.num_proc,
+ )
+ dataset = self.builder.as_dataset(
+ split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
+ )
+ return dataset
+
+
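+# Note: this writer backs `Dataset.to_json`; by default it writes JSON Lines (orient="records",
+# lines=True), optionally compressed, streaming the Arrow table in batches of `batch_size` rows.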
+class JsonDatasetWriter:
+ def __init__(
+ self,
+ dataset: Dataset,
+ path_or_buf: Union[PathLike, BinaryIO],
+ batch_size: Optional[int] = None,
+ num_proc: Optional[int] = None,
+ **to_json_kwargs,
+ ):
+ if num_proc is not None and num_proc <= 0:
+ raise ValueError(f"num_proc {num_proc} must be an integer > 0.")
+
+ self.dataset = dataset
+ self.path_or_buf = path_or_buf
+ self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
+ self.num_proc = num_proc
+ self.encoding = "utf-8"
+ self.to_json_kwargs = to_json_kwargs
+
+ def write(self) -> int:
+ _ = self.to_json_kwargs.pop("path_or_buf", None)
+ orient = self.to_json_kwargs.pop("orient", "records")
+ lines = self.to_json_kwargs.pop("lines", True if orient == "records" else False)
+ if "index" not in self.to_json_kwargs and orient in ["split", "table"]:
+ self.to_json_kwargs["index"] = False
+
+ # Determine the default compression value based on self.path_or_buf type
+ default_compression = "infer" if isinstance(self.path_or_buf, (str, bytes, os.PathLike)) else None
+ compression = self.to_json_kwargs.pop("compression", default_compression)
+
+ if compression not in [None, "infer", "gzip", "bz2", "xz"]:
+ raise NotImplementedError(f"`datasets` currently does not support {compression} compression")
+
+ if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
+ with fsspec.open(self.path_or_buf, "wb", compression=compression) as buffer:
+ written = self._write(file_obj=buffer, orient=orient, lines=lines, **self.to_json_kwargs)
+ else:
+ if compression:
+ raise NotImplementedError(
+ f"The compression parameter is not supported when writing to a buffer, but compression={compression}"
+ " was passed. Please provide a local path instead."
+ )
+ written = self._write(file_obj=self.path_or_buf, orient=orient, lines=lines, **self.to_json_kwargs)
+ return written
+
+ def _batch_json(self, args):
+ offset, orient, lines, to_json_kwargs = args
+
+ batch = query_table(
+ table=self.dataset.data,
+ key=slice(offset, offset + self.batch_size),
+ indices=self.dataset._indices,
+ )
+ json_str = batch.to_pandas().to_json(path_or_buf=None, orient=orient, lines=lines, **to_json_kwargs)
+ if not json_str.endswith("\n"):
+ json_str += "\n"
+ return json_str.encode(self.encoding)
+
+ def _write(
+ self,
+ file_obj: BinaryIO,
+ orient,
+ lines,
+ **to_json_kwargs,
+ ) -> int:
+ """Writes the pyarrow table as JSON lines to a binary file handle.
+
+ Caller is responsible for opening and closing the handle.
+ """
+ written = 0
+
+ if self.num_proc is None or self.num_proc == 1:
+ for offset in hf_tqdm(
+ range(0, len(self.dataset), self.batch_size),
+ unit="ba",
+ desc="Creating json from Arrow format",
+ ):
+ json_str = self._batch_json((offset, orient, lines, to_json_kwargs))
+ written += file_obj.write(json_str)
+ else:
+ num_rows, batch_size = len(self.dataset), self.batch_size
+ with multiprocessing.Pool(self.num_proc) as pool:
+ for json_str in hf_tqdm(
+ pool.imap(
+ self._batch_json,
+ [(offset, orient, lines, to_json_kwargs) for offset in range(0, num_rows, batch_size)],
+ ),
+ total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
+ unit="ba",
+ desc="Creating json from Arrow format",
+ ):
+ written += file_obj.write(json_str)
+
+ return written
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/io/parquet.py b/evalkit_tf437/lib/python3.10/site-packages/datasets/io/parquet.py
new file mode 100644
index 0000000000000000000000000000000000000000..97245a36204d5edb20c715c9aec1d4cc9de7852f
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/datasets/io/parquet.py
@@ -0,0 +1,156 @@
+import os
+from typing import BinaryIO, Optional, Union
+
+import numpy as np
+import pyarrow.parquet as pq
+
+from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
+from ..features.features import FeatureType, _visit
+from ..formatting import query_table
+from ..packaged_modules import _PACKAGED_DATASETS_MODULES
+from ..packaged_modules.parquet.parquet import Parquet
+from ..utils import tqdm as hf_tqdm
+from ..utils.typing import NestedDataStructureLike, PathLike
+from .abc import AbstractDatasetReader
+
+
+def get_writer_batch_size(features: Features) -> Optional[int]:
+ """
+ Get the writer_batch_size that defines the maximum row group size in the parquet files.
+ The default in `datasets` is 1,000 but we lower it to 100 for image datasets.
+ This allows optimizing random access to Parquet files, since accessing one row requires
+ reading its entire row group.
+
+ This heuristic could be refined to pick sizes optimized for querying/iterating,
+ but at least it matches the dataset viewer expectations on HF.
+
+ Args:
+ features (`datasets.Features`):
+ The features of the dataset, used to pick a row group size suited to the feature types.
+ Returns:
+ writer_batch_size (`Optional[int]`):
+ Writer batch size to pass to a dataset builder.
+ If `None`, then it will use the `datasets` default.
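+
+ Example (illustrative sketch, assuming the default `config` values mentioned above, e.g. 100 rows per row group for image datasets):
+
+ ```py
+ >>> from datasets import Features, Image, Value
+ >>> get_writer_batch_size(Features({"image": Image()}))
+ 100
+ >>> get_writer_batch_size(Features({"text": Value("string")})) is None
+ True
+ ```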
+ """
+
+ batch_size = np.inf
+
+ def set_batch_size(feature: FeatureType) -> None:
+ nonlocal batch_size
+ if isinstance(feature, Image):
+ batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS)
+ elif isinstance(feature, Audio):
+ batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS)
+ elif isinstance(feature, Value) and feature.dtype == "binary":
+ batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS)
+
+ _visit(features, set_batch_size)
+
+ return None if batch_size is np.inf else batch_size
+
+
+class ParquetDatasetReader(AbstractDatasetReader):
+ def __init__(
+ self,
+ path_or_paths: NestedDataStructureLike[PathLike],
+ split: Optional[NamedSplit] = None,
+ features: Optional[Features] = None,
+ cache_dir: str = None,
+ keep_in_memory: bool = False,
+ streaming: bool = False,
+ num_proc: Optional[int] = None,
+ **kwargs,
+ ):
+ super().__init__(
+ path_or_paths,
+ split=split,
+ features=features,
+ cache_dir=cache_dir,
+ keep_in_memory=keep_in_memory,
+ streaming=streaming,
+ num_proc=num_proc,
+ **kwargs,
+ )
+ path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
+ hash = _PACKAGED_DATASETS_MODULES["parquet"][1]
+ self.builder = Parquet(
+ cache_dir=cache_dir,
+ data_files=path_or_paths,
+ features=features,
+ hash=hash,
+ **kwargs,
+ )
+
+ def read(self):
+ # Build iterable dataset
+ if self.streaming:
+ dataset = self.builder.as_streaming_dataset(split=self.split)
+ # Build regular (map-style) dataset
+ else:
+ download_config = None
+ download_mode = None
+ verification_mode = None
+ base_path = None
+
+ self.builder.download_and_prepare(
+ download_config=download_config,
+ download_mode=download_mode,
+ verification_mode=verification_mode,
+ # try_from_hf_gcs=try_from_hf_gcs,
+ base_path=base_path,
+ num_proc=self.num_proc,
+ )
+ dataset = self.builder.as_dataset(
+ split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
+ )
+ return dataset
+
+
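+# Note: this writer backs `Dataset.to_parquet`; the batch size (either the value passed in or
+# `get_writer_batch_size(dataset.features)`) also bounds the Parquet row group size.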
+class ParquetDatasetWriter:
+ def __init__(
+ self,
+ dataset: Dataset,
+ path_or_buf: Union[PathLike, BinaryIO],
+ batch_size: Optional[int] = None,
+ **parquet_writer_kwargs,
+ ):
+ self.dataset = dataset
+ self.path_or_buf = path_or_buf
+ self.batch_size = batch_size or get_writer_batch_size(dataset.features)
+ self.parquet_writer_kwargs = parquet_writer_kwargs
+
+ def write(self) -> int:
+ batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
+
+ if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
+ with open(self.path_or_buf, "wb+") as buffer:
+ written = self._write(file_obj=buffer, batch_size=batch_size, **self.parquet_writer_kwargs)
+ else:
+ written = self._write(file_obj=self.path_or_buf, batch_size=batch_size, **self.parquet_writer_kwargs)
+ return written
+
+ def _write(self, file_obj: BinaryIO, batch_size: int, **parquet_writer_kwargs) -> int:
+ """Writes the pyarrow table as Parquet to a binary file handle.
+
+ Caller is responsible for opening and closing the handle.
+ """
+ written = 0
+ _ = parquet_writer_kwargs.pop("path_or_buf", None)
+ schema = self.dataset.features.arrow_schema
+
+ writer = pq.ParquetWriter(file_obj, schema=schema, **parquet_writer_kwargs)
+
+ for offset in hf_tqdm(
+ range(0, len(self.dataset), batch_size),
+ unit="ba",
+ desc="Creating parquet from Arrow format",
+ ):
+ batch = query_table(
+ table=self.dataset._data,
+ key=slice(offset, offset + batch_size),
+ indices=self.dataset._indices,
+ )
+ writer.write_table(batch)
+ written += batch.nbytes
+ writer.close()
+ return written
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/io/spark.py b/evalkit_tf437/lib/python3.10/site-packages/datasets/io/spark.py
new file mode 100644
index 0000000000000000000000000000000000000000..7562ba1fb5f77ed8f82374e3021fcb3a93b1da8d
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/datasets/io/spark.py
@@ -0,0 +1,57 @@
+from typing import Optional
+
+import pyspark
+
+from .. import Features, NamedSplit
+from ..download import DownloadMode
+from ..packaged_modules.spark.spark import Spark
+from .abc import AbstractDatasetReader
+
+
+class SparkDatasetReader(AbstractDatasetReader):
+ """A dataset reader that reads from a Spark DataFrame.
+
+ When caching, cache materialization is parallelized over Spark; an NFS that is accessible to the driver must be
+ provided. Streaming is not currently supported.
+ """
+
+ def __init__(
+ self,
+ df: pyspark.sql.DataFrame,
+ split: Optional[NamedSplit] = None,
+ features: Optional[Features] = None,
+ streaming: bool = True,
+ cache_dir: str = None,
+ keep_in_memory: bool = False,
+ working_dir: str = None,
+ load_from_cache_file: bool = True,
+ file_format: str = "arrow",
+ **kwargs,
+ ):
+ super().__init__(
+ split=split,
+ features=features,
+ cache_dir=cache_dir,
+ keep_in_memory=keep_in_memory,
+ streaming=streaming,
+ **kwargs,
+ )
+ self._load_from_cache_file = load_from_cache_file
+ self._file_format = file_format
+ self.builder = Spark(
+ df=df,
+ features=features,
+ cache_dir=cache_dir,
+ working_dir=working_dir,
+ **kwargs,
+ )
+
+ def read(self):
+ if self.streaming:
+ return self.builder.as_streaming_dataset(split=self.split)
+ download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
+ self.builder.download_and_prepare(
+ download_mode=download_mode,
+ file_format=self._file_format,
+ )
+ return self.builder.as_dataset(split=self.split)
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/io/sql.py b/evalkit_tf437/lib/python3.10/site-packages/datasets/io/sql.py
new file mode 100644
index 0000000000000000000000000000000000000000..ceb425447c29c170499f68ab6fa221844e36d760
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/datasets/io/sql.py
@@ -0,0 +1,125 @@
+import multiprocessing
+from typing import TYPE_CHECKING, Optional, Union
+
+from .. import Dataset, Features, config
+from ..formatting import query_table
+from ..packaged_modules.sql.sql import Sql
+from ..utils import tqdm as hf_tqdm
+from .abc import AbstractDatasetInputStream
+
+
+if TYPE_CHECKING:
+ import sqlite3
+
+ import sqlalchemy
+
+
+class SqlDatasetReader(AbstractDatasetInputStream):
+ def __init__(
+ self,
+ sql: Union[str, "sqlalchemy.sql.Selectable"],
+ con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
+ features: Optional[Features] = None,
+ cache_dir: str = None,
+ keep_in_memory: bool = False,
+ **kwargs,
+ ):
+ super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
+ self.builder = Sql(
+ cache_dir=cache_dir,
+ features=features,
+ sql=sql,
+ con=con,
+ **kwargs,
+ )
+
+ def read(self):
+ download_config = None
+ download_mode = None
+ verification_mode = None
+ base_path = None
+
+ self.builder.download_and_prepare(
+ download_config=download_config,
+ download_mode=download_mode,
+ verification_mode=verification_mode,
+ # try_from_hf_gcs=try_from_hf_gcs,
+ base_path=base_path,
+ )
+
+ # Build dataset for splits
+ dataset = self.builder.as_dataset(
+ split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
+ )
+ return dataset
+
+
+class SqlDatasetWriter:
+ def __init__(
+ self,
+ dataset: Dataset,
+ name: str,
+ con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
+ batch_size: Optional[int] = None,
+ num_proc: Optional[int] = None,
+ **to_sql_kwargs,
+ ):
+ if num_proc is not None and num_proc <= 0:
+ raise ValueError(f"num_proc {num_proc} must be an integer > 0.")
+
+ self.dataset = dataset
+ self.name = name
+ self.con = con
+ self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
+ self.num_proc = num_proc
+ self.to_sql_kwargs = to_sql_kwargs
+
+ def write(self) -> int:
+ _ = self.to_sql_kwargs.pop("sql", None)
+ _ = self.to_sql_kwargs.pop("con", None)
+ index = self.to_sql_kwargs.pop("index", False)
+
+ written = self._write(index=index, **self.to_sql_kwargs)
+ return written
+
+ def _batch_sql(self, args):
+ offset, index, to_sql_kwargs = args
+ to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
+ batch = query_table(
+ table=self.dataset.data,
+ key=slice(offset, offset + self.batch_size),
+ indices=self.dataset._indices,
+ )
+ df = batch.to_pandas()
+ num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs)
+ return num_rows or len(df)
+
+ def _write(self, index, **to_sql_kwargs) -> int:
+ """Writes the pyarrow table as SQL to a database.
+
+ Caller is responsible for opening and closing the SQL connection.
+ """
+ written = 0
+
+ if self.num_proc is None or self.num_proc == 1:
+ for offset in hf_tqdm(
+ range(0, len(self.dataset), self.batch_size),
+ unit="ba",
+ desc="Creating SQL from Arrow format",
+ ):
+ written += self._batch_sql((offset, index, to_sql_kwargs))
+ else:
+ num_rows, batch_size = len(self.dataset), self.batch_size
+ with multiprocessing.Pool(self.num_proc) as pool:
+ for num_rows in hf_tqdm(
+ pool.imap(
+ self._batch_sql,
+ [(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)],
+ ),
+ total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
+ unit="ba",
+ desc="Creating SQL from Arrow format",
+ ):
+ written += num_rows
+
+ return written
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/io/text.py b/evalkit_tf437/lib/python3.10/site-packages/datasets/io/text.py
new file mode 100644
index 0000000000000000000000000000000000000000..42aa62b06589df2ad5679ef2935730483d76a4f6
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/datasets/io/text.py
@@ -0,0 +1,61 @@
+from typing import Optional
+
+from .. import Features, NamedSplit
+from ..packaged_modules.text.text import Text
+from ..utils.typing import NestedDataStructureLike, PathLike
+from .abc import AbstractDatasetReader
+
+
+class TextDatasetReader(AbstractDatasetReader):
+ def __init__(
+ self,
+ path_or_paths: NestedDataStructureLike[PathLike],
+ split: Optional[NamedSplit] = None,
+ features: Optional[Features] = None,
+ cache_dir: str = None,
+ keep_in_memory: bool = False,
+ streaming: bool = False,
+ num_proc: Optional[int] = None,
+ **kwargs,
+ ):
+ super().__init__(
+ path_or_paths,
+ split=split,
+ features=features,
+ cache_dir=cache_dir,
+ keep_in_memory=keep_in_memory,
+ streaming=streaming,
+ num_proc=num_proc,
+ **kwargs,
+ )
+ path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
+ self.builder = Text(
+ cache_dir=cache_dir,
+ data_files=path_or_paths,
+ features=features,
+ **kwargs,
+ )
+
+ def read(self):
+ # Build iterable dataset
+ if self.streaming:
+ dataset = self.builder.as_streaming_dataset(split=self.split)
+ # Build regular (map-style) dataset
+ else:
+ download_config = None
+ download_mode = None
+ verification_mode = None
+ base_path = None
+
+ self.builder.download_and_prepare(
+ download_config=download_config,
+ download_mode=download_mode,
+ verification_mode=verification_mode,
+ # try_from_hf_gcs=try_from_hf_gcs,
+ base_path=base_path,
+ num_proc=self.num_proc,
+ )
+ dataset = self.builder.as_dataset(
+ split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
+ )
+ return dataset
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/iterable_dataset.py b/evalkit_tf437/lib/python3.10/site-packages/datasets/iterable_dataset.py
new file mode 100644
index 0000000000000000000000000000000000000000..31329de9c3183b0f913e6044c010c60206e185df
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/datasets/iterable_dataset.py
@@ -0,0 +1,2388 @@
+import copy
+import itertools
+import sys
+import warnings
+from collections import Counter
+from copy import deepcopy
+from dataclasses import dataclass
+from functools import partial
+from itertools import cycle, islice
+from typing import Any, Callable, Dict, Iterable, Iterator, List, Optional, Tuple, Union
+
+import numpy as np
+import pyarrow as pa
+
+from . import config
+from .arrow_dataset import Dataset, DatasetInfoMixin
+from .features import Features
+from .features.features import FeatureType, _align_features, _check_if_features_can_be_aligned, cast_to_python_objects
+from .filesystems import _reset_fsspec_lock
+from .formatting import PythonFormatter, TensorFormatter, get_format_type_from_alias, get_formatter
+from .info import DatasetInfo
+from .splits import NamedSplit
+from .table import cast_table_to_features, read_schema_from_file, table_cast
+from .utils.logging import get_logger
+from .utils.py_utils import Literal
+from .utils.sharding import _merge_gen_kwargs, _number_of_shards_in_gen_kwargs, _shuffle_gen_kwargs, _split_gen_kwargs
+
+
+logger = get_logger(__name__)
+
+Key = Union[int, str]
+
+
+def identity_func(x):
+ return x
+
+
+def _rename_columns_fn(example: Dict, column_mapping: Dict[str, str]):
+ if any(col not in example for col in column_mapping):
+ raise ValueError(
+ f"Error when renaming {list(column_mapping)} to {list(column_mapping.values())}: columns {set(column_mapping) - set(example)} are not in the dataset."
+ )
+ if any(col in example for col in column_mapping.values()):
+ raise ValueError(
+ f"Error when renaming {list(column_mapping)} to {list(column_mapping.values())}: columns {set(example) - set(column_mapping.values())} are already in the dataset."
+ )
+ return {
+ new_column_name: example[original_column_name]
+ for original_column_name, new_column_name in column_mapping.items()
+ }
+
+
+def add_column_fn(example: Dict, idx: int, name: str, column: List[Dict]):
+ if name in example:
+ raise ValueError(f"Error when adding {name}: column {name} is already in the dataset.")
+ return {name: column[idx]}
+
+
+def _infer_features_from_batch(batch: Dict[str, list], try_features: Optional[Features] = None) -> Features:
+ pa_table = pa.Table.from_pydict(batch)
+ if try_features is not None:
+ try:
+ pa_table = table_cast(pa_table, pa.schema(try_features.type))
+ except (TypeError, pa.ArrowInvalid, pa.ArrowNotImplementedError):
+ pass
+ return Features.from_arrow_schema(pa_table.schema)
+
+
+def _examples_to_batch(examples: List[Dict[str, Any]]) -> Dict[str, list]:
+ # we order the columns by order of appearance
+ # to do so, we use a dict as an ordered set
+ cols = {col: None for example in examples for col in example}
+ # when an example is missing a column, we set the value to None with .get()
+ arrays = [[example.get(col) for example in examples] for col in cols]
+ return dict(zip(cols, arrays))
+
+
+def _batch_to_examples(batch: Dict[str, list]) -> List[Dict[str, Any]]:
+ """Convert a batch (dict of column lists) to an iterator of examples (one dict per row)."""
+ n_examples = len(batch[next(iter(batch))])
+ for i in range(n_examples):
+ yield {col: array[i] for col, array in batch.items()}
+
+
+class _HasNextIterator(Iterator):
+ """Iterator with a hasnext() function. Taken from https://stackoverflow.com/questions/1966591/has-next-in-python-iterators."""
+
+ def __init__(self, it):
+ self.it = iter(it)
+ self._hasnext = None
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ if self._hasnext:
+ result = self._thenext
+ else:
+ result = next(self.it)
+ self._hasnext = None
+ return result
+
+ def hasnext(self):
+ if self._hasnext is None:
+ try:
+ self._thenext = next(self.it)
+ except StopIteration:
+ self._hasnext = False
+ else:
+ self._hasnext = True
+ return self._hasnext
+
+
+def _convert_to_arrow(
+ iterable: Iterable[Tuple[Key, dict]],
+ batch_size: int,
+ drop_last_batch: bool = False,
+) -> Iterator[Tuple[Key, pa.Table]]:
+ """Convert and group examples in Arrow tables of size `batch_size`.
+
+ Args:
+ iterable (`Iterable[Tuple[Key, dict]]`):
+ An examples iterable containing tuples (example_key, example) of type (int/str, dict)
+ batch_size (`Optional[int]`):
+ Size of each sub-table to yield. If None or <= 0, yields the full table.
+ drop_last_batch (`bool`, defaults to `False`):
+ Drop the last batch if it is smaller than `batch_size`.
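+
+ Example (illustrative):
+
+ ```py
+ >>> examples = [(0, {"a": 1}), (1, {"a": 2}), (2, {"a": 3})]
+ >>> [(key, len(table)) for key, table in _convert_to_arrow(examples, batch_size=2)]
+ [('0_1', 2), ('2', 1)]
+ ```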
+ """
+ if batch_size is None or batch_size <= 0:
+ yield (
+ "all",
+ pa.Table.from_pylist(cast_to_python_objects([example for _, example in iterable], only_1d_for_numpy=True)),
+ )
+ return
+ iterator = iter(iterable)
+ for key, example in iterator:
+ iterator_batch = islice(iterator, batch_size - 1)
+ key_examples_list = [(key, example)] + list(iterator_batch)
+ if len(key_examples_list) < batch_size and drop_last_batch:
+ return
+ keys, examples = zip(*key_examples_list)
+ new_key = "_".join(str(key) for key in keys)
+ yield new_key, pa.Table.from_pylist(cast_to_python_objects(examples, only_1d_for_numpy=True))
+
+
+def _batch_arrow_tables(
+ iterable: Iterable[Tuple[Key, pa.Table]],
+ batch_size: Optional[int],
+ drop_last_batch: bool = False,
+) -> Iterator[Tuple[Key, pa.Table]]:
+ """Iterate over sub-tables of size `batch_size`.
+
+ Args:
+ iterable (`Iterable[Tuple[Key, pa.Table]]`):
+ A tables iterable containing tuples (table_key, table) of type (int/str, pa.Table)
+ batch_size (`Optional[int]`):
+ Size of each sub-table to yield. If None or <= 0, yields the full table.
+ drop_last_batch (`bool`, defaults to `False`):
+ Drop the last batch if it is smaller than `batch_size`.
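+
+ Example (illustrative; only the sizes of the yielded sub-tables are shown):
+
+ ```py
+ >>> import pyarrow as pa
+ >>> tables = [("t0", pa.table({"a": [1, 2, 3]})), ("t1", pa.table({"a": [4, 5]}))]
+ >>> [len(table) for _, table in _batch_arrow_tables(tables, batch_size=2)]
+ [2, 2, 1]
+ ```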
+ """
+ if batch_size is None or batch_size <= 0:
+ yield "all", pa.concat_tables([pa_table for _, pa_table in iterable])
+ return
+ keys_buffer = []
+ chunks_buffer = []
+ chunks_buffer_size = 0
+ for key, pa_table in iterable:
+ for chunk in pa_table.to_reader(max_chunksize=batch_size):
+ if len(chunk) == 0:
+ continue
+ elif chunks_buffer_size + len(chunk) < batch_size:
+ keys_buffer.append(key)
+ chunks_buffer.append(chunk)
+ chunks_buffer_size += len(chunk)
+ continue
+ elif chunks_buffer_size + len(chunk) == batch_size:
+ keys_buffer.append(key)
+ chunks_buffer.append(chunk)
+ new_key = "_".join(str(_key) for _key in keys_buffer)
+ yield new_key, pa.Table.from_batches(chunks_buffer)
+ keys_buffer = []
+ chunks_buffer = []
+ chunks_buffer_size = 0
+ else:
+ cropped_chunk_length = batch_size - chunks_buffer_size
+ keys_buffer.append(f"{key}[:{cropped_chunk_length}]")
+ chunks_buffer.append(chunk.slice(0, cropped_chunk_length))
+ new_key = "_".join(str(_key) for _key in keys_buffer)
+ yield new_key, pa.Table.from_batches(chunks_buffer)
+ keys_buffer = [f"{key}[{cropped_chunk_length}:]"]
+ chunks_buffer = [chunk.slice(cropped_chunk_length, len(chunk) - cropped_chunk_length)]
+ chunks_buffer_size = len(chunk) - cropped_chunk_length
+ if not drop_last_batch and chunks_buffer:
+ new_key = "_".join(str(_key) for _key in keys_buffer)
+ yield new_key, pa.Table.from_batches(chunks_buffer)
+
+
+class _BaseExamplesIterable:
+ """Base class for the examples iterable used by an IterableDataset"""
+
+ def __init__(self) -> None:
+ self.iter_arrow: Optional[Callable[[], Iterator[Tuple[Key, pa.Table]]]] = None
+
+ def __iter__(self) -> Iterator[Tuple[Key, dict]]:
+ """An examples iterable should yield tuples (example_key, example) of type (int/str, dict)"""
+ raise NotImplementedError(f"{type(self)} doesn't implement __iter__ yet")
+
+ def shuffle_data_sources(self, generator: np.random.Generator) -> "_BaseExamplesIterable":
+ """
+ Either shuffle the shards/sources of the dataset, or propagate the shuffling to the underlying iterable.
+ If the order of the shards must stay fixed (when using .skip or .take for example), then this method returns self.
+ """
+ raise NotImplementedError(f"{type(self)} doesn't implement shuffle_data_sources yet")
+
+ def shard_data_sources(self, worker_id: int, num_workers: int) -> "_BaseExamplesIterable":
+ """Either keep only the requested shard, or propagate the request to the underlying iterable."""
+ raise NotImplementedError(f"{type(self)} doesn't implement shard_data_sources yet")
+
+ def split_shard_indices_by_worker(self, worker_id: int, num_workers: int) -> List[int]:
+ return list(range(worker_id, self.n_shards, num_workers))
+
+ @property
+ def n_shards(self) -> int:
+ raise NotImplementedError(f"{type(self)} doesn't implement n_shards yet")
+
+
+class ExamplesIterable(_BaseExamplesIterable):
+ def __init__(self, generate_examples_fn: Callable[..., Tuple[Key, dict]], kwargs: dict):
+ super().__init__()
+ self.generate_examples_fn = generate_examples_fn
+ self.kwargs = kwargs
+
+ def __iter__(self):
+ yield from self.generate_examples_fn(**self.kwargs)
+
+ def shuffle_data_sources(self, generator: np.random.Generator) -> "ExamplesIterable":
+ return ShuffledDataSourcesExamplesIterable(self.generate_examples_fn, self.kwargs, generator)
+
+ def shard_data_sources(self, worker_id: int, num_workers: int) -> "ExamplesIterable":
+ """Keep only the requested shard."""
+ gen_kwargs_list = _split_gen_kwargs(self.kwargs, max_num_jobs=self.n_shards)
+ shard_indices = self.split_shard_indices_by_worker(worker_id, num_workers)
+ requested_gen_kwargs = _merge_gen_kwargs([gen_kwargs_list[i] for i in shard_indices])
+ return ExamplesIterable(self.generate_examples_fn, requested_gen_kwargs)
+
+ @property
+ def n_shards(self) -> int:
+ return _number_of_shards_in_gen_kwargs(self.kwargs)
+
+
+class ShuffledDataSourcesExamplesIterable(ExamplesIterable):
+ def __init__(
+ self, generate_examples_fn: Callable[..., Tuple[Key, dict]], kwargs: dict, generator: np.random.Generator
+ ):
+ super().__init__(generate_examples_fn, kwargs)
+ self.generator = deepcopy(generator)
+
+ def __iter__(self):
+ """Shuffle the kwargs order to shuffle shards"""
+ rng = deepcopy(self.generator)
+ kwargs_with_shuffled_shards = _shuffle_gen_kwargs(rng, self.kwargs)
+ yield from self.generate_examples_fn(**kwargs_with_shuffled_shards)
+
+ def shard_data_sources(self, worker_id: int, num_workers: int) -> "ExamplesIterable":
+ """Keep only the requested shard."""
+ rng = deepcopy(self.generator)
+ kwargs_with_shuffled_shards = _shuffle_gen_kwargs(rng, self.kwargs)
+ return ExamplesIterable(self.generate_examples_fn, kwargs_with_shuffled_shards).shard_data_sources(
+ worker_id, num_workers
+ )
+
+
+class ArrowExamplesIterable(_BaseExamplesIterable):
+ def __init__(self, generate_tables_fn: Callable[..., Tuple[Key, pa.Table]], kwargs: dict):
+ super().__init__()
+ self.generate_tables_fn = generate_tables_fn
+ self.kwargs = kwargs
+ self.iter_arrow = self._iter_arrow
+
+ def __iter__(self):
+ formatter = PythonFormatter()
+ for key, pa_table in self.generate_tables_fn(**self.kwargs):
+ for pa_subtable in pa_table.to_reader(max_chunksize=config.ARROW_READER_BATCH_SIZE_IN_DATASET_ITER):
+ formatted_batch = formatter.format_batch(pa_subtable)
+ for example in _batch_to_examples(formatted_batch):
+ yield key, example
+
+ def _iter_arrow(self):
+ yield from self.generate_tables_fn(**self.kwargs)
+
+ def shuffle_data_sources(self, generator: np.random.Generator) -> "ArrowExamplesIterable":
+ return ShuffledDataSourcesArrowExamplesIterable(self.generate_tables_fn, self.kwargs, generator)
+
+ def shard_data_sources(self, worker_id: int, num_workers: int) -> "ArrowExamplesIterable":
+ """Keep only the requested shard."""
+ gen_kwargs_list = _split_gen_kwargs(self.kwargs, max_num_jobs=self.n_shards)
+ shard_indices = self.split_shard_indices_by_worker(worker_id, num_workers)
+ requested_gen_kwargs = _merge_gen_kwargs([gen_kwargs_list[i] for i in shard_indices])
+ return ArrowExamplesIterable(self.generate_tables_fn, requested_gen_kwargs)
+
+ @property
+ def n_shards(self) -> int:
+ return _number_of_shards_in_gen_kwargs(self.kwargs)
+
+
+class ShuffledDataSourcesArrowExamplesIterable(ArrowExamplesIterable):
+ def __init__(
+ self,
+ generate_tables_fn: Callable[..., Tuple[Key, pa.Table]],
+ kwargs: dict,
+ generator: np.random.Generator,
+ ):
+ super().__init__(generate_tables_fn, kwargs)
+ self.generator = deepcopy(generator)
+
+ def __iter__(self):
+ """Shuffle the kwargs order to shuffle shards"""
+ rng = deepcopy(self.generator)
+ kwargs_with_shuffled_shards = _shuffle_gen_kwargs(rng, self.kwargs)
+ formatter = PythonFormatter()
+ for key, pa_table in self.generate_tables_fn(**kwargs_with_shuffled_shards):
+ for pa_subtable in pa_table.to_reader(max_chunksize=config.ARROW_READER_BATCH_SIZE_IN_DATASET_ITER):
+ formatted_batch = formatter.format_batch(pa_subtable)
+ for example in _batch_to_examples(formatted_batch):
+ yield key, example
+
+ def _iter_arrow(self):
+ rng = deepcopy(self.generator)
+ kwargs_with_shuffled_shards = _shuffle_gen_kwargs(rng, self.kwargs)
+ yield from self.generate_tables_fn(**kwargs_with_shuffled_shards)
+
+ def shard_data_sources(self, worker_id: int, num_workers: int) -> "ArrowExamplesIterable":
+ """Keep only the requested shard."""
+ rng = deepcopy(self.generator)
+ kwargs_with_shuffled_shards = _shuffle_gen_kwargs(rng, self.kwargs)
+ return ArrowExamplesIterable(self.generate_tables_fn, kwargs_with_shuffled_shards).shard_data_sources(
+ worker_id, num_workers
+ )
+
+
+class SelectColumnsIterable(_BaseExamplesIterable):
+ def __init__(self, ex_iterable: _BaseExamplesIterable, column_names: List[str]):
+ super().__init__()
+ self.ex_iterable = ex_iterable
+ self.column_names = column_names
+ if self.ex_iterable.iter_arrow:
+ self.iter_arrow = self._iter_arrow
+
+ def __iter__(self):
+ for idx, row in self.ex_iterable:
+ yield idx, {c: row[c] for c in self.column_names}
+
+ def _iter_arrow(self) -> Iterator[Tuple[Key, pa.Table]]:
+ for idx, pa_table in self.ex_iterable.iter_arrow():
+ yield idx, pa_table.select(self.column_names)
+
+ def shuffle_data_sources(self, generator: np.random.Generator) -> "SelectColumnsIterable":
+ return SelectColumnsIterable(self.ex_iterable.shuffle_data_sources(generator), self.column_names)
+
+ def shard_data_sources(self, worker_id: int, num_workers: int) -> "SelectColumnsIterable":
+ return SelectColumnsIterable(self.ex_iterable.shard_data_sources(worker_id, num_workers), self.column_names)
+
+ @property
+ def n_shards(self) -> int:
+ return self.ex_iterable.n_shards
+
+
+class StepExamplesIterable(_BaseExamplesIterable):
+ def __init__(self, ex_iterable: _BaseExamplesIterable, step: int, offset: int):
+ super().__init__()
+ self.ex_iterable = ex_iterable
+ self.step = step
+ self.offset = offset
+ # TODO(QL): implement iter_arrow
+
+ def __iter__(self):
+ ex_iterator = iter(self.ex_iterable)
+ while True:
+ batch = list(islice(ex_iterator, self.step))
+ if len(batch) > self.offset:
+ yield batch[self.offset]
+ else:
+ break
+
+ def shuffle_data_sources(self, generator: np.random.Generator) -> "StepExamplesIterable":
+ return StepExamplesIterable(
+ self.ex_iterable.shuffle_data_sources(generator), step=self.step, offset=self.offset
+ )
+
+ def shard_data_sources(self, worker_id: int, num_workers: int) -> "StepExamplesIterable":
+ return StepExamplesIterable(
+ self.ex_iterable.shard_data_sources(worker_id, num_workers), step=self.step, offset=self.offset
+ )
+
+ @property
+ def n_shards(self) -> int:
+ return self.ex_iterable.n_shards
+
+
+class CyclingMultiSourcesExamplesIterable(_BaseExamplesIterable):
+ def __init__(
+ self,
+ ex_iterables: List[_BaseExamplesIterable],
+ stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
+ ):
+ super().__init__()
+ self.ex_iterables = ex_iterables
+ self.stopping_strategy = stopping_strategy
+
+ # if undersampling ("first_exhausted"), we stop as soon as one dataset is exhausted
+ # if oversampling ("all_exhausted"), we stop as soon as every dataset is exhausted, i.e. as soon as every sample of every dataset has been visited at least once
+ self.bool_strategy_func = np.all if (stopping_strategy == "all_exhausted") else np.any
+ # TODO(QL): implement iter_arrow
+
+ def _get_indices_iterator(self):
+ # this is an infinite iterator to keep track of which iterator we want to pick examples from
+ return cycle(range(len(self.ex_iterables)))
+
+ def __iter__(self):
+ iterators = [_HasNextIterator(ex_iterable) for ex_iterable in self.ex_iterables]
+
+ indices_iterator = self._get_indices_iterator()
+
+ is_exhausted = np.full(len(self.ex_iterables), False)
+ for i in indices_iterator:
+ try: # let's pick one example from the iterator at index i
+ yield next(iterators[i])
+
+ # it will resume from the yield at the next call so that we can directly test if the iterable is exhausted and if we need to break out of the loop
+ if not iterators[i].hasnext():
+ is_exhausted[i] = True
+
+ if self.bool_strategy_func(is_exhausted):
+ # if the stopping criteria is met, break the main for loop
+ break
+ # otherwise reinitialise the iterator and yield the first example
+ iterators[i] = _HasNextIterator(self.ex_iterables[i])
+
+ except StopIteration:
+ # here it means that the i-th iterable dataset is empty, i.e. we never get the occasion to yield an element of the i-th dataset.
+ # we still check if the stopping criterion is met, and break out of the loop if it is (this matters for the oversampling strategy)
+ is_exhausted[i] = True
+
+ if self.bool_strategy_func(is_exhausted):
+ # if the stopping criteria is met, break the main for loop
+ break
+
+ def shuffle_data_sources(self, generator: np.random.Generator) -> "CyclingMultiSourcesExamplesIterable":
+ """Shuffle each underlying examples iterable."""
+ ex_iterables = [ex_iterable.shuffle_data_sources(generator) for ex_iterable in self.ex_iterables]
+ return CyclingMultiSourcesExamplesIterable(ex_iterables, self.stopping_strategy)
+
+ @property
+ def n_shards(self) -> int:
+ return min(ex_iterable.n_shards for ex_iterable in self.ex_iterables)
+
+ def shard_data_sources(self, worker_id: int, num_workers: int) -> "CyclingMultiSourcesExamplesIterable":
+ """Either keep only the requested shard, or propagate the request to the underlying iterable."""
+ return CyclingMultiSourcesExamplesIterable(
+ [iterable.shard_data_sources(worker_id, num_workers) for iterable in self.ex_iterables],
+ stopping_strategy=self.stopping_strategy,
+ )
+
+
+class VerticallyConcatenatedMultiSourcesExamplesIterable(_BaseExamplesIterable):
+ """
+ VerticallyConcatenatedMultiSourcesExamplesIterable simply chains the input iterables.
+ It doesn't require the examples iterables to always yield the same columns.
+ Instead, this is handled by the `IterableDataset` class or `TypedExamplesIterable`.
+
+ For information, `IterableDataset` merges the features of all the datasets to concatenate into one.
+ We use `IterableDataset._resolve_features` to obtain the features of all the datasets to concatenate.
+
+ Then for each example, `IterableDataset` and `TypedExamplesIterable` automatically fill missing columns with None.
+ This is done with `_apply_feature_types_on_example`.
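+
+ Example (illustrative, via the public `concatenate_datasets` API which builds this iterable under the hood; the missing column is expected to be filled with `None`):
+
+ ```py
+ >>> from datasets import Dataset, concatenate_datasets
+ >>> ds1 = Dataset.from_dict({"a": [1, 2]}).to_iterable_dataset()
+ >>> ds2 = Dataset.from_dict({"a": [3], "b": ["x"]}).to_iterable_dataset()
+ >>> list(concatenate_datasets([ds1, ds2]))
+ [{'a': 1, 'b': None}, {'a': 2, 'b': None}, {'a': 3, 'b': 'x'}]
+ ```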
+ """
+
+ def __init__(self, ex_iterables: List[_BaseExamplesIterable]):
+ super().__init__()
+ self.ex_iterables = ex_iterables
+ if all(ex_iterable.iter_arrow is not None for ex_iterable in ex_iterables):
+ self.iter_arrow = self._iter_arrow
+
+ def __iter__(self):
+ for ex_iterable in self.ex_iterables:
+ yield from ex_iterable
+
+ def _iter_arrow(self):
+ for ex_iterable in self.ex_iterables:
+ yield from ex_iterable.iter_arrow()
+
+ def shuffle_data_sources(
+ self, generator: np.random.Generator
+ ) -> "VerticallyConcatenatedMultiSourcesExamplesIterable":
+ """Shuffle the list of examples iterable, as well as each underlying examples iterable."""
+ rng = deepcopy(generator)
+ ex_iterables = list(self.ex_iterables)
+ rng.shuffle(ex_iterables)
+ ex_iterables = [ex_iterable.shuffle_data_sources(generator) for ex_iterable in ex_iterables]
+ return VerticallyConcatenatedMultiSourcesExamplesIterable(ex_iterables)
+
+ @property
+ def n_shards(self) -> int:
+ return min(ex_iterable.n_shards for ex_iterable in self.ex_iterables)
+
+ def shard_data_sources(
+ self, worker_id: int, num_workers: int
+ ) -> "VerticallyConcatenatedMultiSourcesExamplesIterable":
+ """Either keep only the requested shard, or propagate the request to the underlying iterable."""
+ return VerticallyConcatenatedMultiSourcesExamplesIterable(
+ [iterable.shard_data_sources(worker_id, num_workers) for iterable in self.ex_iterables]
+ )
+
+
+def _check_column_names(column_names: List[str]):
+ """Check the column names to make sure they don't contain duplicates."""
+ counter = Counter(column_names)
+ if not all(count == 1 for count in counter.values()):
+ duplicated_columns = [col for col in counter if counter[col] > 1]
+ raise ValueError(
+ f"The examples iterables can't have duplicated columns but columns {duplicated_columns} are duplicated."
+ )
+
+
+class HorizontallyConcatenatedMultiSourcesExamplesIterable(_BaseExamplesIterable):
+ """
+ HorizontallyConcatenatedMultiSourcesExamplesIterable merges examples together for the input list of iterables.
+ It also checks that there are no duplicate columns (otherwise we don't know which one to keep).
+ This check is done once when yielding the first example.
+
+ However it doesn't fill missing columns with None.
+ Instead, this is handled by the `IterableDataset` class or `TypedExamplesIterable`.
+
+ For information, `IterableDataset` merges the features of all the datasets to concatenate into one.
+ We use `IterableDataset._resolve_features` to obtain the features of all the datasets to concatenate.
+
+ Then for each example, `IterableDataset` and `TypedExamplesIterable` automatically fill missing columns with None.
+ This is done with `_apply_feature_types_on_example`.
+ """
+
+ def __init__(self, ex_iterables: List[_BaseExamplesIterable]):
+ super().__init__()
+ self.ex_iterables = ex_iterables
+ # TODO(QL): implement iter_arrow
+
+ def __iter__(self):
+ ex_iterators = [iter(ex_iterable) for ex_iterable in self.ex_iterables]
+ for i in itertools.count():
+ keys = []
+ examples = []
+ for ex_iterator in list(ex_iterators):
+ try:
+ key, example = next(ex_iterator)
+ keys.append(key)
+ examples.append(example)
+ except StopIteration:
+ ex_iterators.remove(ex_iterator)
+ if ex_iterators:
+ if i == 0:
+ _check_column_names([column_name for example in examples for column_name in example])
+ new_example = {}
+ for example in examples:
+ new_example.update(example)
+ new_key = "_".join(str(key) for key in keys)
+ yield new_key, new_example
+ else:
+ break
+
+ def shuffle_data_sources(
+ self, generator: np.random.Generator
+ ) -> "HorizontallyConcatenatedMultiSourcesExamplesIterable":
+ """Doesn't shuffle the wrapped examples iterable since it would break the alignment between them."""
+ return self
+
+ @property
+ def n_shards(self) -> int:
+ return 1
+
+ def shard_data_sources(
+ self, worker_id: int, num_workers: int
+ ) -> "HorizontallyConcatenatedMultiSourcesExamplesIterable":
+ """Either keep only the requested shard, or propagate the request to the underlying iterable."""
+ return HorizontallyConcatenatedMultiSourcesExamplesIterable(
+ [iterable.shard_data_sources(worker_id, num_workers) for iterable in self.ex_iterables]
+ )
+
+
+class RandomlyCyclingMultiSourcesExamplesIterable(CyclingMultiSourcesExamplesIterable):
+ def __init__(
+ self,
+ ex_iterables: List[_BaseExamplesIterable],
+ generator: np.random.Generator,
+ probabilities: Optional[List[float]] = None,
+ stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
+ ):
+ super().__init__(ex_iterables, stopping_strategy)
+ self.generator = deepcopy(generator)
+ self.probabilities = probabilities
+ # TODO(QL): implement iter_arrow
+
+ @staticmethod
+ def _iter_random_indices(
+ rng: np.random.Generator,
+ num_sources: int,
+ random_batch_size=1000,
+ p: Optional[List[float]] = None,
+ ) -> Iterator[int]:
+ """Get an infinite iterator that randomly samples the index of the source to pick examples from."""
+ if p is None:
+ while True:
+ yield from (int(i) for i in rng.integers(0, num_sources, size=random_batch_size))
+ else:
+ while True:
+ yield from (int(i) for i in rng.choice(num_sources, size=random_batch_size, p=p))
+
+ def _get_indices_iterator(self):
+ rng = deepcopy(self.generator)
+ # this is an infinite iterator that randomly samples the index of the source to pick examples from
+ return self._iter_random_indices(rng, len(self.ex_iterables), p=self.probabilities)
+
+ def shuffle_data_sources(self, generator: np.random.Generator) -> "RandomlyCyclingMultiSourcesExamplesIterable":
+ """Shuffle the data sources of each wrapped examples iterable."""
+ ex_iterables = [ex_iterable.shuffle_data_sources(generator) for ex_iterable in self.ex_iterables]
+ return RandomlyCyclingMultiSourcesExamplesIterable(
+ ex_iterables,
+ generator=generator,
+ probabilities=self.probabilities,
+ stopping_strategy=self.stopping_strategy,
+ )
+
+ def shard_data_sources(self, worker_id: int, num_workers: int) -> "RandomlyCyclingMultiSourcesExamplesIterable":
+ """Either keep only the requested shard, or propagate the request to the underlying iterable."""
+ return RandomlyCyclingMultiSourcesExamplesIterable(
+ [iterable.shard_data_sources(worker_id, num_workers) for iterable in self.ex_iterables],
+ self.generator,
+ self.probabilities,
+ self.stopping_strategy,
+ )
+
+
+class MappedExamplesIterable(_BaseExamplesIterable):
+ def __init__(
+ self,
+ ex_iterable: _BaseExamplesIterable,
+ function: Callable,
+ with_indices: bool = False,
+ input_columns: Optional[List[str]] = None,
+ batched: bool = False,
+ batch_size: Optional[int] = 1000,
+ drop_last_batch: bool = False,
+ remove_columns: Optional[List[str]] = None,
+ fn_kwargs: Optional[dict] = None,
+ formatting: Optional["FormattingConfig"] = None,
+ format_type="deprecated",
+ ):
+ if format_type != "deprecated":
+ warning_msg = "'format_type' is deprecated and will be removed in the next major version of datasets. "
+ help_message = "Please use 'formatting=FormattingConfig(format_type=format_type)' instead."
+ warnings.warn(warning_msg + help_message, category=FutureWarning, stacklevel=2)
+ formatting = FormattingConfig(format_type=format_type)
+ super().__init__()
+ self.ex_iterable = ex_iterable
+ self.function = function
+ self.batched = batched
+ self.batch_size = batch_size
+ self.drop_last_batch = drop_last_batch
+ self.remove_columns = remove_columns
+ self.with_indices = with_indices
+ self.input_columns = input_columns
+ self.fn_kwargs = fn_kwargs or {}
+ self.formatting = formatting
+ if self.formatting and self.formatting.format_type == "arrow":
+ self.iter_arrow = self._iter_arrow
+
+ def __iter__(self):
+ if self.formatting and self.formatting.format_type == "arrow":
+ yield from ArrowExamplesIterable(self._iter_arrow, {})
+ else:
+ yield from self._iter()
+
+ def _iter(self):
+ iterator = iter(self.ex_iterable)
+ current_idx = 0
+
+ if self.formatting:
+ formatter = get_formatter(self.formatting.format_type)
+ format_dict = (
+ formatter.recursive_tensorize if isinstance(formatter, TensorFormatter) else cast_to_python_objects
+ )
+ else:
+ format_dict = None
+
+ if self.batched:
+ for key, example in iterator:
+                # If `batched`, first build the batch; if `batch_size` is None or <= 0, the batch is the whole dataset
+ iterator_batch = (
+ iterator
+ if self.batch_size is None or self.batch_size <= 0
+ else islice(iterator, self.batch_size - 1)
+ )
+ key_examples_list = [(key, example)] + list(iterator_batch)
+ keys, examples = zip(*key_examples_list)
+ if (
+ self.drop_last_batch
+ and self.batch_size is not None
+ and self.batch_size > 0
+ and len(examples) < self.batch_size
+ ): # ignore last batch
+ return
+ batch = _examples_to_batch(examples)
+ batch = format_dict(batch) if format_dict else batch
+ # then apply the transform
+ inputs = batch
+ function_args = [inputs] if self.input_columns is None else [inputs[col] for col in self.input_columns]
+ if self.with_indices:
+ function_args.append([current_idx + i for i in range(len(key_examples_list))])
+ transformed_batch = dict(batch) # this will be updated with the function output
+ transformed_batch.update(self.function(*function_args, **self.fn_kwargs))
+ # then remove the unwanted columns
+ if self.remove_columns:
+ for c in self.remove_columns:
+ del transformed_batch[c]
+ if transformed_batch:
+ first_col = next(iter(transformed_batch))
+ bad_cols = [
+ col
+ for col in transformed_batch
+ if len(transformed_batch[col]) != len(transformed_batch[first_col])
+ ]
+ if bad_cols:
+ raise ValueError(
+ f"Column lengths mismatch: columns {bad_cols} have length {[len(transformed_batch[col]) for col in bad_cols]} while {first_col} has length {len(transformed_batch[first_col])}."
+ )
+ # the new key is the concatenation of the examples keys from the batch
+ new_key = "_".join(str(key) for key in keys)
+ # yield one example at a time from the transformed batch
+ for example in _batch_to_examples(transformed_batch):
+ yield new_key, example
+ current_idx += 1
+ else:
+ for key, example in iterator:
+ # If not batched, we can apply the transform and yield the example directly
+ # first copy the example, since we might drop some keys
+ example = dict(example)
+ example = format_dict(example) if format_dict else example
+ # then apply the transform
+ inputs = example
+ function_args = [inputs] if self.input_columns is None else [inputs[col] for col in self.input_columns]
+ if self.with_indices:
+ function_args.append(current_idx)
+ transformed_example = dict(example) # this will be updated with the function output
+ transformed_example.update(self.function(*function_args, **self.fn_kwargs))
+ # then we remove the unwanted columns
+ if self.remove_columns:
+ for c in self.remove_columns:
+ del transformed_example[c]
+ yield key, transformed_example
+ current_idx += 1
+
+ def _iter_arrow(self) -> Iterator[Tuple[Key, pa.Table]]:
+ if self.ex_iterable.iter_arrow:
+ iterator = _batch_arrow_tables(
+ self.ex_iterable.iter_arrow(),
+ batch_size=self.batch_size if self.batched else 1,
+ drop_last_batch=self.drop_last_batch,
+ )
+ else:
+ iterator = _convert_to_arrow(
+ self.ex_iterable,
+ batch_size=self.batch_size if self.batched else 1,
+ drop_last_batch=self.drop_last_batch,
+ )
+ current_idx = 0
+ for key, pa_table in iterator:
+ # first build the batch
+ function_args = [pa_table] if self.input_columns is None else [pa_table[col] for col in self.input_columns]
+ if self.with_indices:
+ if self.batched:
+ function_args.append([current_idx + i for i in range(len(pa_table))])
+ else:
+ function_args.append(current_idx)
+ # then apply the transform
+ output_table = self.function(*function_args, **self.fn_kwargs)
+ if not isinstance(output_table, pa.Table):
+ raise TypeError(
+ f"Provided `function` which is applied to pyarrow tables returns a variable of type {type(output_table)}. Make sure provided `function` returns a a pyarrow table to update the dataset."
+ )
+            # we don't need to merge results for consistency with Dataset.map, which merges iff both input and output are dicts
+ # then remove the unwanted columns
+ if self.remove_columns:
+ for column in self.remove_columns:
+ if column in output_table.column_names:
+ output_table = output_table.remove_column(output_table.column_names.index(column))
+ # return output
+ yield key, output_table
+ current_idx += len(pa_table)
+
+ def shuffle_data_sources(self, generator: np.random.Generator) -> "MappedExamplesIterable":
+ """Shuffle the wrapped examples iterable."""
+ return MappedExamplesIterable(
+ self.ex_iterable.shuffle_data_sources(generator),
+ function=self.function,
+ with_indices=self.with_indices,
+ input_columns=self.input_columns,
+ batched=self.batched,
+ batch_size=self.batch_size,
+ drop_last_batch=self.drop_last_batch,
+ remove_columns=self.remove_columns,
+ fn_kwargs=self.fn_kwargs,
+ formatting=self.formatting,
+ )
+
+ def shard_data_sources(self, worker_id: int, num_workers: int) -> "MappedExamplesIterable":
+ """Keep only the requested shard."""
+ return MappedExamplesIterable(
+ self.ex_iterable.shard_data_sources(worker_id, num_workers),
+ function=self.function,
+ with_indices=self.with_indices,
+ input_columns=self.input_columns,
+ batched=self.batched,
+ batch_size=self.batch_size,
+ drop_last_batch=self.drop_last_batch,
+ remove_columns=self.remove_columns,
+ fn_kwargs=self.fn_kwargs,
+ formatting=self.formatting,
+ )
+
+ @property
+ def n_shards(self) -> int:
+ return self.ex_iterable.n_shards
+
+
+class FilteredExamplesIterable(_BaseExamplesIterable):
+ def __init__(
+ self,
+ ex_iterable: _BaseExamplesIterable,
+ function: Callable,
+ with_indices: bool = False,
+ input_columns: Optional[List[str]] = None,
+ batched: bool = False,
+ batch_size: Optional[int] = 1000,
+ fn_kwargs: Optional[dict] = None,
+ formatting: Optional["FormattingConfig"] = None,
+ format_type="deprecated",
+ ):
+ if format_type != "deprecated":
+ warning_msg = "'format_type' is deprecated and will be removed in the next major version of datasets. "
+ help_message = "Please use 'formatting=FormattingConfig(format_type=format_type)' instead."
+ warnings.warn(warning_msg + help_message, category=FutureWarning, stacklevel=2)
+ formatting = FormattingConfig(format_type=format_type)
+ super().__init__()
+ self.ex_iterable = ex_iterable
+ self.function = function
+ self.batched = batched
+ self.batch_size = batch_size
+ self.with_indices = with_indices
+ self.input_columns = input_columns
+ self.fn_kwargs = fn_kwargs or {}
+ self.formatting = formatting
+ if self.formatting and self.formatting.format_type == "arrow":
+ self.iter_arrow = self._iter_arrow
+
+ def __iter__(self):
+ if self.formatting and self.formatting.format_type == "arrow":
+ yield from ArrowExamplesIterable(self._iter_arrow, {})
+ else:
+ yield from self._iter()
+
+ def _iter(self):
+ if self.formatting:
+ formatter = get_formatter(self.formatting.format_type)
+ format_dict = (
+ formatter.recursive_tensorize if isinstance(formatter, TensorFormatter) else cast_to_python_objects
+ )
+ else:
+ format_dict = None
+
+ iterator = iter(self.ex_iterable)
+ current_idx = 0
+ if self.batched:
+ for key, example in iterator:
+                # If `batched`, first build the batch; if `batch_size` is None or <= 0, the batch is the whole dataset
+ iterator_batch = (
+ iterator
+ if self.batch_size is None or self.batch_size <= 0
+ else islice(iterator, self.batch_size - 1)
+ )
+ key_examples_list = [(key, example)] + list(iterator_batch)
+ keys, examples = zip(*key_examples_list)
+ batch = _examples_to_batch(examples)
+ batch = format_dict(batch) if format_dict else batch
+ # then compute the mask for the batch
+ inputs = batch
+ function_args = [inputs] if self.input_columns is None else [inputs[col] for col in self.input_columns]
+ if self.with_indices:
+ function_args.append([current_idx + i for i in range(len(key_examples_list))])
+ mask = self.function(*function_args, **self.fn_kwargs)
+ # yield one example at a time from the batch
+ for key_example, to_keep in zip(key_examples_list, mask):
+ if to_keep:
+ yield key_example
+ current_idx += 1
+ else:
+ for key, example in iterator:
+                # If not batched, we can apply the filtering function directly
+ example = dict(example)
+ inputs = format_dict(example) if format_dict else example
+ function_args = [inputs] if self.input_columns is None else [inputs[col] for col in self.input_columns]
+ if self.with_indices:
+ function_args.append(current_idx)
+ to_keep = self.function(*function_args, **self.fn_kwargs)
+ if to_keep:
+ yield key, example
+ current_idx += 1
+
+ def _iter_arrow(self):
+ if self.ex_iterable.iter_arrow:
+ iterator = _batch_arrow_tables(
+ self.ex_iterable.iter_arrow(), batch_size=self.batch_size if self.batched else 1
+ )
+ else:
+ iterator = _convert_to_arrow(self.ex_iterable, batch_size=self.batch_size if self.batched else 1)
+ current_idx = 0
+ for key, pa_table in iterator:
+ # first build the batch
+ function_args = [pa_table] if self.input_columns is None else [pa_table[col] for col in self.input_columns]
+ if self.with_indices:
+ if self.batched:
+ function_args.append([current_idx + i for i in range(len(pa_table))])
+ else:
+ function_args.append(current_idx)
+ # then apply the transform
+ mask = self.function(*function_args, **self.fn_kwargs)
+ # yield the filtered table
+ if self.batched:
+ yield key, pa_table.filter(mask)
+ elif mask.as_py() if isinstance(mask, pa.BooleanScalar) else mask:
+ yield key, pa_table
+ current_idx += len(pa_table)
+
+ def shuffle_data_sources(self, seed: Optional[int]) -> "FilteredExamplesIterable":
+ """Shuffle the wrapped examples iterable."""
+ return FilteredExamplesIterable(
+ self.ex_iterable.shuffle_data_sources(seed),
+ function=self.function,
+ with_indices=self.with_indices,
+ input_columns=self.input_columns,
+ batched=self.batched,
+ batch_size=self.batch_size,
+ )
+
+ def shard_data_sources(self, worker_id: int, num_workers: int) -> "FilteredExamplesIterable":
+ """Keep only the requested shard."""
+ return FilteredExamplesIterable(
+ self.ex_iterable.shard_data_sources(worker_id, num_workers),
+ function=self.function,
+ with_indices=self.with_indices,
+ input_columns=self.input_columns,
+ batched=self.batched,
+ batch_size=self.batch_size,
+ )
+
+ @property
+ def n_shards(self) -> int:
+ return self.ex_iterable.n_shards
+
+
+class BufferShuffledExamplesIterable(_BaseExamplesIterable):
+ def __init__(self, ex_iterable: _BaseExamplesIterable, buffer_size: int, generator: np.random.Generator):
+ super().__init__()
+ self.ex_iterable = ex_iterable
+ self.buffer_size = buffer_size
+ self.generator = generator
+ # TODO(QL): implement iter_arrow
+
+ @staticmethod
+ def _iter_random_indices(rng: np.random.Generator, buffer_size: int, random_batch_size=1000) -> Iterator[int]:
+ while True:
+ yield from (int(i) for i in rng.integers(0, buffer_size, size=random_batch_size))
+
+ def __iter__(self):
+ buffer_size = self.buffer_size
+ rng = deepcopy(self.generator)
+ indices_iterator = self._iter_random_indices(rng, buffer_size)
+ # this is the shuffle buffer that we keep in memory
+ mem_buffer = []
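+        # e.g. with buffer_size=1000, the first 1000 examples only fill the buffer; yielding starts once it is full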
+ for x in self.ex_iterable:
+            if len(mem_buffer) == buffer_size: # if the buffer is full, pick an example from it
+ i = next(indices_iterator)
+ yield mem_buffer[i]
+ mem_buffer[i] = x # replace the picked example by a new one
+ else: # otherwise, keep filling the buffer
+ mem_buffer.append(x)
+ # when we run out of examples, we shuffle the remaining examples in the buffer and yield them
+ rng.shuffle(mem_buffer)
+ yield from mem_buffer
+
+ def shuffle_data_sources(self, generator: np.random.Generator) -> "BufferShuffledExamplesIterable":
+ """Shuffle the wrapped examples iterable as well as the shuffling buffer."""
+ return BufferShuffledExamplesIterable(
+ self.ex_iterable.shuffle_data_sources(generator), buffer_size=self.buffer_size, generator=generator
+ )
+
+ def shard_data_sources(self, worker_id: int, num_workers: int) -> "BufferShuffledExamplesIterable":
+ """Keep only the requested shard."""
+ return BufferShuffledExamplesIterable(
+ self.ex_iterable.shard_data_sources(worker_id, num_workers),
+ buffer_size=self.buffer_size,
+ generator=self.generator,
+ )
+
+ @property
+ def n_shards(self) -> int:
+ return self.ex_iterable.n_shards
+
+
+class SkipExamplesIterable(_BaseExamplesIterable):
+ def __init__(self, ex_iterable: _BaseExamplesIterable, n: int):
+ super().__init__()
+ self.ex_iterable = ex_iterable
+ self.n = n
+ # TODO(QL): implement iter_arrow
+
+ def __iter__(self):
+ yield from islice(self.ex_iterable, self.n, None)
+
+ def shuffle_data_sources(self, generator: np.random.Generator) -> "SkipExamplesIterable":
+ """Doesn't shuffle the wrapped examples iterable since it would skip examples from other shards instead."""
+ return self
+
+ @property
+ def n_shards(self) -> int:
+ return self.ex_iterable.n_shards
+
+
+class TakeExamplesIterable(_BaseExamplesIterable):
+ def __init__(self, ex_iterable: _BaseExamplesIterable, n: int):
+ super().__init__()
+ self.ex_iterable = ex_iterable
+ self.n = n
+ # TODO(QL): implement iter_arrow
+
+ def __iter__(self):
+ yield from islice(self.ex_iterable, self.n)
+
+ def shuffle_data_sources(self, generator: np.random.Generator) -> "TakeExamplesIterable":
+ """Doesn't shuffle the wrapped examples iterable since it would take examples from other shards instead."""
+ return self
+
+ @staticmethod
+ def split_number(num, n):
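+        """Split `num` into `n` parts as evenly as possible, e.g. split_number(10, 3) -> [4, 3, 3]."""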
+ quotient = num // n
+ remainder = num % n
+ result = [quotient] * n
+ for i in range(remainder):
+ result[i] += 1
+ return result
+
+ def shard_data_sources(self, worker_id: int, num_workers: int) -> "TakeExamplesIterable":
+ """Keep only the requested shard."""
+ return TakeExamplesIterable(
+ self.ex_iterable.shard_data_sources(worker_id, num_workers),
+ n=self.split_number(self.n, num_workers)[worker_id],
+ )
+
+ @property
+ def n_shards(self) -> int:
+ return self.ex_iterable.n_shards
+
+
+def _apply_feature_types_on_example(
+ example: dict, features: Features, token_per_repo_id: Dict[str, Union[str, bool, None]]
+) -> dict:
+ example = dict(example)
+ # add missing columns
+ for column_name in features:
+ if column_name not in example:
+ example[column_name] = None
+ # we encode the example for ClassLabel feature types for example
+ encoded_example = features.encode_example(example)
+ # Decode example for Audio feature, e.g.
+ decoded_example = features.decode_example(encoded_example, token_per_repo_id=token_per_repo_id)
+ return decoded_example
+
+
+def _apply_feature_types_on_batch(
+ batch: dict, features: Features, token_per_repo_id: Dict[str, Union[str, bool, None]]
+) -> dict:
+ batch = dict(batch)
+ # add missing columns
+ n_examples = len(batch[next(iter(batch))])
+ for column_name in features:
+ if column_name not in batch:
+ batch[column_name] = [None] * n_examples
+ # we encode the batch for ClassLabel feature types for example
+ encoded_batch = features.encode_batch(batch)
+ # Decode batch for Audio feature, e.g.
+ decoded_batch = features.decode_batch(encoded_batch, token_per_repo_id=token_per_repo_id)
+ return decoded_batch
+
+
+class TypedExamplesIterable(_BaseExamplesIterable):
+ def __init__(
+ self,
+ ex_iterable: _BaseExamplesIterable,
+ features: Features,
+ token_per_repo_id: Dict[str, Union[str, bool, None]],
+ ):
+ super().__init__()
+ self.ex_iterable = ex_iterable
+ self.features = features
+ self.token_per_repo_id = token_per_repo_id
+ if self.ex_iterable.iter_arrow is not None:
+ self.iter_arrow = self._iter_arrow
+
+ def __iter__(self):
+ # Then for each example, `TypedExamplesIterable` automatically fills missing columns with None.
+ # This is done with `_apply_feature_types_on_example`.
+ for key, example in self.ex_iterable:
+ yield (
+ key,
+ _apply_feature_types_on_example(example, self.features, token_per_repo_id=self.token_per_repo_id),
+ )
+
+ def _iter_arrow(self) -> Iterator[Tuple[Key, pa.Table]]:
+ schema = self.features.arrow_schema
+ for key, pa_table in self.ex_iterable.iter_arrow():
+ columns = set(pa_table.column_names)
+ # add missing columns
+ for column_name in self.features:
+ if column_name not in columns:
+ col = pa.NullArray.from_buffers(pa.null(), len(pa_table), [None])
+ pa_table = pa_table.append_column(column_name, col)
+ if pa_table.schema != schema:
+ pa_table = cast_table_to_features(pa_table, self.features)
+ yield key, pa_table
+
+ def shuffle_data_sources(self, generator: np.random.Generator) -> "TypedExamplesIterable":
+ """Shuffle the wrapped examples iterable."""
+ return TypedExamplesIterable(
+ self.ex_iterable.shuffle_data_sources(generator),
+ features=self.features,
+ token_per_repo_id=self.token_per_repo_id,
+ )
+
+ def shard_data_sources(self, worker_id: int, num_workers: int) -> "TypedExamplesIterable":
+ """Keep only the requested shard."""
+ return TypedExamplesIterable(
+ self.ex_iterable.shard_data_sources(worker_id, num_workers),
+ features=self.features,
+ token_per_repo_id=self.token_per_repo_id,
+ )
+
+ @property
+ def n_shards(self) -> int:
+ return self.ex_iterable.n_shards
+
+
+@dataclass
+class FormattingConfig:
+ format_type: Optional[str]
+
+ def __post_init__(self):
+ if self.format_type == "pandas":
+ raise NotImplementedError(
+ "The 'pandas' formatting is not implemented for iterable datasets. You can use 'numpy' or 'arrow' instead."
+ )
+
+
+@dataclass
+class ShufflingConfig:
+ generator: np.random.Generator
+ _original_seed: Optional[int] = None
+
+
+@dataclass
+class DistributedConfig:
+ rank: int
+ world_size: int
+
+
+def _maybe_add_torch_iterable_dataset_parent_class(cls):
+ """Add torch.utils.data.IterableDataset as a parent class if 'torch' is available"""
+ if config.TORCH_AVAILABLE:
+ import torch.utils.data
+
+ if torch.utils.data.IterableDataset not in cls.__bases__:
+ cls.__bases__ += (torch.utils.data.IterableDataset,)
+
+
+class IterableDataset(DatasetInfoMixin):
+ """A Dataset backed by an iterable."""
+
+ def __init__(
+ self,
+ ex_iterable: _BaseExamplesIterable,
+ info: Optional[DatasetInfo] = None,
+ split: Optional[NamedSplit] = None,
+ formatting: Optional[FormattingConfig] = None,
+ shuffling: Optional[ShufflingConfig] = None,
+ distributed: Optional[DistributedConfig] = None,
+ token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None,
+ format_type="deprecated",
+ ):
+ if distributed and distributed.world_size > 1 and shuffling and shuffling._original_seed is None:
+ raise RuntimeError(
+ "The dataset doesn't have a fixed random seed across nodes to shuffle and split the list of dataset shards by node. "
+ "Please pass e.g. `seed=42` in `.shuffle()` to make all the nodes use the same seed. "
+ )
+ if format_type != "deprecated":
+ warning_msg = "'format_type' is deprecated and will be removed in the next major version of datasets. "
+ help_message = "Please use 'formatting=FormattingConfig(format_type=format_type)' instead."
+ warnings.warn(warning_msg + help_message, category=FutureWarning, stacklevel=2)
+ formatting = FormattingConfig(format_type=format_type)
+
+ info = info.copy() if info is not None else DatasetInfo()
+ DatasetInfoMixin.__init__(self, info=info, split=split)
+
+ self._ex_iterable = ex_iterable
+ self._formatting = formatting
+ self._shuffling = shuffling
+ self._distributed = distributed
+ self._epoch = 0
+ self._token_per_repo_id: Dict[str, Union[str, bool, None]] = token_per_repo_id or {}
+ _maybe_add_torch_iterable_dataset_parent_class(self.__class__)
+
+ def __repr__(self):
+ return f"IterableDataset({{\n features: {list(self._info.features.keys()) if self._info.features is not None else 'Unknown'},\n n_shards: {self.n_shards}\n}})"
+
+ def __getstate__(self):
+ return self.__dict__
+
+ def __setstate__(self, d):
+ self.__dict__ = d
+ # Re-add torch iterable dataset as a parent class, since dynamically added parent classes are not kept when pickling
+ _maybe_add_torch_iterable_dataset_parent_class(self.__class__)
+
+ def _head(self, n=5):
+ return _examples_to_batch(list(self.take(n)))
+
+ def _effective_generator(self):
+ if self._shuffling and self._epoch == 0:
+ return self._shuffling.generator
+ elif self._shuffling:
+            # Create effective seed using self._epoch (we subtract in order to avoid overflow in long_scalars)
+ effective_seed = deepcopy(self._shuffling.generator).integers(0, 1 << 63) - self._epoch
+ effective_seed = (1 << 63) + effective_seed if effective_seed < 0 else effective_seed
+ return np.random.default_rng(effective_seed)
+ else:
+ raise ValueError("This dataset is not shuffled")
+
+ @property
+ def n_shards(self) -> int:
+ if self._distributed and self._ex_iterable.n_shards % self._distributed.world_size == 0:
+ return self._ex_iterable.n_shards // self._distributed.world_size
+ return self._ex_iterable.n_shards
+
+ def _iter_pytorch(self):
+ ex_iterable = self._prepare_ex_iterable_for_iteration()
+ # fix for fsspec when using multiprocess
+ _reset_fsspec_lock()
+ # check if there aren't too many workers
+ import torch.utils.data
+
+ worker_info = torch.utils.data.get_worker_info()
+ if self._is_main_process() and ex_iterable.n_shards < worker_info.num_workers:
+ logger.warning(
+ f"Too many dataloader workers: {worker_info.num_workers} (max is dataset.n_shards={ex_iterable.n_shards}). "
+ f"Stopping {worker_info.num_workers - ex_iterable.n_shards} dataloader workers."
+ )
+ logger.info(
+ f"To parallelize data loading, we give each process some shards (or data sources) to process. "
+ f"Therefore it's unnecessary to have a number of workers greater than dataset.n_shards={ex_iterable.n_shards}. "
+ f"To enable more parallelism, please split the dataset in more files than {ex_iterable.n_shards}."
+ )
+ # split workload
+ _log_prefix = f"node#{self._distributed.rank} " if self._distributed else ""
+ shards_indices = ex_iterable.split_shard_indices_by_worker(worker_info.id, worker_info.num_workers)
+ if shards_indices:
+ logger.debug(
+ f"{_log_prefix}dataloader worker#{worker_info.id}, ': Starting to iterate over {len(shards_indices)}/{ex_iterable.n_shards} shards."
+ )
+ ex_iterable = ex_iterable.shard_data_sources(worker_id=worker_info.id, num_workers=worker_info.num_workers)
+
+ if self._formatting:
+ formatter = get_formatter(self._formatting.format_type, features=self.features)
+ format_dict = (
+ formatter.recursive_tensorize if isinstance(formatter, TensorFormatter) else cast_to_python_objects
+ )
+ else:
+ format_dict = None
+
+            if self._formatting and (ex_iterable.iter_arrow or self._formatting.format_type == "arrow"):
+ if ex_iterable.iter_arrow:
+ iterator = _batch_arrow_tables(ex_iterable.iter_arrow(), batch_size=1)
+ else:
+ iterator = _convert_to_arrow(ex_iterable, batch_size=1)
+ for key, pa_table in iterator:
+ yield formatter.format_row(pa_table)
+ return
+ else:
+ for key, example in ex_iterable:
+ if self.features:
+ # `IterableDataset` automatically fills missing columns with None.
+ # This is done with `_apply_feature_types_on_example`.
+ example = _apply_feature_types_on_example(
+ example, self.features, token_per_repo_id=self._token_per_repo_id
+ )
+ yield format_dict(example) if format_dict else example
+ logger.debug(
+ f"{_log_prefix}dataloader worker#{worker_info.id}, ': Finished iterating over {len(shards_indices)}/{ex_iterable.n_shards} shards."
+ )
+ else:
+ logger.debug(
+ f"{_log_prefix}dataloader worker#{worker_info.id}, ': Stopping... Number of dataset shards < num_workers ({ex_iterable.n_shards}<{worker_info.num_workers})."
+ )
+
+ def _is_main_process(self):
+ if self._distributed and self._distributed.rank > 0:
+ return False
+ if "torch" in sys.modules:
+ import torch.utils.data
+
+ worker_info = torch.utils.data.get_worker_info()
+ if worker_info is not None and worker_info.id > 0:
+ return False
+ return True
+
+ def _prepare_ex_iterable_for_iteration(self) -> _BaseExamplesIterable:
+ if self._shuffling:
+ ex_iterable = self._ex_iterable.shuffle_data_sources(self._effective_generator())
+ else:
+ ex_iterable = self._ex_iterable
+
+ if self._distributed:
+ rank = self._distributed.rank
+ world_size = self._distributed.world_size
+ if ex_iterable.n_shards % world_size == 0:
+ if self._is_main_process():
+ n_shards_per_node = ex_iterable.n_shards // world_size
+ plural = "s" if n_shards_per_node > 1 else ""
+ logger.info(
+ f"Assigning {n_shards_per_node} shard{plural} (or data source{plural}) of the dataset to each node."
+ )
+ ex_iterable = ex_iterable.shard_data_sources(rank, world_size)
+ else:
+ if self._is_main_process():
+ logger.info(
+ f"Assigning 1 out of {world_size} examples of the dataset to each node. The others are skipped during the iteration."
+ )
+ logger.info(
+ f"It is more optimized to distribute the dataset shards (or data sources) across nodes. "
+ f"You can do that by using a dataset with number of shards that is a factor of world_size={world_size}. "
+ f"The current dataset has {ex_iterable.n_shards} which is not a factor of {world_size}"
+ )
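+                # e.g. with world_size=2: rank 0 iterates over examples 0, 2, 4, ... and rank 1 over examples 1, 3, 5, ...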
+ ex_iterable = StepExamplesIterable(ex_iterable, step=world_size, offset=rank)
+
+ return ex_iterable
+
+ def __iter__(self):
+ if "torch" in sys.modules:
+ import torch.utils.data
+
+ worker_info = torch.utils.data.get_worker_info()
+ if isinstance(self, torch.utils.data.IterableDataset) and worker_info is not None:
+ # We're a torch.utils.data.IterableDataset in a PyTorch worker process
+ yield from self._iter_pytorch()
+ return
+
+ ex_iterable = self._prepare_ex_iterable_for_iteration()
+ if self._formatting:
+ formatter = get_formatter(self._formatting.format_type, features=self.features)
+ format_dict = (
+ formatter.recursive_tensorize if isinstance(formatter, TensorFormatter) else cast_to_python_objects
+ )
+ else:
+ format_dict = None
+
+ if self._formatting and (ex_iterable.iter_arrow or self._formatting.format_type == "arrow"):
+ if ex_iterable.iter_arrow:
+ iterator = _batch_arrow_tables(ex_iterable.iter_arrow(), batch_size=1)
+ else:
+ iterator = _convert_to_arrow(ex_iterable, batch_size=1)
+ for key, pa_table in iterator:
+ yield formatter.format_row(pa_table)
+ return
+
+ for key, example in ex_iterable:
+ if self.features:
+ # `IterableDataset` automatically fills missing columns with None.
+ # This is done with `_apply_feature_types_on_example`.
+ example = _apply_feature_types_on_example(
+ example, self.features, token_per_repo_id=self._token_per_repo_id
+ )
+ yield format_dict(example) if format_dict else example
+
+ def iter(self, batch_size: int, drop_last_batch: bool = False):
+ """Iterate through the batches of size `batch_size`.
+
+ Args:
+            batch_size (`int`):
+                Size of each batch to yield.
+            drop_last_batch (`bool`, defaults to `False`):
+                Whether a last batch smaller than the `batch_size` should be dropped.
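+
+        Example (illustrative, using the same streaming dataset as the other examples in this class):
+
+        ```py
+        >>> from datasets import load_dataset
+        >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True)
+        >>> for batch in ds.iter(batch_size=2):
+        ...     print(len(batch["text"]), batch["label"])
+        ...     break
+        2 [1, 1]
+        ```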
+ """
+
+ if self._formatting:
+ formatter = get_formatter(self._formatting.format_type, features=self.features)
+ format_dict = (
+ formatter.recursive_tensorize if isinstance(formatter, TensorFormatter) else cast_to_python_objects
+ )
+ else:
+ format_dict = None
+
+ ex_iterable = self._prepare_ex_iterable_for_iteration()
+        if self._formatting and (ex_iterable.iter_arrow or self._formatting.format_type == "arrow"):
+ if ex_iterable.iter_arrow:
+ iterator = _batch_arrow_tables(
+ ex_iterable.iter_arrow(), batch_size=batch_size, drop_last_batch=drop_last_batch
+ )
+ else:
+ iterator = _convert_to_arrow(ex_iterable, batch_size=batch_size, drop_last_batch=drop_last_batch)
+ for key, pa_table in iterator:
+ yield formatter.format_batch(pa_table)
+ return
+
+ iterator = iter(ex_iterable)
+ for key, example in iterator:
+ # If batched, first build the batch
+ examples = [example] + [example for key, example in islice(iterator, batch_size - 1)]
+ if drop_last_batch and len(examples) < batch_size: # ignore last batch
+ return
+ batch = _examples_to_batch(examples)
+ if self.features:
+ # `IterableDataset` automatically fills missing columns with None.
+ # This is done with `_apply_feature_types_on_batch`.
+ batch = _apply_feature_types_on_batch(batch, self.features, token_per_repo_id=self._token_per_repo_id)
+ yield format_dict(batch) if format_dict else batch
+
+ @staticmethod
+ def from_generator(
+ generator: Callable,
+ features: Optional[Features] = None,
+ gen_kwargs: Optional[dict] = None,
+ ) -> "IterableDataset":
+ """Create an Iterable Dataset from a generator.
+
+ Args:
+ generator (`Callable`):
+ A generator function that `yields` examples.
+ features (`Features`, *optional*):
+ Dataset features.
+ gen_kwargs(`dict`, *optional*):
+ Keyword arguments to be passed to the `generator` callable.
+ You can define a sharded iterable dataset by passing the list of shards in `gen_kwargs`.
+ This can be used to improve shuffling and when iterating over the dataset with multiple workers.
+
+ Returns:
+ `IterableDataset`
+
+ Example:
+
+ ```py
+ >>> def gen():
+ ... yield {"text": "Good", "label": 0}
+ ... yield {"text": "Bad", "label": 1}
+ ...
+ >>> ds = IterableDataset.from_generator(gen)
+ ```
+
+ ```py
+ >>> def gen(shards):
+ ... for shard in shards:
+ ... with open(shard) as f:
+ ... for line in f:
+ ... yield {"line": line}
+ ...
+ >>> shards = [f"data{i}.txt" for i in range(32)]
+ >>> ds = IterableDataset.from_generator(gen, gen_kwargs={"shards": shards})
+ >>> ds = ds.shuffle(seed=42, buffer_size=10_000) # shuffles the shards order + uses a shuffle buffer
+ >>> from torch.utils.data import DataLoader
+ >>> dataloader = DataLoader(ds.with_format("torch"), num_workers=4) # give each worker a subset of 32/4=8 shards
+ ```
+ """
+ from .io.generator import GeneratorDatasetInputStream
+
+ return GeneratorDatasetInputStream(
+ generator=generator,
+ features=features,
+ gen_kwargs=gen_kwargs,
+ streaming=True,
+ ).read()
+
+ @staticmethod
+ def from_spark(
+ df: "pyspark.sql.DataFrame",
+ split: Optional[NamedSplit] = None,
+ features: Optional[Features] = None,
+ **kwargs,
+ ) -> "IterableDataset":
+ """Create an IterableDataset from Spark DataFrame. The dataset is streamed to the driver in batches.
+
+ Args:
+ df (`pyspark.sql.DataFrame`):
+ The DataFrame containing the desired data.
+ split (`NamedSplit`, *optional*):
+ Split name to be assigned to the dataset.
+ features (`Features`, *optional*):
+ Dataset features.
+
+ Returns:
+ [`IterableDataset`]
+
+ Example:
+
+ ```py
+ >>> df = spark.createDataFrame(
+ >>> data=[[1, "Elia"], [2, "Teo"], [3, "Fang"]],
+ >>> columns=["id", "name"],
+ >>> )
+ >>> ds = IterableDataset.from_spark(df)
+ ```
+ """
+ from .io.spark import SparkDatasetReader
+
+ if sys.platform == "win32":
+ raise EnvironmentError("IterableDataset.from_spark is not currently supported on Windows")
+
+ return SparkDatasetReader(
+ df,
+ split=split,
+ features=features,
+ streaming=True,
+ **kwargs,
+ ).read()
+
+ @staticmethod
+ def from_file(filename: str) -> "IterableDataset":
+ """Instantiate a IterableDataset from Arrow table at filename.
+
+ Args:
+ filename (`str`):
+ File name of the dataset.
+
+ Returns:
+ [`IterableDataset`]
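+
+        Example (with a hypothetical Arrow cache file path):
+
+        ```py
+        >>> ds = IterableDataset.from_file("path/to/dataset.arrow")
+        ```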
+ """
+ pa_table_schema = read_schema_from_file(filename)
+ inferred_features = Features.from_arrow_schema(pa_table_schema)
+ ex_iterable = ArrowExamplesIterable(Dataset._generate_tables_from_cache_file, kwargs={"filename": filename})
+ return IterableDataset(ex_iterable=ex_iterable, info=DatasetInfo(features=inferred_features))
+
+ def with_format(
+ self,
+ type: Optional[str] = None,
+ ) -> "IterableDataset":
+ """
+ Return a dataset with the specified format.
+ Supported formats: "arrow", or None for regular python objects.
+ The other formats are currently not implemented.
+
+ Args:
+
+ type (`str`, optional, default None): if set to "torch", the returned dataset
+ will be a subclass of torch.utils.data.IterableDataset to be used in a DataLoader
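+
+        Example (a minimal sketch, mirroring the usage shown in the `from_generator` docstring):
+
+        ```py
+        >>> from datasets import load_dataset
+        >>> from torch.utils.data import DataLoader
+        >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True)
+        >>> dataloader = DataLoader(ds.with_format("torch"), batch_size=4)
+        ```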
+ """
+ type = get_format_type_from_alias(type)
+ # TODO(QL): add format_kwargs
+ # TODO(QL): add format_columns and return_all_columns
+ # TODO(QL): add pandas format
+ return IterableDataset(
+ ex_iterable=self._ex_iterable,
+ info=self._info.copy(),
+ split=self._split,
+ formatting=FormattingConfig(format_type=type),
+ shuffling=copy.deepcopy(self._shuffling),
+ distributed=copy.deepcopy(self._distributed),
+ token_per_repo_id=self._token_per_repo_id,
+ )
+
+ def map(
+ self,
+ function: Optional[Callable] = None,
+ with_indices: bool = False,
+ input_columns: Optional[Union[str, List[str]]] = None,
+ batched: bool = False,
+ batch_size: Optional[int] = 1000,
+ drop_last_batch: bool = False,
+ remove_columns: Optional[Union[str, List[str]]] = None,
+ features: Optional[Features] = None,
+ fn_kwargs: Optional[dict] = None,
+ ) -> "IterableDataset":
+ """
+ Apply a function to all the examples in the iterable dataset (individually or in batches) and update them.
+ If your function returns a column that already exists, then it overwrites it.
+ The function is applied on-the-fly on the examples when iterating over the dataset.
+
+ You can specify whether the function should be batched or not with the `batched` parameter:
+
+ - If batched is `False`, then the function takes 1 example in and should return 1 example.
+ An example is a dictionary, e.g. `{"text": "Hello there !"}`.
+ - If batched is `True` and `batch_size` is 1, then the function takes a batch of 1 example as input and can return a batch with 1 or more examples.
+ A batch is a dictionary, e.g. a batch of 1 example is {"text": ["Hello there !"]}.
+ - If batched is `True` and `batch_size` is `n` > 1, then the function takes a batch of `n` examples as input and can return a batch with `n` examples, or with an arbitrary number of examples.
+ Note that the last batch may have less than `n` examples.
+ A batch is a dictionary, e.g. a batch of `n` examples is `{"text": ["Hello there !"] * n}`.
+
+ Args:
+ function (`Callable`, *optional*, defaults to `None`):
+ Function applied on-the-fly on the examples when you iterate on the dataset.
+ It must have one of the following signatures:
+
+ - `function(example: Dict[str, Any]) -> Dict[str, Any]` if `batched=False` and `with_indices=False`
+ - `function(example: Dict[str, Any], idx: int) -> Dict[str, Any]` if `batched=False` and `with_indices=True`
+ - `function(batch: Dict[str, List]) -> Dict[str, List]` if `batched=True` and `with_indices=False`
+ - `function(batch: Dict[str, List], indices: List[int]) -> Dict[str, List]` if `batched=True` and `with_indices=True`
+
+ For advanced usage, the function can also return a `pyarrow.Table`.
+ Moreover if your function returns nothing (`None`), then `map` will run your function and return the dataset unchanged.
+ If no function is provided, default to identity function: `lambda x: x`.
+ with_indices (`bool`, defaults to `False`):
+ Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx[, rank]): ...`.
+ input_columns (`Optional[Union[str, List[str]]]`, defaults to `None`):
+ The columns to be passed into `function`
+ as positional arguments. If `None`, a dict mapping to all formatted columns is passed as one argument.
+ batched (`bool`, defaults to `False`):
+ Provide batch of examples to `function`.
+ batch_size (`int`, *optional*, defaults to `1000`):
+ Number of examples per batch provided to `function` if `batched=True`.
+                If `batch_size <= 0` or `batch_size == None`, then the full dataset is provided as a single batch to `function`.
+ drop_last_batch (`bool`, defaults to `False`):
+ Whether a last batch smaller than the batch_size should be
+ dropped instead of being processed by the function.
+            remove_columns (`List[str]`, *optional*, defaults to `None`):
+ Remove a selection of columns while doing the mapping.
+ Columns will be removed before updating the examples with the output of `function`, i.e. if `function` is adding
+ columns with names in `remove_columns`, these columns will be kept.
+            features (`Features`, *optional*, defaults to `None`):
+ Feature types of the resulting dataset.
+ fn_kwargs (`Dict`, *optional*, default `None`):
+ Keyword arguments to be passed to `function`.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True)
+ >>> def add_prefix(example):
+ ... example["text"] = "Review: " + example["text"]
+ ... return example
+ >>> ds = ds.map(add_prefix)
+ >>> list(ds.take(3))
+ [{'label': 1,
+ 'text': 'Review: the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'},
+ {'label': 1,
+ 'text': 'Review: the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .'},
+ {'label': 1, 'text': 'Review: effective but too-tepid biopic'}]
+ ```
+ """
+ if isinstance(input_columns, str):
+ input_columns = [input_columns]
+ if isinstance(remove_columns, str):
+ remove_columns = [remove_columns]
+ if function is None:
+ function = identity_func
+ if fn_kwargs is None:
+ fn_kwargs = {}
+ ex_iterable = MappedExamplesIterable(
+ TypedExamplesIterable(self._ex_iterable, self._info.features, token_per_repo_id=self._token_per_repo_id)
+ if self._info.features is not None
+ else self._ex_iterable,
+ function=function,
+ with_indices=with_indices,
+ input_columns=input_columns,
+ batched=batched,
+ batch_size=batch_size,
+ drop_last_batch=drop_last_batch,
+ remove_columns=remove_columns,
+ fn_kwargs=fn_kwargs,
+ formatting=self._formatting,
+ )
+ info = self.info.copy()
+ info.features = features
+ return IterableDataset(
+ ex_iterable=ex_iterable,
+ info=info,
+ split=self._split,
+ formatting=self._formatting,
+ shuffling=copy.deepcopy(self._shuffling),
+ distributed=copy.deepcopy(self._distributed),
+ token_per_repo_id=self._token_per_repo_id,
+ )
+
+ def filter(
+ self,
+ function: Optional[Callable] = None,
+ with_indices=False,
+ input_columns: Optional[Union[str, List[str]]] = None,
+ batched: bool = False,
+ batch_size: Optional[int] = 1000,
+ fn_kwargs: Optional[dict] = None,
+ ) -> "IterableDataset":
+ """Apply a filter function to all the elements so that the dataset only includes examples according to the filter function.
+ The filtering is done on-the-fly when iterating over the dataset.
+
+ Args:
+ function (`Callable`):
+ Callable with one of the following signatures:
+
+ - `function(example: Dict[str, Any]) -> bool` if `with_indices=False, batched=False`
+ - `function(example: Dict[str, Any], indices: int) -> bool` if `with_indices=True, batched=False`
+ - `function(example: Dict[str, List]) -> List[bool]` if `with_indices=False, batched=True`
+ - `function(example: Dict[str, List], indices: List[int]) -> List[bool]` if `with_indices=True, batched=True`
+
+ If no function is provided, defaults to an always True function: `lambda x: True`.
+ with_indices (`bool`, defaults to `False`):
+ Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx): ...`.
+ input_columns (`str` or `List[str]`, *optional*):
+ The columns to be passed into `function` as
+ positional arguments. If `None`, a dict mapping to all formatted columns is passed as one argument.
+ batched (`bool`, defaults to `False`):
+ Provide batch of examples to `function`.
+ batch_size (`int`, *optional*, default `1000`):
+ Number of examples per batch provided to `function` if `batched=True`.
+ fn_kwargs (`Dict`, *optional*, default `None`):
+ Keyword arguments to be passed to `function`.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True)
+ >>> ds = ds.filter(lambda x: x["label"] == 0)
+ >>> list(ds.take(3))
+        [{'label': 0, 'text': 'simplistic , silly and tedious .'},
+        {'label': 0,
+        'text': "it's so laddish and juvenile , only teenage boys could possibly find it funny ."},
+        {'label': 0,
+        'text': 'exploitative and largely devoid of the depth or sophistication that would make watching such a graphic treatment of the crimes bearable .'}]
+ ```
+ """
+ if isinstance(input_columns, str):
+ input_columns = [input_columns]
+
+ # TODO(QL): keep the features (right now if we keep it it would call decode_example again on an already decoded example)
+ info = copy.deepcopy(self._info)
+ info.features = None
+
+ # We need the examples to be decoded for certain feature types like Image or Audio, so we use TypedExamplesIterable here
+ ex_iterable = FilteredExamplesIterable(
+ TypedExamplesIterable(self._ex_iterable, self._info.features, token_per_repo_id=self._token_per_repo_id)
+ if self._info.features is not None
+ else self._ex_iterable,
+ function=function,
+ with_indices=with_indices,
+ input_columns=input_columns,
+ batched=batched,
+ batch_size=batch_size,
+ fn_kwargs=fn_kwargs,
+ formatting=self._formatting,
+ )
+ return IterableDataset(
+ ex_iterable=ex_iterable,
+ info=info,
+ split=self._split,
+ formatting=self._formatting,
+ shuffling=copy.deepcopy(self._shuffling),
+ distributed=copy.deepcopy(self._distributed),
+ token_per_repo_id=self._token_per_repo_id,
+ )
+
+ def shuffle(
+ self, seed=None, generator: Optional[np.random.Generator] = None, buffer_size: int = 1000
+ ) -> "IterableDataset":
+ """
+ Randomly shuffles the elements of this dataset.
+
+ This dataset fills a buffer with `buffer_size` elements, then randomly samples elements from this buffer,
+ replacing the selected elements with new elements. For perfect shuffling, a buffer size greater than or
+ equal to the full size of the dataset is required.
+
+ For instance, if your dataset contains 10,000 elements but `buffer_size` is set to 1000, then `shuffle` will
+ initially select a random element from only the first 1000 elements in the buffer. Once an element is
+ selected, its space in the buffer is replaced by the next (i.e. 1,001-st) element,
+ maintaining the 1000 element buffer.
+
+        If the dataset is made of several shards, it also shuffles the order of the shards.
+ However if the order has been fixed by using [`~datasets.IterableDataset.skip`] or [`~datasets.IterableDataset.take`]
+ then the order of the shards is kept unchanged.
+
+ Args:
+ seed (`int`, *optional*, defaults to `None`):
+ Random seed that will be used to shuffle the dataset.
+ It is used to sample from the shuffle buffer and also to shuffle the data shards.
+ generator (`numpy.random.Generator`, *optional*):
+ Numpy random Generator to use to compute the permutation of the dataset rows.
+ If `generator=None` (default), uses `np.random.default_rng` (the default BitGenerator (PCG64) of NumPy).
+ buffer_size (`int`, defaults to `1000`):
+ Size of the buffer.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True)
+ >>> list(ds.take(3))
+ [{'label': 1,
+ 'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'},
+ {'label': 1,
+ 'text': 'the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .'},
+ {'label': 1, 'text': 'effective but too-tepid biopic'}]
+ >>> shuffled_ds = ds.shuffle(seed=42)
+ >>> list(shuffled_ds.take(3))
+ [{'label': 1,
+ 'text': "a sports movie with action that's exciting on the field and a story you care about off it ."},
+ {'label': 1,
+ 'text': 'at its best , the good girl is a refreshingly adult take on adultery . . .'},
+ {'label': 1,
+ 'text': "sam jones became a very lucky filmmaker the day wilco got dropped from their record label , proving that one man's ruin may be another's fortune ."}]
+ ```
+ """
+ if generator is None:
+ generator = np.random.default_rng(seed)
+ else:
+ generator = deepcopy(generator)
+ shuffling = ShufflingConfig(generator=generator, _original_seed=seed)
+ return IterableDataset(
+ ex_iterable=BufferShuffledExamplesIterable(
+ self._ex_iterable, buffer_size=buffer_size, generator=generator
+ ).shuffle_data_sources(generator),
+ info=self._info.copy(),
+ split=self._split,
+ formatting=self._formatting,
+ shuffling=shuffling,
+ distributed=copy.deepcopy(self._distributed),
+ token_per_repo_id=self._token_per_repo_id,
+ )
+
+ def set_epoch(self, epoch: int):
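+        """Set the current epoch, used to derive a different effective shuffling seed at each epoch (see `_effective_generator`)."""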
+ self._epoch = epoch
+
+ def skip(self, n) -> "IterableDataset":
+ """
+ Create a new [`IterableDataset`] that skips the first `n` elements.
+
+ Args:
+ n (`int`):
+ Number of elements to skip.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True)
+ >>> list(ds.take(3))
+ [{'label': 1,
+ 'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'},
+ {'label': 1,
+ 'text': 'the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .'},
+ {'label': 1, 'text': 'effective but too-tepid biopic'}]
+ >>> ds = ds.skip(1)
+ >>> list(ds.take(3))
+ [{'label': 1,
+ 'text': 'the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .'},
+ {'label': 1, 'text': 'effective but too-tepid biopic'},
+ {'label': 1,
+ 'text': 'if you sometimes like to go to the movies to have fun , wasabi is a good place to start .'}]
+ ```
+ """
+ ex_iterable = SkipExamplesIterable(self._ex_iterable, n)
+ return IterableDataset(
+ ex_iterable=ex_iterable,
+ info=self._info.copy(),
+ split=self._split,
+ formatting=self._formatting,
+ shuffling=copy.deepcopy(self._shuffling),
+ distributed=copy.deepcopy(self._distributed),
+ token_per_repo_id=self._token_per_repo_id,
+ )
+
+ def take(self, n) -> "IterableDataset":
+ """
+ Create a new [`IterableDataset`] with only the first `n` elements.
+
+ Args:
+ n (`int`):
+ Number of elements to take.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True)
+ >>> small_ds = ds.take(2)
+ >>> list(small_ds)
+ [{'label': 1,
+ 'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'},
+ {'label': 1,
+ 'text': 'the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .'}]
+ ```
+ """
+ ex_iterable = TakeExamplesIterable(self._ex_iterable, n)
+ return IterableDataset(
+ ex_iterable=ex_iterable,
+ info=self._info.copy(),
+ split=self._split,
+ formatting=self._formatting,
+ shuffling=copy.deepcopy(self._shuffling),
+ distributed=copy.deepcopy(self._distributed),
+ token_per_repo_id=self._token_per_repo_id,
+ )
+
+ @property
+ def column_names(self) -> Optional[List[str]]:
+ """Names of the columns in the dataset.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="validation", streaming=True)
+ >>> ds.column_names
+ ['text', 'label']
+ ```
+ """
+ return list(self._info.features.keys()) if self._info.features is not None else None
+
+ def add_column(self, name: str, column: Union[list, np.array]) -> "IterableDataset":
+ """Add column to Dataset.
+
+ Args:
+ name (str): Column name.
+ column (list or np.array): Column data to be added.
+
+ Returns:
+ `IterableDataset`
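+
+        Example (illustrative; `source` is a hypothetical new column):
+
+        ```py
+        >>> from datasets import load_dataset
+        >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True)
+        >>> ds = ds.add_column("source", ["rotten_tomatoes"] * 8530)  # one value per example of the split
+        >>> next(iter(ds))["source"]
+        'rotten_tomatoes'
+        ```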
+ """
+ return self.map(partial(add_column_fn, name=name, column=column), with_indices=True)
+
+ def rename_column(self, original_column_name: str, new_column_name: str) -> "IterableDataset":
+ """
+ Rename a column in the dataset, and move the features associated to the original column under the new column
+ name.
+
+ Args:
+ original_column_name (`str`):
+ Name of the column to rename.
+ new_column_name (`str`):
+ New name for the column.
+
+ Returns:
+ `IterableDataset`: A copy of the dataset with a renamed column.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True)
+ >>> next(iter(ds))
+ {'label': 1,
+ 'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'}
+ >>> ds = ds.rename_column("text", "movie_review")
+ >>> next(iter(ds))
+ {'label': 1,
+ 'movie_review': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'}
+ ```
+ """
+ return self.rename_columns({original_column_name: new_column_name})
+
+ def rename_columns(self, column_mapping: Dict[str, str]) -> "IterableDataset":
+ """
+ Rename several columns in the dataset, and move the features associated to the original columns under
+ the new column names.
+
+ Args:
+ column_mapping (`Dict[str, str]`): A mapping of columns to rename to their new names
+
+ Returns:
+ `IterableDataset`: A copy of the dataset with renamed columns
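+
+        Example (illustrative):
+
+        ```py
+        >>> from datasets import load_dataset
+        >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True)
+        >>> ds = ds.rename_columns({"text": "movie_review", "label": "rating"})
+        >>> ds.column_names
+        ['movie_review', 'rating']
+        ```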
+ """
+
+ original_features = self._info.features.copy() if self._info.features else None
+ ds_iterable = self.map(
+ partial(_rename_columns_fn, column_mapping=column_mapping), remove_columns=list(column_mapping)
+ )
+ if original_features is not None:
+ ds_iterable._info.features = Features(
+ {
+ column_mapping[col] if col in column_mapping.keys() else col: feature
+ for col, feature in original_features.items()
+ }
+ )
+ # check that it's still valid, especially with regard to task templates
+ try:
+ ds_iterable._info.copy()
+ except ValueError:
+ ds_iterable._info.task_templates = None
+ return ds_iterable
+
+ def remove_columns(self, column_names: Union[str, List[str]]) -> "IterableDataset":
+ """
+ Remove one or several column(s) in the dataset and the features associated to them.
+ The removal is done on-the-fly on the examples when iterating over the dataset.
+
+
+ Args:
+ column_names (`Union[str, List[str]]`):
+ Name of the column(s) to remove.
+
+ Returns:
+ `IterableDataset`: A copy of the dataset object without the columns to remove.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True)
+ >>> next(iter(ds))
+ {'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .', 'label': 1}
+ >>> ds = ds.remove_columns("label")
+ >>> next(iter(ds))
+ {'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'}
+ ```
+ """
+ original_features = self._info.features.copy() if self._info.features else None
+ ds_iterable = self.map(remove_columns=column_names)
+ if original_features is not None:
+ ds_iterable._info.features = original_features.copy()
+ for col, _ in original_features.items():
+ if col in column_names:
+ del ds_iterable._info.features[col]
+ # check that it's still valid, especially with regard to task templates
+ try:
+ ds_iterable._info.copy()
+ except ValueError:
+ ds_iterable._info.task_templates = None
+
+ return ds_iterable
+
+ def select_columns(self, column_names: Union[str, List[str]]) -> "IterableDataset":
+ """Select one or several column(s) in the dataset and the features
+ associated to them. The selection is done on-the-fly on the examples
+ when iterating over the dataset.
+
+
+ Args:
+ column_names (`Union[str, List[str]]`):
+ Name of the column(s) to select.
+
+ Returns:
+ `IterableDataset`: A copy of the dataset object with selected columns.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True)
+ >>> next(iter(ds))
+ {'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .', 'label': 1}
+ >>> ds = ds.select_columns("text")
+ >>> next(iter(ds))
+ {'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'}
+ ```
+ """
+ if isinstance(column_names, str):
+ column_names = [column_names]
+
+ if self._info:
+ info = copy.deepcopy(self._info)
+ if self._info.features is not None:
+ missing_columns = set(column_names) - set(self._info.features.keys())
+ if missing_columns:
+ raise ValueError(
+ f"Column name {list(missing_columns)} not in the "
+ "dataset. Columns in the dataset: "
+ f"{list(self._info.features.keys())}."
+ )
+ info.features = Features({c: info.features[c] for c in column_names})
+ # check that it's still valid, especially with regard to task templates
+ try:
+ info.copy()
+ except ValueError:
+ info.task_templates = None
+
+ ex_iterable = SelectColumnsIterable(self._ex_iterable, column_names)
+ return IterableDataset(
+ ex_iterable=ex_iterable,
+ info=info,
+ split=self._split,
+ formatting=self._formatting,
+ shuffling=self._shuffling,
+ distributed=self._distributed,
+ token_per_repo_id=self._token_per_repo_id,
+ )
+
+ def cast_column(self, column: str, feature: FeatureType) -> "IterableDataset":
+ """Cast column to feature for decoding.
+
+ Args:
+ column (`str`):
+ Column name.
+ feature (`Feature`):
+ Target feature.
+
+ Returns:
+ `IterableDataset`
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset, Audio
+ >>> ds = load_dataset("PolyAI/minds14", name="en-US", split="train", streaming=True)
+ >>> ds.features
+ {'audio': Audio(sampling_rate=8000, mono=True, decode=True, id=None),
+ 'english_transcription': Value(dtype='string', id=None),
+ 'intent_class': ClassLabel(num_classes=14, names=['abroad', 'address', 'app_error', 'atm_limit', 'balance', 'business_loan', 'card_issues', 'cash_deposit', 'direct_debit', 'freeze', 'high_value_payment', 'joint_account', 'latest_transactions', 'pay_bill'], id=None),
+ 'lang_id': ClassLabel(num_classes=14, names=['cs-CZ', 'de-DE', 'en-AU', 'en-GB', 'en-US', 'es-ES', 'fr-FR', 'it-IT', 'ko-KR', 'nl-NL', 'pl-PL', 'pt-PT', 'ru-RU', 'zh-CN'], id=None),
+ 'path': Value(dtype='string', id=None),
+ 'transcription': Value(dtype='string', id=None)}
+ >>> ds = ds.cast_column("audio", Audio(sampling_rate=16000))
+ >>> ds.features
+ {'audio': Audio(sampling_rate=16000, mono=True, decode=True, id=None),
+ 'english_transcription': Value(dtype='string', id=None),
+ 'intent_class': ClassLabel(num_classes=14, names=['abroad', 'address', 'app_error', 'atm_limit', 'balance', 'business_loan', 'card_issues', 'cash_deposit', 'direct_debit', 'freeze', 'high_value_payment', 'joint_account', 'latest_transactions', 'pay_bill'], id=None),
+ 'lang_id': ClassLabel(num_classes=14, names=['cs-CZ', 'de-DE', 'en-AU', 'en-GB', 'en-US', 'es-ES', 'fr-FR', 'it-IT', 'ko-KR', 'nl-NL', 'pl-PL', 'pt-PT', 'ru-RU', 'zh-CN'], id=None),
+ 'path': Value(dtype='string', id=None),
+ 'transcription': Value(dtype='string', id=None)}
+ ```
+ """
+ info = self._info.copy()
+ info.features[column] = feature
+ # check that it's still valid, especially with regard to task templates
+ try:
+ info.copy()
+ except ValueError:
+ info.task_templates = None
+ return IterableDataset(
+ ex_iterable=self._ex_iterable,
+ info=info,
+ split=self._split,
+ formatting=self._formatting,
+ shuffling=copy.deepcopy(self._shuffling),
+ distributed=copy.deepcopy(self._distributed),
+ token_per_repo_id=self._token_per_repo_id,
+ )
+
+ def cast(
+ self,
+ features: Features,
+ ) -> "IterableDataset":
+ """
+ Cast the dataset to a new set of features.
+
+ Args:
+ features ([`Features`]):
+ New features to cast the dataset to.
+ The name of the fields in the features must match the current column names.
+ The type of the data must also be convertible from one type to the other.
+ For non-trivial conversion, e.g. `string` <-> `ClassLabel` you should use [`~Dataset.map`] to update the Dataset.
+
+ Returns:
+ `IterableDataset`: A copy of the dataset with casted features.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_dataset
+ >>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True)
+ >>> ds.features
+ {'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None),
+ 'text': Value(dtype='string', id=None)}
+ >>> new_features = ds.features.copy()
+ >>> new_features["label"] = ClassLabel(names=["bad", "good"])
+ >>> new_features["text"] = Value("large_string")
+ >>> ds = ds.cast(new_features)
+ >>> ds.features
+ {'label': ClassLabel(num_classes=2, names=['bad', 'good'], id=None),
+ 'text': Value(dtype='large_string', id=None)}
+ ```
+ """
+ info = self._info.copy()
+ info.features = features
+ # check that it's still valid, especially with regard to task templates
+ try:
+ info.copy()
+ except ValueError:
+ info.task_templates = None
+ return IterableDataset(
+ ex_iterable=self._ex_iterable,
+ info=info,
+ split=self._split,
+ formatting=self._formatting,
+ shuffling=copy.deepcopy(self._shuffling),
+ distributed=copy.deepcopy(self._distributed),
+ token_per_repo_id=self._token_per_repo_id,
+ )
+
+ def _step(self, step: int, offset: int) -> "IterableDataset":
+ ex_iterable = StepExamplesIterable(self._ex_iterable, step=step, offset=offset)
+ return IterableDataset(
+ ex_iterable=ex_iterable,
+ info=self._info.copy(),
+ split=self._split,
+ formatting=self._formatting,
+ shuffling=copy.deepcopy(self._shuffling),
+ distributed=copy.deepcopy(self._distributed),
+ token_per_repo_id=self._token_per_repo_id,
+ )
+
+ def _resolve_features(self):
+ if self.features is not None:
+ return self
+ elif isinstance(self._ex_iterable, TypedExamplesIterable):
+ features = self._ex_iterable.features
+ else:
+ features = _infer_features_from_batch(self.with_format(None)._head())
+ info = self.info.copy()
+ info.features = features
+ return IterableDataset(
+ ex_iterable=self._ex_iterable,
+ info=info,
+ split=self._split,
+ formatting=self._formatting,
+ shuffling=copy.deepcopy(self._shuffling),
+ distributed=copy.deepcopy(self._distributed),
+ token_per_repo_id=self._token_per_repo_id,
+ )
+
+
+def _concatenate_iterable_datasets(
+ dsets: List[IterableDataset],
+ info: Optional[DatasetInfo] = None,
+ split: Optional[NamedSplit] = None,
+ axis: int = 0,
+) -> IterableDataset:
+ """
+ Converts a list of `IterableDataset` with the same schema into a single `IterableDataset`.
+ Missing data are filled with None values.
+
+ Args:
+ dsets (`List[datasets.IterableDataset]`): List of Datasets to concatenate.
+ info (`DatasetInfo`, optional): Dataset information, like description, citation, etc.
+ split (`NamedSplit`, optional): Name of the dataset split.
+ axis (``{0, 1}``, default ``0``, meaning over rows):
+ Axis to concatenate over, where ``0`` means over rows (vertically) and ``1`` means over columns
+ (horizontally).
+
+ *New in version 1.6.0*
+
+ Example:
+
+ ```py
+ >>> ds3 = _concatenate_iterable_datasets([ds1, ds2])
+ ```
+ """
+ dsets = [d._resolve_features() for d in dsets]
+
+ # Perform checks (and a potential cast if axis=0)
+ if axis == 0:
+ _check_if_features_can_be_aligned([dset.features for dset in dsets])
+ else:
+ _check_column_names([col_name for dset in dsets for col_name in dset.features])
+
+ # TODO: improve this to account for a mix of ClassLabel and Value for example
+ # right now it would keep the type of the first dataset in the list
+ features = Features(
+ {k: v for features in _align_features([dset.features for dset in dsets]) for k, v in features.items()}
+ )
+
+ ex_iterables = [d._ex_iterable for d in dsets]
+ if axis == 0:
+ ex_iterable = VerticallyConcatenatedMultiSourcesExamplesIterable(ex_iterables)
+ else:
+ ex_iterable = HorizontallyConcatenatedMultiSourcesExamplesIterable(ex_iterables)
+ # Set new info - we update the features
+ # setting the features also ensures to fill missing columns with None
+ if info is None:
+ info = DatasetInfo.from_merge([d.info for d in dsets])
+ else:
+ info = info.copy()
+ info.features = features
+ # Get all the auth tokens per repository - in case the datasets come from different private repositories
+ token_per_repo_id = {repo_id: token for dataset in dsets for repo_id, token in dataset._token_per_repo_id.items()}
+ # Return new dataset
+ return IterableDataset(ex_iterable=ex_iterable, info=info, split=split, token_per_repo_id=token_per_repo_id)
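+
+# Usage sketch (illustrative): horizontal concatenation (axis=1) glues datasets with
+# disjoint column names; `ds_text` and `ds_labels` below are hypothetical streaming
+# datasets of the same length.
+#
+# >>> ds = _concatenate_iterable_datasets([ds_text, ds_labels], axis=1)
+# >>> next(iter(ds))  # one example containing the columns of both sources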
+
+
+def _interleave_iterable_datasets(
+ datasets: List[IterableDataset],
+ probabilities: Optional[List[float]] = None,
+ seed: Optional[int] = None,
+ info: Optional[DatasetInfo] = None,
+ split: Optional[NamedSplit] = None,
+ stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
+) -> IterableDataset:
+ """
+ Interleave several iterable datasets (sources) into a single iterable dataset.
+ The new iterable dataset alternates between the sources to yield examples.
+ If `probabilities = None` (default), the iterable dataset cycles through the sources in order for each next example in the iteration.
+ If `probabilities` is not `None`, the iterable dataset samples a random source according to the provided probabilities for each next example in the iteration.
+
+ Args:
+ datasets (`List[IterableDataset]`): list of datasets to interleave
+ probabilities (`List[float]`, optional, default None): If specified, the new iterable dataset samples
+ examples from one source at a time according to these probabilities.
+ seed (`int`, optional, default None): The random seed used to choose a source for each example.
+ stopping_strategy (`str`, defaults to `first_exhausted`):
+ Two strategies are proposed right now.
+ By default, `first_exhausted` is an undersampling strategy, i.e. the dataset construction is stopped as soon as one dataset has run out of samples.
+ If the strategy is `all_exhausted`, we use an oversampling strategy, i.e. the dataset construction is stopped as soon as every sample of every dataset has been added at least once.
+ Note that if the strategy is `all_exhausted`, the interleaved dataset size can get enormous:
+ - with no probabilities, the resulting dataset will have max_length_datasets*nb_dataset samples.
+ - with given probabilities, the resulting dataset will have more samples if some datasets have a really low probability of being visited.
+
+ Output:
+ `datasets.IterableDataset`
+ """
+ datasets = [d._resolve_features() for d in datasets]
+
+ # Perform checks
+ _check_if_features_can_be_aligned([dset.features for dset in datasets])
+
+ # TODO: improve this to account for a mix of ClassLabel and Value for example
+ # right now it would keep the type of the first dataset in the list
+ features = Features(
+ {k: v for features in _align_features([dset.features for dset in datasets]) for k, v in features.items()}
+ )
+
+ ex_iterables = [d._ex_iterable for d in datasets]
+
+ # Use cycling or random cycling of sources
+ if probabilities is None:
+ ex_iterable = CyclingMultiSourcesExamplesIterable(ex_iterables, stopping_strategy=stopping_strategy)
+ else:
+ generator = np.random.default_rng(seed)
+ ex_iterable = RandomlyCyclingMultiSourcesExamplesIterable(
+ ex_iterables, generator=generator, probabilities=probabilities, stopping_strategy=stopping_strategy
+ )
+ # Set new info - we update the features
+ # setting the features also ensures to fill missing columns with None
+ if info is None:
+ info = DatasetInfo.from_merge([d.info for d in datasets])
+ else:
+ info = info.copy()
+ info.features = features
+ # Get all the auth tokens per repository - in case the datasets come from different private repositories
+ token_per_repo_id = {
+ repo_id: token for dataset in datasets for repo_id, token in dataset._token_per_repo_id.items()
+ }
+ # Return new dataset
+ return IterableDataset(ex_iterable=ex_iterable, info=info, split=split, token_per_repo_id=token_per_repo_id)
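+
+# Usage sketch (illustrative): sampling 80/20 from two hypothetical streaming datasets
+# `ds1` and `ds2`, stopping only once every example of both has been seen at least once.
+#
+# >>> ds = _interleave_iterable_datasets(
+# ...     [ds1, ds2], probabilities=[0.8, 0.2], seed=42, stopping_strategy="all_exhausted"
+# ... )
+# >>> next(iter(ds))  # comes from ds1 with probability 0.8, from ds2 with probability 0.2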
+
+
+def _split_by_node_iterable_dataset(dataset: IterableDataset, rank: int, world_size: int) -> IterableDataset:
+ """
+ Split an iterable dataset for the node at rank `rank` in a pool of nodes of size `world_size`.
+
+ If the dataset has a number of shards that is a multiple of `world_size` (i.e. if `dataset.n_shards % world_size == 0`),
+ then the shards are evenly assigned across the nodes, which is the most efficient.
+ Otherwise, each node keeps 1 example out of `world_size`, skipping the other examples.
+
+ Args:
+ dataset ([`IterableDataset`]):
+ The iterable dataset to split by node.
+ rank (`int`):
+ Rank of the current node.
+ world_size (`int`):
+ Total number of nodes.
+
+ Returns:
+ [`IterableDataset`]: The iterable dataset to be used on the node at rank `rank`.
+ """
+ if dataset._distributed:
+ rank = world_size * dataset._distributed.rank + rank
+ world_size = world_size * dataset._distributed.world_size
+ distributed = DistributedConfig(rank=rank, world_size=world_size)
+ return IterableDataset(
+ ex_iterable=dataset._ex_iterable,
+ info=dataset._info.copy(),
+ split=dataset._split,
+ formatting=dataset._formatting,
+ shuffling=copy.deepcopy(dataset._shuffling),
+ distributed=distributed,
+ token_per_repo_id=dataset._token_per_repo_id,
+ )
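+
+# Usage sketch (illustrative): giving each of two nodes its own view of a hypothetical
+# streaming dataset `ds`; together the two views cover the full dataset exactly once.
+#
+# >>> ds_rank0 = _split_by_node_iterable_dataset(ds, rank=0, world_size=2)
+# >>> ds_rank1 = _split_by_node_iterable_dataset(ds, rank=1, world_size=2)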
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/keyhash.py b/evalkit_tf437/lib/python3.10/site-packages/datasets/keyhash.py
new file mode 100644
index 0000000000000000000000000000000000000000..3c75fcfd7ffb300aac1ffd0fc822287f21b56f8a
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/datasets/keyhash.py
@@ -0,0 +1,104 @@
+# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Lint as: python3
+
+"""
+Hashing function for dataset keys using `hashlib.md5`
+
+Requirements for the hash function:
+
+- Provides a uniformly distributed hash from random space
+- Adequately fast speed
+- Working with multiple input types (in this case, `str`, `int` or `bytes`)
+- Should be platform independent (generates same hash on different OS and systems)
+
+The hashing function provides a unique 128-bit integer hash of the key provided.
+
+The split name is used here as the hash salt to avoid getting the same hashes
+for the same keys in different splits
+"""
+
+from typing import Union
+
+from huggingface_hub.utils import insecure_hashlib
+
+
+def _as_bytes(hash_data: Union[str, int, bytes]) -> bytes:
+ """
+ Returns the input hash_data in its bytes form
+
+ Args:
+ hash_data: the hash salt/key to be converted to bytes
+ """
+ if isinstance(hash_data, bytes):
+ # Data already in bytes, return it as is
+ return hash_data
+ elif isinstance(hash_data, str):
+ # We keep the data as is so it can later be encoded to UTF-8
+ # However replace `\\` with `/` for Windows compatibility
+ hash_data = hash_data.replace("\\", "/")
+ elif isinstance(hash_data, int):
+ hash_data = str(hash_data)
+ else:
+ # If data is not of the required type, raise error
+ raise InvalidKeyError(hash_data)
+
+ return hash_data.encode("utf-8")
+
+
+class InvalidKeyError(Exception):
+ """Raises an error when given key is of invalid datatype."""
+
+ def __init__(self, hash_data):
+ self.prefix = "\nFAILURE TO GENERATE DATASET: Invalid key type detected"
+ self.err_msg = f"\nFound Key {hash_data} of type {type(hash_data)}"
+ self.suffix = "\nKeys should be either str, int or bytes type"
+ super().__init__(f"{self.prefix}{self.err_msg}{self.suffix}")
+
+
+class DuplicatedKeysError(Exception):
+ """Raise an error when duplicate key found."""
+
+ def __init__(self, key, duplicate_key_indices, fix_msg=""):
+ self.key = key
+ self.duplicate_key_indices = duplicate_key_indices
+ self.fix_msg = fix_msg
+ self.prefix = "Found multiple examples generated with the same key"
+ if len(duplicate_key_indices) <= 20:
+ self.err_msg = f"\nThe examples at index {', '.join(duplicate_key_indices)} have the key {key}"
+ else:
+ self.err_msg = f"\nThe examples at index {', '.join(duplicate_key_indices[:20])}... ({len(duplicate_key_indices) - 20} more) have the key {key}"
+ self.suffix = "\n" + fix_msg if fix_msg else ""
+ super().__init__(f"{self.prefix}{self.err_msg}{self.suffix}")
+
+
+class KeyHasher:
+ """KeyHasher class for providing hash using md5"""
+
+ def __init__(self, hash_salt: str):
+ self._split_md5 = insecure_hashlib.md5(_as_bytes(hash_salt))
+
+ def hash(self, key: Union[str, int, bytes]) -> int:
+ """Returns 128-bits unique hash of input key
+
+ Args:
+ key: the input key to be hashed (should be str, int or bytes)
+
+ Returns: 128-bit int hash key"""
+ md5 = self._split_md5.copy()
+ byte_key = _as_bytes(key)
+ md5.update(byte_key)
+ # Convert to integer with hexadecimal conversion
+ return int(md5.hexdigest(), 16)
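+
+# Usage sketch (illustrative): salting the hasher with the split name so identical keys
+# in different splits do not collide.
+#
+# >>> train_hasher = KeyHasher("train")
+# >>> test_hasher = KeyHasher("test")
+# >>> train_hasher.hash("example-0") != test_hasher.hash("example-0")
+# True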
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/metric.py b/evalkit_tf437/lib/python3.10/site-packages/datasets/metric.py
new file mode 100644
index 0000000000000000000000000000000000000000..187c5e5c925b71b26ca83021523dd55c28989d28
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/datasets/metric.py
@@ -0,0 +1,652 @@
+# Copyright 2020 The HuggingFace Datasets Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Lint as: python3
+"""Metrics base class."""
+
+import os
+import types
+import uuid
+from typing import Any, Dict, List, Optional, Tuple, Union
+
+import numpy as np
+import pyarrow as pa
+from filelock import BaseFileLock, Timeout
+
+from . import config
+from .arrow_dataset import Dataset
+from .arrow_reader import ArrowReader
+from .arrow_writer import ArrowWriter
+from .download.download_config import DownloadConfig
+from .download.download_manager import DownloadManager
+from .features import Features
+from .info import DatasetInfo, MetricInfo
+from .naming import camelcase_to_snakecase
+from .utils._filelock import FileLock
+from .utils.deprecation_utils import deprecated
+from .utils.logging import get_logger
+from .utils.py_utils import copyfunc, temp_seed
+
+
+logger = get_logger(__name__)
+
+
+class FileFreeLock(BaseFileLock):
+ """Thread lock until a file **cannot** be locked"""
+
+ def __init__(self, lock_file, *args, **kwargs):
+ self.filelock = FileLock(lock_file)
+ super().__init__(self.filelock.lock_file, *args, **kwargs)
+
+ def _acquire(self):
+ try:
+ self.filelock.acquire(timeout=0.01, poll_intervall=0.02) # Try to lock once
+ except Timeout:
+ # We couldn't acquire the lock, the file is locked!
+ self._context.lock_file_fd = self.filelock.lock_file
+ else:
+ # We were able to acquire the lock, the file is not yet locked!
+ self.filelock.release()
+ self._context.lock_file_fd = None
+
+ def _release(self):
+ self._context.lock_file_fd = None
+
+
+# lists - summarize long lists similarly to NumPy
+# arrays/tensors - let the frameworks control formatting
+def summarize_if_long_list(obj):
+ if not type(obj) == list or len(obj) <= 6: # noqa: E721
+ return f"{obj}"
+
+ def format_chunk(chunk):
+ return ", ".join(repr(x) for x in chunk)
+
+ return f"[{format_chunk(obj[:3])}, ..., {format_chunk(obj[-3:])}]"
+
+
+class MetricInfoMixin:
+ """This base class exposes some attributes of MetricInfo
+ at the base level of the Metric for easy access.
+
+ Use the new library 🤗 Evaluate instead: https://huggingface.co/docs/evaluate
+ """
+
+ def __init__(self, info: MetricInfo):
+ self._metric_info = info
+
+ @property
+ def info(self):
+ """:class:`datasets.MetricInfo` object containing all the metadata in the metric."""
+ return self._metric_info
+
+ @property
+ def name(self) -> str:
+ return self._metric_info.metric_name
+
+ @property
+ def experiment_id(self) -> Optional[str]:
+ return self._metric_info.experiment_id
+
+ @property
+ def description(self) -> str:
+ return self._metric_info.description
+
+ @property
+ def citation(self) -> str:
+ return self._metric_info.citation
+
+ @property
+ def features(self) -> Features:
+ return self._metric_info.features
+
+ @property
+ def inputs_description(self) -> str:
+ return self._metric_info.inputs_description
+
+ @property
+ def homepage(self) -> Optional[str]:
+ return self._metric_info.homepage
+
+ @property
+ def license(self) -> str:
+ return self._metric_info.license
+
+ @property
+ def codebase_urls(self) -> Optional[List[str]]:
+ return self._metric_info.codebase_urls
+
+ @property
+ def reference_urls(self) -> Optional[List[str]]:
+ return self._metric_info.reference_urls
+
+ @property
+ def streamable(self) -> bool:
+ return self._metric_info.streamable
+
+ @property
+ def format(self) -> Optional[str]:
+ return self._metric_info.format
+
+
+class Metric(MetricInfoMixin):
+ """A Metric is the base class and common API for all metrics.
+
+ Use the new library 🤗 Evaluate instead: https://huggingface.co/docs/evaluate
+
+ Args:
+ config_name (``str``): This is used to define a hash specific to a metric's computation script and prevents the metric's data
+ from being overridden when the metric loading script is modified.
+ keep_in_memory (:obj:`bool`): keep all predictions and references in memory. Not possible in distributed settings.
+ cache_dir (``str``): Path to a directory in which temporary prediction/references data will be stored.
+ The data directory should be located on a shared file-system in distributed setups.
+ num_process (``int``): specify the total number of nodes in a distributed setting.
+ This is useful to compute metrics in distributed setups (in particular non-additive metrics like F1).
+ process_id (``int``): specify the id of the current process in a distributed setup (between 0 and num_process-1)
+ This is useful to compute metrics in distributed setups (in particular non-additive metrics like F1).
+ seed (:obj:`int`, optional): If specified, this will temporarily set numpy's random seed when :func:`datasets.Metric.compute` is run.
+ experiment_id (``str``): A specific experiment id. This is used if several distributed evaluations share the same file system.
+ This is useful to compute metrics in distributed setups (in particular non-additive metrics like F1).
+ max_concurrent_cache_files (``int``): Max number of concurrent metrics cache files (default 10000).
+ timeout (``Union[int, float]``): Timeout in seconds for distributed setting synchronization.
+ """
+
+ @deprecated("Use the new library 🤗 Evaluate instead: https://huggingface.co/docs/evaluate")
+ def __init__(
+ self,
+ config_name: Optional[str] = None,
+ keep_in_memory: bool = False,
+ cache_dir: Optional[str] = None,
+ num_process: int = 1,
+ process_id: int = 0,
+ seed: Optional[int] = None,
+ experiment_id: Optional[str] = None,
+ max_concurrent_cache_files: int = 10000,
+ timeout: Union[int, float] = 100,
+ **kwargs,
+ ):
+ # prepare info
+ self.config_name = config_name or "default"
+ info = self._info()
+ info.metric_name = camelcase_to_snakecase(self.__class__.__name__)
+ info.config_name = self.config_name
+ info.experiment_id = experiment_id or "default_experiment"
+ MetricInfoMixin.__init__(self, info) # For easy access on low level
+
+ # Safety checks on num_process and process_id
+ if not isinstance(process_id, int) or process_id < 0:
+ raise ValueError("'process_id' should be a number greater than 0")
+ if not isinstance(num_process, int) or num_process <= process_id:
+ raise ValueError("'num_process' should be a number greater than process_id")
+ if keep_in_memory and num_process != 1:
+ raise ValueError("Using 'keep_in_memory' is not possible in distributed setting (num_process > 1).")
+
+ self.num_process = num_process
+ self.process_id = process_id
+ self.max_concurrent_cache_files = max_concurrent_cache_files
+
+ self.keep_in_memory = keep_in_memory
+ self._data_dir_root = os.path.expanduser(cache_dir or config.HF_METRICS_CACHE)
+ self.data_dir = self._build_data_dir()
+ if seed is None:
+ _, seed, pos, *_ = np.random.get_state()
+ self.seed: int = seed[pos] if pos < 624 else seed[0]
+ else:
+ self.seed: int = seed
+ self.timeout: Union[int, float] = timeout
+
+ # Update 'compute' and 'add' docstring
+ # methods need to be copied otherwise it changes the docstrings of every instance
+ self.compute = types.MethodType(copyfunc(self.compute), self)
+ self.add_batch = types.MethodType(copyfunc(self.add_batch), self)
+ self.add = types.MethodType(copyfunc(self.add), self)
+ self.compute.__func__.__doc__ += self.info.inputs_description
+ self.add_batch.__func__.__doc__ += self.info.inputs_description
+ self.add.__func__.__doc__ += self.info.inputs_description
+
+ # self.arrow_schema = pa.schema(field for field in self.info.features.type)
+ self.buf_writer = None
+ self.writer = None
+ self.writer_batch_size = None
+ self.data = None
+
+ # This is the cache file we store our predictions/references in
+ # Keep it None for now so we can (cloud)pickle the object
+ self.cache_file_name = None
+ self.filelock = None
+ self.rendez_vous_lock = None
+
+ # This is all the cache files on which we have a lock when we are in a distributed setting
+ self.file_paths = None
+ self.filelocks = None
+
+ def __len__(self):
+ """Return the number of examples (predictions or predictions/references pair)
+ currently stored in the metric's cache.
+ """
+ return 0 if self.writer is None else len(self.writer)
+
+ def __repr__(self):
+ return (
+ f'Metric(name: "{self.name}", features: {self.features}, '
+ f'usage: """{self.inputs_description}""", '
+ f"stored examples: {len(self)})"
+ )
+
+ def _build_data_dir(self):
+ """Path of this metric in cache_dir:
+ Will be:
+ self._data_dir_root/self.name/self.config_name/self.hash (if not none)/
+ If any of these elements is missing or if ``with_version=False``, the corresponding subfolders are dropped.
+ """
+ builder_data_dir = self._data_dir_root
+ builder_data_dir = os.path.join(builder_data_dir, self.name, self.config_name)
+ os.makedirs(builder_data_dir, exist_ok=True)
+ return builder_data_dir
+
+ def _create_cache_file(self, timeout=1) -> Tuple[str, FileLock]:
+ """Create a new cache file. If the default cache file is used, we generated a new hash."""
+ file_path = os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-{self.process_id}.arrow")
+ filelock = None
+ for i in range(self.max_concurrent_cache_files):
+ filelock = FileLock(file_path + ".lock")
+ try:
+ filelock.acquire(timeout=timeout)
+ except Timeout:
+ # If we have reached the max number of attempts or we are not allowed to find a free name (distributed setup)
+ # We raise an error
+ if self.num_process != 1:
+ raise ValueError(
+ f"Error in _create_cache_file: another metric instance is already using the local cache file at {file_path}. "
+ f"Please specify an experiment_id (currently: {self.experiment_id}) to avoid collision "
+ f"between distributed metric instances."
+ ) from None
+ if i == self.max_concurrent_cache_files - 1:
+ raise ValueError(
+ f"Cannot acquire lock, too many metric instance are operating concurrently on this file system."
+ f"You should set a larger value of max_concurrent_cache_files when creating the metric "
+ f"(current value is {self.max_concurrent_cache_files})."
+ ) from None
+ # In other cases (allowed to find a new file name + not yet at max number of attempts) we can try to sample a new hashing name.
+ file_uuid = str(uuid.uuid4())
+ file_path = os.path.join(
+ self.data_dir, f"{self.experiment_id}-{file_uuid}-{self.num_process}-{self.process_id}.arrow"
+ )
+ else:
+ break
+
+ return file_path, filelock
+
+ def _get_all_cache_files(self) -> Tuple[List[str], List[FileLock]]:
+ """Get a lock on all the cache files in a distributed setup.
+ We wait up to `timeout` seconds to let all the distributed nodes finish their tasks (default is 100 seconds).
+ """
+ if self.num_process == 1:
+ if self.cache_file_name is None:
+ raise ValueError(
+ "Metric cache file doesn't exist. Please make sure that you call `add` or `add_batch` "
+ "at least once before calling `compute`."
+ )
+ file_paths = [self.cache_file_name]
+ else:
+ file_paths = [
+ os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-{process_id}.arrow")
+ for process_id in range(self.num_process)
+ ]
+
+ # Let's acquire a lock on each process files to be sure they are finished writing
+ filelocks = []
+ for process_id, file_path in enumerate(file_paths):
+ if process_id == 0: # process 0 already has its lock file
+ filelocks.append(self.filelock)
+ else:
+ filelock = FileLock(file_path + ".lock")
+ try:
+ filelock.acquire(timeout=self.timeout)
+ except Timeout:
+ raise ValueError(
+ f"Cannot acquire lock on cached file {file_path} for process {process_id}."
+ ) from None
+ else:
+ filelocks.append(filelock)
+
+ return file_paths, filelocks
+
+ def _check_all_processes_locks(self):
+ expected_lock_file_names = [
+ os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-{process_id}.arrow.lock")
+ for process_id in range(self.num_process)
+ ]
+ for expected_lock_file_name in expected_lock_file_names:
+ nofilelock = FileFreeLock(expected_lock_file_name)
+ try:
+ nofilelock.acquire(timeout=self.timeout)
+ except Timeout:
+ raise ValueError(
+ f"Expected to find locked file {expected_lock_file_name} from process {self.process_id} but it doesn't exist."
+ ) from None
+ else:
+ nofilelock.release()
+
+ def _check_rendez_vous(self):
+ expected_lock_file_name = os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-0.arrow.lock")
+ nofilelock = FileFreeLock(expected_lock_file_name)
+ try:
+ nofilelock.acquire(timeout=self.timeout)
+ except Timeout:
+ raise ValueError(
+ f"Expected to find locked file {expected_lock_file_name} from process {self.process_id} but it doesn't exist."
+ ) from None
+ else:
+ nofilelock.release()
+ lock_file_name = os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-rdv.lock")
+ rendez_vous_lock = FileLock(lock_file_name)
+ try:
+ rendez_vous_lock.acquire(timeout=self.timeout)
+ except Timeout:
+ raise ValueError(f"Couldn't acquire lock on {lock_file_name} from process {self.process_id}.") from None
+ else:
+ rendez_vous_lock.release()
+
+ def _finalize(self):
+ """Close all the writing process and load/gather the data
+ from all the nodes if main node or all_process is True.
+ """
+ if self.writer is not None:
+ self.writer.finalize()
+ self.writer = None
+ # release the locks of the processes > 0 so that process 0 can lock them to read + delete the data
+ if self.filelock is not None and self.process_id > 0:
+ self.filelock.release()
+
+ if self.keep_in_memory:
+ # Read the predictions and references
+ reader = ArrowReader(path=self.data_dir, info=DatasetInfo(features=self.features))
+ self.data = Dataset.from_buffer(self.buf_writer.getvalue())
+
+ elif self.process_id == 0:
+ # Let's acquire a lock on each node files to be sure they are finished writing
+ file_paths, filelocks = self._get_all_cache_files()
+
+ # Read the predictions and references
+ try:
+ reader = ArrowReader(path="", info=DatasetInfo(features=self.features))
+ self.data = Dataset(**reader.read_files([{"filename": f} for f in file_paths]))
+ except FileNotFoundError:
+ raise ValueError(
+ "Error in finalize: another metric instance is already using the local cache file. "
+ "Please specify an experiment_id to avoid collision between distributed metric instances."
+ ) from None
+
+ # Store file paths and locks and we will release/delete them after the computation.
+ self.file_paths = file_paths
+ self.filelocks = filelocks
+
+ def compute(self, *, predictions=None, references=None, **kwargs) -> Optional[dict]:
+ """Compute the metrics.
+
+ Usage of positional arguments is not allowed to prevent mistakes.
+
+ Args:
+ predictions (list/array/tensor, optional): Predictions.
+ references (list/array/tensor, optional): References.
+ **kwargs (optional): Keyword arguments that will be forwarded to the metrics :meth:`_compute`
+ method (see details in the docstring).
+
+ Return:
+ dict or None
+
+ - Dictionary with the metrics if this metric is run on the main process (``process_id == 0``).
+ - None if the metric is not run on the main process (``process_id != 0``).
+
+ Example:
+
+ ```py
+ >>> from datasets import load_metric
+ >>> metric = load_metric("accuracy")
+ >>> accuracy = metric.compute(predictions=model_prediction, references=labels)
+ ```
+ """
+ all_kwargs = {"predictions": predictions, "references": references, **kwargs}
+ if predictions is None and references is None:
+ missing_kwargs = {k: None for k in self.features if k not in all_kwargs}
+ all_kwargs.update(missing_kwargs)
+ else:
+ missing_inputs = [k for k in self.features if k not in all_kwargs]
+ if missing_inputs:
+ raise ValueError(
+ f"Metric inputs are missing: {missing_inputs}. All required inputs are {list(self.features)}"
+ )
+ inputs = {input_name: all_kwargs[input_name] for input_name in self.features}
+ compute_kwargs = {k: kwargs[k] for k in kwargs if k not in self.features}
+
+ if any(v is not None for v in inputs.values()):
+ self.add_batch(**inputs)
+ self._finalize()
+
+ self.cache_file_name = None
+ self.filelock = None
+
+ if self.process_id == 0:
+ self.data.set_format(type=self.info.format)
+
+ inputs = {input_name: self.data[input_name] for input_name in self.features}
+ with temp_seed(self.seed):
+ output = self._compute(**inputs, **compute_kwargs)
+
+ if self.buf_writer is not None:
+ self.buf_writer = None
+ del self.data
+ self.data = None
+ else:
+ # Release locks and delete all the cache files. Process 0 is released last.
+ for filelock, file_path in reversed(list(zip(self.filelocks, self.file_paths))):
+ logger.info(f"Removing {file_path}")
+ del self.data
+ self.data = None
+ del self.writer
+ self.writer = None
+ os.remove(file_path)
+ filelock.release()
+
+ return output
+ else:
+ return None
+
+ def add_batch(self, *, predictions=None, references=None, **kwargs):
+ """Add a batch of predictions and references for the metric's stack.
+
+ Args:
+ predictions (list/array/tensor, optional): Predictions.
+ references (list/array/tensor, optional): References.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_metric
+ >>> metric = load_metric("accuracy")
+ >>> metric.add_batch(predictions=model_prediction, references=labels)
+ ```
+ """
+ bad_inputs = [input_name for input_name in kwargs if input_name not in self.features]
+ if bad_inputs:
+ raise ValueError(f"Bad inputs for metric: {bad_inputs}. All required inputs are {list(self.features)}")
+ batch = {"predictions": predictions, "references": references, **kwargs}
+ batch = {input_name: batch[input_name] for input_name in self.features}
+ batch = self.info.features.encode_batch(batch)
+ if self.writer is None:
+ self._init_writer()
+ try:
+ self.writer.write_batch(batch)
+ except pa.ArrowInvalid:
+ if any(len(batch[c]) != len(next(iter(batch.values()))) for c in batch):
+ col0 = next(iter(batch))
+ bad_col = [c for c in batch if len(batch[c]) != len(batch[col0])][0]
+ error_msg = (
+ f"Mismatch in the number of {col0} ({len(batch[col0])}) and {bad_col} ({len(batch[bad_col])})"
+ )
+ elif set(self.features) != {"references", "predictions"}:
+ error_msg = f"Metric inputs don't match the expected format.\n" f"Expected format: {self.features},\n"
+ error_msg_inputs = ",\n".join(
+ f"Input {input_name}: {summarize_if_long_list(batch[input_name])}" for input_name in self.features
+ )
+ error_msg += error_msg_inputs
+ else:
+ error_msg = (
+ f"Predictions and/or references don't match the expected format.\n"
+ f"Expected format: {self.features},\n"
+ f"Input predictions: {summarize_if_long_list(predictions)},\n"
+ f"Input references: {summarize_if_long_list(references)}"
+ )
+ raise ValueError(error_msg) from None
+
+ def add(self, *, prediction=None, reference=None, **kwargs):
+ """Add one prediction and reference for the metric's stack.
+
+ Args:
+ prediction (list/array/tensor, optional): Prediction.
+ reference (list/array/tensor, optional): Reference.
+
+ Example:
+
+ ```py
+ >>> from datasets import load_metric
+ >>> metric = load_metric("accuracy")
+ >>> metric.add(prediction=model_prediction, reference=label)
+ ```
+ """
+ bad_inputs = [input_name for input_name in kwargs if input_name not in self.features]
+ if bad_inputs:
+ raise ValueError(f"Bad inputs for metric: {bad_inputs}. All required inputs are {list(self.features)}")
+ example = {"predictions": prediction, "references": reference, **kwargs}
+ example = {input_name: example[input_name] for input_name in self.features}
+ example = self.info.features.encode_example(example)
+ if self.writer is None:
+ self._init_writer()
+ try:
+ self.writer.write(example)
+ except pa.ArrowInvalid:
+ error_msg = f"Metric inputs don't match the expected format.\n" f"Expected format: {self.features},\n"
+ error_msg_inputs = ",\n".join(
+ f"Input {input_name}: {summarize_if_long_list(example[input_name])}" for input_name in self.features
+ )
+ error_msg += error_msg_inputs
+ raise ValueError(error_msg) from None
+
+ def _init_writer(self, timeout=1):
+ if self.num_process > 1:
+ if self.process_id == 0:
+ file_path = os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-rdv.lock")
+ self.rendez_vous_lock = FileLock(file_path)
+ try:
+ self.rendez_vous_lock.acquire(timeout=timeout)
+ except TimeoutError:
+ raise ValueError(
+ f"Error in _init_writer: another metric instance is already using the local cache file at {file_path}. "
+ f"Please specify an experiment_id (currently: {self.experiment_id}) to avoid collision "
+ f"between distributed metric instances."
+ ) from None
+
+ if self.keep_in_memory:
+ self.buf_writer = pa.BufferOutputStream()
+ self.writer = ArrowWriter(
+ features=self.info.features, stream=self.buf_writer, writer_batch_size=self.writer_batch_size
+ )
+ else:
+ self.buf_writer = None
+
+ # Get cache file name and lock it
+ if self.cache_file_name is None or self.filelock is None:
+ cache_file_name, filelock = self._create_cache_file() # get ready
+ self.cache_file_name = cache_file_name
+ self.filelock = filelock
+
+ self.writer = ArrowWriter(
+ features=self.info.features, path=self.cache_file_name, writer_batch_size=self.writer_batch_size
+ )
+ # Setup rendez-vous here if in a distributed setting
+ if self.num_process > 1:
+ if self.process_id == 0:
+ self._check_all_processes_locks() # wait for everyone to be ready
+ self.rendez_vous_lock.release() # let everyone go
+ else:
+ self._check_rendez_vous() # wait for master to be ready and to let everyone go
+
+ def _info(self) -> MetricInfo:
+ """Construct the MetricInfo object. See `MetricInfo` for details.
+
+ Warning: This function is only called once and the result is cached for all
+ following .info() calls.
+
+ Returns:
+ info: (MetricInfo) The metrics information
+ """
+ raise NotImplementedError
+
+ def download_and_prepare(
+ self,
+ download_config: Optional[DownloadConfig] = None,
+ dl_manager: Optional[DownloadManager] = None,
+ ):
+ """Downloads and prepares dataset for reading.
+
+ Args:
+ download_config (:class:`DownloadConfig`, optional): Specific download configuration parameters.
+ dl_manager (:class:`DownloadManager`, optional): Specific download manager to use.
+ """
+ if dl_manager is None:
+ if download_config is None:
+ download_config = DownloadConfig()
+ download_config.cache_dir = os.path.join(self.data_dir, "downloads")
+ download_config.force_download = False
+
+ dl_manager = DownloadManager(
+ dataset_name=self.name, download_config=download_config, data_dir=self.data_dir
+ )
+
+ self._download_and_prepare(dl_manager)
+
+ def _download_and_prepare(self, dl_manager):
+ """Downloads and prepares resources for the metric.
+
+ This is the internal implementation to override, called when the user calls
+ `download_and_prepare`. It should download all the required resources for the metric.
+
+ Args:
+ dl_manager (:class:`DownloadManager`): `DownloadManager` used to download and cache data.
+ """
+ return None
+
+ def _compute(self, *, predictions=None, references=None, **kwargs) -> Dict[str, Any]:
+ """This method defines the common API for all the metrics in the library"""
+ raise NotImplementedError
+
+ def __del__(self):
+ if hasattr(self, "filelock") and self.filelock is not None:
+ self.filelock.release()
+ if hasattr(self, "rendez_vous_lock") and self.rendez_vous_lock is not None:
+ self.rendez_vous_lock.release()
+ if hasattr(self, "writer"): # in case it was already deleted
+ del self.writer
+ if hasattr(self, "data"): # in case it was already deleted
+ del self.data
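+
+# Minimal subclass sketch (illustrative, not shipped with this file): a metric only has to
+# define `_info` and `_compute`; caching and distributed synchronization come from the base
+# class above. The metric name and feature types below are assumptions for the example.
+#
+# from datasets import Features, Value
+#
+# class ExactMatch(Metric):
+#     def _info(self):
+#         return MetricInfo(
+#             description="Fraction of predictions equal to their reference.",
+#             citation="",
+#             features=Features({"predictions": Value("string"), "references": Value("string")}),
+#         )
+#
+#     def _compute(self, predictions=None, references=None):
+#         matches = sum(p == r for p, r in zip(predictions, references))
+#         return {"exact_match": matches / len(predictions)}
+#
+# metric = ExactMatch()
+# metric.add_batch(predictions=["a", "b"], references=["a", "c"])
+# metric.compute()  # {'exact_match': 0.5}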
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/naming.py b/evalkit_tf437/lib/python3.10/site-packages/datasets/naming.py
new file mode 100644
index 0000000000000000000000000000000000000000..65e7ede10dcde8701823223ae98e7971f705f945
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/datasets/naming.py
@@ -0,0 +1,84 @@
+# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Lint as: python3
+"""Utilities for file names."""
+
+import itertools
+import os
+import re
+
+
+_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
+_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")
+
+_single_underscore_re = re.compile(r"(?<!_)_(?!_)")
+_multiple_underscores_re = re.compile(r"(_{2,})")
+
+_split_re = r"^\w+(\.\w+)*$"
+
+INVALID_WINDOWS_CHARACTERS_IN_PATH = r"<>:/\|?*"
+
+
+def camelcase_to_snakecase(name):
+ """Convert camel-case string to snake-case."""
+ name = _uppercase_uppercase_re.sub(r"\1_\2", name)
+ name = _lowercase_uppercase_re.sub(r"\1_\2", name)
+ return name.lower()
+
+
+def snakecase_to_camelcase(name):
+ """Convert snake-case string to camel-case string."""
+ name = _single_underscore_re.split(name)
+ name = [_multiple_underscores_re.split(n) for n in name]
+ return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != "")
+
+
+def filename_prefix_for_name(name):
+ if os.path.basename(name) != name:
+ raise ValueError(f"Should be a dataset name, not a path: {name}")
+ return camelcase_to_snakecase(name)
+
+
+def filename_prefix_for_split(name, split):
+ if os.path.basename(name) != name:
+ raise ValueError(f"Should be a dataset name, not a path: {name}")
+ if not re.match(_split_re, split):
+ raise ValueError(f"Split name should match '{_split_re}'' but got '{split}'.")
+ return f"{filename_prefix_for_name(name)}-{split}"
+
+
+def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
+ prefix = filename_prefix_for_split(dataset_name, split)
+ if filetype_suffix:
+ prefix += f".{filetype_suffix}"
+ filepath = os.path.join(data_dir, prefix)
+ return f"{filepath}*"
+
+
+def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
+ prefix = filename_prefix_for_split(dataset_name, split)
+ prefix = os.path.join(path, prefix)
+
+ if shard_lengths:
+ num_shards = len(shard_lengths)
+ filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
+ if filetype_suffix:
+ filenames = [filename + f".{filetype_suffix}" for filename in filenames]
+ return filenames
+ else:
+ filename = prefix
+ if filetype_suffix:
+ filename += f".{filetype_suffix}"
+ return [filename]
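+
+# For example (illustrative): with two shards, the writer produces numbered shard files.
+#
+# >>> filenames_for_dataset_split("/data", "squad", "train", filetype_suffix="arrow", shard_lengths=[100, 100])
+# ['/data/squad-train-00000-of-00002.arrow', '/data/squad-train-00001-of-00002.arrow']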
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/search.py b/evalkit_tf437/lib/python3.10/site-packages/datasets/search.py
new file mode 100644
index 0000000000000000000000000000000000000000..5ec41bbc3e00c34d6d10e75ea05264caabc3256e
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/datasets/search.py
@@ -0,0 +1,779 @@
+import importlib.util
+import os
+import tempfile
+from pathlib import PurePath
+from typing import TYPE_CHECKING, Dict, List, NamedTuple, Optional, Union
+
+import fsspec
+import numpy as np
+
+from .utils import logging
+from .utils import tqdm as hf_tqdm
+
+
+if TYPE_CHECKING:
+ from .arrow_dataset import Dataset # noqa: F401
+
+ try:
+ from elasticsearch import Elasticsearch # noqa: F401
+
+ except ImportError:
+ pass
+ try:
+ import faiss # noqa: F401
+
+ except ImportError:
+ pass
+
+_has_elasticsearch = importlib.util.find_spec("elasticsearch") is not None
+_has_faiss = importlib.util.find_spec("faiss") is not None
+
+
+logger = logging.get_logger(__name__)
+
+
+class MissingIndex(Exception):
+ pass
+
+
+class SearchResults(NamedTuple):
+ scores: List[float]
+ indices: List[int]
+
+
+class BatchedSearchResults(NamedTuple):
+ total_scores: List[List[float]]
+ total_indices: List[List[int]]
+
+
+class NearestExamplesResults(NamedTuple):
+ scores: List[float]
+ examples: dict
+
+
+class BatchedNearestExamplesResults(NamedTuple):
+ total_scores: List[List[float]]
+ total_examples: List[dict]
+
+
+class BaseIndex:
+ """Base class for indexing"""
+
+ def search(self, query, k: int = 10, **kwargs) -> SearchResults:
+ """
+ To implement.
+ This method has to return the scores and the indices of the retrieved examples given a certain query.
+ """
+ raise NotImplementedError
+
+ def search_batch(self, queries, k: int = 10, **kwargs) -> BatchedSearchResults:
+ """Find the nearest examples indices to the query.
+
+ Args:
+ queries (`Union[List[str], np.ndarray]`): The queries as a list of strings if `column` is a text index or as a numpy array if `column` is a vector index.
+ k (`int`): The number of examples to retrieve per query.
+
+ Output:
+ total_scores (`List[List[float]]`): The retrieval scores of the retrieved examples per query.
+ total_indices (`List[List[int]]`): The indices of the retrieved examples per query.
+ """
+ total_scores, total_indices = [], []
+ for query in queries:
+ scores, indices = self.search(query, k)
+ total_scores.append(scores)
+ total_indices.append(indices)
+ return BatchedSearchResults(total_scores, total_indices)
+
+ def save(self, file: Union[str, PurePath]):
+ """Serialize the index on disk"""
+ raise NotImplementedError
+
+ @classmethod
+ def load(cls, file: Union[str, PurePath]) -> "BaseIndex":
+ """Deserialize the index from disk"""
+ raise NotImplementedError
+
+
+class ElasticSearchIndex(BaseIndex):
+ """
+ Sparse index using Elasticsearch. It is used to index text and run queries based on BM25 similarity.
+ An Elasticsearch server needs to be accessible, and a python client is declared with
+ ```
+ es_client = Elasticsearch([{'host': 'localhost', 'port': '9200'}])
+ ```
+ for example.
+ """
+
+ def __init__(
+ self,
+ host: Optional[str] = None,
+ port: Optional[int] = None,
+ es_client: Optional["Elasticsearch"] = None,
+ es_index_name: Optional[str] = None,
+ es_index_config: Optional[dict] = None,
+ ):
+ if not _has_elasticsearch:
+ raise ImportError(
+ "You must install ElasticSearch to use ElasticSearchIndex. To do so you can run `pip install elasticsearch==7.7.1 for example`"
+ )
+ if es_client is not None and (host is not None or port is not None):
+ raise ValueError("Please specify either `es_client` or `(host, port)`, but not both.")
+ host = host or "localhost"
+ port = port or 9200
+
+ import elasticsearch.helpers # noqa: F401 - need this to properly load all the es features
+ from elasticsearch import Elasticsearch # noqa: F811
+
+ self.es_client = es_client if es_client is not None else Elasticsearch([{"host": host, "port": str(port)}])
+ self.es_index_name = (
+ es_index_name
+ if es_index_name is not None
+ else "huggingface_datasets_" + os.path.basename(tempfile.NamedTemporaryFile().name)
+ )
+ self.es_index_config = (
+ es_index_config
+ if es_index_config is not None
+ else {
+ "settings": {
+ "number_of_shards": 1,
+ "analysis": {"analyzer": {"stop_standard": {"type": "standard", " stopwords": "_english_"}}},
+ },
+ "mappings": {"properties": {"text": {"type": "text", "analyzer": "standard", "similarity": "BM25"}}},
+ }
+ )
+
+ def add_documents(self, documents: Union[List[str], "Dataset"], column: Optional[str] = None):
+ """
+ Add documents to the index.
+ If the documents are inside a certain column, you can specify it using the `column` argument.
+ """
+ index_name = self.es_index_name
+ index_config = self.es_index_config
+ self.es_client.indices.create(index=index_name, body=index_config)
+ number_of_docs = len(documents)
+ progress = hf_tqdm(unit="docs", total=number_of_docs)
+ successes = 0
+
+ def passage_generator():
+ if column is not None:
+ for i, example in enumerate(documents):
+ yield {"text": example[column], "_id": i}
+ else:
+ for i, example in enumerate(documents):
+ yield {"text": example, "_id": i}
+
+ # populate the ES index with the documents
+ import elasticsearch as es
+
+ for ok, action in es.helpers.streaming_bulk(
+ client=self.es_client,
+ index=index_name,
+ actions=passage_generator(),
+ ):
+ progress.update(1)
+ successes += ok
+ if successes != len(documents):
+ logger.warning(
+ f"Some documents failed to be added to ElasticSearch. Failures: {len(documents)-successes}/{len(documents)}"
+ )
+ logger.info(f"Indexed {successes:d} documents")
+
+ def search(self, query: str, k=10, **kwargs) -> SearchResults:
+ """Find the nearest examples indices to the query.
+
+ Args:
+ query (`str`): The query as a string.
+ k (`int`): The number of examples to retrieve.
+
+ Output:
+ scores (`List[float]`): The retrieval scores of the retrieved examples.
+ indices (`List[int]`): The indices of the retrieved examples.
+ """
+ response = self.es_client.search(
+ index=self.es_index_name,
+ body={"query": {"multi_match": {"query": query, "fields": ["text"], "type": "cross_fields"}}, "size": k},
+ **kwargs,
+ )
+ hits = response["hits"]["hits"]
+ return SearchResults([hit["_score"] for hit in hits], [int(hit["_id"]) for hit in hits])
+
+ def search_batch(self, queries, k: int = 10, max_workers=10, **kwargs) -> BatchedSearchResults:
+ import concurrent.futures
+
+ total_scores, total_indices = [None] * len(queries), [None] * len(queries)
+ with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
+ future_to_index = {executor.submit(self.search, query, k, **kwargs): i for i, query in enumerate(queries)}
+ for future in concurrent.futures.as_completed(future_to_index):
+ index = future_to_index[future]
+ results: SearchResults = future.result()
+ total_scores[index] = results.scores
+ total_indices[index] = results.indices
+ return BatchedSearchResults(total_indices=total_indices, total_scores=total_scores)
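+
+ # Usage sketch (illustrative, assumes an Elasticsearch server is reachable on localhost:9200
+ # and the elasticsearch client is installed):
+ #
+ # index = ElasticSearchIndex(host="localhost", port=9200)
+ # index.add_documents(["the cat sat on the mat", "dogs chase cats"])
+ # scores, ids = index.search("cat", k=1)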
+
+
+class FaissIndex(BaseIndex):
+ """
+ Dense index using Faiss. It is used to index vectors.
+ Faiss is a library for efficient similarity search and clustering of dense vectors.
+ It contains algorithms that search in sets of vectors of any size, up to ones that possibly do not fit in RAM.
+ You can find more information about Faiss here:
+ - For index types and the string factory: https://github.com/facebookresearch/faiss/wiki/The-index-factory
+ - For GPU settings: https://github.com/facebookresearch/faiss/wiki/Faiss-on-the-GPU
+ """
+
+ def __init__(
+ self,
+ device: Optional[Union[int, List[int]]] = None,
+ string_factory: Optional[str] = None,
+ metric_type: Optional[int] = None,
+ custom_index: Optional["faiss.Index"] = None,
+ ):
+ """
+ Create a Dense index using Faiss. You can specify `device` if you want to run it on GPU (`device` must be the GPU index).
+ You can find more information about Faiss here:
+ - For `string factory`: https://github.com/facebookresearch/faiss/wiki/The-index-factory
+ """
+ if string_factory is not None and custom_index is not None:
+ raise ValueError("Please specify either `string_factory` or `custom_index` but not both.")
+ if device is not None and custom_index is not None:
+ raise ValueError(
+ "Cannot pass both 'custom_index' and 'device'. "
+ "Pass 'custom_index' already transferred to the target device instead."
+ )
+ self.device = device
+ self.string_factory = string_factory
+ self.metric_type = metric_type
+ self.faiss_index = custom_index
+ if not _has_faiss:
+ raise ImportError(
+ "You must install Faiss to use FaissIndex. To do so you can run `conda install -c pytorch faiss-cpu` or `conda install -c pytorch faiss-gpu`. "
+ "A community supported package is also available on pypi: `pip install faiss-cpu` or `pip install faiss-gpu`. "
+ "Note that pip may not have the latest version of FAISS, and thus, some of the latest features and bug fixes may not be available."
+ )
+
+ def add_vectors(
+ self,
+ vectors: Union[np.array, "Dataset"],
+ column: Optional[str] = None,
+ batch_size: int = 1000,
+ train_size: Optional[int] = None,
+ faiss_verbose: Optional[bool] = None,
+ ):
+ """
+ Add vectors to the index.
+ If the arrays are inside a certain column, you can specify it using the `column` argument.
+ """
+ import faiss # noqa: F811
+
+ # Create index
+ if self.faiss_index is None:
+ size = len(vectors[0]) if column is None else len(vectors[0][column])
+ if self.string_factory is not None:
+ if self.metric_type is None:
+ index = faiss.index_factory(size, self.string_factory)
+ else:
+ index = faiss.index_factory(size, self.string_factory, self.metric_type)
+ else:
+ if self.metric_type is None:
+ index = faiss.IndexFlat(size)
+ else:
+ index = faiss.IndexFlat(size, self.metric_type)
+
+ self.faiss_index = self._faiss_index_to_device(index, self.device)
+ logger.info(f"Created faiss index of type {type(self.faiss_index)}")
+
+ # Set verbosity level
+ if faiss_verbose is not None:
+ self.faiss_index.verbose = faiss_verbose
+ if hasattr(self.faiss_index, "index") and self.faiss_index.index is not None:
+ self.faiss_index.index.verbose = faiss_verbose
+ if hasattr(self.faiss_index, "quantizer") and self.faiss_index.quantizer is not None:
+ self.faiss_index.quantizer.verbose = faiss_verbose
+ if hasattr(self.faiss_index, "clustering_index") and self.faiss_index.clustering_index is not None:
+ self.faiss_index.clustering_index.verbose = faiss_verbose
+
+ # Train
+ if train_size is not None:
+ train_vecs = vectors[:train_size] if column is None else vectors[:train_size][column]
+ logger.info(f"Training the index with the first {len(train_vecs)} vectors")
+ self.faiss_index.train(train_vecs)
+ else:
+ logger.info("Ignored the training step of the faiss index as `train_size` is None.")
+
+ # Add vectors
+ logger.info(f"Adding {len(vectors)} vectors to the faiss index")
+ for i in hf_tqdm(range(0, len(vectors), batch_size)):
+ vecs = vectors[i : i + batch_size] if column is None else vectors[i : i + batch_size][column]
+ self.faiss_index.add(vecs)
+
+ @staticmethod
+ def _faiss_index_to_device(index: "faiss.Index", device: Optional[Union[int, List[int]]] = None) -> "faiss.Index":
+ """
+ Sends a faiss index to a device.
+ A device can be a positive integer (GPU id), a negative integer (all GPUs),
+ a list of positive integers (the GPUs to use), or `None` for CPU.
+ """
+
+ # If device is not specified, then it runs on CPU.
+ if device is None:
+ return index
+
+ import faiss # noqa: F811
+
+ # If the device id is given as an integer
+ if isinstance(device, int):
+ # Positive integers are directly mapped to GPU ids
+ if device > -1:
+ faiss_res = faiss.StandardGpuResources()
+ index = faiss.index_cpu_to_gpu(faiss_res, device, index)
+ # And negative integers mean using all GPUs
+ else:
+ index = faiss.index_cpu_to_all_gpus(index)
+ # Device ids given as a list mean mapping to those devices specified.
+ elif isinstance(device, (list, tuple)):
+ index = faiss.index_cpu_to_gpus_list(index, gpus=list(device))
+ else:
+ raise TypeError(
+ f"The argument type: {type(device)} is not expected. "
+ + "Please pass in either nothing, a positive int, a negative int, or a list of positive ints."
+ )
+
+ return index
+
+ def search(self, query: np.array, k=10, **kwargs) -> SearchResults:
+ """Find the indices of the nearest examples to the query.
+
+ Args:
+ query (`np.array`): The query as a numpy array.
+ k (`int`): The number of examples to retrieve.
+
+ Output:
+ scores (`List[float]`): The retrieval scores of the retrieved examples.
+ indices (`List[int]`): The indices of the retrieved examples.
+ """
+ if len(query.shape) != 1 and (len(query.shape) != 2 or query.shape[0] != 1):
+ raise ValueError("Shape of query is incorrect, it has to be either a 1D array or 2D (1, N)")
+
+ queries = query.reshape(1, -1)
+ if not queries.flags.c_contiguous:
+ queries = np.asarray(queries, order="C")
+ scores, indices = self.faiss_index.search(queries, k, **kwargs)
+ return SearchResults(scores[0], indices[0].astype(int))
+
+ def search_batch(self, queries: np.array, k=10, **kwargs) -> BatchedSearchResults:
+ """Find the indices of the nearest examples to the queries.
+
+ Args:
+ queries (`np.array`): The queries as a numpy array.
+ k (`int`): The number of examples to retrieve.
+
+ Output:
+ total_scores (`List[List[float]]`): The retrieval scores of the retrieved examples per query.
+ total_indices (`List[List[int]]`): The indices of the retrieved examples per query.
+ """
+ if len(queries.shape) != 2:
+ raise ValueError("Shape of query must be 2D")
+ if not queries.flags.c_contiguous:
+ queries = np.asarray(queries, order="C")
+ scores, indices = self.faiss_index.search(queries, k, **kwargs)
+ return BatchedSearchResults(scores, indices.astype(int))
+
+ def save(self, file: Union[str, PurePath], storage_options: Optional[Dict] = None):
+ """Serialize the FaissIndex on disk"""
+ import faiss # noqa: F811
+
+ if self.device is not None and isinstance(self.device, (int, list, tuple)):
+ index = faiss.index_gpu_to_cpu(self.faiss_index)
+ else:
+ index = self.faiss_index
+
+ with fsspec.open(str(file), "wb", **(storage_options or {})) as f:
+ faiss.write_index(index, faiss.BufferedIOWriter(faiss.PyCallbackIOWriter(f.write)))
+
+ @classmethod
+ def load(
+ cls,
+ file: Union[str, PurePath],
+ device: Optional[Union[int, List[int]]] = None,
+ storage_options: Optional[Dict] = None,
+ ) -> "FaissIndex":
+ """Deserialize the FaissIndex from disk"""
+ import faiss # noqa: F811
+
+ # Instances of FaissIndex are essentially just wrappers around faiss indices.
+ faiss_index = cls(device=device)
+ with fsspec.open(str(file), "rb", **(storage_options or {})) as f:
+ index = faiss.read_index(faiss.BufferedIOReader(faiss.PyCallbackIOReader(f.read)))
+ faiss_index.faiss_index = faiss_index._faiss_index_to_device(index, faiss_index.device)
+ return faiss_index
+
+
+class IndexableMixin:
+ """Add indexing features to `datasets.Dataset`"""
+
+ def __init__(self):
+ self._indexes: Dict[str, BaseIndex] = {}
+
+ def __len__(self):
+ raise NotImplementedError
+
+ def __getitem__(self, key):
+ raise NotImplementedError
+
+ def is_index_initialized(self, index_name: str) -> bool:
+ return index_name in self._indexes
+
+ def _check_index_is_initialized(self, index_name: str):
+ if not self.is_index_initialized(index_name):
+ raise MissingIndex(
+ f"Index with index_name '{index_name}' not initialized yet. Please make sure that you call `add_faiss_index` or `add_elasticsearch_index` first."
+ )
+
+ def list_indexes(self) -> List[str]:
+ """List the `index_name`/identifiers of all the attached indexes."""
+ return list(self._indexes)
+
+ def get_index(self, index_name: str) -> BaseIndex:
+ """Get the index with the specified `index_name`.
+
+ Args:
+ index_name (`str`): Index name.
+
+ Returns:
+ [`BaseIndex`]
+ """
+ self._check_index_is_initialized(index_name)
+ return self._indexes[index_name]
+
+ def add_faiss_index(
+ self,
+ column: str,
+ index_name: Optional[str] = None,
+ device: Optional[Union[int, List[int]]] = None,
+ string_factory: Optional[str] = None,
+ metric_type: Optional[int] = None,
+ custom_index: Optional["faiss.Index"] = None,
+ batch_size: int = 1000,
+ train_size: Optional[int] = None,
+ faiss_verbose: bool = False,
+ ):
+ """Add a dense index using Faiss for fast retrieval.
+ The index is created using the vectors of the specified column.
+ You can specify `device` if you want to run it on GPU (`device` must be the GPU index, see more below).
+ You can find more information about Faiss here:
+ - For `string factory`: https://github.com/facebookresearch/faiss/wiki/The-index-factory
+
+ Args:
+ column (`str`): The column of the vectors to add to the index.
+ index_name (Optional `str`): The index_name/identifier of the index. This is the index_name that is used to call `.get_nearest` or `.search`.
+ By default it corresponds to `column`.
+ device (Optional `Union[int, List[int]]`): If positive integer, this is the index of the GPU to use. If negative integer, use all GPUs.
+ If a list of positive integers is passed in, run only on those GPUs. By default it uses the CPU.
+ string_factory (Optional `str`): This is passed to the index factory of Faiss to create the index. Default index class is IndexFlatIP.
+ metric_type (Optional `int`): Type of metric. Ex: `faiss.METRIC_INNER_PRODUCT` or `faiss.METRIC_L2`.
+ custom_index (Optional `faiss.Index`): Custom Faiss index that you already have instantiated and configured for your needs.
+ batch_size (Optional `int`): Size of the batch to use while adding vectors to the FaissIndex. Default value is 1000.
+ train_size (Optional `int`): If the index needs a training step, specifies how many vectors will be used to train the index.
+ faiss_verbose (`bool`, defaults to False): Enable the verbosity of the Faiss index.
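+
+ Example (a minimal sketch; assumes the dataset has an "embeddings" column of `float32` vectors and `query_embedding` is a 1D `np.float32` array):
+
+ ```py
+ >>> ds.add_faiss_index(column="embeddings")
+ >>> scores, indices = ds.search("embeddings", query_embedding, k=10)
+ ```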
+ """
+ index_name = index_name if index_name is not None else column
+ faiss_index = FaissIndex(
+ device=device, string_factory=string_factory, metric_type=metric_type, custom_index=custom_index
+ )
+ faiss_index.add_vectors(
+ self, column=column, batch_size=batch_size, train_size=train_size, faiss_verbose=faiss_verbose
+ )
+ self._indexes[index_name] = faiss_index
+
+ def add_faiss_index_from_external_arrays(
+ self,
+ external_arrays: np.array,
+ index_name: str,
+ device: Optional[Union[int, List[int]]] = None,
+ string_factory: Optional[str] = None,
+ metric_type: Optional[int] = None,
+ custom_index: Optional["faiss.Index"] = None,
+ batch_size: int = 1000,
+ train_size: Optional[int] = None,
+ faiss_verbose: bool = False,
+ ):
+ """Add a dense index using Faiss for fast retrieval.
+ The index is created using the vectors of `external_arrays`.
+ You can specify `device` if you want to run it on GPU (`device` must be the GPU index).
+ You can find more information about Faiss here:
+ - For `string factory`: https://github.com/facebookresearch/faiss/wiki/The-index-factory
+
+ Args:
+ external_arrays (`np.array`): If you want to use arrays from outside the lib for the index, you can set `external_arrays`.
+ It will use `external_arrays` to create the Faiss index instead of the arrays in the given `column`.
+ index_name (`str`): The index_name/identifier of the index. This is the index_name that is used to call `.get_nearest` or `.search`.
+ device (Optional `Union[int, List[int]]`): If positive integer, this is the index of the GPU to use. If negative integer, use all GPUs.
+ If a list of positive integers is passed in, run only on those GPUs. By default it uses the CPU.
+ string_factory (Optional `str`): This is passed to the index factory of Faiss to create the index. Default index class is IndexFlatIP.
+ metric_type (Optional `int`): Type of metric. Ex: `faiss.METRIC_INNER_PRODUCT` or `faiss.METRIC_L2`.
+ custom_index (Optional `faiss.Index`): Custom Faiss index that you already have instantiated and configured for your needs.
+ batch_size (Optional `int`): Size of the batch to use while adding vectors to the FaissIndex. Default value is 1000.
+ train_size (Optional `int`): If the index needs a training step, specifies how many vectors will be used to train the index.
+ faiss_verbose (`bool`, defaults to False): Enable the verbosity of the Faiss index.
+ """
+ faiss_index = FaissIndex(
+ device=device, string_factory=string_factory, metric_type=metric_type, custom_index=custom_index
+ )
+ faiss_index.add_vectors(
+ external_arrays, column=None, batch_size=batch_size, train_size=train_size, faiss_verbose=faiss_verbose
+ )
+ self._indexes[index_name] = faiss_index
+
+ def save_faiss_index(self, index_name: str, file: Union[str, PurePath], storage_options: Optional[Dict] = None):
+ """Save a FaissIndex on disk.
+
+ Args:
+ index_name (`str`): The index_name/identifier of the index. This is the index_name that is used to call `.get_nearest` or `.search`.
+ file (`str`): The path to the serialized faiss index on disk or remote URI (e.g. `"s3://my-bucket/index.faiss"`).
+ storage_options (`dict`, *optional*):
+ Key/value pairs to be passed on to the file-system backend, if any.
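+
+ Example (a minimal sketch; assumes a Faiss index named "embeddings" was added beforehand):
+
+ ```py
+ >>> ds.save_faiss_index("embeddings", "my_index.faiss")
+ ```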
+ """
+ index = self.get_index(index_name)
+ if not isinstance(index, FaissIndex):
+ raise ValueError(f"Index '{index_name}' is not a FaissIndex but a '{type(index)}'")
+ index.save(file, storage_options=storage_options)
+ logger.info(f"Saved FaissIndex {index_name} at {file}")
+
+ def load_faiss_index(
+ self,
+ index_name: str,
+ file: Union[str, PurePath],
+ device: Optional[Union[int, List[int]]] = None,
+ storage_options: Optional[Dict] = None,
+ ):
+ """Load a FaissIndex from disk.
+
+ If you want to do additional configurations, you can have access to the faiss index object by doing
+ `.get_index(index_name).faiss_index` to make it fit your needs.
+
+ Args:
+ index_name (`str`): The index_name/identifier of the index. This is the index_name that is used to
+ call `.get_nearest` or `.search`.
+ file (`str`): The path to the serialized faiss index on disk or remote URI (e.g. `"s3://my-bucket/index.faiss"`).
+ device (Optional `Union[int, List[int]]`): If positive integer, this is the index of the GPU to use. If negative integer, use all GPUs.
+ If a list of positive integers is passed in, run only on those GPUs. By default it uses the CPU.
+ storage_options (`dict`, *optional*):
+ Key/value pairs to be passed on to the file-system backend, if any.
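+
+ Example (a minimal sketch; assumes "my_index.faiss" was produced by `save_faiss_index` on a dataset of the same size):
+
+ ```py
+ >>> ds.load_faiss_index("embeddings", "my_index.faiss")
+ ```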
+ """
+ index = FaissIndex.load(file, device=device, storage_options=storage_options)
+ if index.faiss_index.ntotal != len(self):
+ raise ValueError(
+ f"Index size should match Dataset size, but Index '{index_name}' at {file} has {index.faiss_index.ntotal} elements while the dataset has {len(self)} examples."
+ )
+ self._indexes[index_name] = index
+ logger.info(f"Loaded FaissIndex {index_name} from {file}")
+
+ def add_elasticsearch_index(
+ self,
+ column: str,
+ index_name: Optional[str] = None,
+ host: Optional[str] = None,
+ port: Optional[int] = None,
+ es_client: Optional["Elasticsearch"] = None,
+ es_index_name: Optional[str] = None,
+ es_index_config: Optional[dict] = None,
+ ):
+ """Add a text index using ElasticSearch for fast retrieval.
+
+ Args:
+ column (`str`): The column of the documents to add to the index.
+ index_name (Optional `str`): The index_name/identifier of the index. This is the index name that is used to call `.get_nearest` or `.search`.
+ By default it corresponds to `column`.
+ host (Optional `str`, defaults to localhost):
+ Host where ElasticSearch is running.
+ port (Optional `int`, defaults to 9200):
+ Port where ElasticSearch is running.
+ es_client (Optional `elasticsearch.Elasticsearch`):
+ The elasticsearch client used to create the index if host and port are None.
+ es_index_name (Optional `str`): The elasticsearch index name used to create the index.
+ es_index_config (Optional `dict`):
+ The configuration of the elasticsearch index.
+ Default config is:
+
+ Config::
+
+ {
+ "settings": {
+ "number_of_shards": 1,
+ "analysis": {"analyzer": {"stop_standard": {"type": "standard", " stopwords": "_english_"}}},
+ },
+ "mappings": {
+ "properties": {
+ "text": {
+ "type": "text",
+ "analyzer": "standard",
+ "similarity": "BM25"
+ },
+ }
+ },
+ }
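+
+ Example (a minimal sketch; assumes an ElasticSearch server is reachable on `localhost:9200` and the dataset has a "text" column):
+
+ ```py
+ >>> ds.add_elasticsearch_index(column="text", host="localhost", port=9200)
+ >>> scores, examples = ds.get_nearest_examples("text", "my query", k=10)
+ ```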
+ """
+ index_name = index_name if index_name is not None else column
+ es_index = ElasticSearchIndex(
+ host=host, port=port, es_client=es_client, es_index_name=es_index_name, es_index_config=es_index_config
+ )
+ es_index.add_documents(self, column=column)
+ self._indexes[index_name] = es_index
+
+ def load_elasticsearch_index(
+ self,
+ index_name: str,
+ es_index_name: str,
+ host: Optional[str] = None,
+ port: Optional[int] = None,
+ es_client: Optional["Elasticsearch"] = None,
+ es_index_config: Optional[dict] = None,
+ ):
+ """Load an existing text index using ElasticSearch for fast retrieval.
+
+ Args:
+ index_name (`str`):
+ The `index_name`/identifier of the index. This is the index name that is used to call `get_nearest` or `search`.
+ es_index_name (`str`):
+ The name of elasticsearch index to load.
+ host (`str`, *optional*, defaults to `localhost`):
+ Host where ElasticSearch is running.
+ port (`int`, *optional*, defaults to `9200`):
+ Port where ElasticSearch is running.
+ es_client (`elasticsearch.Elasticsearch`, *optional*):
+ The elasticsearch client used to create the index if host and port are `None`.
+ es_index_config (`dict`, *optional*):
+ The configuration of the elasticsearch index.
+ Default config is:
+ ```
+ {
+ "settings": {
+ "number_of_shards": 1,
+ "analysis": {"analyzer": {"stop_standard": {"type": "standard", " stopwords": "_english_"}}},
+ },
+ "mappings": {
+ "properties": {
+ "text": {
+ "type": "text",
+ "analyzer": "standard",
+ "similarity": "BM25"
+ },
+ }
+ },
+ }
+ ```
+ """
+ self._indexes[index_name] = ElasticSearchIndex(
+ host=host, port=port, es_client=es_client, es_index_name=es_index_name, es_index_config=es_index_config
+ )
+
+ def drop_index(self, index_name: str):
+ """Drop the index with the specified column.
+
+ Args:
+ index_name (`str`):
+ The `index_name`/identifier of the index.
+ """
+ del self._indexes[index_name]
+
+ def search(self, index_name: str, query: Union[str, np.array], k: int = 10, **kwargs) -> SearchResults:
+ """Find the indices of the nearest examples in the dataset to the query.
+
+ Args:
+ index_name (`str`):
+ The name/identifier of the index.
+ query (`Union[str, np.ndarray]`):
+ The query as a string if `index_name` is a text index or as a numpy array if `index_name` is a vector index.
+ k (`int`):
+ The number of examples to retrieve.
+
+ Returns:
+ `(scores, indices)`:
+ A tuple of `(scores, indices)` where:
+ - **scores** (`List[float]`): the retrieval scores from either FAISS (`IndexFlatL2` by default) or ElasticSearch of the retrieved examples
+ - **indices** (`List[int]`): the indices of the retrieved examples
+ """
+ self._check_index_is_initialized(index_name)
+ return self._indexes[index_name].search(query, k, **kwargs)
+
+ def search_batch(
+ self, index_name: str, queries: Union[List[str], np.array], k: int = 10, **kwargs
+ ) -> BatchedSearchResults:
+ """Find the indices of the nearest examples in the dataset to the queries.
+
+ Args:
+ index_name (`str`):
+ The `index_name`/identifier of the index.
+ queries (`Union[List[str], np.ndarray]`):
+ The queries as a list of strings if `index_name` is a text index or as a numpy array if `index_name` is a vector index.
+ k (`int`):
+ The number of examples to retrieve per query.
+
+ Returns:
+ `(total_scores, total_indices)`:
+ A tuple of `(total_scores, total_indices)` where:
+ - **total_scores** (`List[List[float]]`): the retrieval scores from either FAISS (`IndexFlatL2` by default) or ElasticSearch of the retrieved examples per query
+ - **total_indices** (`List[List[int]]`): the indices of the retrieved examples per query
+ """
+ self._check_index_is_initialized(index_name)
+ return self._indexes[index_name].search_batch(queries, k, **kwargs)
+
+ def get_nearest_examples(
+ self, index_name: str, query: Union[str, np.array], k: int = 10, **kwargs
+ ) -> NearestExamplesResults:
+ """Find the nearest examples in the dataset to the query.
+
+ Args:
+ index_name (`str`):
+ The index_name/identifier of the index.
+ query (`Union[str, np.ndarray]`):
+ The query as a string if `index_name` is a text index or as a numpy array if `index_name` is a vector index.
+ k (`int`):
+ The number of examples to retrieve.
+
+ Returns:
+ `(scores, examples)`:
+ A tuple of `(scores, examples)` where:
+ - **scores** (`List[float]`): the retrieval scores from either FAISS (`IndexFlatL2` by default) or ElasticSearch of the retrieved examples
+ - **examples** (`dict`): the retrieved examples
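+
+ Example (a minimal sketch; assumes a Faiss index named "embeddings" was added and `query_embedding` is a 1D `np.float32` array):
+
+ ```py
+ >>> scores, examples = ds.get_nearest_examples("embeddings", query_embedding, k=5)
+ ```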
+ """
+ self._check_index_is_initialized(index_name)
+ scores, indices = self.search(index_name, query, k, **kwargs)
+ top_indices = [i for i in indices if i >= 0]
+ return NearestExamplesResults(scores[: len(top_indices)], self[top_indices])
+
+ def get_nearest_examples_batch(
+ self, index_name: str, queries: Union[List[str], np.array], k: int = 10, **kwargs
+ ) -> BatchedNearestExamplesResults:
+ """Find the nearest examples in the dataset to the queries.
+
+ Args:
+ index_name (`str`):
+ The `index_name`/identifier of the index.
+ queries (`Union[List[str], np.ndarray]`):
+ The queries as a list of strings if `index_name` is a text index or as a numpy array if `index_name` is a vector index.
+ k (`int`):
+ The number of examples to retrieve per query.
+
+ Returns:
+ `(total_scores, total_examples)`:
+ A tuple of `(total_scores, total_examples)` where:
+ - **total_scores** (`List[List[float]]`): the retrieval scores from either FAISS (`IndexFlatL2` by default) or ElasticSearch of the retrieved examples per query
+ - **total_examples** (`List[dict]`): the retrieved examples per query
+ """
+ self._check_index_is_initialized(index_name)
+ total_scores, total_indices = self.search_batch(index_name, queries, k, **kwargs)
+ total_scores = [
+ scores_i[: len([i for i in indices_i if i >= 0])]
+ for scores_i, indices_i in zip(total_scores, total_indices)
+ ]
+ total_samples = [self[[i for i in indices if i >= 0]] for indices in total_indices]
+ return BatchedNearestExamplesResults(total_scores, total_samples)
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/splits.py b/evalkit_tf437/lib/python3.10/site-packages/datasets/splits.py
new file mode 100644
index 0000000000000000000000000000000000000000..fd4966cb4007adc9f47fd78cf2b0a1732913aaef
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/datasets/splits.py
@@ -0,0 +1,635 @@
+# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Lint as: python3
+"""Splits related API."""
+
+import abc
+import collections
+import copy
+import dataclasses
+import re
+from dataclasses import dataclass
+from typing import Dict, List, Optional, Union
+
+from .arrow_reader import FileInstructions, make_file_instructions
+from .naming import _split_re
+from .utils.py_utils import NonMutableDict, asdict
+
+
+@dataclass
+class SplitInfo:
+ name: str = dataclasses.field(default="", metadata={"include_in_asdict_even_if_is_default": True})
+ num_bytes: int = dataclasses.field(default=0, metadata={"include_in_asdict_even_if_is_default": True})
+ num_examples: int = dataclasses.field(default=0, metadata={"include_in_asdict_even_if_is_default": True})
+ shard_lengths: Optional[List[int]] = None
+
+ # Deprecated
+ # For backward compatibility, this field needs to always be included in files like
+ # dataset_infos.json and dataset_info.json files
+ # To do so, we always include it in the output of datasets.utils.py_utils.asdict(split_info)
+ dataset_name: Optional[str] = dataclasses.field(
+ default=None, metadata={"include_in_asdict_even_if_is_default": True}
+ )
+
+ @property
+ def file_instructions(self):
+ """Returns the list of dict(filename, take, skip)."""
+ # `self.dataset_name` is assigned in `SplitDict.add()`.
+ instructions = make_file_instructions(
+ name=self.dataset_name,
+ split_infos=[self],
+ instruction=str(self.name),
+ )
+ return instructions.file_instructions
+
+
+@dataclass
+class SubSplitInfo:
+ """Wrapper around a sub split info.
+ This class exposes info on the subsplit:
+ ```
+ ds, info = datasets.load_dataset(..., split='train[75%:]', with_info=True)
+ info.splits['train[75%:]'].num_examples
+ ```
+ """
+
+ instructions: FileInstructions
+
+ @property
+ def num_examples(self):
+ """Returns the number of examples in the subsplit."""
+ return self.instructions.num_examples
+
+ @property
+ def file_instructions(self):
+ """Returns the list of dict(filename, take, skip)."""
+ return self.instructions.file_instructions
+
+
+class SplitBase(metaclass=abc.ABCMeta):
+ # pylint: disable=line-too-long
+ """Abstract base class for Split compositionality.
+
+ See the
+ [guide on splits](../loading#slice-splits)
+ for more information.
+
+ There are three parts to the composition:
+ 1) The splits are composed (defined, merged, split,...) together before
+ calling the `.as_dataset()` function. This is done with the `__add__`,
+ `__getitem__`, which return a tree of `SplitBase` (whose leaves
+ are the `NamedSplit` objects)
+
+ ```
+ split = datasets.Split.TRAIN + datasets.Split.TEST.subsplit(datasets.percent[:50])
+ ```
+
+ 2) The `SplitBase` is forwarded to the `.as_dataset()` function
+ to be resolved into actual read instruction. This is done by the
+ `.get_read_instruction()` method which takes the real dataset splits
+ (name, number of shards,...) and parses the tree to return a
+ `SplitReadInstruction()` object
+
+ ```
+ read_instruction = split.get_read_instruction(self.info.splits)
+ ```
+
+ 3) The `SplitReadInstruction` is then used in the `tf.data.Dataset` pipeline
+ to define which files to read and how to skip examples within file.
+
+ """
+
+ # pylint: enable=line-too-long
+
+ @abc.abstractmethod
+ def get_read_instruction(self, split_dict):
+ """Parse the descriptor tree and compile all read instructions together.
+
+ Args:
+ split_dict: `dict`, The `dict[split_name, SplitInfo]` of the dataset
+
+ Returns:
+ split_read_instruction: `SplitReadInstruction`
+ """
+ raise NotImplementedError("Abstract method")
+
+ def __eq__(self, other):
+ """Equality: datasets.Split.TRAIN == 'train'."""
+ if isinstance(other, (NamedSplit, str)):
+ return False
+ raise NotImplementedError("Equality is not implemented between merged/sub splits.")
+
+ def __ne__(self, other):
+ """InEquality: datasets.Split.TRAIN != 'test'."""
+ return not self.__eq__(other)
+
+ def __add__(self, other):
+ """Merging: datasets.Split.TRAIN + datasets.Split.TEST."""
+ return _SplitMerged(self, other)
+
+ def subsplit(self, arg=None, k=None, percent=None, weighted=None): # pylint: disable=redefined-outer-name
+ """Divides this split into subsplits.
+
+ There are 3 ways to define subsplits, which correspond to the 3
+ arguments `k` (get `k` even subsplits), `percent` (get a slice of the
+ dataset with `datasets.percent`), and `weighted` (get subsplits with proportions
+ specified by `weighted`).
+
+ Example::
+
+ ```
+ # 50% train, 50% test
+ train, test = split.subsplit(k=2)
+ # 50% train, 25% test, 25% validation
+ train, test, validation = split.subsplit(weighted=[2, 1, 1])
+ # Extract last 20%
+ subsplit = split.subsplit(datasets.percent[-20:])
+ ```
+
+ Warning: `k` and `weighted` will be converted into percent which means that
+ values below the percent will be rounded up or down. The final split may be
+ bigger to deal with remainders. For instance:
+
+ ```
+ train, test, valid = split.subsplit(k=3) # 33%, 33%, 34%
+ s1, s2, s3, s4 = split.subsplit(weighted=[2, 2, 1, 1]) # 33%, 33%, 16%, 18%
+ ```
+
+ Args:
+ arg: If no kwargs are given, `arg` will be interpreted as one of
+ `k`, `percent`, or `weighted` depending on the type.
+ For example:
+ ```
+ split.subsplit(10) # Equivalent to split.subsplit(k=10)
+ split.subsplit(datasets.percent[:-20]) # percent=datasets.percent[:-20]
+ split.subsplit([1, 1, 2]) # weighted=[1, 1, 2]
+ ```
+ k: `int` If set, subdivide the split into `k` equal parts.
+ percent: `datasets.percent slice`, return a single subsplit corresponding to
+ a slice of the original split. For example:
+ `split.subsplit(datasets.percent[-20:]) # Last 20% of the dataset`.
+ weighted: `list[int]`, return a list of subsplits whose proportions match
+ the normalized sum of the list. For example:
+ `split.subsplit(weighted=[1, 1, 2]) # 25%, 25%, 50%`.
+
+ Returns:
+ A subsplit or list of subsplits extracted from this split object.
+ """
+ # Note that the percent kwargs redefine the outer name datasets.percent. This
+ # is done for consistency (.subsplit(percent=datasets.percent[:40]))
+ if sum(bool(x) for x in (arg, k, percent, weighted)) != 1:
+ raise ValueError("Only one argument of subsplit should be set.")
+
+ # Auto deduce k
+ if isinstance(arg, int):
+ k = arg
+ elif isinstance(arg, slice):
+ percent = arg
+ elif isinstance(arg, list):
+ weighted = arg
+
+ if not (k or percent or weighted):
+ raise ValueError(
+ f"Invalid split argument {arg}. Only list, slice and int supported. "
+ "One of k, weighted or percent should be set to a non empty value."
+ )
+
+ def assert_slices_coverage(slices):
+ # Ensure that the expanded slices cover all percents.
+ assert sum((list(range(*s.indices(100))) for s in slices), []) == list(range(100))
+
+ if k:
+ if not 0 < k <= 100:
+ raise ValueError(f"Subsplit k should be between 0 and 100, got {k}")
+ shift = 100 // k
+ slices = [slice(i * shift, (i + 1) * shift) for i in range(k)]
+ # Round up last element to ensure all elements are taken
+ slices[-1] = slice(slices[-1].start, 100)
+ # Internal check to ensure full coverage
+ assert_slices_coverage(slices)
+ return tuple(_SubSplit(self, s) for s in slices)
+ elif percent:
+ return _SubSplit(self, percent)
+ elif weighted:
+ # Normalize the weighted sum
+ total = sum(weighted)
+ weighted = [100 * x // total for x in weighted]
+ # Create the slice for each of the elements
+ start = 0
+ stop = 0
+ slices = []
+ for v in weighted:
+ stop += v
+ slices.append(slice(start, stop))
+ start = stop
+ # Round up last element to ensure all elements are taken
+ slices[-1] = slice(slices[-1].start, 100)
+ # Internal check to ensure full coverage
+ assert_slices_coverage(slices)
+ return tuple(_SubSplit(self, s) for s in slices)
+ else:
+ # Should not be possible
+ raise ValueError("Could not determine the split")
+
+
+# 2 requirements:
+# 1. datasets.percent be sliceable
+# 2. datasets.percent be documented
+#
+# Instances are not documented, so we want datasets.percent to be a class, but to
+# have it be sliceable, we need this metaclass.
+class PercentSliceMeta(type):
+ def __getitem__(cls, slice_value):
+ if not isinstance(slice_value, slice):
+ raise ValueError(f"datasets.percent should only be called with slice, not {slice_value}")
+ return slice_value
+
+
+class PercentSlice(metaclass=PercentSliceMeta):
+ # pylint: disable=line-too-long
+ """Syntactic sugar for defining slice subsplits: `datasets.percent[75:-5]`.
+
+ See the
+ [guide on splits](../loading#slice-splits)
+ for more information.
+ """
+
+ # pylint: enable=line-too-long
+ pass
+
+
+percent = PercentSlice # pylint: disable=invalid-name
+
+
+class _SplitMerged(SplitBase):
+ """Represent two split descriptors merged together."""
+
+ def __init__(self, split1, split2):
+ self._split1 = split1
+ self._split2 = split2
+
+ def get_read_instruction(self, split_dict):
+ read_instruction1 = self._split1.get_read_instruction(split_dict)
+ read_instruction2 = self._split2.get_read_instruction(split_dict)
+ return read_instruction1 + read_instruction2
+
+ def __repr__(self):
+ return f"({repr(self._split1)} + {repr(self._split2)})"
+
+
+class _SubSplit(SplitBase):
+ """Represent a sub split of a split descriptor."""
+
+ def __init__(self, split, slice_value):
+ self._split = split
+ self._slice_value = slice_value
+
+ def get_read_instruction(self, split_dict):
+ return self._split.get_read_instruction(split_dict)[self._slice_value]
+
+ def __repr__(self):
+ slice_str = "{start}:{stop}"
+ if self._slice_value.step is not None:
+ slice_str += ":{step}"
+ slice_str = slice_str.format(
+ start="" if self._slice_value.start is None else self._slice_value.start,
+ stop="" if self._slice_value.stop is None else self._slice_value.stop,
+ step=self._slice_value.step,
+ )
+ return f"{repr(self._split)}(datasets.percent[{slice_str}])"
+
+
+class NamedSplit(SplitBase):
+ """Descriptor corresponding to a named split (train, test, ...).
+
+ Example:
+ Each descriptor can be composed with other using addition or slice:
+
+ ```py
+ split = datasets.Split.TRAIN.subsplit(datasets.percent[0:25]) + datasets.Split.TEST
+ ```
+
+ The resulting split will correspond to 25% of the train split merged with
+ 100% of the test split.
+
+ A split cannot be added twice, so the following will fail:
+
+ ```py
+ split = (
+ datasets.Split.TRAIN.subsplit(datasets.percent[:25]) +
+ datasets.Split.TRAIN.subsplit(datasets.percent[75:])
+ ) # Error
+ split = datasets.Split.TEST + datasets.Split.ALL # Error
+ ```
+
+ The slices can be applied only one time. So the following are valid:
+
+ ```py
+ split = (
+ datasets.Split.TRAIN.subsplit(datasets.percent[:25]) +
+ datasets.Split.TEST.subsplit(datasets.percent[:50])
+ )
+ split = (datasets.Split.TRAIN + datasets.Split.TEST).subsplit(datasets.percent[:50])
+ ```
+
+ But this is not valid:
+
+ ```py
+ train = datasets.Split.TRAIN
+ test = datasets.Split.TEST
+ split = train.subsplit(datasets.percent[:25]).subsplit(datasets.percent[:25])
+ split = (train.subsplit(datasets.percent[:25]) + test).subsplit(datasets.percent[:50])
+ ```
+ """
+
+ def __init__(self, name):
+ self._name = name
+ split_names_from_instruction = [split_instruction.split("[")[0] for split_instruction in name.split("+")]
+ for split_name in split_names_from_instruction:
+ if not re.match(_split_re, split_name):
+ raise ValueError(f"Split name should match '{_split_re}' but got '{split_name}'.")
+
+ def __str__(self):
+ return self._name
+
+ def __repr__(self):
+ return f"NamedSplit({self._name!r})"
+
+ def __eq__(self, other):
+ """Equality: datasets.Split.TRAIN == 'train'."""
+ if isinstance(other, NamedSplit):
+ return self._name == other._name # pylint: disable=protected-access
+ elif isinstance(other, SplitBase):
+ return False
+ elif isinstance(other, str): # Other should be string
+ return self._name == other
+ else:
+ raise ValueError(f"Equality not supported between split {self} and {other}")
+
+ def __lt__(self, other):
+ return self._name < other._name # pylint: disable=protected-access
+
+ def __hash__(self):
+ return hash(self._name)
+
+ def get_read_instruction(self, split_dict):
+ return SplitReadInstruction(split_dict[self._name])
+
+
+class NamedSplitAll(NamedSplit):
+ """Split corresponding to the union of all defined dataset splits."""
+
+ def __init__(self):
+ super().__init__("all")
+
+ def __repr__(self):
+ return "NamedSplitAll()"
+
+ def get_read_instruction(self, split_dict):
+ # Merge all dataset split together
+ read_instructions = [SplitReadInstruction(s) for s in split_dict.values()]
+ return sum(read_instructions, SplitReadInstruction())
+
+
+class Split:
+ # pylint: disable=line-too-long
+ """`Enum` for dataset splits.
+
+ Datasets are typically split into different subsets to be used at various
+ stages of training and evaluation.
+
+ - `TRAIN`: the training data.
+ - `VALIDATION`: the validation data. If present, this is typically used as
+ evaluation data while iterating on a model (e.g. changing hyperparameters,
+ model architecture, etc.).
+ - `TEST`: the testing data. This is the data to report metrics on. Typically
+ you do not want to use this during model iteration as you may overfit to it.
+ - `ALL`: the union of all defined dataset splits.
+
+ All splits, including compositions, inherit from `datasets.SplitBase`.
+
+ See the [guide](../load_hub#splits) on splits for more information.
+
+ Example:
+
+ ```py
+ >>> datasets.SplitGenerator(
+ ... name=datasets.Split.TRAIN,
+ ... gen_kwargs={"split_key": "train", "files": dl_manager.download_and_extract(url)},
+ ... ),
+ ... datasets.SplitGenerator(
+ ... name=datasets.Split.VALIDATION,
+ ... gen_kwargs={"split_key": "validation", "files": dl_manager.download_and_extract(url)},
+ ... ),
+ ... datasets.SplitGenerator(
+ ... name=datasets.Split.TEST,
+ ... gen_kwargs={"split_key": "test", "files": dl_manager.download_and_extract(url)},
+ ... )
+ ```
+ """
+
+ # pylint: enable=line-too-long
+ TRAIN = NamedSplit("train")
+ TEST = NamedSplit("test")
+ VALIDATION = NamedSplit("validation")
+ ALL = NamedSplitAll()
+
+ def __new__(cls, name):
+ """Create a custom split with datasets.Split('custom_name')."""
+ return NamedSplitAll() if name == "all" else NamedSplit(name)
+
+
+# Similar to SplitInfo, but contain an additional slice info
+SlicedSplitInfo = collections.namedtuple(
+ "SlicedSplitInfo",
+ [
+ "split_info",
+ "slice_value",
+ ],
+) # noqa: E231
+
+
+class SplitReadInstruction:
+ """Object containing the reading instruction for the dataset.
+
+ Similarly to `SplitDescriptor` nodes, this object can be composed with itself,
+ but the resolution happens instantaneously, instead of keeping track of the
+ tree, so that all instructions are compiled and flattened into a single
+ SplitReadInstruction object containing the list of files and slices to use.
+
+ Once resolved, the instructions can be accessed with:
+
+ ```
+ read_instructions.get_list_sliced_split_info() # List of splits to use
+ ```
+
+ """
+
+ def __init__(self, split_info=None):
+ self._splits = NonMutableDict(error_msg="Overlap between splits. Split {key} has been added with " "itself.")
+
+ if split_info:
+ self.add(SlicedSplitInfo(split_info=split_info, slice_value=None))
+
+ def add(self, sliced_split):
+ """Add a SlicedSplitInfo to the read instructions."""
+ # TODO(epot): Check that the number of examples per shard % 100 == 0
+ # Otherwise the slices value may be unbalanced and not exactly reflect the
+ # requested slice.
+ self._splits[sliced_split.split_info.name] = sliced_split
+
+ def __add__(self, other):
+ """Merging split together."""
+ # Will raise an error if a split has already been added (NonMutableDict)
+ # TODO(epot): If a split is already added but there is no overlap between
+ # the slices, should merge the slices (ex: [:10] + [80:])
+ split_instruction = SplitReadInstruction()
+ split_instruction._splits.update(self._splits) # pylint: disable=protected-access
+ split_instruction._splits.update(other._splits) # pylint: disable=protected-access
+ return split_instruction
+
+ def __getitem__(self, slice_value):
+ """Sub-splits."""
+ # Will raise an error if a split has already been sliced
+ split_instruction = SplitReadInstruction()
+ for v in self._splits.values():
+ if v.slice_value is not None:
+ raise ValueError(f"Trying to slice Split {v.split_info.name} which has already been sliced")
+ v = v._asdict()
+ v["slice_value"] = slice_value
+ split_instruction.add(SlicedSplitInfo(**v))
+ return split_instruction
+
+ def get_list_sliced_split_info(self):
+ return list(self._splits.values())
+
+
+class SplitDict(dict):
+ """Split info object."""
+
+ def __init__(self, *args, dataset_name=None, **kwargs):
+ super().__init__(*args, **kwargs)
+ self.dataset_name = dataset_name
+
+ def __getitem__(self, key: Union[SplitBase, str]):
+ # 1st case: The key exists: `info.splits['train']`
+ if str(key) in self:
+ return super().__getitem__(str(key))
+ # 2nd case: Uses instructions: `info.splits['train[50%]']`
+ else:
+ instructions = make_file_instructions(
+ name=self.dataset_name,
+ split_infos=self.values(),
+ instruction=key,
+ )
+ return SubSplitInfo(instructions)
+
+ def __setitem__(self, key: Union[SplitBase, str], value: SplitInfo):
+ if key != value.name:
+ raise ValueError(f"Cannot add elem. (key mismatch: '{key}' != '{value.name}')")
+ super().__setitem__(key, value)
+
+ def add(self, split_info: SplitInfo):
+ """Add the split info."""
+ if split_info.name in self:
+ raise ValueError(f"Split {split_info.name} already present")
+ split_info.dataset_name = self.dataset_name
+ super().__setitem__(split_info.name, split_info)
+
+ @property
+ def total_num_examples(self):
+ """Return the total number of examples."""
+ return sum(s.num_examples for s in self.values())
+
+ @classmethod
+ def from_split_dict(cls, split_infos: Union[List, Dict], dataset_name: Optional[str] = None):
+ """Returns a new SplitDict initialized from a Dict or List of `split_infos`."""
+ if isinstance(split_infos, dict):
+ split_infos = list(split_infos.values())
+
+ if dataset_name is None:
+ dataset_name = split_infos[0].get("dataset_name") if split_infos else None
+
+ split_dict = cls(dataset_name=dataset_name)
+
+ for split_info in split_infos:
+ if isinstance(split_info, dict):
+ split_info = SplitInfo(**split_info)
+ split_dict.add(split_info)
+
+ return split_dict
+
+ def to_split_dict(self):
+ """Returns a list of SplitInfo protos that we have."""
+ out = []
+ for split_name, split_info in self.items():
+ split_info = copy.deepcopy(split_info)
+ split_info.name = split_name
+ out.append(split_info)
+ return out
+
+ def copy(self):
+ return SplitDict.from_split_dict(self.to_split_dict(), self.dataset_name)
+
+ def _to_yaml_list(self) -> list:
+ out = [asdict(s) for s in self.to_split_dict()]
+ # we don't need the shard lengths in YAML, since it depends on max_shard_size and num_proc
+ for split_info_dict in out:
+ split_info_dict.pop("shard_lengths", None)
+ # we don't need the dataset_name attribute that is deprecated
+ for split_info_dict in out:
+ split_info_dict.pop("dataset_name", None)
+ return out
+
+ @classmethod
+ def _from_yaml_list(cls, yaml_data: list) -> "SplitDict":
+ return cls.from_split_dict(yaml_data)
+
+
+@dataclass
+class SplitGenerator:
+ """Defines the split information for the generator.
+
+ This should be used as the returned value of
+ `GeneratorBasedBuilder._split_generators`.
+ See `GeneratorBasedBuilder._split_generators` for more info and example
+ of usage.
+
+ Args:
+ name (`str`):
+ Name of the `Split` for which the generator will
+ create the examples.
+ **gen_kwargs (additional keyword arguments):
+ Keyword arguments to forward to the `DatasetBuilder._generate_examples` method
+ of the builder.
+
+ Example:
+
+ ```py
+ >>> datasets.SplitGenerator(
+ ... name=datasets.Split.TRAIN,
+ ... gen_kwargs={"split_key": "train", "files": dl_manager.download_and_extract(url)},
+ ... )
+ ```
+ """
+
+ name: str
+ gen_kwargs: Dict = dataclasses.field(default_factory=dict)
+ split_info: SplitInfo = dataclasses.field(init=False)
+
+ def __post_init__(self):
+ self.name = str(self.name) # Make sure we convert NamedSplits in strings
+ NamedSplit(self.name) # check that it's a valid split name
+ self.split_info = SplitInfo(name=self.name)
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/streaming.py b/evalkit_tf437/lib/python3.10/site-packages/datasets/streaming.py
new file mode 100644
index 0000000000000000000000000000000000000000..d9e7e185a95bd4a4343e231f1ce150f0d4d8372c
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/datasets/streaming.py
@@ -0,0 +1,140 @@
+import importlib
+import inspect
+from functools import wraps
+from typing import TYPE_CHECKING, Optional
+
+from .download.download_config import DownloadConfig
+from .download.streaming_download_manager import (
+ xbasename,
+ xdirname,
+ xet_parse,
+ xexists,
+ xgetsize,
+ xglob,
+ xgzip_open,
+ xisdir,
+ xisfile,
+ xjoin,
+ xlistdir,
+ xnumpy_load,
+ xopen,
+ xpandas_read_csv,
+ xpandas_read_excel,
+ xPath,
+ xpyarrow_parquet_read_table,
+ xrelpath,
+ xsio_loadmat,
+ xsplit,
+ xsplitext,
+ xwalk,
+ xxml_dom_minidom_parse,
+)
+from .utils.logging import get_logger
+from .utils.patching import patch_submodule
+from .utils.py_utils import get_imports
+
+
+logger = get_logger(__name__)
+
+
+if TYPE_CHECKING:
+ from .builder import DatasetBuilder
+
+
+def extend_module_for_streaming(module_path, download_config: Optional[DownloadConfig] = None):
+ """Extend the module to support streaming.
+
+ We patch some functions in the module to use `fsspec` to support data streaming:
+ - We use `fsspec.open` to open and read remote files. We patch the module function:
+ - `open`
+ - We use the "::" hop separator to join paths and navigate remote compressed/archive files. We patch the module
+ functions:
+ - `os.path.join`
+ - `pathlib.Path.joinpath` and `pathlib.Path.__truediv__` (called when using the "/" operator)
+
+ The patched functions are replaced with custom functions defined to work with the
+ :class:`~download.streaming_download_manager.StreamingDownloadManager`.
+
+ Args:
+ module_path: Path to the module to be extended.
+ download_config: mainly uses `use_auth_token` or `storage_options` to support different platforms and auth types.
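+
+ Example (a minimal sketch; "datasets_modules.my_dataset_script" is a hypothetical module path):
+
+ ```py
+ >>> extend_module_for_streaming("datasets_modules.my_dataset_script", download_config=DownloadConfig())
+ ```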
+ """
+
+ module = importlib.import_module(module_path)
+
+ # TODO(QL): always update the module to add subsequent new authentication without removing old ones
+ if hasattr(module, "_patched_for_streaming") and module._patched_for_streaming:
+ if isinstance(module._patched_for_streaming, DownloadConfig):
+ module._patched_for_streaming.token = download_config.token
+ module._patched_for_streaming.storage_options = download_config.storage_options
+ return
+
+ def wrap_auth(function):
+ @wraps(function)
+ def wrapper(*args, **kwargs):
+ return function(*args, download_config=download_config, **kwargs)
+
+ wrapper._decorator_name_ = "wrap_auth"
+ return wrapper
+
+ # open files in a streaming fashion
+ patch_submodule(module, "open", wrap_auth(xopen)).start()
+ patch_submodule(module, "os.listdir", wrap_auth(xlistdir)).start()
+ patch_submodule(module, "os.walk", wrap_auth(xwalk)).start()
+ patch_submodule(module, "glob.glob", wrap_auth(xglob)).start()
+ # allow to navigate in remote zip files
+ patch_submodule(module, "os.path.join", xjoin).start()
+ patch_submodule(module, "os.path.dirname", xdirname).start()
+ patch_submodule(module, "os.path.basename", xbasename).start()
+ patch_submodule(module, "os.path.relpath", xrelpath).start()
+ patch_submodule(module, "os.path.split", xsplit).start()
+ patch_submodule(module, "os.path.splitext", xsplitext).start()
+ # allow checks on paths
+ patch_submodule(module, "os.path.exists", wrap_auth(xexists)).start()
+ patch_submodule(module, "os.path.isdir", wrap_auth(xisdir)).start()
+ patch_submodule(module, "os.path.isfile", wrap_auth(xisfile)).start()
+ patch_submodule(module, "os.path.getsize", wrap_auth(xgetsize)).start()
+ patch_submodule(module, "pathlib.Path", xPath).start()
+ # file readers
+ patch_submodule(module, "gzip.open", wrap_auth(xgzip_open)).start()
+ patch_submodule(module, "numpy.load", wrap_auth(xnumpy_load)).start()
+ patch_submodule(module, "pandas.read_csv", wrap_auth(xpandas_read_csv), attrs=["__version__"]).start()
+ patch_submodule(module, "pandas.read_excel", wrap_auth(xpandas_read_excel), attrs=["__version__"]).start()
+ patch_submodule(module, "scipy.io.loadmat", wrap_auth(xsio_loadmat), attrs=["__version__"]).start()
+ patch_submodule(module, "xml.etree.ElementTree.parse", wrap_auth(xet_parse)).start()
+ patch_submodule(module, "xml.dom.minidom.parse", wrap_auth(xxml_dom_minidom_parse)).start()
+ # pyarrow: do not patch pyarrow attribute in packaged modules
+ if not module.__name__.startswith("datasets.packaged_modules."):
+ patch_submodule(module, "pyarrow.parquet.read_table", wrap_auth(xpyarrow_parquet_read_table)).start()
+ module._patched_for_streaming = download_config
+
+
+def extend_dataset_builder_for_streaming(builder: "DatasetBuilder"):
+ """Extend the dataset builder module and the modules imported by it to support streaming.
+
+ Args:
+ builder (:class:`DatasetBuilder`): Dataset builder instance.
+ """
+ # this extends the open and os.path.join functions for data streaming
+ download_config = DownloadConfig(storage_options=builder.storage_options, token=builder.token)
+ extend_module_for_streaming(builder.__module__, download_config=download_config)
+ # if needed, we also have to extend additional internal imports (like wmt14 -> wmt_utils)
+ if not builder.__module__.startswith("datasets."): # check that it's not a packaged builder like csv
+ for imports in get_imports(inspect.getfile(builder.__class__)):
+ if imports[0] == "internal":
+ internal_import_name = imports[1]
+ internal_module_name = ".".join(builder.__module__.split(".")[:-1] + [internal_import_name])
+ extend_module_for_streaming(internal_module_name, download_config=download_config)
+
+ # builders can inherit from other builders that might use streaming functionality
+ # (for example, ImageFolder and AudioFolder inherit from FolderBuilder which implements examples generation)
+ # but these parents builders are not patched automatically as they are not instantiated, so we patch them here
+ from .builder import DatasetBuilder
+
+ parent_builder_modules = [
+ cls.__module__
+ for cls in type(builder).__mro__[1:] # make sure it's not the same module we've already patched
+ if issubclass(cls, DatasetBuilder) and cls.__module__ != DatasetBuilder.__module__
+ ] # check it's not a standard builder from datasets.builder
+ for module in parent_builder_modules:
+ extend_module_for_streaming(module, download_config=download_config)
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/table.py b/evalkit_tf437/lib/python3.10/site-packages/datasets/table.py
new file mode 100644
index 0000000000000000000000000000000000000000..43aa228278f96deb09b162e17a38e07472c0fa9d
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/datasets/table.py
@@ -0,0 +1,2360 @@
+import copy
+import os
+from functools import partial
+from itertools import groupby
+from typing import TYPE_CHECKING, Callable, Iterator, List, Optional, Tuple, TypeVar, Union
+
+import numpy as np
+import pyarrow as pa
+import pyarrow.compute as pc
+import pyarrow.types
+
+from . import config
+from .utils.logging import get_logger
+
+
+if TYPE_CHECKING:
+ from .features.features import Features, FeatureType
+
+
+logger = get_logger(__name__)
+
+
+def inject_arrow_table_documentation(arrow_table_method):
+ def wrapper(fn):
+ fn.__doc__ = arrow_table_method.__doc__ + (fn.__doc__ if fn.__doc__ is not None else "")
+ fn.__doc__ = fn.__doc__.replace("pyarrow.Table", "Table")
+ if hasattr(arrow_table_method, "__annotations__"):
+ fn.__annotations__ = arrow_table_method.__annotations__
+ return fn
+
+ return wrapper
+
+
+def _in_memory_arrow_table_from_file(filename: str) -> pa.Table:
+ in_memory_stream = pa.input_stream(filename)
+ opened_stream = pa.ipc.open_stream(in_memory_stream)
+ pa_table = opened_stream.read_all()
+ return pa_table
+
+
+def _in_memory_arrow_table_from_buffer(buffer: pa.Buffer) -> pa.Table:
+ stream = pa.BufferReader(buffer)
+ opened_stream = pa.ipc.open_stream(stream)
+ table = opened_stream.read_all()
+ return table
+
+
+def _memory_mapped_record_batch_reader_from_file(filename: str) -> pa.RecordBatchStreamReader:
+ memory_mapped_stream = pa.memory_map(filename)
+ return pa.ipc.open_stream(memory_mapped_stream)
+
+
+def read_schema_from_file(filename: str) -> pa.Schema:
+ """
+ Infer the arrow table schema from a file without loading the whole file into memory.
+ Useful especially for very big files.
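+
+ Example (illustrative; "data/train.arrow" is a hypothetical Arrow IPC stream file):
+
+ ```py
+ >>> schema = read_schema_from_file("data/train.arrow")
+ >>> schema.names
+ ```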
+ """
+ with pa.memory_map(filename) as memory_mapped_stream:
+ schema = pa.ipc.open_stream(memory_mapped_stream).schema
+ return schema
+
+
+def _memory_mapped_arrow_table_from_file(filename: str) -> pa.Table:
+ opened_stream = _memory_mapped_record_batch_reader_from_file(filename)
+ pa_table = opened_stream.read_all()
+ return pa_table
+
+
+def _deepcopy(x, memo: dict):
+ """deepcopy a regular class instance"""
+ cls = x.__class__
+ result = cls.__new__(cls)
+ memo[id(x)] = result
+ for k, v in x.__dict__.items():
+ setattr(result, k, copy.deepcopy(v, memo))
+ return result
+
+
+def _interpolation_search(arr: List[int], x: int) -> int:
+ """
+ Return the position i of a sorted array so that arr[i] <= x < arr[i+1]
+
+ Args:
+ arr (`List[int]`): non-empty sorted list of integers
+ x (`int`): query
+
+ Returns:
+ `int`: the position i so that arr[i] <= x < arr[i+1]
+
+ Raises:
+ `IndexError`: if the array is empty or if the query is outside the array values
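+
+ Example:
+
+ ```py
+ >>> _interpolation_search([0, 5, 10], 7)
+ 1
+ ```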
+ """
+ i, j = 0, len(arr) - 1
+ while i < j and arr[i] <= x < arr[j]:
+ k = i + ((j - i) * (x - arr[i]) // (arr[j] - arr[i]))
+ if arr[k] <= x < arr[k + 1]:
+ return k
+ elif arr[k] < x:
+ i, j = k + 1, j
+ else:
+ i, j = i, k
+ raise IndexError(f"Invalid query '{x}' for size {arr[-1] if len(arr) else 'none'}.")
+
+
+class IndexedTableMixin:
+ def __init__(self, table: pa.Table):
+ self._schema: pa.Schema = table.schema
+ self._batches: List[pa.RecordBatch] = [
+ recordbatch for recordbatch in table.to_batches() if len(recordbatch) > 0
+ ]
+ self._offsets: np.ndarray = np.cumsum([0] + [len(b) for b in self._batches], dtype=np.int64)
+
+ def fast_gather(self, indices: Union[List[int], np.ndarray]) -> pa.Table:
+ """
+ Create a pa.Table by gathering the records at the specified indices. Should be faster
+ than pa.concat_tables(table.fast_slice(int(i) % table.num_rows, 1) for i in indices) since NumPy can compute
+ the binary searches in parallel, using highly optimized C code.
+ """
+ if not len(indices):
+ raise ValueError("Indices must be non-empty")
+ batch_indices = np.searchsorted(self._offsets, indices, side="right") - 1
+ return pa.Table.from_batches(
+ [
+ self._batches[batch_idx].slice(i - self._offsets[batch_idx], 1)
+ for batch_idx, i in zip(batch_indices, indices)
+ ],
+ schema=self._schema,
+ )
+
+ def fast_slice(self, offset=0, length=None) -> pa.Table:
+ """
+ Slice the Table using interpolation search.
+ The behavior is the same as `pyarrow.Table.slice` but it's significantly faster.
+
+ Interpolation search is used to find the start and end indexes of the batches we want to keep.
+ The batches to keep are then concatenated to form the sliced Table.
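+
+ Example (illustrative; assumes `table` is an `IndexedTableMixin` with at least 5 rows):
+
+ ```py
+ >>> table.fast_slice(offset=2, length=3).num_rows
+ 3
+ ```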
+ """
+ if offset < 0:
+ raise IndexError("Offset must be non-negative")
+ elif offset >= self._offsets[-1] or (length is not None and length <= 0):
+ return pa.Table.from_batches([], schema=self._schema)
+ i = _interpolation_search(self._offsets, offset)
+ if length is None or length + offset >= self._offsets[-1]:
+ batches = self._batches[i:]
+ batches[0] = batches[0].slice(offset - self._offsets[i])
+ else:
+ j = _interpolation_search(self._offsets, offset + length - 1)
+ batches = self._batches[i : j + 1]
+ batches[-1] = batches[-1].slice(0, offset + length - self._offsets[j])
+ batches[0] = batches[0].slice(offset - self._offsets[i])
+ return pa.Table.from_batches(batches, schema=self._schema)
+
+
+class Table(IndexedTableMixin):
+ """
+ Wraps a pyarrow Table by using composition.
+ This is the base class for `InMemoryTable`, `MemoryMappedTable` and `ConcatenationTable`.
+
+ It implements all the basic attributes/methods of the pyarrow Table class except
+ the Table transforms: `slice, filter, flatten, combine_chunks, cast, add_column,
+ append_column, remove_column, set_column, rename_columns` and `drop`.
+
+ The implementation of these methods differs for the subclasses.
+ """
+
+ def __init__(self, table: pa.Table):
+ super().__init__(table)
+ self.table = table
+
+ def __deepcopy__(self, memo: dict):
+ # arrow tables are immutable, so there's no need to copy self.table
+ # moreover calling deepcopy on a pyarrow table seems to make pa.total_allocated_bytes() decrease for some reason
+ # by adding it to the memo, self.table won't be copied
+ memo[id(self.table)] = self.table
+ # same for the recordbatches used by the index
+ memo[id(self._batches)] = list(self._batches)
+ return _deepcopy(self, memo)
+
+ def validate(self, *args, **kwargs):
+ """
+ Perform validation checks. An exception is raised if validation fails.
+
+ By default only cheap validation checks are run. Pass `full=True`
+ for thorough validation checks (potentially `O(n)`).
+
+ Args:
+ full (`bool`, defaults to `False`):
+ If `True`, run expensive checks, otherwise cheap checks only.
+
+ Raises:
+ `pa.lib.ArrowInvalid`: if validation fails
+ """
+ return self.table.validate(*args, **kwargs)
+
+ def equals(self, *args, **kwargs):
+ """
+ Check if contents of two tables are equal.
+
+ Args:
+ other ([`~datasets.table.Table`]):
+ Table to compare against.
+ check_metadata `bool`, defaults to `False`):
+ Whether schema metadata equality should be checked as well.
+
+ Returns:
+ `bool`
+ """
+ args = tuple(arg.table if isinstance(arg, Table) else arg for arg in args)
+ kwargs = {k: v.table if isinstance(v, Table) else v for k, v in kwargs.items()}
+ return self.table.equals(*args, **kwargs)
+
+ def to_batches(self, *args, **kwargs):
+ """
+ Convert Table to list of (contiguous) `RecordBatch` objects.
+
+ Args:
+ max_chunksize (`int`, defaults to `None`):
+ Maximum size for `RecordBatch` chunks. Individual chunks may be
+ smaller depending on the chunk layout of individual columns.
+
+ Returns:
+ `List[pyarrow.RecordBatch]`
+ """
+ return self.table.to_batches(*args, **kwargs)
+
+ def to_pydict(self, *args, **kwargs):
+ """
+ Convert the Table to a `dict` or `OrderedDict`.
+
+ Returns:
+ `dict`
+ """
+ return self.table.to_pydict(*args, **kwargs)
+
+ def to_pylist(self, *args, **kwargs):
+ """
+ Convert the Table to a list
+
+ Returns:
+ `list`
+ """
+ return self.table.to_pylist(*args, **kwargs)
+
+ def to_pandas(self, *args, **kwargs):
+ """
+ Convert to a pandas-compatible NumPy array or DataFrame, as appropriate.
+
+ Args:
+ memory_pool (`MemoryPool`, defaults to `None`):
+ Arrow MemoryPool to use for allocations. Uses the default memory
+ pool if not passed.
+ strings_to_categorical (`bool`, defaults to `False`):
+ Encode string (UTF8) and binary types to `pandas.Categorical`.
+ categories (`list`, defaults to `empty`):
+ List of fields that should be returned as `pandas.Categorical`. Only
+ applies to table-like data structures.
+ zero_copy_only (`bool`, defaults to `False`):
+ Raise an `ArrowException` if this function call would require copying
+ the underlying data.
+ integer_object_nulls (`bool`, defaults to `False`):
+ Cast integers with nulls to objects.
+ date_as_object (`bool`, defaults to `True`):
+ Cast dates to objects. If `False`, convert to `datetime64[ns]` dtype.
+ timestamp_as_object (`bool`, defaults to `False`):
+ Cast non-nanosecond timestamps (`np.datetime64`) to objects. This is
+ useful if you have timestamps that don't fit in the normal date
+ range of nanosecond timestamps (1678 CE-2262 CE).
+ If `False`, all timestamps are converted to `datetime64[ns]` dtype.
+ use_threads (`bool`, defaults to `True`):
+ Whether to parallelize the conversion using multiple threads.
+ deduplicate_objects (`bool`, defaults to `False`):
+ Do not create multiple copies of Python objects when converting, to save
+ on memory use. Conversion will be slower.
+ ignore_metadata (`bool`, defaults to `False`):
+ If `True`, do not use the 'pandas' metadata to reconstruct the
+ DataFrame index, if present.
+ safe (`bool`, defaults to `True`):
+ For certain data types, a cast is needed in order to store the
+ data in a pandas DataFrame or Series (e.g. timestamps are always
+ stored as nanoseconds in pandas). This option controls whether it
+ is a safe cast or not.
+ split_blocks (`bool`, defaults to `False`):
+ If `True`, generate one internal "block" for each column when
+ creating a pandas.DataFrame from a `RecordBatch` or `Table`. While this
+ can temporarily reduce memory, note that various pandas operations
+ can trigger "consolidation" which may balloon memory use.
+ self_destruct (`bool`, defaults to `False`):
+ EXPERIMENTAL: If `True`, attempt to deallocate the originating Arrow
+ memory while converting the Arrow object to pandas. If you use the
+ object after calling `to_pandas` with this option it will crash your
+ program.
+ types_mapper (`function`, defaults to `None`):
+ A function mapping a pyarrow DataType to a pandas `ExtensionDtype`.
+ This can be used to override the default pandas type for conversion
+ of built-in pyarrow types or in absence of `pandas_metadata` in the
+ Table schema. The function receives a pyarrow DataType and is
+ expected to return a pandas `ExtensionDtype` or `None` if the
+ default conversion should be used for that type. If you have
+ a dictionary mapping, you can pass `dict.get` as function.
+
+ Returns:
+ `pandas.Series` or `pandas.DataFrame`: `pandas.Series` or `pandas.DataFrame` depending on type of object
+ """
+ return self.table.to_pandas(*args, **kwargs)
+
+ def to_string(self, *args, **kwargs):
+ return self.table.to_string(*args, **kwargs)
+
+ def to_reader(self, max_chunksize: Optional[int] = None):
+ """
+ Convert the Table to a RecordBatchReader.
+
+ Note that this method is zero-copy: it merely exposes the same data under a different API.
+
+ Args:
+ max_chunksize (`int`, defaults to `None`):
+ Maximum size for RecordBatch chunks. Individual chunks may be smaller depending
+ on the chunk layout of individual columns.
+
+ Returns:
+ `pyarrow.RecordBatchReader`
+ """
+ return self.table.to_reader(max_chunksize=max_chunksize)
+
+ def field(self, *args, **kwargs):
+ """
+ Select a schema field by its column name or numeric index.
+
+ Args:
+ i (`Union[int, str]`):
+ The index or name of the field to retrieve.
+
+ Returns:
+ `pyarrow.Field`
+ """
+ return self.table.field(*args, **kwargs)
+
+ def column(self, *args, **kwargs):
+ """
+ Select a column by its column name, or numeric index.
+
+ Args:
+ i (`Union[int, str]`):
+ The index or name of the column to retrieve.
+
+ Returns:
+ `pyarrow.ChunkedArray`
+ """
+ return self.table.column(*args, **kwargs)
+
+ def itercolumns(self, *args, **kwargs):
+ """
+ Iterator over all columns in their numerical order.
+
+ Yields:
+ `pyarrow.ChunkedArray`
+ """
+ return self.table.itercolumns(*args, **kwargs)
+
+ @property
+ def schema(self):
+ """
+ Schema of the table and its columns.
+
+ Returns:
+ `pyarrow.Schema`
+ """
+ return self.table.schema
+
+ @property
+ def columns(self):
+ """
+ List of all columns in numerical order.
+
+ Returns:
+ `List[pa.ChunkedArray]`
+ """
+ return self.table.columns
+
+ @property
+ def num_columns(self):
+ """
+ Number of columns in this table.
+
+ Returns:
+ int
+ """
+ return self.table.num_columns
+
+ @property
+ def num_rows(self):
+ """
+ Number of rows in this table.
+
+ Due to the definition of a table, all columns have the same number of
+ rows.
+
+ Returns:
+ int
+ """
+ return self.table.num_rows
+
+ @property
+ def shape(self):
+ """
+ Dimensions of the table: (#rows, #columns).
+
+ Returns:
+ `(int, int)`: Number of rows and number of columns.
+ """
+ return self.table.shape
+
+ @property
+ def nbytes(self):
+ """
+ Total number of bytes consumed by the elements of the table.
+ """
+ return self.table.nbytes
+
+ @property
+ def column_names(self):
+ """
+ Names of the table's columns.
+ """
+ return self.table.column_names
+
+ def __eq__(self, other):
+ return self.equals(other)
+
+ def __getitem__(self, i):
+ return self.table[i]
+
+ def __len__(self):
+ return len(self.table)
+
+ def __repr__(self):
+ return self.table.__repr__().replace("pyarrow.Table", self.__class__.__name__)
+
+ def __str__(self):
+ return self.table.__str__().replace("pyarrow.Table", self.__class__.__name__)
+
+ def slice(self, *args, **kwargs):
+ """
+ Compute zero-copy slice of this Table.
+
+ Args:
+ offset (`int`, defaults to `0`):
+ Offset from start of table to slice.
+ length (`int`, defaults to `None`):
+ Length of slice (default is until end of table starting from
+ offset).
+
+ Returns:
+ `datasets.table.Table`
+ """
+ raise NotImplementedError()
+
+ def filter(self, *args, **kwargs):
+ """
+ Select records from a Table. See `pyarrow.compute.filter` for full usage.
+ """
+ raise NotImplementedError()
+
+ def flatten(self, *args, **kwargs):
+ """
+ Flatten this Table. Each column with a struct type is flattened
+ into one column per struct field. Other columns are left unchanged.
+
+ Args:
+ memory_pool (`MemoryPool`, defaults to `None`):
+ For memory allocations, if required, otherwise use default pool.
+
+ Returns:
+ `datasets.table.Table`
+ """
+ raise NotImplementedError()
+
+ def combine_chunks(self, *args, **kwargs):
+ """
+ Make a new table by combining the chunks this table has.
+
+ All the underlying chunks in the `ChunkedArray` of each column are
+ concatenated into zero or one chunk.
+
+ Args:
+ memory_pool (`MemoryPool`, defaults to `None`):
+ For memory allocations, if required, otherwise use default pool.
+
+ Returns:
+ `datasets.table.Table`
+ """
+ raise NotImplementedError()
+
+ def cast(self, *args, **kwargs):
+ """
+ Cast table values to another schema.
+
+ Args:
+ target_schema (`Schema`):
+ Schema to cast to, the names and order of fields must match.
+ safe (`bool`, defaults to `True`):
+ Check for overflows or other unsafe conversions.
+
+ Returns:
+ `datasets.table.Table`
+ """
+ raise NotImplementedError()
+
+ def replace_schema_metadata(self, *args, **kwargs):
+ """
+ EXPERIMENTAL: Create shallow copy of table by replacing schema
+ key-value metadata with the indicated new metadata (which may be `None`,
+ which deletes any existing metadata).
+
+ Args:
+ metadata (`dict`, defaults to `None`):
+
+ Returns:
+ `datasets.table.Table`: shallow_copy
+ """
+ raise NotImplementedError()
+
+ def add_column(self, *args, **kwargs):
+ """
+ Add column to Table at position.
+
+ A new table is returned with the column added, the original table
+ object is left unchanged.
+
+ Args:
+ i (`int`):
+ Index to place the column at.
+ field_ (`Union[str, pyarrow.Field]`):
+ If a string is passed then the type is deduced from the column
+ data.
+ column (`Union[pyarrow.Array, List[pyarrow.Array]]`):
+ Column data.
+
+ Returns:
+ `datasets.table.Table`: New table with the passed column added.
+ """
+ raise NotImplementedError()
+
+ def append_column(self, *args, **kwargs):
+ """
+ Append column at end of columns.
+
+ Args:
+ field_ (`Union[str, pyarrow.Field]`):
+ If a string is passed then the type is deduced from the column
+ data.
+ column (`Union[pyarrow.Array, List[pyarrow.Array]]`):
+ Column data.
+
+ Returns:
+ `datasets.table.Table`: New table with the passed column added.
+ """
+ raise NotImplementedError()
+
+ def remove_column(self, *args, **kwargs):
+ """
+ Create new Table with the indicated column removed.
+
+ Args:
+ i (`int`):
+ Index of column to remove.
+
+ Returns:
+ `datasets.table.Table`: New table without the column.
+ """
+ raise NotImplementedError()
+
+ def set_column(self, *args, **kwargs):
+ """
+ Replace column in Table at position.
+
+ Args:
+ i (`int`):
+ Index to place the column at.
+ field_ (`Union[str, pyarrow.Field]`):
+ If a string is passed then the type is deduced from the column
+ data.
+ column (`Union[pyarrow.Array, List[pyarrow.Array]]`):
+ Column data.
+
+ Returns:
+ `datasets.table.Table`: New table with the passed column set.
+ """
+ raise NotImplementedError()
+
+ def rename_columns(self, *args, **kwargs):
+ """
+ Create new table with columns renamed to provided names.
+ """
+ raise NotImplementedError()
+
+ def drop(self, *args, **kwargs):
+ """
+ Drop one or more columns and return a new table.
+
+ Args:
+ columns (`List[str]`):
+ List of field names referencing existing columns.
+
+ Raises:
+ `KeyError`: if any of the passed column names do not exist.
+
+ Returns:
+ `datasets.table.Table`: New table without the columns.
+ """
+ raise NotImplementedError()
+
+ def select(self, *args, **kwargs):
+ """
+ Select columns of the table.
+
+ Returns a new table with the specified columns, and metadata preserved.
+
+ Args:
+ columns (:obj:`Union[List[str], List[int]]`):
+ The column names or integer indices to select.
+
+ Returns:
+ `datasets.table.Table`: table with only a subset of the columns
+ """
+ raise NotImplementedError()
+
+
+class TableBlock(Table):
+ """
+ `TableBlock` is the allowed class inside a `ConcatenationTable`.
+ Only `MemoryMappedTable` and `InMemoryTable` are `TableBlock`.
+ This is because we don't want a `ConcatenationTable` made out of other `ConcatenationTable` objects.
+ """
+
+ pass
+
+
+class InMemoryTable(TableBlock):
+ """
+ The table is said to be in-memory when it is loaded into the user's RAM.
+
+ Pickling it copies all the data in memory.
+ Its implementation is simple and uses the underlying pyarrow Table methods directly.
+
+ This is different from the `MemoryMapped` table, for which pickling doesn't copy all the
+ data in memory. For a `MemoryMapped`, unpickling instead reloads the table from the disk.
+
+ `InMemoryTable` must be used when data fits in memory, while `MemoryMapped` tables are
+ reserved for data bigger than memory or for when you want the memory footprint of your
+ application to stay low.
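+
+ Example (illustrative sketch; the column names are arbitrary):
+
+ ```python
+ >>> from datasets.table import InMemoryTable
+ >>> table = InMemoryTable.from_pydict({"text": ["foo", "bar"], "label": [0, 1]})
+ >>> table.num_rows
+ 2
+ >>> table.column_names
+ ['text', 'label']
+ ```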
+ """
+
+ @classmethod
+ def from_file(cls, filename: str):
+ table = _in_memory_arrow_table_from_file(filename)
+ return cls(table)
+
+ @classmethod
+ def from_buffer(cls, buffer: pa.Buffer):
+ table = _in_memory_arrow_table_from_buffer(buffer)
+ return cls(table)
+
+ @classmethod
+ def from_pandas(cls, *args, **kwargs):
+ """
+ Convert pandas.DataFrame to an Arrow Table.
+
+ The column types in the resulting Arrow Table are inferred from the
+ dtypes of the pandas.Series in the DataFrame. In the case of non-object
+ Series, the NumPy dtype is translated to its Arrow equivalent. In the
+ case of `object`, we need to guess the datatype by looking at the
+ Python objects in this Series.
+
+ Be aware that Series of the `object` dtype don't carry enough
+ information to always lead to a meaningful Arrow type. In the case that
+ we cannot infer a type, e.g. because the DataFrame is of length 0 or
+ the Series only contains `None/nan` objects, the type is set to
+ null. This behavior can be avoided by constructing an explicit schema
+ and passing it to this function.
+
+ Args:
+ df (`pandas.DataFrame`):
+ schema (`pyarrow.Schema`, *optional*):
+ The expected schema of the Arrow Table. This can be used to
+ indicate the type of columns if we cannot infer it automatically.
+ If passed, the output will have exactly this schema. Columns
+ specified in the schema that are not found in the DataFrame columns
+ or its index will raise an error. Additional columns or index
+ levels in the DataFrame which are not specified in the schema will
+ be ignored.
+ preserve_index (`bool`, *optional*):
+ Whether to store the index as an additional column in the resulting
+ `Table`. The default of None will store the index as a column,
+ except for RangeIndex which is stored as metadata only. Use
+ `preserve_index=True` to force it to be stored as a column.
+ nthreads (`int`, defaults to `None` (may use up to system CPU count threads)):
+ If greater than 1, convert columns to Arrow in parallel using
+ indicated number of threads.
+ columns (`List[str]`, *optional*):
+ List of columns to be converted. If `None`, use all columns.
+ safe (`bool`, defaults to `True`):
+ Check for overflows or other unsafe conversions.
+
+ Returns:
+ `datasets.table.Table`:
+
+ Examples:
+ ```python
+ >>> import pandas as pd
+ >>> df = pd.DataFrame({
+ ... 'int': [1, 2],
+ ... 'str': ['a', 'b']
+ ... })
+ >>> table = InMemoryTable.from_pandas(df)
+
+ ```
+ """
+ return cls(pa.Table.from_pandas(*args, **kwargs))
+
+ @classmethod
+ def from_arrays(cls, *args, **kwargs):
+ """
+ Construct a Table from Arrow arrays.
+
+ Args:
+ arrays (`List[Union[pyarrow.Array, pyarrow.ChunkedArray]]`):
+ Equal-length arrays that should form the table.
+ names (`List[str]`, *optional*):
+ Names for the table columns. If not passed, schema must be passed.
+ schema (`Schema`, defaults to `None`):
+ Schema for the created table. If not passed, names must be passed.
+ metadata (`Union[dict, Mapping]`, defaults to `None`):
+ Optional metadata for the schema (if inferred).
+
+ Returns:
+ `datasets.table.Table`
+ """
+ return cls(pa.Table.from_arrays(*args, **kwargs))
+
+ @classmethod
+ def from_pydict(cls, *args, **kwargs):
+ """
+ Construct a Table from Arrow arrays or columns.
+
+ Args:
+ mapping (`Union[dict, Mapping]`):
+ A mapping of strings to Arrays or Python lists.
+ schema (`Schema`, defaults to `None`):
+ If not passed, will be inferred from the Mapping values
+ metadata (`Union[dict, Mapping]`, defaults to `None`):
+ Optional metadata for the schema (if inferred).
+
+ Returns:
+ `datasets.table.Table`
+ """
+ return cls(pa.Table.from_pydict(*args, **kwargs))
+
+ @classmethod
+ def from_pylist(cls, mapping, *args, **kwargs):
+ """
+ Construct a Table from list of rows / dictionaries.
+
+ Args:
+ mapping (`List[dict]`):
+ A list of rows, where each row is a mapping of strings to values.
+ schema (`Schema`, defaults to `None`):
+ If not passed, will be inferred from the Mapping values
+ metadata (`Union[dict, Mapping]`, defaults to `None`):
+ Optional metadata for the schema (if inferred).
+
+ Returns:
+ `datasets.table.Table`
+ """
+ return cls(pa.Table.from_pylist(mapping, *args, **kwargs))
+
+ @classmethod
+ def from_batches(cls, *args, **kwargs):
+ """
+ Construct a Table from a sequence or iterator of Arrow `RecordBatches`.
+
+ Args:
+ batches (`Union[Sequence[pyarrow.RecordBatch], Iterator[pyarrow.RecordBatch]]`):
+ Sequence of `RecordBatch` to be converted, all schemas must be equal.
+ schema (`Schema`, defaults to `None`):
+ If not passed, will be inferred from the first `RecordBatch`.
+
+ Returns:
+ `datasets.table.Table`:
+ """
+ return cls(pa.Table.from_batches(*args, **kwargs))
+
+ def slice(self, offset=0, length=None):
+ """
+ Compute zero-copy slice of this Table.
+
+ Args:
+ offset (`int`, defaults to `0`):
+ Offset from start of table to slice.
+ length (`int`, defaults to `None`):
+ Length of slice (default is until end of table starting from
+ offset).
+
+ Returns:
+ `datasets.table.Table`
+ """
+ # Use fast slicing here
+ return InMemoryTable(self.fast_slice(offset=offset, length=length))
+
+ def filter(self, *args, **kwargs):
+ """
+ Select records from a Table. See `pyarrow.compute.filter` for full usage.
+ """
+ return InMemoryTable(self.table.filter(*args, **kwargs))
+
+ def flatten(self, *args, **kwargs):
+ """
+ Flatten this Table. Each column with a struct type is flattened
+ into one column per struct field. Other columns are left unchanged.
+
+ Args:
+ memory_pool (`MemoryPool`, defaults to `None`):
+ For memory allocations, if required, otherwise use default pool.
+
+ Returns:
+ `datasets.table.Table`
+ """
+ return InMemoryTable(table_flatten(self.table, *args, **kwargs))
+
+ def combine_chunks(self, *args, **kwargs):
+ """
+ Make a new table by combining the chunks this table has.
+
+ All the underlying chunks in the `ChunkedArray` of each column are
+ concatenated into zero or one chunk.
+
+ Args:
+ memory_pool (`MemoryPool`, defaults to `None`):
+ For memory allocations, if required, otherwise use default pool.
+
+ Returns:
+ `datasets.table.Table`
+ """
+ return InMemoryTable(self.table.combine_chunks(*args, **kwargs))
+
+ def cast(self, *args, **kwargs):
+ """
+ Cast table values to another schema.
+
+ Args:
+ target_schema (`Schema`):
+ Schema to cast to, the names and order of fields must match.
+ safe (`bool`, defaults to `True`):
+ Check for overflows or other unsafe conversions.
+
+ Returns:
+ `datasets.table.Table`
+ """
+ return InMemoryTable(table_cast(self.table, *args, **kwargs))
+
+ def replace_schema_metadata(self, *args, **kwargs):
+ """
+ EXPERIMENTAL: Create shallow copy of table by replacing schema
+ key-value metadata with the indicated new metadata (which may be `None`,
+ which deletes any existing metadata).
+
+ Args:
+ metadata (`dict`, defaults to `None`):
+
+ Returns:
+ `datasets.table.Table`: shallow_copy
+ """
+ return InMemoryTable(self.table.replace_schema_metadata(*args, **kwargs))
+
+ def add_column(self, *args, **kwargs):
+ """
+ Add column to Table at position.
+
+ A new table is returned with the column added, the original table
+ object is left unchanged.
+
+ Args:
+ i (`int`):
+ Index to place the column at.
+ field_ (`Union[str, pyarrow.Field]`):
+ If a string is passed then the type is deduced from the column
+ data.
+ column (`Union[pyarrow.Array, List[pyarrow.Array]]`):
+ Column data.
+
+ Returns:
+ `datasets.table.Table`: New table with the passed column added.
+ """
+ return InMemoryTable(self.table.add_column(*args, **kwargs))
+
+ def append_column(self, *args, **kwargs):
+ """
+ Append column at end of columns.
+
+ Args:
+ field_ (`Union[str, pyarrow.Field]`):
+ If a string is passed then the type is deduced from the column
+ data.
+ column (`Union[pyarrow.Array, List[pyarrow.Array]]`):
+ Column data.
+
+ Returns:
+ `datasets.table.Table`:
+ New table with the passed column added.
+ """
+ return InMemoryTable(self.table.append_column(*args, **kwargs))
+
+ def remove_column(self, *args, **kwargs):
+ """
+ Create new Table with the indicated column removed.
+
+ Args:
+ i (`int`):
+ Index of column to remove.
+
+ Returns:
+ `datasets.table.Table`:
+ New table without the column.
+ """
+ return InMemoryTable(self.table.remove_column(*args, **kwargs))
+
+ def set_column(self, *args, **kwargs):
+ """
+ Replace column in Table at position.
+
+ Args:
+ i (`int`):
+ Index to place the column at.
+ field_ (`Union[str, pyarrow.Field]`):
+ If a string is passed then the type is deduced from the column
+ data.
+ column (`Union[pyarrow.Array, List[pyarrow.Array]]`):
+ Column data.
+
+ Returns:
+ `datasets.table.Table`:
+ New table with the passed column set.
+ """
+ return InMemoryTable(self.table.set_column(*args, **kwargs))
+
+ def rename_columns(self, *args, **kwargs):
+ """
+ Create new table with columns renamed to provided names.
+ """
+ return InMemoryTable(self.table.rename_columns(*args, **kwargs))
+
+ def drop(self, *args, **kwargs):
+ """
+ Drop one or more columns and return a new table.
+
+ Args:
+ columns (`List[str]`):
+ List of field names referencing existing columns.
+
+ Raises:
+ `KeyError`: if any of the passed column names do not exist.
+
+ Returns:
+ `datasets.table.Table`:
+ New table without the columns.
+ """
+ return InMemoryTable(self.table.drop(*args, **kwargs))
+
+ def select(self, *args, **kwargs):
+ """
+ Select columns of the table.
+
+ Returns a new table with the specified columns, and metadata preserved.
+
+ Args:
+ columns (:obj:`Union[List[str], List[int]]`):
+ The column names or integer indices to select.
+
+ Returns:
+ :class:`datasets.table.Table`: New table with the specified columns, and metadata preserved.
+ """
+ return InMemoryTable(self.table.select(*args, **kwargs))
+
+
+# The MemoryMappedTable needs replays to properly reload tables from the disk
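+ # A replay is a (method_name, args, kwargs) tuple, e.g. ("flatten", (), {}) or ("cast", (schema,), {}),
+ # that is re-applied to the freshly memory-mapped pyarrow table when the MemoryMappedTable is reloaded
+ # (see MemoryMappedTable._apply_replays below).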
+Replay = Tuple[str, tuple, dict]
+
+
+class MemoryMappedTable(TableBlock):
+ """
+ The table is said to be memory-mapped when it doesn't use the user's RAM but loads the data
+ from the disk instead.
+
+ Pickling it doesn't copy the data into memory.
+ Instead, only the path to the memory mapped arrow file is pickled, as well as the list
+ of transforms to "replay" when reloading the table from the disk.
+
+ Its implementation requires storing a history of all the transforms that were applied
+ to the underlying pyarrow Table, so that they can be "replayed" when reloading the Table
+ from the disk.
+
+ This is different from the `InMemoryTable` table, for which pickling does copy all the
+ data in memory.
+
+ `InMemoryTable` must be used when data fits in memory, while `MemoryMapped` tables are
+ reserved for data bigger than memory or for when you want the memory footprint of your
+ application to stay low.
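+
+ Example (illustrative sketch; assumes `path/to/data.arrow` is an existing arrow file with a single column):
+
+ ```python
+ >>> from datasets.table import MemoryMappedTable
+ >>> table = MemoryMappedTable.from_file("path/to/data.arrow")
+ >>> table = table.rename_columns(["new_name"])  # recorded as a replay on the new table
+ >>> table.replays
+ [('rename_columns', (['new_name'],), {})]
+ ```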
+ """
+
+ def __init__(self, table: pa.Table, path: str, replays: Optional[List[Replay]] = None):
+ super().__init__(table)
+ self.path = os.path.abspath(path)
+ self.replays: List[Replay] = replays if replays is not None else []
+
+ @classmethod
+ def from_file(cls, filename: str, replays=None):
+ table = _memory_mapped_arrow_table_from_file(filename)
+ table = cls._apply_replays(table, replays)
+ return cls(table, filename, replays)
+
+ def __getstate__(self):
+ return {"path": self.path, "replays": self.replays}
+
+ def __setstate__(self, state):
+ path = state["path"]
+ replays = state["replays"]
+ table = _memory_mapped_arrow_table_from_file(path)
+ table = self._apply_replays(table, replays)
+ MemoryMappedTable.__init__(self, table, path=path, replays=replays)
+
+ @staticmethod
+ def _apply_replays(table: pa.Table, replays: Optional[List[Replay]] = None) -> pa.Table:
+ if replays is not None:
+ for name, args, kwargs in replays:
+ if name == "cast":
+ table = table_cast(table, *args, **kwargs)
+ elif name == "flatten":
+ table = table_flatten(table, *args, **kwargs)
+ else:
+ table = getattr(table, name)(*args, **kwargs)
+ return table
+
+ def _append_replay(self, replay: Replay) -> List[Replay]:
+ replays = copy.deepcopy(self.replays)
+ replays.append(replay)
+ return replays
+
+ def slice(self, offset=0, length=None):
+ """
+ Compute zero-copy slice of this Table.
+
+ Args:
+ offset (`int`, defaults to `0`):
+ Offset from start of table to slice.
+ length (`int`, defaults to `None`):
+ Length of slice (default is until end of table starting from
+ offset).
+
+ Returns:
+ `datasets.table.Table`
+ """
+ replay = ("slice", (offset, length), {})
+ replays = self._append_replay(replay)
+ # Use fast slicing here
+ return MemoryMappedTable(self.fast_slice(offset=offset, length=length), self.path, replays)
+
+ def filter(self, *args, **kwargs):
+ """
+ Select records from a Table. See `pyarrow.compute.filter` for full usage.
+ """
+ replay = ("filter", copy.deepcopy(args), copy.deepcopy(kwargs))
+ replays = self._append_replay(replay)
+ return MemoryMappedTable(self.table.filter(*args, **kwargs), self.path, replays)
+
+ def flatten(self, *args, **kwargs):
+ """
+ Flatten this Table. Each column with a struct type is flattened
+ into one column per struct field. Other columns are left unchanged.
+
+ Args:
+ memory_pool (`MemoryPool`, defaults to `None`):
+ For memory allocations, if required, otherwise use default pool.
+
+ Returns:
+ `datasets.table.Table`
+ """
+ replay = ("flatten", copy.deepcopy(args), copy.deepcopy(kwargs))
+ replays = self._append_replay(replay)
+ return MemoryMappedTable(table_flatten(self.table, *args, **kwargs), self.path, replays)
+
+ def combine_chunks(self, *args, **kwargs):
+ """
+ Make a new table by combining the chunks this table has.
+
+ All the underlying chunks in the ChunkedArray of each column are
+ concatenated into zero or one chunk.
+
+ Args:
+ memory_pool (`MemoryPool`, defaults to `None`):
+ For memory allocations, if required, otherwise use default pool.
+
+ Returns:
+ `datasets.table.Table`
+ """
+ replay = ("combine_chunks", copy.deepcopy(args), copy.deepcopy(kwargs))
+ replays = self._append_replay(replay)
+ return MemoryMappedTable(self.table.combine_chunks(*args, **kwargs), self.path, replays)
+
+ def cast(self, *args, **kwargs):
+ """
+ Cast table values to another schema.
+
+ Args:
+ target_schema (`Schema`):
+ Schema to cast to, the names and order of fields must match.
+ safe (`bool`, defaults to `True`):
+ Check for overflows or other unsafe conversions.
+
+ Returns:
+ `datasets.table.Table`
+ """
+ replay = ("cast", copy.deepcopy(args), copy.deepcopy(kwargs))
+ replays = self._append_replay(replay)
+ return MemoryMappedTable(table_cast(self.table, *args, **kwargs), self.path, replays)
+
+ def replace_schema_metadata(self, *args, **kwargs):
+ """
+ EXPERIMENTAL: Create shallow copy of table by replacing schema
+ key-value metadata with the indicated new metadata (which may be `None`,
+ which deletes any existing metadata).
+
+ Args:
+ metadata (`dict`, defaults to `None`):
+
+ Returns:
+ `datasets.table.Table`: shallow_copy
+ """
+ replay = ("replace_schema_metadata", copy.deepcopy(args), copy.deepcopy(kwargs))
+ replays = self._append_replay(replay)
+ return MemoryMappedTable(self.table.replace_schema_metadata(*args, **kwargs), self.path, replays)
+
+ def add_column(self, *args, **kwargs):
+ """
+ Add column to Table at position.
+
+ A new table is returned with the column added, the original table
+ object is left unchanged.
+
+ Args:
+ i (`int`):
+ Index to place the column at.
+ field_ (`Union[str, pyarrow.Field]`):
+ If a string is passed then the type is deduced from the column
+ data.
+ column (`Union[pyarrow.Array, List[pyarrow.Array]]`):
+ Column data.
+
+ Returns:
+ `datasets.table.Table`: New table with the passed column added.
+ """
+ replay = ("add_column", copy.deepcopy(args), copy.deepcopy(kwargs))
+ replays = self._append_replay(replay)
+ return MemoryMappedTable(self.table.add_column(*args, **kwargs), self.path, replays)
+
+ def append_column(self, *args, **kwargs):
+ """
+ Append column at end of columns.
+
+ Args:
+ field_ (`Union[str, pyarrow.Field]`):
+ If a string is passed then the type is deduced from the column
+ data.
+ column (`Union[pyarrow.Array, List[pyarrow.Array]]`):
+ Column data.
+
+ Returns:
+ `datasets.table.Table`:
+ New table with the passed column added.
+ """
+ replay = ("append_column", copy.deepcopy(args), copy.deepcopy(kwargs))
+ replays = self._append_replay(replay)
+ return MemoryMappedTable(self.table.append_column(*args, **kwargs), self.path, replays)
+
+ def remove_column(self, *args, **kwargs):
+ """
+ Create new Table with the indicated column removed.
+
+ Args:
+ i (`int`):
+ Index of column to remove.
+
+ Returns:
+ `datasets.table.Table`:
+ New table without the column.
+ """
+ replay = ("remove_column", copy.deepcopy(args), copy.deepcopy(kwargs))
+ replays = self._append_replay(replay)
+ return MemoryMappedTable(self.table.remove_column(*args, **kwargs), self.path, replays)
+
+ def set_column(self, *args, **kwargs):
+ """
+ Replace column in Table at position.
+
+ Args:
+ i (`int`):
+ Index to place the column at.
+ field_ (`Union[str, pyarrow.Field]`):
+ If a string is passed then the type is deduced from the column
+ data.
+ column (`Union[pyarrow.Array, List[pyarrow.Array]]`):
+ Column data.
+
+ Returns:
+ `datasets.table.Table`:
+ New table with the passed column set.
+ """
+ replay = ("set_column", copy.deepcopy(args), copy.deepcopy(kwargs))
+ replays = self._append_replay(replay)
+ return MemoryMappedTable(self.table.set_column(*args, **kwargs), self.path, replays)
+
+ def rename_columns(self, *args, **kwargs):
+ """
+ Create new table with columns renamed to provided names.
+ """
+ replay = ("rename_columns", copy.deepcopy(args), copy.deepcopy(kwargs))
+ replays = self._append_replay(replay)
+ return MemoryMappedTable(self.table.rename_columns(*args, **kwargs), self.path, replays)
+
+ def drop(self, *args, **kwargs):
+ """
+ Drop one or more columns and return a new table.
+
+ Args:
+ columns (`List[str]`):
+ List of field names referencing existing columns.
+
+ Raises:
+ `KeyError`: if any of the passed column names do not exist.
+
+ Returns:
+ `datasets.table.Table`:
+ New table without the columns.
+ """
+ replay = ("drop", copy.deepcopy(args), copy.deepcopy(kwargs))
+ replays = self._append_replay(replay)
+ return MemoryMappedTable(self.table.drop(*args, **kwargs), self.path, replays)
+
+ def select(self, *args, **kwargs):
+ """
+ Select columns of the table.
+
+ Returns a new table with the specified columns, and metadata preserved.
+
+ Args:
+ columns (:obj:`Union[List[str], List[int]]`):
+ The column names or integer indices to select.
+
+ Returns:
+ :class:`datasets.table.Table`: New table with the specified columns, and metadata preserved.
+ """
+ replay = ("select", copy.deepcopy(args), copy.deepcopy(kwargs))
+ replays = self._append_replay(replay)
+ return MemoryMappedTable(self.table.select(*args, **kwargs), self.path, replays)
+
+
+# A ConcatenationTable is the concatenation of several tables.
+ # The ``blocks`` attribute stores a list of lists of blocks.
+# The first axis concatenates the tables along the axis 0 (it appends rows),
+# while the second axis concatenates tables along the axis 1 (it appends columns).
+TableBlockContainer = TypeVar("TableBlockContainer", TableBlock, List[TableBlock], List[List[TableBlock]])
+
+
+class ConcatenationTable(Table):
+ """
+ The table comes from the concatenation of several tables called blocks.
+ It enables concatenation on both axis 0 (append rows) and axis 1 (append columns).
+
+ The underlying tables are called "blocks" and can be either `InMemoryTable`
+ or `MemoryMappedTable` objects.
+ This allows combining tables that come from memory or that are memory mapped.
+ When a `ConcatenationTable` is pickled, each block is pickled:
+ - the `InMemoryTable` objects are pickled by copying all the data in memory.
+ - the `MemoryMappedTable` objects are pickled without copying the data into memory.
+ Instead, only the path to the memory mapped arrow file is pickled, as well as the list
+ of transforms to "replay" when reloading the table from the disk.
+
+ Its implementation requires storing each block separately.
+ The `blocks` attribute stores a list of lists of blocks.
+ The first axis concatenates the tables along the axis 0 (it appends rows),
+ while the second axis concatenates tables along the axis 1 (it appends columns).
+
+ If some columns are missing when concatenating on axis 0, they are filled with null values.
+ This is done using `pyarrow.concat_tables(tables, promote=True)`.
+
+ You can access the fully combined table by accessing the `ConcatenationTable.table` attribute,
+ and the blocks by accessing the `ConcatenationTable.blocks` attribute.
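+
+ Example (illustrative sketch; note that consecutive in-memory blocks are consolidated into a single block):
+
+ ```python
+ >>> from datasets.table import InMemoryTable, ConcatenationTable
+ >>> t1 = InMemoryTable.from_pydict({"a": [0, 1]})
+ >>> t2 = InMemoryTable.from_pydict({"a": [2, 3]})
+ >>> table = ConcatenationTable.from_tables([t1, t2])
+ >>> table.num_rows
+ 4
+ >>> len(table.blocks)
+ 1
+ ```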
+ """
+
+ def __init__(self, table: pa.Table, blocks: List[List[TableBlock]]):
+ super().__init__(table)
+ self.blocks = blocks
+ # Check that all the blocks have the right type.
+ # Only InMemoryTable and MemoryMappedTable are allowed.
+ for subtables in blocks:
+ for subtable in subtables:
+ if not isinstance(subtable, TableBlock):
+ raise TypeError(
+ "The blocks of a ConcatenationTable must be InMemoryTable or MemoryMappedTable objects"
+ f", but got {subtable}."
+ )
+
+ def __getstate__(self):
+ return {"blocks": self.blocks}
+
+ def __setstate__(self, state):
+ blocks = state["blocks"]
+ table = self._concat_blocks_horizontally_and_vertically(blocks)
+ ConcatenationTable.__init__(self, table, blocks=blocks)
+
+ @staticmethod
+ def _concat_blocks(blocks: List[Union[TableBlock, pa.Table]], axis: int = 0) -> pa.Table:
+ pa_tables = [table.table if hasattr(table, "table") else table for table in blocks]
+ if axis == 0:
+ # we set promote=True to fill missing columns with null values
+ if config.PYARROW_VERSION.major < 14:
+ return pa.concat_tables(pa_tables, promote=True)
+ else:
+ return pa.concat_tables(pa_tables, promote_options="default")
+ elif axis == 1:
+ for i, table in enumerate(pa_tables):
+ if i == 0:
+ pa_table = table
+ else:
+ for name, col in zip(table.column_names, table.columns):
+ pa_table = pa_table.append_column(name, col)
+ return pa_table
+ else:
+ raise ValueError("'axis' must be either 0 or 1")
+
+ @classmethod
+ def _concat_blocks_horizontally_and_vertically(cls, blocks: List[List[TableBlock]]) -> pa.Table:
+ pa_tables_to_concat_vertically = []
+ for i, tables in enumerate(blocks):
+ if not tables:
+ continue
+ pa_table_horizontally_concatenated = cls._concat_blocks(tables, axis=1)
+ pa_tables_to_concat_vertically.append(pa_table_horizontally_concatenated)
+ return cls._concat_blocks(pa_tables_to_concat_vertically, axis=0)
+
+ @classmethod
+ def _merge_blocks(cls, blocks: TableBlockContainer, axis: Optional[int] = None) -> TableBlockContainer:
+ if axis is not None:
+ merged_blocks = []
+ for is_in_memory, block_group in groupby(blocks, key=lambda x: isinstance(x, InMemoryTable)):
+ if is_in_memory:
+ block_group = [InMemoryTable(cls._concat_blocks(list(block_group), axis=axis))]
+ merged_blocks += list(block_group)
+ else: # both
+ merged_blocks = [cls._merge_blocks(row_block, axis=1) for row_block in blocks]
+ if all(len(row_block) == 1 for row_block in merged_blocks):
+ merged_blocks = cls._merge_blocks(
+ [block for row_block in merged_blocks for block in row_block], axis=0
+ )
+ return merged_blocks
+
+ @classmethod
+ def _consolidate_blocks(cls, blocks: TableBlockContainer) -> TableBlockContainer:
+ if isinstance(blocks, TableBlock):
+ return blocks
+ elif isinstance(blocks[0], TableBlock):
+ return cls._merge_blocks(blocks, axis=0)
+ else:
+ return cls._merge_blocks(blocks)
+
+ @classmethod
+ def from_blocks(cls, blocks: TableBlockContainer) -> "ConcatenationTable":
+ blocks = cls._consolidate_blocks(blocks)
+ if isinstance(blocks, TableBlock):
+ table = blocks
+ return cls(table.table, [[table]])
+ elif isinstance(blocks[0], TableBlock):
+ table = cls._concat_blocks(blocks, axis=0)
+ blocks = [[t] for t in blocks]
+ return cls(table, blocks)
+ else:
+ table = cls._concat_blocks_horizontally_and_vertically(blocks)
+ return cls(table, blocks)
+
+ @classmethod
+ def from_tables(cls, tables: List[Union[pa.Table, Table]], axis: int = 0) -> "ConcatenationTable":
+ """Create `ConcatenationTable` from list of tables.
+
+ Args:
+ tables (list of `Table` or list of `pyarrow.Table`):
+ List of tables.
+ axis (`{0, 1}`, defaults to `0`, meaning over rows):
+ Axis to concatenate over, where `0` means over rows (vertically) and `1` means over columns
+ (horizontally).
+
+
+ """
+
+ def to_blocks(table: Union[pa.Table, Table]) -> List[List[TableBlock]]:
+ if isinstance(table, pa.Table):
+ return [[InMemoryTable(table)]]
+ elif isinstance(table, ConcatenationTable):
+ return copy.deepcopy(table.blocks)
+ else:
+ return [[table]]
+
+ def _slice_row_block(row_block: List[TableBlock], length: int) -> Tuple[List[TableBlock], List[TableBlock]]:
+ sliced = [table.slice(0, length) for table in row_block]
+ remainder = [table.slice(length, len(row_block[0]) - length) for table in row_block]
+ return sliced, remainder
+
+ def _split_both_like(
+ result: List[List[TableBlock]], blocks: List[List[TableBlock]]
+ ) -> Tuple[List[List[TableBlock]], List[List[TableBlock]]]:
+ """
+ Make sure each row_block contains the same num_rows to be able to concatenate them on axis=1.
+
+ To do so, we modify both blocks sets to have the same row_blocks boundaries.
+ For example, if `result` has 2 row_blocks of 3 rows and `blocks` has 3 row_blocks of 2 rows,
+ we modify both to have 4 row_blocks of size 2, 1, 1 and 2:
+
+ [ x x x | x x x ]
+ + [ y y | y y | y y ]
+ -----------------------------
+ = [ x x | x | x | x x ]
+ [ y y | y | y | y y ]
+
+ """
+ result, blocks = list(result), list(blocks)
+ new_result, new_blocks = [], []
+ while result and blocks:
+ # we slice the longest row block to save two row blocks of same length
+ # and we replace the long row block by its remainder if necessary
+ if len(result[0][0]) > len(blocks[0][0]):
+ new_blocks.append(blocks[0])
+ sliced, result[0] = _slice_row_block(result[0], len(blocks.pop(0)[0]))
+ new_result.append(sliced)
+ elif len(result[0][0]) < len(blocks[0][0]):
+ new_result.append(result[0])
+ sliced, blocks[0] = _slice_row_block(blocks[0], len(result.pop(0)[0]))
+ new_blocks.append(sliced)
+ else:
+ new_result.append(result.pop(0))
+ new_blocks.append(blocks.pop(0))
+ if result or blocks:
+ raise ValueError("Failed to concatenate on axis=1 because tables don't have the same number of rows")
+ return new_result, new_blocks
+
+ def _extend_blocks(
+ result: List[List[TableBlock]], blocks: List[List[TableBlock]], axis: int = 0
+ ) -> List[List[TableBlock]]:
+ if axis == 0:
+ result.extend(blocks)
+ elif axis == 1:
+ # We make sure each row_block has the same num_rows
+ result, blocks = _split_both_like(result, blocks)
+ for i, row_block in enumerate(blocks):
+ result[i].extend(row_block)
+ return result
+
+ blocks = to_blocks(tables[0])
+ for table in tables[1:]:
+ table_blocks = to_blocks(table)
+ blocks = _extend_blocks(blocks, table_blocks, axis=axis)
+ return cls.from_blocks(blocks)
+
+ @property
+ def _slices(self):
+ offset = 0
+ for tables in self.blocks:
+ length = len(tables[0])
+ yield (offset, length)
+ offset += length
+
+ def slice(self, offset=0, length=None):
+ """
+ Compute zero-copy slice of this Table.
+
+ Args:
+ offset (`int`, defaults to `0`):
+ Offset from start of table to slice.
+ length (`int`, defaults to `None`):
+ Length of slice (default is until end of table starting from
+ offset).
+
+ Returns:
+ `datasets.table.Table`
+ """
+ table = self.table.slice(offset, length=length)
+ length = length if length is not None else self.num_rows - offset
+ blocks = []
+ for tables in self.blocks:
+ n_rows = len(tables[0])
+ if length == 0:
+ break
+ elif n_rows <= offset:
+ offset = offset - n_rows
+ elif n_rows <= offset + length:
+ blocks.append([t.slice(offset) for t in tables])
+ length, offset = length + offset - n_rows, 0
+ else:
+ blocks.append([t.slice(offset, length) for t in tables])
+ length, offset = 0, 0
+ return ConcatenationTable(table, blocks)
+
+ def filter(self, mask, *args, **kwargs):
+ """
+ Select records from a Table. See `pyarrow.compute.filter` for full usage.
+ """
+ table = self.table.filter(mask, *args, **kwargs)
+ blocks = []
+ for (offset, length), tables in zip(self._slices, self.blocks):
+ submask = mask.slice(offset, length)
+ blocks.append([t.filter(submask, *args, **kwargs) for t in tables])
+ return ConcatenationTable(table, blocks)
+
+ def flatten(self, *args, **kwargs):
+ """
+ Flatten this Table. Each column with a struct type is flattened
+ into one column per struct field. Other columns are left unchanged.
+
+ Args:
+ memory_pool (`MemoryPool`, defaults to `None`):
+ For memory allocations, if required, otherwise use default pool.
+
+ Returns:
+ `datasets.table.Table`
+ """
+ table = table_flatten(self.table, *args, **kwargs)
+ blocks = []
+ for tables in self.blocks:
+ blocks.append([t.flatten(*args, **kwargs) for t in tables])
+ return ConcatenationTable(table, blocks)
+
+ def combine_chunks(self, *args, **kwargs):
+ """
+ Make a new table by combining the chunks this table has.
+
+ All the underlying chunks in the `ChunkedArray` of each column are
+ concatenated into zero or one chunk.
+
+ Args:
+ memory_pool (`MemoryPool`, defaults to `None`):
+ For memory allocations, if required, otherwise use default pool.
+
+ Returns:
+ `datasets.table.Table`
+ """
+ table = self.table.combine_chunks(*args, **kwargs)
+ blocks = []
+ for tables in self.blocks:
+ blocks.append([t.combine_chunks(*args, **kwargs) for t in tables])
+ return ConcatenationTable(table, blocks)
+
+ def cast(self, target_schema, *args, **kwargs):
+ """
+ Cast table values to another schema.
+
+ Args:
+ target_schema (`Schema`):
+ Schema to cast to, the names and order of fields must match.
+ safe (`bool`, defaults to `True`):
+ Check for overflows or other unsafe conversions.
+
+ Returns:
+ `datasets.table.Table`
+ """
+ from .features import Features
+
+ table = table_cast(self.table, target_schema, *args, **kwargs)
+ target_features = Features.from_arrow_schema(target_schema)
+ blocks = []
+ for subtables in self.blocks:
+ new_tables = []
+ fields = list(target_schema)
+ for subtable in subtables:
+ subfields = []
+ for name in subtable.column_names:
+ subfields.append(fields.pop(next(i for i, field in enumerate(fields) if field.name == name)))
+ subfeatures = Features({subfield.name: target_features[subfield.name] for subfield in subfields})
+ subschema = subfeatures.arrow_schema
+ new_tables.append(subtable.cast(subschema, *args, **kwargs))
+ blocks.append(new_tables)
+ return ConcatenationTable(table, blocks)
+
+ def replace_schema_metadata(self, *args, **kwargs):
+ """
+ EXPERIMENTAL: Create shallow copy of table by replacing schema
+ key-value metadata with the indicated new metadata (which may be `None`,
+ which deletes any existing metadata).
+
+ Args:
+ metadata (`dict`, defaults to `None`):
+
+ Returns:
+ `datasets.table.Table`: shallow_copy
+ """
+ table = self.table.replace_schema_metadata(*args, **kwargs)
+ blocks = []
+ for tables in self.blocks:
+ blocks.append([t.replace_schema_metadata(*args, **kwargs) for t in tables])
+ return ConcatenationTable(table, blocks)
+
+ def add_column(self, *args, **kwargs):
+ """
+ Add column to Table at position.
+
+ A new table is returned with the column added, the original table
+ object is left unchanged.
+
+ Args:
+ i (`int`):
+ Index to place the column at.
+ field_ (`Union[str, pyarrow.Field]`):
+ If a string is passed then the type is deduced from the column
+ data.
+ column (`Union[pyarrow.Array, List[pyarrow.Array]]`):
+ Column data.
+
+ Returns:
+ `datasets.table.Table`: New table with the passed column added.
+ """
+ raise NotImplementedError()
+
+ def append_column(self, *args, **kwargs):
+ """
+ Append column at end of columns.
+
+ Args:
+ field_ (`Union[str, pyarrow.Field]`):
+ If a string is passed then the type is deduced from the column
+ data.
+ column (`Union[pyarrow.Array, List[pyarrow.Array]]`):
+ Column data.
+
+ Returns:
+ `datasets.table.Table`:
+ New table with the passed column added.
+ """
+ raise NotImplementedError()
+
+ def remove_column(self, i, *args, **kwargs):
+ """
+ Create new Table with the indicated column removed.
+
+ Args:
+ i (`int`):
+ Index of column to remove.
+
+ Returns:
+ `datasets.table.Table`:
+ New table without the column.
+ """
+ table = self.table.remove_column(i, *args, **kwargs)
+ name = self.table.column_names[i]
+ blocks = []
+ for tables in self.blocks:
+ blocks.append(
+ [
+ t.remove_column(t.column_names.index(name), *args, **kwargs) if name in t.column_names else t
+ for t in tables
+ ]
+ )
+ return ConcatenationTable(table, blocks)
+
+ def set_column(self, *args, **kwargs):
+ """
+ Replace column in Table at position.
+
+ Args:
+ i (`int`):
+ Index to place the column at.
+ field_ (`Union[str, pyarrow.Field]`):
+ If a string is passed then the type is deduced from the column
+ data.
+ column (`Union[pyarrow.Array, List[pyarrow.Array]]`):
+ Column data.
+
+ Returns:
+ `datasets.table.Table`:
+ New table with the passed column set.
+ """
+ raise NotImplementedError()
+
+ def rename_columns(self, names, *args, **kwargs):
+ """
+ Create new table with columns renamed to provided names.
+ """
+ table = self.table.rename_columns(names, *args, **kwargs)
+ names = dict(zip(self.table.column_names, names))
+ blocks = []
+ for tables in self.blocks:
+ blocks.append(
+ [t.rename_columns([names[name] for name in t.column_names], *args, **kwargs) for t in tables]
+ )
+ return ConcatenationTable(table, blocks)
+
+ def drop(self, columns, *args, **kwargs):
+ """
+ Drop one or more columns and return a new table.
+
+ Args:
+ columns (`List[str]`):
+ List of field names referencing existing columns.
+
+ Raises:
+ `KeyError`: if any of the passed column names do not exist.
+
+ Returns:
+ `datasets.table.Table`:
+ New table without the columns.
+ """
+ table = self.table.drop(columns, *args, **kwargs)
+ blocks = []
+ for tables in self.blocks:
+ blocks.append([t.drop([c for c in columns if c in t.column_names], *args, **kwargs) for t in tables])
+ return ConcatenationTable(table, blocks)
+
+ def select(self, columns, *args, **kwargs):
+ """
+ Select columns of the table.
+
+ Returns a new table with the specified columns, and metadata preserved.
+
+ Args:
+ columns (:obj:`Union[List[str], List[int]]`):
+ The column names or integer indices to select.
+
+ Returns:
+ :class:`datasets.table.Table`: New table with the specified columns, and metadata preserved.
+ """
+ table = self.table.select(columns, *args, **kwargs)
+ blocks = []
+ for tables in self.blocks:
+ blocks.append([t.select([c for c in columns if c in t.column_names], *args, **kwargs) for t in tables])
+ return ConcatenationTable(table, blocks)
+
+
+def concat_tables(tables: List[Table], axis: int = 0) -> Table:
+ """
+ Concatenate tables.
+
+ Args:
+ tables (list of `Table`):
+ List of tables to be concatenated.
+ axis (`{0, 1}`, defaults to `0`, meaning over rows):
+ Axis to concatenate over, where `0` means over rows (vertically) and `1` means over columns
+ (horizontally).
+
+
+ Returns:
+ `datasets.table.Table`:
+ If the number of input tables is > 1, then the returned table is a `datasets.table.ConcatenationTable`.
+ Otherwise if there's only one table, it is returned as is.
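+
+ Example (illustrative sketch):
+
+ ```python
+ >>> from datasets.table import InMemoryTable, concat_tables
+ >>> t1 = InMemoryTable.from_pydict({"a": [0, 1]})
+ >>> t2 = InMemoryTable.from_pydict({"b": ["x", "y"]})
+ >>> concat_tables([t1, t2], axis=1).column_names
+ ['a', 'b']
+ >>> concat_tables([t1]) is t1  # a single table is returned as is
+ True
+ ```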
+ """
+ tables = list(tables)
+ if len(tables) == 1:
+ return tables[0]
+ return ConcatenationTable.from_tables(tables, axis=axis)
+
+
+def list_table_cache_files(table: Table) -> List[str]:
+ """
+ Get the cache files that are loaded by the table.
+ Cache files are used when parts of the table come from the disk via memory mapping.
+
+ Returns:
+ `List[str]`:
+ A list of paths to the cache files loaded by the table.
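+
+ Example (illustrative sketch; assumes `path/to/data.arrow` is an existing arrow file):
+
+ ```python
+ >>> from datasets.table import MemoryMappedTable, list_table_cache_files
+ >>> table = MemoryMappedTable.from_file("path/to/data.arrow")
+ >>> list_table_cache_files(table) == [table.path]
+ True
+ ```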
+ """
+ if isinstance(table, ConcatenationTable):
+ cache_files = []
+ for subtables in table.blocks:
+ for subtable in subtables:
+ cache_files += list_table_cache_files(subtable)
+ return cache_files
+ elif isinstance(table, MemoryMappedTable):
+ return [table.path]
+ else:
+ return []
+
+
+def _wrap_for_chunked_arrays(func):
+ """Apply the function on each chunk of a `pyarrow.ChunkedArray`, or on the array directly"""
+
+ def wrapper(array, *args, **kwargs):
+ if isinstance(array, pa.ChunkedArray):
+ return pa.chunked_array([func(chunk, *args, **kwargs) for chunk in array.chunks])
+ else:
+ return func(array, *args, **kwargs)
+
+ return wrapper
+
+
+def _are_list_values_of_length(array: pa.ListArray, length: int) -> bool:
+ """Check if all the sub-lists of a `pa.ListArray` have the specified length."""
+ return pc.all(pc.equal(array.value_lengths(), length)).as_py() or array.null_count == len(array)
+
+
+def _combine_list_array_offsets_with_mask(array: pa.ListArray) -> pa.Array:
+ """Add the null bitmap to the offsets of a `pa.ListArray`."""
+ offsets = array.offsets
+ if array.null_count > 0:
+ offsets = pa.concat_arrays(
+ [
+ pc.replace_with_mask(offsets[:-1], array.is_null(), pa.nulls(len(array), pa.int32())),
+ offsets[-1:],
+ ]
+ )
+ return offsets
+
+
+def _storage_type(type: pa.DataType) -> pa.DataType:
+ """Convert a (possibly nested) `pa.ExtensionType` to its storage type."""
+ if isinstance(type, pa.ExtensionType):
+ return _storage_type(type.storage_type)
+ elif isinstance(type, pa.StructType):
+ return pa.struct([pa.field(field.name, _storage_type(field.type)) for field in type])
+ elif isinstance(type, pa.ListType):
+ return pa.list_(_storage_type(type.value_type))
+ elif isinstance(type, pa.FixedSizeListType):
+ return pa.list_(_storage_type(type.value_type), type.list_size)
+ return type
+
+
+@_wrap_for_chunked_arrays
+def array_cast(array: pa.Array, pa_type: pa.DataType, allow_number_to_str=True):
+ """Improved version of `pa.Array.cast`
+
+ It supports casting `pa.StructArray` objects to re-order the fields.
+ It also lets you control certain aspects of the casting, e.g. whether
+ to disallow casting numbers (`floats` or `ints`) to strings.
+
+ Args:
+ array (`pa.Array`):
+ PyArrow array to cast
+ pa_type (`pa.DataType`):
+ Target PyArrow type
+ allow_number_to_str (`bool`, defaults to `True`):
+ Whether to allow casting numbers to strings.
+
+ Raises:
+ `pa.lib.ArrowInvalid`: if the arrow data casting fails
+ `TypeError`: if the target type is not supported, e.g.
+
+ - if a field is missing
+ - if casting from numbers to strings and `allow_number_to_str` is `False`
+
+ Returns:
+ `pyarrow.Array`: the casted array
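+
+ Example (illustrative sketch of the struct field re-ordering mentioned above):
+
+ ```python
+ >>> import pyarrow as pa
+ >>> from datasets.table import array_cast
+ >>> arr = pa.array([{"b": 1, "a": "x"}])
+ >>> new_type = pa.struct([("a", pa.string()), ("b", pa.int32())])
+ >>> array_cast(arr, new_type).type == new_type
+ True
+ ```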
+ """
+ _c = partial(array_cast, allow_number_to_str=allow_number_to_str)
+ if isinstance(array, pa.ExtensionArray):
+ array = array.storage
+ if isinstance(pa_type, pa.ExtensionType):
+ return pa_type.wrap_array(_c(array, pa_type.storage_type))
+ elif array.type == pa_type:
+ return array
+ elif pa.types.is_struct(array.type):
+ if pa.types.is_struct(pa_type) and ({field.name for field in pa_type} == {field.name for field in array.type}):
+ if array.type.num_fields == 0:
+ return array
+ arrays = [_c(array.field(field.name), field.type) for field in pa_type]
+ return pa.StructArray.from_arrays(arrays, fields=list(pa_type), mask=array.is_null())
+ elif pa.types.is_list(array.type):
+ if pa.types.is_fixed_size_list(pa_type):
+ if _are_list_values_of_length(array, pa_type.list_size):
+ if array.null_count > 0:
+ # Ensure each null value in the array translates to [null] * pa_type.list_size in the array's values array
+ array_type = array.type
+ storage_type = _storage_type(array_type)
+ if array_type != storage_type:
+ # Temporarily convert to the storage type to support extension types in the slice operation
+ array = _c(array, storage_type)
+ array = pc.list_slice(array, 0, pa_type.list_size, return_fixed_size_list=True)
+ array = _c(array, array_type)
+ else:
+ array = pc.list_slice(array, 0, pa_type.list_size, return_fixed_size_list=True)
+ array_values = array.values
+ if config.PYARROW_VERSION.major < 15:
+ return pa.Array.from_buffers(
+ pa_type,
+ len(array),
+ [array.is_valid().buffers()[1]],
+ children=[_c(array_values, pa_type.value_type)],
+ )
+ else:
+ return pa.FixedSizeListArray.from_arrays(
+ _c(array_values, pa_type.value_type), pa_type.list_size, mask=array.is_null()
+ )
+ else:
+ array_values = array.values[
+ array.offset * pa_type.list_size : (array.offset + len(array)) * pa_type.list_size
+ ]
+ return pa.FixedSizeListArray.from_arrays(_c(array_values, pa_type.value_type), pa_type.list_size)
+ elif pa.types.is_list(pa_type):
+ # Merge offsets with the null bitmap to avoid the "Null bitmap with offsets slice not supported" ArrowNotImplementedError
+ array_offsets = _combine_list_array_offsets_with_mask(array)
+ return pa.ListArray.from_arrays(array_offsets, _c(array.values, pa_type.value_type))
+ elif pa.types.is_fixed_size_list(array.type):
+ if pa.types.is_fixed_size_list(pa_type):
+ if pa_type.list_size == array.type.list_size:
+ array_values = array.values[
+ array.offset * array.type.list_size : (array.offset + len(array)) * array.type.list_size
+ ]
+ if config.PYARROW_VERSION.major < 15:
+ return pa.Array.from_buffers(
+ pa_type,
+ len(array),
+ [array.is_valid().buffers()[1]],
+ children=[_c(array_values, pa_type.value_type)],
+ )
+ else:
+ return pa.FixedSizeListArray.from_arrays(
+ _c(array_values, pa_type.value_type), pa_type.list_size, mask=array.is_null()
+ )
+ elif pa.types.is_list(pa_type):
+ array_offsets = (np.arange(len(array) + 1) + array.offset) * array.type.list_size
+ return pa.ListArray.from_arrays(array_offsets, _c(array.values, pa_type.value_type), mask=array.is_null())
+ else:
+ if (
+ not allow_number_to_str
+ and pa.types.is_string(pa_type)
+ and (pa.types.is_floating(array.type) or pa.types.is_integer(array.type))
+ ):
+ raise TypeError(
+ f"Couldn't cast array of type {array.type} to {pa_type} since allow_number_to_str is set to {allow_number_to_str}"
+ )
+ if pa.types.is_null(pa_type) and not pa.types.is_null(array.type):
+ raise TypeError(f"Couldn't cast array of type {array.type} to {pa_type}")
+ return array.cast(pa_type)
+ raise TypeError(f"Couldn't cast array of type\n{array.type}\nto\n{pa_type}")
+
+
+@_wrap_for_chunked_arrays
+def cast_array_to_feature(array: pa.Array, feature: "FeatureType", allow_number_to_str=True):
+ """Cast an array to the arrow type that corresponds to the requested feature type.
+ For custom features like [`Audio`] or [`Image`], it takes into account the "cast_storage" methods
+ they define to enable casting from other arrow types.
+
+ Args:
+ array (`pa.Array`):
+ The PyArrow array to cast.
+ feature (`datasets.features.FeatureType`):
+ The target feature type.
+ allow_number_to_str (`bool`, defaults to `True`):
+ Whether to allow casting numbers to strings.
+
+ Raises:
+ `pa.lib.ArrowInvalid`: if the arrow data casting fails
+ `TypeError`: if the target type is not supported, e.g.
+
+ - if a field is missing
+ - if casting from numbers to strings and `allow_number_to_str` is `False`
+
+ Returns:
+ array (`pyarrow.Array`): the casted array
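+
+ Example (illustrative sketch using a plain `Value` feature):
+
+ ```python
+ >>> import pyarrow as pa
+ >>> from datasets import Value
+ >>> from datasets.table import cast_array_to_feature
+ >>> cast_array_to_feature(pa.array([1, 2]), Value("string")).type == pa.string()
+ True
+ ```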
+ """
+ from .features.features import Sequence, get_nested_type
+
+ _c = partial(cast_array_to_feature, allow_number_to_str=allow_number_to_str)
+
+ if isinstance(array, pa.ExtensionArray):
+ array = array.storage
+ if hasattr(feature, "cast_storage"):
+ return feature.cast_storage(array)
+
+ elif pa.types.is_struct(array.type):
+ # feature must be a dict or Sequence(subfeatures_dict)
+ if isinstance(feature, Sequence) and isinstance(feature.feature, dict):
+ feature = {
+ name: Sequence(subfeature, length=feature.length) for name, subfeature in feature.feature.items()
+ }
+ if isinstance(feature, dict) and {field.name for field in array.type} == set(feature):
+ if array.type.num_fields == 0:
+ return array
+ arrays = [_c(array.field(name), subfeature) for name, subfeature in feature.items()]
+ return pa.StructArray.from_arrays(arrays, names=list(feature), mask=array.is_null())
+ elif pa.types.is_list(array.type):
+ # feature must be either [subfeature] or Sequence(subfeature)
+ if isinstance(feature, list):
+ casted_array_values = _c(array.values, feature[0])
+ if casted_array_values.type == array.values.type:
+ return array
+ else:
+ # Merge offsets with the null bitmap to avoid the "Null bitmap with offsets slice not supported" ArrowNotImplementedError
+ array_offsets = _combine_list_array_offsets_with_mask(array)
+ return pa.ListArray.from_arrays(array_offsets, casted_array_values)
+ elif isinstance(feature, Sequence):
+ if feature.length > -1:
+ if _are_list_values_of_length(array, feature.length):
+ if array.null_count > 0:
+ # Ensure each null value in the array translates to [null] * pa_type.list_size in the array's values array
+ array_type = array.type
+ storage_type = _storage_type(array_type)
+ if array_type != storage_type:
+ # Temporarily convert to the storage type to support extension types in the slice operation
+ array = array_cast(array, storage_type, allow_number_to_str=allow_number_to_str)
+ array = pc.list_slice(array, 0, feature.length, return_fixed_size_list=True)
+ array = array_cast(array, array_type, allow_number_to_str=allow_number_to_str)
+ else:
+ array = pc.list_slice(array, 0, feature.length, return_fixed_size_list=True)
+ array_values = array.values
+ casted_array_values = _c(array_values, feature.feature)
+ if config.PYARROW_VERSION.major < 15:
+ return pa.Array.from_buffers(
+ pa.list_(casted_array_values.type, feature.length),
+ len(array),
+ [array.is_valid().buffers()[1]],
+ children=[casted_array_values],
+ )
+ else:
+ return pa.FixedSizeListArray.from_arrays(
+ casted_array_values, feature.length, mask=array.is_null()
+ )
+ else:
+ array_values = array.values[
+ array.offset * feature.length : (array.offset + len(array)) * feature.length
+ ]
+ return pa.FixedSizeListArray.from_arrays(_c(array_values, feature.feature), feature.length)
+ else:
+ casted_array_values = _c(array.values, feature.feature)
+ if casted_array_values.type == array.values.type:
+ return array
+ else:
+ # Merge offsets with the null bitmap to avoid the "Null bitmap with offsets slice not supported" ArrowNotImplementedError
+ array_offsets = _combine_list_array_offsets_with_mask(array)
+ return pa.ListArray.from_arrays(array_offsets, casted_array_values)
+ elif pa.types.is_fixed_size_list(array.type):
+ # feature must be either [subfeature] or Sequence(subfeature)
+ if isinstance(feature, list):
+ array_offsets = (np.arange(len(array) + 1) + array.offset) * array.type.list_size
+ return pa.ListArray.from_arrays(array_offsets, _c(array.values, feature[0]), mask=array.is_null())
+ elif isinstance(feature, Sequence):
+ if feature.length > -1:
+ if feature.length == array.type.list_size:
+ array_values = array.values[
+ array.offset * array.type.list_size : (array.offset + len(array)) * array.type.list_size
+ ]
+ casted_array_values = _c(array_values, feature.feature)
+ if config.PYARROW_VERSION.major < 15:
+ return pa.Array.from_buffers(
+ pa.list_(casted_array_values.type, feature.length),
+ len(array),
+ [array.is_valid().buffers()[1]],
+ children=[casted_array_values],
+ )
+ else:
+ return pa.FixedSizeListArray.from_arrays(
+ casted_array_values, feature.length, mask=array.is_null()
+ )
+ else:
+ array_offsets = (np.arange(len(array) + 1) + array.offset) * array.type.list_size
+ return pa.ListArray.from_arrays(array_offsets, _c(array.values, feature.feature), mask=array.is_null())
+ if pa.types.is_null(array.type):
+ return array_cast(array, get_nested_type(feature), allow_number_to_str=allow_number_to_str)
+ elif not isinstance(feature, (Sequence, dict, list, tuple)):
+ return array_cast(array, feature(), allow_number_to_str=allow_number_to_str)
+ raise TypeError(f"Couldn't cast array of type\n{array.type}\nto\n{feature}")
+
+
+@_wrap_for_chunked_arrays
+def embed_array_storage(array: pa.Array, feature: "FeatureType"):
+ """Embed data into an array's storage.
+ For custom features like Audio or Image, it takes into account the "embed_storage" methods
+ they define to embed external data (e.g. an image file) into an array.
+
+ Args:
+ array (`pa.Array`):
+ The PyArrow array in which to embed data.
+ feature (`datasets.features.FeatureType`):
+ Array features.
+
+ Raises:
+ `TypeError`: if the target type is not supported, e.g.
+
+ - if a field is missing
+
+ Returns:
+ array (`pyarrow.Array`): the array with embedded data
+ """
+ from .features import Sequence
+
+ _e = embed_array_storage
+
+ if isinstance(array, pa.ExtensionArray):
+ array = array.storage
+ if hasattr(feature, "embed_storage"):
+ return feature.embed_storage(array)
+ elif pa.types.is_struct(array.type):
+ # feature must be a dict or Sequence(subfeatures_dict)
+ if isinstance(feature, Sequence) and isinstance(feature.feature, dict):
+ feature = {
+ name: Sequence(subfeature, length=feature.length) for name, subfeature in feature.feature.items()
+ }
+ if isinstance(feature, dict):
+ arrays = [_e(array.field(name), subfeature) for name, subfeature in feature.items()]
+ return pa.StructArray.from_arrays(arrays, names=list(feature), mask=array.is_null())
+ elif pa.types.is_list(array.type):
+ # feature must be either [subfeature] or Sequence(subfeature)
+ # Merge offsets with the null bitmap to avoid the "Null bitmap with offsets slice not supported" ArrowNotImplementedError
+ array_offsets = _combine_list_array_offsets_with_mask(array)
+ if isinstance(feature, list):
+ return pa.ListArray.from_arrays(array_offsets, _e(array.values, feature[0]))
+ if isinstance(feature, Sequence) and feature.length == -1:
+ return pa.ListArray.from_arrays(array_offsets, _e(array.values, feature.feature))
+ elif pa.types.is_fixed_size_list(array.type):
+ # feature must be Sequence(subfeature)
+ if isinstance(feature, Sequence) and feature.length > -1:
+ array_values = array.values[
+ array.offset * array.type.list_size : (array.offset + len(array)) * array.type.list_size
+ ]
+ embedded_array_values = _e(array_values, feature.feature)
+ if config.PYARROW_VERSION.major < 15:
+ return pa.Array.from_buffers(
+ pa.list_(array_values.type, feature.length),
+ len(array),
+ [array.is_valid().buffers()[1]],
+ children=[embedded_array_values],
+ )
+ else:
+ return pa.FixedSizeListArray.from_arrays(embedded_array_values, feature.length, mask=array.is_null())
+ if not isinstance(feature, (Sequence, dict, list, tuple)):
+ return array
+ raise TypeError(f"Couldn't embed array of type\n{array.type}\nwith\n{feature}")
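+
+# Illustrative sketch (not part of upstream `datasets`): embedding the bytes of image
+# files referenced by path into the array's storage, assuming `image_paths` is a list
+# of paths to existing image files:
+#
+#   from datasets.features import Image
+#   arr = pa.array([{"bytes": None, "path": p} for p in image_paths], type=Image().pa_type)
+#   embedded = embed_array_storage(arr, Image())  # file contents are now stored under "bytes"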
+
+
+class CastError(ValueError):
+ """When it's not possible to cast an Arrow table to a specific schema or set of features"""
+
+ def __init__(self, *args, table_column_names: List[str], requested_column_names: List[str]) -> None:
+ super().__init__(*args)
+ self.table_column_names = table_column_names
+ self.requested_column_names = requested_column_names
+
+ def details(self):
+ new_columns = set(self.table_column_names) - set(self.requested_column_names)
+ missing_columns = set(self.requested_column_names) - set(self.table_column_names)
+ if new_columns and missing_columns:
+ return f"there are {len(new_columns)} new columns ({', '.join(new_columns)}) and {len(missing_columns)} missing columns ({', '.join(missing_columns)})."
+ elif new_columns:
+ return f"there are {len(new_columns)} new columns ({', '.join(new_columns)})"
+ else:
+ return f"there are {len(missing_columns)} missing columns ({', '.join(missing_columns)})"
+
+
+def cast_table_to_features(table: pa.Table, features: "Features"):
+ """Cast a table to the arrow schema that corresponds to the requested features.
+
+ Args:
+ table (`pyarrow.Table`):
+ PyArrow table to cast.
+ features ([`Features`]):
+ Target features.
+
+ Returns:
+ table (`pyarrow.Table`): the casted table
+ """
+ if sorted(table.column_names) != sorted(features):
+ raise CastError(
+ f"Couldn't cast\n{table.schema}\nto\n{features}\nbecause column names don't match",
+ table_column_names=table.column_names,
+ requested_column_names=list(features),
+ )
+ arrays = [cast_array_to_feature(table[name], feature) for name, feature in features.items()]
+ return pa.Table.from_arrays(arrays, schema=features.arrow_schema)
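+
+# Illustrative sketch (not part of upstream `datasets`): casting a two-column table to
+# explicit features; the column names must match exactly or a CastError is raised:
+#
+#   from datasets import ClassLabel, Features, Value
+#   table = pa.table({"text": ["a", "b"], "label": [0, 1]})
+#   features = Features({"text": Value("string"), "label": ClassLabel(names=["neg", "pos"])})
+#   casted = cast_table_to_features(table, features)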
+
+
+def cast_table_to_schema(table: pa.Table, schema: pa.Schema):
+ """Cast a table to the arrow schema. Different from `cast_table_to_features`, this method can preserve nullability.
+
+ Args:
+ table (`pa.Table`):
+ PyArrow table to cast.
+ schema (`pa.Schema`):
+ Target schema.
+
+ Returns:
+ `pa.Table`: the casted table
+ """
+ from .features import Features
+
+ features = Features.from_arrow_schema(schema)
+ if sorted(table.column_names) != sorted(features):
+ raise CastError(
+ f"Couldn't cast\n{table.schema}\nto\n{features}\nbecause column names don't match",
+ table_column_names=table.column_names,
+ requested_column_names=list(features),
+ )
+ arrays = [cast_array_to_feature(table[name], feature) for name, feature in features.items()]
+ return pa.Table.from_arrays(arrays, schema=schema)
+
+
+def embed_table_storage(table: pa.Table):
+ """Embed external data into a table's storage.
+
+ Args:
+ table (`pyarrow.Table`):
+ PyArrow table in which to embed data.
+
+ Returns:
+ table (`pyarrow.Table`): the table with embedded data
+ """
+ from .features.features import Features, require_storage_embed
+
+ features = Features.from_arrow_schema(table.schema)
+ arrays = [
+ embed_array_storage(table[name], feature) if require_storage_embed(feature) else table[name]
+ for name, feature in features.items()
+ ]
+ return pa.Table.from_arrays(arrays, schema=features.arrow_schema)
+
+
+def table_cast(table: pa.Table, schema: pa.Schema):
+ """Improved version of `pa.Table.cast`.
+
+ It supports casting to feature types stored in the schema metadata.
+
+ Args:
+ table (`pyarrow.Table`):
+ PyArrow table to cast.
+ schema (`pyarrow.Schema`):
+ Target PyArrow schema.
+
+ Returns:
+ table (`pyarrow.Table`): the casted table
+ """
+ if table.schema != schema:
+ return cast_table_to_schema(table, schema)
+ elif table.schema.metadata != schema.metadata:
+ return table.replace_schema_metadata(schema.metadata)
+ else:
+ return table
+
+
+def table_flatten(table: pa.Table):
+ """Improved version of `pa.Table.flatten`.
+
+ It behaves like `pa.Table.flatten` in the sense that it does a one-step flatten of struct-type columns into one column per struct field,
+ but it also updates the metadata and skips decodable features unless their `decode` attribute is set to False.
+
+ Args:
+ table (`pa.Table`):
+ PyArrow table to flatten.
+
+ Returns:
+ `Table`: the flattened table
+ """
+ from .features import Features
+
+ features = Features.from_arrow_schema(table.schema)
+ if any(hasattr(subfeature, "flatten") and subfeature.flatten() == subfeature for subfeature in features.values()):
+ flat_arrays = []
+ flat_column_names = []
+ for field in table.schema:
+ array = table.column(field.name)
+ subfeature = features[field.name]
+ if pa.types.is_struct(field.type) and (
+ not hasattr(subfeature, "flatten") or subfeature.flatten() != subfeature
+ ):
+ flat_arrays.extend(array.flatten())
+ flat_column_names.extend([f"{field.name}.{subfield.name}" for subfield in field.type])
+ else:
+ flat_arrays.append(array)
+ flat_column_names.append(field.name)
+ flat_table = pa.Table.from_arrays(
+ flat_arrays,
+ names=flat_column_names,
+ )
+ else:
+ flat_table = table.flatten()
+ # Preserve complex types in the metadata
+ flat_features = features.flatten(max_depth=2)
+ flat_features = Features({column_name: flat_features[column_name] for column_name in flat_table.column_names})
+ return flat_table.replace_schema_metadata(flat_features.arrow_schema.metadata)
+
+
+def table_visitor(table: pa.Table, function: Callable[[pa.Array], None]):
+ """Visit all arrays in a table and apply a function to them.
+
+ Args:
+ table (`pyarrow.Table`):
+ PyArrow table to visit.
+ function (`Callable[[pa.Array], None]`):
+ Function to apply to each array.
+ """
+ from .features import Features, Sequence
+
+ features = Features.from_arrow_schema(table.schema)
+
+ def _visit(array, feature):
+ if isinstance(array, pa.ChunkedArray):
+ for chunk in array.chunks:
+ _visit(chunk, feature)
+ else:
+ if isinstance(array, pa.ExtensionArray):
+ array = array.storage
+ function(array, feature)
+ if pa.types.is_struct(array.type) and not hasattr(feature, "cast_storage"):
+ if isinstance(feature, Sequence) and isinstance(feature.feature, dict):
+ feature = {
+ name: Sequence(subfeature, length=feature.length)
+ for name, subfeature in feature.feature.items()
+ }
+ for name, subfeature in feature.items():
+ _visit(array.field(name), subfeature)
+ elif pa.types.is_list(array.type):
+ if isinstance(feature, list):
+ _visit(array.values, feature[0])
+ elif isinstance(feature, Sequence):
+ _visit(array.values, feature.feature)
+
+ for name, feature in features.items():
+ _visit(table[name], feature)
+
+
+def table_iter(table: Table, batch_size: int, drop_last_batch=False) -> Iterator[pa.Table]:
+ """Iterate over sub-tables of size `batch_size`.
+
+ Args:
+ table (`pyarrow.Table`):
+ PyArrow table to iterate over.
+ batch_size (`int`):
+ Size of each sub-table to yield.
+ drop_last_batch (`bool`, defaults to `False`):
+ Drop the last batch if it is smaller than `batch_size`.
+ """
+ chunks_buffer = []
+ chunks_buffer_size = 0
+ for chunk in table.to_reader(max_chunksize=batch_size):
+ if len(chunk) == 0:
+ continue
+ elif chunks_buffer_size + len(chunk) < batch_size:
+ chunks_buffer.append(chunk)
+ chunks_buffer_size += len(chunk)
+ continue
+ elif chunks_buffer_size + len(chunk) == batch_size:
+ chunks_buffer.append(chunk)
+ yield pa.Table.from_batches(chunks_buffer)
+ chunks_buffer = []
+ chunks_buffer_size = 0
+ else:
+ cropped_chunk_length = batch_size - chunks_buffer_size
+ chunks_buffer.append(chunk.slice(0, cropped_chunk_length))
+ yield pa.Table.from_batches(chunks_buffer)
+ chunks_buffer = [chunk.slice(cropped_chunk_length, len(chunk) - cropped_chunk_length)]
+ chunks_buffer_size = len(chunk) - cropped_chunk_length
+ if not drop_last_batch and chunks_buffer:
+ yield pa.Table.from_batches(chunks_buffer)
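+
+
+# Illustrative sketch (not part of upstream `datasets`): iterating over a table in
+# fixed-size batches; the final, smaller batch is yielded unless drop_last_batch=True:
+#
+#   table = pa.table({"ids": list(range(10))})
+#   for batch in table_iter(table, batch_size=4):
+#       print(len(batch))  # 4, 4, 2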
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/utils/_datasets_server.py b/evalkit_tf437/lib/python3.10/site-packages/datasets/utils/_datasets_server.py
new file mode 100644
index 0000000000000000000000000000000000000000..3699a4081e85bf22d675f1566f2d710f1bff88e3
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/datasets/utils/_datasets_server.py
@@ -0,0 +1,96 @@
+from typing import Any, Dict, List, Optional, Union
+
+from .. import config
+from ..exceptions import DatasetsError
+from .file_utils import (
+ get_authentication_headers_for_url,
+ http_get,
+)
+from .logging import get_logger
+
+
+logger = get_logger(__name__)
+
+
+class DatasetsServerError(DatasetsError):
+ """Dataset-server error.
+
+ Raised when trying to use the Datasets-server HTTP API to access:
+ - a missing dataset,
+ - a private/gated dataset for which the user is not authenticated, or
+ - unavailable /parquet or /info responses.
+ """
+
+
+def get_exported_parquet_files(dataset: str, revision: str, token: Optional[Union[str, bool]]) -> List[Dict[str, Any]]:
+ """
+ Get the dataset exported parquet files
+ Docs: https://huggingface.co/docs/datasets-server/parquet
+ """
+ datasets_server_parquet_url = config.HF_ENDPOINT.replace("://", "://datasets-server.") + "/parquet?dataset="
+ try:
+ parquet_data_files_response = http_get(
+ url=datasets_server_parquet_url + dataset,
+ temp_file=None,
+ headers=get_authentication_headers_for_url(config.HF_ENDPOINT + f"datasets/{dataset}", token=token),
+ timeout=100.0,
+ max_retries=3,
+ )
+ parquet_data_files_response.raise_for_status()
+ if "X-Revision" in parquet_data_files_response.headers:
+ if parquet_data_files_response.headers["X-Revision"] == revision or revision is None:
+ parquet_data_files_response_json = parquet_data_files_response.json()
+ if (
+ parquet_data_files_response_json.get("partial") is False
+ and not parquet_data_files_response_json.get("pending", True)
+ and not parquet_data_files_response_json.get("failed", True)
+ and "parquet_files" in parquet_data_files_response_json
+ ):
+ return parquet_data_files_response_json["parquet_files"]
+ else:
+ logger.debug(f"Parquet export for {dataset} is not completely ready yet.")
+ else:
+ logger.debug(
+ f"Parquet export for {dataset} is available but outdated (revision='{parquet_data_files_response.headers['X-Revision']}')"
+ )
+ except Exception as e: # noqa -- catch any exception from the datasets-server and consider that the parquet export doesn't exist
+ logger.debug(f"No parquet export for {dataset} available ({type(e).__name__}: {e})")
+ raise DatasetsServerError("No exported Parquet files available.")
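+
+# Illustrative sketch (not part of upstream `datasets`): each returned item describes one
+# exported Parquet file with (at least) "config", "split" and "url" keys, and a
+# DatasetsServerError is raised when no export is available:
+#
+#   files = get_exported_parquet_files("some_dataset", revision=None, token=None)
+#   train_urls = [f["url"] for f in files if f["split"] == "train"]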
+
+
+def get_exported_dataset_infos(
+ dataset: str, revision: str, token: Optional[Union[str, bool]]
+) -> Dict[str, Dict[str, Any]]:
+ """
+ Get the dataset information, which can be useful to get e.g. the dataset features.
+ Docs: https://huggingface.co/docs/datasets-server/info
+ """
+ datasets_server_info_url = config.HF_ENDPOINT.replace("://", "://datasets-server.") + "/info?dataset="
+ try:
+ info_response = http_get(
+ url=datasets_server_info_url + dataset,
+ temp_file=None,
+ headers=get_authentication_headers_for_url(config.HF_ENDPOINT + f"datasets/{dataset}", token=token),
+ timeout=100.0,
+ max_retries=3,
+ )
+ info_response.raise_for_status()
+ if "X-Revision" in info_response.headers:
+ if info_response.headers["X-Revision"] == revision or revision is None:
+ info_response = info_response.json()
+ if (
+ info_response.get("partial") is False
+ and not info_response.get("pending", True)
+ and not info_response.get("failed", True)
+ and "dataset_info" in info_response
+ ):
+ return info_response["dataset_info"]
+ else:
+ logger.debug(f"Dataset info for {dataset} is not completely ready yet.")
+ else:
+ logger.debug(
+ f"Dataset info for {dataset} is available but outdated (revision='{info_response.headers['X-Revision']}')"
+ )
+ except Exception as e: # noqa -- catch any exception from the datasets-server and consider that the dataset info doesn't exist
+ logger.debug(f"No dataset info for {dataset} available ({type(e).__name__}: {e})")
+ raise DatasetsServerError("No exported dataset infos available.")
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/utils/download_manager.py b/evalkit_tf437/lib/python3.10/site-packages/datasets/utils/download_manager.py
new file mode 100644
index 0000000000000000000000000000000000000000..b524c2f9686f65d083c424a4e17d001395b743b6
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/datasets/utils/download_manager.py
@@ -0,0 +1 @@
+# deprecated, please use datasets.download.download_manager
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/utils/logging.py b/evalkit_tf437/lib/python3.10/site-packages/datasets/utils/logging.py
new file mode 100644
index 0000000000000000000000000000000000000000..dffd5ce46e0d2da5cbbfb023003c3f4caae86093
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/datasets/utils/logging.py
@@ -0,0 +1,179 @@
+# Copyright 2020 Optuna, Hugging Face
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Logging utilities."""
+
+import logging
+import os
+from logging import (
+ CRITICAL, # NOQA
+ DEBUG, # NOQA
+ ERROR, # NOQA
+ FATAL, # NOQA
+ INFO, # NOQA
+ NOTSET, # NOQA
+ WARN, # NOQA
+ WARNING, # NOQA
+)
+from typing import Optional
+
+from .tqdm import ( # noqa: F401 # imported for backward compatibility
+ disable_progress_bar,
+ enable_progress_bar,
+ is_progress_bar_enabled,
+ tqdm,
+)
+
+
+log_levels = {
+ "debug": logging.DEBUG,
+ "info": logging.INFO,
+ "warning": logging.WARNING,
+ "error": logging.ERROR,
+ "critical": logging.CRITICAL,
+}
+
+_default_log_level = logging.WARNING
+
+
+def _get_default_logging_level():
+ """
+ If the DATASETS_VERBOSITY env var is set to one of the valid choices, return that as the new default level.
+ If it is not, fall back to ``_default_log_level``.
+ """
+ env_level_str = os.getenv("DATASETS_VERBOSITY", None)
+ if env_level_str:
+ if env_level_str in log_levels:
+ return log_levels[env_level_str]
+ else:
+ logging.getLogger().warning(
+ f"Unknown option DATASETS_VERBOSITY={env_level_str}, "
+ f"has to be one of: { ', '.join(log_levels.keys()) }"
+ )
+ return _default_log_level
+
+
+def _get_library_name() -> str:
+ return __name__.split(".")[0]
+
+
+def _get_library_root_logger() -> logging.Logger:
+ return logging.getLogger(_get_library_name())
+
+
+def _configure_library_root_logger() -> None:
+ # Apply our default configuration to the library root logger.
+ library_root_logger = _get_library_root_logger()
+ library_root_logger.addHandler(logging.StreamHandler())
+ library_root_logger.setLevel(_get_default_logging_level())
+
+
+def _reset_library_root_logger() -> None:
+ library_root_logger = _get_library_root_logger()
+ library_root_logger.setLevel(logging.NOTSET)
+
+
+def get_logger(name: Optional[str] = None) -> logging.Logger:
+ """Return a logger with the specified name.
+ This function can be used in dataset scripts.
+ """
+ if name is None:
+ name = _get_library_name()
+ return logging.getLogger(name)
+
+
+def get_verbosity() -> int:
+ """Return the current level for the HuggingFace datasets library's root logger.
+ Returns:
+ Logging level, e.g., `datasets.logging.DEBUG` and `datasets.logging.INFO`.
+
+ The Hugging Face Datasets library has the following logging levels:
+ - `datasets.logging.CRITICAL`, `datasets.logging.FATAL`
+ - `datasets.logging.ERROR`
+ - `datasets.logging.WARNING`, `datasets.logging.WARN`
+ - `datasets.logging.INFO`
+ - `datasets.logging.DEBUG`
+
+ """
+ return _get_library_root_logger().getEffectiveLevel()
+
+
+def set_verbosity(verbosity: int) -> None:
+ """Set the level for the Hugging Face Datasets library's root logger.
+ Args:
+ verbosity:
+ Logging level, e.g., `datasets.logging.DEBUG` and `datasets.logging.INFO`.
+ """
+ _get_library_root_logger().setLevel(verbosity)
+
+
+def set_verbosity_info():
+ """Set the level for the Hugging Face datasets library's root logger to `INFO`.
+
+ This will display most of the logging information and tqdm bars.
+
+ Shortcut to `datasets.logging.set_verbosity(datasets.logging.INFO)`.
+ """
+ return set_verbosity(INFO)
+
+
+def set_verbosity_warning():
+ """Set the level for the Hugging Face datasets library's root logger to `WARNING`.
+
+ This will display only warning and error logging information and tqdm bars.
+
+ Shortcut to `datasets.logging.set_verbosity(datasets.logging.WARNING)`.
+ """
+ return set_verbosity(WARNING)
+
+
+def set_verbosity_debug():
+ """Set the level for the Hugging Face datasets library's root logger to `DEBUG`.
+
+ This will display all the logging information and tqdm bars.
+
+ Shortcut to `datasets.logging.set_verbosity(datasets.logging.DEBUG)`.
+ """
+ return set_verbosity(DEBUG)
+
+
+def set_verbosity_error():
+ """Set the level for the Hugging Face datasets library's root logger to `ERROR`.
+
+ This will display only error logging information and tqdm bars.
+
+ Shortcut to `datasets.logging.set_verbosity(datasets.logging.ERROR)`.
+ """
+ return set_verbosity(ERROR)
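+
+
+# Illustrative sketch (not part of upstream `datasets`): verbosity can be set either via
+# the DATASETS_VERBOSITY environment variable (e.g. DATASETS_VERBOSITY=debug) or in code:
+#
+#   import datasets
+#   datasets.logging.set_verbosity_debug()
+#   assert datasets.logging.get_verbosity() == datasets.logging.DEBUG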
+
+
+def disable_propagation() -> None:
+ """Disable propagation of the library log outputs.
+ Note that log propagation is disabled by default.
+ """
+ _get_library_root_logger().propagate = False
+
+
+def enable_propagation() -> None:
+ """Enable propagation of the library log outputs.
+ Please disable the Hugging Face datasets library's default handler to prevent double logging if the root logger has
+ been configured.
+ """
+ _get_library_root_logger().propagate = True
+
+
+# Configure the library root logger at the module level (singleton-like)
+_configure_library_root_logger()
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/utils/metadata.py b/evalkit_tf437/lib/python3.10/site-packages/datasets/utils/metadata.py
new file mode 100644
index 0000000000000000000000000000000000000000..d2e763e9cb6a150db3c7a6a46b0f60bd7f799b1e
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/datasets/utils/metadata.py
@@ -0,0 +1,320 @@
+import re
+import textwrap
+from collections import Counter
+from itertools import groupby
+from operator import itemgetter
+from pathlib import Path
+from typing import Any, ClassVar, Dict, List, Optional, Tuple, Union
+
+import yaml
+from huggingface_hub import DatasetCardData
+
+from ..config import METADATA_CONFIGS_FIELD
+from ..info import DatasetInfo, DatasetInfosDict
+from ..naming import _split_re
+from ..utils.logging import get_logger
+from .deprecation_utils import deprecated
+
+
+logger = get_logger(__name__)
+
+
+class _NoDuplicateSafeLoader(yaml.SafeLoader):
+ def _check_no_duplicates_on_constructed_node(self, node):
+ keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
+ keys = [tuple(key) if isinstance(key, list) else key for key in keys]
+ counter = Counter(keys)
+ duplicate_keys = [key for key in counter if counter[key] > 1]
+ if duplicate_keys:
+ raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}")
+
+ def construct_mapping(self, node, deep=False):
+ mapping = super().construct_mapping(node, deep=deep)
+ self._check_no_duplicates_on_constructed_node(node)
+ return mapping
+
+
+def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
+ full_content = list(readme_content.splitlines())
+ if full_content and full_content[0] == "---" and "---" in full_content[1:]:
+ sep_idx = full_content[1:].index("---") + 1
+ yamlblock = "\n".join(full_content[1:sep_idx])
+ return yamlblock, "\n".join(full_content[sep_idx + 1 :])
+
+ return None, "\n".join(full_content)
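+
+# Illustrative sketch (not part of upstream `datasets`): the YAML block is only detected
+# when the README starts with a "---" line and contains a closing "---":
+#
+#   yaml_block, body = _split_yaml_from_readme("---\nlicense: mit\n---\n# My dataset")
+#   # yaml_block == "license: mit", body == "# My dataset"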
+
+
+@deprecated("Use `huggingface_hub.DatasetCardData` instead.")
+class DatasetMetadata(dict):
+ # class attributes
+ _FIELDS_WITH_DASHES = {"train_eval_index"} # train-eval-index in the YAML metadata
+
+ @classmethod
+ def from_readme(cls, path: Union[Path, str]) -> "DatasetMetadata":
+ """Loads and validates the dataset metadata from its dataset card (README.md)
+
+ Args:
+ path (:obj:`Path`): Path to the dataset card (its README.md file)
+
+ Returns:
+ :class:`DatasetMetadata`: The dataset's metadata
+
+ Raises:
+ :obj:`TypeError`: If the dataset's metadata is invalid
+ """
+ with open(path, encoding="utf-8") as readme_file:
+ yaml_string, _ = _split_yaml_from_readme(readme_file.read())
+ if yaml_string is not None:
+ return cls.from_yaml_string(yaml_string)
+ else:
+ return cls()
+
+ def to_readme(self, path: Path):
+ if path.exists():
+ with open(path, encoding="utf-8") as readme_file:
+ readme_content = readme_file.read()
+ else:
+ readme_content = None
+ updated_readme_content = self._to_readme(readme_content)
+ with open(path, "w", encoding="utf-8") as readme_file:
+ readme_file.write(updated_readme_content)
+
+ def _to_readme(self, readme_content: Optional[str] = None) -> str:
+ if readme_content is not None:
+ _, content = _split_yaml_from_readme(readme_content)
+ full_content = "---\n" + self.to_yaml_string() + "---\n" + content
+ else:
+ full_content = "---\n" + self.to_yaml_string() + "---\n"
+ return full_content
+
+ @classmethod
+ def from_yaml_string(cls, string: str) -> "DatasetMetadata":
+ """Loads and validates the dataset metadata from a YAML string
+
+ Args:
+ string (:obj:`str`): The YAML string
+
+ Returns:
+ :class:`DatasetMetadata`: The dataset's metadata
+
+ Raises:
+ :obj:`TypeError`: If the dataset's metadata is invalid
+ """
+ metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}
+
+ # Convert the YAML keys to DatasetMetadata fields
+ metadata_dict = {
+ (key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
+ for key, value in metadata_dict.items()
+ }
+ return cls(**metadata_dict)
+
+ def to_yaml_string(self) -> str:
+ return yaml.safe_dump(
+ {
+ (key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
+ for key, value in self.items()
+ },
+ sort_keys=False,
+ allow_unicode=True,
+ encoding="utf-8",
+ ).decode("utf-8")
+
+
+class MetadataConfigs(Dict[str, Dict[str, Any]]):
+ """Should be in format {config_name: {**config_params}}."""
+
+ FIELD_NAME: ClassVar[str] = METADATA_CONFIGS_FIELD
+
+ @staticmethod
+ def _raise_if_data_files_field_not_valid(metadata_config: dict):
+ yaml_data_files = metadata_config.get("data_files")
+ if yaml_data_files is not None:
+ yaml_error_message = textwrap.dedent(
+ f"""
+ Expected data_files in YAML to be either a string or a list of strings
+ or a list of dicts with two keys: 'split' and 'path', but got {yaml_data_files}
+ Examples of data_files in YAML:
+
+ data_files: data.csv
+
+ data_files: data/*.png
+
+ data_files:
+ - part0/*
+ - part1/*
+
+ data_files:
+ - split: train
+ path: train/*
+ - split: test
+ path: test/*
+
+ data_files:
+ - split: train
+ path:
+ - train/part1/*
+ - train/part2/*
+ - split: test
+ path: test/*
+
+ PS: some symbols like dashes '-' are not allowed in split names
+ """
+ )
+ if not isinstance(yaml_data_files, (list, str)):
+ raise ValueError(yaml_error_message)
+ if isinstance(yaml_data_files, list):
+ for yaml_data_files_item in yaml_data_files:
+ if (
+ not isinstance(yaml_data_files_item, (str, dict))
+ or isinstance(yaml_data_files_item, dict)
+ and not (
+ len(yaml_data_files_item) == 2
+ and "split" in yaml_data_files_item
+ and re.match(_split_re, yaml_data_files_item["split"])
+ and isinstance(yaml_data_files_item.get("path"), (str, list))
+ )
+ ):
+ raise ValueError(yaml_error_message)
+
+ @classmethod
+ def _from_exported_parquet_files_and_dataset_infos(
+ cls,
+ revision: str,
+ exported_parquet_files: List[Dict[str, Any]],
+ dataset_infos: DatasetInfosDict,
+ ) -> "MetadataConfigs":
+ metadata_configs = {
+ config_name: {
+ "data_files": [
+ {
+ "split": split_name,
+ "path": [
+ parquet_file["url"].replace("refs%2Fconvert%2Fparquet", revision)
+ for parquet_file in parquet_files_for_split
+ ],
+ }
+ for split_name, parquet_files_for_split in groupby(parquet_files_for_config, itemgetter("split"))
+ ],
+ "version": str(dataset_infos.get(config_name, DatasetInfo()).version or "0.0.0"),
+ }
+ for config_name, parquet_files_for_config in groupby(exported_parquet_files, itemgetter("config"))
+ }
+ if dataset_infos:
+ # Preserve order of configs and splits
+ metadata_configs = {
+ config_name: {
+ "data_files": [
+ data_file
+ for split_name in dataset_info.splits
+ for data_file in metadata_configs[config_name]["data_files"]
+ if data_file["split"] == split_name
+ ],
+ "version": metadata_configs[config_name]["version"],
+ }
+ for config_name, dataset_info in dataset_infos.items()
+ }
+ return cls(metadata_configs)
+
+ @classmethod
+ def from_dataset_card_data(cls, dataset_card_data: DatasetCardData) -> "MetadataConfigs":
+ if dataset_card_data.get(cls.FIELD_NAME):
+ metadata_configs = dataset_card_data[cls.FIELD_NAME]
+ if not isinstance(metadata_configs, list):
+ raise ValueError(f"Expected {cls.FIELD_NAME} to be a list, but got '{metadata_configs}'")
+ for metadata_config in metadata_configs:
+ if "config_name" not in metadata_config:
+ raise ValueError(
+ f"Each config must include `config_name` field with a string name of a config, "
+ f"but got {metadata_config}. "
+ )
+ cls._raise_if_data_files_field_not_valid(metadata_config)
+ return cls(
+ {
+ config["config_name"]: {param: value for param, value in config.items() if param != "config_name"}
+ for config in metadata_configs
+ }
+ )
+ return cls()
+
+ def to_dataset_card_data(self, dataset_card_data: DatasetCardData) -> None:
+ if self:
+ for metadata_config in self.values():
+ self._raise_if_data_files_field_not_valid(metadata_config)
+ current_metadata_configs = self.from_dataset_card_data(dataset_card_data)
+ total_metadata_configs = dict(sorted({**current_metadata_configs, **self}.items()))
+ for config_name, config_metadata in total_metadata_configs.items():
+ config_metadata.pop("config_name", None)
+ dataset_card_data[self.FIELD_NAME] = [
+ {"config_name": config_name, **config_metadata}
+ for config_name, config_metadata in total_metadata_configs.items()
+ ]
+
+ def get_default_config_name(self) -> Optional[str]:
+ default_config_name = None
+ for config_name, metadata_config in self.items():
+ if len(self) == 1 or config_name == "default" or metadata_config.get("default"):
+ if default_config_name is None:
+ default_config_name = config_name
+ else:
+ raise ValueError(
+ f"Dataset has several default configs: '{default_config_name}' and '{config_name}'."
+ )
+ return default_config_name
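+
+
+# Illustrative sketch (not part of upstream `datasets`): the `configs` YAML field of a
+# dataset card maps onto MetadataConfigs keyed by config name, e.g.
+#
+#   configs:
+#   - config_name: default
+#     data_files:
+#     - split: train
+#       path: data/train-*
+#
+# becomes MetadataConfigs({"default": {"data_files": [{"split": "train", "path": "data/train-*"}]}}).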
+
+
+# DEPRECATED - just here to support old versions of evaluate like 0.2.2
+# To support new tasks on the Hugging Face Hub, please open a PR for this file:
+# https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/src/pipelines.ts
+known_task_ids = {
+ "image-classification": [],
+ "translation": [],
+ "image-segmentation": [],
+ "fill-mask": [],
+ "automatic-speech-recognition": [],
+ "token-classification": [],
+ "sentence-similarity": [],
+ "audio-classification": [],
+ "question-answering": [],
+ "summarization": [],
+ "zero-shot-classification": [],
+ "table-to-text": [],
+ "feature-extraction": [],
+ "other": [],
+ "multiple-choice": [],
+ "text-classification": [],
+ "text-to-image": [],
+ "text2text-generation": [],
+ "zero-shot-image-classification": [],
+ "tabular-classification": [],
+ "tabular-regression": [],
+ "image-to-image": [],
+ "tabular-to-text": [],
+ "unconditional-image-generation": [],
+ "text-retrieval": [],
+ "text-to-speech": [],
+ "object-detection": [],
+ "audio-to-audio": [],
+ "text-generation": [],
+ "conversational": [],
+ "table-question-answering": [],
+ "visual-question-answering": [],
+ "image-to-text": [],
+ "reinforcement-learning": [],
+ "voice-activity-detection": [],
+ "time-series-forecasting": [],
+ "document-question-answering": [],
+}
+
+
+if __name__ == "__main__":
+ from argparse import ArgumentParser
+
+ ap = ArgumentParser(usage="Validate the yaml metadata block of a README.md file.")
+ ap.add_argument("readme_filepath")
+ args = ap.parse_args()
+
+ readme_filepath = Path(args.readme_filepath)
+ dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
+ print(dataset_metadata)
+ dataset_metadata.to_readme(readme_filepath)
diff --git a/evalkit_tf437/lib/python3.10/site-packages/datasets/utils/track.py b/evalkit_tf437/lib/python3.10/site-packages/datasets/utils/track.py
new file mode 100644
index 0000000000000000000000000000000000000000..11a3787c7d8595cc7160994973f28db1f709b3b2
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/datasets/utils/track.py
@@ -0,0 +1,49 @@
+from collections.abc import Iterator
+from typing import Iterable
+
+
+class tracked_str(str):
+ origins = {}
+
+ def set_origin(self, origin: str):
+ if super().__repr__() not in self.origins:
+ self.origins[super().__repr__()] = origin
+
+ def get_origin(self):
+ return self.origins.get(super().__repr__(), str(self))
+
+ def __repr__(self) -> str:
+ if super().__repr__() not in self.origins or self.origins[super().__repr__()] == self:
+ return super().__repr__()
+ else:
+ return f"{str(self)} (origin={self.origins[super().__repr__()]})"
+
+
+class tracked_list(list):
+ def __init__(self, *args, **kwargs) -> None:
+ super().__init__(*args, **kwargs)
+ self.last_item = None
+
+ def __iter__(self) -> Iterator:
+ for x in super().__iter__():
+ self.last_item = x
+ yield x
+ self.last_item = None
+
+ def __repr__(self) -> str:
+ if self.last_item is None:
+ return super().__repr__()
+ else:
+ return f"{self.__class__.__name__}(current={self.last_item})"
+
+
+class TrackedIterable(Iterable):
+ def __init__(self) -> None:
+ super().__init__()
+ self.last_item = None
+
+ def __repr__(self) -> str:
+ if self.last_item is None:
+ return super().__repr__()
+ else:
+ return f"{self.__class__.__name__}(current={self.last_item})"
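+
+
+# Illustrative sketch (not part of upstream `datasets`): these wrappers let error messages
+# point at the item that was being processed, e.g.
+#
+#   files = tracked_list(["a.txt", "b.txt"])
+#   for f in files:
+#       print(repr(files))  # tracked_list(current=a.txt), then tracked_list(current=b.txt)
+#   print(repr(files))      # plain list repr again once iteration is finished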
diff --git a/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/otData.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/otData.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2125f1b688dad16dbbc3d312533d3fa50adf1bb3
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/otData.cpython-310.pyc
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4a16720bcc7b01f6c5446f960184df7a3d8a9a0ef52a9c0739f1dca9846660c9
+size 107114
diff --git a/evalkit_tf437/lib/python3.10/site-packages/latex2mathml-3.77.0.dist-info/INSTALLER b/evalkit_tf437/lib/python3.10/site-packages/latex2mathml-3.77.0.dist-info/INSTALLER
new file mode 100644
index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/latex2mathml-3.77.0.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/evalkit_tf437/lib/python3.10/site-packages/latex2mathml-3.77.0.dist-info/LICENSE b/evalkit_tf437/lib/python3.10/site-packages/latex2mathml-3.77.0.dist-info/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..9d3cfcd8f611756af3e638a7f12dc0a8646add7e
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/latex2mathml-3.77.0.dist-info/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2016 Ronie Martinez
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/evalkit_tf437/lib/python3.10/site-packages/latex2mathml-3.77.0.dist-info/METADATA b/evalkit_tf437/lib/python3.10/site-packages/latex2mathml-3.77.0.dist-info/METADATA
new file mode 100644
index 0000000000000000000000000000000000000000..7917e3db6789f1633f2f038b8aff90f6d22b1632
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/latex2mathml-3.77.0.dist-info/METADATA
@@ -0,0 +1,172 @@
+Metadata-Version: 2.1
+Name: latex2mathml
+Version: 3.77.0
+Summary: Pure Python library for LaTeX to MathML conversion
+Home-page: https://github.com/roniemartinez/latex2mathml
+License: MIT
+Keywords: latex,mathml
+Author: Ronie Martinez
+Author-email: ronmarti18@gmail.com
+Requires-Python: >=3.8.1,<4.0.0
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.12
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Topic :: Scientific/Engineering :: Mathematics
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Classifier: Topic :: Text Processing :: Markup :: HTML
+Classifier: Topic :: Text Processing :: Markup :: LaTeX
+Project-URL: Donate, https://www.buymeacoffee.com/roniemartinez
+Project-URL: Repository, https://github.com/roniemartinez/latex2mathml
+Description-Content-Type: text/markdown
+
+
+
+
+
+
+# latex2mathml
+
+Pure Python library for LaTeX to MathML conversion
+
+## Installation
+
+```bash
+pip install latex2mathml
+```
+
+## Usage
+
+### Python
+
+```python
+import latex2mathml.converter
+
+latex_input = "<your_latex_string>"
+mathml_output = latex2mathml.converter.convert(latex_input)
+```
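+
+For example, `latex2mathml.converter.convert(r"x^2")` returns the MathML markup as a string (a `<math>…</math>` element) that can be embedded in HTML.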
+
+### Command-line
+
+```shell
+% latex2mathml -h
+usage: latex2mathml [-h] [-V] [-b] [-t TEXT | -f FILE | -s]
+
+Pure Python library for LaTeX to MathML conversion
+
+options:
+ -h, --help show this help message and exit
+ -V, --version Show version
+ -b, --block Display block
+
+required arguments:
+ -t TEXT, --text TEXT Text
+ -f FILE, --file FILE File
+ -s, --stdin Stdin
+```
+
+## References
+### LaTeX
+
+- https://en.wikibooks.org/wiki/LaTeX/Mathematics
+- http://artofproblemsolving.com/wiki/index.php?title=Main_Page
+- http://milde.users.sourceforge.net/LUCR/Math/
+- https://math-linux.com/latex-26/faq/latex-faq/article/latex-derivatives-limits-sums-products-and-integrals
+- https://www.tutorialspoint.com/tex_commands
+- https://www.giss.nasa.gov/tools/latex/ltx-86.html
+- https://ftp.gwdg.de/pub/ctan/info/l2tabu/english/l2tabuen.pdf
+
+### MathML
+
+- http://www.xmlmind.com/tutorials/MathML/
+
+
+## Author
+
+- [Ronie Martinez](mailto:ronmarti18@gmail.com)
+
+## Contributors ✨
+
+Thanks goes to these wonderful people ([emoji key](https://allcontributors.org/docs/en/emoji-key)):
+
+This project follows the [all-contributors](https://github.com/all-contributors/all-contributors) specification. Contributions of any kind welcome!
+
diff --git a/evalkit_tf437/lib/python3.10/site-packages/latex2mathml-3.77.0.dist-info/RECORD b/evalkit_tf437/lib/python3.10/site-packages/latex2mathml-3.77.0.dist-info/RECORD
new file mode 100644
index 0000000000000000000000000000000000000000..e530cf3eb412e67ae78d4c974e1bb0fa988014cf
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/latex2mathml-3.77.0.dist-info/RECORD
@@ -0,0 +1,25 @@
+../../../bin/l2m,sha256=A6cOPrAVRl9g-TTDoMgee4CvtzOWe52sOvGt69idfXc,240
+../../../bin/latex2mathml,sha256=A6cOPrAVRl9g-TTDoMgee4CvtzOWe52sOvGt69idfXc,240
+latex2mathml-3.77.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+latex2mathml-3.77.0.dist-info/LICENSE,sha256=SsHh2gf2s0PVQZHF4XFoQFNOCFkbP56uyQ1oAM3EdUM,1071
+latex2mathml-3.77.0.dist-info/METADATA,sha256=8I_5snYGZtFTF_ZdrISzE_x4L1PcEc9WFhHJfwkOUhA,14844
+latex2mathml-3.77.0.dist-info/RECORD,,
+latex2mathml-3.77.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+latex2mathml-3.77.0.dist-info/WHEEL,sha256=FMvqSimYX_P7y0a7UY-_Mc83r5zkBZsCYPm7Lr0Bsq4,88
+latex2mathml-3.77.0.dist-info/entry_points.txt,sha256=8cs8b08QDIhqIeEJaUBPeXlBTEE9a-d8ybiKa5ZGMak,92
+latex2mathml/__init__.py,sha256=xA5GqWgZqbTv0TUeOiwBpOk8D48ibElsZI1OGTEnhuk,79
+latex2mathml/__pycache__/__init__.cpython-310.pyc,,
+latex2mathml/__pycache__/commands.cpython-310.pyc,,
+latex2mathml/__pycache__/converter.cpython-310.pyc,,
+latex2mathml/__pycache__/exceptions.cpython-310.pyc,,
+latex2mathml/__pycache__/symbols_parser.cpython-310.pyc,,
+latex2mathml/__pycache__/tokenizer.cpython-310.pyc,,
+latex2mathml/__pycache__/walker.cpython-310.pyc,,
+latex2mathml/commands.py,sha256=ZyBodQ-PqYfg4-LG6YipiDywzc5iUlYCLA1WTV5La04,13087
+latex2mathml/converter.py,sha256=a3CTyam1xFz2oPkNVxklSXuPny_DmqnALLsDTRLVmeI,23219
+latex2mathml/exceptions.py,sha256=zF1xPclVRSTzvZHPsujMv_bNITnHNW7Jvi4wg0aBQ60,645
+latex2mathml/py.typed,sha256=JZLABTSp2pMHV6dDrYko_mqeQOmCl7_w46Ro2OaynOo,10
+latex2mathml/symbols_parser.py,sha256=xZr_xYVWOj1-3GpdVKd0ly44AAkc2wvSmIUvbHBcJfg,2954
+latex2mathml/tokenizer.py,sha256=FtlIzWm279chdDE6SrIohMDeHIcwT8GCeyNqexxHu94,2360
+latex2mathml/unimathsymbols.txt,sha256=dO-JVO-vAdwfsTExPvbX97dBxPeOMPbW02ALndWlWu4,216334
+latex2mathml/walker.py,sha256=3KZEjg4WbWQUogHmKO5yp0Ziu2n_m1JvyQs3sQc5fPA,19969
diff --git a/evalkit_tf437/lib/python3.10/site-packages/latex2mathml-3.77.0.dist-info/REQUESTED b/evalkit_tf437/lib/python3.10/site-packages/latex2mathml-3.77.0.dist-info/REQUESTED
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/evalkit_tf437/lib/python3.10/site-packages/latex2mathml-3.77.0.dist-info/WHEEL b/evalkit_tf437/lib/python3.10/site-packages/latex2mathml-3.77.0.dist-info/WHEEL
new file mode 100644
index 0000000000000000000000000000000000000000..7c881525d384f1537e81e8a783c8433a748a7089
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/latex2mathml-3.77.0.dist-info/WHEEL
@@ -0,0 +1,4 @@
+Wheel-Version: 1.0
+Generator: poetry-core 1.8.1
+Root-Is-Purelib: true
+Tag: py3-none-any
diff --git a/evalkit_tf437/lib/python3.10/site-packages/latex2mathml-3.77.0.dist-info/entry_points.txt b/evalkit_tf437/lib/python3.10/site-packages/latex2mathml-3.77.0.dist-info/entry_points.txt
new file mode 100644
index 0000000000000000000000000000000000000000..85a36b00804610159704e4c3a7ed8a7f8df730b3
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/latex2mathml-3.77.0.dist-info/entry_points.txt
@@ -0,0 +1,4 @@
+[console_scripts]
+l2m=latex2mathml.converter:main
+latex2mathml=latex2mathml.converter:main
+
diff --git a/evalkit_tf437/lib/python3.10/site-packages/open_clip_train/__pycache__/__init__.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/open_clip_train/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..cfb3a4f7f11df6c992f42390d0d2c4b1a64a6475
Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/open_clip_train/__pycache__/__init__.cpython-310.pyc differ
diff --git a/evalkit_tf437/lib/python3.10/site-packages/open_clip_train/__pycache__/data.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/open_clip_train/__pycache__/data.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3696120bc3a00e4b589cc1b5a0ba0bed63fac7b9
Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/open_clip_train/__pycache__/data.cpython-310.pyc differ
diff --git a/evalkit_tf437/lib/python3.10/site-packages/open_clip_train/__pycache__/distributed.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/open_clip_train/__pycache__/distributed.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..306c3e7cb3b83dd579769a166492943e2280e94d
Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/open_clip_train/__pycache__/distributed.cpython-310.pyc differ
diff --git a/evalkit_tf437/lib/python3.10/site-packages/open_clip_train/__pycache__/file_utils.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/open_clip_train/__pycache__/file_utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7090fb806b8ad8936198723e3499dce00a01a166
Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/open_clip_train/__pycache__/file_utils.cpython-310.pyc differ
diff --git a/evalkit_tf437/lib/python3.10/site-packages/open_clip_train/__pycache__/logger.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/open_clip_train/__pycache__/logger.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d4e2d8296b339a03f7c5fbdd97453bf70e258138
Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/open_clip_train/__pycache__/logger.cpython-310.pyc differ
diff --git a/evalkit_tf437/lib/python3.10/site-packages/open_clip_train/__pycache__/main.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/open_clip_train/__pycache__/main.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..37d54c360189d90e748907edb02797cd8f35407f
Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/open_clip_train/__pycache__/main.cpython-310.pyc differ
diff --git a/evalkit_tf437/lib/python3.10/site-packages/open_clip_train/__pycache__/params.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/open_clip_train/__pycache__/params.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0240697a0254fc485354c5eb762a9278a5b53cf2
Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/open_clip_train/__pycache__/params.cpython-310.pyc differ
diff --git a/evalkit_tf437/lib/python3.10/site-packages/open_clip_train/__pycache__/precision.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/open_clip_train/__pycache__/precision.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..17d3eefc7f50ad4f8676a1802017e85f3ffc8980
Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/open_clip_train/__pycache__/precision.cpython-310.pyc differ
diff --git a/evalkit_tf437/lib/python3.10/site-packages/open_clip_train/__pycache__/profiler.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/open_clip_train/__pycache__/profiler.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..37d33dea316e95513929276ebc87da8601d47a46
Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/open_clip_train/__pycache__/profiler.cpython-310.pyc differ
diff --git a/evalkit_tf437/lib/python3.10/site-packages/open_clip_train/__pycache__/train.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/open_clip_train/__pycache__/train.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2738f791fcd4041c2e6a6204f26f9acaefc952a6
Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/open_clip_train/__pycache__/train.cpython-310.pyc differ
diff --git a/evalkit_tf437/lib/python3.10/site-packages/open_clip_train/__pycache__/zero_shot.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/open_clip_train/__pycache__/zero_shot.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..10591bc3c40c805ce2e4da4a4ff378df73ed4e24
Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/open_clip_train/__pycache__/zero_shot.cpython-310.pyc differ
diff --git a/evalkit_tf437/lib/python3.10/site-packages/open_clip_train/data.py b/evalkit_tf437/lib/python3.10/site-packages/open_clip_train/data.py
new file mode 100644
index 0000000000000000000000000000000000000000..07b9fee961d211eeb0c60c0fbd595a3a0ba12518
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/open_clip_train/data.py
@@ -0,0 +1,564 @@
+import ast
+import json
+import logging
+import math
+import os
+import random
+import sys
+import braceexpand
+from dataclasses import dataclass
+from multiprocessing import Value
+
+import numpy as np
+import pandas as pd
+import torch
+import torchvision.datasets as datasets
+import webdataset as wds
+from PIL import Image
+from torch.utils.data import Dataset, DataLoader, SubsetRandomSampler, IterableDataset, get_worker_info
+from torch.utils.data.distributed import DistributedSampler
+from webdataset.filters import _shuffle
+from webdataset.tariterators import base_plus_ext, url_opener, tar_file_expander, valid_sample
+
+try:
+ import horovod.torch as hvd
+except ImportError:
+ hvd = None
+
+
+class CsvDataset(Dataset):
+ def __init__(self, input_filename, transforms, img_key, caption_key, sep="\t", tokenizer=None):
+ logging.debug(f'Loading csv data from {input_filename}.')
+ df = pd.read_csv(input_filename, sep=sep)
+
+ self.images = df[img_key].tolist()
+ self.captions = df[caption_key].tolist()
+ self.transforms = transforms
+ logging.debug('Done loading data.')
+
+ self.tokenize = tokenizer
+
+ def __len__(self):
+ return len(self.captions)
+
+ def __getitem__(self, idx):
+ images = self.transforms(Image.open(str(self.images[idx])))
+ texts = self.tokenize([str(self.captions[idx])])[0]
+ return images, texts
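+
+
+# Illustrative sketch (not part of upstream training code): the CSV is expected to contain one
+# image-path column and one caption column, e.g.
+#
+#   import open_clip
+#   _, _, preprocess = open_clip.create_model_and_transforms("ViT-B-32")
+#   tokenizer = open_clip.get_tokenizer("ViT-B-32")
+#   ds = CsvDataset("train.csv", preprocess, img_key="filepath", caption_key="title", tokenizer=tokenizer)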
+
+
+class SharedEpoch:
+ def __init__(self, epoch: int = 0):
+ self.shared_epoch = Value('i', epoch)
+
+ def set_value(self, epoch):
+ self.shared_epoch.value = epoch
+
+ def get_value(self):
+ return self.shared_epoch.value
+
+
+@dataclass
+class DataInfo:
+ dataloader: DataLoader
+ sampler: DistributedSampler = None
+ shared_epoch: SharedEpoch = None
+
+ def set_epoch(self, epoch):
+ if self.shared_epoch is not None:
+ self.shared_epoch.set_value(epoch)
+ if self.sampler is not None and isinstance(self.sampler, DistributedSampler):
+ self.sampler.set_epoch(epoch)
+
+
+def expand_urls(urls, weights=None):
+ if weights is None:
+ expanded_urls = wds.shardlists.expand_urls(urls)
+ return expanded_urls, None
+ if isinstance(urls, str):
+ urllist = urls.split("::")
+ weights = weights.split('::')
+ assert len(weights) == len(urllist),\
+ f"Expected the number of data components ({len(urllist)}) and weights ({len(weights)}) to match."
+ weights = [float(weight) for weight in weights]
+ all_urls, all_weights = [], []
+ for url, weight in zip(urllist, weights):
+ expanded_url = list(braceexpand.braceexpand(url))
+ expanded_weights = [weight for _ in expanded_url]
+ all_urls.extend(expanded_url)
+ all_weights.extend(expanded_weights)
+ return all_urls, all_weights
+ else:
+ all_urls = list(urls)
+ return all_urls, weights
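+
+
+# Illustrative sketch (not part of upstream training code): "::" separates data sources and
+# brace notation expands into individual shards, each shard inheriting its source's weight:
+#
+#   urls, weights = expand_urls("a-{0..1}.tar::b.tar", weights="2::1")
+#   # urls == ["a-0.tar", "a-1.tar", "b.tar"], weights == [2.0, 2.0, 1.0]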
+
+
+def get_dataset_size(shards):
+ shards_list, _ = expand_urls(shards)
+ dir_path = os.path.dirname(shards_list[0])
+ sizes_filename = os.path.join(dir_path, 'sizes.json')
+ len_filename = os.path.join(dir_path, '__len__')
+ if os.path.exists(sizes_filename):
+ sizes = json.load(open(sizes_filename, 'r'))
+ total_size = sum([int(sizes[os.path.basename(shard)]) for shard in shards_list])
+ elif os.path.exists(len_filename):
+ # FIXME this used to be eval(open(...)) but that seemed rather unsafe
+ total_size = ast.literal_eval(open(len_filename, 'r').read())
+ else:
+ total_size = None # num samples undefined
+ # some common dataset sizes (at the time of the authors' last download)
+ # CC3M (train): 2905954
+ # CC12M: 10968539
+ # LAION-400M: 407332084
+ # LAION-2B (english): 2170337258
+ num_shards = len(shards_list)
+ return total_size, num_shards
+
+
+def get_imagenet(args, preprocess_fns, split):
+ assert split in ["train", "val", "v2"]
+ is_train = split == "train"
+ preprocess_train, preprocess_val = preprocess_fns
+
+ if split == "v2":
+ from imagenetv2_pytorch import ImageNetV2Dataset
+ dataset = ImageNetV2Dataset(location=args.imagenet_v2, transform=preprocess_val)
+ else:
+ if is_train:
+ data_path = args.imagenet_train
+ preprocess_fn = preprocess_train
+ else:
+ data_path = args.imagenet_val
+ preprocess_fn = preprocess_val
+ assert data_path
+
+ dataset = datasets.ImageFolder(data_path, transform=preprocess_fn)
+
+ if is_train:
+ idxs = np.zeros(len(dataset.targets))
+ target_array = np.array(dataset.targets)
+ k = 50
+ for c in range(1000):
+ m = target_array == c
+ n = len(idxs[m])
+ arr = np.zeros(n)
+ arr[:k] = 1
+ np.random.shuffle(arr)
+ idxs[m] = arr
+
+ idxs = idxs.astype('int')
+ sampler = SubsetRandomSampler(np.where(idxs)[0])
+ else:
+ sampler = None
+
+ dataloader = torch.utils.data.DataLoader(
+ dataset,
+ batch_size=args.batch_size,
+ num_workers=args.workers,
+ sampler=sampler,
+ )
+
+ return DataInfo(dataloader=dataloader, sampler=sampler)
+
+
+def count_samples(dataloader):
+ os.environ["WDS_EPOCH"] = "0"
+ n_elements, n_batches = 0, 0
+ for images, texts in dataloader:
+ n_batches += 1
+ n_elements += len(images)
+ assert len(images) == len(texts)
+ return n_elements, n_batches
+
+
+def filter_no_caption_or_no_image(sample):
+ has_caption = ('txt' in sample)
+ has_image = ('png' in sample or 'jpg' in sample or 'jpeg' in sample or 'webp' in sample)
+ return has_caption and has_image
+
+
+def log_and_continue(exn):
+ """Call in an exception handler to ignore any exception, issue a warning, and continue."""
+ logging.warning(f'Handling webdataset error ({repr(exn)}). Ignoring.')
+ return True
+
+
+def group_by_keys_nothrow(data, keys=base_plus_ext, lcase=True, suffixes=None, handler=None):
+ """Return function over iterator that groups key, value pairs into samples.
+
+ :param keys: function that splits the key into key and extension (base_plus_ext)
+ :param lcase: convert suffixes to lower case (Default value = True)
+ """
+ current_sample = None
+ for filesample in data:
+ assert isinstance(filesample, dict)
+ fname, value = filesample["fname"], filesample["data"]
+ prefix, suffix = keys(fname)
+ if prefix is None:
+ continue
+ if lcase:
+ suffix = suffix.lower()
+ # FIXME webdataset version throws if suffix in current_sample, but we have a potential for
+ # this happening in the current LAION400m dataset if a tar ends with same prefix as the next
+ # begins; this is rare, but it can happen since prefixes aren't unique across tar files in that dataset
+ if current_sample is None or prefix != current_sample["__key__"] or suffix in current_sample:
+ if valid_sample(current_sample):
+ yield current_sample
+ current_sample = dict(__key__=prefix, __url__=filesample["__url__"])
+ if suffixes is None or suffix in suffixes:
+ current_sample[suffix] = value
+ if valid_sample(current_sample):
+ yield current_sample
+
+
+def tarfile_to_samples_nothrow(src, handler=log_and_continue):
+ # NOTE this is a re-impl of the webdataset impl with group_by_keys that doesn't throw
+ streams = url_opener(src, handler=handler)
+ files = tar_file_expander(streams, handler=handler)
+ samples = group_by_keys_nothrow(files, handler=handler)
+ return samples
+
+
+def pytorch_worker_seed(increment=0):
+ """get dataloader worker seed from pytorch"""
+ worker_info = get_worker_info()
+ if worker_info is not None:
+ # favour using the seed already created for pytorch dataloader workers if it exists
+ seed = worker_info.seed
+ if increment:
+ # space out seed increments so they can't overlap across workers in different iterations
+ seed += increment * max(1, worker_info.num_workers)
+ return seed
+ # fallback to wds rank based seed
+ return wds.utils.pytorch_worker_seed()
+
+
+_SHARD_SHUFFLE_SIZE = 2000
+_SHARD_SHUFFLE_INITIAL = 500
+_SAMPLE_SHUFFLE_SIZE = 5000
+_SAMPLE_SHUFFLE_INITIAL = 1000
+
+
+class detshuffle2(wds.PipelineStage):
+ def __init__(
+ self,
+ bufsize=1000,
+ initial=100,
+ seed=0,
+ epoch=-1,
+ ):
+ self.bufsize = bufsize
+ self.initial = initial
+ self.seed = seed
+ self.epoch = epoch
+
+ def run(self, src):
+ if isinstance(self.epoch, SharedEpoch):
+ epoch = self.epoch.get_value()
+ else:
+ # NOTE: this epoch tracking is problematic in a multiprocess (dataloader workers or train)
+ # situation as different workers may wrap at different times (or not at all).
+ self.epoch += 1
+ epoch = self.epoch
+ rng = random.Random()
+ if self.seed < 0:
+ # If seed is negative, we use the worker's seed; this will be different across all nodes/workers
+ seed = pytorch_worker_seed(epoch)
+ else:
+ # This seed is deterministic AND the same across all nodes/workers in each epoch
+ seed = self.seed + epoch
+ rng.seed(seed)
+ return _shuffle(src, self.bufsize, self.initial, rng)
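+
+
+# Illustrative sketch (not part of upstream training code): with a non-negative seed the shuffle
+# order depends only on (seed, epoch), so workers sharing the same SharedEpoch store shuffle
+# their shard lists identically within an epoch:
+#
+#   shared_epoch = SharedEpoch(epoch=0)
+#   shard_shuffle = detshuffle2(bufsize=_SHARD_SHUFFLE_SIZE, initial=_SHARD_SHUFFLE_INITIAL, seed=42, epoch=shared_epoch)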
+
+
+class ResampledShards2(IterableDataset):
+ """An iterable dataset yielding a list of urls."""
+
+ def __init__(
+ self,
+ urls,
+ weights=None,
+ nshards=sys.maxsize,
+ worker_seed=None,
+ deterministic=False,
+ epoch=-1,
+ ):
+ """Sample shards from the shard list with replacement.
+
+ :param urls: a list of URLs as a Python list or brace notation string
+ """
+ super().__init__()
+ urls, weights = expand_urls(urls, weights)
+ self.urls = urls
+ self.weights = weights
+ if self.weights is not None:
+ assert len(self.urls) == len(self.weights),\
+ f"Number of urls {len(self.urls)} and weights {len(self.weights)} should match."
+ assert isinstance(self.urls[0], str)
+ self.nshards = nshards
+ self.rng = random.Random()
+ self.worker_seed = worker_seed
+ self.deterministic = deterministic
+ self.epoch = epoch
+
+ def __iter__(self):
+ """Return an iterator over the shards."""
+ if isinstance(self.epoch, SharedEpoch):
+ epoch = self.epoch.get_value()
+ else:
+            # NOTE: this epoch tracking is problematic in a multiprocess (dataloader workers or train)
+            # situation, as different workers may wrap at different times (or not at all).
+ self.epoch += 1
+ epoch = self.epoch
+ if self.deterministic:
+ # reset seed w/ epoch if deterministic
+ if self.worker_seed is None:
+                # pytorch worker seed should be deterministic due to being init by args.seed + rank + worker id
+ seed = pytorch_worker_seed(epoch)
+ else:
+ seed = self.worker_seed() + epoch
+ self.rng.seed(seed)
+ for _ in range(self.nshards):
+ if self.weights is None:
+ yield dict(url=self.rng.choice(self.urls))
+ else:
+ yield dict(url=self.rng.choices(self.urls, weights=self.weights, k=1)[0])
+
+
+def get_wds_dataset(args, preprocess_img, is_train, epoch=0, floor=False, tokenizer=None):
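+    """Build the webdataset pipeline and WebLoader for the train or val shards described by args."""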
+ input_shards = args.train_data if is_train else args.val_data
+ assert input_shards is not None
+ resampled = getattr(args, 'dataset_resampled', False) and is_train
+
+ num_shards = None
+ if is_train:
+ if args.train_num_samples is not None:
+ num_samples = args.train_num_samples
+ else:
+ num_samples, num_shards = get_dataset_size(input_shards)
+ if not num_samples:
+ raise RuntimeError(
+ 'Currently, the number of dataset samples must be specified for the training dataset. '
+ 'Please specify it via `--train-num-samples` if no dataset length info is present.')
+ else:
+ # Eval will just exhaust the iterator if the size is not specified.
+ num_samples = args.val_num_samples or 0
+
+ shared_epoch = SharedEpoch(epoch=epoch) # create a shared epoch store to sync epoch to dataloader worker proc
+
+ if is_train and args.train_data_upsampling_factors is not None:
+ assert resampled, "--train_data_upsampling_factors is only supported when sampling with replacement (with --dataset-resampled)."
+
+ if resampled:
+ pipeline = [ResampledShards2(
+ input_shards,
+ weights=args.train_data_upsampling_factors,
+ deterministic=True,
+ epoch=shared_epoch,
+ )]
+ else:
+ pipeline = [wds.SimpleShardList(input_shards)]
+
+ # at this point we have an iterator over all the shards
+ if is_train:
+ if not resampled:
+ pipeline.extend([
+ detshuffle2(
+ bufsize=_SHARD_SHUFFLE_SIZE,
+ initial=_SHARD_SHUFFLE_INITIAL,
+ seed=args.seed,
+ epoch=shared_epoch,
+ ),
+ wds.split_by_node,
+ wds.split_by_worker,
+ ])
+ pipeline.extend([
+ # at this point, we have an iterator over the shards assigned to each worker at each node
+ tarfile_to_samples_nothrow, # wds.tarfile_to_samples(handler=log_and_continue),
+ wds.shuffle(
+ bufsize=_SAMPLE_SHUFFLE_SIZE,
+ initial=_SAMPLE_SHUFFLE_INITIAL,
+ ),
+ ])
+ else:
+ pipeline.extend([
+ wds.split_by_worker,
+ # at this point, we have an iterator over the shards assigned to each worker
+ wds.tarfile_to_samples(handler=log_and_continue),
+ ])
+ pipeline.extend([
+ wds.select(filter_no_caption_or_no_image),
+ wds.decode("pilrgb", handler=log_and_continue),
+ wds.rename(image="jpg;png;jpeg;webp", text="txt"),
+ wds.map_dict(image=preprocess_img, text=lambda text: tokenizer(text)[0]),
+ wds.to_tuple("image", "text"),
+ wds.batched(args.batch_size, partial=not is_train)
+ ])
+
+ dataset = wds.DataPipeline(*pipeline)
+
+ if is_train:
+ if not resampled:
+ num_shards = num_shards or len(expand_urls(input_shards)[0])
+ assert num_shards >= args.workers * args.world_size, 'number of shards must be >= total workers'
+ # roll over and repeat a few samples to get same number of full batches on each node
+ round_fn = math.floor if floor else math.ceil
+ global_batch_size = args.batch_size * args.world_size
+ num_batches = round_fn(num_samples / global_batch_size)
+ num_workers = max(1, args.workers)
+ num_worker_batches = round_fn(num_batches / num_workers) # per dataloader worker
+ num_batches = num_worker_batches * num_workers
+ num_samples = num_batches * global_batch_size
+ dataset = dataset.with_epoch(num_worker_batches) # each worker is iterating over this
+ else:
+ # last batches are partial, eval is done on single (master) node
+ num_batches = math.ceil(num_samples / args.batch_size)
+
+ dataloader = wds.WebLoader(
+ dataset,
+ batch_size=None,
+ shuffle=False,
+ num_workers=args.workers,
+ persistent_workers=args.workers > 0,
+ )
+
+ # FIXME not clear which approach is better, with_epoch before vs after dataloader?
+ # hoping to resolve via https://github.com/webdataset/webdataset/issues/169
+ # if is_train:
+ # # roll over and repeat a few samples to get same number of full batches on each node
+ # global_batch_size = args.batch_size * args.world_size
+ # num_batches = math.ceil(num_samples / global_batch_size)
+ # num_workers = max(1, args.workers)
+ # num_batches = math.ceil(num_batches / num_workers) * num_workers
+ # num_samples = num_batches * global_batch_size
+ # dataloader = dataloader.with_epoch(num_batches)
+ # else:
+ # # last batches are partial, eval is done on single (master) node
+ # num_batches = math.ceil(num_samples / args.batch_size)
+
+ # add meta-data to dataloader instance for convenience
+ dataloader.num_batches = num_batches
+ dataloader.num_samples = num_samples
+
+ return DataInfo(dataloader=dataloader, shared_epoch=shared_epoch)
+
+
+def get_csv_dataset(args, preprocess_fn, is_train, epoch=0, tokenizer=None):
+ input_filename = args.train_data if is_train else args.val_data
+ assert input_filename
+ dataset = CsvDataset(
+ input_filename,
+ preprocess_fn,
+ img_key=args.csv_img_key,
+ caption_key=args.csv_caption_key,
+ sep=args.csv_separator,
+ tokenizer=tokenizer
+ )
+ num_samples = len(dataset)
+ sampler = DistributedSampler(dataset) if args.distributed and is_train else None
+ shuffle = is_train and sampler is None
+
+ dataloader = DataLoader(
+ dataset,
+ batch_size=args.batch_size,
+ shuffle=shuffle,
+ num_workers=args.workers,
+ pin_memory=True,
+ sampler=sampler,
+ drop_last=is_train,
+ )
+ dataloader.num_samples = num_samples
+ dataloader.num_batches = len(dataloader)
+
+ return DataInfo(dataloader, sampler)
+
+
+class SyntheticDataset(Dataset):
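+    """Dataset that returns the same dummy image/caption pair every time; useful for benchmarking without real data."""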
+
+ def __init__(
+ self,
+ transform=None,
+ image_size=(224, 224),
+ caption="Dummy caption",
+ dataset_size=100,
+ tokenizer=None,
+ ):
+ self.transform = transform
+ self.image_size = image_size
+ self.caption = caption
+ self.image = Image.new('RGB', image_size)
+ self.dataset_size = dataset_size
+
+ self.preprocess_txt = lambda text: tokenizer(text)[0]
+
+ def __len__(self):
+ return self.dataset_size
+
+ def __getitem__(self, idx):
+ if self.transform is not None:
+ image = self.transform(self.image)
+ return image, self.preprocess_txt(self.caption)
+
+
+def get_synthetic_dataset(args, preprocess_fn, is_train, epoch=0, tokenizer=None):
+ image_size = preprocess_fn.transforms[0].size
+ dataset = SyntheticDataset(
+ transform=preprocess_fn, image_size=image_size, dataset_size=args.train_num_samples, tokenizer=tokenizer)
+ num_samples = len(dataset)
+ sampler = DistributedSampler(dataset) if args.distributed and is_train else None
+ shuffle = is_train and sampler is None
+
+ dataloader = DataLoader(
+ dataset,
+ batch_size=args.batch_size,
+ shuffle=shuffle,
+ num_workers=args.workers,
+ pin_memory=True,
+ sampler=sampler,
+ drop_last=is_train,
+ )
+ dataloader.num_samples = num_samples
+ dataloader.num_batches = len(dataloader)
+
+ return DataInfo(dataloader, sampler)
+
+
+def get_dataset_fn(data_path, dataset_type):
+ if dataset_type == "webdataset":
+ return get_wds_dataset
+ elif dataset_type == "csv":
+ return get_csv_dataset
+ elif dataset_type == "synthetic":
+ return get_synthetic_dataset
+ elif dataset_type == "auto":
+ ext = data_path.split('.')[-1]
+ if ext in ['csv', 'tsv']:
+ return get_csv_dataset
+ elif ext in ['tar']:
+ return get_wds_dataset
+ else:
+ raise ValueError(
+ f"Tried to figure out dataset type, but failed for extension {ext}.")
+ else:
+ raise ValueError(f"Unsupported dataset type: {dataset_type}")
+
+
+def get_data(args, preprocess_fns, epoch=0, tokenizer=None):
+ preprocess_train, preprocess_val = preprocess_fns
+ data = {}
+
+ if args.train_data or args.dataset_type == "synthetic":
+ data["train"] = get_dataset_fn(args.train_data, args.dataset_type)(
+ args, preprocess_train, is_train=True, epoch=epoch, tokenizer=tokenizer)
+
+ if args.val_data:
+ data["val"] = get_dataset_fn(args.val_data, args.dataset_type)(
+ args, preprocess_val, is_train=False, tokenizer=tokenizer)
+
+ if args.imagenet_val is not None:
+ data["imagenet-val"] = get_imagenet(args, preprocess_fns, "val")
+
+ if args.imagenet_v2 is not None:
+ data["imagenet-v2"] = get_imagenet(args, preprocess_fns, "v2")
+
+ return data
diff --git a/evalkit_tf437/lib/python3.10/site-packages/open_clip_train/distributed.py b/evalkit_tf437/lib/python3.10/site-packages/open_clip_train/distributed.py
new file mode 100644
index 0000000000000000000000000000000000000000..2fad34575f0965f1082752d9df66ceeb2f109344
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/open_clip_train/distributed.py
@@ -0,0 +1,218 @@
+import os
+import warnings
+from typing import Optional
+
+import torch
+import torch.distributed as dist
+
+try:
+ import horovod.torch as hvd
+except ImportError:
+ hvd = None
+
+
+def is_global_master(args):
+ return args.rank == 0
+
+
+def is_local_master(args):
+ return args.local_rank == 0
+
+
+def is_master(args, local=False):
+ return is_local_master(args) if local else is_global_master(args)
+
+
+def is_device_available(device):
+ device_type = torch.device(device).type
+ is_avail = False
+ is_known = False
+ if device_type == 'cuda':
+ is_avail = torch.cuda.is_available()
+ is_known = True
+ elif device_type == 'npu':
+ # NOTE autoload device extension needed for this not to error out on this check
+        # NOTE the autoloaded device extension must be installed for this check not to error out
+ is_known = True
+ elif device_type == 'mps':
+ is_avail = torch.backends.mps.is_available()
+ is_known = True
+ elif device_type == 'cpu':
+ is_avail = True
+ is_known = True
+
+ return is_avail, is_known
+
+
+def set_device(device):
+ if device.startswith('cuda:'):
+ torch.cuda.set_device(device)
+ elif device.startswith('npu:'):
+ torch.npu.set_device(device)
+
+
+def is_using_horovod():
+ # NOTE w/ horovod run, OMPI vars should be set, but w/ SLURM PMI vars will be set
+ # Differentiating between horovod and DDP use via SLURM may not be possible, so horovod arg still required...
+ ompi_vars = ["OMPI_COMM_WORLD_RANK", "OMPI_COMM_WORLD_SIZE"]
+ pmi_vars = ["PMI_RANK", "PMI_SIZE"]
+ if all([var in os.environ for var in ompi_vars]) or all([var in os.environ for var in pmi_vars]):
+ return True
+ else:
+ return False
+
+
+def is_using_distributed():
+ if 'WORLD_SIZE' in os.environ:
+ return int(os.environ['WORLD_SIZE']) > 1
+ if 'SLURM_NTASKS' in os.environ:
+ return int(os.environ['SLURM_NTASKS']) > 1
+ return False
+
+
+def world_info_from_env():
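+    """Best-effort (local_rank, global_rank, world_size) from torchrun / MPI / SLURM / OpenMPI environment variables."""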
+ local_rank = 0
+ for v in ('LOCAL_RANK', 'MPI_LOCALRANKID', 'SLURM_LOCALID', 'OMPI_COMM_WORLD_LOCAL_RANK'):
+ if v in os.environ:
+ local_rank = int(os.environ[v])
+ break
+ global_rank = 0
+ for v in ('RANK', 'PMI_RANK', 'SLURM_PROCID', 'OMPI_COMM_WORLD_RANK'):
+ if v in os.environ:
+ global_rank = int(os.environ[v])
+ break
+ world_size = 1
+ for v in ('WORLD_SIZE', 'PMI_SIZE', 'SLURM_NTASKS', 'OMPI_COMM_WORLD_SIZE'):
+ if v in os.environ:
+ world_size = int(os.environ[v])
+ break
+
+ return local_rank, global_rank, world_size
+
+
+def init_distributed_device(args):
+ # Distributed training = training on more than one GPU.
+ # Works in both single and multi-node scenarios.
+ args.distributed = False
+ args.world_size = 1
+ args.rank = 0 # global rank
+ args.local_rank = 0
+ result = init_distributed_device_so(
+ device=getattr(args, 'device', 'cuda'),
+ dist_backend=getattr(args, 'dist_backend', None),
+ dist_url=getattr(args, 'dist_url', None),
+ horovod=getattr(args, 'horovod', False),
+ no_set_device_rank=getattr(args, 'no_set_device_rank', False),
+ )
+ args.device = result['device']
+ args.world_size = result['world_size']
+ args.rank = result['global_rank']
+ args.local_rank = result['local_rank']
+ args.distributed = result['distributed']
+ device = torch.device(args.device)
+ return device
+
+
+def init_distributed_device_so(
+ device: str = 'cuda',
+ dist_backend: Optional[str] = None,
+ dist_url: Optional[str] = None,
+ horovod: bool = False,
+ no_set_device_rank: bool = False,
+):
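+    """Device & process-group init independent of an args namespace; returns a dict with the resolved device, ranks, world size, and distributed flag."""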
+ # Distributed training = training on more than one GPU.
+ # Works in both single and multi-node scenarios.
+ distributed = False
+ world_size = 1
+ global_rank = 0
+ local_rank = 0
+ device_type, *device_idx = device.split(':', maxsplit=1)
+ is_avail, is_known = is_device_available(device_type)
+ if not is_known:
+        warnings.warn(f"Device {device} is not a recognized device type; skipping availability check and trying anyway.")
+ elif not is_avail:
+ warnings.warn(f"Device {device} was not available, falling back to CPU.")
+ device_type = device = 'cpu'
+
+ if horovod:
+ import horovod.torch as hvd
+ assert hvd is not None, "Horovod is not installed"
+ hvd.init()
+ local_rank = int(hvd.local_rank())
+ global_rank = hvd.rank()
+ world_size = hvd.size()
+ distributed = True
+ elif is_using_distributed():
+ if dist_backend is None:
+ dist_backends = {
+ "cuda": "nccl",
+ "hpu": "hccl",
+ "npu": "hccl",
+ "xpu": "ccl",
+ }
+ dist_backend = dist_backends.get(device_type, 'gloo')
+
+ dist_url = dist_url or 'env://'
+
+ if 'SLURM_PROCID' in os.environ:
+ # DDP via SLURM
+ local_rank, global_rank, world_size = world_info_from_env()
+ # SLURM var -> torch.distributed vars in case needed
+ os.environ['LOCAL_RANK'] = str(local_rank)
+ os.environ['RANK'] = str(global_rank)
+ os.environ['WORLD_SIZE'] = str(world_size)
+ torch.distributed.init_process_group(
+ backend=dist_backend,
+ init_method=dist_url,
+ world_size=world_size,
+ rank=global_rank,
+ )
+ else:
+ # DDP via torchrun, torch.distributed.launch
+ local_rank, _, _ = world_info_from_env()
+ torch.distributed.init_process_group(
+ backend=dist_backend,
+ init_method=dist_url,
+ )
+ world_size = torch.distributed.get_world_size()
+ global_rank = torch.distributed.get_rank()
+ distributed = True
+
+ if distributed and not no_set_device_rank and device_type not in ('cpu', 'mps'):
+ # Ignore manually specified device index in distributed mode and
+ # override with resolved local rank, fewer headaches in most setups.
+ if device_idx:
+ warnings.warn(f'device index {device_idx[0]} removed from specified ({device}).')
+ device = f'{device_type}:{local_rank}'
+ set_device(device)
+
+ return dict(
+ device=device,
+ global_rank=global_rank,
+ local_rank=local_rank,
+ world_size=world_size,
+ distributed=distributed,
+ )
+
+
+def broadcast_object(args, obj, src=0):
+ # broadcast a pickle-able python object from rank-0 to all ranks
+ if args.horovod:
+ return hvd.broadcast_object(obj, root_rank=src)
+ else:
+ if args.rank == src:
+ objects = [obj]
+ else:
+ objects = [None]
+ dist.broadcast_object_list(objects, src=src)
+ return objects[0]
+
+
+def all_gather_object(args, obj, dst=0):
+ # gather a pickle-able python object across all ranks
+ if args.horovod:
+ return hvd.allgather_object(obj)
+ else:
+ objects = [None for _ in range(args.world_size)]
+ dist.all_gather_object(objects, obj)
+ return objects
diff --git a/evalkit_tf437/lib/python3.10/site-packages/open_clip_train/file_utils.py b/evalkit_tf437/lib/python3.10/site-packages/open_clip_train/file_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..395cf7df0acc164c6851f17834d793f5852d4605
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/open_clip_train/file_utils.py
@@ -0,0 +1,83 @@
+import logging
+import os
+import multiprocessing
+import subprocess
+import time
+import fsspec
+import torch
+from tqdm import tqdm
+
+def remote_sync_s3(local_dir, remote_dir):
+ # skip epoch_latest which can change during sync.
+ result = subprocess.run(["aws", "s3", "sync", local_dir, remote_dir, '--exclude', '*epoch_latest.pt'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ if result.returncode != 0:
+ logging.error(f"Error: Failed to sync with S3 bucket {result.stderr.decode('utf-8')}")
+ return False
+
+    logging.info("Successfully synced with S3 bucket")
+ return True
+
+def remote_sync_fsspec(local_dir, remote_dir):
+ # FIXME currently this is slow and not recommended. Look into speeding up.
+ a = fsspec.get_mapper(local_dir)
+ b = fsspec.get_mapper(remote_dir)
+
+ for k in a:
+ # skip epoch_latest which can change during sync.
+ if 'epoch_latest.pt' in k:
+ continue
+
+ logging.info(f'Attempting to sync {k}')
+ if k in b and len(a[k]) == len(b[k]):
+ logging.debug(f'Skipping remote sync for {k}.')
+ continue
+
+ try:
+            b[k] = a[k]
+            logging.info(f'Successful sync for {k}.')
+ except Exception as e:
+ logging.info(f'Error during remote sync for {k}: {e}')
+ return False
+
+ return True
+
+def remote_sync(local_dir, remote_dir, protocol):
+ logging.info('Starting remote sync.')
+ if protocol == 's3':
+ return remote_sync_s3(local_dir, remote_dir)
+ elif protocol == 'fsspec':
+ return remote_sync_fsspec(local_dir, remote_dir)
+ else:
+ logging.error('Remote protocol not known')
+ return False
+
+def keep_running_remote_sync(sync_every, local_dir, remote_dir, protocol):
+ while True:
+ time.sleep(sync_every)
+ remote_sync(local_dir, remote_dir, protocol)
+
+def start_sync_process(sync_every, local_dir, remote_dir, protocol):
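+    # return an un-started Process; the caller is responsible for calling .start() on it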
+ p = multiprocessing.Process(target=keep_running_remote_sync, args=(sync_every, local_dir, remote_dir, protocol))
+ return p
+
+# Note: we are not currently using this save function.
+def pt_save(pt_obj, file_path):
+ of = fsspec.open(file_path, "wb")
+ with of as f:
+        torch.save(pt_obj, f)
+
+def pt_load(file_path, map_location=None):
+ if file_path.startswith('s3'):
+ logging.info('Loading remote checkpoint, which may take a bit.')
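+    # fsspec.open transparently handles both local paths and remote URLs (e.g. s3://)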
+ of = fsspec.open(file_path, "rb")
+ with of as f:
+ out = torch.load(f, map_location=map_location)
+ return out
+
+def check_exists(file_path):
+ try:
+ with fsspec.open(file_path):
+ pass
+ except FileNotFoundError:
+ return False
+ return True
diff --git a/evalkit_tf437/lib/python3.10/site-packages/open_clip_train/logger.py b/evalkit_tf437/lib/python3.10/site-packages/open_clip_train/logger.py
new file mode 100644
index 0000000000000000000000000000000000000000..6d9abed92568d459cbc8d6094ae3901935d89621
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/open_clip_train/logger.py
@@ -0,0 +1,26 @@
+import logging
+
+
+def setup_logging(log_file, level, include_host=False):
+ if include_host:
+ import socket
+ hostname = socket.gethostname()
+ formatter = logging.Formatter(
+ f'%(asctime)s | {hostname} | %(levelname)s | %(message)s', datefmt='%Y-%m-%d,%H:%M:%S')
+ else:
+ formatter = logging.Formatter('%(asctime)s | %(levelname)s | %(message)s', datefmt='%Y-%m-%d,%H:%M:%S')
+
+ logging.root.setLevel(level)
+ loggers = [logging.getLogger(name) for name in logging.root.manager.loggerDict]
+ for logger in loggers:
+ logger.setLevel(level)
+
+ stream_handler = logging.StreamHandler()
+ stream_handler.setFormatter(formatter)
+ logging.root.addHandler(stream_handler)
+
+ if log_file:
+ file_handler = logging.FileHandler(filename=log_file)
+ file_handler.setFormatter(formatter)
+ logging.root.addHandler(file_handler)
+
diff --git a/evalkit_tf437/lib/python3.10/site-packages/open_clip_train/main.py b/evalkit_tf437/lib/python3.10/site-packages/open_clip_train/main.py
new file mode 100644
index 0000000000000000000000000000000000000000..7c244ae355000752a722e7674c093e197d98a2f2
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/open_clip_train/main.py
@@ -0,0 +1,514 @@
+import glob
+import logging
+import os
+import re
+import subprocess
+import sys
+import random
+from datetime import datetime
+from functools import partial
+
+import numpy as np
+import torch
+from torch import optim
+
+try:
+ import wandb
+except ImportError:
+ wandb = None
+
+try:
+ import torch.utils.tensorboard as tensorboard
+except ImportError:
+ tensorboard = None
+
+try:
+ import horovod.torch as hvd
+except ImportError:
+ hvd = None
+
+from open_clip import create_model_and_transforms, trace_model, get_tokenizer, create_loss
+from open_clip_train.data import get_data
+from open_clip_train.distributed import is_master, init_distributed_device, broadcast_object
+from open_clip_train.logger import setup_logging
+from open_clip_train.params import parse_args
+from open_clip_train.scheduler import cosine_lr, const_lr, const_lr_cooldown
+from open_clip_train.train import train_one_epoch, evaluate
+from open_clip_train.file_utils import pt_load, check_exists, start_sync_process, remote_sync
+
+
+LATEST_CHECKPOINT_NAME = "epoch_latest.pt"
+
+
+def random_seed(seed=42, rank=0):
+ torch.manual_seed(seed + rank)
+ np.random.seed(seed + rank)
+ random.seed(seed + rank)
+
+
+def natural_key(string_):
+ """See http://www.codinghorror.com/blog/archives/001018.html"""
+ return [int(s) if s.isdigit() else s for s in re.split(r'(\d+)', string_.lower())]
+
+
+def get_latest_checkpoint(path: str, remote : bool):
+    # as written, this glob recurses, so it can pick up checkpoints across multiple sub-folders
+ if remote:
+ result = subprocess.run(["aws", "s3", "ls", path + "/"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ print(result)
+ if result.returncode == 1:
+ return None
+ checkpoints = [os.path.join(path, x.split(' ')[-1]) for x in result.stdout.decode().split('\n')[:-1]]
+ else:
+ checkpoints = glob.glob(path + '**/*.pt', recursive=True)
+ if checkpoints:
+ checkpoints = sorted(checkpoints, key=natural_key)
+ return checkpoints[-1]
+ return None
+
+
+def main(args):
+ args = parse_args(args)
+
+ if torch.cuda.is_available():
+ # This enables tf32 on Ampere GPUs which is only 8% slower than
+ # float16 and almost as accurate as float32
+ # This was a default in pytorch until 1.12
+ torch.backends.cuda.matmul.allow_tf32 = True
+ torch.backends.cudnn.benchmark = True
+ torch.backends.cudnn.deterministic = False
+
+ # fully initialize distributed device environment
+ device = init_distributed_device(args)
+
+ # get the name of the experiments
+ if args.name is None:
+ # sanitize model name for filesystem / uri use, easier if we don't use / in name as a rule?
+ model_name_safe = args.model.replace('/', '-')
+ date_str = datetime.now().strftime("%Y_%m_%d-%H_%M_%S")
+ if args.distributed:
+ # sync date_str from master to all ranks
+ date_str = broadcast_object(args, date_str)
+ args.name = '-'.join([
+ date_str,
+ f"model_{model_name_safe}",
+ f"lr_{args.lr}",
+ f"b_{args.batch_size}",
+ f"j_{args.workers}",
+ f"p_{args.precision}",
+ ])
+
+ resume_latest = args.resume == 'latest'
+ log_base_path = os.path.join(args.logs, args.name)
+ args.log_path = None
+ if is_master(args, local=args.log_local):
+ os.makedirs(log_base_path, exist_ok=True)
+ log_filename = f'out-{args.rank}' if args.log_local else 'out.log'
+ args.log_path = os.path.join(log_base_path, log_filename)
+ if os.path.exists(args.log_path) and not resume_latest:
+            print(
+                "Error. Experiment already exists. Use --name to specify a new experiment."
+            )
+ return -1
+
+ # Setup text logger
+ args.log_level = logging.DEBUG if args.debug else logging.INFO
+ setup_logging(args.log_path, args.log_level)
+
+ # Setup wandb, tensorboard, checkpoint logging
+ args.wandb = 'wandb' in args.report_to or 'all' in args.report_to
+ args.tensorboard = 'tensorboard' in args.report_to or 'all' in args.report_to
+ args.checkpoint_path = os.path.join(log_base_path, "checkpoints")
+ if is_master(args):
+ args.tensorboard_path = os.path.join(log_base_path, "tensorboard") if args.tensorboard else ''
+ for dirname in [args.tensorboard_path, args.checkpoint_path]:
+ if dirname:
+ os.makedirs(dirname, exist_ok=True)
+ else:
+ args.tensorboard_path = ''
+
+ if resume_latest:
+ resume_from = None
+ checkpoint_path = args.checkpoint_path
+ # If using remote_sync, need to check the remote instead of the local checkpoints folder.
+ if args.remote_sync is not None:
+ checkpoint_path = os.path.join(args.remote_sync, args.name, "checkpoints")
+ if args.save_most_recent:
+ print('Error. Cannot use save-most-recent with remote_sync and resume latest.')
+ return -1
+ if args.remote_sync_protocol != 's3':
+ print('Error. Sync protocol not supported when using resume latest.')
+ return -1
+ if is_master(args):
+ # Checking for existing checkpoint via master rank only. It is possible for
+ # different rank processes to see different files if a shared file-system is under
+ # stress, however it's very difficult to fully work around such situations.
+ if args.save_most_recent:
+ # if --save-most-recent flag is set, look for latest at a fixed filename
+ resume_from = os.path.join(checkpoint_path, LATEST_CHECKPOINT_NAME)
+ if not os.path.exists(resume_from):
+ # If no latest checkpoint has been saved yet, don't try to resume
+ resume_from = None
+ else:
+ # otherwise, list checkpoint dir contents and pick the newest checkpoint
+ resume_from = get_latest_checkpoint(checkpoint_path, remote=args.remote_sync is not None)
+ if resume_from:
+ logging.info(f'Found latest resume checkpoint at {resume_from}.')
+ else:
+ logging.info(f'No latest resume checkpoint found in {checkpoint_path}.')
+ if args.distributed:
+ # sync found checkpoint path to all ranks
+ resume_from = broadcast_object(args, resume_from)
+ args.resume = resume_from
+
+ if args.copy_codebase:
+ copy_codebase(args)
+
+    # start the sync process if remote-sync is not None
+ remote_sync_process = None
+ if is_master(args) and args.remote_sync is not None:
+ # first make sure it works
+ result = remote_sync(
+ os.path.join(args.logs, args.name),
+ os.path.join(args.remote_sync, args.name),
+ args.remote_sync_protocol
+ )
+ if result:
+ logging.info('remote sync successful.')
+ else:
+ logging.info('Error: remote sync failed. Exiting.')
+ return -1
+ # if all looks good, start a process to do this every args.remote_sync_frequency seconds
+ remote_sync_process = start_sync_process(
+ args.remote_sync_frequency,
+ os.path.join(args.logs, args.name),
+ os.path.join(args.remote_sync, args.name),
+ args.remote_sync_protocol
+ )
+ remote_sync_process.start()
+
+ if args.precision == 'fp16':
+ logging.warning(
+ 'It is recommended to use AMP mixed-precision instead of FP16. '
+ 'FP16 support needs further verification and tuning, especially for train.')
+
+ if args.horovod:
+ logging.info(
+ f'Running in horovod mode with multiple processes / nodes. Device: {args.device}.'
+ f'Process (global: {args.rank}, local {args.local_rank}), total {args.world_size}.')
+ elif args.distributed:
+ logging.info(
+ f'Running in distributed mode with multiple processes. Device: {args.device}.'
+ f'Process (global: {args.rank}, local {args.local_rank}), total {args.world_size}.')
+ else:
+ logging.info(f'Running with a single process. Device {args.device}.')
+
+ dist_model = None
+ args.distill = args.distill_model is not None and args.distill_pretrained is not None
+ if args.distill:
+ #FIXME: support distillation with grad accum.
+ assert args.accum_freq == 1
+ #FIXME: support distillation with coca.
+ assert 'coca' not in args.model.lower()
+
+ if isinstance(args.force_image_size, (tuple, list)) and len(args.force_image_size) == 1:
+ # arg is nargs, single (square) image size list -> int
+ args.force_image_size = args.force_image_size[0]
+ random_seed(args.seed, 0)
+ model_kwargs = {}
+ if args.siglip:
+ model_kwargs['init_logit_scale'] = np.log(10) # different from CLIP
+ model_kwargs['init_logit_bias'] = -10
+ model, preprocess_train, preprocess_val = create_model_and_transforms(
+ args.model,
+ args.pretrained,
+ precision=args.precision,
+ device=device,
+ jit=args.torchscript,
+ force_quick_gelu=args.force_quick_gelu,
+ force_custom_text=args.force_custom_text,
+ force_patch_dropout=args.force_patch_dropout,
+ force_image_size=args.force_image_size,
+ image_mean=args.image_mean,
+ image_std=args.image_std,
+ image_interpolation=args.image_interpolation,
+ image_resize_mode=args.image_resize_mode, # only effective for inference
+ aug_cfg=args.aug_cfg,
+ pretrained_image=args.pretrained_image,
+ output_dict=True,
+ cache_dir=args.cache_dir,
+ **model_kwargs,
+ )
+ if args.distill:
+ # FIXME: currently assumes the model you're distilling from has the same tokenizer & transforms.
+ dist_model, _, _ = create_model_and_transforms(
+ args.distill_model,
+ args.distill_pretrained,
+ device=device,
+ precision=args.precision,
+ output_dict=True,
+ cache_dir=args.cache_dir,
+ )
+ if args.use_bnb_linear is not None:
+        print('=> using a layer from bitsandbytes.\n'
+              ' this is an experimental feature which requires two extra pip installs\n'
+              ' pip install bitsandbytes triton\n'
+              ' please make sure to use triton 2.0.0')
+ import bitsandbytes as bnb
+ from open_clip.utils import replace_linear
+ print(f'=> replacing linear layers with {args.use_bnb_linear}')
+ linear_replacement_cls = getattr(bnb.nn.triton_based_modules, args.use_bnb_linear)
+ replace_linear(model, linear_replacement_cls)
+ model = model.to(device)
+
+ random_seed(args.seed, args.rank)
+
+ if args.trace:
+ model = trace_model(model, batch_size=args.batch_size, device=device)
+
+ if args.lock_image:
+ # lock image tower as per LiT - https://arxiv.org/abs/2111.07991
+ model.lock_image_tower(
+ unlocked_groups=args.lock_image_unlocked_groups,
+ freeze_bn_stats=args.lock_image_freeze_bn_stats)
+ if args.lock_text:
+ model.lock_text_tower(
+ unlocked_layers=args.lock_text_unlocked_layers,
+ freeze_layer_norm=args.lock_text_freeze_layer_norm)
+
+ if args.grad_checkpointing:
+ model.set_grad_checkpointing()
+
+ if is_master(args):
+ logging.info("Model:")
+ logging.info(f"{str(model)}")
+ logging.info("Params:")
+ params_file = os.path.join(args.logs, args.name, "params.txt")
+ with open(params_file, "w") as f:
+ for name in sorted(vars(args)):
+ val = getattr(args, name)
+ logging.info(f" {name}: {val}")
+ f.write(f"{name}: {val}\n")
+
+ if args.distributed and not args.horovod:
+ if args.use_bn_sync:
+ model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
+ ddp_args = {}
+ if args.ddp_static_graph:
+ # this doesn't exist in older PyTorch, arg only added if enabled
+ ddp_args['static_graph'] = True
+ model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[device], **ddp_args)
+
+ if args.distill:
+ dist_model = torch.nn.parallel.DistributedDataParallel(dist_model, device_ids=[device], **ddp_args)
+
+ # create optimizer and scaler
+ optimizer = None
+ scaler = None
+
+ if args.train_data or args.dataset_type == "synthetic":
+ assert not args.trace, 'Cannot train with traced model'
+
+ exclude = lambda n, p: p.ndim < 2 or "bn" in n or "ln" in n or "bias" in n or 'logit_scale' in n
+ include = lambda n, p: not exclude(n, p)
+
+ named_parameters = list(model.named_parameters())
+ gain_or_bias_params = [p for n, p in named_parameters if exclude(n, p) and p.requires_grad]
+ rest_params = [p for n, p in named_parameters if include(n, p) and p.requires_grad]
+
+ optimizer = optim.AdamW(
+ [
+ {"params": gain_or_bias_params, "weight_decay": 0.},
+ {"params": rest_params, "weight_decay": args.wd},
+ ],
+ lr=args.lr,
+ betas=(args.beta1, args.beta2),
+ eps=args.eps,
+ )
+ if args.horovod:
+ optimizer = hvd.DistributedOptimizer(optimizer, named_parameters=model.named_parameters())
+ hvd.broadcast_parameters(model.state_dict(), root_rank=0)
+ hvd.broadcast_optimizer_state(optimizer, root_rank=0)
+
+ scaler = None
+ if args.precision == "amp":
+ try:
+ scaler = torch.amp.GradScaler(device=device)
+ except (AttributeError, TypeError) as e:
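+            # older PyTorch without torch.amp.GradScaler: fall back to the CUDA-specific scaler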
+ scaler = torch.cuda.amp.GradScaler()
+
+ # optionally resume from a checkpoint
+ start_epoch = 0
+ if args.resume is not None:
+ checkpoint = pt_load(args.resume, map_location='cpu')
+ if 'epoch' in checkpoint:
+ # resuming a train checkpoint w/ epoch and optimizer state
+ start_epoch = checkpoint["epoch"]
+ sd = checkpoint["state_dict"]
+ if not args.distributed and next(iter(sd.items()))[0].startswith('module'):
+ sd = {k[len('module.'):]: v for k, v in sd.items()}
+ model.load_state_dict(sd)
+ if optimizer is not None:
+ optimizer.load_state_dict(checkpoint["optimizer"])
+ if scaler is not None and 'scaler' in checkpoint:
+ scaler.load_state_dict(checkpoint['scaler'])
+ logging.info(f"=> resuming checkpoint '{args.resume}' (epoch {start_epoch})")
+ else:
+ # loading a bare (model only) checkpoint for fine-tune or evaluation
+ model.load_state_dict(checkpoint)
+ logging.info(f"=> loaded checkpoint '{args.resume}' (epoch {start_epoch})")
+
+ # initialize datasets
+ tokenizer = get_tokenizer(args.model, cache_dir=args.cache_dir)
+ data = get_data(
+ args,
+ (preprocess_train, preprocess_val),
+ epoch=start_epoch,
+ tokenizer=tokenizer,
+ )
+ assert len(data), 'At least one train or eval dataset must be specified.'
+
+ # create scheduler if train
+ scheduler = None
+ if 'train' in data and optimizer is not None:
+ total_steps = (data["train"].dataloader.num_batches // args.accum_freq) * args.epochs
+ if args.lr_scheduler == "cosine":
+ scheduler = cosine_lr(optimizer, args.lr, args.warmup, total_steps)
+ elif args.lr_scheduler == "const":
+ scheduler = const_lr(optimizer, args.lr, args.warmup, total_steps)
+ elif args.lr_scheduler == "const-cooldown":
+ assert args.epochs_cooldown is not None,\
+ "Please specify the number of cooldown epochs for this lr schedule."
+ cooldown_steps = (data["train"].dataloader.num_batches // args.accum_freq) * args.epochs_cooldown
+ scheduler = const_lr_cooldown(
+ optimizer, args.lr, args.warmup, total_steps,
+ cooldown_steps, args.lr_cooldown_power, args.lr_cooldown_end)
+ else:
+ logging.error(
+ f'Unknown scheduler, {args.lr_scheduler}. Available options are: cosine, const, const-cooldown.')
+ exit(1)
+
+ # determine if this worker should save logs and checkpoints. only do so if it is rank == 0
+ args.save_logs = args.logs and args.logs.lower() != 'none' and is_master(args)
+ writer = None
+ if args.save_logs and args.tensorboard:
+ assert tensorboard is not None, "Please install tensorboard."
+ writer = tensorboard.SummaryWriter(args.tensorboard_path)
+
+ if args.wandb and is_master(args):
+ assert wandb is not None, 'Please install wandb.'
+ logging.debug('Starting wandb.')
+ args.train_sz = data["train"].dataloader.num_samples
+ if args.val_data is not None:
+ args.val_sz = data["val"].dataloader.num_samples
+ # you will have to configure this for your project!
+ wandb.init(
+ project=args.wandb_project_name,
+ name=args.name,
+ id=args.name,
+ notes=args.wandb_notes,
+ tags=[],
+ resume='auto' if args.resume == "latest" else None,
+ config=vars(args),
+ )
+ if args.debug:
+ wandb.watch(model, log='all')
+ wandb.save(params_file)
+ logging.debug('Finished loading wandb.')
+
+ # Pytorch 2.0 adds '_orig_mod.' prefix to keys of state_dict() of compiled models.
+ # For compatibility, we save state_dict() of the original model, which shares the
+ # weights without the prefix.
+ original_model = model
+ if args.torchcompile:
+ logging.info('Compiling model...')
+ model = torch.compile(original_model)
+
+ if 'train' not in data:
+ # If using int8, convert to inference mode.
+ if args.use_bnb_linear is not None:
+ from open_clip.utils import convert_int8_model_to_inference_mode
+ convert_int8_model_to_inference_mode(model)
+ # Evaluate.
+ evaluate(model, data, start_epoch, args, tb_writer=writer, tokenizer=tokenizer)
+ return
+
+ loss = create_loss(args)
+
+ for epoch in range(start_epoch, args.epochs):
+ if is_master(args):
+ logging.info(f'Start epoch {epoch}')
+
+ train_one_epoch(model, data, loss, epoch, optimizer, scaler, scheduler, dist_model, args, tb_writer=writer)
+ completed_epoch = epoch + 1
+
+ if any(v in data for v in ('val', 'imagenet-val', 'imagenet-v2')):
+ evaluate(model, data, completed_epoch, args, tb_writer=writer, tokenizer=tokenizer)
+
+ # Saving checkpoints.
+ if args.save_logs:
+ checkpoint_dict = {
+ "epoch": completed_epoch,
+ "name": args.name,
+ "state_dict": original_model.state_dict(),
+ "optimizer": optimizer.state_dict(),
+ }
+ if scaler is not None:
+ checkpoint_dict["scaler"] = scaler.state_dict()
+
+ if completed_epoch == args.epochs or (
+ args.save_frequency > 0 and (completed_epoch % args.save_frequency) == 0
+ ):
+ torch.save(
+ checkpoint_dict,
+ os.path.join(args.checkpoint_path, f"epoch_{completed_epoch}.pt"),
+ )
+ if args.delete_previous_checkpoint:
+ previous_checkpoint = os.path.join(args.checkpoint_path, f"epoch_{completed_epoch - 1}.pt")
+ if os.path.exists(previous_checkpoint):
+ os.remove(previous_checkpoint)
+
+ if args.save_most_recent:
+ # try not to corrupt the latest checkpoint if save fails
+ tmp_save_path = os.path.join(args.checkpoint_path, "tmp.pt")
+ latest_save_path = os.path.join(args.checkpoint_path, LATEST_CHECKPOINT_NAME)
+ torch.save(checkpoint_dict, tmp_save_path)
+ os.replace(tmp_save_path, latest_save_path)
+
+ if args.wandb and is_master(args):
+ wandb.finish()
+
+ # run a final sync.
+ if remote_sync_process is not None:
+ logging.info('Final remote sync.')
+ remote_sync_process.terminate()
+ result = remote_sync(
+ os.path.join(args.logs, args.name),
+ os.path.join(args.remote_sync, args.name),
+ args.remote_sync_protocol
+ )
+ if result:
+ logging.info('Final remote sync successful.')
+ else:
+ logging.info('Final remote sync failed.')
+
+
+def copy_codebase(args):
+ from shutil import copytree, ignore_patterns
+ new_code_path = os.path.join(args.logs, args.name, "code")
+ if os.path.exists(new_code_path):
+ print(
+ f"Error. Experiment already exists at {new_code_path}. Use --name to specify a new experiment."
+ )
+ return -1
+ print(f"Copying codebase to {new_code_path}")
+ current_code_path = os.path.realpath(__file__)
+ for _ in range(3):
+ current_code_path = os.path.dirname(current_code_path)
+ copytree(current_code_path, new_code_path, ignore=ignore_patterns('log', 'logs', 'wandb'))
+ print("Done copying code.")
+ return 1
+
+
+if __name__ == "__main__":
+ main(sys.argv[1:])
diff --git a/evalkit_tf437/lib/python3.10/site-packages/open_clip_train/params.py b/evalkit_tf437/lib/python3.10/site-packages/open_clip_train/params.py
new file mode 100644
index 0000000000000000000000000000000000000000..2d94b7e2159c46ec5a414ebd1d7bfd0751e82251
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/open_clip_train/params.py
@@ -0,0 +1,476 @@
+import argparse
+import ast
+
+
+def get_default_params(model_name):
+ # Params from paper (https://arxiv.org/pdf/2103.00020.pdf)
+ model_name = model_name.lower()
+ if "vit" in model_name:
+ return {"lr": 5.0e-4, "beta1": 0.9, "beta2": 0.98, "eps": 1.0e-6}
+ else:
+ return {"lr": 5.0e-4, "beta1": 0.9, "beta2": 0.999, "eps": 1.0e-8}
+
+
+class ParseKwargs(argparse.Action):
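+    """argparse action that parses KEY=VALUE pairs into a dict, literal-eval'ing values where possible."""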
+ def __call__(self, parser, namespace, values, option_string=None):
+ kw = {}
+ for value in values:
+ key, value = value.split('=')
+ try:
+ kw[key] = ast.literal_eval(value)
+ except ValueError:
+ kw[key] = str(value) # fallback to string (avoid need to escape on command line)
+ setattr(namespace, self.dest, kw)
+
+
+def parse_args(args):
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ "--train-data",
+ type=str,
+ default=None,
+ help="Path to file(s) with training data. When using webdataset, multiple datasources can be combined using the `::` separator.",
+ )
+ parser.add_argument(
+ "--train-data-upsampling-factors",
+ type=str,
+ default=None,
+ help=(
+ "When using multiple data sources with webdataset and sampling with replacement, this can be used to upsample specific data sources. "
+ "Similar to --train-data, this should be a string with as many numbers as there are data sources, separated by `::` (e.g. 1::2::0.5) "
+ "By default, datapoints are sampled uniformly regardless of the dataset sizes."
+ )
+ )
+ parser.add_argument(
+ "--val-data",
+ type=str,
+ default=None,
+ help="Path to file(s) with validation data",
+ )
+ parser.add_argument(
+ "--train-num-samples",
+ type=int,
+ default=None,
+ help="Number of samples in dataset. Required for webdataset if not available in info file.",
+ )
+ parser.add_argument(
+ "--val-num-samples",
+ type=int,
+ default=None,
+ help="Number of samples in dataset. Useful for webdataset if not available in info file.",
+ )
+ parser.add_argument(
+ "--dataset-type",
+ choices=["webdataset", "csv", "synthetic", "auto"],
+ default="auto",
+ help="Which type of dataset to process."
+ )
+ parser.add_argument(
+ "--dataset-resampled",
+ default=False,
+ action="store_true",
+ help="Whether to use sampling with replacement for webdataset shard selection."
+ )
+ parser.add_argument(
+ "--csv-separator",
+ type=str,
+ default="\t",
+ help="For csv-like datasets, which separator to use."
+ )
+ parser.add_argument(
+ "--csv-img-key",
+ type=str,
+ default="filepath",
+ help="For csv-like datasets, the name of the key for the image paths."
+ )
+ parser.add_argument(
+ "--csv-caption-key",
+ type=str,
+ default="title",
+ help="For csv-like datasets, the name of the key for the captions."
+ )
+ parser.add_argument(
+ "--imagenet-val",
+ type=str,
+ default=None,
+ help="Path to imagenet val set for conducting zero shot evaluation.",
+ )
+ parser.add_argument(
+ "--imagenet-v2",
+ type=str,
+ default=None,
+ help="Path to imagenet v2 for conducting zero shot evaluation.",
+ )
+ parser.add_argument(
+ "--cache-dir",
+ type=str,
+ default=None,
+ help="Override system default cache path for model & tokenizer file downloads.",
+ )
+ parser.add_argument(
+ "--logs",
+ type=str,
+ default="./logs/",
+ help="Where to store tensorboard logs. Use None to avoid storing logs.",
+ )
+ parser.add_argument(
+ "--log-local",
+ action="store_true",
+ default=False,
+ help="log files on local master, otherwise global master only.",
+ )
+ parser.add_argument(
+ "--name",
+ type=str,
+ default=None,
+ help="Optional identifier for the experiment when storing logs. Otherwise use current time.",
+ )
+ parser.add_argument(
+ "--workers", type=int, default=4, help="Number of dataloader workers per GPU."
+ )
+ parser.add_argument(
+ "--batch-size", type=int, default=64, help="Batch size per GPU."
+ )
+ parser.add_argument(
+ "--epochs", type=int, default=32, help="Number of epochs to train for."
+ )
+ parser.add_argument(
+ "--epochs-cooldown", type=int, default=None,
+ help="When scheduler w/ cooldown used, perform cooldown from total_epochs - cooldown_epochs onwards."
+ )
+ parser.add_argument("--lr", type=float, default=None, help="Learning rate.")
+ parser.add_argument("--beta1", type=float, default=None, help="Adam beta 1.")
+ parser.add_argument("--beta2", type=float, default=None, help="Adam beta 2.")
+ parser.add_argument("--eps", type=float, default=None, help="Adam epsilon.")
+ parser.add_argument("--wd", type=float, default=0.2, help="Weight decay.")
+ parser.add_argument(
+ "--warmup", type=int, default=10000, help="Number of steps to warmup for."
+ )
+ parser.add_argument(
+ "--use-bn-sync",
+ default=False,
+ action="store_true",
+ help="Whether to use batch norm sync.")
+ parser.add_argument(
+ "--skip-scheduler",
+ action="store_true",
+ default=False,
+ help="Use this flag to skip the learning rate decay.",
+ )
+ parser.add_argument(
+ "--lr-scheduler",
+ type=str,
+ default='cosine',
+ help="LR scheduler. One of: 'cosine', 'const' (constant), 'const-cooldown' (constant w/ cooldown). Default: cosine",
+ )
+ parser.add_argument(
+ "--lr-cooldown-end", type=float, default=0.0,
+ help="End learning rate for cooldown schedule. Default: 0"
+ )
+ parser.add_argument(
+ "--lr-cooldown-power", type=float, default=1.0,
+ help="Power for polynomial cooldown schedule. Default: 1.0 (linear decay)"
+ )
+ parser.add_argument(
+ "--save-frequency", type=int, default=1, help="How often to save checkpoints."
+ )
+ parser.add_argument(
+ "--save-most-recent",
+ action="store_true",
+ default=False,
+ help="Always save the most recent model trained to epoch_latest.pt.",
+ )
+ parser.add_argument(
+ "--zeroshot-frequency", type=int, default=2, help="How often to run zero shot."
+ )
+ parser.add_argument(
+ "--val-frequency", type=int, default=1, help="How often to run evaluation with val data."
+ )
+ parser.add_argument(
+ "--resume",
+ default=None,
+ type=str,
+ help="path to latest checkpoint (default: none)",
+ )
+ parser.add_argument(
+ "--precision",
+ choices=["amp", "amp_bf16", "amp_bfloat16", "bf16", "fp16", "pure_bf16", "pure_fp16", "fp32"],
+ default="amp",
+ help="Floating point precision."
+ )
+ parser.add_argument(
+ "--model",
+ type=str,
+ default="RN50",
+ help="Name of the vision backbone to use.",
+ )
+ parser.add_argument(
+ "--pretrained",
+ default='',
+ type=str,
+ help="Use a pretrained CLIP model weights with the specified tag or file path.",
+ )
+ parser.add_argument(
+ "--pretrained-image",
+ default=False,
+ action='store_true',
+ help="Load imagenet pretrained weights for image tower backbone if available.",
+ )
+ parser.add_argument(
+ "--lock-image",
+ default=False,
+ action='store_true',
+ help="Lock full image tower by disabling gradients.",
+ )
+ parser.add_argument(
+ "--lock-image-unlocked-groups",
+ type=int,
+ default=0,
+ help="Leave last n image tower layer groups unlocked.",
+ )
+ parser.add_argument(
+ "--lock-image-freeze-bn-stats",
+ default=False,
+ action='store_true',
+ help="Freeze BatchNorm running stats in image tower for any locked layers.",
+ )
+ parser.add_argument(
+ '--image-mean', type=float, nargs='+', default=None, metavar='MEAN',
+ help='Override default image mean value of dataset')
+ parser.add_argument(
+ '--image-std', type=float, nargs='+', default=None, metavar='STD',
+        help='Override default image std deviation of dataset')
+ parser.add_argument(
+ '--image-interpolation',
+ default=None, type=str, choices=['bicubic', 'bilinear', 'random'],
+ help="Override default image resize interpolation"
+ )
+ parser.add_argument(
+ '--image-resize-mode',
+ default=None, type=str, choices=['shortest', 'longest', 'squash'],
+ help="Override default image resize (& crop) mode during inference"
+ )
+ parser.add_argument('--aug-cfg', nargs='*', default={}, action=ParseKwargs)
+ parser.add_argument(
+ "--grad-checkpointing",
+ default=False,
+ action='store_true',
+ help="Enable gradient checkpointing.",
+ )
+ parser.add_argument(
+ "--local-loss",
+ default=False,
+ action="store_true",
+ help="calculate loss w/ local features @ global (instead of realizing full global @ global matrix)"
+ )
+ parser.add_argument(
+ "--gather-with-grad",
+ default=False,
+ action="store_true",
+ help="enable full distributed gradient for feature gather"
+ )
+ parser.add_argument(
+ '--force-image-size', type=int, nargs='+', default=None,
+ help='Override default image size'
+ )
+ parser.add_argument(
+ "--force-quick-gelu",
+ default=False,
+ action='store_true',
+ help="Force use of QuickGELU activation for non-OpenAI transformer models.",
+ )
+ parser.add_argument(
+ "--force-patch-dropout",
+ default=None,
+ type=float,
+ help="Override the patch dropout during training, for fine tuning with no dropout near the end as in the paper",
+ )
+ parser.add_argument(
+ "--force-custom-text",
+ default=False,
+ action='store_true',
+ help="Force use of CustomTextCLIP model (separate text-tower).",
+ )
+ parser.add_argument(
+ "--torchscript",
+ default=False,
+ action='store_true',
+ help="torch.jit.script the model, also uses jit version of OpenAI models if pretrained=='openai'",
+ )
+ parser.add_argument(
+ "--torchcompile",
+ default=False,
+ action='store_true',
+ help="torch.compile() the model, requires pytorch 2.0 or later.",
+ )
+ parser.add_argument(
+ "--trace",
+ default=False,
+ action='store_true',
+ help="torch.jit.trace the model for inference / eval only",
+ )
+ parser.add_argument(
+        "--accum-freq", type=int, default=1, help="Update the model every --accum-freq steps."
+ )
+ parser.add_argument(
+ "--device", default="cuda", type=str, help="Accelerator to use."
+ )
+ # arguments for distributed training
+ parser.add_argument(
+ "--dist-url",
+ default=None,
+ type=str,
+ help="url used to set up distributed training",
+ )
+ parser.add_argument(
+ "--dist-backend",
+ default=None,
+ type=str,
+ help="distributed backend. \"nccl\" for GPU, \"hccl\" for Ascend NPU"
+ )
+ parser.add_argument(
+ "--report-to",
+ default='',
+ type=str,
+ help="Options are ['wandb', 'tensorboard', 'wandb,tensorboard']"
+ )
+ parser.add_argument(
+ "--wandb-notes",
+ default='',
+ type=str,
+ help="Notes if logging with wandb"
+ )
+ parser.add_argument(
+ "--wandb-project-name",
+ type=str,
+ default='open-clip',
+ help="Name of the project if logging with wandb.",
+ )
+ parser.add_argument(
+ "--debug",
+ default=False,
+ action="store_true",
+ help="If true, more information is logged."
+ )
+ parser.add_argument(
+ "--copy-codebase",
+ default=False,
+ action="store_true",
+ help="If true, we copy the entire base on the log directory, and execute from there."
+        help="If true, we copy the entire codebase to the log directory, and execute from there."
+ parser.add_argument(
+ "--horovod",
+ default=False,
+ action="store_true",
+ help="Use horovod for distributed training."
+ )
+ parser.add_argument(
+ "--ddp-static-graph",
+ default=False,
+ action='store_true',
+ help="Enable static graph optimization for DDP in PyTorch >= 1.11.",
+ )
+ parser.add_argument(
+ "--no-set-device-rank",
+ default=False,
+ action="store_true",
+ help="Don't set device index from local rank (when CUDA_VISIBLE_DEVICES restricted to one per proc)."
+ )
+ parser.add_argument(
+ "--seed", type=int, default=0, help="Default random seed."
+ )
+ parser.add_argument(
+ "--grad-clip-norm", type=float, default=None, help="Gradient clip."
+ )
+ parser.add_argument(
+ "--lock-text",
+ default=False,
+ action='store_true',
+ help="Lock full text tower by disabling gradients.",
+ )
+ parser.add_argument(
+ "--lock-text-unlocked-layers",
+ type=int,
+ default=0,
+        help="Leave last n text tower layers unlocked.",
+ )
+ parser.add_argument(
+ "--lock-text-freeze-layer-norm",
+ default=False,
+ action='store_true',
+ help="Freeze LayerNorm running stats in text tower for any locked layers.",
+ )
+ parser.add_argument(
+ "--log-every-n-steps",
+ type=int,
+ default=100,
+ help="Log every n steps to tensorboard/console/wandb.",
+ )
+ parser.add_argument(
+ "--coca-caption-loss-weight",
+ type=float,
+ default=2.0,
+ help="Weight assigned to caption loss in CoCa."
+ )
+ parser.add_argument(
+ "--coca-contrastive-loss-weight",
+ type=float,
+ default=1.0,
+ help="Weight assigned to contrastive loss when training CoCa."
+ )
+ parser.add_argument(
+ "--remote-sync",
+ type=str,
+ default=None,
+        help="Optionally sync with a remote path specified by this arg",
+ )
+ parser.add_argument(
+ "--remote-sync-frequency",
+ type=int,
+ default=300,
+ help="How frequently to sync to a remote directly if --remote-sync is not None.",
+ )
+ parser.add_argument(
+ "--remote-sync-protocol",
+ choices=["s3", "fsspec"],
+ default="s3",
+ help="How to do the remote sync backup if --remote-sync is not None.",
+ )
+ parser.add_argument(
+ "--delete-previous-checkpoint",
+ default=False,
+ action="store_true",
+ help="If true, delete previous checkpoint after storing a new one."
+ )
+ parser.add_argument(
+ "--distill-model",
+ default=None,
+ help='Which model arch to distill from, if any.'
+ )
+ parser.add_argument(
+ "--distill-pretrained",
+ default=None,
+ help='Which pre-trained weights to distill from, if any.'
+ )
+ parser.add_argument(
+ "--use-bnb-linear",
+ default=None,
+ help='Replace the network linear layers from the bitsandbytes library. '
+ 'Allows int8 training/inference, etc.'
+ )
+ parser.add_argument(
+ "--siglip",
+ default=False,
+ action="store_true",
+ help='Use SigLip (sigmoid) loss.'
+ )
+
+ args = parser.parse_args(args)
+
+ # If some params are not passed, we use the default values based on model name.
+ default_params = get_default_params(args.model)
+ for name, val in default_params.items():
+ if getattr(args, name) is None:
+ setattr(args, name, val)
+
+ return args
diff --git a/evalkit_tf437/lib/python3.10/site-packages/open_clip_train/profiler.py b/evalkit_tf437/lib/python3.10/site-packages/open_clip_train/profiler.py
new file mode 100644
index 0000000000000000000000000000000000000000..d6521d1f00d76df484cee15a85139289719a83dd
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/open_clip_train/profiler.py
@@ -0,0 +1,249 @@
+import argparse
+
+import torch
+import open_clip
+import pandas as pd
+from torch.utils.flop_counter import FlopCounterMode
+try:
+ import fvcore
+except ImportError:
+ fvcore = None
+
+parser = argparse.ArgumentParser(description='OpenCLIP Profiler')
+
+# benchmark specific args
+parser.add_argument('--model', metavar='NAME', default='',
+ help='model(s) to profile')
+parser.add_argument('--results-file', default='', type=str, metavar='FILENAME',
+ help='Output csv file for results')
+parser.add_argument('--profiler', default='torch', type=str, choices=['torch', 'fvcore'])
+parser.add_argument('--batch-size', default=1, type=int, help='Batch size for profiling')
+
+
+def profile_fvcore(
+ model,
+ image_input_size=(3, 224, 224),
+ text_input_size=(77,),
+ batch_size=1,
+ detailed=False,
+ force_cpu=False
+):
+ if force_cpu:
+ model = model.to('cpu')
+ device, dtype = next(model.parameters()).device, next(model.parameters()).dtype
+ example_image_input = torch.ones((batch_size,) + image_input_size, device=device, dtype=dtype)
+ example_text_input = torch.ones((batch_size,) + text_input_size, device=device, dtype=torch.int64)
+ fca = fvcore.nn.FlopCountAnalysis(model, (example_image_input, example_text_input))
+ aca = fvcore.nn.ActivationCountAnalysis(model, (example_image_input, example_text_input))
+ if detailed:
+ fcs = fvcore.nn.flop_count_str(fca)
+ print(fcs)
+ return fca.total() / batch_size, aca.total() / batch_size
+
+
+def profile_fvcore_text(
+ model,
+ text_input_size=(77,),
+ batch_size=1,
+ detailed=False,
+ force_cpu=False
+):
+ if force_cpu:
+ model = model.to('cpu')
+ device = next(model.parameters()).device
+ example_input = torch.ones((batch_size,) + text_input_size, device=device, dtype=torch.int64)
+ fca = fvcore.nn.FlopCountAnalysis(model, example_input)
+ aca = fvcore.nn.ActivationCountAnalysis(model, example_input)
+ if detailed:
+ fcs = fvcore.nn.flop_count_str(fca)
+ print(fcs)
+ return fca.total() / batch_size, aca.total() / batch_size
+
+
+def profile_fvcore_image(
+ model,
+ image_input_size=(3, 224, 224),
+ batch_size=1,
+ detailed=False,
+ force_cpu=False
+):
+ if force_cpu:
+ model = model.to('cpu')
+ device, dtype = next(model.parameters()).device, next(model.parameters()).dtype
+ example_input = torch.ones((batch_size,) + image_input_size, device=device, dtype=dtype)
+ fca = fvcore.nn.FlopCountAnalysis(model, example_input)
+ aca = fvcore.nn.ActivationCountAnalysis(model, example_input)
+ if detailed:
+ fcs = fvcore.nn.flop_count_str(fca)
+ print(fcs)
+ return fca.total() / batch_size, aca.total() / batch_size
+
+
+def profile_torch_image(model, image_input_size, batch_size=1, force_cpu=False):
+ """Profile the image encoder using torch.utils.flop_counter"""
+ if force_cpu:
+ model = model.to('cpu')
+ device, dtype = next(model.parameters()).device, next(model.parameters()).dtype
+ example_input = torch.ones((batch_size,) + image_input_size, device=device, dtype=dtype)
+
+ flop_counter = FlopCounterMode()
+ with flop_counter:
+ model(example_input)
+ total_flops = sum(flop_counter.get_flop_counts()['Global'].values())
+ return total_flops / batch_size
+
+
+def profile_torch_text(model, text_input_size, batch_size=1, force_cpu=False):
+ """Profile the text encoder using torch.utils.flop_counter"""
+ if force_cpu:
+ model = model.to('cpu')
+ device = next(model.parameters()).device
+ example_input = torch.ones((batch_size,) + text_input_size, device=device, dtype=torch.int64)
+
+ flop_counter = FlopCounterMode()
+ with flop_counter:
+ model(example_input)
+ total_flops = sum(flop_counter.get_flop_counts()['Global'].values())
+ return total_flops / batch_size
+
+
+def profile_torch(model, text_input_size, image_input_size, batch_size=1, force_cpu=False):
+ """Profile the full model using torch.utils.flop_counter"""
+ if force_cpu:
+ model = model.to('cpu')
+ device, dtype = next(model.parameters()).device, next(model.parameters()).dtype
+ image_input = torch.ones((batch_size,) + image_input_size, device=device, dtype=dtype)
+ text_input = torch.ones((batch_size,) + text_input_size, device=device, dtype=torch.int64)
+
+ flop_counter = FlopCounterMode()
+ with flop_counter:
+ model(image_input, text_input)
+ total_flops = sum(flop_counter.get_flop_counts()['Global'].values())
+ return total_flops / batch_size
+
+
+def count_params(model):
+ return sum(m.numel() for m in model.parameters())
+
+def profile_model(model_name, batch_size=1, profiler='torch', device="cuda"):
+ assert profiler in ['torch', 'fvcore'], 'Only torch and fvcore profilers are supported'
+ if profiler == 'fvcore':
+ assert fvcore is not None, 'Please install fvcore.'
+ model = open_clip.create_model(model_name, force_custom_text=True, pretrained_hf=False)
+ model.eval()
+
+ if torch.cuda.is_available():
+ model = model.cuda()
+ elif device == "npu" and torch.npu.is_available():
+ model = model.npu()
+
+ if isinstance(model.visual.image_size, (tuple, list)):
+ image_input_size = (3,) + tuple(model.visual.image_size[-2:])
+ else:
+ image_input_size = (3, model.visual.image_size, model.visual.image_size)
+
+ text_input_size = (77,)
+ if hasattr(model, 'context_length') and model.context_length:
+ text_input_size = (model.context_length,)
+
+ results = {}
+ results['model'] = model_name
+ results['image_size'] = image_input_size[1]
+
+ model_cfg = open_clip.get_model_config(model_name)
+ if model_cfg:
+ vision_cfg = open_clip.CLIPVisionCfg(**model_cfg['vision_cfg'])
+ text_cfg = open_clip.CLIPTextCfg(**model_cfg['text_cfg'])
+ results['image_width'] = int(vision_cfg.width)
+ results['text_width'] = int(text_cfg.width)
+ results['embed_dim'] = int(model_cfg['embed_dim'])
+ else:
+ results['image_width'] = 0
+ results['text_width'] = 0
+ results['embed_dim'] = 0
+
+ retries = 2
+ while retries:
+ retries -= 1
+ try:
+ results['mparams'] = round(count_params(model) / 1e6, 2)
+ results['image_mparams'] = round(count_params(model.visual) / 1e6, 2)
+ results['text_mparams'] = round(count_params(model.text) / 1e6, 2)
+
+ if profiler == 'fvcore':
+ macs, acts = profile_fvcore(
+ model, image_input_size=image_input_size, text_input_size=text_input_size, force_cpu=not retries, batch_size=batch_size)
+
+ image_macs, image_acts = profile_fvcore_image(
+ model.visual, image_input_size=image_input_size, force_cpu=not retries, batch_size=batch_size)
+
+ text_macs, text_acts = profile_fvcore_text(
+ model.text, text_input_size=text_input_size, force_cpu=not retries, batch_size=batch_size)
+
+ results['gmacs'] = round(macs / 1e9, 2)
+ results['macts'] = round(acts / 1e6, 2)
+
+ results['image_gmacs'] = round(image_macs / 1e9, 2)
+ results['image_macts'] = round(image_acts / 1e6, 2)
+
+ results['text_gmacs'] = round(text_macs / 1e9, 2)
+ results['text_macts'] = round(text_acts / 1e6, 2)
+ elif profiler == 'torch':
+ image_flops = profile_torch_image(
+ model.visual, image_input_size=image_input_size, force_cpu=not retries, batch_size=batch_size)
+ text_flops = profile_torch_text(
+ model.text, text_input_size=text_input_size, force_cpu=not retries, batch_size=batch_size)
+ total_flops = profile_torch(
+ model, image_input_size=image_input_size, text_input_size=text_input_size, force_cpu=not retries, batch_size=batch_size)
+
+ results['gflops'] = round(total_flops / 1e9, 2)
+ results['image_gflops'] = round(image_flops / 1e9, 2)
+ results['text_gflops'] = round(text_flops / 1e9, 2)
+
+        except RuntimeError as e:
+            pass  # profiling failed (e.g. GPU out of memory); a remaining retry falls back to CPU via force_cpu
+ return results
+
+
+def main():
+ args = parser.parse_args()
+
+ # FIXME accept a text file name to allow lists of models in txt/csv
+ if args.model == 'all':
+ parsed_model = open_clip.list_models()
+ else:
+ parsed_model = args.model.split(',')
+
+ results = []
+ models_with_errors = []
+ for m in parsed_model:
+ print('='*100)
+ print(f'Profiling {m}')
+ try:
+ row = profile_model(m, batch_size=args.batch_size, profiler=args.profiler, device=args.device)
+ results.append(row)
+ except Exception as e:
+ print(f'Error profiling {m}: {e}')
+ import traceback
+ traceback.print_exc()
+ models_with_errors.append(m)
+
+ df = pd.DataFrame(results, columns=results[0].keys())
+
+ if 'gmacs' in df.columns:
+ df = df.sort_values(by=['gmacs', 'mparams', 'model'])
+ else:
+ df = df.sort_values(by=['gflops', 'mparams', 'model'])
+
+ print('='*100)
+ print('Done.')
+ print(df)
+ if args.results_file:
+ df.to_csv(args.results_file, index=False)
+
+ if models_with_errors:
+ print('Models with errors:', models_with_errors)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/evalkit_tf437/lib/python3.10/site-packages/open_clip_train/scheduler.py b/evalkit_tf437/lib/python3.10/site-packages/open_clip_train/scheduler.py
new file mode 100644
index 0000000000000000000000000000000000000000..fba76fcf1720b11d136a5ab6d3a58ab2fbe42f74
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/open_clip_train/scheduler.py
@@ -0,0 +1,53 @@
+import numpy as np
+
+
+def assign_learning_rate(optimizer, new_lr):
+ for param_group in optimizer.param_groups:
+ param_group["lr"] = new_lr
+
+
+def _warmup_lr(base_lr, warmup_length, step):
+ return base_lr * (step + 1) / warmup_length
+
+
+def const_lr(optimizer, base_lr, warmup_length, steps):
+ def _lr_adjuster(step):
+ if step < warmup_length:
+ lr = _warmup_lr(base_lr, warmup_length, step)
+ else:
+ lr = base_lr
+ assign_learning_rate(optimizer, lr)
+ return lr
+ return _lr_adjuster
+
+
+def const_lr_cooldown(optimizer, base_lr, warmup_length, steps, cooldown_steps, cooldown_power=1.0, cooldown_end_lr=0.):
+ def _lr_adjuster(step):
+ start_cooldown_step = steps - cooldown_steps
+ if step < warmup_length:
+ lr = _warmup_lr(base_lr, warmup_length, step)
+ else:
+ if step < start_cooldown_step:
+ lr = base_lr
+ else:
+ e = step - start_cooldown_step
+ es = steps - start_cooldown_step
+ # linear decay if power == 1; polynomial decay otherwise;
+ decay = (1 - (e/es)) ** cooldown_power
+ lr = decay * (base_lr - cooldown_end_lr) + cooldown_end_lr
+ assign_learning_rate(optimizer, lr)
+ return lr
+ return _lr_adjuster
+
+
+def cosine_lr(optimizer, base_lr, warmup_length, steps):
+ def _lr_adjuster(step):
+ if step < warmup_length:
+ lr = _warmup_lr(base_lr, warmup_length, step)
+ else:
+ e = step - warmup_length
+ es = steps - warmup_length
+ lr = 0.5 * (1 + np.cos(np.pi * e / es)) * base_lr
+ assign_learning_rate(optimizer, lr)
+ return lr
+ return _lr_adjuster
diff --git a/evalkit_tf437/lib/python3.10/site-packages/open_clip_train/train.py b/evalkit_tf437/lib/python3.10/site-packages/open_clip_train/train.py
new file mode 100644
index 0000000000000000000000000000000000000000..20d4dd0e944b4986164f835b3db73a091606c978
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/open_clip_train/train.py
@@ -0,0 +1,384 @@
+import json
+import logging
+import math
+import os
+import time
+
+import numpy as np
+import torch
+import torch.nn.functional as F
+from torch.nn.parallel.distributed import DistributedDataParallel
+
+try:
+ import wandb
+except ImportError:
+ wandb = None
+
+from open_clip import get_input_dtype, CLIP, CustomTextCLIP
+from open_clip_train.distributed import is_master
+from open_clip_train.zero_shot import zero_shot_eval
+from open_clip_train.precision import get_autocast
+
+
+class AverageMeter(object):
+ """Computes and stores the average and current value"""
+
+ def __init__(self):
+ self.reset()
+
+ def reset(self):
+ self.val = 0
+ self.avg = 0
+ self.sum = 0
+ self.count = 0
+
+ def update(self, val, n=1):
+ self.val = val
+ self.sum += val * n
+ self.count += n
+ self.avg = self.sum / self.count
+
+
+def postprocess_clip_output(model_out):
+ return {
+ "image_features": model_out[0],
+ "text_features": model_out[1],
+ "logit_scale": model_out[2]
+ }
+
+
+def unwrap_model(model):
+ if hasattr(model, 'module'):
+ return model.module
+ else:
+ return model
+
+
+def backward(total_loss, scaler):
+ if scaler is not None:
+ scaler.scale(total_loss).backward()
+ else:
+ total_loss.backward()
+
+
+def train_one_epoch(model, data, loss, epoch, optimizer, scaler, scheduler, dist_model, args, tb_writer=None):
+ device = torch.device(args.device)
+ autocast = get_autocast(args.precision, device_type=device.type)
+ input_dtype = get_input_dtype(args.precision)
+
+ model.train()
+ if args.distill:
+ dist_model.eval()
+
+ data['train'].set_epoch(epoch) # set epoch in process safe manner via sampler or shared_epoch
+ dataloader = data['train'].dataloader
+ num_batches_per_epoch = dataloader.num_batches // args.accum_freq
+ sample_digits = math.ceil(math.log(dataloader.num_samples + 1, 10))
+
+ if args.accum_freq > 1:
+ accum_images, accum_texts, accum_features = [], [], {}
+
+ losses_m = {}
+ batch_time_m = AverageMeter()
+ data_time_m = AverageMeter()
+ end = time.time()
+ for i, batch in enumerate(dataloader):
+ i_accum = i // args.accum_freq
+ step = num_batches_per_epoch * epoch + i_accum
+
+ if not args.skip_scheduler:
+ scheduler(step)
+
+ images, texts = batch
+ images = images.to(device=device, dtype=input_dtype, non_blocking=True)
+ texts = texts.to(device=device, non_blocking=True)
+
+ data_time_m.update(time.time() - end)
+ optimizer.zero_grad()
+
+ if args.accum_freq == 1:
+ with autocast():
+ model_out = model(images, texts)
+ logit_scale = model_out["logit_scale"]
+ if args.distill:
+ with torch.no_grad():
+ dist_model_out = dist_model(images, texts)
+ model_out.update({f'dist_{k}': v for k, v in dist_model_out.items()})
+ losses = loss(**model_out, output_dict=True)
+
+ total_loss = sum(losses.values())
+ losses["loss"] = total_loss
+
+ backward(total_loss, scaler)
+ else:
+ # First, cache the features without any gradient tracking.
+ with torch.no_grad():
+ with autocast():
+ model_out = model(images, texts)
+
+ for f in ("logit_scale", "logit_bias"):
+ model_out.pop(f, None)
+
+ for key, val in model_out.items():
+ if key in accum_features:
+ accum_features[key].append(val)
+ else:
+ accum_features[key] = [val]
+
+ accum_images.append(images)
+ accum_texts.append(texts)
+
+ # If (i + 1) % accum_freq is not zero, move on to the next batch.
+ if ((i + 1) % args.accum_freq) > 0:
+ # FIXME this makes data time logging unreliable when accumulating
+ continue
+
+ # Now, ready to take gradients for the last accum_freq batches.
+ # Re-do the forward pass for those batches, and use the cached features from the other batches as negatives.
+ # Call backwards each time, but only step optimizer at the end.
+ optimizer.zero_grad()
+ for j in range(args.accum_freq):
+ images = accum_images[j]
+ texts = accum_texts[j]
+ with autocast():
+ model_out = model(images, texts)
+
+ inputs_no_accum = {}
+ inputs_no_accum["logit_scale"] = logit_scale = model_out.pop("logit_scale")
+ if "logit_bias" in model_out:
+ inputs_no_accum["logit_bias"] = model_out.pop("logit_bias")
+
+ inputs = {}
+ for key, val in accum_features.items():
+ accumulated = accum_features[key]
+ inputs[key] = torch.cat(accumulated[:j] + [model_out[key]] + accumulated[j + 1:])
+
+ losses = loss(**inputs, **inputs_no_accum, output_dict=True)
+ del inputs
+ del inputs_no_accum
+ total_loss = sum(losses.values())
+ losses["loss"] = total_loss
+
+ backward(total_loss, scaler)
+
+ if scaler is not None:
+ if args.horovod:
+ optimizer.synchronize()
+ scaler.unscale_(optimizer)
+ if args.grad_clip_norm is not None:
+ torch.nn.utils.clip_grad_norm_(model.parameters(), args.grad_clip_norm, norm_type=2.0)
+ with optimizer.skip_synchronize():
+ scaler.step(optimizer)
+ else:
+ if args.grad_clip_norm is not None:
+ scaler.unscale_(optimizer)
+ torch.nn.utils.clip_grad_norm_(model.parameters(), args.grad_clip_norm, norm_type=2.0)
+ scaler.step(optimizer)
+ scaler.update()
+ else:
+ if args.grad_clip_norm is not None:
+ torch.nn.utils.clip_grad_norm_(model.parameters(), args.grad_clip_norm, norm_type=2.0)
+ optimizer.step()
+
+ # reset gradient accum, if enabled
+ if args.accum_freq > 1:
+ accum_images, accum_texts, accum_features = [], [], {}
+
+ # Note: we clamp to 4.6052 = ln(100), as in the original paper.
+ with torch.no_grad():
+ unwrap_model(model).logit_scale.clamp_(0, math.log(100))
+
+ batch_time_m.update(time.time() - end)
+ end = time.time()
+ batch_count = i_accum + 1
+ if is_master(args) and (i_accum % args.log_every_n_steps == 0 or batch_count == num_batches_per_epoch):
+ batch_size = len(images)
+ num_samples = batch_count * batch_size * args.accum_freq * args.world_size
+ samples_per_epoch = dataloader.num_samples
+ percent_complete = 100.0 * batch_count / num_batches_per_epoch
+
+ # NOTE loss is coarsely sampled, just master node and per log update
+ for key, val in losses.items():
+ if key not in losses_m:
+ losses_m[key] = AverageMeter()
+ losses_m[key].update(val.item(), batch_size)
+
+ logit_scale_scalar = logit_scale.item()
+ loss_log = " ".join(
+ [
+ f"{loss_name.capitalize()}: {loss_m.val:#.5g} ({loss_m.avg:#.5g})"
+ for loss_name, loss_m in losses_m.items()
+ ]
+ )
+ samples_per_second = args.accum_freq * args.batch_size * args.world_size / batch_time_m.val
+ samples_per_second_per_gpu = args.accum_freq * args.batch_size / batch_time_m.val
+ logging.info(
+ f"Train Epoch: {epoch} [{num_samples:>{sample_digits}}/{samples_per_epoch} ({percent_complete:.0f}%)] "
+ f"Data (t): {data_time_m.avg:.3f} "
+ f"Batch (t): {batch_time_m.avg:.3f}, {samples_per_second:#g}/s, {samples_per_second_per_gpu:#g}/s/gpu "
+ f"LR: {optimizer.param_groups[0]['lr']:5f} "
+ f"Logit Scale: {logit_scale_scalar:.3f} " + loss_log
+ )
+
+ # Save train loss / etc. Using non avg meter values as loggers have their own smoothing
+ log_data = {
+ "data_time": data_time_m.val,
+ "batch_time": batch_time_m.val,
+ "samples_per_second": samples_per_second,
+ "samples_per_second_per_gpu": samples_per_second_per_gpu,
+ "scale": logit_scale_scalar,
+ "lr": optimizer.param_groups[0]["lr"]
+ }
+ log_data.update({name:val.val for name,val in losses_m.items()})
+
+ log_data = {"train/" + name: val for name, val in log_data.items()}
+
+ if tb_writer is not None:
+ for name, val in log_data.items():
+ tb_writer.add_scalar(name, val, step)
+
+ if args.wandb:
+ assert wandb is not None, 'Please install wandb.'
+ log_data['step'] = step # for backwards compatibility
+ wandb.log(log_data, step=step)
+
+ # resetting batch / data time meters per log window
+ batch_time_m.reset()
+ data_time_m.reset()
+ # end for
+
+
+def evaluate(model, data, epoch, args, tb_writer=None, tokenizer=None):
+ metrics = {}
+ if not is_master(args):
+ return metrics
+ device = torch.device(args.device)
+ model.eval()
+
+ zero_shot_metrics = zero_shot_eval(model, data, epoch, args, tokenizer=tokenizer)
+ metrics.update(zero_shot_metrics)
+
+ autocast = get_autocast(args.precision, device_type=device.type)
+ input_dtype = get_input_dtype(args.precision)
+
+ if 'val' in data and (args.val_frequency and ((epoch % args.val_frequency) == 0 or epoch == args.epochs)):
+ dataloader = data['val'].dataloader
+ num_samples = 0
+ samples_per_val = dataloader.num_samples
+
+ # FIXME this does not scale past small eval datasets
+ # all_image_features @ all_text_features will blow up memory and compute very quickly
+ cumulative_loss = 0.0
+ cumulative_gen_loss = 0.0
+ all_image_features, all_text_features = [], []
+ with torch.inference_mode():
+ for i, batch in enumerate(dataloader):
+ images, texts = batch
+ images = images.to(device=device, dtype=input_dtype, non_blocking=True)
+ texts = texts.to(device=device, non_blocking=True)
+
+ with autocast():
+ model_out = model(images, texts)
+ image_features = model_out["image_features"]
+ text_features = model_out["text_features"]
+ logit_scale = model_out["logit_scale"]
+ # features are accumulated in CPU tensors, otherwise GPU memory exhausted quickly
+ # however, system RAM is easily exceeded and compute time becomes problematic
+ all_image_features.append(image_features.cpu())
+ all_text_features.append(text_features.cpu())
+ logit_scale = logit_scale.mean()
+ logits_per_image = logit_scale * image_features @ text_features.t()
+ logits_per_text = logits_per_image.t()
+
+ batch_size = images.shape[0]
+ labels = torch.arange(batch_size, device=device).long()
+ total_loss = (
+ F.cross_entropy(logits_per_image, labels) +
+ F.cross_entropy(logits_per_text, labels)
+ ) / 2
+
+ gen_loss = maybe_compute_generative_loss(model_out)
+
+ cumulative_loss += total_loss * batch_size
+ num_samples += batch_size
+ if is_master(args) and (i % 100) == 0:
+ logging.info(
+ f"Eval Epoch: {epoch} [{num_samples} / {samples_per_val}]\t"
+ f"Clip Loss: {cumulative_loss / num_samples:.6f}\t")
+
+ if gen_loss is not None:
+ cumulative_gen_loss += gen_loss * batch_size
+ logging.info(
+ f"Generative Loss: {cumulative_gen_loss / num_samples:.6f}\t")
+
+ val_metrics = get_clip_metrics(
+ image_features=torch.cat(all_image_features),
+ text_features=torch.cat(all_text_features),
+ logit_scale=logit_scale.cpu(),
+ )
+ loss = cumulative_loss / num_samples
+ metrics.update(
+ {**val_metrics, "clip_val_loss": loss.item(), "epoch": epoch, "num_samples": num_samples}
+ )
+ if gen_loss is not None:
+ gen_loss = cumulative_gen_loss / num_samples
+ metrics.update({"val_generative_loss": gen_loss.item()})
+
+ if not metrics:
+ return metrics
+
+ logging.info(
+ f"Eval Epoch: {epoch} "
+ + "\t".join([f"{k}: {round(v, 4):.4f}" for k, v in metrics.items()])
+ )
+
+ log_data = {"val/" + name: val for name, val in metrics.items()}
+
+ if args.save_logs:
+ if tb_writer is not None:
+ for name, val in log_data.items():
+ tb_writer.add_scalar(name, val, epoch)
+
+ with open(os.path.join(args.checkpoint_path, "results.jsonl"), "a+") as f:
+ f.write(json.dumps(metrics))
+ f.write("\n")
+
+ if args.wandb:
+ assert wandb is not None, 'Please install wandb.'
+ if 'train' in data:
+ dataloader = data['train'].dataloader
+ num_batches_per_epoch = dataloader.num_batches // args.accum_freq
+ step = num_batches_per_epoch * epoch
+ else:
+ step = None
+ log_data['epoch'] = epoch
+ wandb.log(log_data, step=step)
+
+ return metrics
+
+
+def get_clip_metrics(image_features, text_features, logit_scale):
+ metrics = {}
+ logits_per_image = (logit_scale * image_features @ text_features.t()).detach().cpu()
+ logits_per_text = logits_per_image.t().detach().cpu()
+
+ logits = {"image_to_text": logits_per_image, "text_to_image": logits_per_text}
+ ground_truth = torch.arange(len(text_features)).view(-1, 1)
+
+ for name, logit in logits.items():
+ ranking = torch.argsort(logit, descending=True)
+ preds = torch.where(ranking == ground_truth)[1]
+ preds = preds.detach().cpu().numpy()
+ metrics[f"{name}_mean_rank"] = preds.mean() + 1
+ metrics[f"{name}_median_rank"] = np.floor(np.median(preds)) + 1
+ for k in [1, 5, 10]:
+ metrics[f"{name}_R@{k}"] = np.mean(preds < k)
+
+ return metrics
+
+
+def maybe_compute_generative_loss(model_out):
+ if "logits" in model_out and "labels" in model_out:
+ token_logits = model_out["logits"]
+ token_labels = model_out["labels"]
+ return F.cross_entropy(token_logits.permute(0, 2, 1), token_labels)
diff --git a/evalkit_tf437/lib/python3.10/site-packages/open_clip_train/zero_shot.py b/evalkit_tf437/lib/python3.10/site-packages/open_clip_train/zero_shot.py
new file mode 100644
index 0000000000000000000000000000000000000000..21241536528bef3bcfb6b6c61afe7e030be0a3fe
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/open_clip_train/zero_shot.py
@@ -0,0 +1,86 @@
+import logging
+
+import torch
+from tqdm import tqdm
+
+from open_clip import get_input_dtype, get_tokenizer, build_zero_shot_classifier, \
+ IMAGENET_CLASSNAMES, OPENAI_IMAGENET_TEMPLATES
+from open_clip_train.precision import get_autocast
+
+
+def accuracy(output, target, topk=(1,)):
+ pred = output.topk(max(topk), 1, True, True)[1].t()
+ correct = pred.eq(target.view(1, -1).expand_as(pred))
+ return [float(correct[:k].reshape(-1).float().sum(0, keepdim=True).cpu().numpy()) for k in topk]
+
+
+def run(model, classifier, dataloader, args):
+ device = torch.device(args.device)
+ autocast = get_autocast(args.precision, device_type=device.type)
+ input_dtype = get_input_dtype(args.precision)
+
+ with torch.inference_mode():
+ top1, top5, n = 0., 0., 0.
+ for images, target in tqdm(dataloader, unit_scale=args.batch_size):
+ images = images.to(device=device, dtype=input_dtype)
+ target = target.to(device)
+
+ with autocast():
+ # predict
+ output = model(image=images)
+ image_features = output['image_features'] if isinstance(output, dict) else output[0]
+ logits = 100. * image_features @ classifier
+
+ # measure accuracy
+ acc1, acc5 = accuracy(logits, target, topk=(1, 5))
+ top1 += acc1
+ top5 += acc5
+ n += images.size(0)
+
+ top1 = (top1 / n)
+ top5 = (top5 / n)
+ return top1, top5
+
+
+def zero_shot_eval(model, data, epoch, args, tokenizer=None):
+ if 'imagenet-val' not in data and 'imagenet-v2' not in data:
+ return {}
+ if args.zeroshot_frequency == 0:
+ return {}
+ if (epoch % args.zeroshot_frequency) != 0 and epoch != args.epochs:
+ return {}
+ if args.distributed and not args.horovod:
+ model = model.module
+
+ logging.info('Starting zero-shot imagenet.')
+ if tokenizer is None:
+ tokenizer = get_tokenizer(args.model)
+
+ logging.info('Building zero-shot classifier')
+ device = torch.device(args.device)
+ autocast = get_autocast(args.precision, device_type=device.type)
+ with autocast():
+ classifier = build_zero_shot_classifier(
+ model,
+ tokenizer=tokenizer,
+ classnames=IMAGENET_CLASSNAMES,
+ templates=OPENAI_IMAGENET_TEMPLATES,
+ num_classes_per_batch=10,
+ device=device,
+ use_tqdm=True,
+ )
+
+ logging.info('Using classifier')
+ results = {}
+ if 'imagenet-val' in data:
+ top1, top5 = run(model, classifier, data['imagenet-val'].dataloader, args)
+ results['imagenet-zeroshot-val-top1'] = top1
+ results['imagenet-zeroshot-val-top5'] = top5
+ if 'imagenet-v2' in data:
+ top1, top5 = run(model, classifier, data['imagenet-v2'].dataloader, args)
+ results['imagenetv2-zeroshot-val-top1'] = top1
+ results['imagenetv2-zeroshot-val-top5'] = top5
+
+ logging.info('Finished zero-shot imagenet.')
+
+ return results
diff --git a/evalkit_tf437/lib/python3.10/site-packages/psutil/__pycache__/_psbsd.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/psutil/__pycache__/_psbsd.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4d8d439693f52f523bbfc3dd89dbcc95ef59a60f
Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/psutil/__pycache__/_psbsd.cpython-310.pyc differ
diff --git a/evalkit_tf437/lib/python3.10/site-packages/psutil/__pycache__/_pswindows.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/psutil/__pycache__/_pswindows.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9933c0e44e899b6e211ef8d35b1091e883378bd4
Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/psutil/__pycache__/_pswindows.cpython-310.pyc differ
diff --git a/evalkit_tf437/lib/python3.10/site-packages/requests_oauthlib-2.0.0.dist-info/METADATA b/evalkit_tf437/lib/python3.10/site-packages/requests_oauthlib-2.0.0.dist-info/METADATA
new file mode 100644
index 0000000000000000000000000000000000000000..0a62eb5b6c3624745da1776899ed6d36a587b63b
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/requests_oauthlib-2.0.0.dist-info/METADATA
@@ -0,0 +1,270 @@
+Metadata-Version: 2.1
+Name: requests-oauthlib
+Version: 2.0.0
+Summary: OAuthlib authentication support for Requests.
+Home-page: https://github.com/requests/requests-oauthlib
+Author: Kenneth Reitz
+Author-email: me@kennethreitz.com
+License: ISC
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: Natural Language :: English
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.4
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.12
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Programming Language :: Python :: Implementation :: PyPy
+Requires-Python: >=3.4
+Description-Content-Type: text/x-rst
+License-File: LICENSE
+Requires-Dist: oauthlib >=3.0.0
+Requires-Dist: requests >=2.0.0
+Provides-Extra: rsa
+Requires-Dist: oauthlib[signedtoken] >=3.0.0 ; extra == 'rsa'
+
+Requests-OAuthlib |build-status| |coverage-status| |docs|
+=========================================================
+
+This project provides first-class OAuth library support for `Requests <https://requests.readthedocs.io>`_.
+
+The OAuth 1 workflow
+--------------------
+
+OAuth 1 can seem overly complicated and it sure has its quirks. Luckily,
+requests_oauthlib hides most of these and lets you focus on the task at hand.
+
+Accessing protected resources using requests_oauthlib is as simple as:
+
+.. code-block:: pycon
+
+ >>> from requests_oauthlib import OAuth1Session
+ >>> twitter = OAuth1Session('client_key',
+ client_secret='client_secret',
+ resource_owner_key='resource_owner_key',
+ resource_owner_secret='resource_owner_secret')
+ >>> url = 'https://api.twitter.com/1/account/settings.json'
+ >>> r = twitter.get(url)
+
+Before accessing resources you will need to obtain a few credentials from your
+provider (e.g. Twitter) and authorization from the user for whom you wish to
+retrieve resources. You can read all about this in the full
+`OAuth 1 workflow guide on RTD <https://requests-oauthlib.readthedocs.io/en/latest/oauth1_workflow.html>`_.
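+
+The authorization dance itself can be sketched roughly as follows (the endpoint
+URLs and credentials below are placeholders for your provider's actual values):
+
+.. code-block:: pycon
+
+    >>> from requests_oauthlib import OAuth1Session
+    >>> # placeholder endpoints; consult your provider's documentation
+    >>> oauth = OAuth1Session('client_key', client_secret='client_secret')
+    >>> tokens = oauth.fetch_request_token('https://provider.example/oauth/request_token')
+    >>> url = oauth.authorization_url('https://provider.example/oauth/authorize')
+    >>> # send the user to `url`; once they authorize you receive a verifier
+    >>> tokens = oauth.fetch_access_token('https://provider.example/oauth/access_token', verifier='verifier')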
+
+The OAuth 2 workflow
+--------------------
+
+OAuth 2 is generally simpler than OAuth 1 but comes in more flavours. The most
+common is the Authorization Code Grant, also known as the WebApplication
+flow.
+
+Fetching a protected resource after obtaining an access token can be extremely
+simple. However, before accessing resources you will need to obtain a few
+credentials from your provider (e.g. Google) and authorization from the user
+for whom you wish to retrieve resources. You can read all about this in the
+full `OAuth 2 workflow guide on RTD <https://requests-oauthlib.readthedocs.io/en/latest/oauth2_workflow.html>`_.
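+
+For example, fetching a protected resource with an already-obtained token can be
+sketched as follows (the client id, token value, and URL are placeholders):
+
+.. code-block:: pycon
+
+    >>> from requests_oauthlib import OAuth2Session
+    >>> # placeholder credentials and endpoint for illustration only
+    >>> token = {'access_token': 'stored_access_token', 'token_type': 'Bearer'}
+    >>> provider = OAuth2Session('your_client_id', token=token)
+    >>> r = provider.get('https://provider.example/api/resource')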
+
+Installation
+-------------
+
+To install requests and requests_oauthlib you can use pip:
+
+.. code-block:: bash
+
+ pip install requests requests-oauthlib
+
+.. |build-status| image:: https://github.com/requests/requests-oauthlib/actions/workflows/run-tests.yml/badge.svg
+ :target: https://github.com/requests/requests-oauthlib/actions
+.. |coverage-status| image:: https://img.shields.io/coveralls/requests/requests-oauthlib.svg
+ :target: https://coveralls.io/r/requests/requests-oauthlib
+.. |docs| image:: https://readthedocs.org/projects/requests-oauthlib/badge/
+ :alt: Documentation Status
+ :scale: 100%
+ :target: https://requests-oauthlib.readthedocs.io/
+
+
+History
+-------
+
+v2.0.0 (22 March 2024)
+++++++++++++++++++++++++
+
+The full set of changes is available on `GitHub <https://github.com/requests/requests-oauthlib/milestone/4?closed=1>`_.
+
+Additions & changes:
+
+- ``OAuth2Session`` now correctly uses the ``self.verify`` value if ``verify``
+ is not overridden in ``fetch_token`` and ``refresh_token``. Fixes `#404
+ `_.
+- ``OAuth2Session`` constructor now uses its ``client.scope`` when a ``client``
+ is provided and ``scope`` is not overridden. Fixes `#408
+ `_
+- Add ``refresh_token_request`` and ``access_token_request`` compliance hooks
+- Add PKCE support and Auth0 example
+- Add support for Python 3.8-3.12
+- Remove support of Python 2.x, <3.7
+- Migrated to Github Action
+- Updated dependencies
+- Cleanup some docs and examples
+
+v1.4.0 (27 Feb 2024)
+++++++++++++++++++++++++
+
+- Version 2.0.0 was initially published as 1.4.0; it was eventually yanked.
+
+v1.3.1 (21 January 2022)
+++++++++++++++++++++++++
+
+- Add initial support for OAuth Mutual TLS (draft-ietf-oauth-mtls)
+- Removed outdated LinkedIn Compliance Fixes
+- Add eBay compliance fix
+- Add Spotify OAuth 2 Tutorial
+- Add support for python 3.8, 3.9
+- Fixed LinkedIn Compliance Fixes
+- Fixed ReadTheDocs Documentation and sphinx errors
+- Moved pipeline to GitHub Actions
+
+v1.3.0 (6 November 2019)
+++++++++++++++++++++++++
+
+- Instagram compliance fix
+- Added ``force_querystring`` argument to fetch_token() method on OAuth2Session
+
+v1.2.0 (14 January 2019)
+++++++++++++++++++++++++
+
+- This project now depends on OAuthlib 3.0.0 and above. It does **not** support
+ versions of OAuthlib before 3.0.0.
+- Updated oauth2 tests to use 'sess' for an OAuth2Session instance instead of `auth`
+  because OAuth2Session objects and methods accept an `auth` parameter which is
+ typically an instance of `requests.auth.HTTPBasicAuth`
+- `OAuth2Session.fetch_token` previously tried to guess how and where to provide
+ "client" and "user" credentials incorrectly. This was incompatible with some
+ OAuth servers and incompatible with breaking changes in oauthlib that seek to
+ correctly provide the `client_id`. The older implementation also did not raise
+ the correct exceptions when username and password are not present on Legacy
+ clients.
+- Avoid automatic netrc authentication for OAuth2Session.
+
+v1.1.0 (9 January 2019)
++++++++++++++++++++++++
+
+- Adjusted version specifier for ``oauthlib`` dependency: this project is
+ not yet compatible with ``oauthlib`` 3.0.0.
+- Dropped dependency on ``nose``.
+- Minor changes to clean up the code and make it more readable/maintainable.
+
+v1.0.0 (4 June 2018)
+++++++++++++++++++++
+
+- **Removed support for Python 2.6 and Python 3.3.**
+ This project now supports Python 2.7, and Python 3.4 and above.
+- Added several examples to the documentation.
+- Added plentymarkets compliance fix.
+- Added a ``token`` property to OAuth1Session, to match the corresponding
+ ``token`` property on OAuth2Session.
+
+v0.8.0 (14 February 2017)
++++++++++++++++++++++++++
+
+- Added Fitbit compliance fix.
+- Fixed an issue where newlines in the response body for the access token
+ request would cause errors when trying to extract the token.
+- Fixed an issue introduced in v0.7.0 where users passing ``auth`` to several
+ methods would encounter conflicts with the ``client_id`` and
+ ``client_secret``-derived auth. The user-supplied ``auth`` argument is now
+ used in preference to those options.
+
+v0.7.0 (22 September 2016)
+++++++++++++++++++++++++++
+
+- Allowed ``OAuth2Session.request`` to take the ``client_id`` and
+ ``client_secret`` parameters for the purposes of automatic token refresh,
+ which may need them.
+
+v0.6.2 (12 July 2016)
++++++++++++++++++++++
+
+- Use ``client_id`` and ``client_secret`` for the Authorization header if
+ provided.
+- Allow explicit bypass of the Authorization header by setting ``auth=False``.
+- Pass through the ``proxies`` kwarg when refreshing tokens.
+- Miscellaneous cleanups.
+
+v0.6.1 (19 February 2016)
++++++++++++++++++++++++++
+
+- Fixed a bug when sending authorization in headers with no username and
+ password present.
+- Make sure we clear the session token before obtaining a new one.
+- Some improvements to the Slack compliance fix.
+- Avoid timing problems around token refresh.
+- Allow passing arbitrary arguments to requests when calling
+ ``fetch_request_token`` and ``fetch_access_token``.
+
+v0.6.0 (14 December 2015)
++++++++++++++++++++++++++
+
+- Add compliance fix for Slack.
+- Add compliance fix for Mailchimp.
+- ``TokenRequestDenied`` exceptions now carry the entire response, not just the
+ status code.
+- Pass through keyword arguments when refreshing tokens automatically.
+- Send authorization in headers, not just body, to maximize compatibility.
+- More getters/setters available for OAuth2 session client values.
+- Allow sending custom headers when refreshing tokens, and set some defaults.
+
+
+v0.5.0 (4 May 2015)
++++++++++++++++++++
+- Fix ``TypeError`` being raised instead of ``TokenMissing`` error.
+- Raise requests exceptions on 4XX and 5XX responses in the OAuth2 flow.
+- Avoid ``AttributeError`` when initializing the ``OAuth2Session`` class
+ without complete client information.
+
+v0.4.2 (16 October 2014)
+++++++++++++++++++++++++
+- New ``authorized`` property on OAuth1Session and OAuth2Session, which allows
+ you to easily determine if the session is already authorized with OAuth tokens
+ or not.
+- New ``TokenMissing`` and ``VerifierMissing`` exception classes for OAuth1Session:
+ this will make it easier to catch and identify these exceptions.
+
+v0.4.1 (6 June 2014)
+++++++++++++++++++++
+- New install target ``[rsa]`` for people using OAuth1 RSA-SHA1 signature
+ method.
+- Fixed bug in OAuth2 where supplied state param was not used in auth url.
+- OAuth2 HTTPS checking can be disabled by setting environment variable
+ ``OAUTHLIB_INSECURE_TRANSPORT``.
+- OAuth1 now re-authorizes upon redirects.
+- OAuth1 token fetching now raises a detailed error message when the
+ response body is incorrectly encoded or the request was denied.
+- Added support for custom OAuth1 clients.
+- OAuth2 compliance fix for Sina Weibo.
+- Multiple fixes to facebook compliance fix.
+- Compliance fixes now re-encode body properly as bytes in Python 3.
+- Logging now properly done under ``requests_oauthlib`` namespace instead
+ of piggybacking on oauthlib namespace.
+- Logging introduced for OAuth1 auth and session.
+
+v0.4.0 (29 September 2013)
+++++++++++++++++++++++++++
+- OAuth1Session methods only return unicode strings. #55.
+- Renamed requests_oauthlib.core to requests_oauthlib.oauth1_auth for consistency. #79.
+- Added Facebook compliance fix and access_token_response hook to OAuth2Session. #63.
+- Added LinkedIn compliance fix.
+- Added refresh_token_response compliance hook, invoked before parsing the refresh token.
+- Correctly limit compliance hooks to running only once!
+- Content type guessing should only be done when no content type is given
+- OAuth1 now updates r.headers instead of replacing it with non case insensitive dict
+- Remove last use of Response.content (in OAuth1Session). #44.
+- State param can now be supplied in OAuth2Session.authorize_url
diff --git a/evalkit_tf437/lib/python3.10/site-packages/requests_oauthlib-2.0.0.dist-info/WHEEL b/evalkit_tf437/lib/python3.10/site-packages/requests_oauthlib-2.0.0.dist-info/WHEEL
new file mode 100644
index 0000000000000000000000000000000000000000..832be111324a83de65a3a27be4dcbdee7f5a6692
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/requests_oauthlib-2.0.0.dist-info/WHEEL
@@ -0,0 +1,6 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.43.0)
+Root-Is-Purelib: true
+Tag: py2-none-any
+Tag: py3-none-any
+